text stringlengths 8 6.05M |
|---|
# from scrapy import Item,Field
from scrapy.linkextractors import LinkExtractor
import scrapy
from ..items import book_info
#---字段定义放在了items.py中---
# class book_info(Item):
# name = Field()
# price = Field()
# rank = Field()
# ISBN = Field()
# stockamount = Field()
# reviewamount = Field()
class bookspider_moreinfo(scrapy.Spider):
    """Spider that crawls books.toscrape.com and scrapes per-book detail pages."""
    name = 'books_moreinfo'

    def start_requests(self):
        # Single seed request; dont_filter so the dupe filter never drops it.
        yield scrapy.Request(url='http://books.toscrape.com/',
                             callback=self.parse_book,
                             dont_filter=True)

    def parse_book(self, response):
        # Extract every book detail-page link on this listing page.
        # FIX: tags/attrs must be sequences -- a bare string like 'href' is
        # iterated character-by-character by LinkExtractor.
        links = LinkExtractor(restrict_css='div.image_container',
                              tags=('a',), attrs=('href',)).extract_links(response)
        for link in links:
            yield scrapy.Request(link.url, callback=self.parse_book_moreinfo)
        # Follow the "next" pagination link, if present.
        links = LinkExtractor(restrict_css='ul.pager li.next').extract_links(response)
        if links:
            yield scrapy.Request(links[0].url, callback=self.parse_book)

    def parse_book_moreinfo(self, response):
        """Parse one book detail page into a book_info item."""
        books = book_info()
        books['name'] = response.css('div.product_main>h1::text').extract_first()
        books['price'] = response.css('div.product_main p.price_color::text').extract_first()
        # Raw string avoids invalid-escape warnings for \( in the regex.
        books['stockamount'] = response.xpath('//p[@class="instock availability"]/text()')[1].re_first(r'In stock \((\d+) available\)')
        books['rank'] = response.css('div.product_main p.star-rating::attr(class)').re_first('star-rating ([A-Za-z]+)')
        # FIX: CSS pseudo-classes are hyphenated -- ':first_child'/':last_child'
        # raise a selector syntax error at runtime.
        # NOTE(review): lxml inserts <tbody>, so 'table>tr' may match nothing;
        # the descendant form 'table tr' is used for the ISBN row -- confirm
        # the review-count row selector against a live page.
        books['ISBN'] = response.css('table tr:first-child>td::text').extract_first()
        books['reviewamount'] = response.css('table tr:last-child td::text').extract_first()
        yield books
|
#!/usr/bin/env python3
"""
Example script for plotting reaction times across all sessions for a cohort of mice as a
lineplot of averages.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import pandas as pd
from reach import Cohort
from reach.session import Outcomes
def main(cohort):
    """Plot per-session mean reaction times for every mouse in the cohort."""
    trials = pd.DataFrame(cohort.get_trials())
    # Reaction time = elapsed time between trial start and end.
    trials["reaction_time"] = trials.end - trials.start
    # Missed/cancelled trials have no meaningful reaction time.
    trials.loc[trials["outcome"] == Outcomes.MISSED, "reaction_time"] = np.nan
    trials.loc[trials["outcome"] == Outcomes.CANCELLED, "reaction_time"] = np.nan

    averages = {}
    for mouse in cohort:
        # FIX: aggregate only the reaction_time column; calling .mean() on the
        # whole frame raises on non-numeric columns in recent pandas.
        mouse_averages = (
            trials.loc[trials["mouse_id"] == mouse.mouse_id]
            .groupby('day')['reaction_time']
            .mean()
            .reset_index()
        )
        averages[mouse.mouse_id] = pd.DataFrame(mouse_averages)

    df = pd.concat(averages)
    df.reset_index(inplace=True)
    sns.lineplot(
        data=df,
        x='day',
        y='reaction_time',
        hue='level_0',  # one line per mouse; level_0 is the mouse id from concat
    )
    plt.gca().set_ylim(bottom=0, top=10)
    plt.show()
if __name__ == '__main__':
    # Every argument after the data directory is a mouse ID.
    mouse_ids = list(sys.argv[2:])
    if not mouse_ids:
        # FIX: SystemExit (not SystemError) is the conventional exception for
        # aborting a script with a usage message; SystemError is reserved for
        # interpreter-internal failures.
        raise SystemExit(
            f'Usage: {__file__} /path/to/json/folder mouse1 mouse2'
        )
    cohort = Cohort.init_from_files(
        data_dir=sys.argv[1],
        mouse_ids=mouse_ids,
    )
    main(cohort)
|
#!/usr/bin/env python
import sys
import matplotlib.pyplot as plt
import class_analyse_tools as tools
if __name__ == '__main__':
    # Load evaluation results and sort them by iteration number.
    # (FIX: removed five dead list() initializations that were immediately
    # overwritten by the assignments below.)
    iteration, perf, perf_rand = tools.load_class_eval(sys.argv[1])
    iteration, tabs = tools.sort_data(iteration, perf, perf_rand)
    perf_sort = tabs[0]
    perf_rand_sort = tabs[1]
    # The "expert" baseline is a perfect score of 1 at every iteration.
    perf_expert = [1] * len(perf_rand_sort)
    diff = tools.performance(perf_rand_sort, perf_expert, perf_sort)
    # Smooth with a rolling mean of adjacent pairs (first point kept as-is).
    diff_aver = [diff[0]]
    for i in range(1, len(diff)):
        diff_aver.append((diff[i - 1] + diff[i]) / 2.)
    # plt.plot(iteration,perf_rand_sort,'o-',iteration,perf_sort,'o-')
    plt.plot(iteration, diff, 'o-')
    plt.plot(iteration, diff_aver, 'r-')
    plt.fill_between(iteration, diff, 0)
    plt.ylim(-1, 1)
    plt.show()
|
# -*- coding:utf-8 -*-
# Created by LuoJie at 11/16/19
import re
import jieba
import pandas as pd
from utils.multi_proc_utils import parallelize
from utils.config import train_seg_path, test_seg_path, merger_seg_path, user_dict, train_x_seg_path, test_x_seg_path, \
train_x_pad_path, train_y_pad_path, test_x_pad_path, wv_train_epochs
from utils.config import stop_word_path, train_data_path, test_data_path
import codecs
from gensim.models.word2vec import LineSentence, Word2Vec
import numpy as np
# 引入日志配置
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from utils.config import save_wv_model_path
# 自定义词表
jieba.load_userdict(user_dict)
def build_dataset(train_data_path, test_data_path):
    '''
    Load and preprocess the dataset, train word vectors, and pad sequences.
    :param train_data_path: path to the training CSV
    :param test_data_path: path to the test CSV
    :return: train X series, train Y series, test X series, trained Word2Vec model
    '''
    # 1. Load the raw CSV data
    train_df = pd.read_csv(train_data_path)
    test_df = pd.read_csv(test_data_path)
    print('train data size {},test data size {}'.format(len(train_df), len(test_df)))
    # 2. Drop rows missing required columns (the test set has no 'Report')
    train_df.dropna(subset=['Question', 'Dialogue', 'Report'], how='any', inplace=True)
    test_df.dropna(subset=['Question', 'Dialogue'], how='any', inplace=True)
    # 3. Clean/segment every row in parallel worker processes
    train_df = parallelize(train_df, sentences_proc)
    test_df = parallelize(test_df, sentences_proc)
    # 4. Merge train and test text into one corpus for word2vec training
    train_df['merged'] = train_df[['Question', 'Dialogue', 'Report']].apply(lambda x: ' '.join(x), axis=1)
    test_df['merged'] = test_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)
    merged_df = pd.concat([train_df[['merged']], test_df[['merged']]], axis=0)
    print('train data size {},test data size {},merged_df data size {}'.format(len(train_df), len(test_df),
                                                                               len(merged_df)))
    # 5. Persist the segmented train/test sets
    train_df = train_df.drop(['merged'], axis=1)
    test_df = test_df.drop(['merged'], axis=1)
    train_df.to_csv(train_seg_path, index=None, header=True)
    test_df.to_csv(test_seg_path, index=None, header=True)
    # 6. Persist the merged corpus (one sentence per line, no header)
    merged_df.to_csv(merger_seg_path, index=None, header=False)
    # 7. Train the word2vec model on the merged corpus
    # NOTE(review): 'size'/'iter' are the gensim<4.0 parameter names
    # (vector_size/epochs in gensim>=4) -- confirm the pinned gensim version.
    print('start build w2v model')
    wv_model = Word2Vec(LineSentence(merger_seg_path), size=300, negative=5, workers=8, iter=wv_train_epochs, window=3,
                        min_count=5)
    # 8. Build model inputs (X) from Question+Dialogue; 'Report' becomes the label
    train_df['X'] = train_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)
    test_df['X'] = test_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)
    # 9. Add <START>/<STOP>, map OOV words to <UNK>, and pad to a fixed length
    # using the vocabulary learned by gensim
    vocab = wv_model.wv.vocab
    # Pick one shared max length for X from both train and test statistics
    train_x_max_len = get_max_len(train_df['X'])
    test_X_max_len = get_max_len(test_df['X'])
    X_max_len = max(train_x_max_len, test_X_max_len)
    train_df['X'] = train_df['X'].apply(lambda x: pad_proc(x, X_max_len, vocab))
    # Test-set X uses the same shared max length
    test_df['X'] = test_df['X'].apply(lambda x: pad_proc(x, X_max_len, vocab))
    # Label Y gets its own max length from the train 'Report' column
    train_y_max_len = get_max_len(train_df['Report'])
    train_df['Y'] = train_df['Report'].apply(lambda x: pad_proc(x, train_y_max_len, vocab))
    # 10. Persist the padded/OOV-processed data and labels
    train_df['X'].to_csv(train_x_pad_path, index=None, header=False)
    train_df['Y'].to_csv(train_y_pad_path, index=None, header=False)
    test_df['X'].to_csv(test_x_pad_path, index=None, header=False)
    # 11. Retrain the word vectors so the special tokens (<PAD> etc.) get embeddings
    print('start retrain w2v model')
    wv_model.build_vocab(LineSentence(train_x_pad_path), update=True)
    wv_model.train(LineSentence(train_x_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)
    print('1/3')
    wv_model.build_vocab(LineSentence(train_y_pad_path), update=True)
    wv_model.train(LineSentence(train_y_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)
    print('2/3')
    wv_model.build_vocab(LineSentence(test_x_pad_path), update=True)
    wv_model.train(LineSentence(test_x_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)
    # Save the final word-vector model
    wv_model.save(save_wv_model_path)
    print('finish retrain w2v model')
    print('final w2v_model has vocabulary of ', len(wv_model.wv.vocab))
    return train_df['X'], train_df['Y'], test_df['X'], wv_model
def get_max_len(data):
    """
    Choose a suitable maximum sequence length for a text column.
    :param data: pandas Series of space-separated token strings
    :return: mean separator count plus two standard deviations, truncated to int
    """
    # Count the single-space separators per row; mean + 2*std covers the bulk
    # of sentences under a rough normality assumption.
    separator_counts = data.apply(lambda text: text.count(' '))
    upper_bound = np.mean(separator_counts) + 2 * np.std(separator_counts)
    return int(upper_bound)
def pad_proc(sentence, max_len, vocab):
    '''
    Turn a space-separated sentence into a fixed-length token string:
    truncate to max_len words, map OOV words to <UNK>, wrap the result in
    <START>/<STOP>, and right-pad with <PAD>.
    '''
    # Tokenize on single spaces and cap the word count.
    tokens = sentence.strip().split(' ')[:max_len]
    # Words missing from the vocabulary become <UNK>.
    mapped = []
    for token in tokens:
        mapped.append(token if token in vocab else '<UNK>')
    # Frame with sentence delimiters.
    framed = ['<START>'] + mapped + ['<STOP>']
    # Right-pad; the pad count is computed from the raw token count, so every
    # output carries exactly max_len + 4 tokens (matching the original logic).
    framed += ['<PAD>'] * (max_len + 2 - len(tokens))
    return ' '.join(framed)
def load_word2vec_file(word2vec_file):
    '''
    Load word vectors from a text-format word2vec file.
    :param word2vec_file: path to the vector file (header line "<count> <dim>")
    :return: (dict mapping word -> numpy vector, vector dimension)
    '''
    word2vec_dict = {}
    # FIX: context manager so the handle is closed even if reading fails.
    with codecs.open(word2vec_file, "r", "utf-8") as input_file:
        lines = input_file.readlines()
    # Header: "<word count> <dimension>"
    _, dim = lines[0].split(" ")
    dim = int(dim)
    for line in lines[1:]:
        tokens = line.strip().split(" ")
        # Skip malformed rows (wrong number of vector components).
        if len(tokens) != dim + 1:
            continue
        word = tokens[0]
        # FIX: np.array over a Python-3 map object produces a useless 0-d
        # object array; materialize the floats first for a real numeric vector.
        word2vec_dict[word] = np.array([float(x) for x in tokens[1:]])
    return word2vec_dict, dim
def load_dataset():
    '''
    Load the previously saved padded data/labels and the word2vec model.
    :return: train X series, train Y series, test X series, Word2Vec model
    '''
    def read_column(path, name):
        # Each CSV holds a single headerless column; label it for access.
        return pd.read_csv(path, header=None).rename(columns={0: name})[name]

    train_x = read_column(train_x_pad_path, 'X')
    train_y = read_column(train_y_pad_path, 'Y')
    test_x = read_column(test_x_pad_path, 'X')
    # Reload the persisted word-vector model.
    wv_model = Word2Vec.load(save_wv_model_path)
    return train_x, train_y, test_x, wv_model
def load_stop_words(stop_word_path):
    '''
    Load the stop-word list.
    :param stop_word_path: path to a file with one stop word per line
    :return: list of stop words
    '''
    # FIX: use a context manager -- the original never closed the file handle.
    with open(stop_word_path, 'r', encoding='utf-8') as file:
        stop_words = file.readlines()
    # Strip surrounding whitespace/newlines from each entry.
    return [stop_word.strip() for stop_word in stop_words]
# Load the stop-word list once at import time (reused by filter_stopwords).
stop_words = load_stop_words(stop_word_path)
def clean_sentence(sentence):
    '''
    Strip special characters and boilerplate phrases from a sentence.
    :param sentence: string to clean; non-string input yields a single space
    :return: cleaned string
    '''
    # Non-string input (e.g. NaN from pandas) maps to a single space.
    if not isinstance(sentence, str):
        return ' '
    # Remove ASCII/full-width punctuation plus domain boilerplate phrases
    # ("the owner says", "the technician says", audio/image markers, greetings).
    return re.sub(
        r'[\s+\-\!\/\|\[\]\{\}_,.$%^*(+\"\')]+|[::+——()?【】“”!,。?、~@#¥%……&*()]+|车主说|技师说|语音|图片|你好|您好',
        '', sentence)
def filter_stopwords(words):
    '''
    Drop stop words from a token sequence.
    :param words: iterable of tokens [word1, word2, ...]
    :return: list of tokens with stop words removed
    '''
    # stop_words is the module-level list loaded at import time.
    return list(filter(lambda token: token not in stop_words, words))
def sentence_proc(sentence):
    '''
    Full preprocessing pipeline for one sentence:
    clean -> segment with jieba -> drop stop words -> rejoin with spaces.
    :param sentence: raw input string
    :return: processed, space-joined string
    '''
    cleaned = clean_sentence(sentence)
    # jieba.cut defaults to accurate mode (cut_all=True would be full mode);
    # it returns a generator of tokens.
    tokens = jieba.cut(cleaned)
    kept = filter_stopwords(tokens)
    return ' '.join(kept)
def sentences_proc(df):
    '''
    Apply sentence_proc to every text column of the dataset.
    :param df: DataFrame with Brand/Model/Question/Dialogue (and maybe Report)
    :return: the processed DataFrame
    '''
    columns = ['Brand', 'Model', 'Question', 'Dialogue']
    # 'Report' exists only in the training set.
    if 'Report' in df.columns:
        columns = columns + ['Report']
    for column in columns:
        df[column] = df[column].apply(sentence_proc)
    return df
if __name__ == '__main__':
    # Run the full preprocessing + word2vec pipeline on the configured paths.
    build_dataset(train_data_path, test_data_path)
|
"""Preprocess the Yelp Dataset"""
import json
def read_and_process_businesses(json_file_path):
"""Read in the json dataset file and process it line by line."""
post_codes = {}
num_businesses = 0
with open(json_file_path) as fin:
for line in fin:
line_contents = json.loads(line)
pc = line_contents.get('postal_code', 0)
pc_num = post_codes.get(pc, 0)
post_codes[pc] = pc_num + 1
num_businesses += 1
with open('post_codes.json', 'w') as fp:
json.dump(post_codes, fp, sort_keys=True, indent=4)
print("Number of businesses {}".format(num_businesses))
def get_business_ids_in_postal_code(postal_code):
    """Collect id -> {name} for every business in the given postal code."""
    businesses = {}
    with open('../data/yelp_academic_dataset_business.json') as fin:
        for raw_line in fin:
            business = json.loads(raw_line)
            if business.get('postal_code', 0) == postal_code:
                business_id = business.get('business_id')
                businesses[business_id] = {'name': business.get('name')}
    # Persist the mapping alongside returning it.
    with open('businesses.json', 'w') as fp:
        json.dump(businesses, fp, sort_keys=True, indent=4)
    return businesses
def get_reviews_for_businesses(b_to_get_reviews_for):
    """Write every review whose business is in the given set/dict.

    Lines are written as Python dict reprs, not JSON -- replace_quotes()
    exists downstream to patch that up. (Filename typo 'restautant' is kept
    because the __main__ pipeline references that exact name.)
    """
    num_reviews = 0
    # FIX: context managers guarantee both handles close even on error
    # (the original only closed the output on the happy path).
    with open('restautant_reviews.json', 'w') as fp:
        with open('../data/yelp_academic_dataset_review.json') as fin:
            for line in fin:
                line_contents = json.loads(line)
                b_id = line_contents.get("business_id")
                # Drop bulky fields that are never used downstream.
                del line_contents['text']
                del line_contents['date']
                if b_id in b_to_get_reviews_for:
                    # json.dump(line_contents, fp)
                    fp.write("{}\n".format(line_contents))
                    num_reviews += 1
                    if num_reviews % 1000 == 0:
                        print('{}'.format(num_reviews))
    print("Number of restaurant reviews: {}".format(num_reviews))
def get_restaurants():
    """Collect every business categorized as a restaurant into restaurants.json."""
    count = 0
    found = {}
    with open('../data/yelp_academic_dataset_business.json') as fin:
        for raw_line in fin:
            business = json.loads(raw_line)
            # 'categories' can be None in the dataset, so guard before 'in'.
            categories = business.get('categories', [])
            if categories is not None:
                if "Restaurants" in categories:
                    count += 1
                    found[business.get("business_id")] = business
                    if count % 1000 == 0:
                        print("Number of restaurants: {}".format(count))
    with open('restaurants.json', 'w') as fp:
        json.dump(found, fp, sort_keys=True, indent=4)
def process_restaurant_reviews(src, dest):
    """Group review lines (one JSON object per line) by business id into dest."""
    grouped = {}
    seen = 0
    with open(src) as fin:
        for raw_line in fin:
            review = json.loads(raw_line)
            business_id = review.get("business_id")
            if business_id not in grouped:
                seen += 1
                if seen % 100 == 0:
                    print("Num Restaurants: {}".format(seen))
                # First review for this business: start its bucket.
                grouped[business_id] = {"reviews": []}
            grouped[business_id]["reviews"].append(review)
    with open(dest, 'w') as fp:
        json.dump(grouped, fp, sort_keys=True, indent=4)
def replace_quotes(src, dest):
    """Copy src to dest, replacing every single quote with a double quote."""
    line_number = 0
    with open(dest, 'w') as out, open(src) as fin:
        for raw_line in fin:
            line_number += 1
            # Progress marker for very large dumps.
            if line_number % 1000 == 0:
                print("Num: {}".format(line_number))
            out.write(raw_line.replace("'", '"'))
def remove_slash(src, dest):
    """Copy src to dest with every backslash character removed."""
    line_number = 0
    with open(dest, 'w') as out, open(src) as fin:
        for raw_line in fin:
            line_number += 1
            # Progress marker for very large dumps.
            if line_number % 1000 == 0:
                print("Num: {}".format(line_number))
            out.write(raw_line.replace('\\', ""))
def remove_quotes(src, dest):
    # NOTE(review): this function is unfinished and cannot run as written:
    #  - `fp` is immediately rebound to the src handle, leaking the dest handle;
    #  - json.loads() expects a string, not a file object (json.load would work);
    #  - `nop` is not defined anywhere, so the inner loop raises NameError.
    # Left byte-identical pending clarification of the intended behavior.
    fp = open(dest, 'w')
    fp = open(src, 'r')
    data = json.loads(fp)
    for b_id in data:
        for review in data[b_id]:
            nop
def get_users_from_reviews(src, dest):
    """Copy the full user records for every user that appears in src reviews."""
    # Pass 1: collect the distinct user ids referenced by the reviews.
    users = set()
    num_users = 0
    with open(src) as fin:
        for line in fin:
            line_contents = json.loads(line)
            user_id = line_contents.get("user_id")
            if user_id not in users:
                users.add(user_id)
                num_users += 1
                if num_users % 1000 == 0:
                    print("Pre Num Users: {}".format(num_users))
    # Pass 2: stream the user dump and keep only the referenced users.
    num_users = 0
    # FIX: context manager -- the original never closed the output file.
    with open(dest, 'w') as fp:
        with open('../data/yelp_academic_dataset_user.json') as fin:
            for line in fin:
                line_contents = json.loads(line)
                user_id = line_contents.get('user_id')
                if user_id in users:
                    fp.write(line)
                    num_users += 1
                    if num_users % 1000 == 0:
                        print("Post Num Users: {}".format(num_users))
    print("Total Users: {}".format(num_users))
def get_restaurant_subset(src, dest, num):
    """Write to dest only the restaurants in src with more than `num` reviews."""
    new_restaurants = {}
    count = 0
    # FIX: context managers -- the original leaked both the src handle on
    # error and the dest handle unconditionally.
    with open(src, 'r') as fp:
        data = json.load(fp)
    # Annotate every restaurant with its review count.
    for b_id in data:
        data[b_id]["num_reviews"] = len(data[b_id]["reviews"])
    # Keep only restaurants above the threshold.
    for b_id in data:
        if data[b_id]["num_reviews"] > num:
            new_restaurants[b_id] = data[b_id]
            count += 1
    print("Num Restaurants {} with more than {} reviews".format(count, num))
    with open(dest, 'w') as fp:
        json.dump(new_restaurants, fp, sort_keys=True, indent=4)
def get_user_subset(restaurant_src, user_src, user_dest):
    """Extract the user records referenced by restaurant reviews (capped ~500)."""
    with open(restaurant_src, 'r') as restaurant_fp:
        restaurant_data = json.load(restaurant_fp)
    # Collect the distinct reviewer ids across all restaurants.
    new_users = set()
    num_users = 0
    for b_id, b_info in restaurant_data.items():
        for review in b_info['reviews']:
            new_users.add(review['user_id'])
            num_users += 1
            if num_users % 10 == 0:
                print("Read Num Users {}".format(num_users))
    # Free the large structure before loading the next one.
    del restaurant_data
    num_users = 0
    with open(user_src, 'r') as user_fp:
        user_data = json.load(user_fp)
    # FIX: removed the dead num_new_users counter (declared, never used).
    new_user_data = {}
    for u_id, u_data in user_data.items():
        if u_id in new_users:
            new_user_data[u_id] = u_data
            num_users += 1
            if num_users % 10 == 0:
                print("Gather Num Users {}".format(num_users))
            # Hard cap keeps the subset small (preserved original behavior).
            if num_users > 500:
                break
    del user_data
    # FIX: context manager -- the original never closed the output file.
    with open(user_dest, 'w') as fp:
        json.dump(new_user_data, fp, sort_keys=True, indent=4)
def convert_user_json(src, dest):
    """Convert a line-per-user JSON dump into a single dict keyed by user_id."""
    dest_users = {}
    num_users = 0
    with open(src) as fin:
        for line in fin:
            line_contents = json.loads(line)
            user_id = line_contents.get('user_id')
            dest_users[user_id] = line_contents
            num_users += 1
            if num_users % 1000 == 0:
                print("Num Users: {}".format(num_users))
    # FIX: context manager -- the original never closed the output file.
    with open(dest, 'w') as fp:
        json.dump(dest_users, fp, sort_keys=True, indent=4)
def get_business_info(b_id):
    """Print the first business record matching b_id, then stop scanning."""
    with open('../data/yelp_academic_dataset_business.json') as fin:
        for raw_line in fin:
            business = json.loads(raw_line)
            # Renamed from `id` to avoid shadowing the builtin.
            current_id = business.get("business_id")
            if current_id == b_id:
                print(business)
                break
def reduce_restaurants(src, dest, num):
    """Copy only the first restaurant entry from src into dest.

    `num` is accepted for interface compatibility but is not used by the
    current implementation (the loop breaks after the first entry).
    """
    # FIX: removed the leftover `pdb.set_trace()` debugger breakpoint that
    # froze every call, and the dead `count` variable.
    new_restaurants = {}
    with open(src, 'r') as fp:
        data = json.load(fp)
    # json.load preserves insertion order, so this keeps the first key only.
    for b_id in data:
        new_restaurants[b_id] = data[b_id]
        break
    # FIX: close the output deterministically (original leaked the handle).
    with open(dest, 'w') as fp:
        json.dump(new_restaurants, fp, sort_keys=True, indent=4)
def get_noras_user_set():
    """
    Build a per-user profile JSON for every user that rated Noras.

    Reads restaurant_noras_reviews.json and restaurants.json plus the raw
    Yelp review/user dumps, and writes noras_user_profile.json containing,
    per user: their Noras rating, all their restaurant ratings, the
    categories of the restaurants they reviewed, and stats from their
    user record (average stars, review count, useful votes).
    """
    ###########################################################################
    # Create the set of all users that rated Noras
    # ("pHJu8tj3sI8eC5aIHLFEfQ" is Noras' business id).
    print("Create set of all users that rated Noras")
    users = set()
    fp = open('restaurant_noras_reviews.json')
    data = json.load(fp)
    for r in data["pHJu8tj3sI8eC5aIHLFEfQ"]["reviews"]:
        users.add(r["user_id"])
    fp.close()
    # Initialize an empty profile per user.
    noras_user_profile = {}
    for u in users:
        noras_user_profile[u] = {}
        noras_user_profile[u]["categories"] = []
        noras_user_profile[u]["ratings"] = []
    # Record each user's star rating of Noras itself.
    for r in data["pHJu8tj3sI8eC5aIHLFEfQ"]["reviews"]:
        noras_user_profile[r["user_id"]]["noras_rating"] = r["stars"]
    ###########################################################################
    # Process all reviews and accumulate ratings/categories per user.
    fp = open('restaurants.json')
    restaurants = json.load(fp)
    print("process reviews")
    num_reviews = 0
    with open('../data/yelp_academic_dataset_review.json') as fin:
        for line in fin:
            line_contents = json.loads(line)
            u_id = line_contents.get('user_id')
            if u_id in users:
                num_reviews += 1
                if num_reviews % 100 == 0:
                    print("Processing reviews {}".format(num_reviews))
                # Only count reviews of businesses known to be restaurants.
                if line_contents.get('business_id') in restaurants:
                    noras_user_profile[u_id]["ratings"].append(line_contents.get("stars"))
                    r_data = restaurants[line_contents.get('business_id')]
                    # Categories are a comma-separated string in the dump.
                    for cat in r_data["categories"].split(','):
                        cat = cat.strip()
                        noras_user_profile[u_id]["categories"].append(cat)
    fp.close()
    ###########################################################################
    # Compute each user's average restaurant rating.
    # NOTE(review): divides by len(ratings) -- raises ZeroDivisionError for a
    # user with no recorded restaurant ratings; confirm that cannot happen.
    print("computing average restaurant reviews")
    num_reviews = 0
    for u_id in noras_user_profile:
        num_reviews += 1
        if num_reviews % 10 == 0:
            print("computing average {}".format(num_reviews))
        noras_user_profile[u_id]["avg_restaurant_rating"] = sum(noras_user_profile[u_id]["ratings"]) / len(noras_user_profile[u_id]["ratings"])
    ###########################################################################
    # Merge in per-user stats from the raw user dump.
    print("getting other user data")
    num_users = 0
    with open('../data/yelp_academic_dataset_user.json') as fin:
        for line in fin:
            line_contents = json.loads(line)
            u_id = line_contents.get('user_id')
            if u_id in users:
                num_users += 1
                if num_users % 50 == 0:
                    print("getting more user data {}".format(num_users))
                noras_user_profile[u_id]["avg_rating"] = line_contents.get("average_stars")
                noras_user_profile[u_id]["review_count"] = line_contents.get("review_count")
                noras_user_profile[u_id]["useful"] = line_contents.get("useful")
    ###########################################################################
    # Convert to list for JSON serialization.
    # NOTE(review): "categories" is already a list at this point, so list()
    # is a no-op copy -- presumably a leftover from when it was a set.
    print("convert set to list")
    for u_id in noras_user_profile:
        noras_user_profile[u_id]["categories"] = list(noras_user_profile[u_id]["categories"])
    ###########################################################################
    # Write the assembled profiles to file.
    print("writing to file")
    fp = open('noras_user_profile.json', 'w')
    json.dump(noras_user_profile, fp, sort_keys=True, indent=4)
    fp.close()
if __name__ == '__main__':
    # The commented-out calls record the order the one-off pipeline steps were
    # run in; uncomment the individual step you need.
    # import pdb
    # pdb.set_trace()
    # read_and_process_businesses('../data/yelp_academic_dataset_business.json')
    # businesses = get_business_ids_in_postal_code("06502")
    # get_reviews_for_businesses(businesses)
    # myrestaurants = get_restaurants()
    # get_reviews_for_businesses(myrestaurants)
    # process_restaurant_reviews("restaurant_reviews.json", "restaurants.json")
    # get_users_from_reviews("restaurant_reviews.json", "users.json")
    # replace_quotes("restautant_reviews.json", "restaurant_reviews.json")
    # remove_slash("restaurants.json", "restaurants2.json")
    # get_restaurant_subset('restaurants.json', 'restaurants_more_than_1000_reviews.json', 1000)
    # convert_user_json('users.json', 'users_keyed.json')
    # get_business_info("pHJu8tj3sI8eC5aIHLFEfQ")
    # get_restaurants()
    # reduce_restaurants('restaurants_more_than_1000_reviews.json', 'restaurant.json', 1)
    # get_user_subset('restaurants_1000_subset.json', 'users_keyed.json', 'user.json')
    get_noras_user_set()
from ucfg_updater import *
from ucfg_utils import info, error
from ucfg_main import ConfigUtilities
import os
import re
from _use import USEUpdater
class UUpdater(USEUpdater):
    """Updates Unicore/X configuration from pre-6.6.0 syntax to 6.6.0 syntax."""

    def printDescription(self, options):
        info(options, "Updates Unicore/X configuration from pre 6.6.0 versions to the 6.6.0 syntax")

    def run(self, options):
        info(options, "UNICORE/X configuration updater")
        # Run the shared USE update steps first.
        super(UUpdater, self).run(options, 'unicorex')
        cfgUtil = ConfigUtilities(options, 'unicorex')
        # Old property name -> new 6.6.0 property name.
        # FIX: the original listed 'bes.naming.profile' twice; the duplicate
        # literal key was silently dropped. Also renamed the local from `dict`
        # to stop shadowing the builtin.
        renames = {
            'defaultsms.workdir' : 'coreServices.defaultsms.workdir',
            'uas.targetsystemfactory.xnjs.configfile' : 'coreServices.targetsystemfactory.xnjs.configfile',
            'uas.sms.protocols' : 'coreServices.sms.protocols',
            'uas.storagefactory.types' : 'coreServices.sms.enabledFactories',
            'unicore.gridbean.directory' : 'coreServices.gridbean.directory',
            'cip.data.path' : 'coreServices.cip.dataPath',
            'bes.naming.profile' : 'coreServices.bes.namingProfile',
            'bes.job.mode' : 'coreServices.bes.jobMode',
            'bes.is.accepting.new.activities' : 'coreServices.bes.isAcceptingNewActivities',
            'bes.local.resource.manager.type' : 'coreServices.bes.localResourceManagerType',
            'bes.common.name' : 'coreServices.bes.commonName',
            'bes.long.description' : 'coreServices.bes.longDescription',
            'bes.extension' : 'coreServices.bes.extension',
        }
        # Any other uas.storagefactory.* key moves under coreServices.sms.factory.*
        javaProps = cfgUtil.getJavaPropertyKeys('uas.config')
        for propName in javaProps:
            m = re.match(r'^(uas\.storagefactory\.)(.*)', propName)
            if m is not None and propName != 'uas.storagefactory.types':
                renames[propName] = 'coreServices.sms.factory.' + m.group(2)
        cfgUtil.updateJavaPropertyNames('uas.config', renames)
        # Properties that no longer exist: comment them out with an explanation.
        removed = {
            'bes.factory.id' : 'This property is not available anymore without a replacement.'
        }
        cfgUtil.commentJavaProperties('uas.config', removed)
        info(options, "Finished update of configuration of Unicore/X")
|
# import labop
# from labop.lib.library_type_inference import primitive_type_inference_functions
#
# ##############################
# # Class for carrying a typing process
#
#
# class ProtocolTyping:
# def __init__(self):
# self.flow_values = {} # dictionary of labop.Flow : type value, includes subprotocols too
# self.typed_protocols = set() # protocol and subprotocols already evaluated or in process of evaluation
# self.cache = {} # kludge for accelerating inflow satisfaction computation
#
# def infer_typing(self, protocol : labop.Protocol):
# self.typed_protocols.add(protocol)
# pending_activities = set(protocol.activities)
# print('Building activity cache non-blocked')
# self.cache.update({a:a.input_flows() for a in pending_activities}) # speed kludge
# while pending_activities:
# print('Collecting non-blocked activities out of pending '+str(len(pending_activities)))
# non_blocked = {a for a in pending_activities if self.inflows_satisfied(a)}
# if not non_blocked:
# raise ValueError("Could not infer all flow types in "+protocol.identity+": circular dependency?")
# for activity in non_blocked:
# print('Inferring typing for '+activity.identity)
# activity.infer_typing(self)
# pending_activities -= non_blocked
#
# def inflows_satisfied(self, activity):
# #unsatisfied = {flow for flow in activity.input_flows() if flow not in self.flow_values.keys()}
# unsatisfied = {flow for flow in self.cache[activity] if flow not in self.flow_values.keys()}
# return len(unsatisfied) == 0
#
#
# #############################
# # Inference utilities
#
# def pin_input_type(self, typing: ProtocolTyping):
# try:
# return self.value
# except AttributeError:
# in_flows = self.input_flows()
# assert len(in_flows) == 1, \
# ValueError("Expected one input flow for '" + self.get_parent().identity + "' but found " + len(in_flows))
# return typing.flow_values[in_flows.pop()]
# labop.Pin.input_type = pin_input_type
#
#
# def pin_assert_output_type(self, typing: ProtocolTyping, value):
# out_flows = self.output_flows()
# # TODO: need to decide if this type of implicit fork is acceptable
# for f in out_flows:
# typing.flow_values[f] = value
# # assert len(out_flows) == 1, \
# # ValueError("Expected one output flow for '" + self.get_parent().identity + "' but found " + str(len(out_flows)))
# # typing.flow_values[out_flows.pop()] = value
# labop.Pin.assert_output_type = pin_assert_output_type
#
#
# #############################
# # Inference over Activities
# def initial_infer_typing(self, typing: ProtocolTyping):
# typing.flow_values.update({f: None for f in self.direct_output_flows()})
# labop.Initial.infer_typing = initial_infer_typing
#
#
# def final_infer_typing(self, _: ProtocolTyping):
# assert len(self.direct_output_flows()) == 0 # should be no outputs
# labop.Final.infer_typing = final_infer_typing
#
#
# def fork_decision_infer_typing(self, typing: ProtocolTyping):
# assert len(self.direct_input_flows()) == 1 # should be precisely one input
# in_type = typing.flow_values[self.direct_input_flows().pop()]
# typing.flow_values.update({f: in_type for f in self.direct_output_flows()})
# labop.Fork.infer_typing = fork_decision_infer_typing
# labop.Decision.infer_typing = fork_decision_infer_typing
#
#
# def join_infer_typing(self, typing: ProtocolTyping):
# #assert len(self.direct_output_flows()) == 1 # should be precisely one output
# value = join_values({typing.flow_values[f] for f in self.direct_input_flows()})
# typing.flow_values.update({f: value for f in self.direct_output_flows()})
# labop.Join.infer_typing = join_infer_typing
#
# # TODO: add type inference for Merge
#
#
# def primitiveexecutable_infer_typing(self, typing: ProtocolTyping):
# typing.flow_values.update({f: None for f in self.direct_output_flows()})
# inference_function = primitive_type_inference_functions[self.instance_of.lookup().identity]
# inference_function(self, typing)
# labop.PrimitiveExecutable.infer_typing = primitiveexecutable_infer_typing
#
# # TODO: add type inference for SubProtocol
# def subprotocol_infer_typing(self: labop.SubProtocol, typing: ProtocolTyping):
# typing.flow_values.update({f: None for f in self.direct_output_flows()})
# subprotocol = self.instance_of.lookup()
# if subprotocol not in typing.typed_protocols:
# # add types for inputs
# input_pin_flows = self.input_flows() - self.direct_input_flows()
# for f in input_pin_flows:
# typing.flow_values.update({subflow: typing.flow_values[f] for subflow in subprotocol.get_input(f.sink.lookup().name).activity.lookup().direct_output_flows()})
# # run the actual inference
# typing.infer_typing(subprotocol)
# # pull values from outputs' inferred values
# output_pin_flows = self.output_flows() - self.direct_output_flows()
# for f in output_pin_flows:
# typing.flow_values.update({subflow: typing.flow_values[f] for subflow in subprotocol.get_output(f.source.lookup().name).activity.lookup().direct_input_flows()})
# labop.SubProtocol.infer_typing = subprotocol_infer_typing
#
#
# def type_to_value(type_name: str, **kwargs):
# if type_name == 'http://bioprotocols.org/labop#LocatedSamples':
# return labop.LocatedSamples(**kwargs)
# elif type_name == 'http://bioprotocols.org/labop#LocatedData':
# return labop.LocatedData(**kwargs)
# else:
# ValueError("Don't know how to make dummy object for type "+type_name)
#
#
# def value_infer_typing(self: labop.Value, typing: ProtocolTyping):
# # assert len(self.direct_output_flows()) == 1 # should be precisely one output --- or maybe not. TODO: decide
# output_instance = (type_to_value(self.type, name=self.name) if self.type else None)
# # Don't overwrite values that are already written
# unset_values = {f for f in self.direct_output_flows() if f not in typing.flow_values.keys()}
# typing.flow_values.update({f: output_instance for f in unset_values})
# labop.Value.infer_typing = value_infer_typing
#
#
#
# #################
# # Join is a kludge for now
# # TODO: Make a more principled approach to inference of Join, which will also provide an architcture for Merge
# def join_locations(value_set):
# if not value_set:
# return labop.HeterogeneousSamples()
# next_value = value_set.pop()
# rest = join_locations(value_set)
# if isinstance(next_value, labop.ReplicateSamples):
# rest.replicate_samples.append(next_value)
# elif isinstance(next_value, labop.HeterogeneousSamples):
# for x in next_value.replicate_samples:
# rest.replicate_samples.append(x)
# else:
# raise ValueError("Don't know how to join locations for "+str(value_set))
# return rest
#
# def join_values(value_set):
# if all(isinstance(x,labop.LocatedSamples) for x in value_set):
# return join_locations(value_set)
# elif all(x is None for x in value_set):
# return None
# # if we fall through to the end, then we didn't know how to infer
# raise ValueError("Don't know how to join values types for "+str(value_set))
#
#
#
#
|
import string
import os
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.crypto import get_random_string
from celery import shared_task, task
from subprocess import Popen
from eventlogUploader.models import Document
import shutil
@shared_task
def handle_pretsa_upload(kValue, tValue, anonValue, path, pathDB, secure_token, metadataValue):
    """Launch the PRETSA anonymization script, then optionally the metadata script."""
    pretsa_dir = os.getcwd() + "/algorithms/PRETSA"
    Popen(["python", pretsa_dir + "/runPretsa.py", str(path), str(kValue), str(tValue),
           str(anonValue), str(pathDB), str(secure_token)], cwd=pretsa_dir)
    if metadataValue:
        # PRETSA writes its result next to the input with this naming scheme.
        output_path = path.replace(".xes", "_k%s_pretsa.csv" % (kValue))
        print("\n output_path: ", output_path, "\n")
        metadata_dir = os.getcwd() + "/algorithms/metadata"
        Popen(["python", metadata_dir + "/privacy_metadata.py", str(output_path),
               'pretsa', str(pathDB), str(secure_token)], cwd=metadata_dir)
    return
@shared_task
def handle_laplace_df_upload(epsilonValue, path, pathDB, secure_token, metadataValue):
    """Launch the directly-follows Laplace privatization script (+ optional metadata)."""
    df_dir = os.getcwd() + "/algorithms/laplace_df"
    Popen(["python", df_dir + "/privatize_df.py", str(path), str(epsilonValue),
           str(pathDB), str(secure_token)], cwd=df_dir)
    if metadataValue:
        # The privatizer appends the epsilon value to the output filename.
        output_path = path.replace(".xes", "_%s.xes" % (epsilonValue))
        print("\n output_path: ", output_path, "\n")
        metadata_dir = os.getcwd() + "/algorithms/metadata"
        Popen(["python", metadata_dir + "/privacy_metadata.py", str(output_path),
               'laplace_df', str(pathDB), str(secure_token)], cwd=metadata_dir)
    return
@shared_task
def handle_laplace_tv_upload(epsilonValue, nValue, pValue, path, pathDB, secure_token, metadataValue):
    """Launch the trace-variant-query Laplace script (+ optional metadata)."""
    tv_dir = os.getcwd() + "/algorithms/laplace_tv"
    Popen(["python", tv_dir + "/trace_variant_query.py", str(path), str(epsilonValue),
           str(nValue), str(pValue), str(pathDB), str(secure_token)], cwd=tv_dir)
    if metadataValue:
        # Output filename carries all three parameters.
        output_path = path.replace(".xes", "_%s_%s_%s.xes" % (epsilonValue, nValue, pValue))
        print("\n output_path: ", output_path, "\n")
        metadata_dir = os.getcwd() + "/algorithms/metadata"
        Popen(["python", metadata_dir + "/privacy_metadata.py", str(output_path),
               'laplace_tv', str(pathDB), str(secure_token)], cwd=metadata_dir)
    return
@shared_task
def handle_pripel_upload(epsilonValue, nValue, kValue, path, pathDB, secure_token, metadataValue):
    """Launch the PRIPEL anonymization script (+ optional metadata)."""
    pripel_dir = os.getcwd() + "/algorithms/pripel"
    Popen(["python", pripel_dir + "/pripel.py", str(path), str(epsilonValue),
           str(nValue), str(kValue), str(pathDB), str(secure_token)], cwd=pripel_dir)
    if metadataValue:
        # NOTE(review): the suffix reads "_epsilon_" + "_k..." with no epsilon
        # value in between -- looks like a dropped str(epsilonValue); confirm
        # against the filename pripel.py actually writes before changing it.
        new_ending = "_epsilon_" + "_k" + str(kValue) + "_anonymizied.xes"
        output_path = path.replace(".xes", new_ending)
        print("\n output_path: ", output_path, "\n")
        metadata_dir = os.getcwd() + "/algorithms/metadata"
        Popen(["python", metadata_dir + "/privacy_metadata.py", str(output_path),
               'pripel', str(pathDB), str(secure_token)], cwd=metadata_dir)
    return
@shared_task
def handle_risk_upload(path, pathDB, secure_token):
    """Start the re-identification-risk column-extraction script (fire-and-forget)."""
    command = Popen(["python", os.getcwd()+"/algorithms/re_ident_risk/columns.py", str(path), str(pathDB), str(secure_token)], cwd=os.getcwd()+"/algorithms/re_ident_risk")
    return
@shared_task
def handle_risk_upload_with_columns(projection, case_attributes, event_attributes, path, pathDB, secure_token):
    """Start the re-identification risk analysis with user-chosen projection and attributes (fire-and-forget)."""
    command = Popen(["python", os.getcwd()+"/algorithms/re_ident_risk/re_ident_test.py", str(path), str(projection), str(case_attributes), str(event_attributes), str(pathDB), str(secure_token)], cwd=os.getcwd()+"/algorithms/re_ident_risk")
    return
@shared_task
def remove_overdue_files():
    """Periodic cleanup: delete expired Document rows and their media directories.

    A media directory is removed when no Document with a matching token
    remains, i.e. after its database entry expired.
    """
    print("Removing overdue files and database entries")
    #remove overdue database entries
    Document.objects.filter(expires_on__lte = datetime.now()).delete()
    #remove all directories that do not have a database entry that is overdue
    directories = os.listdir(settings.MEDIA_ROOT)
    for directory in directories:
        # Directory names are upload tokens; orphaned ones are deleted.
        if not Document.objects.filter(token = directory).exists():
            shutil.rmtree(os.path.join(settings.MEDIA_ROOT, directory))
|
import datetime
import PackageStatus
class Package:
    """A deliverable package: id, address data, deadline, mass, notes and status.

    Attributes are exposed through combined getter/setter methods: call with an
    argument to assign, call with no argument to read.  Most accessors return
    the value formatted as a string; ``id()`` returns the raw value.
    """

    def __init__(self, id, address, city, state, zip, delivery_deadline, mass, special_notes):
        self._id = id
        self._address = address
        self._city = city
        self._state = state
        self._zip = zip
        self._delivery_deadline = delivery_deadline
        self._mass = mass
        self._special_notes = special_notes
        # Status tracking starts at 8:00 AM.
        self._delivery_status = PackageStatus.PackageStatus(datetime.time(8, 0, 0))
        # Delivery (trip) number; defaults to 1.
        self._delivery = 1

    def __repr__(self):
        return '{}, {}, {}, {}, {}, {}, {}, {}, {}'\
            .format(self._id, self._address, self._city, self._state, self._zip, self._delivery_deadline,
                    self._mass, self._special_notes, self._delivery_status)

    # Combined setters and getters.
    # Fixed: these previously tested `if value:` and therefore silently refused
    # to assign falsy-but-valid values such as 0 or "".  They now test
    # `is not None` explicitly.
    def id(self, i=None):
        if i is not None: self._id = i
        return self._id

    def address(self, a=None):
        if a is not None: self._address = a
        return '{}'.format(self._address)

    def city(self, c=None):
        if c is not None: self._city = c
        return '{}'.format(self._city)

    def state(self, s=None):
        if s is not None: self._state = s
        return '{}'.format(self._state)

    def zip(self, z=None):
        if z is not None: self._zip = z
        return '{}'.format(self._zip)

    def delivery_deadline(self, d=None):
        if d is not None: self._delivery_deadline = d
        return '{}'.format(self._delivery_deadline)

    def mass(self, m=None):
        if m is not None: self._mass = m
        return '{}'.format(self._mass)

    def special_notes(self, s=None):
        if s is not None: self._special_notes = s
        return '{}'.format(self._special_notes)

    def delivery_status(self, dt=None, ds=None):
        # Both a new status and its timestamp are required to record a change.
        if ds is not None and dt is not None: self._delivery_status.changeStatus(ds, dt)
        return '{}'.format(self._delivery_status)

    def delivery(self, d=None):
        if d is not None: self._delivery = d
        return '{}'.format(self._delivery)
|
#!/usr/bin/env python
# author (alterer is a more suitable word) : Guillaume Pierron - "Guiwiz"
#
# This script is largely based on the work of Arnaud Bertrand - "Arn-O"
# You can find his original work (a wonderful python script to control XBMC) here :
# https://github.com/Arn-O/py-xbmc-remote-controller
#
# This script is also based on the work (a python script for xchat/hexchat to control
# the linux player amarok locally) of zir0faive, not publically available yet :)
__module_name__ = "Kodi NowPlaying"
__module_version__ = "0.89c"
__module_description__ = "A dirty/quickly adapted script to print currently playing music on distant Kodi"
print "\003",__module_name__, __module_version__,"has been loaded\003"
import xchat
import socket
import json
from string import Template
BUFFER_SIZE = 1024
''' USERS SHOULD MODIFY THIS SECTION '''
XBMC_IP = "192.168.1.210"
XBMC_PORT = 9090
''' USERS MAY MODIFY THIS TOO '''
COMPATIBLE_ENCODING = 'iso-8859-1'
SCRIPTCMD = 'zik'
'''STRING FORMATTING PREFS PART'''
TITLE = 'Kodi '
DISPLAY_PATTERN = TITLE + '15# $artist 15- $title ' + \
'15(#$track 15- $album 15- $year15) ' + \
'15[$p_min15:$p_0sec15/$t_min15:$t_0sec ' + \
'15,15$elapsed14,14$remaining15]'
BAR_LENGTH = 10
CHAR_ELAPSED = '#'
CHAR_REMAINING = '='
def now_playing(item, properties):
    """Render the currently playing track via DISPLAY_PATTERN.

    *item* and *properties* are the dicts returned by get_item/get_properties;
    returns a fallback string when nothing is playing.
    """
    if not item:
        return "[is not playing anything]"
    # Merge both Kodi responses into one substitution namespace.
    data = dict(item)
    data.update(properties)
    # Only the first artist is displayed.
    if item['artist']:
        data['artist'] = item['artist'][0]
    # Progress bar: filled vs remaining characters.
    filled = int(BAR_LENGTH * properties['percentage'] / 100)
    data['elapsed'] = CHAR_ELAPSED * filled
    data['remaining'] = CHAR_REMAINING * (BAR_LENGTH - filled)
    # Elapsed / total time as minutes and zero-padded seconds.
    elapsed_time = properties['time']
    total_time = properties['totaltime']
    data['p_min'] = elapsed_time['hours'] * 60 + elapsed_time['minutes']
    data['p_0sec'] = "%02d" % elapsed_time['seconds']
    data['t_min'] = total_time['hours'] * 60 + total_time['minutes']
    data['t_0sec'] = "%02d" % total_time['seconds']
    return Template(DISPLAY_PATTERN).substitute(data)
def get_item(ip, port):
    """Query Kodi (JSON-RPC Player.GetItem) for the currently playing item.

    Returns the item dict, or None when the response carries no item.
    """
    request = {
        "jsonrpc": "2.0",
        "method": "Player.GetItem",
        "params": {
            "playerid": 0,
            "properties": ["album", "title", "track", "artist", "year", "genre"],
        },
        "id": 1,
    }
    response = call_api(ip, port, request)
    try:
        return response['result']['item']
    except KeyError:
        return None
def get_properties(ip, port):
    """Query Kodi (JSON-RPC Player.GetProperties) for playback progress info.

    Returns the 'result' dict (time/totaltime/percentage/position), or None
    when the response has no result.
    """
    request = {
        "jsonrpc": "2.0",
        "method": "Player.GetProperties",
        "params": {
            "playerid": 0,
            "properties": ["time", "totaltime", "percentage", "position"],
        },
        "id": 1,
    }
    response = call_api(ip, port, request)
    try:
        return response['result']
    except KeyError:
        return None
def call_api(ip, port, command):
    """Send one JSON-RPC command to Kodi over raw TCP and return the decoded reply.

    Kodi's TCP transport has no message framing, so the reply is read until
    every opened JSON object is closed ('{' count equals '}' count).

    NOTE(review): written for Python 2 — send/recv work on str here; under
    Python 3 this would need explicit bytes encoding/decoding.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((ip, port))
    s.send(json.dumps(command))
    data = ''
    while True:
        filler = s.recv(BUFFER_SIZE)
        data += filler
        # Balanced braces => the complete JSON document has arrived.
        nb_open_brackets = data.count('{') - data.count('}')
        if nb_open_brackets == 0:
            break
    s.close()
    ret = json.loads(data)
    return ret
def play_what():
    """Return the formatted now-playing string for the configured Kodi host."""
    item = get_item(XBMC_IP, XBMC_PORT)
    properties = get_properties(XBMC_IP, XBMC_PORT)
    return now_playing(item, properties)
def xchat_kodi_cmd(argv, arg_to_eol, c):
    """xchat command handler: announce the current Kodi track with a /me action.

    Only fires when the command is given with no extra arguments.
    """
    if len(argv) == 1:
        current=play_what()
        # Re-encode for the IRC-compatible charset before sending.
        xchat.command('me %s' % current.encode(COMPATIBLE_ENCODING))
    return xchat.EAT_ALL
# Register the /zik command with xchat.
xchat.hook_command(SCRIPTCMD, xchat_kodi_cmd, help="/"+SCRIPTCMD)
|
# -*- coding: utf-8 -*-
import scrapy
import os
import csv
class OnlineradioboxSpider(scrapy.Spider):
    """Crawl onlineradiobox.com: continents -> countries -> regions -> stations.

    Each station page is scraped for metadata and appended as one row to
    onlineradiobox.csv in the working directory (header written on first use).
    """
    name = 'onlineradiobox'
    allowed_domains = ['onlineradiobox.com']
    start_urls = ['https://onlineradiobox.com/']

    def parse(self, response):
        # Landing page: links to the per-continent catalogs.
        links = response.xpath('.//*[@class="catalog__mainland-list"]/li/a/@href').extract()
        for link in links:
            yield scrapy.Request(response.urljoin(link), callback=self.getcountry)

    def getcountry(self, response):
        # Country list items carry both the link and the country name.
        datas = response.xpath('.//*[@class="countries__countries-list tab-pane fade in active"]/li').extract()
        for data in datas:
            sel = scrapy.Selector(text=data)
            link = sel.xpath('.//a/@href').extract_first()
            country = sel.xpath('.//a/text()').extract_first()
            # The country name travels with the request via meta.
            yield scrapy.Request(response.urljoin(link), callback=self.getstates, meta={
                'country': country
            })

    def getstates(self, response):
        links = response.xpath('.//*[@class="regions-list"]/li/a/@href').extract()
        for link in links:
            yield scrapy.Request(response.urljoin(link), callback=self.getstations, meta={
                'country': response.meta.get('country')
            })

    def getstations(self, response):
        stations = response.xpath('.//*[@class="stations__station__title"]/a/@href').extract()
        for station in stations:
            yield scrapy.Request(response.urljoin(station), callback=self.getdatas, meta={
                'country': response.meta.get('country')
            })

    def getdatas(self, response):
        """Scrape one station page and append its row to onlineradiobox.csv."""
        title = response.xpath('.//*[@class="station__title"]/text()').extract_first()
        location = response.xpath('.//*[@itemprop="additionalProperty"]/text()').extract_first()
        try:
            tags = ', '.join(response.xpath('.//*[@class="station__tags"]/li/a/text()').extract())
        except Exception:  # narrowed from a bare except:; join of missing tags may fail
            tags = ''
        description = response.xpath('.//*[@itemprop="description"]/text()').extract_first()
        website = response.xpath('.//*[@itemprop="url"]/@href').extract_first()
        facebook = response.xpath('.//*[@title="Facebook"]/@href').extract_first()
        twitter = response.xpath('.//*[@title="Twitter"]/@href').extract_first()
        wikipedia = response.xpath('.//*[@title="Wikipedia"]/@href').extract_first()
        # Write the header once.  os.path.isfile avoids listing the whole
        # directory on every scraped page; newline='' is the documented way
        # to open csv files (prevents blank rows on Windows).
        if not os.path.isfile('onlineradiobox.csv'):
            with open("onlineradiobox.csv", "a", newline='') as f:
                writer = csv.writer(f)
                writer.writerow(['country','title','location','tags','description','website','facebook','twitter','wikipedia'])
        with open("onlineradiobox.csv", "a", newline='') as f:
            writer = csv.writer(f)
            writer.writerow([response.meta.get('country'),title,location,tags,description,website,facebook,twitter,wikipedia])
        print([response.meta.get('country'),title,location,tags,description,website,facebook,twitter,wikipedia])
|
import os
import sys
sys.path.insert(0, 'scripts')
import experiments as exp
def get_possible_strategies():
    """Return the names of the supported tree-search strategies."""
    strategies = ["SPR", "EVAL"]
    return strategies
def get_jointsearch_datasets():
    """Map each dataset name under <datasets_root>/joint_search to its full path."""
    root_datadir = os.path.join(exp.datasets_root, "joint_search")
    return {name: os.path.join(root_datadir, name) for name in os.listdir(root_datadir)}
def get_jointsearch_command(gene_tree, species_tree, mapping, alignment, strategy, cores, output_dir, mode, additional_arguments):
    """Build the mpirun command line (as one string) for a joint-search run.

    *mode* selects the plain, gprof-instrumented, or scalasca-instrumented
    executable; *additional_arguments* are appended verbatim.
    """
    if (mode == "gprof"):
        executable = exp.joint_search_gprof_exec
    elif (mode == "scalasca"):
        executable = exp.joint_search_scalasca_exec
    else:
        executable = exp.joint_search_exec
    # NOTE(review): "join_search" (not "joint_search") matches the original
    # output prefix — presumably intentional; confirm before renaming.
    joint_search_output = os.path.join(output_dir, "join_search")
    command = [
        "mpirun", "-np", str(cores), executable,
        "-g", gene_tree,
        "-a", alignment,
        "-s", species_tree,
        "-m", mapping,
        "--strategy", strategy,
        "-p", joint_search_output,
    ]
    command.extend(additional_arguments)
    return " ".join(command)
|
# NAME EMOJI EMOJIXPRESS, MIL. INSTAGRAM, MIL. TWITTER, MIL.
# Grinning image 2.26 1.02 87.3
# Beaming image 19.1 1.69 150
# ROFL image 25.6 0.774 0
# Tears of Joy image 233 7.31 2270
# Winking image 15.2 2.36 264
# Happy image 22.7 4.26 565
# Heart Eyes image 64.6 11.2 834
# Kissing image 87.5 5.13 432
# Thinking image 6.81 0.636 0
# Unamused image 6 0.236 478
# Sunglasses image 4.72 3.93 198
# Loudly Crying image 24.7 1.35 654
# Kiss Mark image 21.7 2.87 98.7
# Two Hearts image 10 5.69 445
# Heart image 118 26 1080
# Heart Suit image 3.31 1.82 697
# Thumbs Up image 23.1 3.75 227
# Shrugging image 1.74 0.11 0
# Fire image 4.5 2.49 150
# Recycle image 0.0333 0.056 932
intro_text = "We're studying the statistics of various emojis."
print(intro_text)

# ASCII emoticons.
smile = ':-)'
laughing = ':-D'
kiss = ':-*'
print(smile)
print(laughing)
print(kiss)

# Unicode emoji.
heart = '❤️'
fire = '🔥'
shrug = '🤷'
print(heart)
print(fire)
print(shrug)
#########################################################
# Instagram usage counts (millions) for the first five emojis in the table above.
instagram = [1.02,1.69,0.774,7.31,2.36]
print(instagram)

# One full table row: name, EmojiXpress, Instagram, Twitter (all in millions).
grinning_row =['Grinning',2.26,1.02,87.3]
print(grinning_row)

# EmojiXpress usage counts (millions) for the first ten emojis.
emojixpress = [2.26, 19.1, 25.6, 233.0, 15.2, 22.7, 64.6, 87.5, 6.81, 6.0]
# Fixed: sum the first five entries with a slice instead of adding each
# element by hand (same value, no index bookkeeping).
total = sum(emojixpress[:5])
print("{:.2f}".format(total))
|
from .model import FusionModel
from .poisson import PoissonFusion
|
def parse_stringAlphabetic(s):
    """Return the lexicographically largest run of non-decreasing characters in *s*.

    The string is split into maximal substrings whose characters are in
    non-decreasing order; ``max()`` of those substrings is returned.

    Fixes over the original:
      * no IndexError when the scan reaches the end of the string,
      * the final run is included before taking the maximum,
      * runs are appended instead of assigned into out-of-range list slots,
      * Python 2 debug ``print`` removed.

    Raises ValueError for an empty string (the original crashed on it).
    """
    if not s:
        raise ValueError("empty string has no runs")
    runs = []
    current = str(s[0])
    # Walk adjacent pairs; extend the run while it stays non-decreasing.
    for prev, ch in zip(s, s[1:]):
        if prev <= ch:
            current += str(ch)
        else:
            runs.append(current)
            current = str(ch)
    runs.append(current)
    return max(runs)
|
# ############################################################################ #
# #
# ::: :::::::: #
# error.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: cacharle <me@cacharle.xyz> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/09/29 09:09:31 by cacharle #+# #+# #
# Updated: 2020/10/05 13:51:40 by cacharle ### ########.fr #
# #
# ############################################################################ #
import textwrap
class Philo(Exception):
    """Base class for all errors raised by this philosopher tester."""
    pass
class ShouldFail(Philo):
    """Raised when a scenario that was expected to fail did not."""

    def __init__(self, msg: str):
        super().__init__()
        self._msg = msg

    @property
    def summary(self):
        """One-line description of the expected failure."""
        return "Should fail: {}".format(self._msg)

    @property
    def full_summary(self):
        """Identical to summary — there is no extra detail for this error."""
        return self.summary
class Format(Philo):
    """Raised when a log line does not match the expected output format."""

    def __init__(self, line: str, msg: str):
        # The offending line and a description of the format violation.
        self._line = line
        self._msg = msg
        Philo.__init__(self)

    @property
    def full_summary(self):
        # Multi-line report: the message followed by the offending line.
        return """FORMAT ERROR: {}
{}
""".format(self._msg, self._line)

    @property
    def summary(self):
        return "format: {} {}".format(self._line, self._msg)
class Log(Philo):
    """Raised when the sequence of captured log entries is invalid."""

    def __init__(self, logs: "list[str]", msg: str):
        # The captured log entries and a description of what is wrong with them.
        self._logs = logs
        self._msg = msg
        Philo.__init__(self)

    @property
    def full_summary(self):
        # Message followed by every captured log entry, one per line.
        return textwrap.dedent("""\
        LOG ERROR: {}
        {}
        """).format(self._msg, '\n'.join([str(log) for log in self._logs]))

    @property
    def summary(self):
        return "log: {}".format(self._msg)
|
import config
import gui
import config_io
import usb_reader
import midi_output
import midi_event_sender
import midi_key_router
import hmi_event_interpreter
from threading import Thread
STORAGE_FILENAME = "configuration.sav"
DEVICE_VENDOR_ID = 0x17CC
DEVICE_PRODUCT_ID = 0x1410
class kontrol_main:
    """Top-level wiring: USB controller input -> GUI and MIDI routing/output."""

    def __init__(self):
        # Song database: display name, MIDI program number, and optionally an
        # extra device ("add_sound") to layer on top.
        # NOTE(review): "name" and "add_sound" values are one-element *sets*,
        # not strings — the `namelist +=` below relies on set iteration;
        # confirm this is intended.
        self.database = {"when_im_alone" : {"name" : {"When I'm Alone"}, "midi_program" : 3}, \
                         "beautiful_day" : {"name" : {"Beautiful Day"}, "midi_program" : 0}, \
                         "im_all_over_it" : {"name" : {"I'm all over it"}, "midi_program" : 2}, \
                         "broken_wings" : {"name" : {"Broken Wings"}, "midi_program" : 1}, \
                         "story" : {"name" : {"Story"}, "midi_program" : 4, "add_sound" : {"midi2"}}}
        # Playback order of the songs above.
        self.setlist = ["story", "beautiful_day", "broken_wings", "when_im_alone", "im_all_over_it"]
        namelist = []
        for so in self.setlist:
            namelist += self.database[so]["name"]
        self.controller = gui.instrument_controller(self.setlist, namelist, self)
        self.usb = usb_reader.USBReader(DEVICE_VENDOR_ID, DEVICE_PRODUCT_ID)
        self.midi_out = midi_output.MidiRouter()
        midi_router = midi_key_router.MIDIKeyRouter(self.midi_out)
        hmi_interpreter = hmi_event_interpreter.HMIEventInterpreter(self.controller)
        # Fan out incoming USB events to the MIDI key router and the GUI.
        self.usb.add_midi_key_subscriber(midi_router)
        self.usb.add_hmi_subscriber(hmi_interpreter)
        self.midi_out.add_midi_device("midi1")
        self.midi_out.add_midi_device("midi2")
        # midi1 is the always-active primary device.
        self.midi_out.activate_midi_key_route("midi1")
        self.midi_out.activate_midi_control_route("midi1")
        self.midi_sender = midi_event_sender.MidiEventSender(self.midi_out)

    def start(self):
        """Start reading USB events and enter the GUI main loop."""
        self.usb.start()
        self.controller.start()

    def setActiveInstrument(self, instrument):
        """Switch to *instrument*: send its MIDI program, toggle the layered device."""
        program_id = self.database[instrument]["midi_program"]
        # Release the sustain pedal before changing programs.
        self.midi_sender.set_pedal_off()
        self.midi_sender.set_program_event(program_id)
        # Layer midi2 only for songs that request an extra sound.
        if("add_sound" in self.database[instrument].keys()):
            self.midi_out.activate_midi_key_route("midi2")
        else:
            self.midi_out.deactivate_midi_key_route("midi2")
# Script entry point: build the controller stack and start the USB + GUI loops.
main = kontrol_main()
main.start()
|
#!/usr/bin/python3
def decode_string(code, upper_limit):
    """Binary-partition a seat code (AoC 2020 day 5) over [0, upper_limit].

    'F'/'L' keep the lower half of the remaining range, 'B'/'R' the upper half.
    Any other character raises an Exception.
    """
    lower_limit = 0
    for letter in code:
        midpoint = (lower_limit + upper_limit) // 2
        if letter in ('F', 'L'):
            upper_limit = midpoint
        elif letter in ('B', 'R'):
            # +1 because both bounds are inclusive and we start at 0.
            lower_limit = midpoint + 1
        else:
            raise Exception("bad code: ", letter)
    # After the scan both limits are equal; pick per the last halving direction.
    if code[-1] in ('F', 'L'):
        return lower_limit
    return upper_limit
def get_seat_id(boarding):
    """Seat ID = row * 8 + column (row from the first 7 chars, column from the last 3)."""
    row = decode_string(boarding[:7], 127)
    column = decode_string(boarding[7:], 7)
    return row * 8 + column
def fst(boarding_list):
    """Part 1: the highest seat ID among all boarding passes."""
    return max(get_seat_id(entry.strip()) for entry in boarding_list)
def snd(boarding_list):
    """Part 2: find the one missing seat ID between the lowest and highest seats.

    Builds the full contiguous range of seat IDs and subtracts the occupied
    ones (the original filled a list element-by-element; ``range`` is the
    idiomatic equivalent and produces the same set).
    """
    seats = {get_seat_id(entry.strip()) for entry in boarding_list}
    full_range = set(range(min(seats), max(seats)))
    return max(full_range.difference(seats))
if __name__ == '__main__':
    # test cases
    #print(get_seat_id('FBFBBFFRLR'))
    #print(get_seat_id('BFFFBBFRRR'))
    #print(get_seat_id('FFFBBBFRRR'))
    #print(get_seat_id('BBFFBBFRLL'))
    # NOTE(review): the file handle is never closed — harmless in a one-shot
    # script, but a `with open(...)` would be tidier.
    boarding_list = open('/tmp/input.txt', 'r').readlines()
    #print(fst(boarding_list))
    print(snd(boarding_list))
|
# def foo():
# print("starting...")
# while True:
# res = yield 4
# print("res:",res)
# g = foo()
# print(next(g))
# print("*"*20)
# print(g.send(7))
# print(next(g))
def foo(num):
    """Generator: print a banner, then yield num+1, num+2, ... up to 10."""
    print("starting...")
    while num < 10:
        num += 1
        yield num
# Drain the generator, printing 1..10 (after the "starting..." banner).
for n in foo(0):
    print(n)
from scipy.fftpack import fft
import scipy.signal as signal
import numpy as np
def get_fft(y, t):
    """One-sided amplitude spectrum of signal *y* sampled at interval *t*.

    Returns (amplitudes, frequencies), each of length len(y) // 2; frequencies
    span [0, Nyquist = 1/(2t)].
    """
    n_samples = len(y)
    spectrum = fft(y)
    freqs = np.linspace(0., 1. / (2. * t), n_samples // 2)
    # Flatten, then normalize magnitudes: factor 2/N for a one-sided spectrum.
    flat = np.reshape(spectrum, (n_samples))
    amplitudes = 2.0 / n_samples * np.abs(flat[0:n_samples // 2])
    return (amplitudes, freqs)
def get_dominant_periods(projected, t):
    """Periods (1/frequency) of spectral peaks in the first column of *projected*.

    Peaks are found with a continuous-wavelet-transform detector over the FFT
    amplitude spectrum; *t* is the sampling interval.
    """
    (fft_rs, fft_freq) = get_fft(projected[:,0], t)
    # Candidate peak widths (in samples) for the CWT peak detector.
    widths = np.arange(1,10)
    peaks = signal.find_peaks_cwt(fft_rs, widths, min_length=1)
    freqs = fft_freq[peaks]
    # NOTE(review): a peak at frequency 0 (DC) would divide by zero here.
    return 1./freqs
def get_dominant_period(projected, dt = 0.01):
    """Oscillation period of the first column of *projected* (wrapper around get_period)."""
    return get_period(projected[:,0], dt)
"""
Compute the oscillation period of a timeseries.
Sample usage:
pca = PCA()
projected_X = pca.fit_transform(X)
dominant_period = get_period(projected_X[:,0])
"""
def get_period(timeseries, dt = 0.01):
    """Return the period (1/frequency) of the strongest FFT peak of *timeseries*."""
    # Locate the frequency bin with the maximum spectral amplitude.
    (pc_fft, pc_fft_freq) = get_fft(timeseries, dt)
    peak = np.where(pc_fft == np.amax(pc_fft))
    freq_1 = pc_fft_freq[peak]
    # NOTE(review): if the strongest bin is the DC component (freq 0),
    # this divides by zero.
    per_1 = 1./freq_1[0]
    return per_1
"""
Get the eigenvalues from a fitted PCA.
Sample usage:
n = X.shape[0]
pca = PCA()
eigen_vals = get_eigenvalues_from_pca(pca, n)
# You can confirm that this transformation from singular values is equivalent
# to the eigenvalues if we had done PCA manually through covariance matrix as below.
# See https://towardsdatascience.com/pca-and-svd-explained-with-numpy-5d13b0d2a4d8
C = np.dot(X.T, X) / (n-1)
eigen_vals, _ = np.linalg.eig(C)
# You can then confirm these two definitions are equivalent.
"""
def get_eigenvalues_from_pca(pca, n):
    """Covariance-matrix eigenvalues recovered from a fitted sklearn PCA.

    For data X with *n* samples, the eigenvalues of C = X^T X / (n - 1) equal
    the squared singular values divided by (n - 1).

    Fixed: the original returned the squared singular values without the
    1/(n - 1) factor, contradicting the equivalence documented in the
    module-level usage note above (the *n* argument was ignored entirely).
    Downstream get_dimensionality() is scale-invariant, so it is unaffected.
    """
    return pca.singular_values_ ** 2.0 / (n - 1.0)
"""
Measure of dimensionality by litwin-kumar, et al. 2017.
Sample usage:
n = X.shape[0]
pca = PCA()
eigen_vals = get_eigenvalues_from_pca(pca, n)
get_dimensionality(eigen_vals)
"""
def get_dimensionality(w):
    """Participation-ratio dimensionality (Litwin-Kumar et al. 2017).

    Computed as (sum w)^2 / sum(w^2) for an eigenvalue vector *w* (elementwise
    multiplication, so *w* is expected to be a numpy array).
    """
    total = sum(w)
    squared_sum = total * total
    sum_of_squares = sum(w * w)
    return 1.0 * squared_sum / sum_of_squares
"""
We assume the timeseries has already stabilized.
If not, discard the the first few timesteps of your timeseries.
Some papers like [Fletcher 2016 - From global to local...] filters for oscillation based on amplitude.
"""
def get_amplitude(timeseries):
    """Peak-to-peak amplitude (max minus min) of an already-stabilized timeseries."""
    peak, trough = max(timeseries), min(timeseries)
    return peak - trough
"""
Compare the amplitude of two half-times.
A good amplitude difference is something small like 0.1, because the worm amplitudes already go up to as high as 500.
Returns (raw value, normalized value)
"""
def get_amplitude_differences(timeseries):
    """Compare the amplitudes of the two halves of *timeseries*.

    Returns (raw difference, difference normalized by the second half's
    amplitude); a small normalized value indicates sustained oscillation.
    """
    midpoint = int(len(timeseries) / 2.0)
    second_half_amp = get_amplitude(timeseries[midpoint:])
    first_half_amp = get_amplitude(timeseries[:midpoint])
    raw_diff = abs(second_half_amp - first_half_amp)
    normalized_diff = raw_diff / second_half_amp
    return raw_diff, normalized_diff
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.build_files.fmt.base import FmtBuildFilesRequest
from pants.backend.python.lint.yapf import subsystem as yapf_subsystem
from pants.backend.python.lint.yapf.rules import _run_yapf
from pants.backend.python.lint.yapf.subsystem import Yapf
from pants.backend.python.subsystems.python_tool_base import get_lockfile_interpreter_constraints
from pants.core.goals.fmt import FmtResult
from pants.engine.rules import collect_rules, rule
from pants.util.logging import LogLevel
class YapfRequest(FmtBuildFilesRequest):
    """Fmt request batching BUILD files for formatting with Yapf."""
    tool_subsystem = Yapf
@rule(desc="Format with Yapf", level=LogLevel.DEBUG)
async def yapf_fmt(request: YapfRequest.Batch, yapf: Yapf) -> FmtResult:
    """Run Yapf over a batch of BUILD files using the tool lockfile's interpreter constraints."""
    yapf_ics = await get_lockfile_interpreter_constraints(yapf)
    return await _run_yapf(request, yapf, yapf_ics)
def rules():
    """Engine rules registered by this backend: this module's rules plus the
    Yapf request and subsystem rules."""
    return [
        *collect_rules(),
        *YapfRequest.rules(),
        *yapf_subsystem.rules(),
    ]
|
#!/usr/bin/python
#-*-coding:UTF-8-*-
import random
import struct
def work():
    """Draw a random float and return it reinterpreted as two little-endian uint16s.

    The float's range depends on a 1-10 die roll: 1 -> [0, 1); 2-9 -> [1, 5);
    10 -> [5, 6).
    """
    roll = random.randint(1, 10)
    if roll == 1:
        value = random.uniform(0, 1)
    elif 1 < roll < 10:
        value = random.uniform(1, 5)
    else:
        value = random.uniform(5, 6)
    # Pack the value as a 4-byte little-endian float, then split those bytes
    # into two little-endian unsigned shorts.
    return struct.unpack('<HH', struct.pack('<f', value))
|
from rest_framework import serializers
from .models import Room, Time
class RoomSerializer(serializers.ModelSerializer):
    """Serializes a Room: name, timestamps, and primary key."""
    class Meta:
        model = Room
        fields = (
            'name',
            'updated_at',
            'created_at',
            'pk')
class TimeSerializer(serializers.ModelSerializer):
    """Serializes an interview Time slot: room, interval, participants, timestamps, pk."""
    class Meta:
        model = Time
        fields = (
            'room',
            'start_time',
            'end_time',
            'interviewee',
            'interviewer',
            'updated_at',
            'created_at',
            'pk')
#!/usr/bin/python
#-*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import types
# ==========================================================
#
# Dataset loading
#
# ==========================================================
#
# Dataset loading
#
mnist = input_data.read_data_sets("./samples/MNIST_data/", one_hot=True)
# ==========================================================
#
# Set up model  (TF1 graph-mode API; deprecated in TF2)
#
# 28*28 pixels -> 28^2 x(features) -> 784 features
x = tf.placeholder(tf.float32, [None, 784])
# None -> unlimited batch size
# To get one y (output) there must be one weight per feature of x;
# there are 10 possible outputs (digits),
# so W has dimension 784*10.
W = tf.Variable(tf.zeros([784, 10]))
# b (bias) of each hypothesis for each target (label)
b = tf.Variable(tf.zeros([10]))
# Hypothesis + Softmax
y = tf.nn.softmax(tf.matmul(x, W) + b)
# y is the predicted probability distribution; y_ is the true distribution
# (the one-hot vectors we feed in).
y_ = tf.placeholder(tf.float32, [None, 10])
# Cost function (= loss): cross entropy.
# cross_entropy = -tf.reduce_sum(y_*tf.log(y))
cross_entropy = tf.reduce_sum(y_*(-tf.log(y)))
# Elementwise tensor product via the * operator:
# cost is the labeled (one-hot) vector times (-tf.log(y)).
# Gradient Descent
# tf.train.GradientDescentOptimizer(learning_rate)
# tf.train.Optimizer.minimize(loss)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# ==========================================================
#
# Learning
#
# Session
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Learning: train on mini-batches of 100 examples.
# Each row of batch_xs is one image's pixel data;
# each row of batch_ys is its one-hot label.
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # stochastic training
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Do not close session at this point:
# training is finished, but the same session
# is reused in the testing procedure below.
# sess.close()
# ==========================================================
#
# Validation & Result
#
# Validation
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# tf.argmax(input, dimension, name=None)
# Returns: A Tensor of type int64.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Result should be approximately 91%.
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
sess.close()
|
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import generics, filters
from rest_framework.exceptions import NotFound
from workprogramsapp.expertise.models import UserExpertise, ExpertiseComments, Expertise
from workprogramsapp.expertise.serializers import UserExpertiseSerializer, CommentSerializer, ExpertiseSerializer
from workprogramsapp.permissions import IsMemberOfExpertise, IsRpdDeveloperOrReadOnly, IsMemberOfUserExpertise, \
IsExpertiseMaster, IsWorkProgramMemberOfExpertise
from workprogramsapp.workprogram_additions.models import UserStructuralUnit
class UserExpertiseListView(generics.ListAPIView):
    """List all expertise records for the requesting expert (resolved from the auth token).

    Pass an expertise id as `pk` in the URL to narrow to a single expertise.
    """
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsMemberOfExpertise]

    def get_queryset(self, *args, **kwargs):
        # Narrow to one expertise when a pk is present in the URL kwargs.
        if ('pk' in dict(self.kwargs)):
            return UserExpertise.objects.filter(expertise=self.kwargs['pk'], expert=self.request.user)
        else:
            return UserExpertise.objects.filter(expert=self.request.user)
class UserExpertiseCreateView(generics.CreateAPIView):
    """Create a user-expertise record (an expert's participation in an expertise)."""
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsMemberOfExpertise]
class ExpertiseCommentsView(generics.ListAPIView):
    """List comments for the expertise whose id is given in the URL.

    An optional `block` query parameter restricts the result to one comment
    block; without a pk in the URL all comments are returned.
    """
    queryset = ExpertiseComments.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [IsMemberOfExpertise]

    def get_queryset(self, *args, **kwargs):
        if ('pk' in dict(self.kwargs)):
            # Optionally narrow to a single comment block.
            if self.request.query_params.get('block') != None:
                return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'],
                                                        comment_block=self.request.query_params.get('block'))
            else:
                return ExpertiseComments.objects.filter(user_expertise__expertise=self.kwargs['pk'])
        else:
            return ExpertiseComments.objects.all()
class ExpertiseCommentCreateView(generics.CreateAPIView):
    """Create a comment on an expertise."""
    queryset = ExpertiseComments.objects.all()
    serializer_class = CommentSerializer
    permission_classes = [IsMemberOfExpertise]
class ExpertiseWorkProgramView(generics.RetrieveAPIView):
    """Retrieve the expertise linked to the given work-program id."""
    # TODO: Is this view needed at all?
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsWorkProgramMemberOfExpertise, IsRpdDeveloperOrReadOnly]

    def get_object(self):
        # `pk` in the URL is a work-program id, not an Expertise id.
        try:
            return Expertise.objects.get(work_program__id=self.kwargs['pk'])
        except Expertise.DoesNotExist:
            raise NotFound()
class ExpertiseListView(generics.ListAPIView):
    """Searchable, filterable list of expertises visible to the requesting user."""
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsMemberOfUserExpertise]
    filter_backends = [filters.SearchFilter, filters.OrderingFilter, DjangoFilterBackend]
    filterset_fields = ['date_of_last_change', 'expertise_status', 'work_program__title', 'work_program__qualification',
                        'work_program__discipline_code', 'work_program__editors__first_name', 'expertse_users_in_rpd__expert__first_name',
                        'work_program__editors__last_name', 'expertse_users_in_rpd__expert__last_name']
    search_fields = ['work_program__title', 'work_program__qualification',
                     'work_program__discipline_code', 'work_program__editors__first_name', 'expertse_users_in_rpd__expert__first_name',
                     'work_program__editors__last_name', 'expertse_users_in_rpd__expert__last_name']

    def get_queryset(self):
        # Note the use of `get_queryset()` instead of `self.queryset`
        request = self.request
        # Expertise masters see every expertise.
        if request.user.groups.filter(name="expertise_master"):
            queryset = Expertise.objects.all()
        # Structural-unit leaders/deputies see their unit's work programs
        # plus any expertises they participate in personally.
        elif UserStructuralUnit.objects.filter(user=request.user, status__in=["leader", "deputy"]):
            queryset = Expertise.objects.filter(
                work_program__structural_unit__user_in_structural_unit__user=request.user,
                work_program__structural_unit__user_in_structural_unit__status__in=["leader", "deputy"]).distinct() | \
                Expertise.objects.filter(expertse_users_in_rpd__expert=request.user).distinct()
        else:
            # Everyone else: only expertises they participate in.
            queryset = Expertise.objects.filter(expertse_users_in_rpd__expert=request.user)
        return queryset
class ExpertiseViewById(generics.RetrieveAPIView):
    """Retrieve a single expertise by its id."""
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsMemberOfExpertise]
class ExpertiseCreateView(generics.CreateAPIView):
    """Create an expertise.

    The creating user is automatically added as the expertise leader
    (see the serializer for the creation details).
    """
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsRpdDeveloperOrReadOnly]
class ChangeExpertiseView(generics.UpdateAPIView):
    """Update an expertise."""
    queryset = Expertise.objects.all()
    serializer_class = ExpertiseSerializer
    permission_classes = [IsExpertiseMaster]
class ChangeUserExpertiseView(generics.UpdateAPIView):
    """Update a single user's expertise record."""
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsMemberOfUserExpertise]
class DeleteUserExpertise(generics.DestroyAPIView):
    """Delete a single user's expertise record.

    (The original docstring said "edit" — this is a DestroyAPIView.)
    """
    queryset = UserExpertise.objects.all()
    serializer_class = UserExpertiseSerializer
    permission_classes = [IsExpertiseMaster]
|
class FindMilk(object):
    """Grid-world RL environment: reach the milk in the far corner of a width x width grid.

    The agent starts at (0, 0); each step costs -1 and reaching the milk ends
    the episode with reward +20.  Fixed "negative" and "positive" cells are
    consumed on first visit and only counted (see log()); they do not affect
    the reward, but neighbouring ones are flagged in the state.
    """
    def __init__(self, width=10):
        self.width = width
        # The milk (goal) sits in the opposite corner from the start.
        self.milk_pos = (width-1, width-1)
        self.neg_pos = [(6,6), (4,5), (3,4), (8,7), (2,1), (6,3), (3,8), (4,9), (8,0), (7,9)]
        self.pos_pos = [(1,3), (7,6), (4,4), (7,4), (5,5)]
        # 0: down, 1: up, 2: left, 3: right
        self.actions = [0, 1, 2, 3]
        return

    def reset(self):
        """Restart an episode and return the initial state.

        State layout: (x, y, then one flag per action: +1 if the neighbour in
        that direction is a positive cell, -1 if negative, 0 otherwise).
        """
        self.state = (0, 0, 0, 0, 0, 0)
        #self.timestamp = 0
        self.neg_pos = [(6,6), (4,5), (3,4), (8,7), (2,1), (6,3), (3,8), (4,9), (8,0), (7,9)]
        self.pos_pos = [(1,3), (7,6), (4,4), (7,4), (5,5)]
        self.done = False
        self.neg_passed = 0
        self.pos_passed = 0
        return self.state

    # use to ensure the agent is not going out of the grid
    def clip(self, x):
        return min(max(x, 0), self.width-1)

    def next_pos(self, x, y, action):
        """Coordinates reached by taking *action* from (x, y), clipped to the grid."""
        if action == 0: y += 1 # move down
        elif action == 1: y -= 1 # move up
        elif action == 2: x -= 1 # move left
        elif action == 3: x += 1 # move right
        return self.clip(x), self.clip(y)

    def step(self, action):
        """Apply *action*; return (state, reward, done)."""
        if action not in self.actions:
            raise AssertionError
        x, y, _, _, _, _ = self.state
        next_x, next_y = self.next_pos(x, y, action)
        # Consume special cells on first visit and count them.
        if (next_x, next_y) in self.neg_pos:
            self.neg_pos.remove((next_x, next_y))
            self.neg_passed += 1
        elif (next_x, next_y) in self.pos_pos:
            self.pos_pos.remove((next_x, next_y))
            self.pos_passed += 1
        # Rebuild the per-action neighbour flags for the new position.
        self.state = (next_x, next_y) + tuple([0 + (self.next_pos(next_x, next_y, a) in self.pos_pos)
                     - (self.next_pos(next_x, next_y, a) in self.neg_pos) for a in self.actions])
        if (next_x, next_y) == self.milk_pos:
            self.done = True
        if self.done: reward = 20
        else: reward = -1
        return self.state, reward, self.done

    def log(self):
        """Return (negative cells visited, positive cells visited) this episode."""
        return self.neg_passed, self.pos_passed
|
import requests
from bs4 import BeautifulSoup
# I change the size of the film name data set from 500(I mentioned in Milestone1)to 200.
# Since the API I need to use in next steps has a 1000 limits per day.
# I don't want my program can only run two times a day...
def get_film_name():
    """Scrape 200 top-rated film names from IMDb's Top-1000 search pages.

    The list was reduced from 500 to 200 because the downstream API allows
    only 1000 calls per day (see the note above).
    """
    name_list = []
    # Each results page shows 50 films; these are the page-start offsets.
    pages = [1, 51, 101, 151]
    for page_number in pages:
        url = 'https://www.imdb.com/search/title/?groups=top_1000&sort=user_rating,desc&start={}&ref_=adv_nxt'.format(page_number)
        r = requests.get(url)
        page = r.text
        soup = BeautifulSoup(page, 'html.parser')
        # One result block per film; the title is the header's anchor text.
        for tag in soup.find_all('div', class_='lister-item mode-advanced'):
            info = tag.find('h3', class_='lister-item-header')
            name = info.find('a').get_text()
            name_list.append(name)
    return name_list
def main():
    """Scrape the film names and write them, one per line, to ../data/movies_name.txt."""
    print('Getting the film names from the website')
    films = get_film_name()
    print('Writing the film names to movies_name.txt')
    # Context manager guarantees the file is closed even if a write fails
    # (the original used explicit open/close without protection).
    with open('../data/movies_name.txt', 'w') as f:
        for name in films:
            f.write(name + '\n')
    print('Success!')
if __name__ == '__main__':
main()
|
# -*-coding: UTF-8-*-
from numpy import *
import matplotlib.pyplot as plt
import random
'''Load a file; return the feature matrix and the label vector.'''
def openFile(fileName):
    """Read *fileName* (CSV with a header row); the last column is the label."""
    dataSet = loadtxt(fileName, str, delimiter=',')
    # Skip the header row; all columns but the last are float features.
    data = dataSet[1:, 0:len(dataSet[0])-1].astype(float)
    label = dataSet[1:, len(dataSet[0])-1].astype(float)
    # print(dataSet)
    return data, label
'''选取两列数据进行数据可视化'''
def viewData():
    """Scatter-plot two feature columns of train.csv, styled by label."""
    data, label = openFile("train.csv")
    # Amplify the visual difference between the two label classes
    # (0 -> 0.1, 1 -> 3) so marker size/color separates them clearly.
    label[label == 0] = 0.1
    label[label == 1] = 3
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Columns 2 and 17 are hard-coded; size and color both scale with label.
    ax.scatter(data[:, 2], data[:, 17], 15.0*label, 15.0*label)
    plt.show()
'''求两个点之间的欧氏距离'''
def getDistance(aimerArr, baseArr):
    """Return the Euclidean distance between two equal-length point vectors.

    Empty input yields 0, matching the original behaviour. The original
    took the square root only on the last loop iteration via an awkward
    `if i == l-1` guard; summing the squares first is equivalent and clearer.
    """
    total = 0.0
    for a, b in zip(aimerArr, baseArr):
        total += (a - b) ** 2
    return total ** 0.5
''' KNN '''
def KNN(k, dataSet, labelSet, aimData):
    """Classify `aimData` by majority vote among its k nearest neighbours.

    :param k: number of neighbours to poll
    :param dataSet: (rows, features) training matrix
    :param labelSet: per-row labels (0 = normal, anything else = liar)
    :param aimData: single feature vector to classify
    :return: 0 when label-0 wins the vote, 1 otherwise
    """
    samples = asarray(dataSet, dtype=float)
    target = asarray(aimData, dtype=float)
    # Vectorised Euclidean distance to every training row.
    dists = sqrt(((samples - target) ** 2).sum(axis=1))
    counter_normal = 0
    counter_liar = 0
    # Bug fix: the original indexed labelSet with a float pulled from a
    # float matrix of (index, distance) pairs; modern NumPy rejects
    # non-integer indices. argsort yields proper integer indices.
    for idx in argsort(dists)[:k]:
        if labelSet[int(idx)] == 0:
            counter_normal += 1
        else:
            counter_liar += 1
    return 0 if counter_normal > counter_liar else 1
'''主函数'''
if __name__ == '__main__':
    # Entry point: train a random-sample KNN on train.csv, label test.csv,
    # and write the predictions out as a submission CSV.
    print("hello world")
    trainData, trainLabel = openFile("train.csv")
    # Test file: drop the header row and convert everything to float.
    testData = loadtxt("test.csv", str, delimiter=',')
    testData= testData[1:, :].astype(float)
    # print(trainLabel)
    # viewData()
    k = 51
    random_num = 200
    row, line = shape(trainData)
    # trainData = trainData[0:(row/200), :]
    # trainLabel = trainLabel[0:(row/200)]
    randomTrainData = zeros((random_num, line))
    randomTrainLabel = zeros(random_num)
    # Randomly sample `random_num` training rows (the original Chinese note
    # below says "randomly draw 5000 rows", but random_num is 200 here).
    # NOTE(review): only *consecutive* duplicate indices are rejected, so the
    # sample can still contain repeats — confirm that is acceptable.
    '''随机抽取5000条数据'''
    last_index = -1
    for i in range(random_num):
        index = random.randint(0, row - 1)
        while index == last_index:
            index = random.randint(0, row - 1)
        last_index = index
        for j in range(line):
            randomTrainData[i][j] = trainData[index][j]
        randomTrainLabel[i] = trainLabel[index]
    row, line = shape(trainData)
    n, m = shape(testData)
    # prediction = zeros(row)
    # counter = 0.0001
    # NOTE(review): 'prediciton' is a typo for 'prediction', kept unchanged.
    prediciton = zeros(n)
    for i in range(n):
        prediciton[i] = KNN(k, randomTrainData, randomTrainLabel, testData[i])
    # if trainLabel[i] == KNN(k, trainData, trainLabel, testData[i]):
    #     counter += 1
    #
    # print(counter/row)
    # print(dis)
    # print(prediciton)
    # Output matrix: column 0 is the user id (first test field), column 1 the
    # predicted label; a separator line is printed for each positive case.
    myCsvFile = zeros((n, 2), dtype=int)
    # myCsvFile[0][0] = 'user_id'
    # myCsvFile[0][1] = 'label'
    for i in range(n):
        myCsvFile[i][0] = testData[i][0]
        myCsvFile[i][1] = prediciton[i]
        if prediciton[i] == 1:
            print("-----------------------")
    savetxt('201531060235.csv', myCsvFile, fmt='%d', delimiter=',', header='user_id,label')
    print("hello world")
import sys
import BFS
import Common
# Driver: usage is  <script> <start-file> <goal-file> <mode> <out-file>
# where mode is one of dfs / bfs / iddfs / astar.
startState = Common.read_data_set(sys.argv[1])
goalState = Common.read_data_set(sys.argv[2])
outfile = sys.argv[4]
mode = sys.argv[3]
path = ""
if mode == 'dfs':
    path = Common.dfs_main(startState, goalState)
elif mode == 'bfs':
    path = BFS.BFS(startState, goalState)
elif mode == 'iddfs':
    path = Common.idfs(startState, goalState)
elif mode == 'astar':
    path = Common.astar_main(startState, goalState)
else:
    # Unknown mode: report it; an empty path is still written below,
    # preserving the original behaviour.
    print("Invalid argument for algorithm MODE")
# Context manager closes the file even if the write fails
# (the original relied on an explicit close()).
with open(outfile, 'w') as f:
    f.write(path)
|
# Andrew's monotone chain convex hull.
# Overall complexity is O(n log n), dominated by the sort
# (the original comment's "O(logn)" was a slip).
class Solution:
    def outerTrees(self, trees: List[List[int]]) -> List[List[int]]:
        """Return all points on the fence (convex hull), collinear points included."""
        # Cross product of vectors pq and qr; negative means r is a
        # clockwise (right) turn from pq.
        def cross(p: List[int], q: List[int], r: List[int]) -> int:
            return (q[0] - p[0]) * (r[1] - q[1]) - (q[1] - p[1]) * (r[0] - q[0])
        n = len(trees)
        if n < 4:
            # With fewer than 4 points, every point lies on the hull.
            return trees
        # Sort by x ascending, tie-broken by y ascending, so the chain can
        # be swept left-to-right and back.
        trees.sort()
        hull = [0]  # hull[0] is pushed for both chains, so it is never marked used
        used = [False] * n
        # Build the lower half of the hull.
        for i in range(1, n):
            while len(hull) > 1 and cross(trees[hull[-2]], trees[hull[-1]], trees[i]) < 0:
                used[hull.pop()] = False
            used[i] = True
            hull.append(i)
        # Build the upper half of the hull; m guards the lower chain from
        # being popped again.
        m = len(hull)
        for i in range(n - 2, -1, -1):
            if not used[i]:
                while len(hull) > m and cross(trees[hull[-2]], trees[hull[-1]], trees[i]) < 0:
                    used[hull.pop()] = False
                used[i] = True
                hull.append(i)
        # hull[0] took part in the upper-chain sweep too, so drop the duplicate.
        hull.pop()
        return [trees[i] for i in hull]
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#####################################################################################
# #
# create_top_sun_angle_html.py: creating the top sun angle html page #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Jan 21, 2020 #
# #
#####################################################################################
import os
import sys
import re
import string
import random
import math
import sqlite3
import unittest
import time
import numpy
import astropy.io.fits as pyfits
import Chandra.Time
#
#--- reading directory list
#
# Each line of dir_list is "<value> : <variable_name>"; the exec below binds
# every listed name (e.g. house_keeping, web_dir, mta_dir, bin_dir — TODO
# confirm the full list against the dir_list file) as a module-level variable.
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    # NOTE(review): exec on file contents — acceptable only because dir_list
    # is a locally controlled, trusted configuration file.
    exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(mta_dir)
sys.path.append(bin_dir)
#
import mta_common_functions as mcf      #---- mta common functions
#
#--- set a temporary file name
#
# Pseudo-random suffix keeps parallel runs from colliding on /tmp files.
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
web_address = 'https://' + web_address
def create_top_html():
    """Create the top-level sun-angle trending HTML page.

    Reads group descriptions from house_keeping, builds a table of links to
    the Avg/Min/Max long-term sun-angle plots for each msid group, merges
    the table into the site template, and writes the result to
    <web_dir>/mta_trending_sun_angle_main.html.

    Relies on module-level globals set by the dir_list exec at load time
    (house_keeping, web_dir) and on mcf for file reading.
    """
    # Table header row: only the full-range column is currently shown
    # (the 5-year/1-year/quarterly/weekly columns were disabled upstream).
    dline = '<th colspan=4 class="blue">Full Range</th>\n'
    dline = dline + '</tr>\n'
    # Read group descriptions: a '#'-prefixed line starts a new group, a
    # blank line closes it, and every other line is "<msid>::<description>".
    gfile = house_keeping + 'group_descriptions_sun_angle'
    gdata = mcf.read_data_file(gfile)
    g_list = []
    gn_dict = {}
    gd_dict = {}
    gn_list = []
    g_disc = []
    for ent in gdata:
        mc = re.search('#', ent)
        if mc is not None:
            ent = ent.replace('#', '')
            g_list.append(ent)
            gname = ent
        elif ent == "":
            gn_dict[gname] = gn_list
            gd_dict[gname] = g_disc
            gn_list = []
            g_disc = []
        else:
            atemp = re.split('::', ent)
            gn_list.append(atemp[0])
            g_disc.append(atemp[1])
    # Build the table body.
    # Bug fix: `line` was read before ever being assigned in the original,
    # raising UnboundLocalError on the first iteration.
    line = ''
    mlist = ('mid', 'min', 'max')
    mname = ('Avg', 'Min', 'Max')
    for gval in g_list:
        group_list = gn_dict[gval]
        discip_list = gd_dict[gval]
        line = line + '<tr><th class="blue">' + gval + '</th>\n'
        line = line + dline
        for k in range(0, len(group_list)):
            line = line + '<tr>\n'
            # Bug fix: the closing tag was malformed ('</th') in the original.
            line = line + '<th>' + discip_list[k] + '</th></tr>\n'
            gnam = group_list[k].lower()
            mpart = '<td><a href="./' + gnam.capitalize() + '/' + gnam + '_'
            for m in range(0, 3):
                line = line + mpart + mlist[m]
                line = line + '_long_sun_angle.html">' + mname[m] + '</a></td>\n'
            line = line + '</tr>\n'
        line = line + '<tr><th colspan=4> </th></tr>\n\n'
        line = line + '</tr>\n'
    # Read the javascript snippet and the page template, then fill the
    # placeholder tokens.
    jfile = house_keeping + '/Templates/java_script_deposit'
    with open(jfile, 'r') as f:
        j_script = f.read()
    template = house_keeping + 'Templates/top_header'
    with open(template, 'r') as f:
        page = f.read()
    page = page.replace('#JAVASCRIPT#', j_script)
    page = page.replace('#TITLE#', 'Sun Angle-MSID Trend')
    page = page.replace('#TABLE#', line)
    page = page.replace('#EXPLANATIONS#', '')
    page = page.replace('how_to_create_plots.html', 'how_to_create_sun_angle.html')
    page = page.replace('#OTHER_H#', 'mta_trending_main.html')
    page = page.replace('#OTHER#', '<b>Open MSID Trending Page</b>')
    page = page.replace('#OTHER_H2#', '')
    page = page.replace('#OTHER2#', '')
    page = page.replace('#OTHER_H3#', '')
    page = page.replace('#OTHER3#', '')
    page = page.replace('#PAGE_EXP#', 'how_to_create_sun_angle.html')
    atext = '<p>This page presents the relation between the sun angle '
    atext = atext + 'and (mainly) the temperature related msids.</p> '
    atext = atext + '<p>The data are divided into one-year length to show the possible '
    atext = atext + 'time evolution of the relation between the sun angle and the msid.</p>'
    page = page.replace('<!-- EXTRA -->', atext)
    # Append the common footer and write the finished page out.
    efile = house_keeping + 'Templates/html_close'
    with open(efile, 'r') as f:
        end_line = f.read()
    page = page + end_line
    outfile = web_dir + 'mta_trending_sun_angle_main.html'
    with open(outfile, 'w') as fo:
        fo.write(page)
#----------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: regenerate the sun-angle top page.
    create_top_html()
|
#!/usr/bin/python3
import re
# from p6Driver import *
from p5Dict import *
from exceptionHandler import *
verbose = False
def tokenizePrint(line):
    """Print a PRINT statement's operands separated by spaces.

    Quoted tokens are printed verbatim without their quotes; bare tokens
    are looked up (case-insensitively) in varValueD and printed by value,
    likewise stripped of quotes when the stored value is quoted. Tokens
    that are neither quoted nor known variables are silently skipped.
    """
    for token in line.split():
        key = token.upper()
        if key.startswith("\"") and token.endswith("\""):
            print(token[1:-1], end=" ")
        elif key in varValueD:
            value = varValueD[key]
            if value.startswith("\""):
                print(value[1:-1], end=" ")
            else:
                print(value, end=" ")
    print("")
#---------------------------------------------------------
def isVar(sentence):
    """Return True when the sentence begins with the VAR keyword.

    Returns an explicit bool in every case (the original returned None
    for non-matches, relying on truthiness at the call sites).
    """
    return sentence[0:3] == 'VAR'
def isPrint(sentence):
    """Return True when the sentence begins with the PRINT keyword.

    Returns an explicit bool (the original returned None for non-matches).
    """
    return sentence[0:5] == 'PRINT'
def isAssign(sentence):
    """Return True when the sentence begins with the ASSIGN keyword.

    Returns an explicit bool (the original returned None for non-matches).
    """
    return sentence[0:6] == 'ASSIGN'
def isGoto(sentence):
    """Return True when the sentence contains GOTO anywhere.

    Returns an explicit bool (the original returned None for non-matches).
    """
    return 'GOTO' in sentence
def isLabel(sentence):
    """Return True when the text before the first ':' names a known label.

    Labels are looked up (uppercased) in the global labelD table. Returns
    an explicit bool (the original returned None for non-matches).
    """
    return sentence.split(':')[0].upper() in labelD
#-----------------------------------------------------------------
def addTokens(wordTokens):
    """Handle `ASSIGN <dst> + <var> <literal>`: dst = varValueD[var] + int(literal).

    Token layout:  ASSIGN dime + dime 1
                   0      1    2 3    4
    The result is stored back into varValueD as a string.
    """
    augend = int(varValueD[wordTokens[3].upper()])
    total = augend + int(wordTokens[4])
    varValueD[wordTokens[1].upper()] = str(total)
def multiplyTokens(wordTokens):
    """Handle `ASSIGN <dst> * <strvar> <numvar>`: string repetition.

    The [1:-1] slice strips the surrounding quote characters from the
    stored string value before repeating it; the repeat count is read
    from another variable.
    NOTE(review): the result is stored *without* re-adding quotes —
    confirm downstream consumers (e.g. tokenizePrint) expect that.
    """
    stringProduct = (varValueD[wordTokens[3].upper()][1:-1]) * (int(varValueD[wordTokens[4].upper()]))
    varValueD[wordTokens[1].upper()] = str (stringProduct)
def greaterThan(wordTokens):
    """Return True when the first operand is strictly greater than the second.

    Expected token layout (e.g. "if > 25 working LAfter25" sliced to its
    last five tokens): operands sit at positions 2 and 3, and each may be
    a variable name (resolved through varValueD) or a numeric literal.

    Raises InvalidValueType when an operand cannot be parsed as an int.
    """
    def to_int(token):
        # Resolve variables through the global value table first.
        raw = varValueD[token.upper()] if token.upper() in varValueD else token
        try:
            return int(raw)
        except ValueError:
            # Bug fix: the original error message referenced a name that was
            # still unbound when int() failed, so a NameError masked the
            # intended InvalidValueType. Report the offending text instead.
            raise InvalidValueType("'%s' is not numeric" % (raw,))
    return to_int(wordTokens[2]) > to_int(wordTokens[3])
def concatTokens(wordTokens):
    """Handle `ASSIGN <dst> & <a> <b>`: dst = str(a) + str(b).

    Token layout:  ASSIGN working & working 25
                   0      1       2 3       4
    Each operand is resolved independently: a variable name is replaced by
    its varValueD value, anything else is used literally.
    """
    def resolve(token):
        key = str(token).upper()
        if key in varValueD:
            return str(varValueD[key])
        return str(token)
    varValueD[wordTokens[1].upper()] = resolve(wordTokens[3]) + resolve(wordTokens[4])
def subtractTokens(wordTokens):
    """Handle `ASSIGN <dst> - <var> <literal>`: dst = varValueD[var] - int(literal).

    Token layout:  ASSIGN working - working 25
                   0      1       2 3       4
    The result is stored back into varValueD as a string.
    """
    minuend = int(varValueD[wordTokens[3].upper()])
    varValueD[wordTokens[1].upper()] = str(minuend - int(wordTokens[4]))
def greaterThanEqual(wordTokens):
    """Return True when the first operand is greater than or equal to the second.

    Operands at positions 2 and 3 may be variable names (resolved through
    varValueD) or numeric literals.

    Raises InvalidValueType when an operand cannot be parsed as an int.
    """
    def to_int(token):
        raw = varValueD[token.upper()] if token.upper() in varValueD else token
        try:
            return int(raw)
        except ValueError:
            # Bug fix: the original error message referenced a name that was
            # unbound when int() failed, raising NameError instead of the
            # intended InvalidValueType.
            raise InvalidValueType("'%s' is not numeric" % (raw,))
    return to_int(wordTokens[2]) >= to_int(wordTokens[3])
def assignFromVar(wordTokens):
    """Handle `ASSIGN <dst> <srcvar>`: copy one variable's value to another."""
    #format: ASSIGN working money
    #tokens: 0      1       2
    varValueD[wordTokens[1].upper()] = varValueD[wordTokens[2].upper()]
def gotoFinder(labelName):
    """Return the 0-based index of the line defining `labelName:`, or None.

    The search is case-insensitive and scans the global program listing
    (linelist) from the top.
    """
    target = labelName.upper() + ":"
    for lineNumber, rawLine in enumerate(linelist):
        if rawLine.strip().upper().startswith(target):
            return lineNumber
    return None
def labelLoops(line,currentLineNumber):
    """Execute a labelled conditional loop and return the next line index.

    `line` is a label line of the form
        Loop25: if > 25 working LAfter25
    i.e. the last five tokens are an IF condition whose final token names
    the exit label. While the condition is false, the body lines are
    interpreted one at a time; a GOTO back to this label rewinds
    currentLineNumber; when the condition becomes true, control jumps to
    the exit label. Returns the line index the caller should resume at.

    NOTE(review): the loop condition tokens are never re-sliced after
    variable updates — the re-evaluation works because greaterThan /
    greaterThanEqual re-read varValueD each call.
    """
    # Loop25: if > 25 working LAfter25
    # 0 1 2 3 4 5
    # ASSIGN quarter + quarter 1
    # ASSIGN working - working 25
    # GOTO Loop25
    # LAfter25: PRINT "quarters=" quarter
    tokens = line.strip().split()
    labelName = tokens[0]
    loopCondition = True
    if '>' in tokens:
        if not greaterThan(tokens[len(tokens)-5:]):
            while loopCondition:
                if not greaterThan(tokens[len(tokens)-5:]):
                    tempLine = linelist[currentLineNumber].strip()
                    if isAssign(tempLine):
                        evalAssign(tempLine)
                    if isPrint(tempLine):
                        tokenizePrint(tempLine)
                    if isGoto(tempLine):
                        # Jump back to this label (strip the trailing ':').
                        jumpLineNumber = gotoFinder(labelName[:-1])
                        currentLineNumber = jumpLineNumber - 1
                        return currentLineNumber
                    if isIfStatement(tempLine):
                        currentLineNumber = evalIfStatement(tempLine,currentLineNumber) - 1
                        # return currentLineNumber
                    currentLineNumber+=1
                else:
                    # Condition became true mid-loop: jump to the exit label.
                    return gotoFinder(tokens[-1])
        else:
            # Condition already true on entry: skip straight to the exit label.
            return gotoFinder(tokens[-1])
    if '>=' in tokens:
        if not greaterThanEqual(tokens[len(tokens)-5:]):
            while loopCondition:
                if not greaterThanEqual(tokens[len(tokens)-5:]):
                    tempLine = linelist[currentLineNumber].strip()
                    if isAssign(tempLine):
                        evalAssign(tempLine)
                    if isGoto(tempLine):
                        jumpLineNumber = gotoFinder(labelName[:-1])
                        print(labelName[:-1],jumpLineNumber)
                        currentLineNumber = jumpLineNumber - 1
                        return currentLineNumber
                    if isPrint(tempLine):
                        tokenizePrint(tempLine)
                    if isIfStatement(tempLine):
                        currentLineNumber = evalIfStatement(tempLine,currentLineNumber) - 1
                        # return currentLineNumber
                    currentLineNumber+=1
                else:
                    return gotoFinder(tokens[-1])
        else:
            return gotoFinder(tokens[-1]) #- currentLineNumber
    return currentLineNumber#returnCount
def evalAssign(sentence):
    """Dispatch an ASSIGN statement to the handler matching its operator.

    Supported operators: + (add), * (string repeat), >= / > (comparisons),
    & (concatenate), - (subtract); a bare three-token form copies one
    variable into another.
    """
    tokens = sentence.split()
    if '+' in tokens:
        addTokens(tokens)
    elif '*' in tokens:
        multiplyTokens(tokens)
    elif '>=' in tokens:
        # Bug fix: the original called the misspelled name `greateThanEqual`,
        # which raised NameError whenever this branch was taken.
        greaterThanEqual(tokens)
    elif '>' in tokens:
        greaterThan(tokens[len(tokens)-5:])
    elif '&' in tokens:
        concatTokens(tokens)
    elif '-' in tokens:
        subtractTokens(tokens)
    elif (len(tokens)) == 3:
        assignFromVar(tokens)
def isIfStatement(sentence):
    """Return True when the stripped sentence starts with the IF keyword.

    Case-insensitive; returns an explicit bool (the original returned None
    for non-matches).
    """
    return sentence.strip()[:2].upper() == 'IF'
def evalIfStatement(line,currentLineNumber):
    """Execute an inline IF loop and return the line index to resume at.

    The last five tokens of `line` form the condition, whose final token
    names the target label. While the condition is false, subsequent lines
    are interpreted; a GOTO in the body transfers control to its target;
    once the condition is true, control jumps to the condition's label.

    NOTE(review): only the '>' operator is handled here (labelLoops also
    handles '>='); with any other operator this function falls through and
    implicitly returns None — confirm callers never pass such lines.
    """
    tokens = line.strip().split()
    loopCondition = True
    # returnCount = 0
    if '>' in tokens:
        if not greaterThan(tokens[len(tokens)-5:]):
            while loopCondition:
                # print(3,line)
                if not greaterThan(tokens[len(tokens)-5:]):
                    tempLine = linelist[currentLineNumber].strip()
                    currentLineNumber+=1
                    # returnCount += 1
                    if isAssign(tempLine):
                        evalAssign(tempLine)
                    if isPrint(tempLine):
                        tokenizePrint(tempLine)
                    if isGoto(tempLine):
                        # Extract the GOTO target and jump there.
                        jumpLineNumber = gotoFinder((re.match(r'.*GOTO (.*)',tempLine)).group(1))
                        return jumpLineNumber
                else:
                    # Condition became true mid-loop: jump to its label.
                    return gotoFinder(tokens[-1])
        else:
            # Condition already true on entry: jump straight to its label.
            return gotoFinder(tokens[-1])
def now():
    """Main interpreter loop: execute the global program listing (linelist).

    Walks the listing line by line, dispatching PRINT, ASSIGN, label and IF
    statements to their handlers. Label and IF handlers return the next
    line index, which is decremented by one here because the loop footer
    increments it again.
    """
    line = 0
    while line < len(linelist):
        lines = linelist[line]
        if verbose:
            print()
        if isPrint(lines):
            tokenizePrint(lines)
        if isAssign(lines):
            evalAssign(lines)
        if isLabel(lines):
            if 'PRINT' in lines:
                # Labelled PRINT: execute the statement after the ':'.
                tokenizePrint("".join((lines.split(':'))[1:]).strip())
            else:
                line=labelLoops(lines,line) - 1
        if isIfStatement(lines):
            line = evalIfStatement(lines,line) - 1
        line+=1
import configparser
import logging
import os
class App(object):
    """Facade over the logo-recognition library.

    Wires together the ImageExtractor, ImageManager, feature algorithms and
    classifiers declared in parameters.ini, and exposes the train /
    classify / manage operations used by front ends. The repeated dynamic
    module-loading and directory-listing logic of the original is factored
    into the private helpers __load_class and __list_modules; the public
    interface is unchanged.
    """

    def __init__(self):
        """
        Initiate the different parameters, i.e. import all the default settings from the parameters.ini file.
        """
        self.feature = None
        self.classifier = None
        # Instantiate Logging (change to debug to see messages)
        logging.basicConfig(level=logging.FATAL)
        logging.debug("App instantiation")
        # Import parameters
        self.config = configparser.ConfigParser()
        self.config.read(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'parameters.ini'))
        self.default_classifier = self.config.get('DEFAULT', 'ClassifierFile')
        self.default_feature = self.config.get('DEFAULT', 'FeatureFile')
        # Load the configured ImageExtractor and ImageManager implementations.
        # To compile doc change the packages to logorec.imageExtractor /
        # logorec.imageManager.
        self.image_extractor = self.__load_class('imageExtractor', self.config.get('DEFAULT', 'ImageExtractorFile'))()
        self.image_manager = self.__load_class('imageManager', self.config.get('DEFAULT', 'ImageManagerFile'))()

    # ############################ HELPER ############################
    @staticmethod
    def __lowercase(s):
        """
        Lowercase the first letter of the given string.
        :param s: String to lowercase
        :return: String with lowercase first letter
        """
        if s:
            return s[:1].lower() + s[1:]
        else:
            return ''

    @staticmethod
    def __uppercase(s):
        """
        Uppercase the first letter of the given string.
        :param s: String to uppercase
        :return: String with uppercase first letter
        """
        if s:
            return s[:1].upper() + s[1:]
        else:
            return ''

    @staticmethod
    def __load_class(package, class_name):
        """
        Import `package.<module>` (module file names start with a lowercase
        letter, class names with an uppercase one) and return the class object.
        :param package: Package name (i.e. 'classifiers')
        :param class_name: Class name inside the module (i.e. 'RandomForest')
        :return: The class object (not an instance)
        """
        module = __import__(package + "." + App.__lowercase(class_name),
                            fromlist=[package])
        return getattr(module, class_name)

    def __list_modules(self, subdir):
        """
        List the class names (uppercased file stems) of the python modules
        in the given subdirectory of this package.
        :param subdir: Directory name (i.e. 'features')
        :return: List of class names
        """
        names = []
        for file in os.listdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), subdir)):
            if file.endswith(".py") and not file == '__init__.py' and not file == '__pycache__':
                filename, file_extension = os.path.splitext(file)
                names.append(self.__uppercase(filename))
        return names

    def __load_classifier(self, classifier=None):
        """
        Set given classifier as current (self.classifier). If the classifier is not given the default one is set as current.
        :param classifier: Name of the classifier class (i.e. RandomForest)
        :return: Nothing
        """
        name = classifier if classifier is not None else self.config.get('DEFAULT', 'ClassifierFile')
        self.classifier = self.__load_class('classifiers', name)()

    def __load_feature(self, feature=None):
        """
        Set given feature as current (self.feature). If the feature is not given the default one is set as current.
        :param feature: Name of the feature class (i.e. Bow)
        :return: Nothing
        """
        name = feature if feature is not None else self.config.get('DEFAULT', 'FeatureFile')
        self.feature = self.__load_class('features', name)()

    def feature_need_train(self, feature):
        """
        Check if the given feature need a training phase.
        :param feature: Name of the feature class (i.e. Bow)
        :return: True if the feature need a train otherwise False
        """
        self.__load_feature(feature)
        return self.feature.need_train()

    def get_categories(self):
        """
        Retrieve all the logo categories present in the default ImageManager.
        :return: List of logo categories
        """
        logging.debug("get categories")
        return self.image_manager.get_categories()

    def get_features(self):
        """
        Retrieve all the feature algorithm present in the library.
        :return: List of feature algorithms
        """
        logging.debug("get features")
        return self.__list_modules("features")

    def get_classifiers(self):
        """
        Retrieve all the classifier present in the library.
        :return: List of classifiers
        """
        logging.debug("get classifiers")
        return self.__list_modules("classifiers")

    def get_default_classifier(self):
        """
        Retrieve the default classifier from the parameters.ini.
        :return: Default classifier
        """
        logging.debug("get default classifier")
        return self.default_classifier

    def get_default_feature(self):
        """
        Retrieve the default feature from the parameters.ini.
        :return: Default feature
        """
        logging.debug("get default feature")
        return self.default_feature

    # ############################ RETRIEVE INFORMATION ############################
    def get_probability(self, website, parameters, classifier, feature):
        """
        Get the probability that the given website is a web shop. The given website, classifier and feature must exist!
        If the parameters do not correspond to the ImageExtractor implementation a ValueError is raised. If the
        classifier is not trained a ModuleNotFoundError is raised. If files for the classification are missing a
        FileNotFoundError is raised. If the given websites do not correspond with the implementation a AttributeError is
        raised.
        :param website: Website url (i.e. http://www.google.com)
        :param parameters: Parameter for the ImageExtractor. See specific implementation doc.
        :param classifier: Classifier to use for the classification phase (None to use the default one)
        :param feature: Feature algorithm to extract information from website's images
        :return: Probability (0-100) that the given website is a web shop
        """
        logging.debug("getting probability")
        self.__load_feature(feature)
        self.__load_classifier(classifier)
        if not self.classifier.is_trained():
            raise ModuleNotFoundError
        images = self.image_extractor.extract(website, parameters)
        if images:
            probs = self.feature.probability(images, self.classifier)
            self.image_extractor.clear()
        else:
            # No images could be extracted: report zero probability.
            probs = 0
        return probs

    def get_services(self, website, parameters, classifier, feature):
        """
        Get the services offered by the given website. The given website, classifier and feature must exist! If the
        parameters do not correspond to the ImageExtractor implementation a ValueError is raised. If the classifier is
        not trained a ModuleNotFoundError is raised. If files for the classification are missing a FileNotFoundError is
        raised. If the given websites do not correspond with the implementation a AttributeError is raised.
        :param website: Website url (i.e. http://www.google.com)
        :param parameters: Parameter for the ImageExtractor. See specific implementation doc.
        :param classifier: Classifier to use for the classification phase (None to use the default one)
        :param feature: Feature algorithm to extract information from website's images
        :return: List of services probability (0-100). The number of probability is the number of available categories
        """
        logging.debug("getting probability")
        self.__load_feature(feature)
        self.__load_classifier(classifier)
        if not self.classifier.is_trained():
            raise ModuleNotFoundError
        images = self.image_extractor.extract(website, parameters)
        if images:
            services = self.feature.services(images, self.classifier)
            self.image_extractor.clear()
        else:
            # No images: zero probability for every known category.
            services = len(self.get_categories()) * [0]
        return services

    # ############################ TRAIN OPERATIONS ############################
    def train_feature(self, feature):
        """
        Train the given feature. Check before that the feature need a training phase with feature_need_train(). The
        given feature must exist! If the given feature type does not have a default implementation a ModuleNotFoundError
        is raised.
        :param feature: Feature algorithm to train
        :return: Nothing
        """
        logging.debug("training feature")
        self.__load_feature(feature)
        if self.feature.default_exist():
            self.feature.train(self.image_manager.get_all())
        else:
            raise ModuleNotFoundError

    def train_classifier(self, classifier, feature):
        """
        Train the given classifier with the given feature. If the feature is None the default one is used. If the given
        feature need a train, it must be trained before the classifier otherwise, a ModuleNotFoundError is raised. If
        the classifier does not have a default implementation a ModuleNotFoundError is raised.
        :param classifier: Classifier to train
        :param feature: Feature to train the classifier
        :return: Nothing
        """
        logging.debug("training classifier")
        self.__load_feature(feature)
        self.__load_classifier(classifier)
        if ((self.feature.need_train() and self.feature.is_trained())
                or not self.feature.need_train()) and self.classifier.default_exist():
            self.feature.train_classifier(self.image_manager.get_all(),
                                          self.image_manager.generate_targets(),
                                          self.classifier)
        else:
            raise ModuleNotFoundError

    # ############################ ADD OPERATIONS ############################
    def add_category(self, category):
        """
        Add a new logo category with the default ImageManager. The given category must exist!
        :param category: Name of the new logo category
        :return: Nothing
        """
        logging.debug("add category")
        self.image_manager.add_category(category)

    def add_image(self, image, category):
        """
        Add a new image to the given logo category using the default ImageManager. The given category and image must
        exist!
        :param image: Image path to the new image
        :param category: Logo category name
        :return: Nothing
        """
        logging.debug("saving image")
        self.image_manager.save(image, category)

    def add_classifier(self, classifier, parameters):
        """
        Add a new classifier variation. The given classifier must exist! If the classifier variation already exists a
        FileExistsError is raised. If parameters don't correspond with the implementation a AttributeError is raised.
        :param classifier: Classifier type
        :param parameters: Parameters for the classifier. Check specific implementation doc.
        :return: Nothing
        """
        logging.debug("adding classifier")
        self.__load_classifier(classifier)
        self.classifier.add(parameters)

    def add_feature(self, feature, parameters):
        """
        Add a new feature variation. The given feature must exist! If the feature variation already exists a
        FileExistsError is raised. If parameters do not correspond with the implementation a AttributeError is raised.
        :param feature: Feature type
        :param parameters: Parameters for the Feature. Check specific implementation doc.
        :return: Nothing
        """
        logging.debug("adding feature")
        self.__load_feature(feature)
        self.feature.add(parameters)

    # ############################ SHOW OPERATIONS ############################
    def show_images_by_category(self, category):
        """
        Show all the images of a given category of logo. The given category must exist!
        :param category: Logo category name
        :return: List of images paths. If there are not images the list is empty.
        """
        logging.debug("showing images in category")
        return self.image_manager.get_by_category(category)

    def show_classifier_variations(self, classifier):
        """
        Show all the classifier variations. The given classifier must exist!
        :param classifier: Classifier name
        :return: List of classifier parameters. If there are not variations the list is empty.
        """
        logging.debug("showing classifier variations")
        self.__load_classifier(classifier)
        return self.classifier.show()

    def show_feature_variations(self, feature):
        """
        Show all the feature variations. The given feature must exist!
        :param feature: Feature name
        :return: List of feature parameters. If there are not variations the list is empty.
        """
        logging.debug("showing feature variations")
        self.__load_feature(feature)
        return self.feature.show()

    # ############################ DELETE OPERATIONS ############################
    def delete_image_by_category(self, category, image):
        """
        Delete the image from the given logo category set. The given category must exist! If the image does not exist a
        FileNotFoundError is raised.
        :param category: Name of the logo category
        :param image: Name of the image
        :return: Nothing
        """
        logging.debug("deleting image")
        self.image_manager.delete_by_category(category, image)

    def delete_category(self, category):
        """
        Delete the given category and all images contained in it. The given category must exist!
        :param category: Name of the logo category
        :return: Nothing
        """
        logging.debug("deleting category")
        self.image_manager.delete_category(category)

    def delete_classifier(self, classifier, parameters):
        """
        Delete the variation of the given classifier. The given classifier must exist! If the parameters do not
        represent any classifier variations a FileNotFoundError is raised.
        :param classifier: Name of the classifier
        :param parameters: Parameters of the classifier variation
        :return: Nothing
        """
        logging.debug("deleting classifier")
        self.__load_classifier(classifier)
        self.classifier.delete(parameters)

    def delete_feature(self, feature, parameters):
        """
        Delete the variation of the given feature. The given feature must exist! If the parameters do not represent
        any feature variations a FileNotFoundError is raised.
        :param feature: Name of the classifier
        :param parameters: Parameters of the feature variation
        :return: Nothing
        """
        logging.debug("deleting feature")
        self.__load_feature(feature)
        self.feature.delete(parameters)

    # ############################ SET OPERATIONS ############################
    def set_default_classifier(self, classifier, parameters):
        """
        Set the variation of the given classifier as default. The given classifier must exist! If the parameters do not
        represent any classifier variations a FileNotFoundError is raised.
        :param classifier: Name of the classifier
        :param parameters: Parameters of the classifier variation
        :return: Nothing
        """
        logging.debug("setting default classifier")
        self.__load_classifier(classifier)
        self.classifier.set_default(parameters)

    def set_default_feature(self, feature, parameters):
        """
        Set the variation of the given feature as default. The given feature must exist! If the parameters do not
        represent any feature variations a FileNotFoundError is raised.
        :param feature: Name of the classifier
        :param parameters: Parameters of the feature variation
        :return: Nothing
        """
        logging.debug("setting default feature")
        self.__load_feature(feature)
        self.feature.set_default(parameters)
if __name__ == '__main__':
    # Smoke test: constructing App loads parameters.ini and the default modules.
    app = App()
|
import requests
from lxml import html
# Reuse one session so the TCP connection (and any cookies) persist
# across requests.
sess = requests.Session()
headers = {
    # Desktop Chrome UA string; sites may serve different markup to
    # unidentified clients.
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
url = "https://stackoverflow.com/"
r = sess.get(url, headers=headers)
tree = html.fromstring(r.text)
# Extract question links from the front-page summary blocks.
# NOTE(review): selector matches 2018-era Stack Overflow markup — confirm
# the 'summary' class still exists on the current page.
links = tree.xpath('//div[@class="summary"]/h3/a/@href')
|
class Banque:
    """A bank holding a named list of account holders (Titulaire objects)."""

    def __init__(self, nomBanque):
        """Create a bank with the given name and no clients.

        Bug fixes vs. the original: the name was assigned to a local
        `this_nomBanque` (missing dot) instead of the attribute, and the
        client list was a class-level int (0) that crashed on .append();
        both are now proper per-instance attributes.
        """
        self._nomBanque = nomBanque
        self._titulaires = []

    def creerClient(self, nom, prenom, comptes=None):
        """Register a new client (Titulaire) with optional initial accounts.

        `comptes` defaults to a fresh empty list; the original used a
        mutable default argument shared between calls.
        """
        self._titulaires.append(Titulaire(nom, prenom, comptes if comptes is not None else []))

    def supprimerClient(self, nClient):
        """Remove the client at index nClient.

        Bug fix: the original referenced the misspelled `_titulaire`.
        """
        self._titulaires.remove(self._titulaires[nClient])

    def getTotalSoldes(self):
        """Return the sum of every client's total balance."""
        total = 0
        for titulaire in self._titulaires:
            total += titulaire.getTotalSolde()
        return total
|
# Read a number and print the sum of its first three digits.
number = input("Input number:")
digit_total = int(number[0]) + int(number[1]) + int(number[2])
print(digit_total)
|
from config import wallet_address, wallet_password
from connector import contract_instance, web3
def get_end_blocks():
    """Return the contract's end-block list via its getEndBlocks() call (read-only)."""
    result = contract_instance.getEndBlocks()
    return result
def check_to_end(index):
    """Unlock the configured wallet, then send a checkToDelete(index) transaction.

    NOTE(review): web3.personal.unlockAccount is the legacy personal API —
    confirm it is still available in the pinned web3.py version.
    """
    web3.personal.unlockAccount(wallet_address, wallet_password)
    contract_instance.checkToDelete(index, transact={'from': wallet_address})
|
# Ask for a price and a payment method, then apply the matching
# discount/surcharge factor.
val = float(input('Qual o valor do produto? '))
print('Digite a forma de pagamento:')
print('[1] À vista no dinheiro')
print('[2] À vista no cartão')
print('[3] Até 2x no cartão')
print('[4] 3x ou mais no cartão')
tip = int(input('Qual sua opção de pagamento? '))
# 1: 10% off, 2: 5% off, 3: full price; any other option adds 20%.
fatores = {1: 0.9, 2: 0.95, 3: 1.0}
pagamento = val * fatores.get(tip, 1.2)
print('Voce pagará no total R${:.2f}'.format(pagamento))
from django.apps import AppConfig
from watson import search as watson
class BooksConfig(AppConfig):
    """Django app configuration for the `books` app."""
    name = 'books'

    def ready(self):
        """Register the Product model with django-watson full-text search.

        Runs once the app registry is fully populated, so get_model is safe here.
        """
        Product = self.get_model("Product")
        watson.register(Product)
|
from django.contrib import admin
from .models import Decade, Fad
# Register your models here.
# Expose Decade and Fad in the Django admin with default ModelAdmin options.
admin.site.register(Decade)
admin.site.register(Fad)
import pytest
import pulp
from .core import Problem, Variable, negate, logical_and, logical_or, minimum, maximum, logical_xor, implies
from .errors import NonBinaryVariableError, CitrusError, assert_binary
def test_that_negate_produces_negated_variable():
    """negate() yields 1 - x, and negating twice restores the original value."""
    prob = Problem('negation test', pulp.LpMinimize)
    x = prob.make_var('x', cat=pulp.LpBinary)
    not_x = negate(x)
    not_not_x = negate(not_x)
    # Minimizing 2x + (1-x) drives x to 0.
    prob.setObjective(2 * x + not_x)
    prob.solve()
    assert pulp.LpStatus[prob.status] == 'Optimal'
    assert x.value() == 0
    assert not_x.value() == 1
    assert not_not_x.value() == 0
def test_that_logical_and_produces_constrained_value():
    """logical_and must match the Boolean AND truth table for all four input pairs."""
    p = Problem('logical_and test', pulp.LpMinimize)
    # t* variables are pinned to 1, f* variables to 0 (constraints below).
    t1 = p.make_var('t1', cat=pulp.LpBinary)
    t2 = p.make_var('t2', cat=pulp.LpBinary)
    f1 = p.make_var('f1', cat=pulp.LpBinary)
    f2 = p.make_var('f2', cat=pulp.LpBinary)
    tt = logical_and(t1, t2)
    tf = logical_and(t1, f1)
    ft = logical_and(f1, t1)
    ff = logical_and(f1, f2)
    p.addConstraint(t1 == 1)
    p.addConstraint(t2 == 1)
    p.addConstraint(f1 == 0)
    p.addConstraint(f2 == 0)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert tt.value() == 1
    assert tf.value() == 0
    assert ft.value() == 0
    assert ff.value() == 0
def test_that_anding_with_itself_is_okay():
    """logical_and(x, x) must behave as the identity (x AND x == x)."""
    p = Problem('anding with self', pulp.LpMinimize)
    t = p.make_var('t', cat=pulp.LpBinary)
    f = p.make_var('f', cat=pulp.LpBinary)
    tt = logical_and(t, t)
    tf = logical_and(t, f)
    ft = logical_and(f, t)
    ff = logical_and(f, f)
    p.addConstraint(t == 1)
    p.addConstraint(f == 0)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert tt.value() == 1
    assert tf.value() == 0
    assert ft.value() == 0
    assert ff.value() == 0
def test_that_logical_or_produces_constrained_value():
    """logical_or must match the Boolean OR truth table for all four input pairs."""
    p = Problem('logical_or tests', pulp.LpMinimize)
    t = p.make_var('t', cat=pulp.LpBinary)
    f = p.make_var('f', cat=pulp.LpBinary)
    tt = logical_or(t, t)
    tf = logical_or(t, f)
    ft = logical_or(f, t)
    ff = logical_or(f, f)
    p.addConstraint(t == 1)
    p.addConstraint(f == 0)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert tt.value() == 1
    assert tf.value() == 1
    assert ft.value() == 1
    assert ff.value() == 0
def test_that_funcs_throws_on_non_binary_variable():
    """Every binary combinator must reject integer (non-binary) variables."""
    p = Problem('problem', pulp.LpMinimize)
    x = p.make_var('x', cat=pulp.LpInteger)
    y = p.make_var('y', cat=pulp.LpInteger)
    with pytest.raises(NonBinaryVariableError):
        logical_and(x, y)
    with pytest.raises(NonBinaryVariableError):
        logical_or(x, y)
    with pytest.raises(NonBinaryVariableError):
        logical_xor(x, y)
    with pytest.raises(NonBinaryVariableError):
        implies(x, y)
def test_that_vars_from_diff_problems_raise_error():
    """Combinators must refuse to mix variables owned by different Problems."""
    a = Problem('problem a', pulp.LpMinimize)
    b = Problem('problem b', pulp.LpMinimize)
    x = a.make_var('x', cat=pulp.LpBinary)
    y = b.make_var('y', cat=pulp.LpBinary)
    with pytest.raises(CitrusError):
        logical_or(x, y)
    with pytest.raises(CitrusError):
        logical_and(x, y)
    with pytest.raises(CitrusError):
        logical_xor(x, y)
    with pytest.raises(CitrusError):
        implies(x, y)
def test_that_from_lp_var_works():
    """Variable.from_lp_var must wrap a plain pulp.LpVariable so combinators accept it."""
    p = Problem('anding with self', pulp.LpMinimize)
    t = pulp.LpVariable('t', cat=pulp.LpBinary)  # raw pulp variable, no Problem attached
    f = p.make_var('f', cat=pulp.LpBinary)
    t = Variable.from_lp_var(t, p)
    tf = logical_and(t, f)
    p.addConstraint(t == 1)
    p.addConstraint(f == 0)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert tf.value() == 0
def test_that_minimum_is_truly_min():
    """minimum() must track the smallest of its arguments even when maximised."""
    p = Problem('minimum', pulp.LpMaximize)
    x = p.make_var('x', cat=pulp.LpContinuous)
    p.addConstraint(x <= 52)
    y = p.make_var('y', cat=pulp.LpContinuous)
    p.addConstraint(y <= 12)
    z = p.make_var('z', cat=pulp.LpContinuous)
    p.addConstraint(z <= 15)
    m = minimum(x, y, z)
    # Maximising pushes x, y, z to their bounds, so min is y's bound: 12.
    p.setObjective(x + y + z + m)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert m.value() == 12
def test_that_maximum_is_truly_max():
    """maximum() must track the largest of its arguments even when minimised.

    fix: renamed from test_that_maximum_is_truly_min -- a copy-paste of the
    minimum test's name; the body exercises maximum().
    """
    p = Problem('maximum', pulp.LpMinimize)
    x = p.make_var('x', cat=pulp.LpContinuous)
    p.addConstraint(x >= 52)
    y = p.make_var('y', cat=pulp.LpContinuous)
    p.addConstraint(y >= 12)
    z = p.make_var('z', cat=pulp.LpContinuous)
    p.addConstraint(z >= 15)
    m = maximum(x, y, z)
    # Minimising pulls x, y, z to their lower bounds, so max is x's bound: 52.
    p.setObjective(x + y + z + m)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert m.value() == 52
def test_logical_xor():
    """logical_xor must match the Boolean XOR truth table for all four input pairs."""
    p = Problem('logical_xor tests', pulp.LpMinimize)
    t = p.make_var('t', cat=pulp.LpBinary)
    f = p.make_var('f', cat=pulp.LpBinary)
    tt = logical_xor(t, t)
    tf = logical_xor(t, f)
    ft = logical_xor(f, t)
    ff = logical_xor(f, f)
    p.addConstraint(t == 1)
    p.addConstraint(f == 0)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert tt.value() == 0
    assert tf.value() == 1
    assert ft.value() == 1
    assert ff.value() == 0
def test_implies():
    """implies must match material implication: false only for (1, 0)."""
    p = Problem('implies tests', pulp.LpMinimize)
    t = p.make_var('t', cat=pulp.LpBinary)
    f = p.make_var('f', cat=pulp.LpBinary)
    tt = implies(t, t)
    tf = implies(t, f)
    ft = implies(f, t)
    ff = implies(f, f)
    p.addConstraint(t == 1)
    p.addConstraint(f == 0)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert tt.value() == 1
    assert tf.value() == 0
    assert ft.value() == 1
    assert ff.value() == 1
def test_addition_retains_problem_reference():
    """
    adding two Variables should produce an AffineExpression
    (not an LpAffineExpression, which loses its reference to the Problem)
    """
    p = Problem('works with addition', pulp.LpMinimize)
    a = p.make_var('a', cat=pulp.LpContinuous)
    b = p.make_var('b', cat=pulp.LpContinuous)
    c = a + b
    assert c._problem == p, "c should retain the problem from a, b"
def test_maximum_operates_on_affine_expr():
    """maximum() must accept affine expressions (2a + b), not only raw variables."""
    p = Problem('maximum test', pulp.LpMinimize)
    a = p.make_var('a', cat=pulp.LpContinuous)
    b = p.make_var('b', cat=pulp.LpContinuous)
    c = 2 * a + b
    p.addConstraint((a + b + c) <= 12, 'a + b + c<= 12')
    p.addConstraint(a >= 0, 'pos a')
    p.addConstraint(b >= 0, 'pos b')
    p.addConstraint(c >= 0, 'pos c')
    largest = maximum(a, b, c)
    p.setObjective(largest)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
def test_constants_count_as_binary():
    """A binary variable with a fixed initial value must still pass assert_binary."""
    p = Problem('constants binary', pulp.LpMinimize)
    a = p.make_var('a', cat=pulp.LpBinary)
    a.setInitialValue(1)
    a.fixValue()
    assert_binary(a)
def test_abs_value():
    """abs() on variables/expressions must be usable inside constraints."""
    p = Problem('abs value', pulp.LpMinimize)
    a = p.make_var('a', cat=pulp.LpContinuous)
    b = p.make_var('b', cat=pulp.LpContinuous)
    p.addConstraint(abs(a - b) <= 5, '|a - b| <= 5')
    p.addConstraint(a >= 10, 'a >= 10')
    p.addConstraint(abs(b) >= 0)  # not active, just making sure we can apply abs to variables
    # Minimising b with a >= 10 and |a - b| <= 5 forces a = 10, b = 5.
    p.setObjective(b)
    p.solve()
    assert pulp.LpStatus[p.status] == 'Optimal'
    assert a.value() == 10
    assert b.value() == 5
|
#Defining a script
'''
Introduction:
1. A function is a self block of code
2. A function can be called as section of a program that is written once and can be executed
whenever required in the program, thus making code reusability
3. A function is a subprogram that works on data and produces output.
Types of functions:
There are two type of function.
a) Built-in functions:
* Functions that are predefined.
* We have used many predefined functions in python
b) user defined:
* Functions that are created according to the requirements.
#User defined functions
#A procedure call invokes the function by its name
#Funtion declaration
Ex: 1
def myFunction():
print("Hi.. i am user defined function")
def add():
    a=100 #a, b are local variables in the add function
b=200
print(a+b)
#Function calling
myFunction()
add()
add()
# we can create n number of funtion in package and can call the function n number times based on requirement.
Ex 2:
def add(a=100, b=200): #a, b are parameters and 100, 200 are their default values
print(a+b)
add()
#Ex 3:
def add(a,b): #a,b - arguments
print(a+b)
add(100,200) #100, 200 - parameters
#Ex 4:
def add(a,b): #a,b - arguments
print("a value is:",a)
print("b value is:",b)
print(a+b)
add(a=100,b=200) #100, 200 - parameters
#Ex 5:
def add(a,b): #a,b - arguments
print("a value is:",a)
print("b value is:",b)
print(a+b)
add(b=100,a=200) #100, 200 - parameters
output:
a value is: 200
b value is: 100
300
#Ex 6:
def add(a=2,b=3): #a,b - arguments
print("a value is:",a)
print("b value is:",b)
print(a+b)
add(b=100,a=200) #100, 200 - parameters
output:
a value is: 200
b value is: 100
300
# The function takes values from the caller's arguments; if an argument is not supplied, it uses the default value assigned in the function definition.
Ex 7:
def add(a=2,b=3): #a,b - arguments
a=50
b=60
print("a value is:",a)
print("b value is:",b)
print(a+b)
add(b=100,a=200) #100, 200 - parameters
output:
a value is: 50
b value is: 60
110
#value assigned inside declared funtion will be stored to execute
def add(a=2,b=3): #a,b - arguments
print("a value is:",a)
print("b value is:",b)
print(a+b)
x=10
y=20
add(b=x,a=y) #100, 200 - parameters
####Returning values from functions:
#Function Declaration
def add(a,b): #a,b - arguments
return a+b
print(add(a=1,b=4))
output:
5
def add(a,b): #a,b - arguments
return a+b
x=add(a=1,b=4)
print(x-2)
output:
3
'''
|
# Compute the product of the integers 1..n (i.e. n factorial).
n=int(input("Enter the limit"))
product=1
# Multiplying by 1 is a no-op, so the running product can start at 2.
for factor in range(2,n+1):
    product*=factor
print("Product of numbers is:",product)
from time import sleep # Library will let us put in delays
import RPi.GPIO as GPIO # Import the RPi Library for GPIO pin control
button1_pin=12 # Button 1 is connected to physical pin 12
GPIO.setmode(GPIO.BOARD) # Use Physical Pin Numbering Scheme
GPIO.setup(button1_pin,GPIO.IN,pull_up_down=GPIO.PUD_UP)
# Make button1_pin an input, Activate Pull UP Resistor
while(1): # Poll the button forever
    input1=GPIO.input(button1_pin)
    if input1==0: # Pull-up wiring: the pin reads 0 while the button is pressed
        sleep(.1) # Crude debounce delay
        print ('Button 1 Pressed') # Notify user
# NOTE(review): the message repeats every 0.1s while the button is held --
# there is no edge detection; confirm this is acceptable.
|
import ctypes
import json
import logging.config
import os
import re as reg
import socket
import sys
import threading
import time
import ConfigParser
import Queue
from Tkinter import *

import client_protocol_support
# Protocol / UI tuning constants.
ENCODING = "utf-8"
USERNAME_MAX_LENGTH = 15
MAX_ROOM_NAME_LENGTH = 15
ROOM_USERS_REFRESH_RATE = 200  # ms between room-user-list refreshes in the UI
CONFIG_FILE = 'config.ini'
SPAM_MESSAGE = "Im spamming! Nonono I shouldn't do this."
logging.config.fileConfig(os.path.join(os.path.dirname(__file__), "logger.config"))
# Read the server host/port from config.ini; abort if the file is unusable.
try:
    config = ConfigParser.RawConfigParser()
    config.read(CONFIG_FILE)
    HOST = config.get('main', 'host')
    PORT = int(config.get('main', 'port'))
except:
    logging.getLogger(__name__).error("config file is missing!")
    # NOTE(review): `sys` is used throughout this module but never imported
    # at the top of the file -- this line would raise NameError as-is.
    sys.exit(-1)
def error_window(message):
    # Show a blocking Windows message box (title and body are both `message`).
    # Windows-only: goes straight to user32 via ctypes.
    ctypes.windll.user32.MessageBoxW(0, u'{0}'.format(message), u'{0}'.format(message), 0)
def server_is_running():
    '''Probe the chat server before starting the UI.

    Detects DNS resolvers that answer every lookup with a default IP, then
    opens a socket and performs a protocol-level ping.  Exits the process
    (after an error window) if the server looks unreachable.
    '''
    dummy_address_in_case_of_dns_defaults = ""
    host_addr = ""
    try:
        # Resolve a garbage hostname: if the resolver "succeeds", remember the
        # bogus default address so HOST resolving to the same IP is rejected.
        dummy_address_in_case_of_dns_defaults = socket.gethostbyname("someinexistenaddreswhatever123321sdsdsd1.com")
    except:
        pass
    try:
        host_addr = socket.gethostbyname(HOST)
        if (dummy_address_in_case_of_dns_defaults == host_addr):
            error_window("Error, server is unavailable")
            sys.exit(-1)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        remote_ip = socket.gethostbyname(HOST)
        s.connect((remote_ip, PORT))
        # Use a throwaway Client only as a protocol codec for the ping.
        cl = Client()
        cl._client_socket = s
        cl.username = 'ping'
        cl.password = 'ping'
        cl.send_message(cl.form_raw_command(client_protocol_support.COMMAND_PING))
        logging.debug('ping: {0}'.format(cl.recieve_message()))
    except Exception, e:
        error_window("Error, server is unavailable")
        sys.exit(-1)
class Client(object):
    '''Tkinter chat client: owns the server socket, the credentials and all UI state.'''
    def __init__(self):
        object.__init__(self)
        self._log = logging.getLogger(__name__)
        self._client_socket = None   # TCP socket to the chat server
        self.root_tk = None          # main Tk window (None while logged out)
        self.root_chat = None        # chat-log Text widget
        self.username = None
        self.password = None
        self.logout_requested = False
        self.is_login_attempt = False
        self._socket_lock = threading.RLock()  # serialises writes to the socket
        self.data_response_queue = Queue.Queue()     # RESPONSE_MARKER_DATA replies
        self.users_in_my_room_queue = Queue.Queue()  # room-user-list updates
        self.get_credentials_callback = None  # UI hook: ask the user for a login
        self.validate_message = None          # hook: outgoing-message validator
        self.validate_username = None
    def recieve_message(self):
        '''
        Method serves as transport level gate, it receives length-code and then message body. It should be the only place when program directly reads from socket.
        Currently it is used only by server_response_handler thread and thus integrity is preserved.
        (it is used in a few other places but only when server_response_handler is not in action)
        '''
        chunks = []
        bytes_recd = 0
        length_code = self._client_socket.recv(client_protocol_support.MESSAGE_LENGTH_CODE) # 3 bytes length-code
        if length_code == '':
            # recv returning '' means the peer closed the connection
            self._log.debug('Connection is lost')
            raise RuntimeError('Connection is lost')
        # Map the received code back to the bucket length it denotes.
        for length in client_protocol_support.MESSAGE_LENGTH_CODES.keys():
            if client_protocol_support.MESSAGE_LENGTH_CODES[length] == length_code:
                msg_length = length
        # NOTE(review): if no code matched, msg_length is unbound and the loop
        # below raises NameError -- confirm the server can never send an
        # unknown code.
        while bytes_recd < msg_length:
            chunk = self._client_socket.recv(msg_length)
            if chunk == '':
                self._log.debug('Connection is lost')
                raise RuntimeError('Connection is lost')
            chunks.append(chunk)
            bytes_recd = bytes_recd + len(chunk)
        # Strip the protocol terminator and any padding after it.
        return ''.join(chunks).split(client_protocol_support.MESSAGE_CONTENT_END)[0]
    def send_message(self, message):
        '''
        Method serves as transport level gate, it sends length-code and then message body. It should be the only place when program writes directly into socket.
        '''
        try:
            with self._socket_lock:
                # Smallest protocol bucket that fits message + terminator.
                approx_len = min([key for key, value in client_protocol_support.MESSAGE_LENGTH_CODES.iteritems() if
                                  key >= len(message) + len(client_protocol_support.MESSAGE_CONTENT_END)])
                code = client_protocol_support.MESSAGE_LENGTH_CODES.get(approx_len)
                # sending code
                self._client_socket.sendall(code.encode(ENCODING))
                # sending actual message, padded with '0' filler up to the
                # bucket size so the receiver can read a fixed-size frame
                self._client_socket.sendall(message + client_protocol_support.MESSAGE_CONTENT_END + ''.join(
                    ['0' for x in range(len(message + client_protocol_support.MESSAGE_CONTENT_END), approx_len)]).encode(
                    ENCODING))
        except Exception, e:
            # Any socket failure is fatal for the client: close and exit.
            self._client_socket.close()
            error_window("Error, connection is lost.")
            sys.exit()
    def rooms_window(self):
        '''Open the "Chat rooms" window: list rooms from the server, allow
        entering one (with a password prompt when required) and -- for
        registered users -- creating a new room.'''
        # presumed that only main thread can send data request, thus message order is guaranteed
        self.send_message(self.form_raw_command(client_protocol_support.COMMAND_GET_ROOMS))
        rooms_serialized = json.loads(self.data_response_queue.get().replace(client_protocol_support.RESPONSE_MARKER_DATA, ''))
        rooms_tk = Toplevel()
        rooms_tk.title('Chat rooms')
        rooms_tk.geometry('430x700')
        rooms_tk.resizable(0, 0)
        # scrollable frame setup below
        def on_frame_configure(event):
            canvas.configure(scrollregion=canvas.bbox("all"))
        canvas = Canvas(rooms_tk, borderwidth=0)
        wrapper_frame = Frame(canvas, background="#ffffff")
        vsb = Scrollbar(rooms_tk, orient="vertical", command=canvas.yview)
        canvas.configure(yscrollcommand=vsb.set)
        vsb.pack(side="right", fill="y")
        canvas.pack(side="left", fill="both", expand=True)
        canvas.create_window((4, 4), window=wrapper_frame, anchor="nw",
                             tags="wrapper_frame")
        wrapper_frame.bind("<Configure>", on_frame_configure)
        # One row per room: public checkbox, Enter button, read-only name.
        for index, room in enumerate(rooms_serialized):
            room_frame = Frame(wrapper_frame)
            room_frame.grid(row=index, column=0)
            name = StringVar()
            name.set(room.get('name'))
            is_open = Checkbutton(room_frame, text="Public", state=DISABLED)
            if room.get('is_open'):
                is_open.select()
                enter_button = Button(room_frame, text="Enter", command=lambda r=room: enter_room(r))
            elif not self.password:
                # Anonymous users cannot enter closed rooms.
                enter_button = Button(room_frame, text="Enter", state='disabled', command=lambda r=room: enter_room(r))
            else:
                enter_button = Button(room_frame, text="Enter", command=lambda r=room: enter_room(r))
            is_open.pack(side='left')
            enter_button.pack(side='right')
            room_name = Entry(room_frame, textvariable=name, state='disabled', width='50')
            room_name.pack(side='right', expand='true')
        def enter_room(r):
            # if room is password-guarded - draw a prompt window
            if r.get('has_password'):
                room_password_tk = Toplevel()
                room_password_tk.resizable(0, 0)
                room_password_tk.title('Password')
                room_password_tk.geometry('180x80')
                error_var = StringVar()
                password_var = StringVar()
                def send_room_password():
                    self.send_message(
                        self.form_raw_command(client_protocol_support.COMMAND_ENTER_ROOM, str(r.get('room_id')),
                                              password_var.get()))
                    if self.data_response_queue.get().replace(client_protocol_support.RESPONSE_MARKER_DATA,
                                                              '') == client_protocol_support.SUCCESS:
                        # Entered: clear the chat log and close both windows.
                        self.root_chat.delete(1.0, END)
                        room_password_tk.withdraw()
                        rooms_tk.withdraw()
                    else:
                        error_var.set("Invalid password")
                        room_password_tk.update_idletasks()
                if password_var.get():
                    send_room_password()
                Button(room_password_tk, text="Enter", command=send_room_password).pack(side='bottom')
                Label(room_password_tk, textvariable=error_var, fg='red').pack()
                password_field = Entry(room_password_tk, textvariable=password_var)
                password_field.focus_set()
                password_field.pack(side='top', fill='x', expand='true')
            else:
                self.send_message(self.form_raw_command(client_protocol_support.COMMAND_ENTER_ROOM, str(r.get('room_id'))))
                if self.data_response_queue.get().replace(client_protocol_support.RESPONSE_MARKER_DATA,
                                                          '') == client_protocol_support.SUCCESS:
                    self.root_chat.delete(1.0, END)
                    rooms_tk.withdraw()
        # Room-creation controls: only for registered users.
        if self.password:
            new_room_frame = Frame(rooms_tk)
            new_room_frame.pack(side='top')
            new_name = StringVar()
            new_password = StringVar()
            main_label_var = StringVar()
            is_closed = IntVar()
            main_label_var.set('Name / Password(optional)')
            Checkbutton(new_room_frame, text="Closed for anonymous", variable=is_closed).pack()
            Label(new_room_frame, textvariable=main_label_var).pack()
            Entry(new_room_frame, textvariable=new_password).pack(side='bottom', fill='x', expand='false')
            name_field = Entry(new_room_frame, textvariable=new_name)
            name_field.focus_set()
            name_field.pack(side='bottom', fill='x', expand='false')
            def create_room():
                name = new_name.get()
                # 'NONE' is the protocol placeholder for "no password".
                password = new_password.get() if new_password.get() else 'NONE'
                closed_room = is_closed.get()
                if name and len(name) <= MAX_ROOM_NAME_LENGTH:
                    self.send_message(
                        self.form_raw_command(client_protocol_support.COMMAND_CREATE_ROOM, str(closed_room), name, password))
                    rooms_tk.withdraw()
            create_button = Button(new_room_frame, text="Create new", command=create_room)
            create_button.pack()
        canvas.pack(side="top", fill="both", expand=True)
    def start_client(self):
        '''Run one login-then-chat session.

        Resets state, connects, asks for credentials, builds the Tk UI,
        starts the server-response-handler thread and blocks in the Tk
        main loop until the window goes away.
        '''
        self.cleanup()
        self.connect_to_server()
        last_message = set()  # holds the previous message for spam detection
        # get credentials
        while not self.username:
            self.get_credentials_callback(self)
            if not self.is_login_attempt:
                # User dismissed the login window without attempting a login.
                sys.exit(-1)
        # UI section
        main_tk = Tk()
        self.root_tk = main_tk
        main_tk.resizable(0, 0)
        name = StringVar()
        name.set(self.username)
        main_tk.title('Chat')
        main_tk.geometry('900x600')
        sendframe = Frame(main_tk)
        sendframe.pack(side='bottom')
        credframe = Frame(main_tk, bd='4')
        credframe.pack(side='top')
        main_buttons_frame = Frame(main_tk, bd='4')
        main_buttons_frame.pack(side='top')
        room_users_frame = Frame(main_tk, bd='2')
        room_users_frame.pack(side='right')
        chat_log = Text(main_tk, height=300, width=450)
        chat_log.pack(side='top', fill='both', expand='false')
        self.root_chat = chat_log
        Label(credframe, text='Hello, {0}!'.format(self.username), bd='5').pack(side='left')
        msg = Text(sendframe, height=5, width=107)
        msg.pack(side='left', fill='x', expand='true')
        def sendproc(event):
            '''
            user input -> server form_request routing function. Also capable to parse text commands.
            Runs in current, main thread, binded to user send-event.
            '''
            message_text = msg.get('1.0', END).encode('ascii', 'ignore').rstrip('\n')
            msg.delete('1.0', END)
            result_message = ''
            if message_text.startswith(client_protocol_support.COMMAND_START_SIGN):
                # Slash-command path: let the protocol module build the request.
                result_message = client_protocol_support.form_request(self, message_text)
                if not result_message:
                    chat_log.insert(END, 'NO SUCH COMMAND\n')
                    return
                else:
                    self.send_message(result_message)
            else:
                validation = self.validate_message(message_text, last_message)
                if validation == client_protocol_support.SUCCESS:
                    result_message = client_protocol_support.format_protocol_message(client_protocol_support.COMMAND_TEXT_MESSAGE,
                                                                                    self.username,
                                                                                    self.password, message_text)
                    # Remember this message so an identical follow-up is
                    # flagged as spam by validate_message.
                    last_message.clear()
                    last_message.add(message_text)
                    self.send_message(result_message)
                else:
                    # Validation failed: show the explanation in the input box.
                    msg.insert('1.0', validation)
        def server_response_handler():
            '''
            server response -> text area printing function. Message also could consist of special data, then it is putted into data containers instead of printing.
            Runs in additional thread, started below
            '''
            # needed to end this thread when user re-logged (ui re-created)
            owned_by = self.root_tk
            # wait, else it blocks main thread's Tk() main_loop for some reason
            time.sleep(0.5)
            while True:
                try:
                    # if root thread exists. Else - shut down this expired handler
                    if owned_by == self.root_tk:
                        data = self.recieve_message()
                        if not data:
                            if self.logout_requested:
                                self._log.debug('logging out')
                                break
                            self._log.debug('empty data from server. exiting...')
                            error_window("Error, server is unavailable")
                            self._client_socket.close()
                            sys.exit(-1)
                        # case of waiting for data from server
                        if data.startswith(client_protocol_support.RESPONSE_MARKER_DATA):
                            self.data_response_queue.put(data.decode(ENCODING))
                        elif data.startswith(client_protocol_support.RESPONSE_MARKER_USERS_CURRENLY_IN_ROOM):
                            self.users_in_my_room_queue.put(data.decode(ENCODING))
                        else:
                            try:
                                chat_log.see(END)
                                chat_log.insert(END, data.decode(ENCODING))
                            except:
                                # OK, means that current root Tk() is destroyed (logout)
                                pass
                    else:
                        break
                except socket.error:
                    self._client_socket.close()
                    sys.exit()
        # starting response handler thread to be separate from UI thread
        th_server_response_handler = threading.Thread(name="server_response_handler", target=server_response_handler)
        th_server_response_handler.setDaemon(True)
        th_server_response_handler.start()
        room_users_frames = []
        def redraw_current_room_users():
            '''
            Method is called in UI loop to renew list of room-connected users when updates came from server.
            Reschedules itself via Tk's after() every ROOM_USERS_REFRESH_RATE ms.
            '''
            if not self.users_in_my_room_queue.empty():
                # get data from queue
                users_from_server = json.loads(self.users_in_my_room_queue.get().replace(
                    client_protocol_support.RESPONSE_MARKER_USERS_CURRENLY_IN_ROOM, ''))
                # check if i am admin
                i_am_admin_of_current_room = False
                for u in users_from_server:
                    if u.get('username') == self.username:
                        i_am_admin_of_current_room = u.get('is_admin').lower() == 'true'
                # cleanup previously drawn rows
                for user_frame in room_users_frames:
                    user_frame.destroy()
                for user_data in users_from_server:
                    room_user_frame = Frame(room_users_frame)
                    room_user_frame.pack(side='top')
                    room_users_frames.append(room_user_frame)
                    login = user_data.get('username')
                    # server sends booleans as 'True'/'False' strings
                    admin = user_data.get('is_admin').lower() == 'true'
                    registered = user_data.get('is_registered').lower() == 'true'
                    is_under_kick_vote = user_data.get('is_under_kick_vote').lower() == 'true'
                    is_admin = Checkbutton(room_user_frame, text="Admin", state=DISABLED)
                    is_admin.deselect()
                    is_registered = Checkbutton(room_user_frame, text="Authorized", state=DISABLED)
                    is_registered.deselect()
                    if admin:
                        is_admin.select()
                    is_admin.pack(side='right')
                    if registered:
                        is_registered.select()
                    is_registered.pack(side='right')
                    # user-actions menu (e.g. for kick)
                    def open_user_menu(target_login, target_is_admin, target_registered, target_is_under_kick_vote):
                        user_menu_tk = Toplevel()
                        user_menu_tk.resizable(0, 0)
                        user_menu_tk.title('Actions on "{0}"'.format(target_login))
                        user_menu_tk.geometry('275x50')
                        def send_menu_command(command, target_user):
                            self.send_message(self.form_raw_command(command, target_user))
                            user_menu_tk.withdraw()
                        # admin section
                        if i_am_admin_of_current_room:
                            if target_registered and not target_is_admin:
                                Button(user_menu_tk, text='Make admin', command=lambda trg=target_login: send_menu_command(client_protocol_support.COMMAND_MAKE_ADMIN, trg)).pack(
                                    side='left')
                            else:
                                Button(user_menu_tk, text='Make admin', state=DISABLED).pack(side='left')
                            if target_is_admin:
                                Button(user_menu_tk, text='Kick', state=DISABLED).pack(side='left')
                            else:
                                Button(user_menu_tk, text='Kick', command=lambda trg=target_login: send_menu_command(client_protocol_support.COMMAND_KICK, trg)).pack(side='left')
                        # common section
                        if target_is_under_kick_vote:
                            Button(user_menu_tk, text='Kick vote', state=DISABLED).pack(side='left')
                            Button(user_menu_tk, text='Vote yes',
                                   command=lambda trg=target_login: send_menu_command(client_protocol_support.COMMAND_VOTE_FOR_KICK_VOTE_YES, trg)).pack(side='left')
                            Button(user_menu_tk, text='Vote no',
                                   command=lambda trg=target_login: send_menu_command(client_protocol_support.COMMAND_VOTE_FOR_KICK_VOTE_NO, trg)).pack(side='left')
                        else:
                            if target_is_admin:
                                Button(user_menu_tk, text='Kick vote', state=DISABLED).pack(side='left')
                            else:
                                Button(user_menu_tk, text='Kick vote',
                                       command=lambda trg=target_login: send_menu_command(client_protocol_support.COMMAND_VOTE_FOR_KICK, trg)).pack(side='left')
                            Button(user_menu_tk, text='Vote yes', state=DISABLED).pack(side='left')
                            Button(user_menu_tk, text='Vote no', state=DISABLED).pack(side='left')
                    # enable user-actions if it is not my button
                    if login != self.username:
                        room_user_repr_button = Button(room_user_frame, text=login,
                                                       command=lambda target_login=login, target_is_admin=admin, target_registered=registered,
                                                       target_is_under_kick_vote=is_under_kick_vote: open_user_menu(target_login,
                                                                                                                    target_is_admin,
                                                                                                                    target_registered,
                                                                                                                    target_is_under_kick_vote))
                    else:
                        room_user_repr_button = Button(room_user_frame, text=login, state=DISABLED)
                    room_user_repr_button.pack(side='top')
            main_tk.after(ROOM_USERS_REFRESH_RATE, redraw_current_room_users)
        send_button = Button(sendframe, text="Send", command=lambda: sendproc(None), bd='2', pady='30')
        send_button.pack(side='right')
        send_button = Button(credframe, text="Logout", command=self.logout, bd='2')
        send_button.pack(side='right')
        rooms_button = Button(main_buttons_frame, text="Rooms", command=self.rooms_window, bd='2')
        rooms_button.pack(side='top')
        msg.focus_set()
        msg.bind('<KeyRelease-Return>', sendproc)
        # periodic check to update list of current room users
        main_tk.after(1, redraw_current_room_users)
        main_tk.mainloop()
    def connect_to_server(self):
        '''Open a TCP connection to HOST:PORT, store it on the client and return it.

        Exits the process on DNS resolution failure.
        '''
        try:
            client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            remote_ip = socket.gethostbyname(HOST)
        except socket.gaierror:
            # Hostname could not be resolved: nothing to connect to.
            sys.exit()
        client_socket.connect((remote_ip, PORT))
        print 'Socket Connected to {0} on ip {1}'.format(HOST, remote_ip)
        self._client_socket = client_socket
        return client_socket
    def logout(self):
        '''Notify the server we are leaving, then tear the session down.'''
        self.logout_requested = True
        self.send_message(client_protocol_support.format_protocol_message(client_protocol_support.COMMAND_LOGOUT, self.username, self.password, ''))
        # NOTE(review): creds_provided is never initialised in __init__ and is
        # not read anywhere in this module -- possibly dead state.
        self.creds_provided = False
        self.is_login_attempt = False
        self.cleanup()
    def cleanup(self):
        '''Reset per-session state: credentials, response queues, UI window, socket.'''
        self.username = None
        self.password = None
        # Fresh queues drop any responses left over from the previous session.
        self.data_response_queue = Queue.Queue()
        self.users_in_my_room_queue = Queue.Queue()
        if self.root_tk and self.root_tk.winfo_exists():
            self.root_tk.destroy()
            self.root_tk.quit()
        self.root_tk = None
        if self._client_socket:
            self._client_socket.close()
    def form_raw_command(self, command, *content):
        '''
        Forms command sequence from user input (which can come from different sources) to such form: /kick Vasili

        Joins the command and its space-separated arguments, then delegates to
        client_protocol_support.form_request for protocol formatting.
        '''
        return client_protocol_support.form_request(self, command + ''.join([' ' + x for x in content]))
def get_credentials(client):
    '''
    login GUI module. Client will call this callback until its username field will be empty.
    :param client: Client instance whose username/password are filled in on success
    :return:
    '''
    def validate_username(login, max_length):
        # Letters/digits/underscore/hyphen only, bounded length.
        return reg.match('^[\w-]+$', login) is not None and len(login) <= max_length
    def login(*args):
        if validate_username(login_var.get(), USERNAME_MAX_LENGTH):
            client.username = login_var.get()
            client.password = password_var.get()
            # send login request
            client.send_message(client.form_raw_command(client_protocol_support.COMMAND_LOGIN))
            # wait for server
            response = client.recieve_message()
            if response == client_protocol_support.SUCCESS:
                client.is_login_attempt = True
                credentials_tk.destroy()
                credentials_tk.quit()
                client._log.debug('logged in as {0}'.format(client.username))
                return
            else:
                if response:
                    error_var.set('Incorrect login or password')
                else:
                    # Empty response: the connection died during login.
                    error_window("Connection is lost")
                    client._client_socket.close()
                    sys.exit(-1)
        else:
            error_var.set('Login must be less then {len} symbols\nand contain only latters/numbers'
                          .format(len=USERNAME_MAX_LENGTH))
        credentials_tk.update_idletasks()
    credentials_tk = Tk()
    credentials_tk.resizable(0, 0)
    credentials_tk.title('Login')
    credentials_tk.geometry('280x200')
    login_var = StringVar()
    password_var = StringVar()
    error_var = StringVar()
    main_label_var = StringVar()
    login_label_var = StringVar()
    password_label_var = StringVar()
    # NOTE(review): the doubled '' below is adjacent-string concatenation, so
    # the label renders as "doesnt" -- probably an escaped quote was intended.
    main_label_var.set(
        'Please login. If you doesn''t have one - just type it in. \nLeave it blank to login anonymously')
    login_label_var.set('Username:')
    password_label_var.set('Password:')
    Label(credentials_tk, textvariable=main_label_var).pack()
    Label(credentials_tk, textvariable=error_var, fg='red').pack()
    Label(credentials_tk, textvariable=login_label_var).pack()
    login_field = Entry(credentials_tk, textvariable=login_var)
    login_field.focus_set()
    login_field.pack(side='top', fill='x', expand='true')
    Label(credentials_tk, textvariable=password_label_var).pack()
    password_field = Entry(credentials_tk, textvariable=password_var)
    password_field.pack(side='top', fill='x', expand='true')
    Button(credentials_tk, text="Login/Register", command=login).pack(side='bottom')
    login_field.bind('<Return>', login)
    password_field.bind('<Return>', login)
    credentials_tk.mainloop()
def mock_get_credentials(client):
    '''Headless stand-in for get_credentials: fixed test credentials, no GUI.'''
    client.username = 'mock_username'
    client.password = 'password'
    client.is_login_attempt = True
    client.send_message(client.form_raw_command(client_protocol_support.COMMAND_LOGIN))
def validate_message(new, last):
    '''Validate an outgoing chat message.

    Returns SUCCESS when the message may be sent, otherwise a human-readable
    rejection string: SPAM_MESSAGE for empty text or a repeat of the previous
    message, or a length/ASCII complaint for over-long / non-str input.
    '''
    if not new or new in last:
        return SPAM_MESSAGE
    limit = client_protocol_support.MESSAGE_MAX_USER_MES_LENGH - len(
        client_protocol_support.MESSAGE_CONTENT_END)
    if not isinstance(new, str) or len(new) > limit:
        return 'Only ASCII-symbol message with less than {0} chars allowed.'.format(client_protocol_support.MESSAGE_MAX_USER_MES_LENGH)
    return client_protocol_support.SUCCESS
def init():
    '''Entry point: probe the server, then run chat sessions in a loop.

    The loop re-runs start_client after a logout and stops when the session
    ended without a logout request.
    NOTE(review): logout_requested is set True in logout() but never reset to
    False, so after the first logout the loop can never exit -- verify.
    '''
    server_is_running()
    cl = Client()
    cl.get_credentials_callback = get_credentials
    cl.validate_message = validate_message
    while True:
        cl.start_client()
        if not cl.logout_requested:
            break
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-01 01:29
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import up_ride_finder.rides.validators
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10.4): adjusts validators and verbose names on
    # three Ride fields.  Do not hand-edit applied migrations -- the recorded
    # history must keep matching the database schema.

    dependencies = [
        ('rides', '0004_auto_20170201_0118'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ride',
            name='available_seats',
            field=models.IntegerField(validators=[django.core.validators.MinValueValidator(1)], verbose_name='Maximum number of passengers'),
        ),
        migrations.AlterField(
            model_name='ride',
            name='origin',
            field=models.CharField(max_length=5, validators=[django.core.validators.MinLengthValidator(5)], verbose_name='ZIP code of origin'),
        ),
        migrations.AlterField(
            model_name='ride',
            name='when',
            field=models.DateTimeField(validators=[up_ride_finder.rides.validators.future_date], verbose_name='Approximate departure date and time'),
        ),
    ]
|
from django.db import models
class Team(models.Model):
    # Team's number (positive, small).  NOTE(review): no __unicode__/__str__
    # is defined, so the admin shows "Team object".
    number = models.PositiveSmallIntegerField()
# (db value, human label) pairs for Match.type.
MATCH_TYPE_CHOICES = (
    ('P','Practice'),
    ('Q','Qualification'),
    ('E','Elimination'),
)
class Match(models.Model):
    """A numbered match of a given type (practice/qualification/elimination)."""
    number = models.PositiveSmallIntegerField()
    type = models.CharField(max_length=1, choices=MATCH_TYPE_CHOICES)

    def __unicode__(self):
        # fix: the original signature had no `self` (TypeError on any call by
        # Django) and returned None; return a readable label instead.
        return u'%s%s' % (self.type, self.number)
class MatchTeam(models.Model):
    # Placeholder -- presumably the Match<->Team association; fields not yet
    # defined (TODO confirm intent).
    pass
|
import os
# Build train/val file lists for the grass VOC-style dataset, then rename the
# files in SegmentationClass to match the collected stems.
path = "D:/deeplab/tensorflow-deeplab-v3/grass/VOCdevkit/VOC2012/SegmentationClassVisualization"  # image directory
files = os.listdir(path)
s = []  # file-name stems, in os.listdir order
for file in files:
    # fix: isdir() must test the full path -- the bare name was resolved
    # against the current working directory, not `path`.
    if not os.path.isdir(os.path.join(path, file)):
        # Basename up to the first '.' (keeps the original split('.')[0] behaviour).
        s.append(os.path.split(file)[-1].split('.')[0])
with open('D:/deeplab/tensorflow-deeplab-v3/grass/train.txt', 'w') as f:
    for i in range(0, 100):
        f.write(s[i] + '\n')
# NOTE(review): sample index 100 appears in neither train.txt (0-99) nor
# val.txt (101-141) -- confirm this off-by-one is intended before changing it.
with open('D:/deeplab/tensorflow-deeplab-v3/grass/val.txt', 'w') as f:
    for i in range(101, 142):
        f.write(s[i] + '\n')
path = "D:/deeplab/tensorflow-deeplab-v3/grass/VOCdevkit/VOC2012/SegmentationClass"  # label directory
files = os.listdir(path)
i = 0
for file in files:
    if not os.path.isdir(os.path.join(path, file)):
        # Rename each label file to the i-th stem collected above (relies on
        # both directories listing in the same order).
        os.rename(os.path.join(path, file), os.path.join(path, s[i] + ".png"))
        i = i + 1
|
# SE
# Declarative definition of the system/environment ("SE") sensor PVs:
# fans, temperatures, memory/CPU, NTP and overflow detection, plus
# aggregate health records.  Record factories come from `common`.
from common import *

# Debug switches; included in the system health aggregate below so leaving
# either engaged shows up as an alarm.
debug_control = [
    boolOut('LOOPBACK', 'Normal', 'Loopback', OSV = 'MAJOR', VAL = 0,
        DESC = 'Enable internal data loopback'),
    boolOut('COMPENSATE', 'Normal', 'Disabled', OSV = 'MAJOR', VAL = 0,
        DESC = 'Disable internal delay compensation')]

trigger_pvs = []    # All sensor records that need triggering, in order
health_pvs = []     # Records for reporting aggregate health

# Fans and temperatures
fan_set = longIn('SE:FAN_SET', 4000, 6000, 'RPM', DESC = 'Fan set speed')
fan_temp_pvs = [fan_set]
for i in (1, 2):
    fan_speed = longIn('SE:FAN%d' % i, 4000, 6000, 'RPM',
        LOLO = 100, LLSV = 'MAJOR', LOW = 4000, LSV = 'MINOR',
        DESC = 'Fan %d speed' % i)
    fan_temp_pvs.extend([
        fan_speed,
        # Deviation of measured fan speed from the set point.
        records.calc('SE:FAN%d_ERR' % i,
            DESC = 'Fan %d speed error' % i,
            CALC = 'A-B', INPA = fan_speed, INPB = fan_set,
            EGU = 'RPM',
            LOLO = -1000, LLSV = 'MAJOR', LOW = -500, LSV = 'MINOR',
            HIGH = 500, HSV = 'MINOR', HIHI = 1000, HHSV = 'MAJOR')])
fan_temp_pvs.append(
    # Motherboard temperature
    longIn('SE:TEMP', 30, 60, 'deg C',
        DESC = 'Motherboard temperature',
        HIGH = 55, HSV = 'MINOR',
        HIHI = 60, HHSV = 'MAJOR'))
trigger_pvs.extend(fan_temp_pvs)
health_pvs.append(
    AggregateSeverity('SE:FAN:OK', 'Fan controller health', fan_temp_pvs))

system_alarm_pvs = [
    # System memory and CPU usage
    aIn('SE:FREE', 0, 64, 'MB', 2,
        DESC = 'Free memory',
        LOW = 12, LSV = 'MINOR',
        LOLO = 8, LLSV = 'MAJOR'),
    aIn('SE:RAMFS', 0, 64, 'MB', 3,
        DESC = 'Temporary file usage',
        HIGH = 1, HSV = 'MINOR',
        HIHI = 16, HHSV = 'MAJOR'),
    aIn('SE:CPU', 0, 100, '%', 1,
        DESC = 'CPU usage',
        HIGH = 80, HSV = 'MINOR',
        HIHI = 95, HHSV = 'MAJOR'),
    boolIn('SE:ADCCLK', 'Clock Ok', 'Clock Dropout',
        ZSV = 'NO_ALARM', OSV = 'MAJOR', DESC = 'ADC clock dropout detect'),
    # The following list must match the corresponding enum in sensors.c
    mbbIn('SE:NTPSTAT',
        ('Not monitored', 0, 'NO_ALARM'),   # Monitoring disabled
        ('No NTP server', 1, 'MAJOR'),      # Local NTP server not found
        ('Startup', 2, 'NO_ALARM'),         # No alarm during startup
        ('No Sync', 3, 'MINOR'),            # NTP server not synchronised
        ('Synchronised', 4, 'NO_ALARM'),    # Synchronised to remote server
        DESC = 'Status of NTP server'),
    longIn('SE:STRATUM',
        LOW = 0, LSV = 'MAJOR',             # Probably does not occur now
        HIGH = 16, HSV = 'MAJOR',           # Unspecified stratum
        DESC = 'NTP stratum level')]
trigger_pvs.extend(system_alarm_pvs)
health_pvs.append(
    AggregateSeverity('SE:SYS:OK', 'System health',
        system_alarm_pvs + debug_control))

# Sensor PVs without alarm status.
trigger_pvs.extend([
    # Time since booting
    aIn('SE:UPTIME', 0, 24*365, 'h', 2, DESC = 'Total system up time'),
    aIn('SE:EPICSUP', 0, 24*365, 'h', 2, DESC = 'Time since EPICS started'),
    # Channel access counters
    longIn('SE:CAPVS', DESC = 'Number of connected PVs'),
    longIn('SE:CACLNT', DESC = 'Number of connected clients'),
    # Network statistics
    aIn('SE:NWBRX', 0, 1e4, 'kB/s', 3, DESC = 'Kilobytes received per second'),
    aIn('SE:NWBTX', 0, 1e4, 'kB/s', 3, DESC = 'Kilobytes sent per second'),
    aIn('SE:NWPRX', 0, 1e4, 'pkt/s', 1, DESC = 'Packets received per second'),
    aIn('SE:NWPTX', 0, 1e4, 'pkt/s', 1, DESC = 'Packets sent per second'),
    aIn('SE:NWMRX', 0, 1e4, 'pkt/s', 1, DESC = 'Multicast received per second'),
    aIn('SE:NWMTX', 0, 1e4, 'pkt/s', 1, DESC = 'Multicast sent per second'),
    stringIn('SE:SERVER', DESC = 'Synchronised NTP server')])

# Aggregate all the alarm generating records into a single "health" record.
# Only the alarm status of this record is meaningful.
trigger_pvs.extend(health_pvs)
trigger_pvs.append(
    AggregateSeverity('SE:HEALTH', 'Aggregated health', health_pvs))

Trigger('SE', *trigger_pvs)

# NOTE(review): this out record shares the name 'SE:TEMP' with the longIn
# above — presumably the framework distinguishes in/out record names
# (e.g. via a suffix); confirm no PV name collision.
longOut('SE:TEMP', 30, 60, 'deg', DESC = 'Target temperature')

# Overflow detection PVs
def overflow(name, desc):
    # Helper: a boolean input that raises a MAJOR alarm on overflow.
    return boolIn(name, 'Ok', 'Overflow', OSV = 'MAJOR', DESC = desc)
overflows = [
    overflow('SE:OVF:ADCIN', 'ADC input overflow'),
    overflow('SE:OVF:ADCCOMP', 'ADC compensation filter'),
    overflow('SE:OVF:FIR', 'FIR overflow'),
    overflow('SE:OVF:DAC', 'DAC overflow'),
    overflow('SE:OVF:COMP', 'DAC pre-emphasis overflow'),
    overflow('SE:OVF:DECIMATE', 'FIR decimation overflow'),
]
overflows.append(
    AggregateSeverity('SE:OVF', 'Numerical overflow', overflows))

# Poll the overflow flags ten times a second and fan out to all records.
boolOut('SE:OVF:SCAN',
    SCAN = '.1 second',
    FLNK = create_fanout('SE:OVF:FAN', *overflows),
    DESC = 'Overflow detect scan')
|
#!/usr/bin/python
# -*- mode: python -*-
'''
Looks up in database for nearby srtm files. Downloads if necessary and
then processes them with GDAL
'''
import os , shutil , psycopg2 , configparser , inspect
from viewsheds import initGrassSetup , grassViewshed , grassCommonViewpoints
from subprocess import call
from pyproj import Proj, transform

# Configuration is keyed by script filename inside ../config.ini.
config = configparser.ConfigParser()
config.read('../config.ini')
this_file = os.path.split(inspect.getfile(inspect.currentframe()))[-1]
options = config._sections[this_file]
options_ucmi = config._sections['ucmi.py']
options_viewsheds = config._sections['viewsheds.py']

# Shell command templates (filled in with str.format below).
unzipCmd = 'unzip -n -d {unzipDir} {files};'
# BUG FIX: both templates below were missing their {filename} placeholder
# (corrupted text in its place), so .format(..., filename=...) produced a
# broken wget URL / gdalwarp command.
url = "wget -P {geodataDir} https://dds.cr.usgs.gov/srtm/version2_1/SRTM1/Region_0{region}/{filename};"
gdalwarp = 'gdalwarp -overwrite -t_srs EPSG:3857 -r cubic {filename} {gdalwarpDir}/{tifFilename};'
gdal_merge = 'gdal_merge.py -o {tilename} {directory}*.tif;'

# Directories and naming conventions.
geodataDir = options['geodatadir']
user_temp_dir = options['user_temp_dir']
userFolder = '/'.join(['..' , options_ucmi['viewsheddir'] , '{0}']) + '/'
userDemDir = userFolder + options_ucmi['demdir'] + '/'
filename = "N%02dW%03d.hgt.zip"     # SRTM tile naming (northern/western hemisphere)
tilename = 'x{0}.tif'.format(options_viewsheds['demname'])
radius = config.getfloat(this_file, "radius")  # degrees (circle in which to look for other srtms)

# Find all SRTM tiles within `radius` degrees of a point.
query = """
SELECT lat , lon , region FROM srtm
WHERE
ST_DWithin(geom,
ST_SetSRID(ST_MakePoint({lon},{lat}) , 4326),{radius});
"""
# Queries database for nearby lat, lon SRTMs
def lookupSRTM(lat, lon, userid):
    """Fetch, reproject, crop and merge the SRTM tiles around (lat, lon)
    for `userid`, then load the result into GRASS.

    Returns True on success, False when no tiles cover the point.
    """
    def makeDir(path):
        # Create the directory only if it does not already exist.
        if not os.path.isdir(path):
            os.mkdir(path)

    # Per-user working directories.
    makeDir(user_temp_dir + userid)
    unzipDir = '/'.join([user_temp_dir, userid, options['tempepsg4326']]) + '/'
    gdalwarpDir = '/'.join([user_temp_dir, userid, options['tempepsg3857']]) + '/'
    makeDir(gdalwarpDir)
    makeDir(unzipDir)

    # Connect and query for tiles within `radius` of the point.
    conn = psycopg2.connect("dbname={0} user={1} password={2}".format(
        options['dbname'], options['user'], options['password']))
    cur = conn.cursor()
    cur.execute(query.format(lon=lon, lat=lat, radius=radius))
    result = cur.fetchall()
    if not result:
        # BUG FIX: Python 2 print statements converted to print() calls
        # throughout, consistent with the Python 3 `configparser` import at
        # the top of this file (single-argument form also works on Python 2).
        print("No maps found")
        return False

    # Download any tiles not already cached in geodataDir.
    commandList = ''
    filenames = []
    for intLat, intLon, region in result:
        filenames.append(filename % (intLat, -1 * intLon))
        # If the file has not been downloaded already, add it to the command list.
        if filenames[-1] not in os.listdir(geodataDir):
            commandList += url.format(geodataDir=geodataDir, region=region,
                                      filename=filenames[-1])
    # Execute wget
    os.system(commandList)

    # Unzip the .hgt archives into the EPSG:4326 working directory.
    print("unzipping....")
    commandList = ''
    for name in filenames:
        commandList += unzipCmd.format(files=geodataDir + name, unzipDir=unzipDir)
    os.system(commandList)

    # Reproject each tile to web mercator.
    print("converting to EPSG:3857....")
    commandList = ''
    for name in filenames:
        commandList += gdalwarp.format(filename=unzipDir + name[:-4],
                                       gdalwarpDir=gdalwarpDir,
                                       tifFilename=name[:-7] + 'tif')
    os.system(commandList)

    # Delete any previously merged tile first, then merge the new ones.
    os.system("rm -f " + userDemDir.format(userid) + tilename)
    print("merging tiles")
    os.system(gdal_merge.format(tilename=userDemDir.format(userid) + tilename,
                                directory=gdalwarpDir))

    print("Crop tiles to smaller area")
    # Crop to a box extending `padding` metres (EPSG:3857 units) from the
    # query point; convert the point from EPSG:4326 first.
    padding = 25000
    inProj = Proj(init='epsg:4326')
    outProj = Proj(init='epsg:3857')
    x, y = transform(inProj, outProj, lon, lat)
    ymax = y + padding
    ymin = y - padding
    xmax = x + padding
    xmin = x - padding
    # gdalwarp -te expects: xmin ymin xmax ymax
    extent = "{xmin} {ymin} {xmax} {ymax}".format(ymax=ymax, ymin=ymin, xmax=xmax, xmin=xmin)
    cmd = 'gdalwarp -overwrite -t_srs EPSG:3857 -te {0} {1} {2}'.format(
        extent, userDemDir.format(userid) + tilename, userDemDir.format(userid) + tilename[1:])
    print(cmd)
    os.system(cmd)

    # Delete intermediates; ordering matters — the final merged tile is
    # removed last, after GRASS has loaded it.
    os.system('rm {0}/*'.format(unzipDir))
    initGrassSetup(userDemDir.format(userid), userid, lat, lon)
    os.system('rm {0}/*'.format(gdalwarpDir))
    return True
def pointQuery(lat , lon , pointNum, firstMarker , viewNum , greaterthan , altitude , userid , dateStamp):
    """Run the viewshed pipeline for one marker point.

    On the first marker the DEM for the area is fetched/prepared first;
    returns None (skipping the viewshed) when no SRTM coverage exists.
    """
    if firstMarker:
        result = lookupSRTM(lat , lon , userid)
        if not result:
            # map not found break
            return None
    # run viewshed on point
    grassViewshed(lat ,lon , pointNum , userid)
    # use mapcalc to find common viewpoints
    grassCommonViewpoints(viewNum , greaterthan , altitude , userid , dateStamp)
    # Regenerate the transparent overlay from the combined viewshed.
    makeTransparent(userid)
# makes the image transparent
def makeTransparent(userid):
    """Recolour the combined viewshed PNG (black -> red) and apply 50%
    transparency with ImageMagick, writing the user's location PNG."""
    inputFile = userFolder.format(userid) + options_viewsheds['combinedname'] + '.png'
    outputFile = userFolder.format(userid) + options_ucmi['locationpng']
    # convert viewshed1.png -fill red -opaque black -alpha copy -channel alpha -negate -channel alpha -evaluate multiply 0.5 output.png
    cmd = 'convert {0} -fill red -opaque black -alpha copy -channel alpha -negate -channel alpha -evaluate multiply 0.5 {1}'.format(inputFile,outputFile)
    # BUG FIX: Python 2 print statement -> function call (works on 2 and 3).
    print(cmd)
    os.system(cmd)
def main(args):
    # Smoke test: fetch and prepare SRTM tiles around a fixed test coordinate
    # for a dummy user id. `args` is accepted but unused.
    lookupSRTM(36.95430926,-84.22389407, "343434")
    return 0

if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
|
# Copy the poem, dropping the first five characters of every line
# (presumably a fixed-width prefix — confirm against the input file).
source_path = "C:\\Users\\Anna\\Desktop\\Learning Community\\copypoem.txt"
target_path = "C:\\Users\\Anna\\Desktop\\Learning Community\\blank.txt"
with open(source_path, "r") as infile, open(target_path, "w") as outfile:
    outfile.writelines(line[5:] for line in infile)
|
import cv2
import numpy as np
import dlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image

# Load the image with PIL and display it through OpenCV's HighGUI.
path = 'Detection/data/Image/Ai_Sugiyama_0001.jpg'
im = Image.open(path)
im = np.array(im)
# NOTE(review): PIL loads RGB while cv2.imshow expects BGR, so colours will
# appear swapped; consider cv2.cvtColor(im, cv2.COLOR_RGB2BGR).
cv2.imshow("dsa", im)
# BUG FIX: without waitKey the window is never serviced/painted; block until
# a key press, then tear the window down.
cv2.waitKey(0)
cv2.destroyAllWindows()
# Coin-counting game: ask for a count of each coin and report whether the
# combination totals exactly one dollar.  All values are in cents.
penny_Value = 1
nickel_Value = 5
dime_Value = 10
quarter_Value = 25
dollar_Value = 100

print("\nMoney Counting Game")
pennies = int(input("Enter the number of pennies: "))
nickels = int(input("Enter the number of nickels: "))
dimes = int(input("Enter the number of dimes: "))
quarters = int(input("Enter the number of quarters: "))

penny_total = pennies * penny_Value
nickel_total = nickels * nickel_Value
dime_total = dimes * dime_Value
quarter_total = quarters * quarter_Value
# BUG FIX: the original summed nickel_Value (the constant 5) instead of
# nickel_total, so the entered nickel count was ignored.
total = penny_total + nickel_total + dime_total + quarter_total

if total == dollar_Value:
    print("Congratulations! That combination of coins makes a dollar!")
elif total > dollar_Value:
    total = total / 100  # convert cents to dollars for display
    print("\nSorry,", total, "is more than a dollar")
else:
    # BUG FIX: convert to dollars here too; the original printed raw cents
    # in this branch while the greater-than branch printed dollars.
    total = total / 100
    print("\nSorry", total, "is less than a dollar")
#!/usr/bin/env python
import os
from PIL import Image
from tqdm import tqdm
import os.path
import pathlib


def cropimage_files(foldar_name, first_num, last_num, Acrop, Bcrop, Ccrop, Dcrop, time):
    """Crop frames ``first_num``..``last_num - 1`` of ``<basename>_<time>``
    into four regions (A-D), saving each region into its own directory.

    ``Acrop``..``Dcrop`` are 4-tuples ``(left, upper, right, lower)`` as
    accepted by :meth:`PIL.Image.Image.crop`.
    """
    basename = os.path.basename(foldar_name)
    p_sub = pathlib.Path(foldar_name)
    p_sub_name = str(p_sub.parent)
    # Region label -> crop box; dict order (A, B, C, D) matches the original.
    crops = {"A": Acrop, "B": Bcrop, "C": Ccrop, "D": Dcrop}

    # One output directory per region.
    for label in crops:
        os.mkdir(p_sub_name + '/crop_imgs/' + basename + "_" + label + '_' + time)

    # The original ran four near-identical loops, opening every source frame
    # four times; crop all four regions per frame instead.  Output files are
    # byte-identical.
    for num in tqdm(range(first_num, last_num)):
        file_name = str(num).zfill(6) + "_diff.jpg"
        im = Image.open(p_sub_name + '/th_imgs/' + basename + '_' + time + '/' + file_name)
        for label, box in crops.items():
            im.crop(box).save(
                p_sub_name + '/crop_imgs/' + basename + '_' + label + '_' + time + '/' + file_name)
|
import pytest
from server.ServiceStore import *
# Fixture constants shared by the test classes below.
TEST_ELEMENT_NAME = "test_element"
TEST_ELEMENT_DATA = {"Value":"1", "Color":"red"}
TEST_CONTROL_NAME = "test_control"
# Dataset schema accepted by Control: field name -> {desc, type}.
TEST_CONTROL_DATASET = {"value":{"desc":"Value to set the light", "type":"number"},
                        "color":{"desc":"Color to set the light", "type":"string"}}
TEST_CONTROL_COLOR_VAL = "blue"
TEST_CONTROL_VALUE_VAL = "1"
# Payload shape passed to Control.call(): a list of {name, value} pairs.
TEST_CONTROL_DATA_DICT = [{"name":"value","value":TEST_CONTROL_VALUE_VAL},
                          {"name":"color","value":TEST_CONTROL_COLOR_VAL}]
TEST_SERVICE_NAME = "test_service"
# NOTE(review): identical to TEST_SERVICE_NAME — presumably meant to be
# "test_service2"; the multi-service tests below register two services under
# the same name.  Confirm against Store's registration semantics.
TEST_SERVICE_NAME2 = "test_service"
class TestControl():
    """Tests for Control: clean_dict() serialisation and call() dispatch."""

    def test_no_data_or_callback(self):
        """A minimal control serialises all fields with an empty dataset."""
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on")
        output = control.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "description" in output
        assert "action" in output
        assert len(output["dataset"]) == 0
        assert output["name"] == TEST_CONTROL_NAME

    def test_data_set(self):
        """Each serialised dataset entry mirrors its source definition."""
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET)
        output = control.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "description" in output
        assert "action" in output
        assert len(output["dataset"]) == 2
        for data in output["dataset"]:
            assert data["name"] in TEST_CONTROL_DATASET
            assert data["type"] == TEST_CONTROL_DATASET[data["name"]]["type"]
            assert data["description"] == TEST_CONTROL_DATASET[data["name"]]["desc"]

    def test_no_callback(self):
        """call() on a control without a callback must be a no-op.

        BUG FIX: the original defined a local ``callback`` asserting False
        but never registered it — dead, misleading code, removed.
        """
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET)
        control.call()

    def test_callback_no_data(self):
        """A registered zero-argument callback is invoked by call()."""
        # Robustness fix: reset the module-global flag so a True left over
        # from another test cannot mask a missed callback.
        global run
        run = False

        def callback():
            global run
            run = True

        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET,
                          callback=callback)
        control.call()
        assert run

    def test_callback(self):
        """call(data) forwards the payload values to the callback."""
        global run
        run = False

        def callback(value, color):
            global run
            run = True
            assert value == TEST_CONTROL_VALUE_VAL
            assert color == TEST_CONTROL_COLOR_VAL

        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET,
                          callback=callback)
        control.call(TEST_CONTROL_DATA_DICT)
        assert run
class TestElement():
    """Tests for Element: clean_dict() serialisation, attached controls and
    lookup by control id."""

    def test_no_data(self):
        # Bare element: all keys present, data and controls empty.
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue")
        output = element.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "data" in output
        assert "controls" in output
        assert len(output["controls"]) == 0
        assert len(output["data"]) == 0
        assert output["name"] == TEST_ELEMENT_NAME

    def test_data(self):
        # Every serialised data entry mirrors its key/value in the source dict.
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        output = element.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "data" in output
        assert "controls" in output
        assert len(output["controls"]) == 0
        assert len(output["data"]) == 2
        for data in output["data"]:
            assert data["name"] in TEST_ELEMENT_DATA
            assert data["value"] == TEST_ELEMENT_DATA[data["name"]]

    def test_single_control(self):
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET)
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA, controls=[control])
        output = element.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "data" in output
        assert "controls" in output
        assert len(output["controls"]) == 1
        assert len(output["data"]) == 2

    def test_multiple_controls(self):
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET)
        control2 = Control(name=TEST_CONTROL_NAME + "2",
                           parent="hue",
                           desc="this is a control to turn on light",
                           action="turn on",
                           dataset=TEST_CONTROL_DATASET)
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA, controls=[control, control2])
        output = element.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "data" in output
        assert "controls" in output
        assert len(output["controls"]) == 2
        assert len(output["data"]) == 2

    def test_register_controls(self):
        # Registering after construction behaves like passing controls=[...].
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET)
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        element.register_control(control)
        output = element.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "data" in output
        assert "controls" in output
        assert len(output["controls"]) == 1
        assert len(output["data"]) == 2

    def test_control_by_id(self):
        # Lookup by id returns the matching control, fully serialisable.
        control = Control(name=TEST_CONTROL_NAME,
                          parent="hue",
                          desc="this is a control to turn on light",
                          action="turn on",
                          dataset=TEST_CONTROL_DATASET)
        control2 = Control(name=TEST_CONTROL_NAME + "2",
                           parent="hue",
                           desc="this is a control to turn on light",
                           action="turn on",
                           dataset=TEST_CONTROL_DATASET)
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA, controls=[control, control2])
        output = element.get_control_using_id(TEST_CONTROL_NAME)
        output = output.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "description" in output
        assert "action" in output
        assert len(output["dataset"]) == 2
class TestService():
    """Tests for Service: element registration, lookup and serialisation.

    BUG FIX throughout: the original compared strings with ``is``, which only
    passes because CPython happens to reuse the constant object; ``==`` is
    the correct equality check.
    """

    def test_no_attributes(self):
        # Construction without elements must not raise.
        service = Service(name=TEST_SERVICE_NAME)

    def test_single_element(self):
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        service = Service(name=TEST_SERVICE_NAME, elements=[element])
        output = service.clean_dict()
        assert "name" in output
        assert output["name"] == TEST_SERVICE_NAME
        assert "id" in output
        assert output["id"] == TEST_SERVICE_NAME
        assert "elements" in output
        assert len(output["elements"]) == 1

    def test_multiple_elements(self):
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        element2 = Element(name=TEST_ELEMENT_NAME + "2", parent="Hue", data=TEST_ELEMENT_DATA)
        service = Service(name=TEST_SERVICE_NAME, elements=[element, element2])
        output = service.clean_dict()
        assert "name" in output
        assert output["name"] == TEST_SERVICE_NAME
        assert "id" in output
        assert output["id"] == TEST_SERVICE_NAME
        assert "elements" in output
        assert len(output["elements"]) == 2

    def test_register_element(self):
        # Registering after construction behaves like passing elements=[...].
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        service = Service(name=TEST_SERVICE_NAME)
        service.register_element(element)
        output = service.clean_dict()
        assert "name" in output
        assert output["name"] == TEST_SERVICE_NAME
        assert "id" in output
        assert output["id"] == TEST_SERVICE_NAME
        assert "elements" in output
        assert len(output["elements"]) == 1

    def test_element_by_id(self):
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        element2 = Element(name=TEST_ELEMENT_NAME + "2", parent="Hue", data=TEST_ELEMENT_DATA)
        service = Service(name=TEST_SERVICE_NAME, elements=[element, element2])
        output = service.get_element_using_id(TEST_ELEMENT_NAME)
        output = output.clean_dict()
        assert "name" in output
        assert "parent" in output
        assert "data" in output
        assert len(output["data"]) == 2

    def test_complete_service(self):
        element = Element(name=TEST_ELEMENT_NAME, parent="Hue", data=TEST_ELEMENT_DATA)
        service = Service(name=TEST_SERVICE_NAME, elements=[element])
        output = service.clean_dict()
        assert len(output["elements"]) == 1

    def test_merge(self):
        pytest.skip()
class TestStore():
    """Tests for Store: service registration, serialisation and id lookup.

    BUG FIX throughout: the original used ``is`` for string and integer
    comparisons (``len(...) is 0``), which only works by CPython's object
    caching; replaced with ``==``.
    """

    def test_no_service(self):
        store = Store()
        assert len(store.clean_dict()) == 0

    def test_register_method_one_service(self):
        # Registration must not raise.
        store = Store()
        service = Service(name=TEST_SERVICE_NAME)
        store.register_service(service)

    def test_dict_one_service(self):
        store = Store()
        service = Service(name=TEST_SERVICE_NAME)
        store.register_service(service)
        output = store.clean_dict()
        assert "name" in output[0]
        assert output[0]["name"] == TEST_SERVICE_NAME
        assert "id" in output[0]
        assert output[0]["id"] == TEST_SERVICE_NAME
        assert "elements" in output[0]

    def test_get_id_one_service(self):
        store = Store()
        service = Service(name=TEST_SERVICE_NAME)
        store.register_service(service)
        service = store.get_service_using_id(TEST_SERVICE_NAME)
        output = service.clean_dict()
        assert "name" in output
        assert output["name"] == TEST_SERVICE_NAME
        assert "id" in output
        assert output["id"] == TEST_SERVICE_NAME
        assert "elements" in output

    def test_dict_many_service(self):
        store = Store()
        service = Service(name=TEST_SERVICE_NAME)
        service2 = Service(name=TEST_SERVICE_NAME2)
        store.register_service(service)
        store.register_service(service2)
        output = store.clean_dict()
        assert len(output) == 2

    def test_get_id_many_service(self):
        store = Store()
        service = Service(name=TEST_SERVICE_NAME)
        service2 = Service(name=TEST_SERVICE_NAME2)
        store.register_service(service)
        store.register_service(service2)
        service = store.get_service_using_id(TEST_SERVICE_NAME)
        output = service.clean_dict()
        assert "name" in output
        assert output["name"] == TEST_SERVICE_NAME
        assert "id" in output
        assert output["id"] == TEST_SERVICE_NAME
        assert "elements" in output
        service = store.get_service_using_id(TEST_SERVICE_NAME2)
        output = service.clean_dict()
        assert "name" in output
        assert output["name"] == TEST_SERVICE_NAME2
        assert "id" in output
        assert output["id"] == TEST_SERVICE_NAME2
        assert "elements" in output

    def test_merge_services(self):
        pytest.skip("apples")
import re
import requests
import json
import os
import pdfkit
from bs4 import BeautifulSoup
from urllib.parse import quote
from time import sleep
import random
import datetime
def get_data(url, headers, before=None, after=None):
    """Fetch one page of topics, render each to HTML, then recurse to the
    next (older) page.

    before: default None; otherwise a string like '2021-06-31 21:00' — only
            topics created at or before this time are collected.
    after:  default None; otherwise a string like '2021-05-27 20:00' — only
            topics created at or after this time are collected.
    """
    # NOTE(review): accumulates into the module-level `htmls`/`num` and
    # recurses once per page — very long histories could approach the
    # recursion limit; confirm acceptable.
    global htmls, num
    # Retry up to 10 times when the API reports failure.
    i = 0
    while i < 10:
        rsp = requests.get(url, headers=headers)
        if rsp.json().get("succeeded") == False:
            sleep(0.01)
            print("访问失败,重来一遍...")
            rsp = requests.get(url, headers=headers)
            i += 1
        else:
            break
    # Dump the raw response to temp_content.json for easy inspection.
    with open('temp_content.json', 'w', encoding='utf-8') as f:
        f.write(json.dumps(rsp.json(), indent=2, ensure_ascii=False))
    with open('temp_content.json', encoding='utf-8') as f:
        all_contents = json.loads(f.read())
    contents = all_contents.get('resp_data').get('topics')
    if contents is not None:
        for topic in contents:
            # Parse the creation time and apply the before/after window.
            create_time = topic.get("create_time", "")
            if create_time != "":
                create_time = create_time[:16].replace("T", " ")
                create_time_time = datetime.datetime.strptime(create_time, '%Y-%m-%d %H:%M')
                if after is not None:
                    after_time = datetime.datetime.strptime(after, '%Y-%m-%d %H:%M')
                    if after_time > create_time_time: continue
                if before is not None:
                    before_time = datetime.datetime.strptime(before, '%Y-%m-%d %H:%M')
                    if create_time_time > before_time: continue
            # A topic's body lives under one of several keys.
            content = topic.get('question', topic.get('talk', topic.get('task', topic.get('solution'))))
            # print(content)
            # Strip HTML tags, keep line breaks as <br>.
            text = content.get('text', '')
            text = re.sub(r'<[^>]*>', '', text).strip()
            text = text.replace('\n', '<br>')
            if text != "":
                # Title: running number + first line of the text.
                pos = text.find("<br>")
                title = str(num) + " " + text[:pos]
            else:
                title = str(num) + "Error: 找不到内容"
            if content.get('images'):
                # Append every attached image to the template, then fill it.
                soup = BeautifulSoup(html_template, 'html.parser')
                for img in content.get('images'):
                    url = img.get('large').get('url')
                    img_tag = soup.new_tag('img', src=url)
                    soup.body.append(img_tag)
                html_img = str(soup)
                html = html_img.format(title=title, text=text, create_time=create_time)
            else:
                html = html_template.format(title=title, text=text, create_time=create_time)
            if topic.get('question'):
                # Q&A topics: append the answer text as a paragraph.
                answer = topic.get('answer').get('text', "")
                soup = BeautifulSoup(html, 'html.parser')
                answer_tag = soup.new_tag('p')
                answer_tag.string = answer
                soup.body.append(answer_tag)
                html_answer = str(soup)
                html = html_answer.format(title=title, text=text, create_time=create_time)
            htmls.append(html)
            num += 1
    else:
        print("*" * 16, "访问失败", "*" * 16)
        print("失败url:", url)
        print(all_contents)
        print(rsp.status_code)
        print("*" * 40)
    # Paginate: request everything strictly older than the last topic on
    # this page (end_time = its create_time minus one millisecond).
    next_page = rsp.json().get('resp_data').get('topics')
    if next_page:
        create_time = next_page[-1].get('create_time')
        if create_time[20:23] == "000":
            end_time = create_time[:20] + "999" + create_time[23:]
        else :
            res = int(create_time[20:23])-1
            end_time = create_time[:20] + str(res).zfill(3) + create_time[23:]  # zfill pads the millisecond field with leading zeros to 3 digits
        end_time = quote(end_time)
        if len(end_time) == 33:
            end_time = end_time[:24] + '0' + end_time[24:]
        next_url = start_url + '&end_time=' + end_time
        print("next_url:", next_url)
        sleep(random.randint(1, 5) / 100)
        get_data(next_url, headers, before, after)
    return htmls
def make_pdf(htmls, pdf_filepath="电子书.pdf"):
    """Write each HTML fragment to a numbered file, combine them into one
    PDF with pdfkit, then delete the intermediate HTML files."""
    html_files = []
    for index, html in enumerate(htmls):
        file = str(index) + ".html"
        html_files.append(file)
        with open(file, "w", encoding="utf-8") as f:
            f.write(html)

    # wkhtmltopdf options; outline-depth drives the PDF bookmarks.
    options = {
        "user-style-sheet": "default.css",
        "page-size": "Letter",
        "margin-top": "0.75in",
        "margin-right": "0.75in",
        "margin-bottom": "0.75in",
        "margin-left": "0.75in",
        "encoding": "UTF-8",
        "custom-header": [("Accept-Encoding", "gzip")],
        "cookie": [
            ("cookie-name1", "cookie-value1"), ("cookie-name2", "cookie-value2")
        ],
        "outline-depth": 10,
    }
    try:
        print("生成PDF文件中,请耐心等待...")
        if os.path.exists(pdf_filepath): os.remove(pdf_filepath)
        pdfkit.from_file(html_files, pdf_filepath, options=options)
    except Exception as e:
        print("生成pdf报错")
        print(e)
    finally:
        # Robustness fix: clean up the intermediate HTML files even if the
        # stack unwinds with a non-Exception (e.g. KeyboardInterrupt); the
        # original removed them after the except block only.
        for i in html_files:
            os.remove(i)
    print("已制作电子书在当前目录!")
if __name__ == '__main__':
    # This HTML template is the default and does not need to be modified.
    html_template = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
    </head>
    <body>
    <h1>{title}</h1>
    <p>{create_time}</p>
    <p>{text}</p>
    </body>
    </html>
    """
    # First log in to an account that can view the target planet and open
    # its page.  In Chrome, refresh the page and find a request like
    # `topics?...` in the Network panel (it returns JSON).
    # Copy that request's cookie into the Cookie entry of `headers` below,
    # and copy the request URL (domain api.zsxq.com) into `start_url`.
    headers = {
        'Cookie':'abtest_env=product; zsxq_access_token=EB72127D-2A94-A794-46FE-8E1D0F151F40_C348130420D15229; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22414445544118248%22%2C%22first_id%22%3A%2217a3722c1df65-0d10035f06881c8-e726559-2073600-17a3722c1e0421%22%2C%22props%22%3A%7B%7D%2C%22%24device_id%22%3A%2217a3722c1df65-0d10035f06881c8-e726559-2073600-17a3722c1e0421%22%7D',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36 SE 2.X MetaSr 1.0'
    }
    start_url = 'https://api.zsxq.com/v2/groups/551212824514/topics?scope=by_owner&count=20'
    # Only articles with after <= timestamp <= before are fetched; omit both
    # parameters to fetch the whole history.
    # The example below splits a long history into quarterly PDFs — useful
    # for planets with a long time span and a lot of content; adjust freely.
    time_period = [
        ("2021-04-01 00:00", "2021-06-30 23:59"),
        ("2021-01-01 00:00", "2021-03-31 23:59"),
        ("2020-10-01 00:00", "2020-12-31 23:59"),
        ("2020-07-01 00:00", "2020-09-30 23:59"),
        ("2020-04-01 00:00", "2020-06-30 23:59"),
        ("2020-01-01 00:00", "2020-03-31 23:59"),
        ("2019-10-01 00:00", "2019-12-31 23:59"),
        ("2019-07-01 00:00", "2019-09-30 23:59"),
        ("2019-04-01 00:00", "2019-06-30 23:59"),
        ("2019-01-01 00:00", "2019-03-31 23:59"),
        ("2018-10-01 00:00", "2018-12-31 23:59"),
        ("2018-07-01 00:00", "2018-09-30 23:59"),
        ("2018-04-01 00:00", "2018-06-30 23:59"),
    ]
    for period in time_period:
        pdf_filepath = "你的知识星球%s-%s.pdf" % (period[0][:10].replace("-",""), period[1][:10].replace("-",""))
        # Reset the module-level accumulators consumed by get_data().
        htmls = []
        num = 1
        make_pdf(get_data(start_url, headers, before=period[1], after=period[0]), pdf_filepath=pdf_filepath)
    # To fetch everything in one go use these lines instead (PDF generation
    # becomes very slow when there is a lot of content):
    # htmls = []
    # num = 1
    # make_pdf(get_data(start_url, headers))
def egypt(num, den):
    """Print the greedy (Fibonacci) Egyptian-fraction expansion of num/den,
    one unit fraction per line in the form '1/ d'.

    Repeatedly takes the largest unit fraction 1/ceil(den/num) and recurses
    on the remainder.  Assumes 0 < num <= den (num > den prints nothing).
    """
    if num == 1:
        print('1/', den)
    elif num < den:
        if den % num == 0:
            # num divides den: a single unit fraction finishes the expansion.
            print('1/', den // num)
        else:
            # ceil(den/num) without floating point (den % num != 0 here).
            ceil = den // num + 1
            print('1/', ceil)
            # Remainder: num/den - 1/ceil = (num*ceil - den) / (ceil*den).
            egypt(num * ceil - den, ceil * den)


egypt(12, 13)
""" Interpolate horizon from a carcass. """
#pylint: disable=attribute-defined-outside-init
from textwrap import indent
from .horizon import HorizonController
class Interpolator(HorizonController):
    """ Convenient class for carcass interpolation. """

    def train(self, dataset=None, cube_paths=None, horizon_paths=None, horizon=None, **kwargs):
        """ Make sampler and run train process with it.

        Either pass a ready `dataset` or the pieces (cube/horizon paths or a
        horizon instance) needed to build one; the first label of the dataset
        is treated as the carcass.  Saves diagnostic images of the carcass and
        of the sampler's locations/samples along the way.
        """
        if dataset is None:
            dataset = self.make_dataset(cube_paths=cube_paths, horizon_paths=horizon_paths, horizon=horizon)
        horizon = dataset.labels[0][0]

        # Snapshot and coverage of the input carcass for the logs.
        horizon.show(load_kwargs={'enlarge': True}, show=self.plot, savepath=self.make_savepath('input_image.png'))
        self.log(f'Coverage of carcass is {horizon.coverage:2.5f}')

        sampler = self.make_sampler(dataset)
        sampler.show_locations(show=self.plot, savepath=self.make_savepath('sampler_locations.png'))
        sampler.show_sampled(show=self.plot, savepath=self.make_savepath('sampler_generated.png'))
        self.log(f'Created sampler\n{indent(str(sampler), " "*4)}')

        return super().train(dataset=dataset, sampler=sampler, **kwargs)

    def inference(self, dataset, model, config=None, name=None, **kwargs):
        """ Prediction with custom naming schema.

        The prediction is named 'from_<carcass name>' when the dataset has a
        label, otherwise 'from_prediction_<mean depth>'.
        """
        prediction = super().inference(dataset=dataset, model=model, **kwargs)[0]
        if name is None:
            if len(dataset.labels[0]) > 0:
                name = dataset.labels[0][0].name
            else:
                name = f'prediction_{int(prediction.h_mean)}'
        prediction.name = f'from_{name}'
        return prediction

    # One method to rule them all
    def run(self, cube_paths=None, horizon_paths=None, horizon=None, **kwargs):
        """ Run the entire procedure of horizon detection: from loading the carcass/grid to outputs. """
        dataset = self.make_dataset(cube_paths=cube_paths, horizon_paths=horizon_paths, horizon=horizon)
        model = self.train(dataset=dataset, **kwargs)
        prediction = self.inference(dataset, model)
        prediction = self.postprocess(prediction)
        info = self.evaluate(prediction, dataset=dataset)
        return prediction, info
|
from django.db import models

# Create your models here.
class Cart(models.Model):
    """One cart line: a goods item for a user, with quantity and a
    denormalised snapshot of the goods' name, picture and price."""
    user_id = models.IntegerField()                 # plain id, no FK constraint
    goods_id = models.IntegerField()                # plain id, no FK constraint
    count = models.IntegerField()                   # quantity of this item
    goods_name = models.CharField(max_length=50)
    pic = models.CharField(max_length=250)          # picture path/URL (string)
    price = models.DecimalField(max_digits=7, decimal_places=2)
    is_checked = models.IntegerField(default=0)     # 0/1 selection flag — confirm semantics with frontend

    class Meta():
        db_table = 'cart'  # explicit table name instead of app_cart
|
import multiprocessing
import string
import time
def readPuzzle(fileName):
    """Read the letter grid: one row per non-blank line, spaces removed,
    returned as a list of character lists (trailing newlines included)."""
    with open(fileName) as File:
        lines = File.readlines()
    lines = (filter(lambda line: line != "\n", lines))
    # BUG FIX: `string.replace(line, ...)` was removed from the `string`
    # module in Python 3; the equivalent str method works on 2 and 3 alike.
    return [list(line.replace(" ", "")) for line in lines]
def readWordList(fileName):
    """Read the search words: every whitespace-separated token on every
    non-blank line, flattened into a single list."""
    with open(fileName) as source:
        raw = source.readlines()
    words = []
    for line in raw:
        if line != "\n":
            words.extend(line.split())
    return words
# Direction codes, keypad-style (4 = stay put):
#   0 1 2
#   3 4 5
#   6 7 8
def moveDirection(point, direction):
    """Return `point` shifted one step along `direction`.

    The first component changes for the 0/3/6 (minus) and 2/5/8 (plus)
    columns; the second for the 0/1/2 (minus) and 6/7/8 (plus) rows.
    """
    first, second = point
    if direction in (0, 3, 6):
        first -= 1
    elif direction in (2, 5, 8):
        first += 1
    if direction in (0, 1, 2):
        second -= 1
    elif direction in (6, 7, 8):
        second += 1
    return (first, second)
def readWord(grid, point, direction, length):
    """Collect `length` letters starting at `point` and stepping along
    `direction`; return the string, or None if the walk leaves the grid.

    Index convention: `point` is (x, y), letters come from grid[y][x]; the
    bounds check compares x to the row width and y to the row count.
    """
    letters = []
    for _ in range(length):
        x, y = point[0], point[1]
        if x < 0 or y < 0 or x >= len(grid[0]) or y >= len(grid):
            return None
        letters.append(grid[y][x])
        point = moveDirection(point, direction)
    return "".join(letters)
# Returns whether the word was found (callers currently ignore the result).
# Stops at the first occurrence; remove the `return` to report every one.
def findWord(grid, word, dirMap):
    """Scan every cell and direction for `word`; on the first hit print its
    1-based position and direction name, and return True, else False."""
    for y in range(len(grid)):
        for x in range(len(grid[0])):
            for direction in range(9):
                # Direction 4 is "no movement" and can never spell a word.
                if direction != 4 and word == readWord(grid, (x, y), direction, len(word)):
                    # BUG FIX: Python 2 print statement -> function call, so
                    # the module is importable on Python 3 as well.
                    print("{}\t\t({},{})\t\t{}".format(word, x + 1, y + 1, dirMap[direction]))
                    return True
    return False
grid = readPuzzle('puzzle.txt')
wordList = readWordList('word_list.txt')
# Direction labels for reporting (4 = no movement, so it has no label).
# BUG FIX: direction 2 was labelled "NW", duplicating direction 0; by the
# 0-8 keypad layout it is the opposite corner on the row axis, i.e. "NE".
dirMap = {0: "NW", 1: "N", 2: "NE", 3: "W", 5: "E", 6: "SW", 7: "S", 8: "SE"}
# Search each word in its own process.
for word in wordList:
    p = multiprocessing.Process(target=findWord, args=(grid, word, dirMap))
    p.start()
|
# coding=UTF-8
import argparse
# Example program: print a phrase n times, both configurable on the CLI.
parser = argparse.ArgumentParser(description='Um programa de exemplo.')
parser.add_argument('--frase', action='store', dest='frase',
                    default='Hello, world!', required=False,
                    help='A frase que deseja imprimir n vezes.')
# Fixed: type=int lets argparse validate the count and emit a clean usage
# error instead of the script crashing later on int(...) with bad input.
parser.add_argument('-n', action='store', dest='n', type=int, required=True,
                    help='O número de vezes que a frase será impressa.')
arguments = parser.parse_args()
for _ in range(arguments.n):
    print(arguments.frase)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
import datetime
from datetime import timedelta
from flask_appbuilder import Model
from flask_appbuilder.security.sqla.models import User
from sqlalchemy import (
Boolean, Column, DateTime, Integer, Float, String, UniqueConstraint, ForeignKey, Sequence, Table)
from sqlalchemy.orm import relationship
# Shared SQLAlchemy MetaData, used by the plain Table definitions below.
metadata = Model.metadata  # pylint: disable=no-member
class ResetRequest(Model):
    """A password-reset request issued for a user."""
    # Fixed: was ``___tablename__`` (three leading underscores), which
    # SQLAlchemy's declarative layer silently ignores, so the intended
    # table name never applied.
    __tablename__ = 'reset_request'
    id = Column(Integer, Sequence('reset_request_id_seq'), primary_key=True, autoincrement=True)
    user_id = Column(Integer, ForeignKey('ab_user.id'))
    email = Column(String(64), nullable=False)
    reset_date = Column(DateTime, default=datetime.datetime.now, nullable=True)
    reset_hash = Column(String(256))    # token emailed to the user
    used = Column(Boolean)              # whether the reset link was consumed
# Many-to-many association between users (ab_user) and teams; each
# (user, team) pair may appear at most once.
assoc_team_user = Table(
    'team_user', metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey('ab_user.id')),
    Column('team_id', Integer, ForeignKey('team.id')),
    UniqueConstraint('user_id', 'team_id')
)
class Team(Model):
    """A team grouping SolarBI users via the team_user association table."""
    __tablename__ = 'team'
    id = Column(Integer, Sequence('team_id_seq'), primary_key=True, autoincrement=True)
    team_name = Column(String(250))
    # backref='team' also exposes .team on SolarBIUser instances
    users = relationship('SolarBIUser', secondary=assoc_team_user, backref='team')
    date_created = Column(DateTime, default=datetime.datetime.now)
    def __repr__(self):
        # shown wherever a Team is rendered (admin lists, logs)
        return self.team_name
class TeamRegisterUser(Model):
    """ the register model for users who are invited by admin """
    __tablename__ = 'ab_register_user'
    id = Column(Integer, Sequence("ab_register_user_id_seq"), primary_key=True)
    first_name = Column(String(64), nullable=True)
    last_name = Column(String(64), nullable=True)
    username = Column(String(64), unique=True, nullable=False)
    password = Column(String(256))
    email = Column(String(64), nullable=False)
    team = Column(String(250), nullable=False)    # team name the invitee joins
    registration_date = Column(DateTime, default=datetime.datetime.now, nullable=True)
    registration_hash = Column(String(256))       # token embedded in the invite link
    inviter = Column('inviter_id', Integer, ForeignKey('ab_user.id'), nullable=True)
    # invitation expires 24 hours after it is created
    valid_date = Column(DateTime, default=(lambda: datetime.datetime.now() + timedelta(hours=24)),
                        nullable=True)
    role_assigned = Column('role_id', Integer, ForeignKey('ab_role.id'), nullable=True)
class SolarBIUser(User):
    """Extends Flask-AppBuilder's ab_user table with an email-confirmation flag."""
    __tablename__ = 'ab_user'
    email_confirm = Column(Boolean, default=False)
    # extend_existing lets this class add columns to the already-declared
    # ab_user table instead of raising on redefinition
    __table_args__ = {'extend_existing': True}
from .kitti_eigen_cameras_calibration import KittiEigenCamerasCalibration
from .video_dataset_adapter import VideoDatasetAdapter
from ..data_transform_manager import DataTransformManager
from ..unsupervised_depth_data_module import UnsupervisedDepthDataModule
from ..video_dataset import VideoDataset
class KittiEigenVideoDataModuleFactory(object):
    """Builds an UnsupervisedDepthDataModule over the KITTI Eigen split."""
    def __init__(self, main_folder, split = "my_split"):
        self._main_folder = main_folder
        self._split = split
    def make_data_module(self, transform_manager_parameters, final_image_size, split, batch_size, num_workers,
                         device):
        # Left and right camera streams of the same driving sequences.
        left_adapter = VideoDatasetAdapter(self._main_folder, self._split, "l")
        right_adapter = VideoDatasetAdapter(self._main_folder, self._split, "r")
        source_size = left_adapter.get_image_size()
        manager = DataTransformManager(
            source_size,
            final_image_size,
            transform_manager_parameters,
        )
        paired_dataset = VideoDataset(
            left_adapter,
            right_adapter,
            transform = manager.get_train_transform()
        )
        calibration = KittiEigenCamerasCalibration(final_image_size, source_size, device)
        return UnsupervisedDepthDataModule(
            paired_dataset,
            manager,
            cameras_calibration=calibration,
            batch_size=batch_size,
            num_workers=num_workers,
            split=split,
        )
|
from django.shortcuts import render
import getpass
import requests
from bs4 import BeautifulSoup
# Create your views here.
from django.http import HttpResponse
import os, sys
# Make the local "template" directory importable by prepending it to sys.path.
cwd=os.getcwd()
cwd+="/template"
sys.path.insert(0, cwd)
def home(request):
    """Render the landing page with the credentials form."""
    return render(request, 'home.html')
def calculate_sgpa(request):
    """Log into the GNDEC academics portal with the POSTed credentials,
    scrape the grades table and render the expected SGPA / percentage.

    Expects ``uname`` and ``psw`` in request.POST (submitted by home.html).
    """
    UID = request.POST["uname"]
    PSW = request.POST["psw"]
    payload={"username":UID,"password":PSW}
    # authenticate once, then reuse the session cookies for the result page
    req=requests.post("https://academics.gndec.ac.in/",data=payload)
    cookies=req.cookies
    values={"final_exam_result_with_grades":"in"}
    rpost = requests.post("https://academics.gndec.ac.in", cookies=cookies, data=values)
    soup=BeautifulSoup(rpost.content,'html.parser')
    td_tags = list(soup.find_all('td'))
    #print(td_tags)
    candidate_name = td_tags[0].get_text()
    td_tags = td_tags[5:-32] # Clearing the extra data
    # Regroup the flat list of <td> cells into rows of 13 columns,
    # substituting '0' for empty cells.  NOTE(review): the 13-column row
    # shape and the [5:-32] trim are tied to the portal's HTML — verify
    # against the live page if scraping breaks.
    subject_list = []
    temp_list = []
    count = 1
    for item in td_tags:
        if item.get_text()!='' and item.get_text()!=' ':
            temp_list.append(item.get_text())
        else:
            temp_list.append('0')
        if count == 13:
            count=1
            subject_list.append(temp_list)
            temp_list=[]
        else:
            count+=1
    #print(subject_list)
    # SGPA = sum(grade_point * credits) / sum(credits) over passed subjects;
    # a grade point below 4 marks the result as failed.
    fail_flag=0
    total_credit=0
    credit_grade_sum=0
    for subject in subject_list:
        if int(subject[-2]) >= 4: # Grade >=4
            credit_grade_sum += int(subject[11])*int(subject[12])
            total_credit += int(subject[12])
        else:
            fail_flag=1
    #print(tabulate(subject_list,headers=["Semester","Subject Code","M code","Subject Title","Theory / Practical","Result Type","Internal Obtained Marks", "Internal Max. Marks", "External Obtained Marks", "External Max. Marks", "Grade Letter", "Grade Point", "Credits"]))
    sgpa=credit_grade_sum/total_credit
    if fail_flag!=1:
        string="""Congratulations Mr./Mrs. {} !!
Your expected SGPA is {:0.2f}
Your expected percentage is {:0.2f}%""".format(candidate_name, sgpa, sgpa*9.5)
    else:
        string="Sorry, your result cannot be displayed. Please check your result manually!"
    return render(request, 'result.html', {"result":string})
#def calculate_sgpa(request):
# UID = request.POST["uname"]
# PSW = request.POST["psw"]
# string=str(UID)+str(PSW)
# return render(request, 'result.html', {"result":string})
|
#
# @lc app=leetcode.cn id=76 lang=python3
#
# [76] 最小覆盖子串
#
# @lc code=start
class Solution:
    def minWindow(self, s: str, t: str) -> str:
        """Return the minimum window of *s* that contains every character
        of *t* (with multiplicity), or "" if no such window exists.

        Classic sliding window: grow the right edge until the window is
        valid, then shrink from the left while it stays valid.
        Runs in O(len(s) + len(t)).
        """
        need, window = {}, {}
        # Required count of each character of t (dict.get replaces the
        # original manual if/else counting).
        for c in t:
            need[c] = need.get(c, 0) + 1
        valid = 0                    # distinct chars whose count is satisfied
        left, right = 0, 0
        start, end = 0, len(s) + 1   # best window; len(s)+1 means "not found"
        while right < len(s):
            # Expand the window with s[right].
            c = s[right]
            right += 1
            if c in need:
                window[c] = window.get(c, 0) + 1
                if window[c] == need[c]:
                    valid += 1
            # Shrink from the left while every required char is covered.
            while valid == len(need):
                # record the smallest valid window seen so far
                if right - left < end - start:
                    start, end = left, right
                c = s[left]
                left += 1
                if c in need:
                    if window[c] == need[c]:
                        valid -= 1
                    window[c] -= 1
        return s[start:end] if end != len(s) + 1 else ""
# @lc code=end
|
from pyzabbix import ZabbixAPI
import datetime
import time
# Connect to the Zabbix server API and count unresolved disaster alerts.
zapi = ZabbixAPI(server="http://192.168.44.4/zabbix")
# Fixed: initialise count before the try block — previously a failed login
# left it undefined and the loop below raised NameError.
count = 0
try:
    #zapi.login(user="userforapi",password="'Pakkass##18!'")
    zapi.login(user="Admin", password="zabbix")
    print("Yes Connected")
except Exception:    # fixed: bare except also swallowed SystemExit/KeyboardInterrupt
    print("Ops getting issue in connecting....")
# PROBLEM events (value=1) of severity 4 for host group 24.
alert_count = zapi.event.get(groupids=24, value=1, severities=4)
print(alert_count)
print("going To loop")
for insr in alert_count:
    print(insr)
    print("i am in the loop")
    # unresolved (r_eventid == '0') disaster-severity problems only
    if (insr['r_eventid'] == '0' and insr['severity'] == '5' and insr['value'] == '1'):
        count = count + 1
print("second for loop is to be finished")
print("found Disater alert=", count)
count = 0
print("new loop starting")
|
import cv2
import tensorflow as tf
# Class labels indexed by the model's rounded binary output.
CATEGORIES = ['Dog','Cat']
def prepare(filepath):
    """Load *filepath* as grayscale, resize to 96x96 and return it shaped
    as a single-sample batch (1, 96, 96, 1) for model.predict."""
    IMG_SIZE = 96
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    if img_array is None:
        # cv2.imread returns None (no exception) for missing or unreadable
        # files; fail loudly here instead of crashing inside cv2.resize.
        raise FileNotFoundError("Could not read image: {}".format(filepath))
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    return new_array.reshape(1, IMG_SIZE, IMG_SIZE, 1)
# Load the trained classifier and run it on one dog and one cat image.
# Presumably the model emits a single value where 0 -> Dog and 1 -> Cat
# (matching CATEGORIES order) — TODO confirm against the training code.
model = tf.keras.models.load_model('INSERT_THE_NAME_OF_MODEL.model')
prediction = model.predict([prepare('dog.jpg')])
print(CATEGORIES[int(prediction[0][0])])
prediction = model.predict([prepare('cat.jpg')])
print(CATEGORIES[int(prediction[0][0])])
|
import pandas as pd, numpy as np
from sklearn.model_selection import train_test_split
# Load the preprocessed WikiHow corpus and hold out 10% as a test split
# (fixed seed for reproducibility).
data = pd.read_csv('../processed_data/processed_wikihow.csv')
X_train, X_test = train_test_split(data, test_size=0.1, random_state=42)
# NOTE(review): this prints the entire X_train frame but only X_test's
# shape — X_train.shape was probably intended; confirm before changing.
print(X_train, X_test.shape)
#User Input
# The bare strings below are no-op statements the author used as notes.
"User input is simple compared to java and javascriipt"
"You assign a varibale and use the input()methid following the question or prompt "
# input() always returns a str in Python 3.
username = input("What is your name ")
print(username)
"When python takes in numbers they are converted to string thus you need to convert them to numbers in order to perfrom mathematical ooperations on them"
def add2(a, b):
    """Print and return the sum of *a* and *b*.

    Returning the value (in addition to the original print) lets callers
    use the result; existing callers that ignore it are unaffected.
    """
    result = a + b
    print(result)
    return result
# Prompt for two numbers; input() returns str, so convert before adding.
num1 = input("Enter number 1 ")
num2 = input("Enter number 2 ")
num1 = int(num1)
num2 =int(num2)
add2(num1,num2)
import abc
class AbstractEnv(metaclass=abc.ABCMeta):
    """Abstract environment interface for a sampling-based planner.

    NOTE(review): every method below is declared @staticmethod yet takes a
    ``self`` parameter — contradictory.  With @staticmethod no binding
    happens, so on an instance call the first positional argument fills
    ``self`` explicitly; confirm the intended calling convention with the
    concrete subclasses before changing either side.
    """
    @staticmethod
    @abc.abstractmethod
    def dynamics(self,state,act,rng):
        '''Generate next state & reward after taking action 'act' from state 'state', using provided rng'''
        '''All rewards should be in the range (0,1)'''
    @staticmethod
    @abc.abstractmethod
    def rollout_policy(self,state,num_steps,discount,rng):
        '''Perform rollout for num_step'''
        '''Returns the return ('val') and a bit ('risk') indicating if constraints were violated'''
    @staticmethod
    @abc.abstractmethod
    def random_act_generator(self,state,rng):
        '''Generates a random action from state 'state' using the rng'''
    @staticmethod
    @abc.abstractmethod
    def violates_constraint(self,state):
        '''Checks whether a given state violates a (deterministic) constraint'''
# Count down from the entered number to 1, then announce blast-off.
n = int(input("Enter a number:"))
# Fixed: the original looped over an empty list (``for i in []``), so the
# countdown never ran and only the input value was printed.
for i in range(n, 0, -1):
    print(i)
print("BLASTOFF!!")
|
# Read a declared length (unused by the algorithm) and a string; report the
# lengths of the maximal runs of characters between 'W's.
n = int(input())
code = input().split('W')
result = [len(c) for c in code if c != '']
print(len(result))
# number of runs first, then each run length space-separated
print(*result)
|
#!/usr/bin/python3
"""
Function that divides all elements of a matrix.
matrix (int, float)
div (int, float)
"""
def matrix_divided(matrix, div):
    """Return a new matrix with every element of *matrix* divided by *div*.

    Results are rounded to 2 decimal places; the input is not modified.
    Raises:
        TypeError: if matrix is not a list of lists of ints/floats, if the
            rows differ in length, or if div is not a number.
        ZeroDivisionError: if div is 0.
    """
    err = "matrix must be a matrix (list of lists) of integers/floats"
    # type() (not isinstance) deliberately rejects bools as well.
    if type(matrix) != list or matrix is None:
        raise TypeError(err)
    if any(type(row) != list for row in matrix):
        raise TypeError(err)
    if any(type(col) != int and type(col) != float
           for row in matrix for col in row):
        raise TypeError(err)
    if any(len(row) != len(matrix[0]) for row in matrix):
        raise TypeError("Each row of the matrix must have the same size")
    if type(div) != int and type(div) != float:
        raise TypeError("div must be a number")
    if div == 0:
        raise ZeroDivisionError("division by zero")
    return [[round(col / div, 2) for col in row] for row in matrix]
|
from django.db import models
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.contrib.auth.models import User
from django.core.files import File
# Create your models here.
class Livre(models.Model):
    """A book available in the community's book boxes."""
    titre = models.CharField(max_length = 50)
    slug_title = models.SlugField(default = "")
    auteur = models.CharField(max_length = 50)
    resume = models.TextField(max_length = 1000)
    couverture = models.ImageField(upload_to="couvertures/")
    # covers are stored in the MEDIA_ROOT/couvertures/ directory
    note = models.PositiveSmallIntegerField()
    codeBarre = models.CharField(max_length = 50)
    isbn = models.CharField(max_length = 50)
    edition = models.CharField(max_length = 50)
    """
    boiteALivre = models.ForeignKey(BookBox, on_delete = models.CASCADE)
    A rajouter (foreignKey, ManyToManyField ?):
        * avis sur le livre
        * membres ayant lu le livre
        * boîte à livre où on peut trouver le livre
        * recommendations (autres oeuvres liées)
        * date de parution
        * nombres de livre dispo
        * boîtes à livre contenant le livre
        * date et position du dernier dépôt
    """
    class Meta:
        # verbose_name = "superlivre"
        # verbose_name sets the label objects get in the admin; when not
        # given, the class name is used.
        ordering = ['titre'] # books are sorted by title, ascending
    def __str__(self):
        """Title shown for each book in the admin's book list."""
        return self.titre
    def save(self, *args, **kwargs):
        # slugify the title only on the first save (no primary key yet)
        if not self.id:
            self.slug_title = slugify(self.titre)
        super(Livre,self).save(*args, **kwargs)
    def getUrl(self):
        """Return the URL of this book's description page."""
        return reverse('main_app.views.to_bookList').replace('list',self.slug_title)
class UserProfile(models.Model):
    """ Profile of a member of the Bookinner community """
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    imageProfil = models.ImageField(upload_to = "imagesDeProfil/",blank=True,verbose_name = "Image de Profil",
                                    default='static/images/profil_defaut.png')
    # the verbose_name declared on imageProfil is the label the admin uses
    slug_username = models.SlugField(default = "", unique = True, blank=True)
    def save(self, *args, **kwargs):
        # TODO(review): check whether this first-save guard is necessary
        if not self.id:
            self.slug_username = slugify(self.user.username)
        super(UserProfile,self).save(*args, **kwargs)
    def __str__(self):
        """Return the member's username.

        (A commented-out earlier variant also included the member's first
        and last name.)
        """
        s = str(self.user.username)
        return s
class BookBox(models.Model):
    """ A public book box (boîte à livre); fields still to be designed. """
    """
    A mettre en attributs/champs:
        position géographique
    A mettre en méthode:
        Liste des livres présents dans la boite à livre
        Mettre à jour la liste de livre qui sont dans la boite
    """
|
from src.parameters import *
import numpy as np
import os
import matplotlib.image as mpimg
# Extract patches from a given image
def img_crop(im, w, h):
    """Cut *im* into non-overlapping w-by-h patches and return them as a
    list (inner loop runs over axis 0, outer over axis 1)."""
    dim0, dim1 = im.shape[0], im.shape[1]
    grayscale = len(im.shape) < 3
    patches = []
    for i in range(0, dim1, h):
        for j in range(0, dim0, w):
            if grayscale:
                patch = im[j:j+w, i:i+h]
            else:
                patch = im[j:j+w, i:i+h, :]
            patches.append(patch)
    return patches
def extract_data(filename, num_images):
    """Extract the images into a 4D tensor [image index, y, x, channels].
    Values are rescaled from [0, 255] down to [-0.5, 0.5].
    """
    # NOTE(review): the rescaling described above is not performed here —
    # mpimg.imread already yields floats in [0, 1] for PNGs; confirm where
    # the [-0.5, 0.5] shift is meant to happen.
    imgs = []
    for i in range(1, num_images+1):
        # files are named satImage_001.png, satImage_002.png, ...
        imageid = "satImage_%.3d" % i
        image_filename = filename + imageid + ".png"
        if os.path.isfile(image_filename):
            print ('Loading ' + image_filename)
            img = mpimg.imread(image_filename)
            imgs.append(img)
        else:
            print ('File ' + image_filename + ' does not exist')
    num_images = len(imgs)    # only the files actually found
    IMG_WIDTH = imgs[0].shape[0]    # NOTE(review): IndexError if no file matched
    IMG_HEIGHT = imgs[0].shape[1]
    # computed but never used; IMG_PATCH_SIZE comes from src.parameters (star import)
    N_PATCHES_PER_IMAGE = (IMG_WIDTH/IMG_PATCH_SIZE)*(IMG_HEIGHT/IMG_PATCH_SIZE)
    img_patches = [img_crop(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE) for i in range(num_images)]
    # flatten list-of-lists of patches into one list before stacking
    data = [img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))]
    return np.asarray(data)
|
"""
Created by Alex wang
on 20170512
"""
def ifelse(weight):
    """Print "fat" for weights over 120, otherwise "thin"."""
    if weight > 120:
        label = "fat"
    else:
        label = "thin"
    print(label)
def test_cnumerate():
    """Demonstrate enumerate(): print index/value pairs of a list."""
    print("test enumerate.........")
    str_list = ["one", "two", "three", "four"]
    # Fixed: loop variable renamed from ``str`` — shadowing the builtin
    # made str() unusable inside the loop body.
    for i, word in enumerate(str_list):
        print("{}\t{}".format(i, word))
def test_zip():
    """Demonstrate zip(): lockstep iteration over lists and over tuples."""
    print("test zip..........")
    list_one = [1, 2, 3, 4]
    list_two = ["one", "two", "three", "four"]
    # Fixed: loop variable renamed from ``str`` (shadowed the builtin).
    for i, name in zip(list_one, list_two):
        print("{}\t{}".format(i, name))
    print("test zip..........")
    tuple_one = (1, 2, 3, 4)
    tuple_two = ("one", "two", "three", "four")  # fixed local typo: tuplt_two
    for i, name in zip(tuple_one, tuple_two):
        print("{}\t{}".format(i, name))
def test_unzip():
    """Demonstrate "unzipping" a list of pairs via zip(*pairs)."""
    print("test unzip.........")
    pairs = [(1, 2), (1, 2), (1, 2)]
    firsts, seconds = zip(*pairs)
    print(firsts)
    print(seconds)
def test_join():
    """Demonstrate str.join on a list of words."""
    words = ["one", "two", "three", "four", "five"]
    print(",".join(words))
def test_read():
    """
    Count lines of a file two ways: iterating the open file directly, and
    inside a ``with`` block (which closes the handle automatically).
    :return:
    """
    # NOTE(review): the counter starts at 1 and is never reset between the
    # two passes, so both printed totals are cumulative and off by one;
    # the first open() is also never closed.  Path is Windows-specific.
    count = 1
    file_path = "E://temp/code/alarm.py"
    for line in open(file_path, 'r', encoding='UTF-8'):
        count += 1
    print(count)
    with open(file_path, 'r', encoding='UTF-8') as reader:
        for line in reader:
            count += 1
    print(count)
def test_num():
    """Convert a numeric string to int and print it."""
    # Fixed: local renamed from ``str``, which shadowed the builtin.
    digits = "2087"
    int_num = int(digits)
    print(int_num)
def test_map():
    """Demonstrate map(): stringify numbers and join them with commas."""
    nums = [1, 2, 3, 4, 5]
    joined = ','.join(map(str, nums))
    print('test_map:', joined)
if __name__ == "__main__":
    # Run a subset of the demos; test_read is disabled (hard-coded path).
    test_num()
    # test_read()
    test_unzip()
    test_map()
|
# Print "hello" as many times as the user requests.
inpt = int(input ("Enter Number: "))
for i in range(inpt):
    print("hello")
|
#coding:utf-8
# Demo of list slicing, item assignment and deletion.
# NOTE: the variable name ``list`` shadows the builtin; kept so the
# module-level name is unchanged, but avoid this in new code.
list = [1,"physics","chinese",2]
# Fixed: print statements converted to the print() call form, which works
# on both Python 2 and 3 (the originals were Python-2-only syntax).
print(list[0:])
list[3]= 'happy'
print(list[2:3])
del list[1]
print(list)  # lists support deletion and in-place update
# Sorted input list and the value to look for.
lst=[10,12,13,16,20,25]
searchF=13
def searchL(lst, frm, to, findN):
    """Recursive binary search for *findN* in sorted *lst* restricted to
    indices [frm, to].  Returns the index, or -1 if absent."""
    if to >= frm:
        # Fixed: floor division keeps the midpoint exact; the original
        # int((frm+to)/2) round-trips through float and can lose precision
        # for very large indices.
        centerIndex = (frm + to) // 2
        if findN == lst[centerIndex]:
            return centerIndex
        if findN < lst[centerIndex]:
            return searchL(lst, frm, centerIndex - 1, findN)
        else:
            return searchL(lst, centerIndex + 1, to, findN)
    else:
        return -1
# Search the whole list and report the index found (-1 if absent).
resp=searchL(lst,0,len(lst)-1,searchF)
print("Find =",resp)
|
import json
import pandas as pd
import numpy as np
import folium
from folium.plugins import FloatImage
import vincent
import branca
import branca.colormap as cm
from PIL import Image, ImageDraw, ImageFont
def create_title_image(title, image_path):
    """Render *title* as black 20pt text on a transparent RGBA canvas and
    save it as PNG at *image_path*, cropped to twice the text extent."""
    W, H = (500, 200)
    image = Image.new("RGBA", (W, H))
    draw = ImageDraw.Draw(image)
    font = ImageFont.truetype('arial.ttf', 20)
    # Fixed: the text size was measured with PIL's tiny default font but
    # the title was drawn with the 20pt font, so the crop box below could
    # clip the rendered text.  Measure with the same font that is drawn.
    w, h = draw.textsize(title, font=font)
    draw.text((0, 0), title, font=font, fill=(0, 0, 0))
    image.crop((0, 0, 2 * w, 2 * h)).save(image_path, "PNG")
class DepartementMap:
    """Folium map of France with one toggleable choropleth layer per
    property type (appartement / maison) and per-departement price popups.

    NOTE(review): save() references the module-level global ``colormap``
    defined in the __main__ block below — the class only works when this
    file is run as a script.
    """
    def __init__(self, longitude, latitude, title):
        self.title = title
        self.map = folium.Map(
            location=[longitude, latitude],
            tiles='openstreetmap',
            zoom_start=6,
            attr='My Data Attribution'
        )
        # One feature group per property type; only "Appartement" is shown
        # when the map first loads.
        self.fgroup_appart = folium.map.FeatureGroup(name="Appartement", overlay=True, control=True, show=True)
        self.fgroup_maison = folium.map.FeatureGroup(name="Maison", overlay=True, control=True, show=False)
    @staticmethod
    def make_line_chart_popup(data_row:pd.Series, title:str) -> folium.Popup:
        '''Create a line chart popup from temporal Series for departements
        Index of the Series have to be in {year}_median, {year}_decile1, {year}_decile9, {year+1}_median, {year+1}_decile1... format
        this popup can be added in map layers'''
        # filter index names and build 3 columns from one(series)
        data = {
            'decile_1': data_row.filter(regex=".*decile_1$").values,
            'decile_9': data_row.filter(regex=".*decile_9$").values,
            'median': data_row.filter(like="median").values,
        }
        df_to_display = pd.DataFrame.from_dict(data)
        # "color" is a rendering column added by the caller, not a year
        data_row = data_row.drop("color")
        # create index of the dataframe from the inital data_row Series.index
        # (unique 4-digit year prefixes, in order, parsed as datetimes)
        df_to_display.index = pd.to_datetime(list(dict.fromkeys([int(annee_c[:4]) for annee_c in data_row.index.tolist()])), format="%Y")
        line_chart = vincent.Line(df_to_display,
                                  width=300,
                                  height=200)
        line_chart.axis_titles(x='Année', y='prix m2')
        line_chart.legend(title=title)
        popup = folium.Popup()
        folium.Vega(line_chart, width = 400, height=250).add_to(popup)
        return popup
    def draw_departement(self, d_geodata, row_appart:pd.Series=None, row_maison:pd.Series=None) -> None:
        '''
        d_geodata: geodata for a departement
        row_appart: appartement m2 prices for this departement, None if departement data is missing
        row_maison: maison m2 prices for this departement, None if departement data is missing
        '''
        # One choropleth per property type; white fill marks missing data.
        choro_appart = folium.Choropleth(
            geo_data=d_geodata,
            fill_color=row_appart["color"] if row_appart is not None else "white",
            fill_opacity=0.5,
            line_opacity=1,
            line_weight=1,
            line_color='blue',
        )
        choro_maison = folium.Choropleth(
            geo_data=d_geodata,
            fill_color=row_maison["color"] if row_maison is not None else "white",
            fill_opacity=0.5,
            line_opacity=1,
            line_weight=1,
            line_color='blue',
        )
        # NOTE(review): only row_appart is tested here — row_maison is
        # assumed present/absent in lockstep with it; confirm with callers.
        if row_appart is not None:
            popup_appart = DepartementMap.make_line_chart_popup(row_appart, title=d_geodata["features"][0]["properties"]["nom"])
            popup_appart.add_to(choro_appart)
            popup_maison = DepartementMap.make_line_chart_popup(row_maison, title=d_geodata["features"][0]["properties"]["nom"])
            popup_maison.add_to(choro_maison)
        else:
            popup_appart = folium.Popup("Données source manquantes")
            popup_appart.add_to(choro_appart)
            popup_maison = folium.Popup("Données source manquantes")
            popup_maison.add_to(choro_maison)
        choro_appart.add_to(self.fgroup_appart)
        choro_maison.add_to(self.fgroup_maison)
    def save(self, file_path):
        '''Save to html file'''
        # add the color bar to top right of the map
        colormap.add_to(self.map)
        self.fgroup_appart.add_to(self.map)
        self.fgroup_maison.add_to(self.map)
        lcontrol = folium.map.LayerControl(position='topright', collapsed=False)
        lcontrol.add_to(self.map)
        title_image_path = "static/images/title_logo_departement.png"
        # create and save the title image to the path
        create_title_image(self.title, image_path=title_image_path)
        # create Floating image, image will be loaded from the path when map is loaded in prod
        FloatImage(title_image_path, bottom=95, left=30).add_to(self.map)
        self.map.save(file_path)
if __name__ == "__main__":
    # Per-departement m2 prices for apartments and houses.
    df_appart = pd.read_csv("data/immobilier/data_clean/m2_appartement_price_per_departement.csv", index_col=0)
    df_maison = pd.read_csv("data/immobilier/data_clean/m2_maison_price_per_departement.csv", index_col=0)
    # instance of a LinearColormap for departement coloration
    # LinearColormap class was modified from source, this code will not work with branca library from pip
    # (added index_display property for rendering)
    colormap = cm.LinearColormap(colors=['darkgreen', 'green', 'yellow', 'orange', 'red', 'darkred'],
                                 index=[700, 1300, 2000, 2800, 5000, 10000],
                                 index_display=[700, 1500, 2500, 4500, 7000, 10000],
                                 vmin=700, vmax=10000,
                                 caption="par département en €")
    # normal Colormap class utilisation, work with branca library from pip
    # colormap = cm.LinearColormap(colors=['darkgreen', 'green', 'yellow', 'orange', 'red', 'darkred'],
    #                             index=[700, 1300, 2000, 2800, 5000, 10000],
    #                             vmin=700, vmax=10000,
    #                             caption="Prix median du m2 par département")
    # Map each departement's 2019 median price to a fill colour.
    df_appart["color"] = df_appart["2019_median"].apply(colormap)
    df_maison["color"] = df_maison["2019_median"].apply(colormap)
    longitude, latitude = 45.8566, 2.3522
    map1 = DepartementMap(longitude, latitude, "Prix médian du m2 d'un bien immobilier en France")
    with open("data/immobilier/geo_data/departements.geojson.txt", "r") as file:
        json_departements = json.load(file)
    # Draw every departement; those missing from the price data are drawn
    # without figures (white fill + "missing data" popup).
    for departement in json_departements["features"]:
        d_geodata = {"type": "FeatureCollection", "features": [departement]}
        if departement["properties"]["code"] in df_appart.index.tolist():
            map1.draw_departement(d_geodata, df_appart.loc[departement["properties"]["code"],:],
                                  df_maison.loc[departement["properties"]["code"],:])
        else:
            map1.draw_departement(d_geodata)
    map1.save(file_path="templates/immobilier/maps/map_departement_folium.html")
|
from math import factorial
def tarkista(n, p):
    """Validate lottery parameters: *n* total balls, *p* balls drawn.

    Prints a Finnish error message and returns False for invalid input;
    returns True when 0 <= p <= n.
    """
    if p < 0 or n < 0:
        print("Pallojen määrän oltava positiivinen luku.")
        # Fixed: previously fell through and implicitly returned None.
        return False
    elif p > n:
        print("Arvottavia palloja saa olla enintään pallojen kokonaismäärän verran.")
        # Fixed: previously fell through and implicitly returned None.
        return False
    else:
        return True
def laske(n, p):
    """Return the binomial coefficient C(n, p) = n! / ((n-p)! * p!).

    Fixed: integer floor division replaces the original float true
    division, which loses precision once the result exceeds 2**53.
    """
    t = int(n - p)
    nimittaja = factorial(n) // (factorial(t) * factorial(p))
    return nimittaja
def main():
    """Ask for lottery parameters and print the odds of a single row."""
    a = 0
    n = int(input("Syötä lottopallojen kokonaismäärä: "))
    p = int(input("Syötä arvottavien pallojen määrä: "))
    a = tarkista(n,p)
    if a == True:
        # denominator of the 1/N probability of getting p correct out of n
        nimittäjä = str(laske(n,p))
        p = str(p)
        print("Kun pelataan yksi rivi, todennäköisyys saada "+p+" oikein on 1/"+nimittäjä)
main()
import random
import subprocess
from base import *
import clsTestService
import enums
from general import General
from selenium.webdriver.common.keys import Keys
try:
import win32com.client
except:
pass
# This class is for multiple upload
class UploadEntry():
    """Plain data holder describing one file in a multi-upload batch."""
    # Class-level defaults; each is overwritten per instance in __init__.
    filePath = ''
    name = ''
    description = ''
    tags = ''
    timeout = 0
    retries = 0
    # Constructor
    def __init__(self, filePath, name, description, tags, timeout=60, retries=3):
        self.filePath = filePath
        self.name = name
        self.description = description
        self.tags = tags
        self.timeout = timeout    # seconds to wait for the upload to finish
        self.retries = retries    # attempts before giving up
class Upload(Base):
    # Class-level defaults, overwritten per instance in __init__.
    driver = None
    clsCommon = None
    def __init__(self, clsCommon, driver):
        # clsCommon: shared page-object hub; driver: Selenium WebDriver.
        self.driver = driver
        self.clsCommon = clsCommon
#=============================================================================================================
#Upload locators:
#=============================================================================================================
UPLOAD_MENU_DROP_DOWN_ELEMENT = ('id', 'uploadMenuDropDown')
DROP_DOWN_MEDIA_UPLOAD_BUTTON = ('xpath', ".//span[text()='Media Upload']")
CHOOSE_A_FILE_TO_UPLOAD_BUTTON = ('xpath', "//label[contains(.,'Choose a file to upload')]")
UPLOAD_COMPLETED_LABEL = ('xpath', "//strong[contains(.,'Upload Completed!')]")
UPLOAD_MENU_DROP_DOWN_ELEMENT = ('id', "uploadMenuDropDown")
UPLOAD_ENTRY_DETAILS_ENTRY_NAME = ('id', "Entry-name")
UPLOAD_ENTRY_DESCRIPTION_IFRAME = ('class_name', "wysihtml5-sandbox")
UPLOAD_ENTRY_DESCRIPTION_TEXT_BOX = ('xpath', "//div[@class='content']")
UPLOAD_ENTRY_DETAILS_ENTRY_DESCRIPTION = ('tag_name', 'body') #before using need to switch frame and click on the description box
UPLOAD_ENTRY_DETAILS_ENTRY_TAGS = ('id', 's2id_Entry-tags')
UPLOAD_ENTRY_DETAILS_ENTRY_TAGS_INPUT = ('xpath', "//input[contains(@id,'s2id_autogen') and contains(@class, 'focused')]")
UPLOAD_ENTRY_SAVE_BUTTON = ('xpath', "//button[@id='Entry-submit']")
UPLOAD_ENTRY_PROGRESS_BAR = ('id', 'progressBar')
UPLOAD_ENTRY_SUCCESS_MESSAGE = ('xpath', "//span[contains(.,'Your changes have been saved.')]")
UPLOAD_ENTRY_DISCLAIMER_CHECKBOX = ('id', 'disclaimer-Accepted')
UPLOAD_GO_TO_MEDIA_BUTTON = ('xpath', "//a[@class='btn btn-link' and text() = 'Go To Media']")
UPLOAD_ENABLE_SCHEDULING_RADIO = ('id', 'schedulingRadioButtons_5a65e5d39199d-scheduled')
DROP_DOWN_VIDEO_QUIZ_BUTTON = ('xpath', ".//span[text()='Video Quiz']")
DROP_DOWN_YOUTUBE_BUTTON = ('xpath', ".//span[text()='YouTube']")
VIDEO_QUIZ_PAGE_TITLE = ('xpath', "//h1[@class='editorBreadcrumbs inline']")
YOUTUBE_PAGE_TITLE = ('xpath', "//h1[@class='uploadBoxHeading']")
YOUTUBE_PAGE_LINK_FIELD = ('id', 'externalContentId')
# Elements for multiple upload
UPLOAD_UPLOADBOX = ('xpath', "//div[@id='uploadbox[ID]']") #Replace [ID] with uploadbox ID
UPLOAD_MULTIPLE_CHOOSE_A_FILE_BUTTON = ('xpath', "//label[@for='fileinput[ID]']") #Replace [ID] with uploadbox ID
UPLOAD_GO_TO_MEDIA_BUTTON = ('xpath', "//a[@id='back' and contains(text(), 'Go To Media')]")
#============================================================================================================
def clickMediaUpload(self):
try:
parentElement = self.get_element(self.UPLOAD_MENU_DROP_DOWN_ELEMENT)
self.get_child_element(parentElement, self.DROP_DOWN_MEDIA_UPLOAD_BUTTON).click()
return True
except NoSuchElementException:
writeToLog("INFO","FAILED to click on Media Upload from drop down menu")
return False
# @Author: Inbar Willman
def clickVideoQuiz(self):
try:
parentElement = self.get_element(self.UPLOAD_MENU_DROP_DOWN_ELEMENT)
self.get_child_element(parentElement, self.DROP_DOWN_VIDEO_QUIZ_BUTTON).click()
return True
except NoSuchElementException:
writeToLog("INFO","FAILED to click on Video Quiz from drop down menu")
return False
# @Author: Inbar Willman
def clickYoutube(self):
try:
parentElement = self.get_element(self.UPLOAD_MENU_DROP_DOWN_ELEMENT)
self.get_child_element(parentElement, self.DROP_DOWN_YOUTUBE_BUTTON).click()
return True
except NoSuchElementException:
writeToLog("INFO","FAILED to click on youtube from drop down menu")
return False
# @Author: Tzachi Guetta
# In case disclaimer module is turned on and set to "before upload"
# The following function will check that upload is prevented before disclaimer's check-box was checked.
    def handleDisclaimerBeforeUplod(self):
        """Verify the disclaimer gates the upload and accept it.

        The 'Choose a file' button must NOT be visible before the
        disclaimer checkbox is ticked.  Returns True when the flow behaved
        as expected (and the checkbox was clicked), False otherwise.
        """
        try:
            if self.wait_visible(self.clsCommon.upload.CHOOSE_A_FILE_TO_UPLOAD_BUTTON, 5) == False:
                # button hidden as expected -> accept the terms of use
                if self.click(self.UPLOAD_ENTRY_DISCLAIMER_CHECKBOX) == False:
                    writeToLog("INFO","FAILED to click on disclaimer check-box")
                    return False
            else:
                # upload button shown before the user agreed == a bug
                writeToLog("INFO","FAILED, upload button is presented before User agree to terms of Use (disclaimer)")
                return False
        except NoSuchElementException:
            return False
        return True
# @Author: Tzachi Guetta
def extractEntryID (self, locator):
try:
div = self.get_element(locator)
href = div.get_attribute('href')
entryID = href.split("/")[len(href.split("/"))-1]
except NoSuchElementException:
return False
return entryID
# @Authors: Oleg Sigalov & Tzachi Guetta
def uploadEntry(self, filePath, name, description, tags, timeout=60, disclaimer=False, retries=3, uploadFrom=enums.Location.UPLOAD_PAGE):
for i in range(retries):
try:
if i > 0:
writeToLog("INFO","FAILED to upload after " + str(i) + " retries of " + str(retries) + ". Going to upload again...")
# Convert path for Windows
filePath = filePath.replace("/", "\\")
filePath = filePath.replace("\\\\", "\\")
# Navigate to upload page
if uploadFrom == enums.Location.UPLOAD_PAGE:
if self.navigateToUploadPage() == False:
continue
#checking if disclaimer is turned on for "Before upload"
if disclaimer == True:
if self.clsCommon.upload.handleDisclaimerBeforeUplod() == False:
writeToLog("INFO","FAILED, Handle disclaimer before upload failed")
continue
# Wait page load
self.wait_for_page_readyState()
# If running on remote node
if localSettings.LOCAL_SETTINGS_RUN_MDOE == localSettings.REMOTE_RUN_MODE:
# Because of miltiple run at same time, we apply random wait
timeDelay = random.uniform(1.1, 2.9)
sleep(timeDelay)
# Click Choose a file to upload
if self.click(self.CHOOSE_A_FILE_TO_UPLOAD_BUTTON) == False:
writeToLog("DEBUG","FAILED to click on 'Choose a file to upload' button")
continue
sleep(3)
# Type in a file path
if self.typeIntoFileUploadDialog(filePath) == False:
continue
# Wait for success message "Upload Completed"
startTime = datetime.datetime.now().replace(microsecond=0)
if self.waitUploadCompleted(startTime, timeout) == False:
continue
if self.isErrorUploadMessage() == True:# TODO verify it doesn't take time when there is no error
writeToLog("INFO","FAILED to upload entry, error message appeared on the screen: 'Oops! Entry could not be created.'")
continue
# Fill entry details: name, description, tags
if self.fillFileUploadEntryDetails(name, description, tags) == False:
continue
if self.getAppUnderTest() == enums.Application.BLACK_BOARD:
self.get_body_element().send_keys(Keys.TAB)
self.get_body_element().send_keys(Keys.PAGE_DOWN)
# Click Save
if self.click(self.UPLOAD_ENTRY_SAVE_BUTTON) == False:
writeToLog("DEBUG","FAILED to click on 'Save' button")
continue
sleep(3)
# Wait for loader to disappear
self.clsCommon.general.waitForLoaderToDisappear()
# Wait for 'Your changes have been saved.' message
if self.wait_visible(self.UPLOAD_ENTRY_SUCCESS_MESSAGE, 45) != False:
entryID = self.extractEntryID(self.UPLOAD_GO_TO_MEDIA_BUTTON)
if entryID != None:
writeToLog("INFO","Successfully uploaded entry: '" + name + "'"", entry ID: '" + entryID + "'")
return entryID
else:
writeToLog("INFO","FAILED to upload entry, no success message was appeared'")
continue
except Exception:
writeToLog("INFO","FAILED to upload entry, retry number " + str(i))
pass
def uploadMulitple(self, uploadEntrieList, disclaimer=False, uploadFrom=enums.Location.UPLOAD_PAGE):
    """Upload several media entries in one session, one upload box per entry.

    Each item of uploadEntrieList must expose filePath, name, description,
    tags and timeout. Returns True when every entry is saved successfully,
    False on the first failure.
    (Method name kept as-is — callers already use the misspelled 'uploadMulitple'.)
    """
    uploadboxCount = 1
    if uploadFrom == enums.Location.UPLOAD_PAGE:
        # Click Add New
        if self.click(General.ADD_NEW_DROP_DOWN_BUTTON, multipleElements=True) == False:
            writeToLog("DEBUG","FAILED to click on 'Add New' button")
            return False
        # Click Media Upload
        if self.clickMediaUpload() == False:
            writeToLog("DEBUG","FAILED to click on 'Media Upload' button")
            return False
        # Checking if disclaimer is turned on for "Before upload"
        if disclaimer == True:
            if self.clsCommon.upload.handleDisclaimerBeforeUplod() == False:
                writeToLog("INFO","FAILED, Handle disclaimer before upload failed")
                return False
    # Wait page load
    self.wait_for_page_readyState()
    for entry in uploadEntrieList:
        if self.fillFileUploadEntryDetailsMultiple(entry.filePath, entry.name, entry.description, entry.tags, entry.timeout, uploadboxCount) == False:
            return False
        # Locators scoped to the current upload box
        saveButton = ('xpath', self.UPLOAD_UPLOADBOX[1].replace('[ID]', str(uploadboxCount)) + self.UPLOAD_ENTRY_SAVE_BUTTON[1])
        # Click Save
        if self.click(saveButton) == False:
            writeToLog("DEBUG","FAILED to click on 'Save' button")
            return False
        # Click Save another time, it's a workaround
        if self.click(saveButton) == False:
            writeToLog("DEBUG","FAILED to click on 'Save' button")
            return False
        sleep(3)
        # Wait for loader to disappear
        self.clsCommon.general.waitForLoaderToDisappear()
        # Wait for 'Your changes have been saved.' message
        successMessage = ('xpath', self.UPLOAD_UPLOADBOX[1].replace('[ID]', str(uploadboxCount)) + self.UPLOAD_ENTRY_SUCCESS_MESSAGE[1])
        if self.wait_visible(successMessage, 45) != False:
            entryID = self.extractEntryID(('xpath', self.UPLOAD_UPLOADBOX[1].replace('[ID]', str(uploadboxCount)) + self.UPLOAD_GO_TO_MEDIA_BUTTON[1]))
            # BUG FIX: the original log line concatenated adjacent string
            # literals ("'"" …) leaving a stray quote in the message, and
            # raised TypeError when entryID was None; str() makes it safe.
            writeToLog("INFO","Successfully uploaded entry: '" + entry.name + "', entry ID: '" + str(entryID) + "'")
            uploadboxCount += 1
            sleep(1)
        else:
            writeToLog("INFO","FAILED to upload entry, no success message was appeared'")
            return False
    return True
# Fill basic entry details after upload is completed, only: name, description, tags
def fillFileUploadEntryDetailsMultiple(self, filePath, name="", description="", tags="", timeout=60, uploadboxId=""):
    """Upload one file in the multiple-upload flow and fill its entry details.

    All lookups are scoped to the upload box identified by uploadboxId:
    pick the file, wait for the upload to complete, then fill the name,
    description and tags fields. Returns True on success, False on the
    first failed step.
    """
    # Get the uploadbox element
    uploadBoxElement = self.get_element(self.replaceInLocator(self.UPLOAD_UPLOADBOX, '[ID]', str(uploadboxId)))
    # Click Choose a file to upload
    if self.click_child(uploadBoxElement, self.replaceInLocator(self.UPLOAD_MULTIPLE_CHOOSE_A_FILE_BUTTON, '[ID]', str(uploadboxId))) == False:
        writeToLog("DEBUG","FAILED to click on 'Choose a file to upload' button")
        return False
    # Type in a file path (drives the native OS file dialog)
    if self.typeIntoFileUploadDialog(filePath) == False:
        return False
    # Wait for success message "Upload Completed"; startTime is used to log duration
    startTime = datetime.datetime.now().replace(microsecond=0)
    if self.waitUploadCompleted(startTime, timeout, uploadboxId) == False:
        return False
    if self.isErrorUploadMessage(uploadboxId) == True:# TODO verify it doesn't take time when there is no error
        writeToLog("INFO","FAILED to upload entry, error message appeared on the screen: 'Oops! Entry could not be created.'")
        return False
    entryNameElement = self.wait_visible_child(uploadBoxElement, self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME, 30)
    if entryNameElement == False:
        writeToLog("INFO","FAILED to find an entry name field:'" + name + "'")
        return False
    # Clear any auto-filled name before typing the requested one
    entryNameElement.clear()
    if self.send_keys_to_child(uploadBoxElement, self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME, name) == False:
        writeToLog("INFO","FAILED to fill an entry name:'" + name + "'")
        return False
    if self.fillFileUploadEntryDescription(description, uploadboxId) == False:
        writeToLog("INFO","FAILED to fill an entry Description:'" + description + "'")
        return False
    # if self.fillFileUploadEntryTagsMultiple(tags, uploadboxId) == False:
    #     writeToLog("INFO","FAILED to fill an entry Tags:'" + tags + "'")
    #     return False
    if self.fillFileUploadEntryTags(tags, uploadboxId) == False:
        writeToLog("INFO","FAILED to fill an entry Tags:'" + tags + "'")
        return False
    return True
# The method supports BOTH single and multiple upload
def waitUploadCompleted(self, startTime, timeout=60, uploadboxId=-1):
    """Wait until the 'Upload Completed!' label appears and log the duration.

    uploadboxId == -1 selects the single-upload (page-global) label; any
    other value scopes the lookup to that upload box. Returns True when
    the label appeared within timeout seconds, False otherwise.
    """
    if uploadboxId == -1:
        # Single-upload flow: look the label up globally on the page
        isUploadCompleted = self.wait_for_text(self.UPLOAD_COMPLETED_LABEL, "Upload Completed!", timeout)
    else:
        # Multiple-upload flow: scope the lookup to the requested upload box
        uploadBoxElement = self.get_element(self.replaceInLocator(self.UPLOAD_UPLOADBOX, '[ID]', str(uploadboxId)))
        isUploadCompleted = self.wait_for_child_text(uploadBoxElement, self.UPLOAD_COMPLETED_LABEL, "Upload Completed!", timeout)
    if isUploadCompleted == False:
        writeToLog("INFO","Upload didn't finish after timeout: " + str(timeout) + " seconds")
        return False
    uploadDuration = datetime.datetime.now().replace(microsecond=0) - startTime
    writeToLog("INFO","Upload finished after: " + str(uploadDuration))
    return True
def typeIntoFileUploadDialog(self, filePath):
    """Type filePath into the native OS 'Choose File' dialog.

    Remote grid runs delegate to the shared helper; local runs launch a
    browser-specific AutoIt helper executable. Returns True on success,
    False on an unknown browser or any exception.
    """
    try:
        if localSettings.LOCAL_SETTINGS_RUN_MDOE == localSettings.REMOTE_RUN_MODE:
            # Remote node: the grid-side helper drives the dialog
            self.clsCommon.instertPathInFileUploadWindows(filePath)
        else:
            if (localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_IE):
                # TODO IE not implemented yet
                subprocess.call([localSettings.LOCAL_SETTINGS_AUTOIT_SCRIPTS + r'\openFile.exe' ,filePath])
            elif(localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_FIREFOX):
                subprocess.call([localSettings.LOCAL_SETTINGS_AUTOIT_SCRIPTS + r'\openFileFirefox.exe' ,filePath])
            elif(localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_CHROME):
                subprocess.call([localSettings.LOCAL_SETTINGS_AUTOIT_SCRIPTS + r'\openFileChrome.exe' ,filePath])
            else:
                writeToLog("INFO","FAILED to type into 'Choose File' window, unknown browser: '" + localSettings.LOCAL_RUNNING_BROWSER + "'")
                return False
        return True
    except Exception:
        writeToLog("INFO","FAILED to type into 'Choose File' window")
        return False
# Fill basic entry details after upload is completed, only: name, description, tags
def fillFileUploadEntryDetails(self, name="", description="", tags=""):
    """Fill name, description and tags on the single-upload details form.

    Returns True on success, False on any failed step. BUG FIX: the
    original fell off the end and returned None on success; callers that
    test `== False` behaved the same, but the explicit True makes the
    contract consistent with the sibling *Multiple variant and safe for
    plain truthiness checks.
    """
    if self.wait_visible(self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME) == False:
        return False
    # Clear the auto-filled name before typing the requested one
    self.get_element(self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME).clear()
    if self.send_keys(self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME, name) == False:
        writeToLog("INFO","FAILED to fill a entry name:'" + name + "'")
        return False
    sleep(2)
    if self.fillFileUploadEntryDescription(description) == False:
        writeToLog("INFO","FAILED to fill a entry Description:'" + description + "'")
        return False
    sleep(2)
    if self.fillFileUploadEntryTags(tags) == False:
        writeToLog("INFO","FAILED to fill a entry Tags:'" + tags + "'")
        return False
    return True
# The method supports BOTH single and multiple upload
def fillFileUploadEntryDescription(self, text, uploadboxId=-1):
    """Type text into the entry Description rich-text box (inside an iframe).

    uploadboxId == -1 selects the single-upload locators; any other value
    scopes the lookup to that upload box. Returns True on success.

    BUG FIX: the original's switch_to_default_content() sat after the
    returns and was unreachable, leaving the driver inside the description
    iframe; it is now executed before every return. (Callers that next
    call fillFileUploadEntryTags were unaffected because that method
    switches to default content first.)
    """
    if uploadboxId != -1:
        # Get the uploadbox element
        uploadBoxElement = self.get_element(self.replaceInLocator(self.UPLOAD_UPLOADBOX, '[ID]', str(uploadboxId)))
        # Switch to Description iFrame
        descpriptionIframe = self.get_child_element(uploadBoxElement, self.UPLOAD_ENTRY_DESCRIPTION_IFRAME)
    else:
        # Switch to Description iFrame
        descpriptionIframe = self.get_element(self.UPLOAD_ENTRY_DESCRIPTION_IFRAME)
    # Switch to iframe which contains the description text box
    self.driver.switch_to.frame(descpriptionIframe)
    # Click on Description text box; WebElement.click() returns None, so the
    # original `== False` check could never fire — the plain call is equivalent.
    self.get_element(self.UPLOAD_ENTRY_DESCRIPTION_TEXT_BOX).click()
    sleep(2)
    # Enter text Description
    typed = self.clear_and_send_keys(self.UPLOAD_ENTRY_DETAILS_ENTRY_DESCRIPTION, text)
    # Always leave the description iframe before returning
    self.switch_to_default_content()
    if typed == True:
        return True
    writeToLog("DEBUG","FAILED to type in Description")
    return False
# The method supports BOTH single and multiple upload
# tags - should provided with ',' as a delimiter and comma (',') again in the end of the string
# for example 'tags1,tags2,'
def fillFileUploadEntryTags(self, tags, uploadboxId=-1):
    """Type the tags string into the entry's Tags field.

    uploadboxId == -1 selects the single-upload locators; any other value
    scopes the lookup to that upload box. Returns True on success, False
    on any failed step.
    """
    try:
        self.switch_to_default_content()
        # Embedded deployments wrap the page in an application iframe
        if self.getAppUnderTest() == enums.Application.BLACK_BOARD:
            self.clsCommon.blackBoard.switchToBlackboardIframe()
        elif self.getAppUnderTest() == enums.Application.SHARE_POINT:
            self.clsCommon.sharePoint.switchToSharepointIframe()
        # Scroll down so the Tags field comes into view
        self.get_body_element().send_keys(Keys.PAGE_DOWN)
        sleep(1)
        # If upload single (method: uploadEntry)
        if uploadboxId == -1:
            tagsElement = self.get_element(self.UPLOAD_ENTRY_DETAILS_ENTRY_TAGS)
        else:
            # Get the uploadbox element
            uploadBoxElement = self.get_element(self.replaceInLocator(self.UPLOAD_UPLOADBOX, '[ID]', str(uploadboxId)))
            tagsElement = self.get_child_element(uploadBoxElement, self.UPLOAD_ENTRY_DETAILS_ENTRY_TAGS)
    except NoSuchElementException:
        writeToLog("DEBUG","FAILED to get Tags filed element")
        return False
    if self.clickElement(tagsElement) == False:
        writeToLog("DEBUG","FAILED to click on Tags filed")
        return False
    sleep(1)
    if(localSettings.LOCAL_RUNNING_BROWSER == clsTestService.PC_BROWSER_CHROME):
        # Remove the Mask over all the screen (over tags filed also)
        maskOverElement = self.get_element(self.clsCommon.channel.CHANNEL_REMOVE_TAG_MASK)
        self.driver.execute_script("arguments[0].setAttribute('style','display: none;')",(maskOverElement))
        # Click again now that the mask is hidden
        if self.clickElement(tagsElement) == False:
            writeToLog("DEBUG","FAILED to click on Tags filed")
            return False
    if uploadboxId == -1: # -1 stands for single
        if self.send_keys(self.UPLOAD_ENTRY_DETAILS_ENTRY_TAGS_INPUT, tags) == True:
            return True
    else:
        if self.send_keys_to_child(uploadBoxElement, self.UPLOAD_ENTRY_DETAILS_ENTRY_TAGS_INPUT, tags) == True:
            return True
    writeToLog("DEBUG","FAILED to type in Tags")
    return False
# The method supports BOTH single and multiple upload
# Return true if error message ('Oops! Entry could not be created.') appeared after upload
def isErrorUploadMessage(self, uploadboxId=-1):
    """Return True when the upload progress bar shows the creation-error text.

    uploadboxId == -1 reads the single-upload progress bar; any other
    value scopes the lookup to that upload box.
    """
    if uploadboxId == -1:
        progressBarText = self.get_element_text(self.UPLOAD_ENTRY_PROGRESS_BAR)
    else:
        # Scope the progress-bar lookup to the requested upload box
        uploadBoxElement = self.get_element(self.replaceInLocator(self.UPLOAD_UPLOADBOX, '[ID]', str(uploadboxId)))
        progressBarText = self.get_element_child_text(uploadBoxElement, self.UPLOAD_ENTRY_PROGRESS_BAR)
    # None (no text found) compares unequal and yields False, as before
    return progressBarText == 'Oops! Entry could not be created.'
# Use after upload is done, from upload page
def clickGoToMyMedia(self):
    """Click the 'Go To Media' button shown after a successful upload; returns the click result."""
    return self.click(self.UPLOAD_GO_TO_MEDIA_BUTTON)
# @Author: Inbar Willman
def addNewVideoQuiz(self):
    """Navigate through the 'Add New' menu to the new Video Quiz page.

    Returns True once the video-quiz page title is visible, False on any
    failed step.
    """
    # Open the 'Add New' drop-down menu
    menuOpened = self.click(General.ADD_NEW_DROP_DOWN_BUTTON)
    if menuOpened == False:
        writeToLog("DEBUG","FAILED to click on 'Add New' button")
        return False
    # Choose the Video Quiz item
    quizClicked = self.clickVideoQuiz()
    if quizClicked == False:
        writeToLog("DEBUG","FAILED to click on 'Video Quiz' button")
        return False
    # Confirm the target page actually loaded
    pageVisible = self.wait_visible(self.VIDEO_QUIZ_PAGE_TITLE, 30)
    if pageVisible == False:
        writeToLog("DEBUG","FAILED to navigate to add new video quiz page")
        return False
    return True
# @Author: Tzachi Guetta
# Note: all entries were shared the same description & tags
def uploadEntries(self, entriesDict, entryDescription, entryTags):
    """Upload every entry in entriesDict (entry name -> file path).

    All entries share the same description and tags. Returns True when
    every upload succeeds, False on the first failure, a non-dict
    argument, or any exception.
    """
    try:
        # Checking if entriesNames list type
        if type(entriesDict) is not dict:
            writeToLog("INFO","FAILED, Entries list was not provided ")
            return False
        for entryName, entryFilePath in entriesDict.items():
            uploaded = self.uploadEntry(entryFilePath, entryName, entryDescription, entryTags)
            if uploaded == None:
                writeToLog("INFO","FAILED to upload entry: " + entryName)
                return False
    except Exception:
        return False
    return True
# @Author: Inbar Willman
def clickAddYoutube(self):
    """Navigate through the 'Add New' menu to the add-YouTube page.

    Returns True once the YouTube page title is visible, False on any
    failed step.
    """
    # Open the 'Add New' drop-down menu
    menuOpened = self.click(General.ADD_NEW_DROP_DOWN_BUTTON)
    if menuOpened == False:
        writeToLog("DEBUG","FAILED to click on 'Add New' button")
        return False
    # Choose the YouTube item
    youtubeClicked = self.clickYoutube()
    if youtubeClicked == False:
        writeToLog("DEBUG","FAILED to click on 'Yotube' button")
        return False
    # Confirm the target page actually loaded
    pageVisible = self.wait_visible(self.YOUTUBE_PAGE_TITLE, 30)
    if pageVisible == False:
        writeToLog("DEBUG","FAILED to navigate to add new youtube page")
        return False
    return True
# @Author: Inbar Willman
def addYoutubeEntry(self, youtubeLink, entryName):
    """Create a new YouTube entry from youtubeLink and rename it to entryName.

    Returns the new entry ID on success, False otherwise.

    NOTE(review): WebElement.send_keys() returns None, so the two
    `== False` checks on send_keys below can never fire — confirm they
    are really meant as failure detection.
    """
    youtubeField = self.get_element(self.YOUTUBE_PAGE_LINK_FIELD)
    #Insert youtube link in field
    if youtubeField.send_keys(youtubeLink) == False:
        writeToLog("DEBUG","FAILED to insert youtube link")
        return False
    #Click enter in field in order to see entry's fields
    if youtubeField.send_keys(Keys.ENTER) == False:
        writeToLog("DEBUG","FAILED to click on youtube link field")
        return False
    # Wait for loader to disappear
    self.clsCommon.general.waitForLoaderToDisappear()
    #Wait until tag element is displayed
    self.wait_visible(self.UPLOAD_ENTRY_DETAILS_ENTRY_TAGS, timeout=40)
    #Insert new name for entry (clear the auto-filled one first)
    self.get_element(self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME).clear()
    if self.send_keys(self.UPLOAD_ENTRY_DETAILS_ENTRY_NAME, entryName) == False:
        writeToLog("INFO","FAILED to fill a entry name:'" + entryName + "'")
        return False
    # Click Save
    if self.click(self.UPLOAD_ENTRY_SAVE_BUTTON) == False:
        writeToLog("DEBUG","FAILED to click on Save button")
        return False
    sleep(3)
    # Wait for loader to disappear
    self.clsCommon.general.waitForLoaderToDisappear()
    # Wait for 'Your changes have been saved.' message
    if self.wait_visible(self.UPLOAD_ENTRY_SUCCESS_MESSAGE, 45) != False:
        entryID = self.extractEntryID(self.UPLOAD_GO_TO_MEDIA_BUTTON)
        if entryID != None:
            writeToLog("INFO","Successfully uploaded youtube entry")
            return entryID
    else:
        writeToLog("INFO","FAILED to upload entry, no success message was appeared'")
        return False
def navigateToUploadPage(self):
    """Navigate to the media-upload page for the application under test.

    Embedded applications (Blackboard / SharePoint) need their own
    navigation first; then the shared 'Add New' -> 'Media Upload' menu
    path is used. Returns True on success, False on any failed click.
    """
    # Get the application under test and use each application's own method
    application = localSettings.LOCAL_SETTINGS_APPLICATION_UNDER_TEST
    if application == enums.Application.BLACK_BOARD:
        self.clsCommon.blackBoard.navigateToUploadPageBlackBoard()
    elif application == enums.Application.SHARE_POINT:
        self.clsCommon.sharePoint.navigateToUploadPageSharePoint()
    # Open the 'Add New' drop-down menu
    addNewClicked = self.click(General.ADD_NEW_DROP_DOWN_BUTTON, multipleElements=True)
    if addNewClicked == False:
        writeToLog("DEBUG","FAILED to click on 'Add New' button")
        return False
    # Choose the Media Upload item
    if self.clickMediaUpload() == False:
        writeToLog("DEBUG","FAILED to click on 'Media Upload' button")
        return False
    return True
# @Author: Inbar Willman
# Upload multiple entries with same filePath
def uploadMultipleEntries(self, filePath, entriesList, description, tags, timeout=60, disclaimer=False, retries=3, uploadFrom=enums.Location.UPLOAD_PAGE):
    """Upload the same file once per name in entriesList.

    Returns True when every upload succeeds, False on the first failure.
    BUG FIX: uploadEntry returns the new entry ID on success and None on
    failure — it never returns False — so the original `== False` check
    silently treated every failed upload as success; a truthiness check
    catches both None and False.
    """
    for entry in entriesList:
        if not self.uploadEntry(filePath, entry, description, tags, timeout, disclaimer, retries, uploadFrom):
            writeToLog("DEBUG","FAILED to upload entry as part of multiple upload")
            return False
    return True
def navigateToEntryPageFromUploadPage(self, entryName):
    """From the post-upload page, open the entry page of entryName.

    Clicks 'Go to media' and verifies the entry title becomes visible.
    Returns True on success, False otherwise.
    """
    if self.click(self.UPLOAD_GO_TO_MEDIA_BUTTON) == False:
        writeToLog("INFO","FAILED to click on 'go to media' button")
        return False
    # Build the entry-title locator for this specific entry name
    tmpEntry = (self.clsCommon.entryPage.ENTRY_PAGE_ENTRY_TITLE[0], self.clsCommon.entryPage.ENTRY_PAGE_ENTRY_TITLE[1].replace('ENTRY_NAME', entryName))
    #Check if we already in edit entry page
    if self.wait_visible(tmpEntry, 15) == False:
        writeToLog("INFO","FAILED, entry page for entry '" + entryName + "' did NOT open")
        return False
    sleep(2)
    writeToLog("INFO","Success, entry page was open successfully")
    return True
|
def solution(players, callings):
    """Return the final race ranking after processing every overtake call.

    players  -- list of runner names, ordered by current rank (index 0 = 1st).
    callings -- names called out; each call makes that runner overtake the
                one directly ahead.

    The original worked but rebound the `players` parameter to a dict
    (shadowing), built index ranges with list comprehensions, and zipped
    over an already-converted dict; this version keeps the same O(1)-per-call
    two-way mapping with clear names.
    """
    name_to_rank = {name: rank for rank, name in enumerate(players)}
    rank_to_name = {rank: name for rank, name in enumerate(players)}
    for caller in callings:
        rank = name_to_rank[caller]
        ahead = rank_to_name[rank - 1]
        # Swap the caller with the runner directly ahead in both mappings
        name_to_rank[caller], name_to_rank[ahead] = rank - 1, rank
        rank_to_name[rank - 1], rank_to_name[rank] = caller, ahead
    return [rank_to_name[i] for i in range(len(rank_to_name))]
import os
import requests
from .Photo import Photo
from .repository import UnsplashRepository
class UnsplashService:
    """Thin client for the Unsplash photos API, keyed by internal animal ids."""

    def __init__(self, repository: UnsplashRepository):
        self.base_url = "https://api.unsplash.com"
        # Fail fast with KeyError when the API key is missing from the env
        self.access_key = os.environ["UNSPLASH_ACCESS_KEY"]
        self.repository = repository

    def get_photo(self, animal_id: str) -> Photo:
        """Resolve animal_id to an Unsplash photo id and fetch its regular-size URL.

        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout if Unsplash does not answer in time.
        """
        unsplash_id = self.repository.find_by_animal_id(animal_id)
        payload = {"client_id": self.access_key}
        url = f"{self.base_url}/photos/{unsplash_id}"
        # timeout added so a stalled Unsplash call cannot hang the caller
        # forever; the leftover debug print(data) was removed.
        response = requests.get(url, params=payload, timeout=10)
        response.raise_for_status()
        data = response.json()
        return Photo(data["urls"]["regular"])
|
from enum import Enum
class tags(Enum):
    """Numeric wire identifiers for the protocol's message types."""
    # Handshake / discovery
    discover = 0
    acknowledge = 1
    # Authentication exchange
    authenticate = 2
    request_auth = 3
    auth = 4
    # Messaging
    message = 10
    message_ack = 11
    # Subscription management
    sub_declare = 12
    sub_removal = 13
    sub_ack = 14
    # Payload
    data = 100
|
from django.shortcuts import render
from .models import Product
from orders.models import Order
from .forms import RegisterForm
from .forms import LoginForm
from .forms import ResetTelForm
from products.models import Type
from products.models import Photo
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.http.response import JsonResponse
from django.contrib.auth import login, logout
from django.db.models import F
from main.smsc_api import *
from main.forms import generate_pw
from transliterate import translit
import random
import json
from django.views.decorators.cache import cache_control
def get_hide_username(username, percent_hide):
    """Return username with roughly percent_hide% of its characters masked by '*'.

    The first character is never masked (positions are drawn from
    1..len-1), and because random positions may repeat, the actual number
    of masked characters can be lower than requested. Output is random
    from call to call.

    BUG FIX: the original's `''.join(guessed_string)` discarded its result
    (strings are immutable) — the dead statement is removed.
    """
    guessed_string = username
    len_part_hide = round((len(username) / 100) * percent_hide)
    used_letters = []
    # Re-seed from OS entropy; masking positions differ on every call
    random.seed(version=2)
    for _ in range(len_part_hide):
        hide_pos = random.randint(1, len(username) - 1)
        if hide_pos not in used_letters:
            used_letters.append(hide_pos)
            guessed_string = guessed_string[:hide_pos] + '*' + guessed_string[hide_pos + 1:]
    return guessed_string
@cache_control(max_age=0, no_cache=True, no_store=True, must_revalidate=True)
def login_logout(request):
    """AJAX endpoint handling logout, login and registration in one view.

    Dispatches on the number of POSTed fields:
      * 0 fields   -> logout: basket orders are detached from the client
                      (dropping the personal discount) and re-attached to
                      the fresh anonymous session key
      * <= 3 fields -> login: anonymous basket orders gain the client's
                      personal discount
      * > 3 fields -> registration of a new user

    NOTE(review): non-POST requests and an invalid login form fall off the
    end and return None (a Django error) — confirm this view is only hit
    via these exact POST payloads.
    """
    if request.method == 'POST':
        # Make sure the anonymous visitor has a session key to tie orders to
        if not request.session.session_key:
            request.session.save()
        session_key = request.session.session_key
        if len(request.POST.dict()) == 0:
            # --- Logout branch ---
            if request.user.is_active:
                # Detach basket orders from the client and remove the discount
                Order.objects.filter(client=request.user.client, present_in_basket=True, session_key=session_key).update(
                    client=None, discount_total=F('discount_total')-request.user.client.discount_client)
                old_session_key = session_key
                logout(request)
                # logout() cycled the session; move the basket to the new key
                if not request.session.session_key:
                    request.session.save()
                session_key = request.session.session_key
                Order.objects.filter(client=None, present_in_basket=True,
                                     session_key=old_session_key).update(session_key=session_key)
                data = {'success': True}
            else:
                data = {'success': False, 'error': 'Сам не знаю, що за помилка!'}
            return HttpResponse(json.dumps(data), content_type="application/json")
        if len(request.POST.dict()) <= 3:
            # --- Login branch ---
            login_form = LoginForm(request.POST or None)
            if login_form.is_valid():
                data = {}
                username = login_form.cleaned_data['username']
                password = login_form.cleaned_data['password']
                user = authenticate(username=username, password=password)
                if user:
                    if user.is_active:
                        login(request, user)
                        # Attach the anonymous basket to the client, apply discount
                        Order.objects.filter(client=None, present_in_basket=True, session_key=session_key).update(client=user.client,
                                             discount_total=F('discount_total')+user.client.discount_client)
                        data = {'success': True}
                    else:
                        # NOTE(review): the error texts for the inactive-user
                        # and wrong-credentials branches look swapped — confirm.
                        data = {'success': False, 'error': 'Провірте правильність паролю чи логіну'}
                else:
                    data = {'success': False, 'error': 'Користувач не активний або відсутній'}
                return HttpResponse(json.dumps(data), content_type="application/json")
        else:
            if len(request.POST.dict()) > 3:
                # --- Registration branch ---
                register_form = RegisterForm(request.POST or None)
                if register_form.is_valid():
                    username = register_form.cleaned_data['username']
                    real_name = register_form.cleaned_data['real_name']
                    password = register_form.cleaned_data['password']
                    if real_name:
                        # Split "First … Last" into first/last name
                        lst = real_name.split()
                        last_name = lst[-1]
                        first_name = lst[0]
                    else:
                        first_name = username
                        last_name = username
                    email = register_form.cleaned_data['email']
                    phone = register_form.cleaned_data['phone']
                    new_user = User.objects.create_user(email=email, username=username,
                                                        first_name=first_name, last_name=last_name, is_active=True)
                    new_user.client.real_name = real_name
                    new_user.client.phone = phone
                    new_user.set_password(password)
                    new_user.save()
                    return JsonResponse(register_form.cleaned_data)
                else:
                    return JsonResponse(register_form.errors)
def password_reset_via_tel(request):
    """Reset a user's password and send the new one by SMS.

    Looks the user up by the client's phone number, generates a new
    password, deactivates the account and texts the password together
    with a partially-masked username. Re-renders the confirm template on
    any failure (unknown phone, empty phone, non-POST request).
    """
    if request.method == 'POST':
        reset_tel_form = ResetTelForm(request.POST or None)
        if reset_tel_form.is_valid():
            phone = reset_tel_form.cleaned_data['phone']
            if phone:
                # Double-length generated password
                password_reset = generate_pw() + generate_pw()
                try:
                    user = User.objects.get(client__phone=phone)
                except User.DoesNotExist:
                    return render(request, 'password_reset/password_reset_via_tel_confirm.html', locals()) #no_user
                else:
                    smsc = SMSC()
                    # Local dialing prefix prepended to the stored number
                    phone_number = '8'+phone
                    # Mask ~50% of the username before putting it in the SMS
                    login_shadow = get_hide_username(user.username, 50)
                    full_str = translit("Для Вашого логіна ", 'uk', reversed=True)+login_shadow+translit(" парол було успішно змінено. Новий парол: ", 'uk', reversed=True)+password_reset
                    r = smsc.send_sms(phone_number, full_str, sender="MilaTort Team")
                    user.set_password(password_reset)
                    # Deactivate until the user returns with the new password
                    user.is_active = False
                    user.save()
                    return render(request, 'password_reset/password_reset_via_tel_done.html', context={'phone': phone_number})
            else:
                return render(request, 'password_reset/password_reset_via_tel_confirm.html', locals())
    return render(request, 'password_reset/password_reset_via_tel_confirm.html', locals())
@cache_control(max_age=0, no_cache=True, no_store=True, must_revalidate=True)
def list_product(request, slug):
    """Render the product listing for a single product type (by slug)."""
    list_of_types = Type.objects.get(slug_type__iexact=slug)
    category_for_back = list_of_types.category.slug_category
    product_of_type = Product.objects.filter(
        category_plus_type_product_id=list_of_types.id, is_active=True
    ).order_by('category_plus_type_product__category__direction_cat')
    # One main photo (or None) per product, aligned with product_of_type order
    photos_of_product = [
        Photo.objects.filter(product_id=item.id, is_active=True, main_photo=True).first()
        for item in product_of_type
    ]
    # Remember where the visitor came from so detail pages can link back
    request.session['referer_path'] = request.build_absolute_uri()
    return render(request, 'list_product/list_product.html', context={'product_of_type': product_of_type,
                                                                      'title_of_type': list_of_types,
                                                                      'photos_of_product': photos_of_product,
                                                                      'category_for_back': category_for_back})
|
__author__ = 'Matthijs'
class RequestVars:
    """Simple holder for the parsed pieces of an incoming HTTP request.

    NOTE(review): POST, GET and Path are class-level attributes, so the
    dicts are shared by every instance until shadowed by an instance
    assignment — confirm this sharing is intended.
    """
    POST = {}  # POSTed form variables
    GET = {}   # query-string variables
    Path = ''  # request path
|
from .utils import decorator
class Test(object):
    """Description of a single connection test target.

    `name` defaults to "host:port" when not given; `forced_result`, when
    set, makes the runner skip the callback and use that value directly.
    """

    def __init__(self, accept, description, host, port, cafile=None, name=None, forced_result=None):
        self.accept = accept
        self.description = description
        self.host = host
        self.port = port
        self.cafile = cafile
        # Fall back to a "host:port" label when no explicit name was given
        self.name = "{}:{}".format(host, port) if name is None else name
        self.forced_result = forced_result
def run(gen, callback, *args, **keys):
    """Drive a (possibly nested) generator of Test objects as a trampoline.

    Yields (test, result) pairs. Each value a generator produces is either
    a Test (executed via callback unless forced_result overrides it), a
    callable (invoked to obtain a sub-generator), or a sub-generator to
    descend into. Results are sent back into the producing generator, and
    a generator's PEP 380 "return value" is propagated to its parent.
    """
    if callable(gen):
        gen = gen()
    # Explicit stack instead of recursion so nested groups can be arbitrarily deep
    stack = [gen]
    result = None
    while stack:
        try:
            value = stack[-1].send(result)
        except StopIteration as stop:
            # Interpret the generator's "return value" as described in PEP 380.
            # See: https://www.python.org/dev/peps/pep-0380/#enhancements-to-stopiteration
            result = stop.args[0] if stop.args else None
            stack.pop()
            continue
        if isinstance(value, Test):
            # A concrete test: run it via callback unless a result is forced
            if value.forced_result is None:
                result = callback(value, *args, **keys)
            else:
                result = value.forced_result
            yield value, result
            continue
        if callable(value):
            value = value()
        # A sub-generator: descend into it with a fresh result
        result = None
        stack.append(value)
@decorator
def testenv(func, *args, **keys):
    """Wrap func into a zero-argument thunk so the runner can invoke it lazily."""
    def _testenv():
        # Arguments are captured now; the call happens when the runner executes it
        return func(*args, **keys)
    return _testenv
@testenv
def testgroup(*funcs):
    """Yield the given test generators one by one, forming a single lazy group."""
    for func in funcs:
        yield func
|
from django.contrib import admin
from accounts.models import UserProfile
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
class UserAdminInline(admin.StackedInline):
    # Show the related UserProfile inline on the User admin page
    model = UserProfile
class UserAdmin(BaseUserAdmin):
    # Extend Django's stock user admin with the profile inline
    inlines = (UserAdminInline, )
# Replace the default User admin registration with the profile-aware one
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
import argparse
from argparse import RawTextHelpFormatter
desc = """==================================================================================================
Bias factorized, base-resolution deep learning models of chromatin accessibility reveal
cis-regulatory sequence syntax, transcription factor footprints and regulatory variants
=================================================================================================="""
def read_parser():
parser = argparse.ArgumentParser(description=desc,formatter_class=RawTextHelpFormatter)
subparsers = parser.add_subparsers(help="Must be eithier 'pipeline', 'train', 'qc', 'bias', 'prep', 'pred_bw', 'contribs_bw', 'modisco_motifs' ,'footprints', or 'snp_score'.", required=True, dest='cmd')
# main parsers
pipeline_parser = subparsers.add_parser("pipeline", help="End-to-end pipline with train, quality check and test for bias factorized ChromBPNet model")
train_parser = subparsers.add_parser("train", help="Train bias factorized ChromBPNet model")
qc_parser = subparsers.add_parser("qc", help="Do quality checks and get test metrics for bias factorized ChromBPNet model")
# bias parsers
bias_parser_full = subparsers.add_parser("bias", help="Tools to train, quality check and test bias model")
bias_parser_sub = bias_parser_full.add_subparsers(help="Must be eithier 'pipeline' 'train', 'qc'.", required=True, dest='cmd_bias')
bias_parser = bias_parser_sub.add_parser("pipeline", help="End-to-end pipline with train, quality check and test for bias model")
bias_parser_train = bias_parser_sub.add_parser("train", help="Train bias model")
bias_parser_qc = bias_parser_sub.add_parser("qc", help="Do quality checks and get test metrics for the bias model")
# helper parsers
prep_parser_full = subparsers.add_parser("prep", help="Tools to generate preprocessing data for chrombpnet")
prep_parser_sub = prep_parser_full.add_subparsers(help="Must be eithier 'nonpeaks' or 'splits'.", required=True, dest='cmd_prep')
nonpeaks_parser = prep_parser_sub.add_parser("nonpeaks", help="Generate non-peak background regions given peaks")
splits_parser = prep_parser_sub.add_parser("splits", help="Generate chromosome splits")
# downstream tool parsers
preds_parser = subparsers.add_parser("pred_bw", help="Get model prediction bigwigs (Metrics calculated if observed bigwig provided)")
contribs_parser = subparsers.add_parser("contribs_bw", help="Get contribution score bigwigs")
motifs_parser = subparsers.add_parser("modisco_motifs", help="Summarize motifs from contribution scores with TFModisco")
#custom_preds_parser = subparsers.add_parser("pred_custom", help="Make model predictions on custom sequences and output to .h5 file")
#custom_contribs_parser = subparsers.add_parser("contribs_custom", help="Get contribution on custom sequences and output to .h5 file")
footprints_parser = subparsers.add_parser("footprints", help="Get marginal footprinting for given model and given motifs")
variants_parser = subparsers.add_parser("snp_score", help="Score SNPs with model")
def general_training_args(required_train, optional_train):
    """Attach the argument definitions shared by every training subcommand.

    Adds the required I/O options (genome, chrom sizes, exactly one of
    BAM/fragment/tagAlign input, output dir, assay type, peaks, nonpeaks,
    fold file) to *required_train*, and the tunable training defaults
    (lengths, seed, epochs, early stop, learning rate, trackables, model
    architecture, prefixes) to *optional_train*.

    :param required_train: argparse argument group receiving required options.
    :param optional_train: argparse argument group receiving optional options.
    :return: the same (required_train, optional_train) pair, for chaining.
    """
    required_train.add_argument('-g','--genome', required=True, type=str, help="reference genome fasta file")
    required_train.add_argument('-c', '--chrom-sizes', type=str, required=True, help="Chrom sizes file")
    # Exactly one read-input format must be supplied.
    group = required_train.add_mutually_exclusive_group(required=True)
    group.add_argument('-ibam', '--input-bam-file', type=str, help="Input BAM file")
    group.add_argument('-ifrag', '--input-fragment-file', type=str, help="Input fragment file")
    group.add_argument('-itag', '--input-tagalign-file', type=str, help="Input tagAlign file")
    required_train.add_argument('-o', '--output-dir', type=str, required=True, help="Output dir (path/to/output/dir)")
    required_train.add_argument('-d', '--data-type', required=True, type=str, choices=['ATAC', 'DNASE'], help="assay type")
    required_train.add_argument("-p", "--peaks", type=str, required=True, help="10 column bed file of peaks. Sequences and labels will be extracted centered at start (2nd col) + summit (10th col).")
    required_train.add_argument("-n", "--nonpeaks", type=str, required=True, help="10 column bed file of non-peak regions, centered at summit (10th column)")
    required_train.add_argument("-fl", "--chr-fold-path", type=str, required=True, help="Fold information - dictionary with test,valid and train keys and values with corresponding chromosomes")
    # Fixed help-string typo: "outlies" -> "outliers".
    optional_train.add_argument("-oth", "--outlier-threshold", type=float, default=0.9999, help="threshold to use to filter outliers")
    #optional_train.add_argument('-ps', '--plus-shift', type=int, default=None, help="Plus strand shift applied to reads. Estimated if not specified")
    #optional_train.add_argument('-ms', '--minus-shift', type=int, default=None, help="Minus strand shift applied to reads. Estimated if not specified")
    optional_train.add_argument('--ATAC-ref-path', type=str, default=None, help="Path to ATAC reference motifs (ATAC.ref.motifs.txt used by default)")
    optional_train.add_argument('--DNASE-ref-path', type=str, default=None, help="Path to DNASE reference motifs (DNASE.ref.motifs.txt used by default)")
    optional_train.add_argument('--num-samples', type=int, default=10000, help="Number of reads to sample from BAM/fragment/tagAlign file for shift estimation")
    optional_train.add_argument("-il", "--inputlen", type=int, default=2114, required=False, help="Sequence input length")
    optional_train.add_argument("-ol", "--outputlen", type=int, default=1000, required=False, help="Prediction output length")
    optional_train.add_argument("-s", "--seed", type=int, default=1234, help="seed to use for model training")
    optional_train.add_argument("-e", "--epochs", type=int, default=50, help="Maximum epochs to train")
    optional_train.add_argument("-es", "--early-stop", type=int, default=5, help="Early stop limit, corresponds to 'patience' in callback")
    optional_train.add_argument("-l", "--learning-rate", type=float, default=0.001, help="Learning rate for model training")
    optional_train.add_argument("-track","--trackables",nargs="*",default=['logcount_predictions_loss', 'loss', 'logits_profile_predictions_loss', 'val_logcount_predictions_loss', 'val_loss', 'val_logits_profile_predictions_loss'], help="list of things to track per batch, such as logcount_predictions_loss,loss,profile_predictions_loss,val_logcount_predictions_loss,val_loss,val_profile_predictions_loss")
    optional_train.add_argument("-a","--architecture-from-file",type=str,required=False, default=None, help="Model to use for training")
    optional_train.add_argument("-fp","--file-prefix",type=str,required=False, default=None, help="File prefix for output to use. All the files will be prefixed with this string if provided.")
    optional_train.add_argument('-hp', '--html-prefix', required=False, default="./", help="The html prefix to use for the html file output.")
    return required_train, optional_train
# Generate non-peak regions from peak-regions
nonpeaks_parser._action_groups.pop()
required_nonpeaks_parser = nonpeaks_parser.add_argument_group('required arguments')
optional_nonpeaks_parser = nonpeaks_parser.add_argument_group('optional arguments')
required_nonpeaks_parser.add_argument("-g","--genome", required=True, help="reference genome file")
required_nonpeaks_parser.add_argument("-o","--output-prefix", required=True, help="output BED file prefix to store the gc content of binned genome. suffix .bed will be appended by the code. If the prefix contains a directory path make sure it exists.")
required_nonpeaks_parser.add_argument("-p", "--peaks", type=str, required=True, help="10 column bed file of peaks. Sequences and labels will be extracted centered at start (2nd col) + summit (10th col).")
required_nonpeaks_parser.add_argument('-c', '--chrom-sizes', type=str, required=True, help="Chrom sizes file")
required_nonpeaks_parser.add_argument("-fl", "--chr-fold-path", type=str, required=True, help="Fold information - dictionary with test,valid and train keys and values with corresponding chromosomes")
optional_nonpeaks_parser.add_argument("-il","--inputlen", type=int,default=2114, help="inputlen to use to make bins and find gc content")
optional_nonpeaks_parser.add_argument("-st","--stride", type=int,default=1000, help="stride to use for shifting the bins")
optional_nonpeaks_parser.add_argument("-npr", "--neg-to-pos-ratio-train", type=int, default=2, help="Ratio of negatives to positives to sample in training set (test set always has 1:1 positive to negatives ratio")
optional_nonpeaks_parser.add_argument("-br", "--blacklist-regions", type=str, required=False, default=None, help="TSV file with 3 columns - chr, start, end")
optional_nonpeaks_parser.add_argument("-s", "--seed", type=int, default=1234, help="seed to use for generating nonpeaks")
# Generate splits
splits_parser._action_groups.pop()
required_splits_parser = splits_parser.add_argument_group('required arguments')
required_splits_parser.add_argument("-op", "--output_prefix", type=str, required=True, help="Path prefix to store the fold information (appended with .json)")
required_splits_parser.add_argument("-c", "--chrom-sizes", type=str, required=True, help="TSV file with chromosome sizes. All chromosomes from the first column of chrom sizes file are used")
required_splits_parser.add_argument("-tcr", "--test-chroms", nargs="*", type=str, required=True, help="Chromosomes to use for test")
required_splits_parser.add_argument("-vcr", "--valid-chroms", nargs="*", type=str, required=True, help="Chromosomes to use for validation")
# train chrombpnet arguments
train_parser._action_groups.pop()
required_main_parser = train_parser.add_argument_group('required arguments')
optional_main_parser = train_parser.add_argument_group('optional arguments')
required_main_parser,optional_main_parser = general_training_args(required_main_parser, optional_main_parser)
required_main_parser.add_argument("-b", "--bias-model-path", type=str, required=True, help="Path for a pretrained bias model")
optional_main_parser.add_argument("-sr", "--negative-sampling-ratio", type=float, default=0.1, help="Ratio of negatives to positive samples per epoch")
optional_main_parser.add_argument("-fil", "--filters", type=int, default=512, help="Number of filters to use in chrombpnet mode")
optional_main_parser.add_argument("-dil", "--n-dilation-layers", type=int, default=8, help="Number of dilation layers to use in chrombpnet model")
optional_main_parser.add_argument("-j", "--max-jitter", type=int, default=500, help="Maximum jitter applied on either side of region (default 500 for chrombpnet model)")
optional_main_parser.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for model training")
# chrombpnet pipeline arguments
pipeline_parser._action_groups.pop()
required_pipeline_parser = pipeline_parser.add_argument_group('required arguments')
optional_pipeline_parser = pipeline_parser.add_argument_group('optional arguments')
required_pipeline_parser,optional_pipeline_parser = general_training_args(required_pipeline_parser, optional_pipeline_parser)
required_pipeline_parser.add_argument("-b", "--bias-model-path", type=str, required=True, help="Path for a pretrained bias model")
optional_pipeline_parser.add_argument("-sr", "--negative-sampling-ratio", type=float, default=0.1, help="Ratio of negatives to positive samples per epoch")
optional_pipeline_parser.add_argument("-fil", "--filters", type=int, default=512, help="Number of filters to use in chrombpnet mode")
optional_pipeline_parser.add_argument("-dil", "--n-dilation-layers", type=int, default=8, help="Number of dilation layers to use in chrombpnet model")
optional_pipeline_parser.add_argument("-j", "--max-jitter", type=int, default=500, help="Maximum jitter applied on either side of region (default 500 for chrombpnet model)")
optional_pipeline_parser.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for model training")
# chrombpnet model qc arguments
qc_parser._action_groups.pop()
required_qc_parser = qc_parser.add_argument_group('required arguments')
optional_qc_parser = qc_parser.add_argument_group('optional arguments')
required_qc_parser.add_argument("-bw", "--bigwig", type=str, required=True, help="Input bigwig file of observed data")
required_qc_parser.add_argument("-cm", "--chrombpnet-model", type=str, required=True, help="Path to chrombpnet model h5")
required_qc_parser.add_argument("-cmb", "--chrombpnet-model-nb", type=str, required=True, help="Path to chrombpnet nobias model h5")
required_qc_parser.add_argument('-g','--genome', required=True, type=str, help="reference genome fasta file")
required_qc_parser.add_argument('-c', '--chrom-sizes', type=str, required=True, help="Chrom sizes file")
required_qc_parser.add_argument('-o', '--output-dir', type=str, required=True, help="Output dir (path/to/output/dir)")
required_qc_parser.add_argument('-d', '--data-type', required=True, type=str, choices=['ATAC', 'DNASE'], help="assay type")
required_qc_parser.add_argument("-p", "--peaks", type=str, required=True, help="10 column bed file of peaks. Sequences and labels will be extracted centered at start (2nd col) + summit (10th col).")
required_qc_parser.add_argument("-n", "--nonpeaks", type=str, required=True, help="10 column bed file of non-peak regions, centered at summit (10th column)")
required_qc_parser.add_argument("-fl", "--chr-fold-path", type=str, required=True, help="Fold information - dictionary with test,valid and train keys and values with corresponding chromosomes")
optional_qc_parser.add_argument("-fp","--file-prefix",type=str,required=False, default=None, help="File prefix for output to use. All the files will be prefixed with this string if provided.")
optional_qc_parser.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for model training")
optional_qc_parser.add_argument('-hp', '--html-prefix', required=False, default="./", help="The html prefix to use for the html file output.")
# bias model pipeline arguments
bias_parser._action_groups.pop()
required_bias_parser = bias_parser.add_argument_group('required arguments')
optional_bias_parser = bias_parser.add_argument_group('optional arguments')
required_bias_parser,optional_bias_parser = general_training_args(required_bias_parser, optional_bias_parser)
required_bias_parser.add_argument("-b", "--bias-threshold-factor", type=float, required=True, help="A threshold is applied on maximum count of non-peak region for training bias model, which is set as this threshold x min(count over peak regions). Recommended start value 0.5 for ATAC and 0.8 for DNase.")
optional_bias_parser.add_argument("-fil", "--filters", type=int, default=128, help="Number of filters to use in chrombpnet mode")
optional_bias_parser.add_argument("-dil", "--n-dilation-layers", type=int, default=4, help="Number of dilation layers to use in chrombpnet model")
optional_bias_parser.add_argument("-j", "--max-jitter", type=int, default=0, help="Maximum jitter applied on either side of region (default 500 for chrombpnet model)")
optional_bias_parser.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for model training")
# bias model training arguments
bias_parser_train._action_groups.pop()
required_biast_parser = bias_parser_train.add_argument_group('required arguments')
optional_biast_parser = bias_parser_train.add_argument_group('optional arguments')
required_biast_parser,optional_biast_parser = general_training_args(required_biast_parser, optional_biast_parser)
required_biast_parser.add_argument("-b", "--bias-threshold-factor", type=float, required=True, help="A threshold is applied on maximum count of non-peak region for training bias model, which is set as this threshold x min(count over peak regions). Recommended start value 0.5 for ATAC and 0.8 for DNas")
optional_biast_parser.add_argument("-fil", "--filters", type=int, default=128, help="Number of filters to use in chrombpnet mode")
optional_biast_parser.add_argument("-dil", "--n-dilation-layers", type=int, default=4, help="Number of dilation layers to use in chrombpnet model")
optional_biast_parser.add_argument("-j", "--max-jitter", type=int, default=0, help="Maximum jitter applied on either side of region (default 500 for chrombpnet model)")
optional_biast_parser.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for model training")
# bias model qc arguments
bias_parser_qc._action_groups.pop()
required_bqc_parser = bias_parser_qc.add_argument_group('required arguments')
optional_bqc_parser = bias_parser_qc.add_argument_group('optional arguments')
required_bqc_parser.add_argument("-bw", "--bigwig", type=str, required=True, help="Input bigwig file of observed data")
required_bqc_parser.add_argument("-bm", "--bias-model", type=str, required=True, help="Path to bias model .h5 file")
required_bqc_parser.add_argument('-g','--genome', required=True, type=str, help="reference genome fasta file")
required_bqc_parser.add_argument('-c', '--chrom-sizes', type=str, required=True, help="Chrom sizes file")
required_bqc_parser.add_argument('-o', '--output-dir', type=str, required=True, help="Output dir (path/to/output/dir)")
required_bqc_parser.add_argument('-d', '--data-type', required=True, type=str, choices=['ATAC', 'DNASE'], help="assay type")
required_bqc_parser.add_argument("-p", "--peaks", type=str, required=True, help="10 column bed file of peaks. Sequences and labels will be extracted centered at start (2nd col) + summit (10th col).")
required_bqc_parser.add_argument("-n", "--nonpeaks", type=str, required=True, help="10 column bed file of non-peak regions, centered at summit (10th column)")
required_bqc_parser.add_argument("-fl", "--chr-fold-path", type=str, required=True, help="Fold information - dictionary with test,valid and train keys and values with corresponding chromosomes")
optional_bqc_parser.add_argument("-fp","--file-prefix",type=str,required=False, default=None, help="File prefix for output to use. All the files will be prefixed with this string if provided.")
optional_bqc_parser.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for model training")
optional_bqc_parser.add_argument('-hp', '--html-prefix', required=False, default="./", help="The html prefix to use for the html file output.")
# Make prediction bigwigs
preds_parser._action_groups.pop()
required_preds = preds_parser.add_argument_group('required arguments')
optional_preds = preds_parser.add_argument_group('optional arguments')
required_preds.add_argument("-bm", "--bias-model", type=str, required=False, help="Path to bias model h5 (atleast one of -bm, -cm, -cmb is reqd)")
required_preds.add_argument("-cm", "--chrombpnet-model", type=str, required=False, help="Path to chrombpnet model h5 (atleast one of -bm, -cm, -cmb is reqd)")
required_preds.add_argument("-cmb", "--chrombpnet-model-nb", type=str, required=False, help="Path to chrombpnet no bias model h5 (atleast one of -bm, -cm, -cmb is reqd)")
required_preds.add_argument("-r", "--regions", type=str, required=True, help="10 column bed file of regions for prediction")
required_preds.add_argument("-g", "--genome", type=str, required=True, help="Genome fasta")
required_preds.add_argument("-c", "--chrom-sizes", type=str, required=True, help="Chromosome sizes 2 column tab-separated file")
required_preds.add_argument("-op", "--output-prefix", type=str, required=True, help="Output prefix for bigwig files")
optional_preds.add_argument("-os", "--output-prefix-stats", type=str, default=None, required=False, help="Output stats on bigwig")
optional_preds.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for prediction")
optional_preds.add_argument("-t", "--tqdm", type=int,default=1, help="Use tqdm. If yes then you need to have it installed.")
optional_preds.add_argument("-d", "--debug-chr", nargs="+", type=str, default=None, help="Run for specific chromosomes only (e.g. chr1 chr2) for debugging")
optional_preds.add_argument("-bw", "--bigwig", type=str, default=None, help="If provided .h5 with predictions are output along with calculated metrics considering bigwig as groundtruth.")
# Make contribution score bigwigs
contribs_parser._action_groups.pop()
required_contribs = contribs_parser.add_argument_group('required arguments')
optional_contribs = contribs_parser.add_argument_group('optional arguments')
required_contribs.add_argument("-m", "--model-h5", type=str, required=True, help="Path model .h5 file")
required_contribs.add_argument("-r", "--regions", type=str, required=True, help="10 column bed file of regions for contribution score predictions")
required_contribs.add_argument("-g", "--genome", type=str, required=True, help="Genome fasta")
required_contribs.add_argument("-c", "--chrom-sizes", type=str, required=True, help="Chromosome sizes 2 column tab-separated file")
required_contribs.add_argument("-op", "--output-prefix", type=str, required=True, help="Output prefix for bigwig files")
optional_contribs.add_argument("-pc", "--profile-or-counts", nargs="+", type=str, default=["counts", "profile"], choices=["counts", "profile"],
help="use either counts or profile or both for running shap")
optional_contribs.add_argument("-os", "--output-prefix-stats", type=str, default=None, required=False, help="Output stats on bigwig")
optional_contribs.add_argument("-t", "--tqdm", type=int,default=1, help="Use tqdm. If yes then you need to have it installed.")
optional_contribs.add_argument("-d", "--debug-chr", nargs="+", type=str, default=None, help="Run for specific chromosomes only (e.g. chr1 chr2) for debugging")
# Get marginal footprints
footprints_parser._action_groups.pop()
required_ftps = footprints_parser.add_argument_group('required arguments')
optional_ftps = footprints_parser.add_argument_group('optional arguments')
required_ftps.add_argument("-m", "--model-h5", type=str, required=True, help="Path model .h5 file")
required_ftps.add_argument("-r", "--regions", type=str, required=True, help="10 column bed file of non-peak regions")
required_ftps.add_argument("-g", "--genome", type=str, required=True, help="Genome fasta")
required_ftps.add_argument("-fl", "--chr-fold-path", type=str, required=True, help="Fold information - dictionary with test,valid and train keys and values with corresponding chromosomes")
required_ftps.add_argument("-op", "--output-prefix", type=str, required=True, help="Output prefix for bigwig files")
required_ftps.add_argument("-pwm_f", "--motifs-to-pwm", type=str, required=True, help="Path to a TSV file containing motifs in first column and motif string to use for footprinting in second column")
optional_ftps.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for prediction")
optional_ftps.add_argument("--ylim",default=None,type=tuple, required=False,help="lower and upper y-limits for plotting the motif footprint, in the form of a tuple i.e. \
(0,0.8). If this is set to None, ylim will be autodetermined.")
# Do variant scoring
variants_parser._action_groups.pop()
required_ves = variants_parser.add_argument_group('required arguments')
optional_ves = variants_parser.add_argument_group('optional arguments')
required_ves.add_argument("-snps", "--snp-data", type=str, required=True, help="Path to a tsv output with the following information in columns - chr, position to insert allele (0-based), ref allele, alt allele")
required_ves.add_argument("-m", "--model-h5", type=str, required=True, help="Path model .h5 file")
required_ves.add_argument("-g", "--genome", type=str, required=True, help="Genome fasta")
required_ves.add_argument("-op", "--output-prefix", type=str, required=True, help="Output prefix for bigwig files")
optional_ves.add_argument("-bs", "--batch-size", type=int, default=64, help="batch size to use for prediction")
optional_ves.add_argument("-dm","--debug-mode-on", type=int, default=0, help="Use this mode to print the flanks of first five SNP insert locations")
# Run TF-Modisco
motifs_parser._action_groups.pop()
required_tfm = motifs_parser.add_argument_group('required arguments')
optional_tfm = motifs_parser.add_argument_group('optional arguments')
required_tfm.add_argument("-i", "--h5py", type=str, required=True, help="A legacy h5py file containing the one-hot encoded sequences and shap scores.")
required_tfm.add_argument("-n", "--max-seqlets", type=int, required=True, help="The maximum number of seqlets per metacluster.")
required_tfm.add_argument("-op", "--output-prefix", type=str, required=True, help="The path to the output file.")
optional_tfm.add_argument("-l", "--n-leiden", type=int, default=2, help="The number of Leiden clusterings to perform with different random seeds.")
optional_tfm.add_argument("-w", "--window", type=int, default=500, help="The window surrounding the peak center that will be considered for motif discovery.")
optional_tfm.add_argument("-v", "--verbose", action="store_true", default=False, help="Controls the amount of output from the code.")
# Pull the arguments
args = parser.parse_args()
return args
|
from django.urls import path
# from .views import MovieListView, MovieDetailView, MovieCreateView, MovieUpdateView, MovieDestroyView
from rest_framework.routers import DefaultRouter
from .views import MovieViewSet
# urlpatterns = [
# path('', MovieListView.as_view()),
# path('<pk>', MovieDetailView.as_view()),
# path('create/', MovieCreateView.as_view()),
# path('<pk>/update', MovieUpdateView.as_view()),
# path('<pk>/delete', MovieDestroyView.as_view()),
# ]
# Register the MovieViewSet CRUD routes under the "api" prefix.
router = DefaultRouter()
# FIX: DRF deprecated `base_name` in 3.9 and removed it in 3.11;
# the keyword argument is `basename`.
router.register(r'api', MovieViewSet, basename='movie')
urlpatterns = router.urls
#!/usr/bin/env python3
import os
import sys
import unittest
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
from data_store_agent import DataStoreAgent
class CleanupTestBundles(unittest.TestCase):
    """Operational helpers (shaped as unittest tests) to purge test bundles.

    NOTE(review): these "tests" read from — and in the tombstone case
    mutate — the *prod* deployment of the data store; they are meant to be
    run deliberately, not as part of an automated test sweep.
    """

    # Search query matching bundles whose project short name carries the
    # "prod/" test prefix.
    test_bundle_query = {
        "query": {
            "prefix": {
                "files.project_json.project_core.project_short_name": "prod/"
            }
        }
    }

    def setUp(self):
        """Connect to the prod deployment of the data store."""
        self.data_store = DataStoreAgent(deployment="prod")

    def test_find_test_bundles(self):
        """Print the FQID of every matching test bundle (read-only)."""
        for fqid in self._test_bundles():
            print(fqid)

    def test_tombstone_test_bundles(self):
        """Tombstone (soft-delete) every matching test bundle."""
        for fqid in self._test_bundles():
            # FQID format is "<uuid>.<version>"; split on the first dot only.
            uuid, version = fqid.split(".", 1)
            print("Tombstoning bundle", uuid, version)
            self.data_store.tombstone_bundle(uuid)

    def _test_bundles(self):
        """Yield the FQIDs matching test_bundle_query, paging via search_iterate."""
        for hit in self.data_store.search_iterate(self.test_bundle_query):
            yield hit['bundle_fqid']
# Allow invoking this cleanup module directly as a script.
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
from scipy.optimize import curve_fit
from matplotlib.pyplot import figure, show, cm, xticks, yticks
import full_henon as fh
import helper as he
def closest(array, val):
    """Return the element of *array* nearest to *val*, with its index.

    Ties resolve to the first occurrence (argmin behaviour).
    """
    deltas = np.abs(array - val)
    ind = deltas.argmin()
    return array[ind], ind
def box_counting(xv, yv, sF, xS=4, yS=4):
    """Improved version of the function box_counting.

    Overlays a regular grid on the bounding box of the Hénon attractor and
    counts the grid cells occupied by at least one orbit point.

    :param xv: x coordinates of the orbit points.
    :param yv: y coordinates of the orbit points (same length as xv).
    :param sF: scaling factor; the grid has int(xS/sF) x int(yS/sF) cells.
    :param xS: nominal x extent used to derive the number of columns.
    :param yS: nominal y extent used to derive the number of rows.
    :return: N(s), the number of occupied grid cells.
    """
    finxS = int(xS / sF)  # Final x box size
    finyS = int(yS / sF)  # Final y box size
    # Region in which Hénon map is defined
    xMin, xMax = -1.33, 1.32
    yMin, yMax = -0.5, 0.42
    grid = np.zeros((finxS, finyS))  # Creating the grid
    xRange = np.linspace(xMin, xMax, finxS)  # x range of grid
    yRange = np.linspace(yMin, yMax, finyS)  # y range of grid
    for ind in range(len(xv)):
        # NOTE(review): he.take_closest presumably returns (value, index) of
        # the nearest grid line — confirm against the helper module.
        xPos, xInd = he.take_closest(xRange, xv[ind])  # x index in grid
        yPos, yInd = he.take_closest(yRange, yv[ind])  # y index in grid
        grid[yInd][xInd] += 1  # Closest pixel in grid
    gridN = np.count_nonzero(grid)  # Counting non zero values
    return gridN
def naive_box_dim(xv, yv, sRange, saveFig=None):
    """Naive implementation of the box-counting dimension.

    Counts occupied boxes N(s) for every box size s in *sRange*, fits
    log2(N(s)) against log2(1/s) with a straight line — whose slope
    estimates the box-counting dimension — and plots the fit.

    FIX: the axis-label and legend strings contained "\\l" escape sequences
    in non-raw strings (a DeprecationWarning, SyntaxWarning on newer
    Pythons); they are now raw strings with identical runtime values.

    :param xv: x coordinates of the attractor points.
    :param yv: y coordinates of the attractor points.
    :param sRange: iterable of box sizes s to evaluate.
    :param saveFig: optional filename; if given, save the figure instead of
        showing it interactively.
    """
    grids = np.asarray([box_counting(xv, yv, s) for s in sRange])  # Counting boxes
    print(grids)

    # Taking logarithms (note: a zero box count would make log2 diverge)
    grids = np.log2(grids)
    sRange = -np.log2(sRange)  # log(1/s) = -log(s)

    # Linear fit: slope a is the dimension estimate
    def fit_linear(x, a, b):
        return a * x + b

    para, error = curve_fit(fit_linear, sRange, grids)  # Best fit parameters

    # Label
    lab = rf"$\log_2 (N(s))$ = {para[0]:.2f} $* \log_2 (1/s)$ + {para[1]:.2f}"
    xRange = np.linspace(min(sRange), max(sRange), int(1e3))  # x values
    yRange = fit_linear(xRange, *para)  # y values

    # Plotting
    fig = figure(figsize=(12,8))
    frame = fig.add_subplot(1,1,1)

    frame.scatter(sRange, grids, s=175, marker="X", color="navy", zorder=3)
    frame.plot(xRange, yRange, lw=2, label=lab, color="crimson")

    frame.set_xlabel(r"$\log_2 (1/s)$", fontsize=20)
    frame.set_ylabel(r"$\log_2 (N(s))$", fontsize=20)
    frame.tick_params(axis='both', labelsize=15)
    frame.legend(fontsize=20)
    frame.grid(zorder=2)

    if saveFig:
        fig.savefig(saveFig)
    else:
        show()
def red_box_dim(sF, nIts, xv, yv, xv2, yv2):
    """Calculate the reduced box counting dimension.

    Extrapolates finite-iteration box counts to the infinite-iteration
    limit using a finite-size correction attributed to Grassberger (see
    also Peitgen, Juergens & Saupe) before taking the dyadic slope.

    :param sF: box size s.
    :param nIts: number of iterations n that produced (xv, yv).
    :param xv: x coordinates of the orbit with n iterations.
    :param yv: y coordinates of the orbit with n iterations.
    :param xv2: x coordinates of the orbit with 2n iterations.
    :param yv2: y coordinates of the orbit with 2n iterations.
    :return: the estimated box-counting dimension.
    """
    gridN = np.asarray(box_counting(xv, yv, sF))  # N(s, n)
    grid2N = np.asarray(box_counting(xv2, yv2, sF))  # N(s, 2n)
    grid2S = np.asarray(box_counting(xv, yv, 2*sF))  # N(2s, n)
    print(f"N(s, n) = {gridN}")
    print(f"N(2s, n) = {grid2S}")
    # From Grassberger
    # NOTE(review): alpha and beta look like empirical constants for the
    # Hénon attractor taken from the literature — confirm the source.
    alpha, beta = 2.42, 0.89  # Values of constants
    mult1 = (sF**(-alpha)) * (nIts**(-beta))  # Recurring factor
    mult2 = ((2*sF)**(-alpha)) * (nIts**(-beta))
    # See Peitgens, Jurgens, Saupe
    denom = (1 - 2**(-beta)) * mult1  # Denominator
    gamma1 = (grid2N - gridN) / denom  # Constant gamma_1
    nS = gridN + gamma1 * mult1  # Finding N(s)
    n2S = grid2S + gamma1 * mult2  # Finding N(2s)
    boxDim = (np.log(nS) - np.log(n2S)) / np.log(2)  # The dimension
    return boxDim
|
from .response import ResponseViewSet
|
# coding=utf-8
import logging
import os.path
import uuid
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options
import tornado.web
import tornado.websocket
from setting import TORNADO_SETTINGS
define("port", default=8000, help="run on the given port", type=int)
def send_message(message):
for handler in ChatSocketHandler.socket_handlers:
try:
handler.write_message(message)
except:
logging.error('Error sending message', exc_info=True)
class MainHandler(tornado.web.RequestHandler):
    """Serve the chat page, falling back to a login page for new visitors."""

    def get(self):
        """Render the chat room if a nickname cookie exists, else the login form."""
        if self.get_secure_cookie('nickname'):
            # +1 counts the current visitor, who is not yet in socket_handlers.
            self.render('index.html', counts=(len(ChatSocketHandler.socket_handlers)+1))
        else:
            self.render('login.html')

    def post(self):
        """Store the submitted nickname in a secure cookie and enter the room.

        NOTE(review): when the nickname is empty no response is rendered at
        all, so the client receives an empty 200 — confirm this is intended.
        """
        nickname = self.get_argument('nickname')
        if nickname:
            self.set_secure_cookie('nickname', nickname)
            self.render('index.html', counts=(len(ChatSocketHandler.socket_handlers)+1))
class ChatSocketHandler(tornado.websocket.WebSocketHandler):
    """Websocket endpoint that relays chat messages to all connected clients."""

    # Class-level registry of every open websocket connection.
    socket_handlers = set()

    def check_origin(self, origin):
        """Accept websocket connections from any origin (no origin check)."""
        return True

    def open(self):
        """Register this connection and announce the newcomer to the room."""
        ChatSocketHandler.socket_handlers.add(self)
        # NOTE(review): on Python 3 get_secure_cookie returns bytes, so the
        # %s interpolation would render b'...' — confirm the target runtime.
        nickname = self.get_secure_cookie('nickname')
        send_message('<span class="welcome">%s加入聊天室.</span>' % nickname)

    def on_close(self):
        """Deregister this connection and announce the departure."""
        ChatSocketHandler.socket_handlers.remove(self)
        nickname = self.get_secure_cookie('nickname')
        send_message('<span class="welcome">%s离开了.</span>' % nickname)

    def on_message(self, message):
        """Prefix the incoming message with the sender's name and broadcast it."""
        message = '<span class="name">%s</span>: %s' % (self.get_secure_cookie('nickname'), message.encode('utf8'))
        send_message(message)
class ChatUserCountHandler(tornado.web.RequestHandler):
    """Report the current number of connected chat clients as plain text."""

    def get(self, *args, **kwargs):
        count = len(ChatSocketHandler.socket_handlers)
        self.write(str(count))
        self.finish()
# URL routing table: chat page, websocket endpoint, and user-count probe.
TORNADO_ROUTES = [('/', MainHandler), # ('/new-msg/', ChatHandler),
                  ('/new-msg/socket', ChatSocketHandler),
                  ('/user-count', ChatUserCountHandler)]
class Application(tornado.web.Application):
    """Tornado application wired with the chat routes and project settings."""

    def __init__(self):
        handlers = TORNADO_ROUTES
        settings = TORNADO_SETTINGS
        tornado.web.Application.__init__(self, handlers, **settings)
def main():
    """Parse CLI options and run the chat server until interrupted."""
    tornado.options.parse_command_line()
    application = Application()
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.bind(options.port)
    http_server.start()
    tornado.ioloop.IOLoop.instance().start()


if __name__ == '__main__':
    main()
|
import csv
import random
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
from nltk.stem.porter import *
# ---- Parameters ----
validation_size = 0.2  # fraction of training data held out for validation (0-1)
model = 'svm'          # values: logistic, naivebayes, svm
predict_test = True    # generate test predictions?
ngram_max = 2          # largest n-gram size for building train_features

# BUG FIX: the stemmed phrase was previously computed and then discarded, so
# stemming never took effect.  The stemmed text is now actually used — for
# both training and test data, keeping the vocabularies consistent.
stemmer = PorterStemmer()

def stem_phrase(phrase):
    # Stem each whitespace-separated token and rejoin with single spaces.
    return ' '.join([stemmer.stem(w) for w in phrase.split(' ')])

# ---- Load training data ----
train_data = []
train_sentiment = []
with open('data/train.tsv') as f:
    next(f)  # skip the header row
    reader = csv.reader(f, delimiter='\t')
    for phrase_id, sentence_id, phrase, sentiment in reader:
        train_data.append(stem_phrase(phrase))
        train_sentiment.append(sentiment)

# ---- Build training & validation sets ----
train_data, val_data, train_sentiment, val_sentiment = \
    cross_validation.train_test_split(train_data, train_sentiment,
                                      test_size=validation_size,
                                      random_state=random.randint(1, 100))

# ---- Build feature vectors ----
vectorizer = CountVectorizer(ngram_range=(1, ngram_max))
train_features = vectorizer.fit_transform(train_data)

# ---- Select training algorithm ----
if model == 'logistic':
    clf = LogisticRegression()
elif model == 'naivebayes':
    clf = MultinomialNB()
elif model == 'svm':
    clf = LinearSVC(C=10)
clf_model = clf.fit(train_features, train_sentiment)

# ---- Validation accuracy ----
val_features = vectorizer.transform(val_data)
print clf.score(val_features, val_sentiment)

# ---- Generate test predictions (phrase_id,sentiment CSV on stdout) ----
if predict_test:
    test_data = []
    phrase_id_list = []
    with open('data/test.tsv') as f:
        next(f)
        reader = csv.reader(f, delimiter='\t')
        for phrase_id, sentence_id, phrase in reader:
            test_data.append(stem_phrase(phrase))
            phrase_id_list.append(phrase_id)
    test_features = vectorizer.transform(test_data)
    sentiment_pred = clf.predict(test_features)
    for (id, s) in zip(phrase_id_list, sentiment_pred):
        print str(id) + "," + str(s)
|
# -*- coding: utf-8 -*-
import uuid
import inject
import logging
import base64
from model.registry import Registry
from model.mail.mail import Mail
from model.files.files import FileDAO
from model.laboralinsertion.inscription import InscriptionDAO
import model.laboralinsertion.user
import model.users.users
class EmailToSend:
registry = inject.attr(Registry)
mailModel = inject.attr(Mail)
def __init__(self, inscriptionIds, emailsToSend):
assert isinstance(inscriptionIds, list)
assert isinstance(emailsToSend, list)
self.reg = self.registry.getRegistry('LaboralInsertion')
self.inscriptionIds = inscriptionIds
self.inscriptions = None
self.users = None
self.mails = emailsToSend
def _clasifyByUser(self, con, users):
users2 = {}
for u in users:
ld = model.laboralinsertion.user.UserDAO.findById(con, u.id)[0]
email = model.users.users.MailDAO.findById(con, ld.emailId)[0]
users2[u.id] = {
'user': u,
'data': ld,
'email': email
}
return users2
def _loadInscriptions(self, con):
self.inscriptions = InscriptionDAO.findById(con, self.inscriptionIds)
def _loadUsers(self, con):
userIds = [ i.userId for i in self.inscriptions ]
self.users = self._clasifyByUser(con, model.users.users.UserDAO.findById(con, userIds))
def _generateOds(self):
import pyoo
host = self.reg.get('ooHost')
port = int(self.reg.get('ooPort'))
sheetTemplate = self.reg.get('sheetTemplate')
calc = pyoo.Desktop(host, port)
doc = calc.open_spreadsheet(sheetTemplate)
try:
sheet = doc.sheets[0]
index = 2
for i in self.inscriptions:
sheet[index,0].value = self.users[i.userId]['user'].lastname
sheet[index,1].value = self.users[i.userId]['user'].name
sheet[index,2].value = self.users[i.userId]['user'].genre
#sheet[index,3].value = self.users[i.userId]['user'].getAge()
sheet[index,3].value = self.users[i.userId]['user'].dni
sheet[index,4].value = self.users[i.userId]['email'].email
sheet[index,5].value = i.degree
sheet[index,6].value = i.approved
sheet[index,7].value = i.average1
index = index + 1
fn = '/tmp/{}.xlsx'.format(str(uuid.uuid4()))
doc.save(fn, pyoo.FILTER_EXCEL_2007)
return fn
finally:
doc.close()
def _attachOds(self, parts):
fn = self._generateOds()
f = open(fn,'rb')
try:
content = f.read()
parts.append(self.mailModel.getFilePart('datos.xlsx', content, content_type='application', subtype='vnd.openxmlformats-officedocument.spreadsheetml.sheet'))
finally:
f.close()
def _attachContent(self, parts):
template = self.reg.get('mailTemplate')
f = open(template,'r')
try:
content = f.read()
parts.append(self.mailModel.getHtmlPart(content))
finally:
f.close()
def _attachCvs(self, con, parts):
    """Append each user's CV (a PDF stored through FileDAO) to *parts*.

    Users without a CV reference, without stored content, or whose content
    codec is not recognised are skipped.
    """
    for u in self.users.values():
        data = u['data']
        if data.cv is None:
            continue
        meta = FileDAO.findById(con, data.cv)
        content = FileDAO.getContent(con, meta.id)
        if content is None:
            continue
        if meta.codec[0] == 'binary':
            filedata = bytes(content)
        elif meta.codec[0] == 'base64':
            filedata = base64.b64decode(bytes(content))
        else:
            # Bug fix: the original fell through with filedata = None for an
            # unknown codec and attached a None payload; skip the user instead.
            continue
        fn = '{}.pdf'.format(u['user'].dni)
        parts.append(self.mailModel.getFilePart(fn, filedata, content_type='application', subtype='pdf'))
def sendMail(self, con):
    """Build the job-insertion mailing (HTML content + spreadsheet + CVs)
    and send one copy of it to every registered address.

    Returns the list of e-mail addresses of the users included.
    """
    # load the basic data from the database
    self._loadInscriptions(con)
    self._loadUsers(con)
    # assemble the mail parts: content, spreadsheet and CVs
    parts = []
    self._attachContent(parts)
    self._attachOds(parts)
    self._attachCvs(con, parts)
    # ----------------------------------------------------------
    # Requested by Paula: always send a copy with the full content
    # to the insertion-laboral mailbox as well.
    # ----------------------------------------------------------
    self.mails.append('insercionlaboral@econo.unlp.edu.ar')
    # send the complete message to each listed address
    sender = 'insercionlaboral@econo.unlp.edu.ar'
    for mail in self.mails:
        message = self.mailModel.createMail(sender, mail, 'Bolsa de trabajo FCE')
        for part in parts:
            message.attach(part)
        self.mailModel._sendMail(sender, mail, message)
    return [u['email'].email for u in self.users.values()]
class Sent:
    """Record of a mailing sent to companies (envío a empresas)."""

    def __init__(self):
        self.id = ''            # uuid string, assigned by SentDAO.persist
        self.creation = None    # timestamp, filled in by the database default
        self.inscriptions = []  # inscription ids included in the mailing
        self.emails = []        # destination addresses
class SentDAO:
    """Data-access helpers for the laboral_insertion.sent table."""

    @staticmethod
    def _createSchema(con):
        """Create the laboral_insertion.sent table."""
        cur = con.cursor()
        try:
            cur.execute("""
                create table laboral_insertion.sent (
                    id varchar primary key,
                    creation timestamp default now(),
                    inscriptions varchar[],
                    emails varchar[]
                )
            """)
        finally:
            cur.close()

    @staticmethod
    def _fromResult(r):
        """Map a database row onto a Sent instance and return it.

        Bug fix: the original built the instance but never returned it,
        so findById() produced a list of None values.
        """
        s = Sent()
        s.id = r['id']
        s.creation = r['creation']
        s.inscriptions = r['inscriptions']
        s.emails = r['emails']
        return s

    @staticmethod
    def persist(con, s):
        """Insert a new Sent row; assigns a fresh uuid to s.id and returns it."""
        cur = con.cursor()
        try:
            s.id = str(uuid.uuid4())
            ins = s.__dict__
            cur.execute('insert into laboral_insertion.sent (id, inscriptions, emails) values '
                        '(%(id)s, %(inscriptions)s, %(emails)s)', ins)
            return s.id
        finally:
            cur.close()

    @staticmethod
    def findAll(con):
        """Return the ids of all sent mailings."""
        cur = con.cursor()
        try:
            cur.execute('select id from laboral_insertion.sent')
            return [c['id'] for c in cur]
        finally:
            cur.close()

    @staticmethod
    def findByInscriptionId(con, id):
        """Return ids of Sent rows that include the given inscription id."""
        cur = con.cursor()
        try:
            cur.execute('select id from laboral_insertion.sent where %s = ANY(inscriptions)', (id,))
            return [s['id'] for s in cur]
        finally:
            cur.close()

    @staticmethod
    def findById(con, ids=None):
        """Return Sent instances for the given list of ids ([] when empty).

        ``ids`` now defaults to None instead of a shared mutable [] (the
        classic mutable-default pitfall); passing an explicit list behaves
        exactly as before.
        """
        if not ids:
            return []
        cur = con.cursor()
        try:
            cur.execute('select * from laboral_insertion.sent where id in %s', (tuple(ids),))
            return [SentDAO._fromResult(c) for c in cur]
        finally:
            cur.close()
|
import logging
from openprocurement.auction.utils import get_latest_bid_for_bidder, make_request
from openprocurement.auction.worker.auctions import multilot
from openprocurement.auction.worker.utils import prepare_service_stage
from openprocurement.auction.worker.journal import AUCTION_WORKER_API_APPROVED_DATA
def FORMATTER(**kw):
    """Build the '<tender_id>_<lot_id>' document-id suffix.

    Converted from a lambda assignment to a def (PEP 8 E731); behavior is
    identical, including 'None' substitution for missing keys.
    """
    return "{}_{}".format(kw.get('tender_id'), kw.get('lot_id'))


# title/description are copied in every additional language variant
MULTILINGUAL_FIELDS = ['title', 'description']
ADDITIONAL_LANGUAGES = ['ru', 'en']
LOGGER = logging.getLogger('Auction Esco')
# These behave identically for ESCO, so reuse the multilot implementations.
get_auction_info = multilot.get_auction_info
prepare_auction_and_participation_urls = multilot.prepare_auction_and_participation_urls
announce_results_data = multilot.announce_results_data
def prepare_auction_document(self):
    """Build the initial auction_document for an ESCO multilot auction.

    Module-level function written with an explicit ``self`` — presumably
    bound onto the auction worker elsewhere; TODO confirm the binding site.
    Merges tender-wide fields from self._auction_data['data'] with lot-level
    fields from self._lot_data, copies multilingual title/description
    variants, appends the opening 'pause' service stage, and returns the
    populated document.
    """
    self.auction_document.update(
        {'_id': self.auction_doc_id,
         'stages': [],
         'tenderID': self._auction_data['data'].get('tenderID', ''),
         'procurementMethodType': self._auction_data['data'].get('procurementMethodType', ''),
         'TENDERS_API_VERSION': self.worker_defaults['resource_api_version'],
         'initial_bids': [],
         'current_stage': -1,
         "NBUdiscountRate": self._auction_data["data"].get("NBUdiscountRate"),
         'results': [],
         "noticePublicationDate": self._auction_data["data"].get("noticePublicationDate"),
         'minimalStepPercentage': self._lot_data.get('minimalStepPercentage', {}),
         'procuringEntity': self._auction_data['data'].get('procuringEntity', {}),
         'items': self._lot_data.get('items', []),
         'minValue': self._lot_data.get('value', {}),
         'lot': {},
         "fundingKind": self._auction_data["data"].get("fundingKind", {}),
         "yearlyPaymentsPercentageRange": self._auction_data["data"].get("yearlyPaymentsPercentageRange")}
    )
    # 'meat' marks a features-weighted auction; NOTE(review): exact semantics
    # of the 'meat' type live in the worker/frontend — confirm before relying on it
    self.auction_document['auction_type'] = 'meat' if self.features else 'default'
    # copy title/description plus their ru/en variants, from both the tender
    # payload (top level of the document) and the lot payload (under 'lot')
    for key in MULTILINGUAL_FIELDS:
        for lang in ADDITIONAL_LANGUAGES:
            lang_key = '{}_{}'.format(key, lang)
            if lang_key in self._auction_data['data']:
                self.auction_document[lang_key] = self._auction_data['data'][lang_key]
            if lang_key in self._lot_data:
                self.auction_document['lot'][lang_key] = self._lot_data[lang_key]
        self.auction_document[key] = self._auction_data['data'].get(key, '')
        self.auction_document['lot'][key] = self._lot_data.get(key, '')
    # the auction timetable opens with a pause stage at the scheduled start
    self.auction_document['stages'].append(
        prepare_service_stage(
            start=self.startDate.isoformat(),
            type="pause"
        )
    )
    return self.auction_document
# TODO: bid['value']['amount']
def post_results_data(self, with_auctions_results=True):
    """Post the (optionally updated) bids back to the tender API.

    When *with_auctions_results* is True, each active bid's lotValue that
    matches self.lot_id is updated from the bidder's latest auction bid:
    yearlyPaymentsPercentage, contractDuration days/years and the bid time.
    Returns the response from make_request.
    """
    patch_data = {'data': {'bids': list(self._auction_data['data']['bids'])}}
    if with_auctions_results:
        for bid_index, bid in enumerate(self._auction_data['data']['bids']):
            # a bid with no explicit status counts as active
            if bid.get('status', 'active') == 'active':
                for lot_index, lot_bid in enumerate(bid['lotValues']):
                    if lot_bid['relatedLot'] == self.lot_id and lot_bid.get('status', 'active') == 'active':
                        auction_bid_info = get_latest_bid_for_bidder(self.auction_document["results"], bid["id"])
                        patch_data['data']['bids'][bid_index]['lotValues'][lot_index]["value"]["yearlyPaymentsPercentage"] = auction_bid_info["yearlyPaymentsPercentage"]
                        patch_data['data']['bids'][bid_index]['lotValues'][lot_index]["value"]["contractDuration"]["days"] = auction_bid_info["contractDurationDays"]
                        patch_data['data']['bids'][bid_index]['lotValues'][lot_index]["value"]["contractDuration"]["years"] = auction_bid_info["contractDurationYears"]
                        patch_data['data']['bids'][bid_index]['lotValues'][lot_index]["date"] = auction_bid_info["time"]
                        # only one lotValue can match this lot — stop scanning
                        break
    LOGGER.info(
        "Approved data: {}".format(patch_data),
        extra={"JOURNAL_REQUEST_ID": self.request_id,
               "MESSAGE_ID": AUCTION_WORKER_API_APPROVED_DATA}
    )
    results = make_request(
        self.tender_url + '/auction/{}'.format(self.lot_id), data=patch_data,
        user=self.worker_defaults["resource_api_token"],
        method='post',
        request_id=self.request_id, session=self.session
    )
    return results
|
from django.shortcuts import render
from .forms import ClientForm
from orders.models import Order
from products.models import Photo
from products.models import Type
from products.models import Category
from products.models import Product
from systemoptions.models import Systemoptions
from blogs.models import Comment
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.core.mail import EmailMultiAlternatives
from django.core.mail.backends.smtp import EmailBackend
from main.smsc_api import *
from transliterate import translit
import random
def generate_username(first_name, last_name):
    """Derive a unique username: first initial + last name, lowercased,
    with a numeric suffix (0, 1, 2, ...) appended when the plain form is
    already taken. Gives up after suffix 1000000.
    """
    base = "{0}{1}".format(first_name[0], last_name).lower()
    if User.objects.filter(username=base).count() == 0:
        return base
    suffix = 0
    while suffix <= 1000000:
        candidate = "{0}{1}".format(base, suffix)
        if User.objects.filter(username=candidate).count() == 0:
            return candidate
        suffix += 1
    raise Exception("Name is super popular!")
def get_real_name(username):
    """Split a full-name string into its first and last name parts.

    The first whitespace-separated token is the first name, the last token
    is the last name; a single-word input is both. The original contained a
    dead branch (``if not last_name``) that could never fire, because
    str.split() never yields empty tokens — it has been removed.

    Raises IndexError on an empty or whitespace-only input (unchanged).

    Returns a dict with 'first_name' and 'last_name' keys.
    """
    parts = username.split()
    return {'first_name': parts[0], 'last_name': parts[-1]}
class BlogOfRecord:
    """A blog teaser: a photo, its latest comment, and the comment count."""

    def __init__(self, photo, comment, count):
        self.photo = photo      # the blog Photo instance
        self.comment = comment  # the most recent reply comment
        self.count = count      # total number of matching comments

    def edit_key(self):
        """Return the Ukrainian word 'Коментар' with the plural ending
        appropriate for self.count.

        Standard Ukrainian pluralisation:
          * counts ending in 11-19  -> 'Коментарів'
          * last digit 1            -> 'Коментар'
          * last digit 2-4          -> 'Коментарі'
          * last digit 0 or 5-9     -> 'Коментарів'

        The original used a fragile dict-of-boolean-keys trick whose answer
        depended on key-overwrite order; this is the equivalent explicit form.
        """
        last_two = abs(self.count) % 100
        if 10 <= last_two < 20:
            ending = 'ів'
        else:
            last = last_two % 10
            if last == 1:
                ending = ''
            elif 2 <= last <= 4:
                ending = 'і'
            else:  # 0 or 5-9
                ending = 'ів'
        return 'Коментар' + ending
def main(request):
    """Landing-page view: random gallery photos, up to three blog teasers,
    category/type listings with minimum prices, plus handling of the
    'quick order' POST form. Renders main/index.html with locals().
    """
    blogs = []
    # up to 15 random active landing photos ('?' ordering is random)
    land_photos = Photo.objects.filter(land_photo=True, is_active=True).order_by('?')[:15]
    temp_blogs = Photo.objects.filter(blog_photo=True, is_active=True).order_by('dates_upload')
    for any_photo in temp_blogs:
        # only photos whose product has at least one non-empty reply comment
        required_entries = Comment.objects.filter(product_commit_id=any_photo.product_id, r_commit__isnull=False).exclude(r_commit='')
        if required_entries:
            blogs.append(BlogOfRecord(any_photo,
                                      required_entries.last(),
                                      required_entries.count()))
    # show three teasers, chosen at random
    random.shuffle(blogs)
    blogs = blogs[:3]
    categories = Category.objects.filter(accessibility=True).order_by('direction_cat')
    types = Type.objects.filter(is_active=True).order_by('direction_type')
    # NOTE(review): price__exact=True matches products whose price equals the
    # boolean True (i.e. 1) — this looks like a mistake; verify the intent.
    products = Product.objects.filter(price__exact=True)
    # per type: cheapest price, product count and the unit label (or None)
    min_prices_counts_units = []
    for any_type in types:
        temporary = Product.objects.filter(category_plus_type_product_id=any_type.id, is_active=True).order_by('price')
        if temporary:
            min_prices_counts_units.append({'min_price_of_this_type': temporary.first().price, 'count_of_this_type':
                temporary.count(), 'unit_of_this_type': temporary.first().get_unit_display()})
        else:
            min_prices_counts_units.append(None)
    if request.method == 'POST':
        submitter = request.POST['submitter']
        username = request.POST['username']
        full_name = get_real_name(username)
        last_name = full_name['last_name']
        first_name = full_name['first_name']
        # copy POST to make it mutable before injecting the generated username
        request.POST = request.POST.copy()
        # NOTE(review): arguments are passed as (last_name, first_name) while the
        # signature is generate_username(first_name, last_name) — confirm intended.
        request.POST['username'] = generate_username(last_name, first_name)
        client_form = ClientForm(request.POST or None)
        if client_form.is_valid():
            client = client_form.save_user(last_name, first_name)
            # make sure an anonymous session exists so the order can be keyed to it
            if not request.session.session_key:
                request.session.save()
            session_key = request.session.session_key
            quick_order = Order(client=client, order_description=request.POST['order_description'], status_of_order='new',
                                present_in_basket=False, session_key=session_key)
            quick_order.save()
            # notify managers; 'data' is exposed to the template via locals()
            status_quick = inform_service_quick(quick_order, submitter)
            if status_quick:
                data = {'success': True}
            else:
                data = {'success': False, 'error': 'error'}
    # locals() exposes every local above (blogs, categories, data, ...) to the template
    return render(request, 'main/index.html', locals())
def inform_service_quick(quick_order, submitter):
    """Notify managers about a new quick order by e-mail and/or SMS.

    Each channel fires only when its flag (email_send / phone_send) is set
    on the Systemoptions record. Returns True when a Systemoptions record
    exists and the order is truthy, False otherwise — note it returns True
    even when both channel flags are disabled.
    """
    if quick_order:
        date_of_quick_order = quick_order.dates_order
        quick_order_subj = 'Новий (ШВИДКИЙ) заказ: від ' + date_of_quick_order.strftime('%Y-%m-%d %H:%M')
        # site-wide settings: mail backend config, recipient pools, channel flags
        w_master_options = Systemoptions.objects.all().first()
        if w_master_options:
            if w_master_options.email_send:
                context = {'quick_order': quick_order}
                # 'call' submitter changes the template wording
                if submitter == 'call':
                    context.update({'submitter': submitter})
                config_quick = w_master_options.email_from
                to_obj_quick = list(w_master_options.emails_pool.all())
                to_quick = []
                for to_item_quick in to_obj_quick:
                    to_quick.append(to_item_quick.email_manager)
                # SMTP backend is built from stored options rather than settings.py
                backend_quick = EmailBackend(host=config_quick.email_host, port=config_quick.email_port, username=config_quick.email_host_user,
                                             password=config_quick.email_host_password, use_tls=config_quick.email_use_tls)
                html_quick = render_to_string('quick_email_template/quick_email_template.html', context=context).strip()
                msg_quick = EmailMultiAlternatives(subject=quick_order_subj, body=html_quick,
                                                  from_email=w_master_options.email_from.default_from_email,
                                                  to=to_quick, reply_to=['ivanmila24@gmail.com'], connection=backend_quick)
                msg_quick.content_subtype = 'html'
                msg_quick.mixed_subtype = 'related'
                msg_quick.send()
            if w_master_options.phone_send:
                smsc = SMSC()
                phone_numbers_quick = w_master_options.phones_pool.all()
                for phone_number_quick in phone_numbers_quick:
                    # '8' prefix — presumably the national dialing prefix; TODO confirm
                    phone_quick = '8' + phone_number_quick.phone_manager
                    # SMS text is transliterated from Ukrainian to Latin characters
                    full_str = translit(quick_order_subj, 'uk', reversed=True) + translit(". № заказу(ID): ", 'uk',
                                                                                         reversed=True) + str(
                        quick_order.id) + translit(". Деталі в поштові скринці чи в БД.", 'uk', reversed=True)
                    r = smsc.send_sms(phone_quick, full_str, sender="MilaTort Team")
            return True
        else:
            return False
    else:
        return False
|
import numpy as np
import cv2
from reference_line import ReferenceLine
class PeaksIdentifier:
    """Locate 'peak' vertices of a set of triangles relative to a reference line."""

    def __init__(self, triangles, ref_line=None):
        """triangles: objects exposing an ``approx`` (N, 2) vertex array.
        ref_line: optional ReferenceLine; computed from the triangles if omitted.
        """
        self.triangles = triangles
        self.ref_line = ReferenceLine(triangles) if ref_line is None else ref_line

    def identify(self):
        """Return {index: (x, y)} of peak vertices, ordered left to right.

        A vertex counts as a peak when it lies on the minority side of the
        reference line (signed side via the homogeneous line vector).

        Bug fix: the original used the bare ``reduce`` builtin, a NameError
        on Python 3; np.vstack stacks the list of vertex arrays directly,
        which is equivalent and works on both Python 2 and 3.
        """
        # stack all triangle vertices into one (M, 2) array
        vertices = np.vstack([t.approx for t in self.triangles])
        # homogeneous coordinates: append a ones column so the dot product
        # with the line's coefficient vector gives each vertex's signed side
        vertices = np.hstack([
            vertices,
            np.ones((vertices.shape[0], 1))
        ])
        relative_positions = vertices.dot(self.ref_line.vector_form)
        peaks = vertices[np.where(relative_positions < 0)[0]][:, :2]
        # peaks are the minority side; flip if the negative side is the majority
        if len(peaks) > len(vertices) - len(peaks):
            peaks = vertices[np.where(relative_positions > 0)[0]][:, :2]
        peaks = sorted(peaks, key=lambda pk: pk[0])
        return {i: p for i, p in enumerate(peaks)}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.