text stringlengths 8 6.05M |
|---|
# Generated by Django 2.1.2 on 2018-11-07 22:53
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: tightens the regex validator on Persona.email."""

    dependencies = [
        ('misPerrisDJ', '0015_auto_20181107_1932'),
    ]
    operations = [
        migrations.AlterField(
            model_name='persona',
            name='email',
            # Regex allows +._- in the local part and requires a 2-4 letter TLD.
            field=models.EmailField(max_length=254, validators=[django.core.validators.RegexValidator('^[+a-zA-Z0-9._-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,4}$')]),
        ),
    ]
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tf_util.systems import system_from_str
from train_dsn import train_dsn
import sys
from util import fct_integrals as integrals
from util import tf_integrals as tf_integrals
from util import fct_mf as mf
def compute_bistable_mu(Sini, ics_0, ics_1):
    """Solve the static mean-field equations from two initial conditions and
    return np.array([ss0, ss1]) — the third component of each final iterate.

    Sini is the std of n's initial overlap; ics_0/ics_1 are the two solver
    starting points (expected to converge to the two bistable states).
    """
    # Fixed model parameters (see Methods): means and stds of m, n and input.
    mean_m, mean_n, mean_i = 3.5, 1.0, 0.0
    std_m, std_n, std_orth = 1.0, 1.0, 1.0
    g = 0.8
    tol = 1e-10
    eps = 0.2
    par_vec = [mean_m, mean_n, mean_i, std_m, std_n, Sini, std_orth]

    def final_state(ics):
        # Run the static mean-field solver; keep only the last iterate's
        # third component (the quantity the behavior constrains).
        ys, _ = mf.SolveStatic(ics, g, par_vec, eps, tol)
        return ys[-1, 2]

    return np.array([final_state(ics_0), final_state(ics_1)])
# Command-line configuration: flow depth, network size, penalty/learning-rate
# exponents, and simulation horizon.
planar_layers = int(sys.argv[1])  # number of planar-flow layers
num_neurons = int(sys.argv[2])
c_init_order = int(sys.argv[3])   # log10 of the initial penalty coefficient
lr_order = int(sys.argv[4])       # log10 of the learning rate
T = int(sys.argv[5])
D = 2 * num_neurons               # latent dimensionality (two loadings per neuron)
c_init = 10 ** c_init_order
n = 1000
K = 1
M = n
k_max = 10
check_rate = 1000
max_iters = 5000
system_str = "rank1_rnn"
behavior_str = "bistable"
Sini = 0.5
# Two starting points, tiled to (K, M, 3) for the batched system object.
ics_0 = np.array([5.0, 5.0, 5.0], np.float64)
ics_1 = np.array([-5.0, 5.0, -5.0], np.float64)
Ics_0 = np.tile(np.expand_dims(np.expand_dims(ics_0, 0), 1), [K, M, 1])
Ics_1 = np.tile(np.expand_dims(np.expand_dims(ics_1, 0), 1), [K, M, 1])
system_class = system_from_str(system_str)
system = system_class(D, T, Sini, Ics_0, Ics_1, behavior_str)
# behavioral constraints
# Target means from the mean-field solution; small fixed variance targets.
mu = compute_bistable_mu(Sini, ics_0, ics_1)
Sigma = 0.01 * np.ones((2,))
behavior = {"mu": mu, "Sigma": Sigma}
random_seed = 0
TIF_flow_type = "PlanarFlowLayer"
nlayers = planar_layers
flow_dict = {
    "latent_dynamics": None,
    "TIF_flow_type": TIF_flow_type,
    "repeats": nlayers,
}
np.random.seed(0)
# Train the DSN; returns final cost, learned parameters, and the sufficient
# statistics of the trained distribution.
cost, phi, T_x = train_dsn(
    system,
    behavior,
    n,
    flow_dict,
    k_max=k_max,
    c_init=c_init,
    lr_order=lr_order,
    check_rate=check_rate,
    max_iters=max_iters,
    random_seed=random_seed,
)
|
from breezypythongui import EasyFrame
def computeDistance(height, index, bounces):
    """Return the total distance traveled by a ball dropped from `height`
    that rebounds to (height * index) after each of `bounces` bounces.

    Previously an empty stub returning None; implemented to mirror the
    BouncyGUI.computeDistance logic so it is usable standalone.
    """
    distance = 0
    for _ in range(bounces):
        distance += height  # fall
        height *= index     # rebound height decays by the bounciness index
        distance += height  # rise
    return distance
class BouncyGUI(EasyFrame):
    """GUI that computes the total distance traveled by a bouncing ball."""

    def __init__(self):
        EasyFrame.__init__(self, title = "Bouncy")
        self.addLabel(text = "Initial Height",row = 0, column = 0)
        self.heightField = self.addIntegerField(value = 0,row = 0,column = 1, width = 10)
        self.addLabel(text = "Index",row = 1, column = 0)
        self.indexField = self.addFloatField(value = 0,row = 1,column = 1, width = 10)
        self.addLabel(text = "Number of Bounces",row = 2, column = 0)
        self.bouncesField = self.addIntegerField(value = 0,row = 2,column = 1, width = 10)
        # Fix: attribute was misspelled 'compuuteButton'; it is write-only
        # (never read elsewhere), so correcting the name is safe.
        self.computeButton = self.addButton(text = "Compute",row = 3, column =0,columnspan =2,
                                            command = self.computeDistance)
        self.addLabel(text = "Distance",row = 4, column = 0)
        self.distanceField = self.addFloatField(value = 0,row = 4,column = 1, width = 10)

    def computeDistance(self):
        """Read the input fields, sum fall+rebound distances over all
        bounces, and display the total in the distance field."""
        height=self.heightField.getNumber()
        index=self.indexField.getNumber()
        bounces=self.bouncesField.getNumber()
        distance=0
        for eachPass in range(bounces):
            distance+=height   # fall
            height *=index     # rebound height shrinks by the index
            distance+=height   # rise
        self.distanceField.setNumber(distance)
def main():
    """Launch the Bouncy GUI and enter its event loop."""
    BouncyGUI().mainloop()

if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
# filename: ZeroAI.py
#from wordcloud import WordCloud, STOPWORDS
import mysql.connector
import mypsw
import time
import datetime
#import matplotlib.pyplot as plt
from basic import Basic
#from poster.streaminghttp import register_openers
import requests
from requests.packages.urllib3.filepost import encode_multipart_formdata
import json
import glob
#import forcastline
import f50_market_spider
import f51_simulated_trading
import f52_db_simulated
import asyncio
import _thread
import math
import common
import requests
import predict_batch
from collections import defaultdict
class word_in_color(object):
    """Module-level color buckets consulted by color_word() when rendering
    the word cloud; reset per request in init_mycursor().
    """
    word_in_rising_major = ''   # headline word of the strongest rising market
    word_in_falling_major = ''  # headline word of the strongest falling market
    word_in_comments = []       # caption strings drawn into the cloud
    word_in_rising_minor = []   # other rising-market words
    word_in_falling_minor = []  # other falling-market words
def color_word(word, *args, **kwargs):
    """WordCloud color_func: choose a hex color for `word` based on which
    bucket of word_in_color it falls into.

    NOTE(review): the original trailing comments named different colors than
    the hex codes actually are (e.g. #ffffff labeled "red"); they are
    corrected below to the literal hex meanings.
    """
    if (word == word_in_color.word_in_rising_major):
        color = '#ffffff'  # white — strongest rising market
    elif (word == word_in_color.word_in_falling_major):
        color = '#f44336'  # red — strongest falling market
    elif (word in word_in_color.word_in_comments):
        # Captions take the headline's color so they match the dominant mood.
        color = '#ffffff' if word_in_color.word_in_rising_major != '' else '#f44336'
    elif (word in word_in_color.word_in_rising_minor):
        color = '#7f7f7f'  # grey — minor rising market
    elif (word in word_in_color.word_in_falling_minor):
        color = '#7a211b'  # dark red — minor falling market
    else:
        color = '#000000'  # black — unclassified
    return color
def utc2local(utc_st):
    """Convert a naive UTC datetime to the host's local time zone.

    The offset is derived by rendering the same instant both ways, so DST
    and the host zone are honored automatically.
    """
    reference = time.time()
    # Local minus UTC rendering of the same timestamp yields the zone offset.
    tz_offset = (datetime.datetime.fromtimestamp(reference)
                 - datetime.datetime.utcfromtimestamp(reference))
    return utc_st + tz_offset
# Length (days) of the model's input window.
# NOTE(review): appears unused in this module — simulated_trading reads
# f50_market_spider.input_days_len instead; confirm before relying on it.
input_days_len = 225
def simulated_trading(next_id, input_text):
    """Backtest the space-separated market symbols in input_text and store a
    human-readable report under next_id via f52_db_simulated.

    Runs on a worker thread (see get_simulation_response); progress and the
    final report are communicated only through the result table.
    """
    # Create/clear the row so readers see "not ready yet" while we work.
    f52_db_simulated.save_result(next_id, '')
    symbol_list = input_text.split(' ')
    predict_list = []
    for symbol in symbol_list:
        # Look the symbol up; on a miss, retry with the last character
        # chopped off (handles suffixed/partial symbols).
        while len(symbol) > 0:
            for _ in range(5):  # the spider can fail transiently; retry up to 5x
                marketListString = f50_market_spider.search_for_symbol(symbol)
                if marketListString != None:
                    break
            # Fix: `len(marketListString) == 0` raised TypeError when all
            # five attempts returned None.
            if not marketListString:
                symbol = symbol[:-1]
            else:
                break
        if not marketListString:
            f52_db_simulated.save_result(next_id, '模拟失败,未找到市场'+symbol+'相关信息。')
            # Fix: previously fell through and crashed on json.loads(None);
            # skip to the next requested symbol instead.
            continue
        market, is_crypto = f50_market_spider.get_best_market(json.loads(marketListString))
        marketObj = market
        # Strip the data-source suffix from the display name.
        marketObj["name"] = marketObj["name"].replace("Investing.com","")
        timestamp_list, price_list, openprice_list, highprice_list, lowprice_list = f50_market_spider.get_history_price(str(marketObj["pairId"]), marketObj["pair_type"], 4800)
        if len(price_list) < f50_market_spider.input_days_len:
            continue  # not enough history to feed the model
        turtlex_predict = f50_market_spider.predict(marketObj["symbol"]+marketObj["name"], timestamp_list, price_list, openprice_list, highprice_list, lowprice_list, 4500, is_crypto)
        predict_list.append(turtlex_predict)
    #if is_crypto:
    simulate_result, win_count, loss_count, draw_count, max_loss, max_loss_days, year_list, max_single_win, max_single_loss, strategy_count = f51_simulated_trading.simulate_trading11(predict_list)
    #else:
    #    simulate_result, win_count, loss_count, draw_count, max_loss, max_loss_days, year_list, max_single_win, max_single_loss, strategy_count = f51_simulated_trading.simulate_trading(predict_list)
    init_balance = simulate_result["balance_dynamic_list"][0]
    last_balance = simulate_result["balance_dynamic_list"][-1]
    years = len(simulate_result["symbol_list"]) / 365
    # Compound annual growth rate, in percent.
    annual_yield = math.pow(last_balance / init_balance, 1 / years) * 100.0 - 100.0
    output_text = "模拟结果:\n" +str(input_text) + "\n海龟11量化交易决策引擎\n交易天数:" + str(len(simulate_result["symbol_list"])) + "\n盈利天数:" + str(win_count) + "\n亏损天数:" + str(loss_count) + "\n平局天数:" + str(draw_count)
    output_text += "\n胜率:" + str(round((win_count * 100.0 / (win_count + loss_count)),3) if (win_count + loss_count) > 0 else 0 ) + "%" + "\n最大亏损:" + str(round(max_loss * 100.0,3)) + '%' + "\n最长衰落期:" + str(max_loss_days) + "天"
    output_text += "\n初始余额:" + str(init_balance) + "\n最终余额:" + str(last_balance) + "\n年化收益:" + str(round(annual_yield,3)) + '%'
    output_text += "\n最大单日盈利:" + str(max_single_win) + "%\n最大单日亏损:" + str(max_single_loss) + '%' + "\n策略分布:" + json.dumps(strategy_count)
    output_text += "\n日期范围:[" + datetime.datetime.strftime(simulate_result["date_list"][0],f50_market_spider.dateformat) + ',' + datetime.datetime.strftime(simulate_result["date_list"][-1],f50_market_spider.dateformat) + ']\n历年收益:'
    for year_item in year_list:
        output_text += "\n"+str(year_item["year"])+":"+str(round(year_item["profit"],3)) + "%"
    output_text += "\n广告位:\n虚位以待……"
    f52_db_simulated.save_result(next_id, output_text)
def simulated_begin(next_id, input_text):
    """Thread entry point: run the simulation on the trimmed symbol list."""
    simulated_trading(next_id,input_text.strip())
def simulated_end(input_text):
    """Look up the stored report for the simulation id in input_text.

    Returns a "not generated yet" message while the worker thread is still
    running (the row is created empty when the simulation starts).
    """
    next_id = int(input_text.strip())
    result = f52_db_simulated.read_result(next_id)
    if len(result) == 0:
        result = "模拟结果未生成,请稍后查询!"
    return result
def get_prediction_text(exchange, symbol, prediction):
    """Format a prediction dict into the Chinese reply text sent to the user.

    prediction carries "strategy" (ai id/timestamp, epoch, fitness,
    validation, trade, trend_grid, long_short, mirror) and "orders" — the
    order details, or falsy when the engine recommends staying out.
    """
    order_item = prediction["orders"]
    #for order_key in prediction["orders"]:
    #    order_item = prediction["orders"][order_key][0]
    # "ai" doubles as a millisecond timestamp of the last training run.
    timeStamp = int(float(prediction["strategy"]["ai"])/1000.0)
    timeArray = time.localtime(timeStamp)
    # The encode/decode round-trips let strftime cope with the CJK
    # characters embedded in the format string.
    otherStyleTime = time.strftime("%Y年%m月%d日 %H时%M分%S秒".encode('unicode_escape').decode('utf8'), timeArray).encode('utf-8').decode('unicode_escape')
    sign_text = 'p.s. 海龟∞AI正在无限进化中,' \
        '于'+otherStyleTime+'完成了第'+str(prediction["strategy"]['epoch'])+'轮演化。\n' \
        '训练集年化:' + str(round(prediction["strategy"]["fitness"]*100.0,2)) + '%\n' \
        '验证集年化:' + str(round(prediction["strategy"]["validation"]*100.0,2)) + '%'
    if order_item:
        # Trend strategies (trend_grid >= 0.5) quote the stop in ATR
        # multiples from the order; grid strategies use the strategy-level
        # stop price.
        stop_loss_str = ('止损价:' + str(order_item["stop_loss_price"]) + '('+ str(order_item["stop_loss"]) +'ATR)') if prediction["strategy"]["trend_grid"] >= 0.5 else ('止损价:' + str(prediction["strategy"]['stop_loss_price']))
        text = "交易所:" + order_item["exchange"] + "\n市场:" + order_item["symbol"] + '\n' \
            '入场价:' + str(order_item["entry_price"]) + '\n' \
            '操作:' + ('网格' if prediction["strategy"]["trend_grid"] < 0.5 else ('做多' if prediction["strategy"]["long_short"] >= 0.5 else '做空')) + '\n' \
            '信心指数:' + str(prediction["strategy"]["trade"]*100.0) + '%(选择超过50%且信号最强的市场下单)\n' \
            'ATR:' + str(round(order_item["atr"],3)) + '%\n' + str(stop_loss_str) + '\n' \
            '头寸(仓位):' + str(round(order_item["amount"],3)) + '%/天(每天的头寸总和)\n镜像K线:' + ('是' if prediction["strategy"]["mirror"] else '否') + '\n' + sign_text
        #'——AI海龟∞(编号:'+str(prediction["strategy"]["ai"])+';回测年化:'+str(round(prediction["strategy"]["validation"]*100.0,2))+'%)'
    else:
        # No order: advise staying out of this market.
        text = "交易所:" + exchange + "\n市场:" + symbol + '\n' \
            '操作:观望\n' \
            '信心指数:' + str(prediction["strategy"]["trade"]*100.0) + '%\n镜像K线:' + ('是' if prediction["strategy"]["mirror"] else '否') + '\n' + sign_text
        #'——AI海龟∞(编号:'+str(prediction["strategy"]["ai"])+';回测年化:'+str(round(prediction["strategy"]["validation"]*100.0,2))+'%)'
    return text
# Per-user dialog state for the "auto" trading menu ('' = not in the dialog).
auto_state = defaultdict(str)

def process_auto_state(openID, state, input):
    """Advance the auto-trading menu state machine for one user message.

    state is '' (inactive), 'start' (main menu), 'start.1' (simulated
    trading submenu) or 'start.2' (auto trading submenu); `input` is the raw
    message text. Stores the next state in auto_state[openID] and returns
    the reply text.

    NOTE(review): if called with state == '' and input != 'auto', no branch
    matches and `ctt` is unbound. chat() currently guarantees that case
    never reaches here, but a defensive final else would be safer.
    """
    if state == "" and input == 'auto' \
        or state == "start.1" and input == '5' \
        or state == "start.2" and input == '6':
        # Entering the dialog, or backing out of a submenu to the main menu.
        next_state = 'start'
        ctt = '''您好,欢迎使用自动交易内测版。
请选择要进行的操作:
1 模拟交易;
2 自动交易;
3 退出。'''
    elif state == "start" and input == '3':
        # Explicit exit from the dialog.
        next_state = ''
        ctt = '''您已退出自动交易内测版。
输入"auto"重新进入自动交易内测版。'''
    elif state == "start" and input == '1':
        next_state = 'start.1'
        ctt = '''您已选择模拟交易。
请选择要进行的操作:
1 模拟账户余额;
2 模拟账户持仓;
3 模拟交易日志;
4 模拟交易预告;
5 返回。'''
    elif state == "start" and input == '2':
        next_state = 'start.2'
        ctt = '''您已选择自动交易。
请选择要进行的操作:
1 绑定/解绑手机号;
2 绑定/解绑交易所;
3 查询自动交易状态;
4 自动交易参数设置;
5 开启/中止自动交易;
6 返回。'''
    elif state == 'start.1' and input in ['1','2','3','4']:
        # Valid submenu choice: all real actions are delegated to Telegram.
        next_state = 'start.1'
        ctt = '''请访问电报机器人t.me/itrdbot进行后续操作!
您已选择模拟交易。
请选择要进行的操作:
1 模拟账户余额;
2 模拟账户持仓;
3 模拟交易日志;
4 模拟交易预告;
5 返回。'''
    elif state == 'start.2' and input in ['1','2','3','4','5']:
        next_state = state
        ctt = '''请访问电报机器人t.me/itrdbot进行后续操作!
您已选择自动交易。
请选择要进行的操作:
1 绑定/解绑手机号;
2 绑定/解绑交易所;
3 查询自动交易状态;
4 自动交易参数设置;
5 开启/中止自动交易;
6 返回。'''
    elif state == 'start.1':
        # Unrecognized input: repeat the current submenu.
        next_state = state
        ctt = '''对不起,暂不支持该指令。
您已选择模拟交易。
请选择要进行的操作:
1 模拟账户余额;
2 模拟账户持仓;
3 模拟交易日志;
4 模拟交易预告;
5 返回。'''
    elif state == 'start.2':
        next_state = state
        ctt = '''对不起,暂不支持该指令。
您已选择自动交易。
请选择要进行的操作:
1 绑定/解绑手机号;
2 绑定/解绑交易所;
3 查询自动交易状态;
4 自动交易参数设置;
5 开启/中止自动交易;
6 返回。'''
    elif state == 'start':
        next_state = state
        ctt = '''对不起,暂不支持该指令。
您好,欢迎使用自动交易内测版。
请选择要进行的操作:
1 模拟交易;
2 自动交易;
3 退出。'''
    auto_state[openID] = next_state
    return ctt
def chat(origin_input, openID):
    '''
    WeChat public-account auto-reply entry point.

    Routes the message to: the "auto" dialog state machine, the
    symbol@exchange predictor, the simulation start/result commands, batch
    group prediction, or the default single-market prediction.
    '''
    time_start=time.time()
    origin_input = origin_input.strip()
    # Strips full-width spaces pasted from mobile keyboards.
    origin_input = origin_input.replace(' ', '')
    # If the user is inside (or entering) the auto-trading dialog, the state
    # machine owns the message.
    user_auto_state = auto_state[openID]
    if not user_auto_state and origin_input == "auto" or user_auto_state:
        return process_auto_state(openID, user_auto_state,origin_input)
    if '@' in origin_input and len(origin_input) >= 3:
        return get_exchange_symbol_response(origin_input, openID)
    if origin_input[:2] == "模拟":  # "simulate <symbols>" command
        return get_simulation_response(origin_input)
    if origin_input[:2] == "结果":  # "result <id>" command
        return simulated_end(origin_input[2:])
    upper_input = origin_input.upper()
    if upper_input in predict_batch.group_set:
        return predict_batch.get_prediction(upper_input,openID)
    # Fallback: single-market v1 prediction on the raw text.
    return f50_market_spider.get_v1_prediction("exchange", origin_input, "1", "", "", openID)
def get_exchange_symbol_response(origin_input, openID):
    """Handle 'SYMBOL@exchange' queries by delegating to the v1 predictor;
    returns a usage hint when either half is empty."""
    # Strip line breaks pasted along with the query.
    origin_input = origin_input.replace('\n', '')
    # (both LF and CR variants)
    origin_input = origin_input.replace('\r', '')
    # Split only on the first '@' so exchange names may contain '@'.
    split_str_list = origin_input.split('@',1)
    symbol = split_str_list[0]
    exchange = split_str_list[1]
    if symbol and exchange:
        # NOTE(review): the third argument is the int 5 here but the string
        # "1" in chat(); confirm which type get_v1_prediction expects.
        return f50_market_spider.get_v1_prediction(exchange, symbol, 5, exchange, symbol, openID)
    else:
        return "请输入'市场@交易所'执行预测,例如:'BTCUSDT@binance'。"
def get_simulation_response(origin_input):
    """Allocate a result id, start the simulation on a background thread and
    tell the user how to fetch the report later.

    origin_input starts with the two-character command prefix ("模拟"),
    followed by space-separated market names.
    """
    max_id = f52_db_simulated.get_max_id()
    next_id = max_id + 1
    symbols_str = origin_input[2:].strip()
    # Fix: ''.split(' ') yields [''] (length 1), so the old
    # `len(symbol_list) == 0` check could never fire; test the raw string.
    if not symbols_str:
        return "请输入模拟+市场名1+市场名2+市场名3……市场名用空格分隔。"
    symbol_list = symbols_str.split(' ')
    # Fire-and-forget: simulated_begin writes its report to the result table.
    _thread.start_new_thread( simulated_begin, (next_id, symbols_str) )
    return "模拟开始,"+str(len(symbol_list)*5)+"分钟后输入'结果" + str(next_id) + "'查询模拟结果。"
def get_marketListString(origin_input):
    """Search the spider for origin_input; on a miss, retry with variants
    formed by keeping a prefix and a suffix of the (progressively shortened)
    string until one search succeeds.

    Returns the raw market-list JSON string, or a falsy value when every
    variant failed.
    """
    marketListString = ""
    marketListString = f50_market_spider.search_for_symbol(origin_input)
    if not marketListString or len(marketListString) == 0:
        for char_index in range(len(origin_input)):
            # Keep the first char_index chars and the last char_index_end
            # chars of the current candidate.
            char_index_end = len(origin_input) - char_index - 1
            if char_index == 0:
                new_input = origin_input[-char_index_end:]
            elif char_index_end == 0:
                new_input = origin_input[:char_index]
            else:
                new_input = origin_input[:char_index] + origin_input[-char_index_end:]
            marketListString = f50_market_spider.search_for_symbol(new_input)
            if marketListString and len(marketListString) > 0:
                break
            # NOTE(review): rebinding origin_input makes later iterations
            # slice the already-shortened string — appears intentional
            # (progressive shrinking), but verify against expected symbols.
            origin_input = new_input
    return marketListString
# def draw_single(aiera_version, input_text, alias_results, mycursor, params, origin_input):
# if aiera_version == "V1":
# return draw_single_v1(input_text, alias_results, mycursor)
# if aiera_version == "V2":
# return forcastline.draw_single_v2(input_text, alias_results, mycursor, params, origin_input)
# def draw_single_v1(input_text, alias_results, mycursor):
# output_text = ""
# alias_result = alias_results[0]
# select_predictions_statment = "SELECT * FROM predictions WHERE symbol = '" + alias_result[1] + "' ORDER BY time DESC"
# #print(select_predictions_statment)
# mycursor.execute(select_predictions_statment)
# predictions_results = mycursor.fetchall()
# if len(predictions_results) == 0:
# output_text = forcastline.text_no_market(input_text)
# return None, output_text
# select_prices_statment = "SELECT * FROM price WHERE symbol = '" + alias_result[1] + "'"
# #print(select_prices_statment)
# mycursor.execute(select_prices_statment)
# prices_results = mycursor.fetchall()
# picture_name = draw_market_v1(alias_result, prices_results, predictions_results)
# return picture_name, output_text
def help_text():
    """Return the bilingual (Chinese/English) welcome and usage message."""
    output_text = '您好!欢迎来到AI纪元,我是通向未来之路的向导。\n' \
        '请输入:“全球股指”、“商品期货”、“外汇”、“个股”或“加密货币”,获取实时的市场趋势强弱排名。\n' \
        '输入具体的市场代码如“上证指数”、“黄金”或“比特币”,获取市场未来十天的涨跌趋势。\n' \
        '请使用分散化与自动化的方式进行交易,并控制每笔交易的风险值小于1%。\n' \
        'Hello! Welcome to the AI Era, I am the guide to the future.\n' \
        'Please enter: "INDICES", "COMMODITIES", "CURRENCIES", "STOCKS" or "CRYPTOCURRENCY" to get real-time market trend rankings.\n' \
        'Enter specific market codes such as “SSE”, “Gold” or “Bitcoin” to get the market trending in the next 10 days.\n' \
        'Please use a decentralized and automated approach to trading and control the risk value of each transaction to less than 1%.\n'
    return output_text
def get_subtags(tagname, mycursor):
    """Return the direct sub-tags of `tagname` (excluding the tag itself).

    Security fix: the query is now parameterized — the previous string
    concatenation was an SQL-injection vector, since tagname ultimately
    originates from user input (see fetch_tag).
    """
    select_subtags_statment = "select * from subtags where tag = %s and tag <> subtag"
    #print(select_subtags_statment)
    mycursor.execute(select_subtags_statment, (tagname,))
    subtags_results = mycursor.fetchall()
    # Column 1 of each row is the subtag name.
    return [subtags_result[1] for subtags_result in subtags_results]
def get_markets_from_endtags(endtags, mycursor):
    """Fetch the market symbols attached to the given leaf tags."""
    # One %s placeholder per tag keeps the IN-clause fully parameterized.
    placeholders = ','.join(['%s'] * len(endtags))
    query = 'select * from tags where tag in (%s) ' % placeholders
    #print(query)
    mycursor.execute(query, endtags)
    # Column 1 of each row is the market symbol.
    return [row[1] for row in mycursor.fetchall()]
def get_markets_from_tag(tagname, mycursor):
    """Breadth-first walk of the tag tree rooted at `tagname`; collect the
    leaf tags and return the markets attached to them.
    """
    endtags = []
    subtags = get_subtags(tagname, mycursor)
    if len(subtags) == 0:
        # The tag itself is a leaf.
        endtags.append(tagname)
    while len(subtags) > 0:
        # Fix: the frontier must be rebuilt each level. The old code kept
        # one shared nextsubtags list that was never cleared, so as soon as
        # any leaf was reached the frontier stopped shrinking and the loop
        # (and endtags) grew forever.
        nextsubtags = []
        for subtag in subtags:
            nextsubtag = get_subtags(subtag, mycursor)
            if len(nextsubtag) == 0:
                endtags.append(subtag)
            else:
                nextsubtags.extend(nextsubtag)
        subtags = nextsubtags
    markets = get_markets_from_endtags(endtags, mycursor)
    return markets
def fetch_tag(input_text, mycursor):
    """Resolve input_text as a tag (returning fresh predictions for every
    market under it) or, failing that, as a fuzzy symbol-alias search.

    Returns ("", alias_results); in the tag branch each row is
    (symbol, date, F, symbol_alias).

    NOTE(review): the LIKE branches build SQL by concatenating user input —
    an SQL-injection vector; they should use parameterized queries like the
    tag branch does.
    """
    tagname = input_text
    markets = get_markets_from_tag(tagname, mycursor)
    # Only accept predictions whose horizon extends past 10 days ago.
    utc_today = datetime.datetime.utcnow()+datetime.timedelta(days=-10)
    today_str = utc_today.strftime("%Y-%m-%d")
    if len(markets) > 0:
        select_alias_statment = "SELECT pricehistory.SYMBOL, pricehistory.date, pricehistory.F, symbol_alias.SYMBOL_ALIAS FROM symbol_alias " \
            " inner join predictlog on symbol_alias.symbol = predictlog.symbol and predictlog.LOADINGDATE > '1950-1-1' and predictlog.MAXDATE >= '"+today_str+"' and predictlog.symbol in (%s) " \
            " inner join pricehistory on pricehistory.symbol = symbol_alias.symbol and pricehistory.date = predictlog.maxdate and pricehistory.l <> pricehistory.h and pricehistory.c > 0 " \
            " ORDER BY pricehistory.SYMBOL" % ','.join(['%s']*len(markets))
        #print(select_alias_statment)
        mycursor.execute(select_alias_statment, markets)
        alias_results = mycursor.fetchall()
    else:
        # Not a tag: normalize separators to SQL wildcards and do a fuzzy
        # alias lookup instead.
        input_text = input_text.replace("/","%").replace("-","%").replace("*","%").replace(" ","%").replace("?","%").replace("=","%")
        select_alias_statment = "SELECT * FROM symbol_alias WHERE symbol_alias LIKE '%" + input_text + "%' group by symbol"
        #print(select_alias_statment)
        mycursor.execute(select_alias_statment)
        alias_results = mycursor.fetchall()
        if len(alias_results) > 1:
            '''
            select_alias_statment = "SELECT predictions.*, symbol_alias.SYMBOL_ALIAS FROM symbol_alias " \
                " inner join predictions on predictions.symbol = symbol_alias.symbol WHERE symbol_alias LIKE '%" + input_text + "%' ORDER BY symbol ASC"
            '''
            # Several aliases matched: re-query joined with predictlog and
            # pricehistory so the rows match the tag-branch shape.
            select_alias_statment = "SELECT pricehistory.SYMBOL, pricehistory.date, pricehistory.F, symbol_alias.SYMBOL_ALIAS FROM symbol_alias " \
                " inner join predictlog on symbol_alias.symbol = predictlog.symbol and predictlog.LOADINGDATE > '1950-1-1' " \
                " inner join pricehistory on pricehistory.symbol = symbol_alias.symbol and pricehistory.date = predictlog.maxdate and pricehistory.l <> pricehistory.h and pricehistory.c > 0 " \
                " WHERE symbol_alias LIKE '%" + input_text + "%' ORDER BY pricehistory.symbol ASC"
            #print(select_alias_statment)
            mycursor.execute(select_alias_statment)
            alias_results = mycursor.fetchall()
    return "", alias_results
def init_mycursor():
    """Reset the per-request word-cloud color state and open a fresh MySQL
    connection with credentials from mypsw; returns (connection, cursor)."""
    word_in_color.word_in_rising_major = ''
    word_in_color.word_in_falling_major = ''
    word_in_color.word_in_comments = []
    word_in_color.word_in_rising_minor = []
    word_in_color.word_in_falling_minor = []
    mydb = mysql.connector.connect(
        host=mypsw.wechatguest.host,
        user=mypsw.wechatguest.user,
        passwd=mypsw.wechatguest.passwd,
        database=mypsw.wechatguest.database,
        # Force the classic auth plugin for older-server compatibility.
        auth_plugin='mysql_native_password'
    )
    mycursor = mydb.cursor()
    return mydb, mycursor
def picture_url(picture_name):
    """Upload picture_name to WeChat's media endpoint; returns the media id.

    NOTE(review): myMedia is created but never used, and Media.uplaod (sic)
    is invoked unbound — it works because uplaod takes no self parameter.
    """
    myMedia = Media()
    accessToken = Basic().get_access_token()
    filePath = picture_name
    mediaType = "image"
    murlResp = Media.uplaod(accessToken, filePath, mediaType)
    #print(murlResp)
    return murlResp
# def draw_market_v1(alias_result, prices_results, predictions_results):
# plt.figure(figsize=(6.4,6.4), dpi=100, facecolor='black')
# predictions_result = predictions_results[0]
# #plt.subplot(211)
# plt.style.use('dark_background')
# x=[i for i in range(1,122)]
# y=[prices_results[0][121-price_index] for price_index in range(120)]
# plt.title( alias_result[2] + ":" + alias_result[0] + " "
# + predictions_result[1].strftime('%Y-%m-%d %H:%M')
# + " UTC\n微信公众号:AI纪元 WeChat Public Account: AI Era V1") #图标题
# #plt.xlabel(u'过去120天收盘价') #X轴标签
# prediction_text, nextprice = forcastline.day_prediction_text(predictions_result[2],float(prices_results[0][2]),float(prices_results[0][122]))
# plt.xlabel( prediction_text ) #X轴标签
# #plt.plot(x,y,"green",linewidth=1, label=u"价格")
# y.append(nextprice)
# currentprice = prices_results[0][2]
# if nextprice >= currentprice:
# plt.plot(x,y,"white",label="ATR:"+ str(float(prices_results[0][122])*100) + "%" )
# plt.fill_between(x,min(y),y,facecolor="white",alpha=0.3)
# plt.plot(x,[currentprice] * 121, "w--", label="Price:"+str(currentprice))
# else:
# plt.plot(x,y,"red", label="ATR:"+ str(float(prices_results[0][122])*100) + "%" )
# plt.fill_between(x,min(y),y,facecolor="red",alpha=0.3)
# plt.plot(x,[currentprice] * 121, "r--", label="Price:"+str(currentprice))
# plt.annotate(xy=[122,currentprice], s=currentprice, bbox=None)
# if nextprice >= currentprice:
# bbox_props = dict(boxstyle='round',fc='white', ec='k',lw=1)
# plt.annotate(xy=[122,nextprice], s=nextprice, color='white', bbox=None)
# else:
# bbox_props = dict(boxstyle='round',fc='red', ec='k',lw=1)
# plt.annotate(xy=[122,nextprice], s=nextprice, color='red', bbox=None)
# plt.legend(loc = 2)
# picture_name = 'Img/' + forcastline.pinyin(alias_result[0]) + "_V1" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.jpg'
# plt.savefig(picture_name, facecolor='black')
# return picture_name
# def draw_tag(aiera_version, input_text, alias_results):
# stopwords = set(STOPWORDS)
# word_frequencies = {}
# market_list = []
# for alias_result in alias_results:
# predictions_result = alias_result
# x=[0,1]
# y=[0.0, score(predictions_result[2])]
# maxvalue = max(y)
# minvalue = min(y)
# if abs(maxvalue) >= abs(minvalue):
# bestvalue = maxvalue
# bestindex = y.index(maxvalue)
# else:
# bestvalue = minvalue
# bestindex = y.index(minvalue)
# word_single = predictions_result[3]
# word_single = "/" + word_single + "/"
# market_list.append((word_single, bestvalue))
# wordcount = abs(bestvalue)
# if bestvalue >= 0:
# word_in_color.word_in_rising_minor.append(word_single)
# else:
# word_in_color.word_in_falling_minor.append(word_single)
# word_frequencies[word_single] = wordcount
# market_list.sort(key=lambda x:x[1], reverse=False)
# time_str = "Time:"+max( [alias_result[1] for alias_result in alias_results] ).strftime('%Y-%m-%d') + "_UTC"
# if abs(market_list[0][1]) > abs(market_list[-1][1]):
# word_in_color.word_in_falling_major = market_list[0][0]
# comment_frequency = int(abs(market_list[0][1]))
# else:
# word_in_color.word_in_rising_major = market_list[-1][0]
# comment_frequency = int(abs(market_list[-1][1]))
# word_in_color.word_in_comments = ['输入:'+input_text,time_str,'微信公众号:AI纪元']
# word_frequencies[word_in_color.word_in_comments[0]] = comment_frequency
# word_frequencies[word_in_color.word_in_comments[1]] = comment_frequency
# word_frequencies[word_in_color.word_in_comments[2]] = comment_frequency
# market_index = 0
# y_market = [market[0] for market in market_list]
# x_score = [market[1] for market in market_list]
# y_pos = [i for i, _ in enumerate(y_market)]
# wordcloud = WordCloud(width = 700, height = 700,
# background_color ='black',
# color_func=color_word,
# stopwords = stopwords,
# font_path='simhei.ttf',
# collocations=False
# ).generate_from_frequencies(word_frequencies)
# plt.figure(figsize = (7.0, 7.0), facecolor = None)
# plt.imshow(wordcloud, interpolation="bilinear")
# plt.axis("off")
# plt.margins(x=0, y=0)
# plt.tight_layout(pad = 0)
# picture_name = 'Img/' + forcastline.pinyin(input_text) + "_" + aiera_version + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.jpg'
# plt.savefig(picture_name)
# return picture_name
def score(prediction_result):
    """Map a probability-like value in [0, 1] to a signed strength in
    [-100, 100]: 0.5 is neutral, 1 is +100 (rising), 0 is -100 (falling)."""
    rescaled = 2 * prediction_result - 1
    return rescaled * 100
class Media(object):
    """Thin client for the WeChat media-upload endpoint."""
    #def __init__(self):
        #register_openers()

    # Upload a local file as WeChat media and return its media_id.
    # Name kept as 'uplaod' (sic) and called unbound — Media.uplaod(...) —
    # because that is how picture_url() invokes it.
    def uplaod(accessToken, filePath, mediaType):
        postUrl = "https://api.weixin.qq.com/cgi-bin/media/upload?access_token=%s&type=%s" % (accessToken, mediaType)
        # Fix: the old code opened the file twice and never closed either
        # handle (leak), and built a multipart body/headers it never sent;
        # requests builds the multipart body itself from `files=`.
        with open(filePath, "rb") as media_file:
            urlResp = requests.post(postUrl, files={'media': media_file})
        #print(urlResp.text)
        return json.loads(urlResp.text)['media_id']
|
import random
def roll(number_of_dice=5):
    """
    Roll the indicated number of 6-sided dice using a random number generator
    and return the results sorted ascending.
    # Use the seed so our output is always reliable
    >>> random.seed(1234)
    >>> roll(5)
    [1, 1, 1, 4, 5]
    """
    faces = (1, 2, 3, 4, 5, 6)
    results = [random.choice(faces) for _ in range(number_of_dice)]
    results.sort()
    return results
|
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.mixins import UserPassesTestMixin
from django.urls import reverse_lazy
from issues.models import Employee
##### For function-based view #####
LOGIN_URL = 'login_view'
def at_least_level_1_employee_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=LOGIN_URL):
    """Function-view decorator: require an authenticated user whose Employee
    level is at least 1; otherwise redirect to login_url.

    Usable bare (@at_least_level_1_employee_required) or called with
    keyword overrides.
    """
    actual_decorator = user_passes_test(
        lambda user: user.is_authenticated and user.employee.level >= 1,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if view_func:
        return actual_decorator(view_func)
    return actual_decorator
def at_least_level_2_employee_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=LOGIN_URL):
    """Function-view decorator: require an authenticated user whose Employee
    level is at least 2; otherwise redirect to login_url.
    """
    actual_decorator = user_passes_test(
        lambda user: user.is_authenticated and user.employee.level >= 2,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if view_func:
        return actual_decorator(view_func)
    return actual_decorator
def at_least_level_3_employee_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=LOGIN_URL):
    """Function-view decorator: require an authenticated user whose Employee
    level is at least 3; otherwise redirect to login_url.
    """
    actual_decorator = user_passes_test(
        lambda user: user.is_authenticated and user.employee.level >= 3,
        login_url=login_url,
        redirect_field_name=redirect_field_name
    )
    if view_func:
        return actual_decorator(view_func)
    return actual_decorator
##### For Class-based View #####
class AtLeastLevel1RequiredMixin(UserPassesTestMixin):
    """Class-based-view mixin: allow only authenticated employees of level >= 1."""
    def test_func(self):
        return self.request.user.is_authenticated and self.request.user.employee.level >= 1
class AtLeastLevel2RequiredMixin(UserPassesTestMixin):
    """Class-based-view mixin: allow only authenticated employees of level >= 2."""
    def test_func(self):
        return self.request.user.is_authenticated and self.request.user.employee.level >= 2
class AtLeastLevel3RequiredMixin(UserPassesTestMixin):
    """Class-based-view mixin: allow only authenticated employees of level >= 3.

    Fix: the test previously checked `level >= 2` (copy-paste from the
    level-2 mixin), silently granting level-2 employees level-3 access —
    inconsistent with at_least_level_3_employee_required above.
    """
    def test_func(self):
        return self.request.user.is_authenticated and self.request.user.employee.level >= 3
|
# Draw two square spirals with turtle graphics: a right-angled one, then a
# 91-degree one that slowly precesses as it grows.
import turtle

canvas = turtle.Screen()
canvas.bgcolor("lightgreen")
tess=turtle.Turtle()
tess.color("blue")
tess.pensize(2)
# NOTE(review): turtle.speed() accepts 0-10; out-of-range values such as
# 200 are coerced (to 0 = fastest) — confirm that was the intent.
tess.speed(200)
# First spiral: right angles, side length growing by 2 each step.
s=1
for i in range(99):
    tess.right(90)
    tess.forward(s)
    s+=2
tess.stamp()  # leave an imprint of the turtle at the spiral's end
# Jump outward without drawing to start the second spiral in clear space.
tess.penup()
tess.left(45)
tess.forward(200)
tess.pendown()
# Second spiral: the extra degree per turn rotates the square as it grows.
l=1
for i in range(99):
    tess.right(91)
    tess.forward(l)
    l+=2
canvas.exitonclick()  # keep the window open until clicked
|
import torch as T
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
from . import feature
from . import preprocessing as prep
from . import processing as pro
class MolData(Dataset):
    """Custom PyTorch Dataset that takes a file containing \n separated SMILES"""
    def __init__(self, smiles, labels):
        # Featurization caps: molecules padded/truncated to at most 80 atoms
        # with at most 6 neighbours each.
        self.max_atom = 80
        self.max_degree = 6
        self.atoms, self.bonds, self.edges = self._featurize(smiles)
        # labels: numpy array converted to a float tensor aligned with `smiles`.
        self.label = T.from_numpy(labels).float()
    def _featurize(self, smiles):
        # Delegates to preprocessing.tensorise_smiles; returns (atoms, bonds, edges).
        return prep.tensorise_smiles(smiles, max_atoms=self.max_atom, max_degree=self.max_degree)
    def __getitem__(self, i):
        return self.atoms[i], self.bonds[i], self.edges[i], self.label[i]
    def split(self, batch_size):
        # Not implemented; placeholder kept for interface compatibility.
        return
    def __len__(self):
        return len(self.label)
class AllData(Dataset):
    """Dataset combining molecule graphs (atoms/bonds/edges) with a second,
    larger graph (node/verge — presumably the protein; TODO confirm) plus labels."""
    def __init__(self, pd_lst, data_path):
        # Caps for the molecule graph and the larger "_p" graph respectively.
        self.max_atom = 80
        self.max_degree = 6
        self.max_atom_p = 300
        self.max_degree_p = 15
        self.atoms, self.bonds, self.edges, self.node, self.verge, self.label = self._featurize(pd_lst, data_path)
    def _featurize(self, pd_lst, data_path):
        # Delegates to processing.pd_to_input with both sets of size caps.
        return pro.pd_to_input(pd_lst, data_path, max_degree=self.max_degree, max_atoms=self.max_atom, max_degree_p=self.max_degree_p, max_atoms_p=self.max_atom_p)
    def __getitem__(self, i):
        return self.atoms[i], self.bonds[i], self.edges[i], self.node[i], self.verge[i], self.label[i]
    def split(self, batch_size):
        # Not implemented; placeholder kept for interface compatibility.
        return
    def __len__(self):
        return len(self.label)
class AllData_pk(Dataset):
    """Dataset over already-featurized data: `out` is the 6-tuple
    (atoms, bonds, edges, node, verge, label) as produced elsewhere."""

    def __init__(self, out):
        (self.atoms, self.bonds, self.edges,
         self.node, self.verge, self.label) = self._featurize(out)

    def _featurize(self, out):
        # Identity hook, kept for API symmetry with MolData/AllData.
        return out

    def __getitem__(self, i):
        sample = (self.atoms[i], self.bonds[i], self.edges[i],
                  self.node[i], self.verge[i], self.label[i])
        return sample

    def split(self, batch_size):
        # Not implemented; placeholder kept for interface compatibility.
        return

    def __len__(self):
        return len(self.label)
import Adafruit_BBIO.UART as UART
import serial
from imu3000 import imu3000
from time import sleep
_TIMEOUT = 1
def openUart():
    """
    Opens UART1 (TX = 24, RX = 26) at 9600 baud and returns the opened
    serial.Serial object. (The previous docstring claimed it "returns true";
    it has always returned the port object.)
    """
    UART.setup("UART1")
    ser = serial.Serial(port = "/dev/ttyO1", baudrate=9600, timeout=_TIMEOUT)
    # Reset port: close and reopen to flush any stale state.
    ser.close()
    ser.open()
    return ser
# Python 2 script body: stream IMU-3000 readings over UART1 twice a second.
port = openUart()
if not port.isOpen():
    print "Failed to open UART1. Exiting."
    exit()
sensor = imu3000()
while True:
    # One line per sample: "X: <x>, Y: <y>, Z: <z>".
    x, y, z = sensor.getReadings()
    toSend = "X: " + str(x) + ", Y: " + str(y) + ", Z: " + str(z) + "\n"
    port.write(toSend)
    sleep(0.5)  # ~2 Hz update rate
|
from django.views.generic import TemplateView
class RegistrationIndex(TemplateView):
    """Static landing page for the registration app."""
    template_name = "registration/index.html"
|
#!/usr/bin/env python3
"""wcount.py: count words from an Internet file.
__author__ = "Wangkunhao"
__pkuid__ = "1800011715"
__email__ = "1800011715@pku.edu.cn"
"""
import sys
from urllib.request import urlopen
def count(lines):
    """Count word frequencies in a text string.

    A word is a maximal run of ASCII letters and apostrophes; counting is
    case-insensitive. Returns a dict mapping lowercased word -> occurrences.
    """
    words = []
    word = []
    state = 0  # 1 while inside a word, 0 between words
    nums = dict()
    for letter in lines:
        a = ord(letter)
        # Letters (A-Z, a-z) and the apostrophe (') extend the current word.
        if 65 <= a <= 90 or 97 <= a <= 122 or a == 39:
            word.append(letter)
            state = 1
        elif state == 1:
            words.append("".join(word))
            word[:] = []
            state = 0
    # Fix: flush the final word when the text ends mid-word; the old code
    # silently dropped it unless the input ended with a non-letter.
    if state == 1:
        words.append("".join(word))
    for w in words:
        w = w.lower()
        if w in nums:
            nums[w] += 1
        else:
            nums[w] = 1
    return nums
def wcount(lines, topn = 10):
    """count words from lines of text string, then sort by their counts
    in reverse order, output the topn (word count), each in one line.
    """
    # Pairs are (count, word), so plain tuple sort orders by frequency and
    # breaks ties alphabetically.
    ranked = sorted((cnt, wd) for (wd, cnt) in count(lines).items())
    if topn <= len(ranked):
        # Walk the sorted list backwards, taking the topn largest.
        top_pairs = ranked[:-topn - 1:-1]
    else:
        # Fewer distinct words than requested: return them all, descending.
        top_pairs = ranked[::-1]
    return {wd: cnt for (cnt, wd) in top_pairs}
def main():
    """Entry point: fetch the URL given on the command line and print the
    topn (default 10) most frequent words, one per line.

    Previously this only printed usage and did nothing with a supplied URL;
    the documented fetch/count/print behavior is now implemented.
    """
    if len(sys.argv) == 1:
        print('Usage: {} url [topn]'.format(sys.argv[0]))
        print(' url: URL of the txt file to analyze ')
        print(' topn: how many (words count) to output. If not given, will output top 10 words')
        sys.exit(1)
    url = sys.argv[1]
    topn = int(sys.argv[2]) if len(sys.argv) > 2 else 10
    try:
        with urlopen(url) as resp:
            text = resp.read().decode()
    except Exception as err:
        print('Failed to fetch {}: {}'.format(url, err))
        sys.exit(1)
    for word, num in wcount(text, topn).items():
        print('{:<20}{}'.format(word, num))

if __name__ == '__main__':
    main()
import math
import time
def distance(x1, y1, x2, y2):
    """Print and return the Euclidean distance between (x1, y1) and (x2, y2).

    The one-second pause is a cosmetic "calculating" effect from the
    original demo, kept for compatibility.
    """
    print("Calculating distance ...")
    time.sleep(1)
    space = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    print("Distance :", space)
    # Fix: return the computed value instead of discarding it, so callers
    # can use the result programmatically.
    return space

distance(4, 0, 6, 6)
import numpy as np
import pandas as pd
# select the vender has max sell log
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0; the modern
# spelling is .to_numpy() / .values.
raw_data = pd.read_csv('./sales_month_item.csv').as_matrix()
raw_data = np.delete(raw_data, 0, axis = 0) # remove top 1 row
raw_data = np.delete(raw_data, 0, axis = 1) # remove left 1 column
data = raw_data
raw_data = raw_data.tolist();
maxCounterVenderID = 0
maxCounter = 0
allVenderID = []
# For each distinct vendor id (column 2): count its rows, track the max,
# then drop that vendor's rows and repeat until the list is empty.
while ( len(raw_data) != 0 ):
    venderID = raw_data[0][2]
    allVenderID.append(venderID)
    counter = 0
    for row in raw_data:
        if ( row[2] == venderID ):
            counter += 1
    if ( counter >= maxCounter ):
        maxCounter = counter
        maxCounterVenderID = venderID
    # [x for x in a if x != 2]
    raw_data = [ r for r in raw_data if r[2] != venderID ]
    print(len(raw_data))
print ("maxCounterVenderID: ", maxCounterVenderID)
print ("maxCounter: ", maxCounter)
# Keep only the rows belonging to the most frequent vendor.
dataList = data.tolist()
filtered_data = [ r for r in dataList if r[2] == maxCounterVenderID ]
print ("len(filtered_data): ", len(filtered_data))
# print (data)
# Column 10 holds the item-description text used for bag-of-words features.
raw_data = np.array(filtered_data)[:,10]
print ("len(raw_data): ", len(raw_data))
print (raw_data)
# Tokens dropped from item descriptions before counting.
stopwords = ["not", "identified", "unknown", "item", "&"]

def preprocessing(raw):
    """Lowercase and whitespace-tokenize each item description, dropping
    stopword tokens; returns one token list per input string."""
    cleaned = []
    for entry in raw:
        tokens = entry.lower().split()
        cleaned.append([tok for tok in tokens if tok not in stopwords])
    return cleaned
def counting(data):
    """Assign a unique, first-seen-order integer index to every distinct token."""
    dic = {}
    for tokens in data:
        for tok in tokens:
            if tok not in dic:
                # len(dic) equals the running counter at insertion time
                dic[tok] = len(dic)
    return dic
def hot_vector(data, dic):
    """Build a binary bag-of-words matrix: hot[i][dic[w]] = 1 for each token w of row i.

    Fix: the debug output was a Python 2 `print` statement (a SyntaxError
    under Python 3, which the rest of this file targets); now a print() call.
    """
    hot = np.zeros([len(data), len(dic)])
    for i in range(len(data)):
        for w in data[i]:
            hot[i][dic[w]] = 1
    print(hot[:10, :10])  # debug peek at the top-left corner
    return hot
# Vectorize the item names of the selected vendor.
data = preprocessing(raw_data)
dic = counting(data)
hot = hot_vector(data,dic)
filtered_data = np.array(filtered_data)
print(filtered_data.shape)
# Drop unused columns (indices shift after each delete; order matters).
filtered_data = np.delete(filtered_data, 10, axis = 1) # remove col
filtered_data = np.delete(filtered_data, 9, axis = 1) # remove col
filtered_data = np.delete(filtered_data, 8, axis = 1) # remove col
filtered_data = np.delete(filtered_data, 6, axis = 1) # remove col
# filtered_data = np.delete(filtered_data, 2, axis = 1) # remove col
filtered_data = np.delete(filtered_data, 1, axis = 1) # remove col
filtered_data = np.delete(filtered_data, 0, axis = 1) # remove col
# Split a 'YYYY-MM' column into separate year and month fields.
for idx in range(filtered_data.shape[0]):
    [year,month] = filtered_data[idx][1].split('-')
    filtered_data[idx][0] = year
    filtered_data[idx][1] = month
filtered_data = np.c_[filtered_data,hot]
print (filtered_data)
from sklearn.svm import SVR
# NOTE(review): np.random.randn expects integer dimensions; passing an array
# column raises TypeError.  y also looks like it should be the actual sales
# column rather than random noise -- TODO confirm intent.
y = np.random.randn(filtered_data[:,3])
filtered_data = np.delete(filtered_data, 4, axis = 1) # remove col
filtered_data = np.delete(filtered_data, 3, axis = 1) # remove col
x = filtered_data
clf = SVR(C=1.0, epsilon=0.2)
# First 10 rows held out for validation; remainder used for training.
x_valid=x[:10]
y_valid=y[:10]
x=x[10:]
y=y[10:]
clf.fit(x, y)
y_predict = clf.predict(x_valid)
# SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
#    kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
|
"""Строит простой график квадратов чисел от 0 до 10"""
import matplotlib.pyplot as plt
input_values = [1, 2, 3, 4, 5]
def squares():
    """Return the squares of the numbers in input_values, in order."""
    return [value * value for value in input_values]
def plt_settings():
    """Configure chart title, axis labels and tick sizes (labels are in Russian)."""
    # Chart title and axis labels
    plt.title("Квадраты чисел от 0 до 10", fontsize=18)
    plt.xlabel("Числа", fontsize=14)
    plt.ylabel("Квадраты чисел", fontsize=14)
    # Font size of the axis tick labels
    plt.tick_params(axis="both", labelsize=14)
# Plot the squares, apply the display settings, and show the window.
plt.plot(input_values, squares(), linewidth=5)
plt_settings()
plt.show()
|
import time
# Interactive demo: read two integers, sum them, and clamp the reported
# sum to 20 when it falls strictly between 15 and 20.
x=int(input("Please enter the first number :"))
y=int(input("Please enter the second number :"))
print("Calculating sum :")
time.sleep(1)
# NOTE(review): `sum` shadows the builtin of the same name for the rest of the script.
sum=x+y
print("Sum :",sum)
time.sleep(1)
# Bounds are exclusive: exactly 15 or 20 takes the else branch.
if (sum>15) and (sum<20):
    print("As the sum of the 2 inputs is between 15 and 20 ...")
    time.sleep(1)
    print("Sum :",20)
else:
    time.sleep(1)
    print("As the sum of the 2 inputs is not between 15 and 20 ...")
    time.sleep(1)
    print("Sum :",sum)
|
import os, sys, re
from django.core.files import File
from django.core.files.storage import get_storage_class
from django.conf import settings
from api_docs.models import *
class Importer(object):
    """Base class for API-doc importers (Python 2 code: print statements,
    `except Exception, e`, `unicode`).

    Subclasses must define run() and are expected to provide SOURCE_FORMAT
    (referenced in clean_links but not set here) plus the page/class maps
    they fill while importing.
    """
    def __init__(self, topic, language, version, section=None, options=dict()):
        # NOTE(review): `options=dict()` is a mutable default argument shared
        # across calls; it is only read via .get() here, but callers that
        # mutate it would leak state between instances.
        self.topic = topic
        self.language = language
        self.version = version
        self.section = section
        self.options = options
        self.verbosity = int(options.get('verbosity', 1))
        self.PRIMARY_NAMESPACE = None
        self.DOC_ROOT = os.getcwd()
        self.class_map = {}
        self.page_map = {}
        self.page_refs = {}
        self.page_order = []
        self.namespace_map = {}
        self.namespace_order = []
        # Regexes capturing src/href URL, optional #anchor, and (for links) text.
        self.IMG_REGEX = re.compile(r'<img (?P<attribs>\w+=[\'"][\w\-\_\.\:\/\s]*[\'"]\s+)*src=[\'"](?P<url>[\w\-\_\.\:\/]*)(?P<anchor>#[\w\_\-\.]+)?[\'"][^>]*>')
        self.LINK_REGEX = re.compile(r'<a (?P<attribs>\w+=[\'"][\w\-\_\.\:\/\s]*[\'"]\s+)*href=[\'"](?P<url>[\w\-\_\.\:\/]*)(?P<anchor>#[\w\_\-\.]+)?[\'"][^>]*>(?P<text>.+)</a>')
        self.file_storage = get_storage_class()()
    def strip_links(self, line):
        """Remove every <a> tag from line, keeping only its inner text."""
        clean_line = line
        removal = self.LINK_REGEX.search(line)
        while removal:
            source = removal.group(0)
            replace = removal.group('text')
            # The greedy text group can swallow a closing tag; trim back to it.
            if '</a>' in replace:
                source = source[:source.index('</a>')+4]
                replace = replace[:replace.index('</a>')]
            if self.verbosity >= 3:
                print "Removing link: %s" % removal.group(0)
                print "  From: %s\n  To: %s" % (source, replace)
            clean_line = clean_line.replace(source, replace)
            removal = self.LINK_REGEX.search(clean_line)
        return clean_line
    def clean_links(self, line, source_filename, element_fullname=None):
        """Rewrite intra-doc hrefs to site URLs via the page/class maps;
        strip relative links that cannot be resolved."""
        try:
            # Clean Links
            clean_line = line
            source_line = line
            #~ if source_filename in clean_line:
                #~ clean_line = clean_line.replace('href="'+source_filename+'#', 'href="#')
                #~ source_line = clean_line
            match = self.LINK_REGEX.search(source_line)
            while match:
                if self.verbosity >= 3:
                    print "Matched line [%s:%s]: %s " % (match.start(), match.end(), source_line)
                match_url = match.group('url') or ''
                match_anchor = match.group('anchor') or ''
                if self.verbosity >= 2:
                    print "In: %s" % source_filename
                    print "  URL: %s" % match_url
                    print "  Anchor: %s" % match_anchor
                filename = self.lookup_from_url(match_url, match_anchor, element_fullname)
                if self.verbosity >= 2:
                    print "Checking for: %s" % filename
                if filename in self.page_map:
                    if self.verbosity >= 2:
                        print "Found in page map: " + self.page_map[filename]
                    new_link = '/'.join(('/api', self.topic.slug, self.language.slug, self.version.slug, self.page_map[filename], ''))
                    clean_line = clean_line.replace('href="'+match_url+match_anchor, 'href="'+unicode.encode(new_link, 'ascii')+match_anchor)
                elif filename in self.class_map:
                    if self.verbosity >= 2:
                        print "Found in class map: " + self.class_map[filename]
                    new_link = '/'.join(('/api', self.topic.slug, self.language.slug, self.version.slug, self.class_map[filename], ''))
                    clean_line = clean_line.replace('href="'+match_url+match_anchor, 'href="'+unicode.encode(new_link, 'ascii')+match_anchor)
                # Replace links to things we don't recognize
                else:
                    if self.verbosity >= 2:
                        print "No direct link found, replacing them instead"
                    url_base = match.group('url').split('/')[-1]
                    if self.verbosity >= 2:
                        print "URL base: %s" % url_base
                    # Fall back to a database lookup by source file name.
                    element = Element.objects.filter(source_file=url_base, source_format=self.SOURCE_FORMAT)
                    if element:
                        element = element[0]
                        new_link = '/'.join(('/api', element.section.topic_version.language.topic.slug, element.section.topic_version.language.slug, element.section.topic_version.slug, element.fullname, ''))
                        clean_line = clean_line.replace('href="'+match_url+match_anchor, 'href="'+unicode.encode(new_link, 'ascii')+match_anchor)
                    else:
                        page = Page.objects.filter(source_file=url_base, source_format=self.SOURCE_FORMAT)
                        if page:
                            page = page[0]
                            new_link = '/'.join(('/api', page.section.topic_version.language.topic.slug, page.section.topic_version.language.slug, page.section.topic_version.slug, page.fullname, ''))
                            clean_line = clean_line.replace('href="'+match_url+match_anchor, 'href="'+unicode.encode(new_link, 'ascii')+match_anchor)
                        # Remove links we couldn't match
                        elif '://' not in match.group('url'):
                            #if '://' in match.group('url'):
                                ## Continue trying to match on this line
                                #source_line = source_line[match.end():]
                                #match = self.LINK_REGEX.search(source_line)
                                #continue
                            if self.verbosity >= 3:
                                print "Removing links from: %s" % clean_line
                            removal = self.LINK_REGEX.search(clean_line)
                            while removal:
                                source = removal.group(0)
                                replace = removal.group('text')
                                if '</a>' in replace:
                                    source = source[:source.index('</a>')+4]
                                    replace = replace[:replace.index('</a>')]
                                if self.verbosity >= 3:
                                    print "Removing link: %s" % removal.group(0)
                                    print "  From: %s\n  To: %s" % (source, replace)
                                clean_line = clean_line.replace(source, replace)
                                removal = self.LINK_REGEX.search(clean_line)
                # Continue trying to match on this line
                source_line = source_line[match.end('url'):]
                match = self.LINK_REGEX.search(source_line)
            return clean_line
        except Exception, e:
            print "Error cleaning links: %s" % line
            # NOTE(review): the `elif` below tests the same condition as the
            # `if` and is therefore unreachable; also, the verbosity >= 2 path
            # falls through and implicitly returns None instead of a line.
            if self.verbosity >= 2:
                import pdb; pdb.set_trace()
                self.parse_line(line, source_filename, element_fullname)
            elif self.verbosity >= 2:
                raise e
            else:
                return line
    def clean_images(self, line, source_filename, element_fullname=None):
        """Upload every relative <img> referenced on the line to file storage
        and rewrite its src to the uploaded URL."""
        try:
            # Clean images
            clean_line = line
            source_line = line
            if '<img ' in line:
                img_match = self.IMG_REGEX.search(source_line)
                while img_match:
                    if self.verbosity >= 2:
                        print "Image in %s: %s" % (source_filename, img_match.group('url'))
                    # Resolve the image path relative to the source file or DOC_ROOT.
                    if img_match.group('url').startswith('.'):
                        if source_filename.startswith('/'):
                            src_filename = os.path.abspath(os.path.join(os.path.dirname(source_filename), img_match.group('url')))
                            rel_filename = os.path.join('api', self.topic.slug, self.language.slug, self.version.slug, element_fullname, src_filename[len(self.DOC_ROOT)+1:])
                        else:
                            src_filename = os.path.abspath(os.path.join(self.DOC_ROOT, os.path.dirname(source_filename), img_match.group('url')))
                            rel_filename = os.path.join('api', self.topic.slug, self.language.slug, self.version.slug, element_fullname, src_filename[len(self.DOC_ROOT)+1:])
                    else:
                        src_filename = os.path.join(self.DOC_ROOT, img_match.group('url'))
                        rel_filename = os.path.join('api', self.topic.slug, self.language.slug, self.version.slug, element_fullname, img_match.group('url'))
                    if self.verbosity >= 2:
                        print "Uploading %s to %s" % (src_filename, rel_filename)
                    uploaded_file = self.upload_file(src_filename, rel_filename)
                    if self.verbosity >= 2:
                        print "Upload successful: %s" % uploaded_file
                    clean_line = clean_line.replace(img_match.group('url'), uploaded_file)
                    source_line = source_line[img_match.end():]
                    img_match = self.IMG_REGEX.search(source_line)
            return clean_line
        except Exception, e:
            print "Error cleaning images: %s" % line
            # NOTE(review): same dead `elif` / implicit-None pattern as clean_links.
            if self.verbosity >= 2:
                import pdb; pdb.set_trace()
                self.parse_line(line, source_filename, element_fullname)
            elif self.verbosity >= 2:
                raise e
            else:
                return line
    def just_text(self, line):
        """Strip every <...> tag and non-ASCII character from line."""
        while '<' in line:
            start_of_tag = line.find('<')
            end_of_tag = line.find('>', start_of_tag)
            line = line[:start_of_tag] + line[end_of_tag+1:]
        return line.encode('ascii', 'ignore')
    def parse_line(self, line, source_filename, element_fullname=None):
        """Clean images first (uploads may rewrite URLs), then links."""
        clean_line = self.clean_images(line, source_filename, element_fullname)
        clean_line = self.clean_links(clean_line, source_filename, element_fullname)
        return clean_line
    def lookup_from_url(self, url, anchor, element_fullname):
        """Prefer the anchor when it names a known class; otherwise return the URL."""
        if not anchor:
            return url
        if anchor != '' and anchor[1:] in self.class_map:
            return anchor[1:]
        else:
            return url
    def parse_filename(self, filename):
        """Hook for subclasses; identity by default."""
        return filename
    def parse_href(self, href):
        """Hook for subclasses; identity by default."""
        return href
    def parse_pagename(self, pagename):
        """Drop a trailing '.html' and slug-normalize separators."""
        if pagename.endswith('.html'):
            pagename = pagename[:-5]
        return pagename.replace('/', '-').replace(' ', '_')
    def parse_classname(self, classname):
        """Hook for subclasses; identity by default."""
        return classname
    def parse_namespace(self, namespace):
        """Apply forced/default namespace options, else keep the given one."""
        if self.options.get('force_namespace', None):
            return self.options.get('force_namespace')
        elif self.options.get('namespace', None) and not namespace:
            return self.options.get('namespace')
        else:
            return namespace
    def upload_file(self, src_filename, rel_filename):
        """Replace any existing stored file at rel_filename and return its URL."""
        if self.file_storage.exists(rel_filename):
            self.file_storage.delete(rel_filename)
        src_file = File(open(src_filename, 'rb'))
        dst_file = self.file_storage.save(rel_filename, src_file)
        src_file.close()
        return self.file_storage.url(dst_file)
    def run(self):
        """Subclass responsibility: perform the actual import."""
        raise NotImplementedError("Importer classes must define a 'run' method")
|
import numpy as np
def sigmoid(x):
    """Logistic activation: map any real x into the open interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-x))
vf = np.vectorize(sigmoid)  # element-wise sigmoid over arrays
def predict(input_vector):
    """Forward pass through the 4-4-2 network; return (argmax class, output layer).

    Relies on the module-level weight matrices w01 (4x4) and w10 (2x4) and the
    vectorized sigmoid `vf`.
    """
    assert(input_vector.shape==(4,1))
    # hidden layer: weighted sum then sigmoid
    h1_layer = vf(np.dot(w01, input_vector))
    # output layer: weighted sum then sigmoid
    op_layer = vf(np.dot(w10, h1_layer))
    return np.argmax(op_layer), op_layer
# Load the dataset: one Python literal per line.
# NOTE(review): `file` shadows the builtin, and eval() executes arbitrary code
# from sample.txt -- ast.literal_eval would be the safe equivalent for literals.
file = open("sample.txt")
dataset = [eval(x.rstrip("\n")) for x in file.readlines()]
file.close()
print("---------------------DATASET-----------------\n",dataset)
# Dividing the the dataset
training_data = dataset[8:]
testing_data = dataset[:8]
print("-----------------------traing------------------\n",
      training_data, len(training_data))
print("------------------test---------------------\n",
      testing_data, len(testing_data))
# Placeholder layer shapes (values unused; overwritten during prediction).
ip_layer = np.array([[None],
                     [None],
                     [None],
                     [None],
                     ])
h1_layer = np.array([[None],
                     [None],
                     [None],
                     [None]])
op_layer = np.array([[None],
                     [None]])
# Initializing Weight matrices
np.random.seed(0)
w01 = np.random.randn(4,4)
w10 = np.random.randn(2,4)
'''print(np.dot(w01,np.array([[1],
                            [2],
                            [3],
                            [4]
                            ])))'''
print(predict(np.array([[0],[0],[0],[0]])))
import math
# NOTE(review): computed but never used anywhere below -- dead statement.
val = math.factorial(5)
def fact(n):
    """Return n! for non-negative n, computed as an iterative product (fact(0) == 1)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
# Print a small factorial table for 1..9.
for i in range(1,10):
    print('fact({})={}'.format(i,fact(i)))
|
import os
import nltk
from pytest import fixture
from genderbias.document import Document
# NOTE(review): porter and wnl are unused in the active tests; they appear to
# support the commented-out lemmatizing test further down.
porter = nltk.PorterStemmer()
wnl = nltk.WordNetLemmatizer()
example_dir = os.path.dirname(__file__) + "/../example_letters/"
# Expected per-letter statistics for the two example recommendation letters.
examples = {'m': dict(file=example_dir + "letterofRecM", sentences=13, commas=12, words=446),
            'f': dict(file=example_dir + "letterofRecW", sentences=26, commas=29, words=693)
            }
@fixture(params=examples.values())
def example_doc(request):
    """Yield each example's metadata plus Documents built from its path and raw text."""
    with open(request.param['file']) as stream:
        return dict(request.param,
                    document=Document(request.param['file']),
                    document_string=Document(stream.read()))
def test_can_read_examples(example_doc):
    """Sanity check: each example letter file opens and is readable."""
    with open(example_doc['file'], 'r') as stream:
        assert stream.readable()
def test_document_from_file_equals_document_from_string(example_doc):
    """A Document built from a path and one built from the raw text must agree.

    Fixes: the words_with_indices() assertion was duplicated (one copy
    removed); locals renamed so they no longer shadow the builtins
    `file` and `str`-adjacent `string`.
    """
    doc_from_file = example_doc['document']
    doc_from_string = example_doc['document_string']
    assert doc_from_file.sentences() == doc_from_string.sentences()
    assert doc_from_file.words() == doc_from_string.words()
    assert doc_from_file.words_with_indices() == doc_from_string.words_with_indices()
    assert doc_from_file.stemmed_words() == doc_from_string.stemmed_words()
def test_words(example_doc):
    """words() returns a list with the expected word count for each letter."""
    words = example_doc['document'].words()
    assert isinstance(words, list)
    assert len(words) == example_doc['words']
def test_words_with_indices(example_doc):
    """Each word's (start, stop) span is well-formed and strictly increasing."""
    words_with_indices = example_doc['document'].words_with_indices()
    assert len(words_with_indices) == example_doc['words']
    latest_start = -1
    latest_stop = 0
    for word, start, stop in words_with_indices:
        assert stop > start
        assert stop > latest_stop
        latest_stop = stop
        assert start > latest_start
        latest_start = start
def test_stemming(example_doc):
    """Total character count of stemmed words is strictly smaller than of raw words."""
    assert (sum([len(x) for x in example_doc['document'].words()]) >
            sum([len(x) for x in example_doc['document'].stemmed_words()]))
# def test_lemmatizing():
# words = ['strangely']
# stemmed = [porter.stem(w) for w in words]
# lemmaed = [wnl.lemmatize(w) for w in stemmed]
# assert lemmaed == ['strange']
def test_sentence(example_doc):
    """sentences() yields newline-free strings with the expected count."""
    s = example_doc['document'].sentences()
    for ss in s:
        assert "\n" not in ss
    assert len(s) == example_doc['sentences']
def test_words_by_part_of_speech(example_doc):
    """The POS mapping contains the expected number of comma tokens."""
    c = example_doc['document'].words_by_part_of_speech()
    assert len(c[',']) == example_doc['commas']
|
from NextVersion import nextVersion
# verify that the function output matches the expected output
def verifyFunctionOutput(functionOutput, expectedOutput):
    """Return True when functionOutput equals expectedOutput."""
    return functionOutput == expectedOutput
# verify that the function doesn't match the given output
def verifyFunctionOutputIncorrect(functionOutput, output):
    """Return True when functionOutput differs from output."""
    return functionOutput != output
def testNextVersion(input, output):
    """Print the expected equality and whether nextVersion(input) produced it."""
    print('nextVersion("' + input + '") == "' + output + '"')
    passed = verifyFunctionOutput(nextVersion(input), output)
    print("Result: " + str(passed))
def testNextVersionIncorrect(input, output):
    """Print the expected inequality and whether nextVersion(input) avoided it."""
    print('nextVersion("' + input + '") != ' + str(output))
    passed = verifyFunctionOutputIncorrect(nextVersion(input), output)
    print("Result: " + str(passed))
# Ad-hoc test battery: results are printed, not asserted.
#verify that the number increases
testNextVersion("0","1")
#verify that the function output is a string
#check against an int - the output shouldn't be an int
testNextVersionIncorrect("0",1)
#verify that the version is incremented
#only the last number will be affected
testNextVersion("0.1","0.2")
testNextVersion("0.0.1","0.0.2")
testNextVersion("1.1.1","1.1.2")
testNextVersion("1.2.3","1.2.4")
#verify incrementing 9 by 1 will increment the next number in the sequence
testNextVersion("0.9.9","1.0.0")
#verify that incrementing 9 in the first position results in 10
testNextVersion("9.9.9","10.0.0")
#verify that incrementing the version affects the correct numbers
testNextVersion("9.0.9","9.1.0")
testNextVersion("1.0.9.9","1.1.0.0")
testNextVersion("1.2.3.4.5.6.7.8","1.2.3.4.5.6.7.9")
|
from .api import Api # noqa
from .client import Client # noqa
from .error import * # noqa
from .models import * # noqa
from .utils.constants import TOPICS # noqa
|
#!/usr/local/bin/python
# By: Kyle Finley
# Description: Creates a CSV of all snapshots in an AWS account for auditing
import boto3
from botocore.client import Config
import csv
#- Enables support for multiple Named Profiles -#
# (Python 2 script: raw_input and print statements.)
profile = None
session = None
while session == None:
    profile = raw_input('AWS Named Profile: ') #user prompt
    try: #validates profile
        session = boto3.session.Session(profile_name=profile)
    except Exception:
        print 'AWS Named Profile "%s" was not found.' % profile
        session = None
#- Setting up boto -#
boto3config = Config(connect_timeout=50, read_timeout=300) #extends timeout
ec2 = session.client('ec2', config=boto3config)
paginator = ec2.get_paginator('describe_snapshots')
OwnerId = str(session.client('sts').get_caller_identity().get('Account')) #gets root account ID
#- Configure botocore retries -#
## This paired with the long timeout supports a large number of snapshots
# NOTE(review): reaches into botocore private attributes (_unique_id_handlers,
# _checker) -- fragile across botocore versions.
unique_handlers = ec2.meta.events._unique_id_handlers
checker = unique_handlers['retry-config-ec2']['handler']._checker
checker.__dict__['_max_attempts'] = 20
#- Collecting Snapshots -#
snap_data = list() #stores data for all snapshots
response_iterator = paginator.paginate(
    OwnerIds=[OwnerId], #required for accurate and fast response
    PaginationConfig={
        'PageSize': 500 #lowers the chance of timeout
    }
)
#- Processing Snapshots -#
for page in response_iterator:
    for snapshot in page['Snapshots']:
        data = {'SnapshotId' : snapshot['SnapshotId'], 'VolumeId' : snapshot['VolumeId'], 'StartTime' : snapshot['StartTime']} #creates a dict to keep data together
        if 'Tags' in snapshot: #some snapshots dont have tags
            data['Tags'] = snapshot['Tags']
        else:
            data['Tags'] = ''
        if 'Description' in snapshot: #some snapshots dont have a description
            data['Description'] = snapshot['Description']
        else:
            data['Description'] = ''
        snap_data.append(data) #adds dict to list of all snapshot data
#- Output -#
# NOTE(review): a new DictWriter is built per row and writeheader() is never
# called, so the CSV has no header row; f.close() inside `with` is redundant.
with open('SnapshotOutput.csv', 'wb') as f: #name of file saved to same directory as script
    for item in snap_data:
        w = csv.DictWriter(f, item.keys())
        w.writerow(item)
    f.close()
print 'Snapshots found: %i' % len(snap_data) #shows number of snapshots in terminal
|
import sys
import threading
from src.tcp import read_request, handle_request, write_response
from src.tcp.tcp_server import create_server_socket, accept_connection
def serve_client(client_socket, cid):
    """Serve one client: read a single request, process it, write the response."""
    request = read_request(client_socket)
    if request is None:
        # read_request signals a dropped connection with None
        print(f"Client #{cid} disconnected.")
        return
    write_response(client_socket, handle_request(request), cid)
def run_server(port=53210):
    """Accept TCP connections forever, serving each client on its own thread."""
    server_socket = create_server_socket(port)
    cid = 0  # sequential client id, used only for log messages
    while True:
        client_socket = accept_connection(server_socket, cid)
        threading.Thread(target=serve_client, args=(client_socket, cid)).start()
        cid += 1
if __name__ == '__main__':
    run_server(port=int(sys.argv[1]))  # port is required as argv[1]; no CLI fallback to the default
|
import torchtext
from torchtext import data
from torchtext import datasets
import copy
import torch
import os
from collections import Counter, OrderedDict
import argparse
parser = argparse.ArgumentParser(description='Build a vocabulary for Transformer.')
parser.add_argument('--data_path', type=str, default='./')
parser.add_argument("--vocab_file", type=str, default='es-de-fr.s.w.pt')
parser.add_argument('--src_train', type=str, default='es,de,fr', help='source language marker')
parser.add_argument('--trg_train', type=str, default='en,en,en', help='target language marker')
parser.add_argument('--base', type=str, default='bpe', choices=['byte', 'char', 'bpe', 'word'])
parser.add_argument('--max_vocab_size', type=int, default=80000, help='max vocabulary size')
# NOTE(review): action='store_false' means passing --share_embeddings DISABLES
# sharing (the default is True) -- confirm this is intended given the help text.
parser.add_argument('--share_embeddings', action='store_false', help='share embeddings between encoder and decoder')
parser.add_argument('--eos', type=str, default='<eos>')
args = parser.parse_args()
# -- default setting -- #
data_path = args.data_path
tokenizer = lambda s: s.split()  # whitespace tokenizer over pre-BPE'd text
# -- source / target field --- #
srcs_train = args.src_train.split(',')
trgs_train = args.trg_train.split(',')
languages = list(set(srcs_train + trgs_train))
print('build the vocabulary.')
# --- setup dataset (no lazy mode when building the vocab) --- #
counter = Counter()
sources = []
# BUG FIX: this was `zip([srcs_train, trgs_train])`, which iterates over a
# single-element-per-item list and yields 1-tuples, so the `src, trg`
# unpacking raised ValueError on the first iteration.  The two language
# lists must be zipped together pairwise.
for src, trg in zip(srcs_train, trgs_train):
    DATADIR = '{}/{}-{}'.format(data_path, src, trg)
    if not os.path.exists(DATADIR):
        # fall back to the reversed language-pair directory name
        DATADIR = '{}/{}-{}'.format(data_path, trg, src)
    if not os.path.exists(DATADIR):
        raise FileNotFoundError(DATADIR)
    with open('{}/train.bpe.src'.format(DATADIR), 'r') as f:
        sources.append(f.readlines())
    with open('{}/train.bpe.trg'.format(DATADIR), 'r') as f:
        sources.append(f.readlines())
# Count token frequencies over every source and target training file.
# (Loop variable renamed from `data`, which shadowed the torchtext import.)
for corpus in sources:
    for x in corpus:
        counter.update(tokenizer(x))
# Special tokens first (OrderedDict.fromkeys deduplicates while preserving
# order, which fixes their indices), then one marker token per language.
specials = list(OrderedDict.fromkeys(
    tok for tok in ['<unk>', '<pad>', '<init>', '<eos>'] + ['<{}>'.format(a) for a in languages] if tok is not None))
meta_vocab = torchtext.vocab.Vocab(counter, specials=specials, max_size=args.max_vocab_size)
print('setup the dataset.')
# The same vocab object is saved twice: a shared source/target vocabulary pair.
torch.save([meta_vocab, meta_vocab], data_path + '/' + args.vocab_file)
print('done. {}'.format(len(meta_vocab)))
print(meta_vocab.itos[:10])
|
#------------------------------------------------------------------------------------------------------------------
# train_test_plot_def
#
# MIT License
# Dr Debdarsan Niyogi (debdarsan.niyogi@gmail.com)
#------------------------------------------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
from IPython.display import HTML
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support as score
# Import LogisticRegression, KNeighborsClassifier, SVM, DecisionTreeClassifier, RandomForestClassifier, XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import warnings
warnings.filterwarnings("ignore")
# Pre-defined Classifier Models
model_lrg = LogisticRegression(max_iter=10000)
model_knn = KNeighborsClassifier()
model_svm = svm.SVC()
model_dtr = DecisionTreeClassifier()
model_rfr = RandomForestClassifier()
model_xgb = XGBClassifier()
# List of pre-defined models
models = [model_lrg, model_knn, model_svm, model_dtr, model_rfr, model_xgb]
# List of pre-defined model names
model_names = ['Logistic Regression', 'K-Neighbors', 'Support Vector',
               'Decision Tree', 'Random Forest', 'XGBoost']
# First letters of model names; index positions must stay aligned with the
# `models` and `model_names` lists above (used by train_test_plot_def).
model_ids = 'LKSDRX'
# Initialize an empty list of classification algorithms
algorithm_list = []
# Initialize an empty list for the accuracy of each algorithm
accuracy_list = []
def _plot_confusion_matrix(conf_mat, classes, normalize = False, title = 'Confusion Matrix',
                           cmap = plt.cm.Greens, size = 5):
    """
    Plots confusion matrix for binary or multi-class classification
    Parameters:
    ----------
    conf_mat: confusion matrix, given test and predicted values of the target (dependent) variable
    classes: comma separated unique class names of the target variable to be predicted
    normalize: boolean flag indicating if normalization is to be applied
    title: title of the confusion matrix plot
    ax: axes object(s) of the plot
    cmap: color map
    size: integer controlling size of the plot and the labels proportionally
    Returns:
    -------
    None
    """
    fig, ax = plt.subplots(figsize = (size, size))
    ax.set_title(title, fontsize = size + 8)
    plt.tick_params(axis = 'x', labelsize = size + 8)
    plt.tick_params(axis = 'y', labelsize = size + 8)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation = 45, size = size + 8)
    plt.yticks(tick_marks, classes, size = size + 8)
    plt.sca(ax)  # make `ax` the current axes for the pyplot calls below
    # integer counts unless normalized fractions are requested
    fmt = '.2f' if normalize else 'd'
    # cells darker than half the max get white text for contrast
    thresh = conf_mat.max() / 2.
    for i, j in itertools.product(range(conf_mat.shape[0]), range(conf_mat.shape[1])):
        ax.text(j, i, format(conf_mat[i, j], fmt),
                horizontalalignment = "center",
                color = "white" if conf_mat[i, j] > thresh else "black", size = size + 8)
    ax.set_ylabel('True Label', fontsize = size + 8)
    ax.set_xlabel('Predicted Label', fontsize = size + 8)
    ax.imshow(conf_mat, interpolation = 'nearest', cmap = cmap)
    plt.show()
    return
def _compare_algos(algorithm_list, accuracy_list, size = 5):
    """
    Plots algorithm names vs the testing accuracy figures
    Parameters:
    ----------
    algorithm_list: list of names of the algorithms
    accuracy_list: list of accuracy values
    size: integer controlling the size of the plot and the labels proportionally
    Returns:
    -------
    None
    """
    # Combine the list of algorithms and list of accuracy scores into a dataframe
    # and sort the values based on accuracy score
    df_accuracy = pd.DataFrame(list(zip(algorithm_list, accuracy_list)),
                               columns = ['Algorithm', 'Accuracy Score']).sort_values(by = ['Accuracy Score'], ascending = True)
    # Plot
    ax = df_accuracy.plot.barh('Algorithm', 'Accuracy Score', align = 'center', legend = False, color = 'g')
    # Add the data labels
    for i in ax.patches:
        ax.text(i.get_width() + 0.02, i.get_y() + 0.2, str(round(i.get_width(), 2)), fontsize = 10)
    # Set the limit
    plt.xlim(0, 1.1)
    # Set the lables
    plt.xlabel('Test Accuracy Score')
    # Set ticks
    # Generate a list of ticks for y-axis
    y_ticks = np.arange(len(algorithm_list))
    plt.yticks(y_ticks, df_accuracy['Algorithm'], rotation = 0)
    # Set title
    plt.title('Algorithm performance')
    # Turn of top and right frames
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    return
def train_test_plot_def(df, target, algos, size):
    """
    Performs the following operations:
    ---------------------------------
    1. Splits the dataframe into target (dependent variable) and predictors (independent variable)
    2. Scales the values of independent variables (all input values must be numeric)
    3. Splits the dataset into training and testing sets
    4. Loops through the list of classification algorithms to
        a) Train
        b) Test
        c) Evaluate and report performance
        d) Plot Confusion Matrix
        e) Plot feature importance (if it is available for this particular algorithm)
    5. Shows comparative plot of accuracies for all the algorithms
    Parameters:
    ----------
    df (pandas dataframe): the whole dataset containing observations for both target and predictor variables
    target (string): column name of the target variable in df, e.g. 'Species'
    algos (comma separated character string): the first letters of classification algorithms to be applied, e.g. l,r,x
        l: LogisticRegression
        k: KNeighborsClassifier
        s: Support Vector Machine
        d: DecisionTreeClassifier
        r: RandomForestClassifier
        x: XGBClassifier
    size (int): size of the plots, typical values are 5, 10, 15
    Returns:
    -------
    None
    Example:
    -------
    train_test_plot_def(iris_df, 'Species', 'l,r,x', 5)
    where,
        iris_df: input dataframe, e.g. iris_df = pd.read_csv('Iris.csv')
        'Species': name of the target column in iris_df
        'l,r,x': first letters of (L)ogisticRegression', (R)andomForestClassifier and (X)GBClassifier (case insensitive)
        5: size of the plots generated
    """
    # FIX: `display` is only injected as a builtin inside IPython/Jupyter
    # sessions; only HTML is imported at module level, so this function raised
    # NameError when the module was imported outside a notebook.
    from IPython.display import display
    # set X and y
    y = df[target]
    X = df.drop(target, axis = 1)
    # scale X
    X = StandardScaler().fit(X).transform(X)
    # Split the data set into training and testing data sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42, stratify = y)
    # Target class names
    classes = np.unique(df[target])
    # local lists shadow the module-level ones on purpose: fresh per call
    algorithm_list = []
    accuracy_list = []
    algos_selected = algos.upper().split(',')
    for i in range(len(algos_selected)):
        this_algo = algos_selected[i].strip()
        # map the one-letter id to its position in models/model_names
        indx = model_ids.index(this_algo)
        model = models[indx]
        algorithm_list.append(model_names[indx])
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        disp_line = '<h1>' + model_names[indx] + '</h1>'
        display(HTML(disp_line))
        disp_line = '<h2>Scores:</h2>'
        display(HTML(disp_line))
        acc = accuracy_score(y_test, y_pred)
        # per-class precision/recall/F1/support table
        precision, recall, fscore, support = score(y_test, y_pred)
        score_df = pd.DataFrame(list(zip(precision, recall, fscore, support)),
                                columns = ['Precision', 'Recall', 'F1-score', 'Support'])
        score_df = pd.concat([pd.DataFrame(classes), score_df], axis = 1)
        score_df.columns = ['Target Class', 'Precision', 'Recall', 'F1-score', 'Support']
        display(HTML(score_df.to_html(index = False)))
        accuracy_list.append(acc)
        cm_model = confusion_matrix(y_test, y_pred)
        _plot_confusion_matrix(cm_model, classes = classes,
                               title = model_names[indx] + '\nTest Accuracy: {:.2f}'.format(acc), size = size)
        # tree-based models expose feature_importances_; plot the top 10
        if hasattr(model, 'feature_importances_'):
            fig, ax = plt.subplots(figsize = (size, size))
            plt.tick_params(axis = 'x', labelsize = size + 8)
            plt.tick_params(axis = 'y', labelsize = size + 8)
            plt.xticks(size = size + 8)
            plt.yticks(size = size + 8)
            plt.xlabel('')
            ax.set_title('Feature Importance (using '+ model_names[indx]+')', fontsize=size+10)
            importances = pd.DataFrame(np.zeros((X_train.shape[1], 1)), columns = ['Importance'],
                                       index = df.drop(target, axis = 1).columns)
            importances.iloc[:,0] = model.feature_importances_
            importances.sort_values(by = 'Importance', inplace = True, ascending = False)
            top_importances = importances.head(10)
            sns.barplot(x = 'Importance', y = top_importances.index, data = top_importances)
            plt.show()
    _compare_algos(algorithm_list, accuracy_list, size = size)
    return
import numpy as np
def create_image_cppn_input(output_size, is_distance_to_center=True, is_bias=True, input_borders=((-1, 1), (-1, 1))):
    """Build the per-pixel input matrix for an image-producing CPPN.

    Parameters:
        output_size: (height, width) of the target image.
        is_distance_to_center: append each pixel's Euclidean distance to the
            coordinate origin as an extra input.
        is_bias: prepend a constant 1.0 bias input.
        input_borders: ((min_i, max_i), (min_j, max_j)) coordinate ranges the
            pixel grid is mapped onto.

    Returns:
        ndarray of shape (height * width, num_inputs), rows ordered row-major
        (the column index varies fastest), each row laid out as
        [bias?, i, j, distance?].

    Fixes: a 1-pixel-high or 1-pixel-wide image previously raised
    ZeroDivisionError (division by size - 1); such a dimension now maps its
    single coordinate to the lower border.  The per-axis step is hoisted out
    of the loops (loop-invariant).
    """
    img_height = output_size[0]
    img_width = output_size[1]
    num_of_input = 2
    if is_distance_to_center:
        num_of_input = num_of_input + 1
    if is_bias:
        num_of_input = num_of_input + 1
    cppn_input = np.zeros((img_height * img_width, num_of_input))
    min_i, max_i = input_borders[0]
    min_j, max_j = input_borders[1]
    # coordinate increment per pixel; 0 for degenerate single-pixel axes
    i_step = (max_i - min_i) / (img_height - 1) if img_height > 1 else 0.0
    j_step = (max_j - min_j) / (img_width - 1) if img_width > 1 else 0.0
    input_idx = 0
    for i in range(img_height):
        # i coordinate mapped into [min_i, max_i]
        i_input = min_i + i * i_step
        for j in range(img_width):
            # j coordinate mapped into [min_j, max_j]
            j_input = min_j + j * j_step
            row = []
            if is_bias:
                row.append(1.0)
            row.append(i_input)
            row.append(j_input)
            # distance to the coordinate origin
            if is_distance_to_center:
                row.append(np.linalg.norm([i_input, j_input]))
            cppn_input[input_idx, :] = np.array(row)
            input_idx = input_idx + 1
    return cppn_input
def calc_neat_forward_image_cppn_output(neat_net, cppn_input,):
    """Activate a feed-forward NEAT net on every CPPN input row; keep output[0] each time."""
    n_rows = np.shape(cppn_input)[0]
    output = np.zeros(n_rows)
    for idx in range(n_rows):
        output[idx] = neat_net.activate(cppn_input[idx])[0]
    return output
def calc_neat_recurrent_image_cppn_output(neat_net, cppn_input, n_iter=1):
    """Activate a recurrent NEAT net per pixel: reset the state, run n_iter steps,
    and keep the final step's first output."""
    output = np.zeros(np.shape(cppn_input)[0])
    for idx, pixel_input in enumerate(cppn_input):
        neat_net.reset()
        result = 0.0
        for _ in range(n_iter):
            result = neat_net.activate(pixel_input)[0]
        output[idx] = result
    return output
def postprocess_image_cppn_output(image_size, cppn_output):
    '''
    Each output pixel x of the CPPN is reformated by: 1-abs(x).
    And the output is reshaped into the given image dimensions.
    '''
    return 1.0 - np.abs(np.reshape(cppn_output, image_size))
from vcenter_connect import add_disk
# Interactive vCenter disk-creation script (Python 2: raw_input, print statements).
virtualmachine_name = raw_input("enter virtual machine name:")
disk_size = raw_input("Enter Disc size in GB:")
disk_type = raw_input("Enter Disc type:")
# add_disk is expected to return truthy on success -- TODO confirm its contract.
if add_disk(virtualmachine_name,disk_size,disk_type):
    print "Disc created"
else:
    print "Something went wrong"
|
class Administrator():
    """Console administrator: logs in, records orders and assigns workers.

    Holds a shared list of Order objects indexed by order id.
    """

    def __init__(self, orders):
        self.count = 0
        self.orders = orders

    def enter(self):
        """Prompt for credentials, remember them in self.data, return self."""
        print("\nLogin: ")
        user = input()
        print("\nPassword: ")
        pwd = input()
        self.data = user + " " + pwd
        return self

    def exit(self):
        # Rebinding the local name has no effect on the instance;
        # kept as a no-op for API compatibility.
        self = None

    def OrderDetails(self):
        """Prompt for an order line and record it on the matching order."""
        print("\nOrder id: ")
        order_id = int(input())
        print("\nProduct name: ")
        product = input()
        print("\nPrice: ")
        unit_price = float(input())
        print("\nAmount: ")
        quantity = float(input())
        print("\nOrdered ")
        print(product)
        print(" ")
        total = unit_price * quantity
        print(str(total))
        order = self.orders[order_id]
        order.AddPrice(total)
        order.AddChangeLog("Ordered " + str(quantity) + " " + product)
        order.CountPrice(order_id)

    def AddWorkerToOrder(self):
        """Prompt for an order id and a worker name, then log the assignment."""
        print("\nOrder id: ")
        order_id = int(input())
        print("\nWorker full name: ")
        worker = input()
        mes = str(worker) + " has been added to order #" + str(order_id)
        print("\n" + mes)
        self.orders[order_id].AddChangeLog(mes + "\n")
class Order():
    """A single customer order: a running total plus a textual change log."""

    def __init__(self):
        self.price = 0          # accumulated total price
        self.changeList = ""    # concatenated log entries

    def CountPrice(self, id):
        """Print a summary (id, change log, current price) of this order."""
        header = "\nOrder #" + str(id)
        print(header)
        print(self.changeList)
        print("\nPrice: " + str(self.price))

    def AddPrice(self, price_):
        """Add *price_* to the running total."""
        self.price = self.price + price_

    def AddChangeLog(self, log):
        """Append *log* to the change history."""
        self.changeList = self.changeList + log
def main():
    """Demo flow: one empty order, admin login, worker assignment, order entry."""
    order_list = [Order()]
    admin = Administrator(order_list).enter()
    admin.AddWorkerToOrder()
    admin.OrderDetails()
    admin.exit()
# Run the interactive demo only when executed as a script.
if __name__ == "__main__":
    main()
import ast
class SubAttr(object):
    """Descriptor of one sub-attribute of a schema: display name/label, type
    flags, allowed choices, per-type constraint dictionaries and optionality.

    Every boolean/list/dict setter funnels its argument through
    ast.literal_eval(str(value)), so both real Python values (True, [..], {..})
    and their string representations ("True", "[..]") are accepted; anything
    that does not evaluate to the expected type raises ValueError.
    """
    def __init__(self):
        self._SubAttrName = ""
        self._SubAttrLabel = ""
        # type flags -- callers are expected to set exactly one of these
        # (not enforced here; TODO confirm with callers)
        self._IsTypeString = False
        self._IsTypeInteger = False
        self._IsTypeFloat = False
        self._IsTypeDate = False
        self._IsTypeBoolean = False
        self._SubAttrChoices = []
        self._SubAttrChoicesFilter = []
        # per-type constraint dicts; their internal schema is not validated here
        self._StringConstraints = {}
        self._IntegerConstraints = {}
        self._FPConstraints = {}
        self._BooleanConstraints = {}
        self._DateConstraints = {}
        self._IsOptional = False
    @property
    def SubAttrName(self):
        return self._SubAttrName
    @SubAttrName.setter
    def SubAttrName(self,subAttrName):
        if (type(subAttrName) is not str):
            raise ValueError('SubAttrName must be type String')
        self._SubAttrName = str(subAttrName)
    @property
    def SubAttrLabel(self):
        return self._SubAttrLabel
    @SubAttrLabel.setter
    def SubAttrLabel(self,subAttrLabel):
        if (type(subAttrLabel) is not str):
            raise ValueError('SubAttrLabel must be type String')
        self._SubAttrLabel = str(subAttrLabel)
    @property
    def IsTypeString(self):
        return self._IsTypeString
    # literal_eval(str(x)) accepts both bool values and "True"/"False" strings
    @IsTypeString.setter
    def IsTypeString(self,isTypeString):
        if (not isinstance(ast.literal_eval(str(isTypeString)),bool)):
            raise ValueError('IsTypeString must be type Boolean')
        self._IsTypeString = ast.literal_eval(str(isTypeString))
    @property
    def IsTypeInteger(self):
        return self._IsTypeInteger
    @IsTypeInteger.setter
    def IsTypeInteger(self,isTypeInteger):
        if (not isinstance(ast.literal_eval(str(isTypeInteger)),bool)):
            raise ValueError('IsTypeInteger must be type Boolean')
        self._IsTypeInteger = ast.literal_eval(str(isTypeInteger))
    @property
    def IsTypeFloat(self):
        return self._IsTypeFloat
    @IsTypeFloat.setter
    def IsTypeFloat(self,isTypeFloat):
        if (not isinstance(ast.literal_eval(str(isTypeFloat)),bool)):
            raise ValueError('IsTypeFloat must be type Boolean')
        self._IsTypeFloat = ast.literal_eval(str(isTypeFloat))
    @property
    def IsTypeDate(self):
        return self._IsTypeDate
    @IsTypeDate.setter
    def IsTypeDate(self,isTypeDate):
        if (not isinstance(ast.literal_eval(str(isTypeDate)),bool)):
            raise ValueError('IsTypeDate must be type Boolean')
        self._IsTypeDate = ast.literal_eval(str(isTypeDate))
    @property
    def IsTypeBoolean(self):
        return self._IsTypeBoolean
    @IsTypeBoolean.setter
    def IsTypeBoolean(self,isTypeBoolean):
        if (not isinstance(ast.literal_eval(str(isTypeBoolean)),bool)):
            raise ValueError('IsTypeBoolean must be type Boolean')
        self._IsTypeBoolean = ast.literal_eval(str(isTypeBoolean))
    @property
    def SubAttrChoices(self):
        return self._SubAttrChoices
    # A non-empty choice list automatically gains an 'Other' entry.
    @SubAttrChoices.setter
    def SubAttrChoices(self,subAttrChoices):
        if (not isinstance(ast.literal_eval(str(subAttrChoices)),list)):
            raise ValueError('SubAttrChoices must be type List of choice of String type')
        for choice in (ast.literal_eval(str(subAttrChoices))):
            if (type(choice) is not str):
                raise ValueError('Choices must be of type String')
        self._SubAttrChoices = ast.literal_eval(str(subAttrChoices))
        if (self._SubAttrChoices):
            self._SubAttrChoices.append('Other')
    @property
    def SubAttrChoicesFilter(self):
        return self._SubAttrChoicesFilter
    # A non-empty filter list is prefixed with an 'Any' wildcard entry.
    @SubAttrChoicesFilter.setter
    def SubAttrChoicesFilter(self,subAttrChoicesFilter):
        if (not isinstance(ast.literal_eval(str(subAttrChoicesFilter)),list)):
            raise ValueError('SubAttrChoicesFilter must be type List of choice of String type')
        for choice in (ast.literal_eval(str(subAttrChoicesFilter))):
            if (type(choice) is not str):
                raise ValueError('Choices must be of type String')
        self._SubAttrChoicesFilter = ast.literal_eval(str(subAttrChoicesFilter))
        if (self._SubAttrChoicesFilter):
            self._SubAttrChoicesFilter = ['Any'] + self._SubAttrChoicesFilter
    @property
    def StringConstraints(self):
        return self._StringConstraints
    @StringConstraints.setter
    def StringConstraints(self,stringConstraints):
        if (not isinstance(ast.literal_eval(str(stringConstraints)),dict)):
            raise ValueError('StringConstraints must be specified as dictionary')
        self._StringConstraints = ast.literal_eval(str(stringConstraints))
    @property
    def IntegerConstraints(self):
        return self._IntegerConstraints
    @IntegerConstraints.setter
    def IntegerConstraints(self,integerConstraints):
        if (not isinstance(ast.literal_eval(str(integerConstraints)),dict)):
            raise ValueError('IntegerConstraints must be specified as dictionary')
        self._IntegerConstraints = ast.literal_eval(str(integerConstraints))
    @property
    def FPConstraints(self):
        return self._FPConstraints
    @FPConstraints.setter
    def FPConstraints(self,fpConstraints):
        if (not isinstance(ast.literal_eval(str(fpConstraints)),dict)):
            raise ValueError('FPConstraints must be specified as dictionary')
        self._FPConstraints = ast.literal_eval(str(fpConstraints))
    @property
    def BooleanConstraints(self):
        return self._BooleanConstraints
    @BooleanConstraints.setter
    def BooleanConstraints(self,booleanConstraints):
        if (not isinstance(ast.literal_eval(str(booleanConstraints)),dict)):
            raise ValueError('BooleanConstraints must be specified as dictionary')
        self._BooleanConstraints = ast.literal_eval(str(booleanConstraints))
    @property
    def DateConstraints(self):
        return self._DateConstraints
    @DateConstraints.setter
    def DateConstraints(self,dateConstraints):
        if (not isinstance(ast.literal_eval(str(dateConstraints)),dict)):
            raise ValueError('DateConstraints must be specified as dictionary')
        self._DateConstraints = ast.literal_eval(str(dateConstraints))
    @property
    def IsOptional(self):
        return self._IsOptional
    @IsOptional.setter
    def IsOptional(self,isOptional):
        if (not isinstance(ast.literal_eval(str(isOptional)),bool)):
            raise ValueError('IsOptional must be type Boolean')
        self._IsOptional = ast.literal_eval(str(isOptional))
|
# Compute weekly pay with time-and-a-half overtime above 40 hours.
hrs = input("Enter Hours:")
h = float(hrs)
rte = input("Enter Rate:")
r = float(rte)
# BUGFIX: compare the numeric hours, not the raw input string --
# 'str > int' raises TypeError on Python 3.
if h > 40:
    pay = (h - 40) * (r * 1.5) + (40 * r)
else:
    pay = (h * r)
print(pay)
|
"""Class to represent the Busy State
"""
import os
import signal
import logging
import subprocess # nosec #pylint-disable type: ignore
import alsaaudio
import requests
from ..hotword_engine.stop_detection import StopDetector
from ..speech import TTS
from .base_state import State
from .lights import lights
logger = logging.getLogger(__name__)
class BusyState(State):
    """Busy state inherits from base class State. In this state, SUSI API is called to perform query and the response
    is then spoken with the selected Text to Speech Service.
    """

    def detection(self):
        """This callback is fired when a Hotword Detector detects a hotword.
        :return: None
        """
        # subprocess.call(['killall', 'play'])
        # subprocess.call(['killall', 'mpv']
        if hasattr(self, 'audio_process'):
            # pause any music that is currently playing while we listen
            self.audio_process.send_signal(signal.SIGSTOP)  # nosec #pylint-disable type: ignore
        lights.off()
        lights.wakeup()
        subprocess.Popen(['play', str(self.components.config['detection_bell_sound'])])  # nosec #pylint-disable type: ignore
        lights.wakeup()
        self.transition(self.allowedStateTransitions.get('recognizing'))
        # BUGFIX: guard the resume just like the pause above -- the unguarded
        # send_signal raised AttributeError whenever no audio was ever started.
        if hasattr(self, 'audio_process'):
            self.audio_process.send_signal(signal.SIGCONT)  # nosec #pylint-disable type: ignore

    def on_enter(self, payload=None):
        """This method is executed on entry to Busy State. SUSI API is called via SUSI Python library to fetch the
        result. We then call TTS to speak the reply. If successful, we transition to Idle State else to the Error State.
        :param payload: query to be asked to SUSI
        :return: None
        """
        logger.debug('Busy state')
        try:
            import RPi.GPIO as GPIO
            # GPIO 17 lit while the query is in flight, 27 while answering
            GPIO.output(17, True)
            reply = self.components.susi.ask(payload)
            GPIO.output(17, False)
            GPIO.output(27, True)
            if self.components.renderer is not None:
                self.notify_renderer('speaking', payload={'susi_reply': reply})
            if 'answer' in reply.keys():
                logger.info('Susi: %s', reply['answer'])
                lights.off()
                lights.speak()
                self.__speak(reply['answer'])
                lights.off()
            else:
                lights.off()
                lights.speak()
                self.__speak("I don't have an answer to this")
                lights.off()
            if 'identifier' in reply.keys():
                # music playback: 'ytd:'-prefixed ids go to the local player
                # service, everything else is played directly with 'play'
                classifier = reply['identifier']
                stopAction = StopDetector(self.detection)
                if classifier[:3] == 'ytd':
                    video_url = reply['identifier']
                    requests.get('http://localhost:7070/song?vid=' + video_url[4:])
                else:
                    audio_url = reply['identifier']
                    audio_process = subprocess.Popen(['play', audio_url[6:], '--no-show-progress'])  # nosec #pylint-disable type: ignore
                    self.audio_process = audio_process
                stopAction.run()
                stopAction.detector.terminate()
            if 'volume' in reply.keys():
                subprocess.call(['amixer', '-c', '1', 'sset', "'Headphone'", ',', '0', str(reply['volume'])])
                subprocess.call(['amixer', '-c', '1', 'sset', "'Speaker'", ',', '0', str(reply['volume'])])
                subprocess.call(['play', str(self.components.config['detection_bell_sound'])])  # nosec #pylint-disable type: ignore
            if 'table' in reply.keys():
                table = reply['table']
                for h in table.head:
                    print('%s\t' % h, end='')
                    self.__speak(h)
                print()
                for datum in table.data[0:4]:
                    for value in datum:
                        print('%s\t' % value, end='')
                        self.__speak(value)
                    print()
            if 'pause' in reply.keys():
                requests.get('http://localhost:7070/pause')
            if 'resume' in reply.keys():
                requests.get('http://localhost:7070/resume')
            if 'restart' in reply.keys():
                requests.get('http://localhost:7070/restart')
            if 'stop' in reply.keys():
                requests.get('http://localhost:7070/stop')
                subprocess.call(['killall', 'play'])  # nosec #pylint-disable type: ignore
                self.transition(self.allowedStateTransitions.get('idle'))
            if 'rss' in reply.keys():
                rss = reply['rss']
                entities = rss['entities']
                count = rss['count']
                for entity in entities[0:count]:
                    logger.debug(entity.title)
                    self.__speak(entity.title)
            self.transition(self.allowedStateTransitions.get('idle'))
        except ConnectionError:
            self.transition(self.allowedStateTransitions.get(
                'error'), payload='ConnectionError')
        except Exception as e:
            logger.error('Got error: %s', e)
            self.transition(self.allowedStateTransitions.get('error'))

    def on_exit(self):
        """Method executed on exit from the Busy State.
        """
        try:
            import RPi.GPIO as GPIO
            GPIO.output(17, False)
            GPIO.output(27, False)
            GPIO.output(22, False)
        except RuntimeError as e:
            logger.error(e)
        except ImportError:
            logger.warning("This device doesn't have GPIO port")

    def __speak(self, text):
        # Dispatch to the configured Text-to-Speech backend.
        # (made the 'flite' branch an elif for consistency; the three
        # conditions are mutually exclusive, so behavior is unchanged)
        if self.components.config['default_tts'] == 'google':
            TTS.speak_google_tts(text)
        elif self.components.config['default_tts'] == 'flite':
            logger.info("Using flite for TTS")  # indication for using an offline music player
            TTS.speak_flite_tts(text)
        elif self.components.config['default_tts'] == 'watson':
            TTS.speak_watson_tts(text)
|
# Generated by Django 2.0.7 on 2018-09-15 21:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widens the image/link URL columns on
    citymsg, provincemsg and scenicmsg to optional 1000-char CharFields."""

    dependencies = [
        ('TraverMsg', '0005_auto_20180915_0057'),
    ]
    operations = [
        migrations.AlterField(
            model_name='citymsg',
            name='img_url',
            field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='首页图片url'),
        ),
        migrations.AlterField(
            model_name='provincemsg',
            name='img_url',
            field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='图片url'),
        ),
        migrations.AlterField(
            model_name='scenicmsg',
            name='img_url',
            field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='图片url'),
        ),
        migrations.AlterField(
            model_name='scenicmsg',
            name='link_url',
            field=models.CharField(blank=True, max_length=1000, null=True, verbose_name='景点链接'),
        ),
    ]
|
"""
CCT 建模优化代码
A09粒子运动工具类 ParticleRunner 示例
作者:赵润晓
日期:2021年4月29日
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# ParticleRunner provides functions that track particles through a magnetic field.
# Integrators: a hand-written runge_kutta4 (also used by the GPU-accelerated
# path) and scipy's ODE solver, which adapts the step size automatically.
# First create a particle and a magnet to demonstrate the ParticleRunner API.
# A proton at the global origin, moving along +x, with 250 MeV kinetic energy.
proton250 = ParticleFactory.create_proton(
    position=P3.origin(),
    direct=P3.x_direct(),
    kinetic_MeV=250
)
# A magnet producing a uniform 2.4321282996 T field along the +z axis.
uniform_magnet243 = Magnet.uniform_magnet(magnetic_field=P3(z=2.4321282996))
# 类私有函数 __callback_for_runge_kutta4()
# 将二阶微分方程转为一阶,返回一个函数 Callable[[float, numpy.ndarray], numpy.ndarray]
# 原先的粒子在磁场运动方程为
# v = p'
# a = (q/m)v×B
# 自变量 t
# 这是一个二阶微分方程
# 通过向量化转为一阶
# 令 Y = [v,p]
# 则方程变为 Y' = [a, v] = [(q/m)v×B, v]
# 这个函数仅在内部才能调用,因此不做示例
# 类私有函数 __callback_for_solve_ode()
# 函数功能和 __callback_for_runge_kutta4() 一致
# 不同点在于
# __callback_for_runge_kutta4() 返回的函数,入参是 t,[p,v] 返回值是 [v,a],其中数组元素都是三维矢量 P3
# __callback_for_solve_ode() 返回的函数,入参是 t,[px,py,px,vx,vy,vz] 返回值是 [vx,vy,vz,ax,ay,az] 其中数组元素都是浮点数
# 函数 run_only(p,m,length,footstep,concurrency_level,report) 让粒子(群)在磁场中运动 length 距离
# 函数没有返回值。粒子运动完成后,粒子信息(位置、速度等)反映运动完成时的值
# 参数意义和可选值如下
# p 粒子 RunningParticle,或者粒子数组 [RunningParticle...]。注意运行后,粒子发生变化,返回运动后的坐标、速度值。
# m 磁场
# length 运动长度
# footstep 步长,默认 20 mm
# concurrency_level 并行度,默认值 1
# report 是否打印并行任务计划,默认 True 打印
# 下面让粒子 proton250 在 uniform_magnet243 下运动 pi/2 米
if __name__ == "__main__":
# 因为代码后面采用了多线程,所以需要将代码放在 __main__ 里面
proton250_0 = proton250.copy() # 拷贝一下,为了后面再次使用
ParticleRunner.run_only(
p= proton250_0,
m=uniform_magnet243,
length= math.pi/2
)
print(proton250_0)
# 输出为
# p=(1.0000000541349048, -0.9999999670349127, 0.0),v=(16.0287304376252, -183955178.02123857, 0.0),v0=183955178.0274753
# 再举一个例子,取两个粒子,采用并行任务计算
proton250_1 = proton250.copy() # 拷贝一下,为了后面再次使用
BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main() # 使用并行计算时,需要这条语句(实际上没有任何意义,只是提醒代码需要被 if __name__ == "__main__": 包裹)
ParticleRunner.run_only([proton250_0,proton250_1],uniform_magnet243,math.pi/2,concurrency_level=2)
print(proton250_0) # p=(1.742677909061241e-07, -2.000000108269802, 0.0),v=(-183955178.01500052, -32.05746095441282, 0.0),v0=183955178.0274753
print(proton250_1) # p=(1.0000000541349048, -0.9999999670349127, 0.0),v=(16.0287304376252, -183955178.02123857, 0.0),v0=183955178.0274753
# 任务运行时会打印并行任务运行信息:
# 处理并行任务,任务数目2,并行等级2
# 任务完成,用时0.7770464420318604秒
# 再举一个例子,取两个粒子,不采用并行任务计算,只需要 concurrency_level 取 1 即可
ParticleRunner.run_only([proton250_0,proton250_1],uniform_magnet243,math.pi/2,concurrency_level=1)
print(proton250_0) # p=(-1.000000054067076, -1.0000003155704955, 0.0),v=(-48.086191406007856, 183955178.00876093, 0.0),v0=183955178.0274753
print(proton250_1) # p=(1.742677909061241e-07, -2.000000108269802, 0.0),v=(-183955178.01500052, -32.05746095441282, 0.0),v0=183955178.0274753
# 任务运行时会打印任务运行信息:
# 当前使用单线程进行粒子跟踪,如果函数支持多线程并行,推荐使用多线程
# 运行一个粒子需要0.00557秒,估计总耗时0.01114秒
# 100.00% finished
# 实际用时0.01237秒
# run_only_ode() 让粒子(群)在磁场中运动 length 距离
# 这个函数的效果和 run_only() 一样。同样没有返回值
# 不同点在于前者使用 runge_kutta4 法计算,后者使用 scipy 包采用变步长的方法计算
# 参数含义如下:
# p 粒子 RunningParticle,或者粒子数组 [RunningParticle...]。注意运行后,粒子发生变化,返回运动后的坐标、速度值。
# m 磁场
# length 运动长度
# footstep 步长,默认 20 mm
# absolute_tolerance 绝对误差,默认 1e-8
# relative_tolerance 相对误差,默认 1e-8
ParticleRunner.run_only_ode([proton250_0]*10,uniform_magnet243,math.pi/2)
print(proton250_0)
# (1.742677909061241e-07, -2.000000108269802, 0.0),v=(-183955178.01500052, -32.05746095441282, 0.0),v0=183955178.0274753
# 任务运行时会打印任务运行信息:
# track 10 particles
# ▇▇▇▇▇▇▇▇▇▇ finished
# 函数 run_get_trajectory() 运行一个粒子,并返回轨迹,轨迹是 P3 的数组
# 参数如下
# p 粒子
# m 磁场
# length 运动长度
# footstep 步长,默认 20 mm
traj = ParticleRunner.run_get_trajectory(proton250_0,uniform_magnet243,math.pi)
# 去除下面两行注释查看绘图结果
# plt.gca(projection="3d").plot(*P3.extract(traj))
# plt.show()
# 函数 run_get_all_info() 运行一个粒子,获取全部信息
# 所谓全部信息即每一步粒子的所有信息,包含位置、速度等,返回值是 RunningParticle 数组
all_info = ParticleRunner.run_get_all_info(proton250.copy(),uniform_magnet243,1,0.1)
for info in all_info:
print(info.__str__() + " distance = " + str(info.distance))
# 输出如下:
# p=(0.0, 0.0, 0.0),v=(183955178.0274753, 0.0, 0.0),v0=183955178.0274753 distance = 0.0
# p=(0.0998333333513896, -0.004995833063166415, 0.0),v=(183036168.7167266, -18364857.6149287, 0.0),v0=183955178.0274753 distance = 0.1
# p=(0.19866916542168667, -0.01993340168785551, 0.0),v=(180288325.76992166, -36546219.71405112, 0.0),v0=183955178.0274753 distance = 0.2
# p=(0.2955199630138605, -0.044663454753014774, 0.0),v=(175739104.70211726, -54362424.462139845, 0.0),v0=183955178.0274753 distance = 0.3
# p=(0.3894180267177238, -0.0789388980457223, 0.0),v=(169433959.7707788, -71635458.55155955, 0.0),v0=183955178.0274753 distance = 0.4
# p=(0.47942515982512757, -0.12241726314185991, 0.0),v=(161435889.81242117, -88192735.84918669, 0.0),v0=183955178.0274753 distance = 0.5
# p=(0.5646420424642986, -0.1746641292326529, 0.0),v=(151824808.7799325, -103868821.81686696, 0.0),v0=183955178.0274753 distance = 0.6000000000000001
# p=(0.644217217290238, -0.2351574637073639, 0.0),v=(140696747.26995525, -118507086.47560275, 0.0),v0=183955178.0274753 distance = 0.7
# p=(0.7173555969492972, -0.3032928381225379, 0.0),v=(128162893.01838718, -131961269.39762901, 0.0),v0=183955178.0274753 distance = 0.8
# p=(0.7833264083143161, -0.37838946744163776, 0.0),v=(114348479.95103914, -144096941.08952317, 0.0),v0=183955178.0274753 distance = 0.9
# p=(0.8414704941142988, -0.4596970122030806, 0.0),v=(99391536.88967079, -154792846.1647193, 0.0),v0=183955178.0274753 distance = 1.0
# 函数 run_only_deprecated() 已被废弃,因为没有使用 runge_kutta4 等控制误差的积分方法
# 函数 run_get_trajectory_deprecated() 已被废弃,因为没有使用 runge_kutta4 等控制误差的积分方法
# 函数 run_get_all_info_deprecated() 已被废弃,因为没有使用 runge_kutta4 等控制误差的积分方法
|
# http://www.codeskulptor.org/#user43_fAwSFN88Y7_0.py
# implementation of card game - Memory
import simplegui
import random
# helper function to initialize globals
def new_game():
    """(Re)initialise all game state: a shuffled deck of 8 pairs, all cards
    hidden, and the click state machine reset."""
    global list1, exposed, state, turns, pre_index1, pre_index2
    # BUGFIX: the original called .extend() on range(), which only worked on
    # Python 2 where range() returned a list; build the two copies explicitly.
    list1 = list(range(0, 8)) + list(range(0, 8))
    random.shuffle(list1)
    exposed = [False] * 16      # per-card face-up flag
    state = 0                   # 0: no card up, 1: one up, 2: two up
    turns = 0
    pre_index1 = 0              # indices of the cards flipped this turn
    pre_index2 = 0
# define event handlers
def mouseclick(pos):
    """Advance the Memory state machine for a click at canvas position *pos*.

    state 0: reveal the first card of a turn; state 1: reveal the second;
    state 2: start a new turn, hiding the previous pair if it did not match.
    """
    global list1, exposed, state, turns, pre_index1, pre_index2
    index = pos[0] // 50  # cards are 50 px wide
    if exposed[index]:
        return  # clicking an already-revealed card does nothing
    exposed[index] = True
    if state == 0:
        state = 1
        turns += 1
        pre_index1 = index
    elif state == 1:
        state = 2
        pre_index2 = index
    else:
        state = 1
        turns += 1
        if list1[pre_index1] != list1[pre_index2]:
            # previous pair did not match: hide it again
            exposed[pre_index1] = False
            exposed[pre_index2] = False
        pre_index1 = index
# cards are logically 50x100 pixels in size
def draw(canvas):
global list1,exposed,turns
for i in range(16):
if exposed[i] == True:
canvas.draw_text(str(list1[i]), (15+i*50, 60), 35, 'White')
else:
point1 = [0,0]
canvas.draw_polygon([[point1[0]+i*50,point1[1]],
[point1[0]+50+i*50,point1[1]],
[point1[0]+50+i*50,point1[1]+100],
[point1[0]+i*50,point1[1]+100]
],
1, 'Red', 'Green')
new_label = 'Turns = '+str(turns)
label.set_text(new_label)
# create frame and add a button and labels
# (simplegui is the CodeSkulptor GUI library; this block only runs there)
frame = simplegui.create_frame("Memory", 800, 100)
frame.add_button("Reset", new_game)
label = frame.add_label("Turns = 0")
# register event handlers
frame.set_mouseclick_handler(mouseclick)
frame.set_draw_handler(draw)
# get things rolling
new_game()
frame.start()
# Always remember to review the grading rubric |
from .petscmat import *
from .residual import *
from .DREAMEqsys import DREAMEqsys
from .DREAMEqsysUnknown import DREAMEqsysUnknown
|
# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import logging
import json
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from multicloud_azure.pub.aria.service import AriaServiceImpl
logger = logging.getLogger(__name__)
class InfraWorkload(APIView):
    """Deploy an infra workload (Heat-style template) through the Aria service."""

    def post(self, request, cloud_owner, cloud_region_id):
        body = request.data
        template = body["infra-template"]
        raw_payload = body["infra-payload"]
        parsed_inputs = json.loads(raw_payload)
        stack_name = parsed_inputs['template_data']['stack_name']
        service = AriaServiceImpl()
        try:
            stack = service.deploy_service(stack_name, template,
                                           parsed_inputs, logger)
            # propagate any non-200 result from the service untouched
            if stack[1] != 200:
                return Response(data=stack[0], status=stack[1])
        except Exception as e:
            if hasattr(e, "http_status"):
                return Response(data={'error': str(e)}, status=e.http_status)
            return Response(data={'error': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        rsp = {
            "template_type": "heat",
            "workload_id": stack[0]
        }
        return Response(data=rsp, status=status.HTTP_202_ACCEPTED)
class GetStackView(APIView):
    """Report the status of a previously deployed workload (Aria execution)."""

    def get(self, request, cloud_owner, cloud_region_id, workload_id):
        service = AriaServiceImpl()
        try:
            stack = service.show_execution(workload_id)
            if stack[1] != 200:
                return Response(data=stack[0], status=stack[1])
            body = json.loads(stack[0])
            # translate Aria execution states into Heat-style stack states
            status_map = {
                "pending": "CREATE_IN_PROGRESS",
                "started": "CREATE_IN_PROGRESS",
                "succeeded": "CREATE_COMPLETE",
                "failed": "CREATE_FAILED",
                "cancelled": "CREATE_FAILED",
            }
            workload_status = status_map.get(body["status"], "unknown")
            rsp = {
                "template_type": "heat",
                "workload_id": workload_id,
                "workload_status": workload_status
            }
            return Response(data=rsp, status=stack[1])
        except Exception as e:
            if hasattr(e, "http_status"):
                return Response(data={'error': str(e)}, status=e.http_status)
            return Response(data={'error': str(e)},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
|
#!/usr/bin/env python
#coding=utf-8
import pymongo
import sys
def get_collection():
    """Open the 'weibo' database on host 'master' and return its 'text' collection."""
    database = pymongo.Connection("master", 27017)["weibo"]
    collection = database["text"]
    return collection
def run():
    """Emit one 'blogId@word<TAB>tf' line per term of every blog document that
    has a non-empty term-frequency dict (Hadoop-streaming mapper style).

    NOTE(review): Python 2 only -- the print statement, reload(sys)/
    setdefaultencoding and indexing dict.keys() are all errors on Python 3.
    """
    reload(sys)
    sys.setdefaultencoding('utf-8')
    collection = get_collection()
    tf = dict()
    for blogText in collection.find({"tf":{"$exists":True}, "_id":{"$lt":320000}}):
        if len(blogText['tf'])>0:# skip documents whose tf field is empty/ill-typed
            blogId = blogText['_id']
            tf = blogText['tf']
            for i in range(len(tf)):
                key = str(blogId) + '@' + tf.keys()[i] # tf.keys()[i] => word
                value = tf.values()[i]
                print "{0}\t{1}".format(key, value)# key: blogId@word; value: tf
# Run as a standalone mapper script.
if __name__ == '__main__':
    run()
|
#!/usr/bin/env python
#-*- coding:UTF-8 -*-
import os

# Directory whose files are listed, one absolute path per line, into images.txt
# (input list for the KCF tracker).
path = '/home/zr/documents/KCFcpp/resource/blanket'

# 'with' guarantees the file is closed even if os.listdir/write raises,
# unlike the original open()/close() pair.
with open("images.txt", 'w') as f:
    for pic in os.listdir(path):
        f.write(os.path.join(path, pic) + '\n')
|
from math import *
from utils import get_primes
import sys
import pickle
sys.setrecursionlimit(10000)  # rec() recurses once per prime index; the default limit is too low
m = 100000  # compute radicals for n in 0..m
k = 2  # NOTE(review): unused below -- presumably left over; confirm before removing
primes = get_primes(m)  # ascending list of primes consumed by rec()
def rec(prod, n, k):
    """Recursive helper for rad(): multiply *prod* by every distinct prime
    factor of *n*, trying primes from index *k* of the global `primes` list.

    Returns the accumulated product once n is fully factored.
    NOTE(review): raises IndexError if n still has a factor beyond primes[-1].
    """
    if n <= 1:
        return prod
    p = primes[k]
    if n % p == 0:
        prod *= p
        while n % p == 0:
            # BUGFIX: use floor division -- '/' turns n into a float on
            # Python 3, losing exactness for large n.
            n //= p
    return rec(prod, n, k + 1)
def rad(n):
    """Radical of n: the product of its distinct prime factors."""
    return rec(1, n, 0)
# Load the radicals from disk if a cache exists, otherwise recompute them.
try:
    with open('rad.pkl', 'rb') as file:
        rads = pickle.load(file)
except Exception:
    rads = [rad(n) for n in range(m+1)]
# Rewrite the cache (best-effort: failures only print the error).
try:
    with open('rad.pkl', 'wb') as file:
        pickle.dump(file=file, obj=rads)
except Exception as e:
    print(str(e))
# Sort indices n by rad(n) (Project Euler 124 style query).
d = sorted(enumerate(rads), key=lambda x: x[1])
# NOTE(review): d[10000] is the 10001st entry; confirm whether the 1-based
# 10000th element (d[9999]) was intended.
print(d[10000])
import HW7.CalibrationSettings as CalibSets
import HW7.CalibrationClasses as CalibCls
# create a calibration object
calibration = CalibCls.Calibration()
# sample the posterior of the mortality probability
calibration.sample_posterior()
# estimate of annual mortality probability and the 95% credible interval
print('Estimate of annual mortality probability ({:.{prec}%} credible interval):'.format(1-CalibSets.ALPHA, prec=0),
      calibration.get_mortality_estimate_credible_interval(CalibSets.ALPHA, 4)) # return with 4 decimals
|
import boto3
import uuid
def lambda_handler(event, context):
    """Create or update a user's budget record in the DynamoDB 'budgetData' table.

    event must carry: username, income, rent, food, transportation, recreation.
    Writes to DynamoDB as a side effect; returns nothing.
    """
    username = event["username"]
    income = event["income"]
    rent = event["rent"]
    food = event["food"]
    transportation = event["transportation"]
    recreation = event["recreation"]
    print('Generating new DynamoDB record, with ID: ' + username)
    #Creating new record in DynamoDB table
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table("budgetData")
    response = table.get_item(
        Key={
            'username' : username
        })
    # BUGFIX: get_item never returns None -- a missing record is signalled by
    # the absence of the 'Item' key, so the original put_item branch was
    # unreachable and every request went through update_item.
    if 'Item' not in response:
        table.put_item(
            Item={
                'username' : username,
                'income' : income,
                'rent' : rent,
                'food' : food,
                'transportation' : transportation,
                'recreation' : recreation
            })
    else:
        table.update_item(
            Key={
                'username': username
            },
            UpdateExpression='set income = :i, rent = :r, food = :f, transportation = :t, recreation = :rec',
            ExpressionAttributeValues={
                ':i': income,
                ':r': rent,
                ':f': food,
                ':t': transportation,
                ':rec': recreation
            })
|
import re
import requests
from bs4 import BeautifulSoup
import os
from random import randrange
import json
# Strip the trailing comma from nuevos.txt (the scraper leaves one behind).
with open('./nuevos.txt', 'ab') as filehandle:
    filehandle.seek(-1, os.SEEK_END)
    filehandle.truncate()
    filehandle.close()  # redundant: the 'with' block already closes the file
# Load the comma-separated links stored in nuevos.txt into array_enlaces_nuevo.
with open ("./nuevos.txt") as f:
    content = f.read()
array_enlaces_nuevo = content.split(",")
def arreglarLugar(lugar):
    """Resolve *lugar* (a lowercase municipality name) to its province name
    using the provincias.json / municipios.json lookup files; if no
    municipality matches, return *lugar* unchanged.
    """
    provincias = []
    municipios = []
    provincia = None
    with open('provincias.json', 'r') as f:
        viviendas_dict = json.load(f)
    for vivienda in viviendas_dict:
        # BUGFIX: append a single (id, name) tuple -- the original passed two
        # positional arguments to list.append, which raises TypeError.
        provincias.append((vivienda['provincina_id'], vivienda['nombre']))
    with open('municipios.json', 'r') as f:
        viviendas_dict = json.load(f)
    for vivienda in viviendas_dict:
        municipios.append((vivienda['provincina_id'], vivienda['nombre']))
    # BUGFIX: str.find returns -1 when absent (which is truthy); compare
    # explicitly so only names containing 'capital' are shortened.
    if lugar.find('capital') != -1:
        nombre = lugar.split(' ')
        lugar = nombre[0]
    for municipio in municipios:
        mun = municipio[1].lower()
        if lugar == mun:
            id = municipio[0]
            for prov in provincias:
                idp = prov[0]
                if id == idp:
                    provincia = prov[1]
    if provincia == None:
        provincia = lugar
    return provincia
'''
#En su defecto añadir un array a pelo aquí por código
array_enlaces_nuevo = ['asdffgsgasdfadf.com']
'''
##funcion comparar si algun enlace ya esta en el json
def comprobarRepes(array_enlaces_nuevos):
array_links_antiguos = []
#Comprobamos primero que no se repitan los links de nuevos.txt
news = []
for nuevo in array_enlaces_nuevos:
if nuevo not in news:
news.append(nuevo)
#Lee json ya creado con las viviendas y guarda variables en el array links antiguos
with open('viviendas.json', 'r') as f:
viviendas_dict = json.load(f)
for vivienda in viviendas_dict:
array_links_antiguos.append(vivienda['link'])
#Hace comprobacion de si alguno de los nuevos ya estaba en el array antiguo
a_anadir = []
for item in news:
if item not in array_links_antiguos:
a_anadir.append(item)
#Imprimimos cuantos enlaces nuevos hay para añadir y lo devolvemos
print(len(a_anadir))
return a_anadir
#Funcion para insertar al json los enlaces nuevos
def insertar_array_json():
#Se comprueba si alguno ya esta en el json
arrayenlaces = comprobarRepes(array_enlaces_nuevo)
#Si hay alguno nuevo que meter
if (len(arrayenlaces) != 0):
#borramos el cierre de corchete y añadimos una coma
with open('./viviendas.json', 'ab') as filehandle:
filehandle.seek(-1, os.SEEK_END)
filehandle.truncate()
filehandle.close()
final = open('./viviendas.json', 'a')
final.write(',')
final.close()
cuanto_queda = len(arrayenlaces)
#Recorre array de enlaces a añadir
for enlace in arrayenlaces:
print(cuanto_queda)
try:
page = requests.get(enlace)
soup = BeautifulSoup(page.content, 'html.parser')
if (str(soup).find('re-DetailHeader-price') != -1):
div_contenido = soup.find('div', class_ = "re-RealestateDetail-topContainer")
precio1 = div_contenido.find('span' , class_ = "re-DetailHeader-price")
cositas = div_contenido.find('div' , class_ = "re-DetailHeader-propertyTitleContainer")
donde = cositas.find('h1', class_ = "re-DetailHeader-propertyTitle")
cosill = div_contenido.find('ul' , class_ = 're-DetailHeader-features')
lista = cosill.findAll('li' , class_ = "re-DetailHeader-featuresItem")
regexTipo = '^(\w*)\s'
regexCAC = '\.es\/es\/(.*)\/vivienda'
reg = 'vivienda\/(.*)\/'
hecho = re.search(reg, enlace).group(1)
absa = hecho.split('/')
linkvivienda = enlace
lugarvivienda = absa[0].replace("-", " ")
preciovivienda = precio1.text
habitacionesvivienda = 'no especificado'
banosvivienda = 'no especificado'
metroscuadradosvivienda = ' no especificado'
numeroplantavivienda = 'no especificado'
for a in range (len(lista)):
i = lista[a]
if ((i.text).find('hab') != -1):
habitacionesvivienda = i.text
if ((i.text).find('ba') != -1):
banosvivienda = i.text
if ((i.text).find('m²') != -1):
metroscuadradosvivienda = i.text
if (i.text == 'Bajos'):
numeroplantavivienda = 'Bajos'
if((i.text).find('Planta') != -1):
numeroplantavivienda = i.text
compr_alq_comparvivienda = re.search(regexCAC, enlace).group(1)
tipovivienda = re.search(regexTipo, donde.text).group(1)
array_imagenes = []
for item in soup.find_all('img', class_ = 're-DetailMosaicPhoto'):
array_imagenes.append(item['src'])
lugar = arreglarLugar(lugarvivienda)
viv ="\n{\"link\": \"" + linkvivienda + "\",\"lugar\": \"" + lugar + "\",\"precio\": \"" + preciovivienda + "\",\"habitaciones\": \"" + habitacionesvivienda + "\",\"banos\": \"" + banosvivienda + "\",\"metros2\": \"" + metroscuadradosvivienda + "\",\"planta\": \"" + numeroplantavivienda + "\",\"compr_alq_compar\": \"" + compr_alq_comparvivienda + "\",\"tipo\": \"" +tipovivienda + "\",\"imagenes\": \"" + str(array_imagenes) + "\"},"
outF = open("viviendas.json", "a")
outF.write(viv)
outF.close()
comandosleep = 'sleep ' + str(randrange(1))
os.system(comandosleep)
except:
print('F en ', i)
#Borra el corchete de final del json
#Para saber cuanto queda + o -
cuanto_queda = cuanto_queda -1
#Borra la coma y pone un corchete
with open('./viviendas.json', 'ab') as filehandle:
filehandle.seek(-1, os.SEEK_END)
filehandle.truncate()
filehandle.close()
final = open('./viviendas.json', 'a')
final.write(']')
final.close()
#Borramos el archivo nuevos.txt
a_file = open("nuevos.txt", "w")
a_file.truncate()
a_file.close()
insertar_array_json()
|
# Generated by Django 3.1.5 on 2021-02-26 06:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a 'type' discriminator CharField to both
    the recruiter and student user models."""

    dependencies = [
        ('JOB', '0004_reqruiteruser'),
    ]
    operations = [
        migrations.AddField(
            model_name='reqruiteruser',
            name='type',
            field=models.CharField(default='reqruiter', max_length=50),
        ),
        migrations.AddField(
            model_name='studentuser',
            name='type',
            field=models.CharField(default='student', max_length=50),
        ),
    ]
|
# 感觉挺有意思
# 依次把最大的放到底下,这样翻转上面的就不会影响
# 先把最大的放在最上面,再翻转一次到指定位置
# NP困难问题
class Solution:
    def pancakeSort(self, arr: List[int]) -> List[int]:
        """Sort *arr* in place using prefix flips; return the flip sizes used.

        Each round moves the largest remaining value to the top with one flip,
        then flips the whole unsorted prefix to drop it into place.
        """
        flips = []
        for size in range(len(arr), 1, -1):
            # index of the largest value within the unsorted prefix arr[:size]
            biggest = max(range(size), key=arr.__getitem__)
            if biggest == size - 1:
                continue  # already in place, no flips needed this round
            arr[:biggest + 1] = arr[biggest::-1]   # bring it to the front
            arr[:size] = arr[size - 1::-1]         # flip it down into place
            flips.append(biggest + 1)
            flips.append(size)
        return flips
# 根据题意,加速
class Solution:
    def pancakeSort(self, arr: List[int]) -> List[int]:
        """Pancake sort assuming *arr* is a permutation of 1..n: place the
        values n..1 one at a time, recording two flips per value (even the
        trivial ones). The input list is not mutated."""
        flips = []
        for target in range(len(arr), 0, -1):
            pos = arr.index(target)
            flips += [pos + 1, target]
            # flipping arr[:pos+1] then arr[:target] parks `target` at the end;
            # keep only the still-unsorted prefix for the next round
            arr = arr[:pos:-1] + arr[:pos]
        return flips
|
# Plone/Archetypes product configuration constants.
PROJECTNAME = "AutocompleteWidget"
# Directory holding this product's skin layers.
SKINS_DIR = 'skins'
# Module globals handed to Zope's product registration machinery.
GLOBALS = globals()
from setuptools import setup

# Packaging metadata for the dlpdb toolkit.
# Fix: long_description previously read "estimate distances distances and
# angles" — duplicated word removed.
setup(
    name='dlpdb',
    packages=['dlpdb'],
    author='Andrew Jewett',
    author_email='jewett.aij@gmail.com',
    description='collect statistics from the entire PDB library',
    long_description='A collection of tiny scripts to help automate the process of downloading and extracting coordinates, sequences, and secondary-structure information from (a representative subset of) structures from the entire PDB library. These scripts were originally used to estimate distances and angles between specific atoms in proteins and DNA. The distribution of these distances and angles can be used to estimate force-field parameters for coarse grained models for these biomolecules.',
    url='https://github.com/jewettaij/dlpdb',
    download_url='https://github.com/jewettaij/dlpdb/archive/v0.5.1.zip',
    version='0.5.1',
    license='MIT',
    classifiers=['Development Status :: 4 - Beta',
                 'License :: OSI Approved :: MIT License',
                 'Environment :: Console',
                 'Topic :: Scientific/Engineering :: Chemistry',
                 'Operating System :: MacOS :: MacOS X',
                 'Operating System :: POSIX :: Linux',
                 'Operating System :: Microsoft :: Windows',
                 'Intended Audience :: Science/Research'],
    # Shell helpers installed verbatim onto the user's PATH.
    scripts=['dlpdb/scripts/extract_angles.sh',
             'dlpdb/scripts/extract_dihedrals.sh',
             'dlpdb/scripts/extract_projected_dihedrals.sh',
             'dlpdb/scripts/extract_line_separation.sh',
             'dlpdb/scripts/extract_distances.sh',
             'dlpdb/scripts/extract_helix_angles.sh',
             'dlpdb/scripts/extract_helix_dihedrals.sh',
             'dlpdb/scripts/extract_helix_projected_dihedrals.sh',
             'dlpdb/scripts/extract_helix_distances.sh',
             'dlpdb/scripts/extract_helix_resAveDistances.sh',
             'dlpdb/scripts/extract_resAveDistances.sh',
             'dlpdb/scripts/extract_sheet_angles.sh',
             'dlpdb/scripts/extract_sheet_dihedrals.sh',
             'dlpdb/scripts/extract_sheet_projected_dihedrals.sh',
             'dlpdb/scripts/extract_sheet_distances.sh',
             'dlpdb/scripts/extract_sheet_resAveDistances.sh',
             'dlpdb/scripts/extract_turn_angles.sh',
             'dlpdb/scripts/extract_turn_dihedrals.sh',
             'dlpdb/scripts/extract_turn_projected_dihedrals.sh',
             'dlpdb/scripts/extract_turn_distances.sh',
             'dlpdb/scripts/extract_turn_resAveDistances.sh',
             'dlpdb/scripts/move_membrane_proteins.sh',
             'dlpdb/scripts/move_missing_dna_heavy_atoms.sh',
             'dlpdb/scripts/move_missing_protein_heavy_atoms.sh',
             'dlpdb/scripts/move_missing_secondary_str.sh',
             'dlpdb/scripts/move_nmr_structures.sh',
             'dlpdb/scripts/move_non-dna.sh',
             'dlpdb/scripts/replace_all_secondary_str.sh',
             'dlpdb/scripts/replace_missing_secondary_str.sh'],
    # Python entry points: each console script maps onto a module's main().
    entry_points={
        'console_scripts': ['coords2angles.py=dlpdb.coords2angles:main',
                            'coords2dihedrals.py=dlpdb.coords2dihedrals:main',
                            'coords2projected_dihedrals.py=dlpdb.coords2projected_dihedrals:main',
                            'coords2distances.py=dlpdb.coords2distances:main',
                            'coords2helixAngleOmega.py=dlpdb.coords2helixAngleOmega:main',
                            'dlpisces.py=dlpdb.dlpisces:main',
                            'download_pdbs.py=dlpdb.download_pdbs:main',
                            'dssp2pdb.py=dlpdb.dssp2pdb:main',
                            'fix_dna_residue_order_pdb.py=dlpdb.fix_dna_residue_order_pdb:main',
                            'has_dna_heavy_atoms.py=dlpdb.has_dna_heavy_atoms:main',
                            'has_helices.py=dlpdb.has_helices:main',
                            'has_protein_heavy_atoms.py=dlpdb.has_protein_heavy_atoms:main',
                            'has_rna_heavy_atoms.py=dlpdb.has_rna_heavy_atoms:main',
                            'has_secondary_str.py=dlpdb.has_secondary_str:main',
                            'has_sheets.py=dlpdb.has_sheets:main',
                            'has_turns.py=dlpdb.has_turns:main',
                            'helixAngleOmega.py=dlpdb.helixAngleOmega:main',
                            'merge_lines_periodic.py=dlpdb.merge_lines_periodic:main',
                            'pdb2coords_ave.py=dlpdb.pdb2coords_ave:main',
                            'pdb2coords.py=dlpdb.pdb2coords:main',
                            'pdb2helix.py=dlpdb.pdb2helix:main',
                            'pdb2sequence.py=dlpdb.pdb2sequence:main',
                            'pdb2sheet.py=dlpdb.pdb2sheet:main',
                            'pdb2turn.py=dlpdb.pdb2turn:main',
                            'pdb_interleave_residues.py=dlpdb.pdb_interleave_residues:main',
                            'select_chains_with_dna.py=dlpdb.select_chains_with_dna:main',
                            'select_interval.py=dlpdb.select_interval:main',
                            'strip_secondary_str.py=dlpdb.strip_secondary_str:main',
                            'truncate_chars.py=dlpdb.truncate_chars:main',
                            'truncate_tokens.py=dlpdb.truncate_tokens:main']},
    zip_safe=True,
    include_package_data=True
)
|
from lib.experiment import Experiment
from lib import scheduler, utils

# Run a single Metropolis experiment at constant inverse temperature
# (beta = 0.5) and plot the resulting error/energy traces.
# NOTE(review): N/M are presumably problem dimensions defined by
# lib.experiment.Experiment — confirm there.
experiment = Experiment(N=1000, M=5000, t_max=10000, beta_scheduler=scheduler.ConstantBetaScheduler(0.5),
                        algorithm="Metropolis", batch_size=None, use_gpu=False)
errors, energies, x = experiment.run()
utils.plot_errors_energies(errors, energies)
|
import factory
from archives.models import Archive
from credentials.tests.factories import SshKeyPairFactory
class ArchiveFactory(factory.DjangoModelFactory):
    """factory_boy factory producing Archive rows for tests."""
    # NOTE(review): FACTORY_FOR is the legacy (pre-2.0) factory_boy API;
    # modern versions use `class Meta: model = Archive` — confirm the pinned
    # factory_boy version before migrating.
    FACTORY_FOR = Archive

    name = factory.Sequence(lambda n: "Archive %d" % n)  # unique per instance
    host = "archive.example.com"
    policy = "cdimage"
    basedir = "/var/tmp"
    username = "testing"
    # Each archive gets its own generated SSH key pair.
    ssh_credentials = factory.SubFactory(SshKeyPairFactory)
    transport = "ssh"
|
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.forms import ModelForm
# Create your models here.
class Post(models.Model):
    """A blog post written by a Django auth user."""
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0 —
    # confirm the project's Django version before upgrading.
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Callable default: evaluated at save time, not at import time.
    created_date = models.DateTimeField(
        default=timezone.now)
    # Stays NULL until publish() is called.
    published_date = models.DateTimeField(
        blank=True, null=True)

    def publish(self):
        """Stamp the post with the current time and persist it."""
        self.published_date = timezone.now()
        self.save()

    def __str__(self):
        return self.title
# (value, label) pairs for ExamInfo.level — presumably JLPT-style levels
# N1..N5 plus 'NO' for none; confirm with the domain owner.
LEVEL_CHOICES = (
    ('N1', 'N1'),
    ('N2', 'N2'),
    ('N3', 'N3'),
    ('N4', 'N4'),
    ('N5', 'N5'),
    ('NO', 'NO'),
)


class ExamInfo(models.Model):
    """An exam with a short name and a level restricted to LEVEL_CHOICES."""
    name = models.CharField(max_length=10)
    level = models.CharField(max_length=2, choices=LEVEL_CHOICES)
#---------------------------Uploading of files
class User(models.Model):
    """Minimal user model demonstrating a file-upload field."""
    username = models.CharField(max_length = 30)
    # Uploaded files are stored under ./upload/.
    headImg = models.FileField(upload_to = './upload/')

    def __unicode__(self):
        # NOTE(review): __unicode__ is only honoured on Python 2; under
        # Python 3 Django uses __str__ — confirm the target interpreter.
        return self.username
#---------------------------Programming the Django application
from django.db import models


class Input(models.Model):
    """Single-field model storing one float value `r` entered by the user."""
    r = models.FloatField()
import wtforms as wtf


class Average(wtf.Form):
    """WTForms form with a single, required file-upload field."""
    filename = wtf.FileField(validators=
                             [wtf.validators.InputRequired()])
|
from scrapers.asosmen import AsosMenScraper

# Script entry point: run the ASOS men's section scraper.
if __name__ == "__main__":
    scraper = AsosMenScraper()
    scraper.run()
|
"""Restaurant rating lister."""
# put your code here
import sys
import random
filename = sys.argv[1]
my_file = open(filename)
restaurants_dict = {}
for line in my_file:
words_list = (line.rstrip()).split(':')
restaurants_dict[words_list[0]] = int(words_list[1])
while True:
print("Please select one of these:\n\
See all the ratings (S)\n\
Add a new restaurant (A)\n\
Update ratings of an existing restaurant (U)\n\
Quit (Q)")
choice =input("S, A, U or Q: ")
if choice.upper() == "Q":
break
elif choice.upper() == 'A':
restaurant = input("Please enter the restaurant name: ")
rating = int(input("Please enter it's rating: "))
while rating < 1 or rating > 5:
print("Please enter a rating between 1 and 5.")
rating = int(input("Please enter it's rating: "))
restaurants_dict[restaurant.title()] = rating
elif choice.upper() == 'S':
for restaurant, rating in sorted(restaurants_dict.items()):
print("{} is rated at {}." .format(restaurant, rating))
elif choice.upper() == 'U':
selection = input("Do you want to choose the restaurant to update? Y/N ")
if selection.upper() == "Y":
print("Here is the list of restaurants: ")
for restaurant in restaurants_dict.keys():
print(restaurant)
new_rating_for_rest = input("Please enter your choice: ").title()
else:
restaurants_list = list(restaurants_dict.keys())
new_rating_for_rest = random.choice(restaurants_list)
print("Here is the restaurant to change the rating.")
print(new_rating_for_rest, restaurants_dict[new_rating_for_rest])
new_rating = int(input("Please enter new rating "))
while new_rating < 1 or new_rating > 5:
print("Please enter a rating between 1 and 5.")
new_rating = int(input("Please enter it's rating: "))
restaurants_dict[new_rating_for_rest] = new_rating
|
# "r" = read-only mode; opening a missing file raises FileNotFoundError.
# Fix: the original only closed the handle on the success path — a context
# manager guarantees the file is closed even if iteration fails.
with open("file1", "r") as file1:
    # enumerate(..., 1) numbers lines from 1 while streaming the file,
    # producing the same output as the original `print(i+1, j.strip())`.
    for lineno, line in enumerate(file1, 1):
        print(lineno, line.strip())
import numpy as np
from joblib import Parallel, delayed
import timeit
import time
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from .BaseCrossVal import BaseCrossVal
from ..utils import binary_metrics, dict_perc, dict_median
class holdout(BaseCrossVal):
    """ Exhaustive search over param_dict calculating binary metrics.

    Parameters
    ----------
    model : object
        This object is assumed to store bootlist attributes in .model (e.g. modelPLS.model.x_scores_).
    X : array-like, shape = [n_samples, n_features]
        Predictor variables, where n_samples is the number of samples and n_features is the number of predictors.
    Y : array-like, shape = [n_samples, 1]
        Response variables, where n_samples is the number of samples.
    param_dict : dict
        List of attributes to calculate and return bootstrap confidence intervals.
    split: : a positive number between 0 and 1, (default 0.8)
        The split for the train and test split.

    Methods
    -------
    Run: Runs all necessary methods prior to plot.
    Plot: Creates a R2/Q2 plot.
    """

    def __init__(self, model, X, Y, param_dict, folds=None, n_mc=1, n_boot=0, n_cores=-1, ci=95, test_size=0.2, stratify=True):
        """Store holdout-specific settings; shared setup lives in BaseCrossVal."""
        super().__init__(model=model, X=X, Y=Y, param_dict=param_dict, folds=folds, n_mc=n_mc, n_boot=n_boot, n_cores=n_cores, ci=ci)
        if folds is not None:
            print("You are using holdout not kfold, so folds has no effect.")
        # Save holdout specific inputs
        self.test_size = test_size
        # sklearn's train_test_split stratifies on the label vector itself,
        # so store Y here (or None to disable stratified splitting).
        if stratify is True:
            self.stratify = Y
        else:
            self.stratify = None

    def calc_ypred(self):
        """Calculates ypred full and ypred cv."""
        time.sleep(0.5)  # Sleep for 0.5 secs to finish printing
        # Start Timer
        start = timeit.default_timer()
        # Actual loop including Monte-Carlo reps: each parameter set is
        # evaluated n_mc times, each on a fresh random train/test split.
        self.loop_mc = self.param_list * self.n_mc
        ypred = Parallel(n_jobs=self.n_cores)(delayed(self._calc_ypred_loop)(i) for i in tqdm(range(len(self.loop_mc))))
        # Split ypred into full / cv and put in final format
        # Format :::> self.ypred_full -> parameter_type -> monte-carlo -> y_true / y_pred
        self.ypred_full = [[] for i in range(len(self.param_list))]
        self.ypred_cv = [[] for i in range(len(self.param_list))]
        self.loop_mc_numbers = list(range(len(self.param_list))) * self.n_mc
        for i in range(len(self.loop_mc)):
            j = self.loop_mc_numbers[i]  # Location to append to
            self.ypred_full[j].append(ypred[i][0])
            self.ypred_cv[j].append(ypred[i][1])
        # Stop timer
        stop = timeit.default_timer()
        self.parallel_time = (stop - start) / 60
        print("Time taken: {:0.2f} minutes with {} cores".format(self.parallel_time, self.n_cores))

    def calc_ypred_epoch(self):
        """Calculates ypred full and ypred cv for each epoch (edge case)."""
        time.sleep(0.5)  # Sleep for 0.5 secs to finish printing
        # Start Timer
        start = timeit.default_timer()
        # Set param to the max -> Actual loop including Monte-Carlo reps.
        # Training once at the maximum epoch count yields predictions for all
        # earlier epochs via the model's stored epoch history.
        epoch_param = [self.param_list[-1]]
        self.loop_mc = epoch_param * self.n_mc
        ypred = Parallel(n_jobs=self.n_cores)(delayed(self._calc_ypred_loop_epoch)(i) for i in tqdm(range(len(self.loop_mc))))
        # Get epoch list (converted to 0-based indices into the epoch history)
        self.epoch_list = []
        for m in self.param_list2:
            for t, v in m.items():
                self.epoch_list.append(v - 1)
        # Split ypred into full / cv and put in final format
        # Format :::> self.ypred_full -> parameter_type -> monte-carlo -> y_true / y_pred
        # Note, we need to pull out the specific epochs from the model
        self.ypred_full = [[] for i in range(len(self.epoch_list))]
        self.ypred_cv = [[] for i in range(len(self.epoch_list))]
        for i in range(len(self.loop_mc)):
            for j in range(len(self.epoch_list)):
                actual_epoch = self.epoch_list[j]
                self.ypred_full[j].append([ypred[i][0][0][0], ypred[i][0][0][1][actual_epoch]])
                self.ypred_cv[j].append([ypred[i][1][0][0], ypred[i][1][0][1][actual_epoch]])
        # Stop timer
        stop = timeit.default_timer()
        self.parallel_time = (stop - start) / 60
        print("Time taken: {:0.2f} minutes with {} cores".format(self.parallel_time, self.n_cores))

    def _calc_ypred_loop(self, i):
        """Core component of calc_ypred."""
        # Set x and y. When len(X) != len(Y), X is presumably a two-block
        # (multiblock) input; both blocks are split consistently — TODO confirm
        # against BaseCrossVal.
        if len(self.X) == len(self.Y):
            X_train, X_test, Y_train, Y_test = train_test_split(self.X, self.Y, test_size=self.test_size, stratify=self.stratify)
        else:
            X0_train, X0_test, X1_train, X1_test, Y_train, Y_test = train_test_split(self.X[0], self.X[1], self.Y, test_size=self.test_size, stratify=self.stratify)
            X_train = [X0_train, X1_train]
            X_test = [X0_test, X1_test]
        # Set hyper - parameters
        params_i = self.loop_mc[i]
        model_i = self.model()
        model_i.set_params(params_i)
        # Split into train and test
        ypred_full_i = model_i.train(X_train, Y_train)
        ypred_cv_i = model_i.test(X_test)
        # Get ypred full cv: pair each prediction with its true labels
        ypred_full = [Y_train, ypred_full_i]
        ypred_cv = [Y_test, ypred_cv_i]
        return [ypred_full, ypred_cv]

    def _calc_ypred_loop_epoch(self, i):
        """Core component of calc_ypred_epoch."""
        # Set inputs
        Y_full = []
        Y_cv = []
        if len(self.X) == len(self.Y):
            X_train, X_test, Y_train, Y_test = train_test_split(self.X, self.Y, test_size=self.test_size, stratify=self.stratify)
        else:
            X0_train, X0_test, X1_train, X1_test, Y_train, Y_test = train_test_split(self.X[0], self.X[1], self.Y, test_size=self.test_size, stratify=self.stratify)
            X_train = [X0_train, X1_train]
            X_test = [X0_test, X1_test]
        # Set hyper - parameters
        params_i = self.loop_mc[i]
        model_i = self.model()
        model_i.set_params(params_i)
        # Train model with epoch_ypred=True so per-epoch predictions are kept
        model_i.train(X_train, Y_train, epoch_ypred=True, epoch_xtest=X_test)
        Y_full_split = model_i.epoch.Y_train
        Y_full.append([Y_train, Y_full_split])
        Y_cv_split = model_i.epoch.Y_test
        Y_cv.append([Y_test, Y_cv_split])
        return [Y_full, Y_cv]

    def calc_stats(self):
        """Calculates binary statistics from ypred full and ypred cv."""
        # Calculate for each parameter and append
        stats_list = []
        std_list = []
        self.full_loop = []
        self.cv_loop = []
        for i in range(len(self.param_list)):
            full_loop = []
            cv_loop = []
            # Get all monte-carlo metric dicts for this parameter set
            for k in range(len(self.ypred_full[i])):
                full_mc = binary_metrics(self.ypred_full[i][k][0], self.ypred_full[i][k][1], parametric=self.model.parametric)
                cv_mc = binary_metrics(self.ypred_cv[i][k][0], self.ypred_cv[i][k][1], parametric=self.model.parametric)
                full_loop.append(full_mc)
                cv_loop.append(cv_mc)
            # Average binary metrics (median across Monte-Carlo reps)
            stats_full_i = dict_median(full_loop)
            stats_cv_i = dict_median(cv_loop)
            # Rename columns: suffix "full"/"cv", then relabel R² on the full
            # data as R² and R² on the held-out data as Q².
            stats_full_i = {k + "full": v for k, v in stats_full_i.items()}
            stats_cv_i = {k + "cv": v for k, v in stats_cv_i.items()}
            stats_cv_i["R²"] = stats_full_i.pop("R²full")
            stats_cv_i["Q²"] = stats_cv_i.pop("R²cv")
            # Combine and append
            stats_combined = {**stats_full_i, **stats_cv_i}
            stats_list.append(stats_combined)
            # Save loop -> full_loop is a placeholder
            self.full_loop.append(full_loop)
            self.cv_loop.append(cv_loop)
            # Keep std (confidence-interval percentiles) if n_mc > 1
            if self.n_mc > 1:
                std_full_i = dict_perc(full_loop, ci=self.ci)
                std_cv_i = dict_perc(cv_loop, ci=self.ci)
                std_full_i = {k + "full": v for k, v in std_full_i.items()}
                std_cv_i = {k + "cv": v for k, v in std_cv_i.items()}
                std_cv_i["R²"] = std_full_i.pop("R²full")
                std_cv_i["Q²"] = std_cv_i.pop("R²cv")
                std_combined = {**std_full_i, **std_cv_i}
                std_list.append(std_combined)
        self.table = self._format_table(stats_list)  # Transpose, Add headers
        self.table = self.table.reindex(index=np.sort(self.table.index))
        if self.n_mc > 1:
            self.table_std = self._format_table(std_list)  # Transpose, Add headers
            self.table_std = self.table_std.reindex(index=np.sort(self.table_std.index))
        return self.table

    # NOTE(review): color_beta uses a mutable default list — harmless while it
    # is only read, but worth converting to a tuple/None upstream.
    def plot(self, metric="r2q2", scale=1, color_scaling="tanh", rotate_xlabel=True, legend="bottom_right", color_beta=[10, 10, 10], ci=95, diff1_heat=True):
        """Delegate to BaseCrossVal.plot with model="holdout"."""
        super().plot(metric=metric, scale=scale, color_scaling=color_scaling, rotate_xlabel=rotate_xlabel, legend=legend, model="holdout", color_beta=color_beta, ci=ci, diff1_heat=diff1_heat)
|
import argparse
def get_args():
    """Build and parse the command-line options for training.

    Returns
    -------
    argparse.Namespace
        Parsed options: validation flag, input file paths, epoch count,
        learning rate, save interval, naming prefix, warm-start model and
        regularization strength.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--validation',
                        action='store_true',
                        default=False,
                        help='To split validation or not.')
    parser.add_argument('--train_filename',
                        default='data/train.csv',
                        help='The csv file to train')
    parser.add_argument('--attributes_filename',
                        default='models/attributes_PM2.5_PM10.npy',
                        help='The filtered boolean numpy file,\
                        specified which attributes are used \
                        to train.')
    parser.add_argument('-e', '--epoches',
                        type=int,
                        default=2000)
    parser.add_argument('-lr', '--learning_rate',
                        type=float,
                        default=0.005)
    parser.add_argument('--save_intervals',
                        default=100,
                        type=int,
                        help='The epoch intervals to save models')
    parser.add_argument('--prefix',
                        required=True,
                        help='The prefix of saving name')
    parser.add_argument('--params_init_model',
                        default=None,
                        type=str,
                        help='The initialization parameters \
                        from a given model name.')
    parser.add_argument('--lambda_value',
                        default=0.0,
                        type=float,
                        help='The regularization hyperparameter,\
                        default=0.0')
    args = parser.parse_args()
    return args
|
import cv2

# Basic functions — resizing an image; read the current size first.
img = cv2.imread("images/profile.jpg")
print(img.shape)  # ex output: (1364, 1364, 3) -> (height, width, channels); OpenCV channel order is BGR
imgResize = cv2.resize(img, (1000, 500))  # cv2.resize takes (width, height)
# Crop via NumPy slicing: img[y1:y2, x1:x2] — rows (height) first, then
# columns (width), the reverse of OpenCV functions' (width, height) order.
imgCropped = img[0:200, 200:500]
cv2.imshow("Image", imgResize)
cv2.imshow("Image Cropped", imgCropped)
cv2.waitKey(0)  # block until a key press so the windows stay open
|
from flask import Flask, render_template, request, url_for, flash, session, redirect
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, IntegerField, validators
from passlib.hash import sha256_crypt
from functools import wraps
from flask_socketio import SocketIO, emit
from pprint import pprint
# Flask application plus the Socket.IO layer wrapped around it.
app = Flask(__name__)
# NOTE(review): hard-coded secret key — load from config/env for production.
app.secret_key='secret123'
socketio = SocketIO( app )
def messageRecived():
    """Ack callback passed to socketio.emit (name's spelling kept — it is
    referenced below)."""
    print( 'message was received!!!' )
@socketio.on( 'my event' )
def handle_my_custom_event( json ):
    """Re-broadcast any 'my event' payload to all clients as 'my response'."""
    print( 'recived my event: ' + str( json ) )
    # connections.append(json)
    # socketio.emit( 'update connections', connections, callback=updateConnections )
    socketio.emit( 'my response', json, callback=messageRecived )
# connections = []
# Config MySQL ('XXXX' are placeholders; keep real credentials out of source)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'XXXX'
app.config['MYSQL_PASSWORD'] = 'XXXX'
app.config['MYSQL_DB'] = 'XXXX'
# DictCursor makes fetchone()/fetchall() return dicts keyed by column name.
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# init mysql
mysql = MySQL(app)
@app.route('/')
def home():
    """Landing page."""
    return render_template('home.html')
@app.route('/chat')
def chat():
    """Socket.IO-backed chat page."""
    return render_template('chat.html')
class RegisterForm(Form):
    """Registration form: name, username, email and a confirmed password."""
    name = StringField('Name', [validators.Length(min=1, max=50)])
    username = StringField('Username', [validators.Length(min=3, max=25)])
    email = StringField('Email', [validators.Length(min=6, max=50)])
    password = PasswordField('Password', [
        validators.DataRequired(),
        # Must match the 'confirm' field below.
        validators.EqualTo('confirm', message='Passwords do not match')
    ])
    confirm = PasswordField('Confirm Password')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a user row from the registration form, then redirect to login."""
    form = RegisterForm(request.form)
    if request.method == 'POST' and form.validate():
        name = form.name.data
        email = form.email.data
        username = form.username.data
        # NOTE(review): passlib's .encrypt is a deprecated alias of .hash —
        # confirm the installed passlib version before renaming.
        password = sha256_crypt.encrypt(str(form.password.data))
        # Create cursor
        cur = mysql.connection.cursor()
        # Parameterized INSERT — values are escaped by the driver.
        cur.execute("INSERT INTO users(name, email, username, password) VALUES(%s, %s, %s, %s)", (name, email, username, password))
        # Commit to DB
        mysql.connection.commit()
        # Close connection
        cur.close()
        flash('You are now registered and can log in', 'success')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
# Send a friend request (AJAX endpoint; the old "#user login" comment was a mislabel)
@app.route('/add_friend', methods=['GET', 'POST'])
def add_friend():
    """POST: insert (sender_id, receiver_id) into friend_requests and echo the
    receiver id back to the caller. GET: returns an empty body.
    """
    if request.method == 'POST':
        # get form fields
        sender_id = request.form['sender_id']
        receiver_id = request.form['receiver_id']
        # Create cursor
        cur = mysql.connection.cursor()
        # Parameterized INSERT — values are escaped by the driver.
        cur.execute("INSERT INTO friend_requests(sender_id, receiver_id) VALUES(%s, %s)", (sender_id, receiver_id))
        # Commit to DB
        mysql.connection.commit()
        cur.close()
        return receiver_id
    # Bug fix: the old code did `return receiver_id` here, but receiver_id is
    # only bound in the POST branch, so every GET raised NameError (HTTP 500).
    return ''
# Reject (delete) a pending friend request (AJAX endpoint)
@app.route('/reject_friend', methods=['GET', 'POST'])
def reject_friend():
    """POST: delete the pending friend_requests row for (sender, receiver)
    and echo the receiver id back. GET: returns an empty body.
    """
    if request.method == 'POST':
        # get form fields
        sender_id = request.form['sender_id']
        receiver_id = request.form['receiver_id']
        # Create cursor
        cur = mysql.connection.cursor()
        # Remove the request row (parameterized to avoid SQL injection).
        cur.execute("DELETE FROM friend_requests WHERE sender_id=%s AND receiver_id=%s", (sender_id, receiver_id))
        # Commit to DB
        mysql.connection.commit()
        cur.close()
        return receiver_id
    # Bug fix: the old code did `return receiver_id` here, but receiver_id is
    # only bound in the POST branch, so every GET raised NameError (HTTP 500).
    return ''
# Accept a pending friend request (AJAX endpoint)
@app.route('/accept_friend', methods=['GET', 'POST'])
def accept_friend():
    """POST: record the friendship, delete the pending request, and echo the
    receiver id back. GET: returns an empty body.
    """
    if request.method == 'POST':
        # get form fields
        sender_id = request.form['sender_id']
        receiver_id = request.form['receiver_id']
        # Create cursor
        cur = mysql.connection.cursor()
        cur.execute("INSERT INTO friends(sender_id, receiver_id) VALUES(%s, %s)", (sender_id, receiver_id))
        # Bug fix: the DELETE used "sender_id=%s, receiver_id=%s" — a comma is
        # not valid in a SQL WHERE clause; the predicates must be joined by AND.
        cur.execute("DELETE FROM friend_requests WHERE sender_id=%s AND receiver_id=%s", (sender_id, receiver_id))
        # One commit covers both statements (they succeed or fail together).
        mysql.connection.commit()
        cur.close()
        return receiver_id
    # Bug fix: the old code did `return receiver_id` here, but receiver_id is
    # only bound in the POST branch, so every GET raised NameError (HTTP 500).
    return ''
# if result > 0:
# # Get the stored hash
# data = cur.fetchone()
# password = data['password']
# #compare the passwords
# if sha256_crypt.verify(password_candidate, password):
# #passed
# session['logged_in'] = True;
# session['username'] = username
# session['id'] = data['id']
# flash('You are now logged in', 'success')
# return redirect(url_for('profile'))
# else:
# error = 'Invalid login'
# return render_template('login.html', error=error)
# #close connection
# cur.close()
# else:
# error = 'Username not found'
# return render_template('login.html', error=error)
# return render_template('login.html')
# user login
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate against the users table and populate the session."""
    if request.method == 'POST':
        # get form fields
        username = request.form['username']
        password_candidate = request.form['password']
        # Create cursor
        cur = mysql.connection.cursor()
        # Get user by username (parameterized query)
        result = cur.execute("SELECT * FROM users WHERE username=%s", [username])
        if result > 0:
            # Get the stored hash
            data = cur.fetchone()
            password = data['password']
            # Bug fix: the old code had `cur.close()` *after* a return
            # statement, so the cursor was never closed; close it here.
            cur.close()
            # compare the passwords
            if sha256_crypt.verify(password_candidate, password):
                # passed
                session['logged_in'] = True
                session['username'] = username
                session['id'] = data['id']
                flash('You are now logged in', 'success')
                return redirect(url_for('profile'))
            error = 'Invalid login'
            return render_template('login.html', error=error)
        cur.close()  # also close on the not-found path
        error = 'Username not found'
        return render_template('login.html', error=error)
    return render_template('login.html')
# Decorator: only allow access to a view when the session is authenticated.
def is_logged_in(f):
    @wraps(f)
    def wrap(*args, **kwargs):
        """Bounce anonymous visitors to the login page; otherwise run the view."""
        if 'logged_in' not in session:
            flash('Unauthorized, Please login', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
# Dashboard
@app.route('/profile')
@is_logged_in
def profile():
    """Render the logged-in user's profile: all users, their own posts,
    confirmed friends and pending friend requests."""
    # Create cursor
    cur = mysql.connection.cursor()
    # All users (listed in the template)
    cur.execute("SELECT * FROM users")
    users = cur.fetchall()
    # This user's posts
    cur.execute("SELECT * FROM posts WHERE username=%s", [session['username']])
    posts = cur.fetchall()
    # Friendships where this user is on either side
    cur.execute("SELECT * FROM friends WHERE sender_id=%s OR receiver_id=%s", [session['id'], session['id']])
    friends = []
    # Bug fix: the old code compared get_user_by_id(...) — a whole row dict —
    # against session['id'] (a scalar), so the tests never matched and the
    # wrong party could be appended. For each friendship append the *other*
    # participant. (Assumes session['id'] has the same type as the stored
    # ids — TODO confirm.)
    for row in cur.fetchall():
        if row['receiver_id'] != session['id']:
            friends.append(get_user_by_id(row['receiver_id']))
        else:
            friends.append(get_user_by_id(row['sender_id']))
    # Pending requests addressed to this user
    cur.execute("SELECT * FROM friend_requests WHERE receiver_id=%s", [session['id']])
    friend_requests = []
    for row in cur.fetchall():
        friend_requests.append(get_user_by_id(row['sender_id']))
    # Close connection
    cur.close()
    return render_template('profile.html', users=users, posts=posts, friend_requests=friend_requests, friends=friends)
# Logout
@app.route('/logout')
@is_logged_in
def logout():
    """Drop the entire session and return to the login page."""
    session.clear()
    flash('You are now logged out', 'success')
    return redirect(url_for('login'))
# Posts form class
class PostForm(Form):
    """WTForms form for creating a post on the socialize page."""
    title = StringField('Title', [validators.Length(min=1, max=200)])
    # Fix: validators.required was a deprecated alias removed in WTForms 3;
    # DataRequired is the supported equivalent (and matches RegisterForm).
    post = TextAreaField('Post', [validators.DataRequired()])
# Socialize feed
@app.route('/socialize', methods=['GET', 'POST'])
@is_logged_in
def socialize():
    """POST with a valid form: insert the new post and redirect to profile.
    GET: render the feed with all users, all posts and pending requests."""
    form = PostForm(request.form)
    if request.method == 'POST' and form.validate():
        title = form.title.data
        post = form.post.data
        username = session['username']
        # Create Cursor
        cur = mysql.connection.cursor()
        # Parameterized INSERT — values are escaped by the driver.
        cur.execute("INSERT INTO posts (username, title, post) VALUES (%s, %s, %s)", (username, title, post))
        # Commit to DB
        mysql.connection.commit()
        # Close connection
        cur.close()
        flash('Post added', 'success')
        return redirect(url_for('profile'))
    # Create cursor
    cur = mysql.connection.cursor()
    # All users
    result = cur.execute("SELECT * FROM users")
    users = cur.fetchall()
    # All posts (site-wide feed)
    result = cur.execute("SELECT * FROM posts")
    posts = cur.fetchall()
    # result = cur.execute("SELECT sender_id FROM friend_requests WHERE receiver_id=%s", [session['id']])
    # Pending requests addressed to this user, resolved to user rows
    result = cur.execute("SELECT * FROM friend_requests WHERE receiver_id=%s", [session['id']])
    # request_ids = get_user_by_id(result)
    friend_requests = []
    for id in cur.fetchall():
        friend_requests.append(get_user_by_id(id['sender_id']))
    # Close connection
    cur.close()
    return render_template('socialize.html', users=users, friend_requests=friend_requests, form=form, posts=posts)
def get_user_by_id(id):
    """Fetch one user row (a dict, via DictCursor) by primary key;
    returns None when no such user exists."""
    # Create cursor
    cur = mysql.connection.cursor()
    # Look the user up by id (parameterized query)
    result = cur.execute("SELECT * FROM users WHERE id=%s", [id])
    user = cur.fetchone()
    # Close connection
    cur.close()
    return user
if __name__=='__main__':
    # socketio.run( app, debug = True, host'10.42.0.12' )
    # Run under SocketIO's server (required for websocket support).
    socketio.run( app, debug = True )
|
# !/usr/bin/env python
# encoding: utf-8
import sys
import os
import json
import datetime
import time
import bisect
import pandas as pd
from functools import total_ordering
class Account(object):
    """Abstract trading-account interface driven by replayed Events.

    Concrete subclasses (e.g. FutureAccount below) implement the four hooks.
    """

    def order(self, order_id, direction, price, qty):
        """Register a new limit order."""
        # Fix: the original `raise "no implement"` is a TypeError in Python 3
        # (exceptions must derive from BaseException).
        raise NotImplementedError("order is not implemented")

    def trade(self, order_id, price, qty):
        """Record a (partial) fill of an existing order."""
        raise NotImplementedError("trade is not implemented")

    def cancel(self, order_id, leaves_qty):
        """Cancel the unfilled remainder of an order."""
        raise NotImplementedError("cancel is not implemented")

    def settle(self, timestamp):
        """Take the daily settlement snapshot."""
        raise NotImplementedError("settle is not implemented")
@total_ordering
class Event(object):
    """Base class for timestamp-ordered, replayable backtest events.

    Ordering (completed by functools.total_ordering from __eq__/__lt__) is by
    integer timestamp only, which bisect.insort relies on when merging
    settlement events into the stream.
    """

    def __init__(self, timestamp):
        self._timestamp = int(timestamp)

    # The Python-2-only __cmp__ stub was removed: Python 3 ignores it and
    # total_ordering derives the remaining comparisons from __eq__/__lt__.

    def __eq__(self, other):
        return self._timestamp == other._timestamp

    def __lt__(self, other):
        return self._timestamp < other._timestamp

    def do(self, account):
        """Apply this event to *account*; subclasses must override."""
        # Fix: `raise "error"` is invalid in Python 3 — strings are not
        # exceptions. Raise the intended exception type instead.
        raise NotImplementedError("Event.do must be implemented by subclasses")
class LimitOrder(Event):
    """Event: a new limit order is placed."""

    def __init__(self, timestamp, order_id, direction, price, qty):
        super().__init__(timestamp)
        self._order_id = order_id
        self._direction = direction
        self._price = price
        self._qty = qty

    def do(self, account):
        """Replay this order placement onto *account*."""
        account.order(self._order_id, self._direction, self._price, self._qty)
class TradeOrder(Event):
    """Event: a fill (trade) against an existing order."""

    def __init__(self, timestamp, order_id, price, qty):
        super().__init__(timestamp)
        self._order_id = order_id
        self._price = price
        self._qty = qty

    def do(self, account):
        """Replay this fill onto *account*."""
        account.trade(self._order_id, self._price, self._qty)
class CancelOrder(Event):
    """Event: an order's remaining quantity is cancelled."""

    def __init__(self, timestamp, order_id):
        super().__init__(timestamp)
        self._order_id = order_id

    def do(self, account):
        # NOTE(review): the Account base declares cancel(order_id, leaves_qty)
        # but the concrete FutureAccount.cancel takes only order_id, which is
        # what this call matches — confirm and align the base signature.
        account.cancel(self._order_id)
class DailySettle(Event):
    """Marker event that triggers an end-of-day settlement snapshot."""

    def __init__(self, timestamp):
        super().__init__(timestamp)

    def do(self, account):
        """Snapshot the account state as of this event's timestamp."""
        account.settle(self._timestamp)
class Order(object):
    """Mutable record of one working order: direction plus unfilled quantity."""

    def __init__(self, direction, leaves_qty):
        self._direction = direction    # "Buy" or "Sell"
        self._leaves_qty = leaves_qty  # quantity still unfilled

    def trade(self, qty):
        """Consume *qty* from the unfilled amount; it may never go negative."""
        self._leaves_qty -= qty
        assert (self._leaves_qty >= 0)
class PositionDetail(object):
    """One open position lot: direction, entry price and remaining quantity."""

    def __init__(self, direction, price, leaves_qty):
        self._direction = direction
        self._price = price
        self._leaves_qty = leaves_qty

    def close(self, price, qty, update_pl):
        """Close up to *qty* of this lot at *price*.

        Reports the realised P&L (in price points) through *update_pl* and
        returns the quantity that could NOT be absorbed by this lot.
        """
        matched = min(self._leaves_qty, qty)
        self._leaves_qty -= matched
        # Longs profit when price rises, shorts when it falls.
        sign = 1 if self._direction == "Buy" else -1
        update_pl(sign * (price - self._price) * matched)
        return qty - matched
class FutureAccount(object):
    """Futures account that replays events: tracks working orders, open
    long/short position lots (netted FIFO), realised P&L and daily
    settlement snapshots exported via to_df()."""

    def __init__(self, contract_multiple):
        # contract_multiple converts price-point P&L to currency at settle time
        self._orders = {}           # order_id -> Order
        self._long_position = []    # open long PositionDetail lots (FIFO)
        self._short_position = []   # open short PositionDetail lots (FIFO)
        self._long_unfill = 0       # total unfilled buy quantity
        self._short_unfill = 0      # total unfilled sell quantity
        self._pl = 0.0              # realised P&L in price points
        self._contract_multiple = contract_multiple
        # Per-settlement time series consumed by to_df()
        self._result = {
            'date': [],
            'pl': [],
            'long_position': [],
            'short_position': [],
            'long_unfill': [],
            'short_unfill': []
        }

    def order(self, order_id, direction, price, qty):
        """Register a working order and grow the matching unfill counter."""
        self._orders[order_id] = Order(direction, qty)
        if self._orders[order_id]._direction == "Buy":
            self._long_unfill = self._long_unfill + qty
        else:
            self._short_unfill = self._short_unfill + qty

    def trade(self, order_id, price, qty):
        """Apply a fill: net against opposite-side lots FIFO, open the rest."""
        assert (order_id in self._orders)
        order = self._orders[order_id]
        order.trade(qty)
        # A Sell fill closes long lots first; a Buy fill closes short lots.
        close_positions = self._long_position if order._direction == "Sell" else self._short_position
        open_positions = self._long_position if order._direction == "Buy" else self._short_position
        leaves_qty = qty
        while leaves_qty > 0 and len(close_positions) > 0:
            leaves_qty = close_positions[0].close(price, leaves_qty, self.update_pl)
            if close_positions[0]._leaves_qty == 0:
                del close_positions[0]  # lot fully consumed
            else:
                assert close_positions[0]._leaves_qty > 0, close_positions[
                    0]._leaves_qty
        # Whatever could not be netted opens a new lot on this fill's side.
        if leaves_qty > 0:
            open_positions.append(PositionDetail(order._direction, price, leaves_qty))
        if order._direction == "Buy":
            self._long_unfill = self._long_unfill - qty
            assert (self._long_unfill >= 0)
        else:
            self._short_unfill = self._short_unfill - qty
            assert (self._short_unfill >= 0)

    def cancel(self, order_id):
        """Remove an order's unfilled remainder from the unfill counters."""
        assert (order_id in self._orders)
        leaves_qty = self._orders[order_id]._leaves_qty
        if self._orders[order_id]._direction == "Buy":
            self._long_unfill = self._long_unfill - leaves_qty
            assert (self._long_unfill >= 0)
        else:
            self._short_unfill = self._short_unfill - leaves_qty
            assert (self._short_unfill >= 0)

    def settle(self, timestamp):
        """Snapshot P&L (scaled to currency) and open/unfilled quantities."""
        self._result['date'].append(
            datetime.datetime.utcfromtimestamp(timestamp / 1000.0))  # ms epoch -> datetime
        self._result['pl'].append(self._pl * self._contract_multiple)
        self._result['long_position'].append(
            sum([pos._leaves_qty for pos in self._long_position]))
        self._result['short_position'].append(
            sum([pos._leaves_qty for pos in self._short_position]))
        self._result['long_unfill'].append(self._long_unfill)
        self._result['short_unfill'].append(self._short_unfill)

    def to_df(self):
        """Return the settlement snapshots as a pandas DataFrame."""
        return pd.DataFrame(self._result)

    def update_pl(self, pl):
        """Accumulate realised P&L in price points (scaled at settle time)."""
        self._pl = self._pl + pl
def read_file(path, events):
    """Append the order events stored in the JSON file at *path* to *events*.

    Expected layout: {"datas": [{"type": ..., "timestamp": ..., ...}, ...]};
    each record becomes a LimitOrder, CancelOrder or TradeOrder.

    Raises
    ------
    ValueError
        If a record has an unrecognised "type".
    """
    with open(path, 'r') as f:
        datas = json.load(f)
    for order in datas['datas']:
        if order['type'] == 'limit_order':
            events.append(
                LimitOrder(order['timestamp'], order['order_id'],
                           order['direction'], float(order['price']),
                           int(order['qty'])))
        elif order['type'] == 'cancel_order':
            events.append(
                CancelOrder(order['timestamp'], order['order_id']))
        elif order['type'] == 'trade_order':
            events.append(
                TradeOrder(order['timestamp'], order['order_id'],
                           float(order['price']), int(order['qty'])))
        else:
            # Fix: `assert (False)` is stripped under `python -O`; raise an
            # explicit error so malformed input always fails loudly.
            raise ValueError("unknown event type: {!r}".format(order['type']))
def summary(path):
    """Replay every instrument event file under `path` and stack the results.

    For each file, a FutureAccount is driven by the file's events plus one
    DailySettle event per trading day, and its daily frame is collected.
    """
    with open('product_info.json', 'r') as info_file:
        product_infos = json.load(info_file)
    frames = []
    for entry in os.listdir(path):
        events = []
        read_file(os.path.join(path, entry), events)
        instrument = os.path.splitext(entry)[0]
        product_code = instrument.strip('0123456789')
        assert product_code in product_infos, product_code
        account = FutureAccount(product_infos[product_code]['contract_multiple'])
        first_day = datetime.date(2016, 12, 5)
        last_day = datetime.date(2017, 11, 23)
        for offset in range((last_day - first_day).days + 1):
            day = first_day + datetime.timedelta(days=offset)
            # 15:00 local close, shifted by 8 hours to express it in UTC.
            close_dt = datetime.datetime.combine(day, datetime.time(15, 0, 0))
            close_dt = close_dt + datetime.timedelta(hours=8)
            ts = time.mktime(close_dt.timetuple())
            bisect.insort_right(events, DailySettle(int(ts * 1000)))
        assert (all(
            events[i] <= events[i + 1] for i in range(len(events) - 1)))
        for event in events:
            event.do(account)
        frame = account.to_df()
        frame['instrument'] = instrument
        frames.append(frame)
    return pd.concat(frames)
def main():
    """Entry point: summarize every event file, then dump daily and final P&L.

    Returns 1 when the input directory is missing (becomes the exit status).
    """
    input_path = sys.argv[1]
    if not os.path.exists(input_path):
        return 1
    result = summary(input_path)
    result.to_csv('daily_pl.csv')
    result.groupby('instrument').last().to_csv('pl.csv')
# Script entry point: propagate main()'s return value as the exit status
# (1 on a missing input path, None/0 on success).
if __name__ == '__main__':
    sys.exit(main())
|
from report import Report
from server import Server
from travis import Travis
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    """LeetCode 1893: is every integer in [left, right] covered by a range?"""

    def isCovered(self, ranges: List[List[int]], left: int, right: int) -> bool:
        """Return True iff each point in [left, right] lies in some [lo, hi]."""
        for point in range(left, right + 1):
            covered = False
            for lo, hi in ranges:
                if lo <= point <= hi:
                    covered = True
                    break
            if not covered:
                return False
        return True
# Quick smoke tests that run only when the module is executed directly.
if __name__ == "__main__":
    solution = Solution()
    assert solution.isCovered([[1, 2], [3, 4], [5, 6]], 2, 5)
    assert not solution.isCovered([[1, 10], [10, 20]], 21, 21)
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from pycont.apps.accounts.models import Account
from pycont.apps.transactions.models import Transaction
class AccountModelTestCase(TestCase):
    """Model-level tests for Transaction creation, defaults and validation."""
    fixtures = ['users', 'accounts']
    def test_created(self):
        """A transaction between two existing accounts can be created."""
        emitter = Account.objects.get(id=1)
        receiver = Account.objects.get(id=2)
        new_transaction = Transaction.objects.create(amount=42, emitter=emitter, receiver=receiver)
        self.assertIsInstance(new_transaction, Transaction)
    def test_defaults_euro(self):
        """The currency defaults to EUR when omitted."""
        emitter = Account.objects.get(id=1)
        receiver = Account.objects.get(id=2)
        new_transaction = Transaction.objects.create(amount=42, emitter=emitter, receiver=receiver)
        # assertEquals is a deprecated alias; assertEqual is the supported name.
        self.assertEqual(new_transaction.currency, 'EUR')
    def test_dont_accept_simolean(self):
        """An unknown currency code is rejected."""
        receiver = Account.objects.get(id=2)
        with self.assertRaises(ValidationError):
            Transaction.objects.create(amount=42, receiver=receiver, currency='SIM')
    def test_at_least_one_end(self):
        """A transaction may omit either the emitter or the receiver."""
        account = Account.objects.get(id=1)
        no_receiver = Transaction.objects.create(amount=42, emitter=account)
        no_emitter = Transaction.objects.create(amount=12, receiver=account)
        self.assertIsInstance(no_receiver, Transaction)
        self.assertIsInstance(no_emitter, Transaction)
    def test_validation_error_if_no_end(self):
        """A transaction with neither emitter nor receiver is invalid."""
        with self.assertRaises(ValidationError):
            Transaction.objects.create(amount=42)
    def test_not_empty(self):
        """A transaction without an amount is invalid."""
        emitter = Account.objects.get(id=1)
        receiver = Account.objects.get(id=2)
        with self.assertRaises(ValidationError):
            Transaction.objects.create(emitter=emitter, receiver=receiver)
|
import rumps
import os
from subprocess import call
# File used to persist the Desktop hidden/visible flag between runs.
FILE_NAME_STATUS = "gapak"
class Gapa(rumps.App):
    """Menu-bar app that toggles the visibility of macOS Desktop items.

    The current hidden/visible state is persisted to FILE_NAME_STATUS so it
    survives restarts.
    """
    def __init__(self, name):
        super().__init__(name, icon='images/circle.png', menu=['Toggle Desktop Items', None, 'Quit'], quit_button=None)
        # Restore the persisted state; default to "visible" when the status
        # file is missing or empty. (The previous readlines().pop(0) raised
        # an unhandled IndexError on an empty file.)
        try:
            with open(FILE_NAME_STATUS, "r") as file:
                lines = file.readlines()
                self.__isHidden = bool(lines) and lines[0] == 'True'
        except FileNotFoundError:
            self.__isHidden = False
    @rumps.clicked('Toggle Desktop Items')
    def hide(self, _):
        """Flip Desktop item visibility via `chflags hidden`/`nohidden`."""
        desktop = '{0}/Desktop/**'.format(os.path.expanduser('~'))
        if not self.__isHidden:
            call("/usr/bin/chflags hidden " + desktop, shell=True)
            self.__isHidden = True
        else:
            call("/usr/bin/chflags nohidden " + desktop, shell=True)
            self.__isHidden = False
    @rumps.clicked('Quit')
    def clean_up_before_quit(self, _):
        """Persist the current state, then quit the app."""
        with open(FILE_NAME_STATUS, "w") as file:
            file.write(str(self.__isHidden))
        rumps.quit_application()
# Launch the menu-bar app (blocks inside the rumps run loop).
if __name__ == "__main__":
    app = Gapa("gapa")
    app.run()
|
# Full web stack No browser required
# PhantomJS is a headless WebKit scriptable with a JavaScript API. It has fast and native support for various web standards: DOM handling, CSS selector, JSON, Canvas, and SVG
# http://phantomjs.org/examples/index.html
|
import urllib.request as fetch
import re
from time import sleep
from random import random
from pymongo import MongoClient as mc
ikea = 'http://www.ikea.com'

client = mc()
db = client.IKEA
furniture = db.core_furniture

# Re-crawl every product whose previous attempt failed (done == 2).
todo = furniture.find({'done': 2})
for each in todo:
    _id = str(each['id'])
    print(_id)
    sleep(2 * random())  # polite random delay between requests
    url = 'http://www.ikea.com/cn/zh/catalog/products/' + _id
    try:
        response = fetch.urlopen(url)
        response = response.read().decode('utf8')
        # Product name/type, package price and main image are scraped from
        # the raw HTML with regexes.
        pattern_name = re.compile(
            '<div id="name" class="productName">(.*?)</div>.*?<div id="type" class="productType">(.*?)<strong>', re.S)
        pattern_price = re.compile('<span id="price1" class="packagePrice">(.*?)</span>', re.S)
        pattern_img = re.compile('<img id="productImg" src=\'(.*?)\'.*?title=.*?>', re.S)
        result_name_type = re.findall(pattern_name, response)
        result_price = re.findall(pattern_price, response)
        result_img = re.findall(pattern_img, response)
        print(result_name_type)
        print(result_img)
        print(result_price)
        print(len(result_name_type))
        if result_name_type:
            if len(result_name_type[0]) > 1:
                name = result_name_type[0][0].strip()
                ftype = result_name_type[0][1].strip()
                print(name, ftype)
                furniture.update_one({'id': _id}, {'$set': {'name': name, 'type': ftype, 'done': 1}})
        if result_img:
            img = ikea + result_img[0]
            filename = _id + ".jpg"
            fetch.urlretrieve(img, filename)
    except Exception:
        # Was a bare `except:`: keep the best-effort behavior but stop
        # swallowing KeyboardInterrupt/SystemExit; mark the item for retry.
        furniture.update_one({'id': _id}, {'$set': {'done': 2}})
|
import tensorflow as tf
from src.main.utils.decorators import lazy_property
class Dataset(object):
    """Wraps feature/target arrays as batched TF 1.x initializable iterators.

    NOTE(review): relies on the TF1 tf.data API (make_initializable_iterator);
    `lazy_property` presumably caches each property on first access — confirm
    against src/main/utils/decorators.
    """
    def __init__(self, features, target, batch_size):
        # Raw input data, later sliced into batches of `batch_size`.
        self.feature_data=features
        self.target_data=target
        self.batch_size=batch_size
        # Touch every lazy property so all graph nodes are built eagerly here.
        self.features
        self.target
        self.features_op
        self.target_op
        self.features_batch
        self.target_batch
    @lazy_property
    def features(self):
        # Initializable iterator over feature batches.
        dataset=tf.data.Dataset.from_tensor_slices(self.feature_data).batch(self.batch_size)
        return dataset.make_initializable_iterator()
    @lazy_property
    def features_op(self):
        # Op that (re)initializes the feature iterator.
        return self.features.initializer
    @lazy_property
    def features_batch(self):
        # Tensor yielding the next feature batch.
        return self.features.get_next()
    @lazy_property
    def target(self):
        # Initializable iterator over target batches.
        dataset=tf.data.Dataset.from_tensor_slices(self.target_data).batch(self.batch_size)
        return dataset.make_initializable_iterator()
    @lazy_property
    def target_op(self):
        # Op that (re)initializes the target iterator.
        return self.target.initializer
    @lazy_property
    def target_batch(self):
        # Tensor yielding the next target batch.
        return self.target.get_next()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# sleep 强制等待,缺点是对设置的时间参数不同场景不好把握
# implicitly_wait 隐式等待,缺点是设置对全局生效,若查找元素失败后会一直进行查找,知道超过设置的等待时间
# WebDriverWait 显示等待,通过until和until_not自定义等待条件
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestWait:
    """Demonstrates selenium explicit waits (WebDriverWait + expected_conditions)."""
    def setup(self):
        self.driver = webdriver.Chrome()
        self.driver.get("https://home.testing-studio.com/")
        # self.driver.implicitly_wait(5)
    def teardown(self):
        # Fix: quit the browser so each test stops leaking a Chrome process.
        self.driver.quit()
    def test_wait(self):
        self.driver.find_element(By.XPATH, '//*[@title="归入各种类别的所有主题"]').click()
        # self.driver.find_element(By.XPATH, '//*[@title="在最近的一年,一月,一周或一天最活跃的主题"]').click()
        # Explicit wait: block up to 4s until the table heading is clickable.
        # A custom predicate taking the driver as its single argument would
        # also work, e.g.
        #   WebDriverWait(self.driver, 5).until(
        #       lambda d: len(d.find_elements(By.XPATH, '//*[@class="table-heading"]')))
        WebDriverWait(self.driver, 4).until(expected_conditions.element_to_be_clickable((By.XPATH, '//*[@class="table-heading"]')))
import logging
from tinydb import TinyDB, Query
from collectors.exceptions import DuplicateFound
from storage import Storage
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class StorageTinyDB(Storage):
    """Storage backend persisting items to a TinyDB JSON file.

    Items are dicts expected to carry 'id', 'type' and 'timestamp' keys.
    """
    def __init__(self, db_filename):
        super().__init__()
        self.db_filename = db_filename
        self.db = TinyDB(db_filename)
    def search(self, id=None, type=None):
        """Return the list of items matching the given id and/or type.

        Fix: the no-filter case previously fell through and returned None,
        which crashed iteration in max_timestamp() when called without
        kwargs; it now returns every stored item.
        """
        Item = Query()
        if type and id:
            return self.db.search((Item.id == id) & (Item.type == type))
        elif type:
            return self.db.search(Item.type == type)
        elif id:
            return self.db.search(Item.id == id)
        return self.db.all()
    def upsert(self, item, update=False):
        """Insert `item`; on a duplicate (same id+type) optionally update the
        stored copy, then raise DuplicateFound either way so callers can stop.
        """
        existing = self.search(id=item['id'], type=item['type'])
        if existing:
            assert len(existing) == 1, \
                'We have 2 duplicates with id: {id} and type: {type}'.format(
                    id=item['id'], type=item['type']
                )
            existing = existing[0]
            if update:
                existing.update(item)
                logger.info("Updating: %s" % item)
                # NOTE(review): `eids=` is the TinyDB <4 API; v4 renamed it to
                # `doc_ids` — confirm the pinned tinydb version.
                self.db.update(existing, eids=[existing.eid])
            raise DuplicateFound(
                f"We already have the id {item['id']} of type {item['type']} in the DB"
            )
        logger.info('Adding: %s' % item)
        self.db.insert(item)
    def all(self):
        """Return all items, newest (largest 'timestamp') first."""
        data = self.db.all()
        data.sort(key=lambda item: item['timestamp'], reverse=True)
        return data
    def max_timestamp(self, **kwargs):
        """Return the largest 'timestamp' among items matching `kwargs`
        (forwarded to search()), or None when nothing matches."""
        items = self.search(**kwargs)
        # we scan all items to get the max_timestamp
        max_timestamp = None
        for item in items:
            if max_timestamp is None or item['timestamp'] > max_timestamp:
                max_timestamp = item['timestamp']
        return max_timestamp
    def __str__(self):
        return "DB: %s" % self.db_filename
    def close(self):
        """Nothing to release for this backend."""
        pass
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Disallow 'await' in a loop."""
from __future__ import annotations
import ast
from contextlib import contextmanager
from pathlib import PurePath
from typing import Iterator, Sequence
def check_for_await_in_loop(tree: ast.AST, filename: str) -> Iterator[tuple[int, int, str, None]]:
    """Flake8-style check: yield PNT30 violations for `await Get(...)` or
    `await MultiGet(...)` expressions that execute repeatedly inside a loop
    (for/while/comprehension).

    Yields (lineno, col_offset, message, None) tuples — the shape flake8
    expects from a plugin. Only files under src/python are checked; test
    modules are exempt.
    """
    path = PurePath(filename)
    if (
        not filename.startswith("src/python")
        or path.stem.startswith("test_")
        or path.stem.endswith("_test")
    ):
        return
    violations: list[tuple[int, int, str, None]] = []
    class Visitor(ast.NodeVisitor):
        def __init__(self):
            # this isn't entirely correct: function/class definitions within a loop might have
            # `await`s in them, but aren't _necessarily_ a problem (see example below).
            #
            #   tasks = []
            #   for i in range(10):
            #       async def foo(i=i):
            #           await bar(i)
            #       tasks.append(foo())
            #   asyncio.gather(tasks)
            self._in_loop = False
        @contextmanager
        def in_loop(self) -> Iterator[None]:
            # Temporarily mark that we are visiting loop-repeated code; the
            # previous flag is restored on exit so nested loops unwind right.
            old = self._in_loop
            self._in_loop = True
            try:
                yield
            finally:
                self._in_loop = old
        def traverse(self, node: ast.AST | Sequence[ast.AST]):
            # Visit either a single node or a sequence of nodes (e.g. a body).
            if isinstance(node, ast.AST):
                self.visit(node)
            else:
                for x in node:
                    self.visit(x)
        def visit_for(self, node: ast.For | ast.AsyncFor):
            """Example::
            [async] for MULTIPLE in await ONCE:
                await MULTIPLE
            else:
                await ONCE
            """
            # iter and orelse evaluate once; target and body run per-iteration.
            self.visit(node.iter)
            self.traverse(node.orelse)
            with self.in_loop():
                self.visit(node.target)
                self.traverse(node.body)
        visit_For = visit_AsyncFor = visit_for
        def visit_While(self, node: ast.While):
            """Example:
            while await MULTIPLE:
                await MULTIPLE
            """
            # Even the condition re-evaluates each iteration, so everything
            # under a while is loop context.
            with self.in_loop():
                self.generic_visit(node)
        def visit_comp(self, node: ast.DictComp | ast.ListComp | ast.SetComp | ast.GeneratorExp):
            """Example::
            [
                await MULTIPLE
                [async] for MULTIPLE in await ONCE
                if MULTIPLE
                for MULTIPLE in await MULTIPLE
            ]
            """
            # Only the first generator's iterable is evaluated exactly once.
            first_comp = node.generators[0]
            self.visit(first_comp.iter)
            with self.in_loop():
                self.visit(first_comp.target)
                for expr in first_comp.ifs:
                    self.visit(expr)
                for other_comp in node.generators[1:]:
                    self.visit(other_comp)
                if isinstance(node, ast.DictComp):
                    self.visit(node.key)
                    self.visit(node.value)
                else:
                    self.visit(node.elt)
        visit_ListComp = visit_GeneratorExp = visit_SetComp = visit_DictComp = visit_comp
        def _await_that_could_be_multiget(self, node: ast.Await) -> bool:
            """Check for `await Get(...)` or `await MultiGet(...)` literally."""
            value = node.value
            # This checks for `await Get()` and `await MultiGet()` literally, because there's not
            # currently MultiGet support for rule_helpers (i.e. `[await some_rule_helper(x) for x in
            # ...]` cannot become `await MultiGet([rule_helper(x) for x in ...])` ). Once that's
            # supported, this could flip to default to True, except for `await Effect`.
            return (
                isinstance(value, ast.Call)
                and isinstance(value.func, ast.Name)
                and value.func.id in ("Get", "MultiGet")
            )
        def visit_Await(self, node: ast.Await):
            # Record a violation only for awaits inside loop context that
            # match the Get/MultiGet pattern above.
            if self._in_loop and self._await_that_could_be_multiget(node):
                violations.append(
                    (
                        node.lineno,
                        node.col_offset,
                        "PNT30 `await` in a loop may be a performance hazard: prefer concurrent requests via MultiGet, or add `# noqa: PNT30: <explanation>` if this is required",
                        None,
                    )
                )
    Visitor().visit(tree)
    yield from violations
# Presumably flake8 plugin registration metadata ("name"/"version" attributes
# on the checker entry point); setattr avoids type-checker complaints about
# unknown function attributes.
setattr(check_for_await_in_loop, "name", __name__)
setattr(check_for_await_in_loop, "version", "0.0.0")
|
# Start a Synergy websocket server exposing a single default chat room.
from synergy import SynergyServer
synergy_server = SynergyServer()
synergy_server.create_room('Global', default_room=True)
synergy_server.start()
# The string below documents a sample client session; it is a no-op
# module-level expression and is kept verbatim.
"""
From a websocket client:
Sent: {"request": "authenticate", "aid": "c9f93756-2ff6-40aa-8824-2409d7113818"}
Received: {"request": "authenticate", "authenticated": true}
Received: {"rooms": ["Global"], "request": "room_list"}
Sent: {"request": "send_message", "room": "Global", "message": "Hi"}
Received: {"message": "Hi", "color": "green", "author": "JCharante"}
"""
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from qutip import Qobj, qeye, sigmax, sigmaz
from pulser.backend.noise_model import NoiseModel
from pulser_simulation import SimConfig
@pytest.fixture
def matrices():
    """Small set of qutip operators/states shared by the tests below."""
    return {
        "I": qeye(2),
        "X": sigmax(),
        "Zh": 0.5 * sigmaz(),
        "ket": Qobj([[1.0], [2.0]]),
        "I3": qeye(3),
    }
def test_init():
    """SimConfig construction: noise settings render into __str__, and
    invalid parameters raise with specific messages."""
    config = SimConfig(
        noise=(
            "SPAM",
            "doppler",
            "dephasing",
            "amplitude",
        ),
        temperature=1000.0,
        runs=100,
    )
    # __str__(True) requests the verbose representation (incl. solver options).
    str_config = config.__str__(True)
    assert "SPAM, doppler, dephasing, amplitude" in str_config
    assert (
        "1000.0µK" in str_config
        and "100" in str_config
        and "Solver Options" in str_config
    )
    config = SimConfig(noise="depolarizing")
    str_config = config.__str__(True)
    assert "depolarizing" in str_config
    config = SimConfig(
        noise="eff_noise",
        eff_noise_opers=[qeye(2), sigmax()],
        eff_noise_probs=[0.3, 0.7],
    )
    str_config = config.__str__(True)
    assert (
        "Effective noise distribution" in str_config
        and "Effective noise operators" in str_config
    )
    # Invalid parameters must be rejected.
    with pytest.raises(TypeError, match="'temperature' must be a float"):
        SimConfig(temperature="0.0")
    with pytest.raises(ValueError, match="SPAM parameter"):
        SimConfig(eta=-1.0)
    with pytest.raises(
        ValueError, match="'amp_sigma' must be greater than or equal to zero"
    ):
        SimConfig(amp_sigma=-0.001)
def test_eff_noise_opers(matrices):
    """Validation of eff_noise operators/probabilities raises the documented
    type, value and not-implemented errors."""
    # Some of these checks are repeated in the NoiseModel UTs
    with pytest.raises(ValueError, match="The operators list length"):
        SimConfig(noise=("eff_noise"), eff_noise_probs=[1.0])
    with pytest.raises(TypeError, match="eff_noise_probs is a list of floats"):
        SimConfig(
            noise=("eff_noise"),
            eff_noise_probs=["0.1"],
            eff_noise_opers=[qeye(2)],
        )
    with pytest.raises(
        ValueError, match="The general noise parameters have not been filled."
    ):
        SimConfig(noise=("eff_noise"))
    with pytest.raises(TypeError, match="is not a Qobj."):
        SimConfig(
            noise=("eff_noise"), eff_noise_opers=[2.0], eff_noise_probs=[1.0]
        )
    with pytest.raises(TypeError, match="to be of Qutip type 'oper'."):
        SimConfig(
            noise=("eff_noise"),
            eff_noise_opers=[matrices["ket"]],
            eff_noise_probs=[1.0],
        )
    with pytest.raises(NotImplementedError, match="Operator's shape"):
        SimConfig(
            noise=("eff_noise"),
            eff_noise_opers=[matrices["I3"]],
            eff_noise_probs=[1.0],
        )
    with pytest.raises(
        NotImplementedError, match="You must put the identity matrix"
    ):
        SimConfig(
            noise=("eff_noise"),
            eff_noise_opers=[matrices["X"], matrices["I"]],
            eff_noise_probs=[0.5, 0.5],
        )
    with pytest.raises(ValueError, match="The completeness relation is not"):
        SimConfig(
            noise=("eff_noise"),
            eff_noise_opers=[matrices["I"], matrices["Zh"]],
            eff_noise_probs=[0.5, 0.5],
        )
def test_from_noise_model():
    """SimConfig.from_noise_model maps NoiseModel fields onto the legacy
    SPAM parameter names (epsilon/epsilon_prime/eta)."""
    noise_model = NoiseModel(
        noise_types=("SPAM",),
        p_false_neg=0.4,
        p_false_pos=0.1,
        state_prep_error=0.05,
    )
    assert SimConfig.from_noise_model(noise_model) == SimConfig(
        noise="SPAM", epsilon=0.1, epsilon_prime=0.4, eta=0.05
    )
|
import time

start_time = time.time()

# Project Euler #2: sum the even-valued Fibonacci terms not exceeding 4,000,000.
LIMIT = 4000000
fibonacci = [1, 2]
total = 2  # the seed term 2 is even; start the sum with it
# (renamed from `sum`, which shadowed the builtin)
while fibonacci[-1] < LIMIT:
    fibonacci.append(fibonacci[-1] + fibonacci[-2])
    if fibonacci[-1] % 2 == 0 and fibonacci[-1] < LIMIT:
        total += fibonacci[-1]
print(total)
print("Elapsed Time: ", (time.time() - start_time))
from django.shortcuts import render
# Create your views here.
from django.http import JsonResponse
from src.wrappers import alcohol_limit
from src.singletons import sku_match
import json
from src.expression import Item
from src.Utils.logger import logger
# Process-wide SKU matcher shared by all views in this module.
sku_matcher_singleton = sku_match.SkuSingleton()
def check_alcohol_limit(request):
    """Django view: validate an order's alcohol items against city limits.

    Expects a JSON body with 'items' (the order form) and an optional
    'cityId' (defaults to 1). On any failure it answers code 400 and
    permissively reports alcohol_within_limits=True (deliberate best-effort).
    """
    try:
        # NOTE(review): this passes `request` as a %-format argument with no
        # placeholder — likely meant logger.info("REQUEST %s", request).
        logger.info("REQUEST", request)
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)
        order_form = body.get('items', [])
        city_id = body.get('cityId', 1)
        logger.info(order_form, city_id)
        order_flag, rule_details, order_details, order_bound, broad_order_bound = \
            alcohol_limit.hit_order(order_form, city_id)
        return JsonResponse({'code': 200,
                             'alcohol_within_limits': order_flag,
                             'category_rules': rule_details,
                             'category_bounds': broad_order_bound,
                             'order_details': order_details,
                             'order_bounds': order_bound})
    except Exception as e:
        # Never block the order on an internal failure.
        return JsonResponse({'code': 400, 'error': str(e), 'alcohol_within_limits': True})
def get_top_unmatched(request):
    """Django view: return the 100 most frequent unmatched SKU tokens.

    Requires the querystring `key=data` as a lightweight access check.
    Responds {'code': 200, 'data': [(token, count), ...]} on success or
    {'code': 400, 'error': ...} on failure.
    """
    try:
        logger.info(request)
        # Explicit validation instead of `assert request.GET` — asserts are
        # stripped under `python -O`, and .get() avoids a KeyError-driven 400.
        if not request.GET or request.GET.get('key') != 'data':
            return JsonResponse({'code': 400, 'error': 'Unwarranted Request'})
        sku_matchbook = sku_matcher_singleton.get_obj()
        # sku_corpus = sku_matchbook.corpora[0]
        unused_list = sorted(
            sku_matchbook.unused_dict.items(),
            key=lambda pair: pair[1],
            reverse=True,
        )[:100]
        return JsonResponse({'code': 200, 'data': unused_list})
    except Exception as e:
        return JsonResponse({'code': 400, 'error': str(e)})
|
from django.shortcuts import render, HttpResponse
# def index(request):
# response = "Hello, I am your first request!"
# return HttpResponse( response )
def index(request):
    """Render the landing page; the starred banner is a debug marker."""
    # Parenthesized print works on both Python 2 (as an expression) and 3;
    # the original `print '*' * 100` statement is Python-2-only syntax.
    print('*' * 100)
    return render(request, 'first_app/index.html')
|
import pandas as pd
# get all progress sites
from work.controller import getSiteProgressdf
dfProgressSites = getSiteProgressdf()
# 1: get all hh habs
from consumers.models import Consumer
from django.db.models import Count, F, Q
cs = Consumer.objects.all()
# cshabs = cs.values('site__origin__hab_id','site__hab_id','site__census', 'site__habitation', )\
#     .annotate(apl=Count(Q(F('habitation')=='APL')))
# Per-habitation consumer counts, split into APL/BPL ration-card categories.
cshabs = cs.values('site__origin__hab_id', 'site__hab_id','site__village', 'site__census', 'site__habitation','site__district', 'site__division' )\
    .annotate(
        bpl=Count('apl_bpl', filter=Q(apl_bpl='BPL')),
        apl=Count('apl_bpl', filter=Q(apl_bpl='APL')))
# 2: are all habs present in macro?
dfcshabs = pd.DataFrame(cshabs)
# Total households per habitation = APL + BPL counts.
dfcshabs['hh'] = dfcshabs['apl'] + dfcshabs['bpl']
# Outer merge keeps habitations that appear in only one of the two sources.
df_all = dfProgressSites.merge(dfcshabs, how='outer', left_on='hab_id', right_on='site__hab_id')
print(df_all.head())
import os
import sys
import pickle
import argparse
import logging
from utils import *
from simulate import *
from stem.descriptor import parse_file, DocumentHandler
import stem.descriptor.reader as reader
def find_desc(descs, consensus_paths, desc_writer):
    """
    Find descriptors pertaining to a particular consensus document and write
    one processed-descriptor batch per consensus via `desc_writer`.

    `descs` maps fingerprint -> list of server descriptors; for each consensus
    router the newest descriptor published at or before the router's
    publication time is selected.
    """
    with reader.DescriptorReader(consensus_paths) as desc_reader:
        valid_after = None
        descs_per_consensus = []
        found, not_found = 0, 0
        # this is O(n*n). optimize.
        for router in desc_reader:
            if valid_after != router.document.valid_after:
                # Flush the previous consensus batch (valid_after is None
                # before the first batch, so nothing is written then).
                if valid_after:
                    desc_writer(descs_per_consensus, valid_after)
                    logging.info("Descriptors - Found : %s, Not Found : %s",
                                 found, not_found)
                descs_per_consensus = []
                found, not_found = 0, 0
                valid_after = router.document.valid_after
            matched_descs = descs.get(router.fingerprint, None)
            if matched_descs:
                found += 1
                published = timestamp(router.published)
                # Pick the newest descriptor published at or before the
                # router's publication time.
                selected_desc = matched_descs[0]
                for desc in matched_descs:
                    if (desc.unix_timestamp <= published and
                            desc.unix_timestamp >= selected_desc.unix_timestamp):
                        selected_desc = desc
                # server descs don't have flags, lets steal
                # it from the consensus
                selected_desc.flags = router.flags
                descs_per_consensus.append(selected_desc)
            else:
                not_found += 1
        # BUGFIX: the final consensus batch was previously never written.
        if valid_after:
            desc_writer(descs_per_consensus, valid_after)
            logging.info("Descriptors - Found : %s, Not Found : %s",
                         found, not_found)
def descriptor_writer(output_dir):
    """
    Return a function that writes descs into a file.
    """
    def write_processed_descs(descs_per_consensus, valid_after):
        """
        Write descs to a file.

        The file is named after the consensus valid-after time and prefixed
        with the stem @type annotation expected by descriptor parsers.
        """
        file_name = valid_after.strftime('%Y-%m-%d-%H-%M-%S-descriptors')
        logging.info("Writing descs into %s", file_name)
        outpath = os.path.join(output_dir, file_name)
        # NOTE(review): an existing file is logged but then still overwritten
        # below (the author's own "# break?" marks this as unresolved).
        if os.path.exists(outpath):
            logging.error("%s file already exists", outpath)
            # break?
        # NOTE(review): this is Python-2 code — `unicode` does not exist on
        # Python 3, and writing str to a file opened 'wb' would raise a
        # TypeError there; confirm the targeted interpreter before porting.
        with open(outpath, 'wb') as output:
            output.write('@type server-descriptor 1.0\n')
            for desc in descs_per_consensus:
                output.write(unicode(desc).encode('utf8'))
                output.write('\n')
    return write_processed_descs
def calculate_bw(desc):
    """Advertised bandwidth: the minimum of a relay's three bandwidth figures."""
    average = desc.average_bandwidth
    burst = desc.burst_bandwidth
    observed = desc.observed_bandwidth
    return min(average, burst, observed)
def parse_args(parser):
    """Attach pathsim's CLI flags to `parser` and parse sys.argv."""
    flags = [
        (("-p", "--process"),
         dict(help="Pair consensuses with recent descriptors", action="store_true")),
        (("-x", "--simulate"),
         dict(help="Do a bunch of simulated path selections using consensus from --in, processed descriptors from --out, and taking --samples",
              action="store_true")),
        (("-c", "--consensus"),
         dict(help="List of input consensus documents", default="in/consensuses")),
        (("-d", "--descs"),
         dict(help="List of input descriptor documents", default='in/desc')),
        (("-o", "--output"),
         dict(help="Output dir", default='out/processed-descriptors')),
        (("-l", "--log"),
         dict(help="Logging level", default="DEBUG")),
    ]
    for names, options in flags:
        parser.add_argument(*names, **options)
    return parser.parse_args()
# CLI driver: either pre-process descriptors (--process) or run path-selection
# simulations over already-processed output (--simulate).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    args = parse_args(parser)
    # Exactly one of the two modes must be requested.
    if not (args.process or args.simulate):
        parser.error('No action requested, add --process or --simulate')
    if not os.path.exists(args.descs):
        parser.error('%s does not exist' % args.descs)
    if not os.path.exists(args.consensus):
        parser.error('%s does not exist' % args.consensus)
    output_dir = os.path.abspath(args.output)
    if not os.path.exists(args.output):
        os.makedirs(output_dir)
    # Map the textual level (e.g. "DEBUG") onto the logging constant.
    log_level = getattr(logging, args.log.upper(), None)
    if not isinstance(log_level, int):
        parser.error('Invalid log level: %s' % args.log)
    desc_path, cons_path = [], []
    descs = {}
    # setup logging
    logging.basicConfig(level=log_level, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    logging.info("Starting pathsim.")
    if args.process:
        # Pair each consensus with its freshest matching server descriptors.
        desc_writer = descriptor_writer(output_dir)
        descs = process_server_desc(os.path.abspath(args.descs))
        find_desc(descs, output_dir, desc_writer)
    elif args.simulate:
        for dirpath, dirnames, filenames in os.walk(os.path.abspath(args.output)):
            for filename in filenames:
                desc_path.append(os.path.join(dirpath, filename))
        # for dirpath, dirnames, filenames in os.walk(os.path.abspath(args.consensus)):
        #     for filename in filenames:
        #         cons_path.append(os.path.join(dirpath,filename))
        cons_path = os.path.abspath(args.consensus)
        # NOTE(review): only the first processed-descriptor file is simulated.
        simulation = Simulation(desc_path[0], cons_path)
        simulation.simulate()
|
# Basic arithmetic operators: +, -, *, / (true division) and ** (power).
print(3 + 5)
print(7 - 4)
print(3 * 2)
print(6 / 7)  # true division: prints a float
print(2 ** 2)
# PEMDAS — operator precedence, highest first:
# Parentheses
# Exponent
# Multiplication
# Division
# Addition
# Subtraction
|
# Generated by Django 3.0.8 on 2020-08-14 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Generated migration: adds an optional thumbnail image to Post.
    dependencies = [
        ('fhblog', '0005_post_snippet'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='post_thumbnail_image',
            # NOTE(review): ImageField's `height_field`/`width_field` expect
            # the *names* of model fields to auto-populate, not pixel sizes;
            # the integer 200 here looks wrong and will fail at save time.
            # Confirm intent — fixed dimensions belong in the upload/render
            # pipeline, not in these kwargs.
            field=models.ImageField(blank=True, height_field=200, null=True, upload_to='images/fhblog', width_field=200),
        ),
    ]
|
import math
from collections import defaultdict
from subprocess import call
import numpy as np
import torch
from squirrel.data.batch import merge_batches, split_batch
from squirrel.decoder import valid_model, valid_model_ppl
from squirrel.optimizer import Adam
from squirrel.utils import Timer, format, gather_dict, item
# class AsynchronousPreprocessing(object):
# def __init__(self, train, model):
# self.train_iter = [iter(t) for t in train]
# self.model = model
def get_learning_rate(args, i):
    """Return the learning rate for update step `i`.

    With the schedule disabled a constant 0.001 is used. When args.lr == 0
    the Transformer-style inverse-sqrt schedule scaled by d_model applies
    (floored at 5e-5); otherwise a fairseq-style linear warmup to args.lr is
    followed by inverse-sqrt decay.
    """
    if args.disable_lr_schedule:
        return 0.001
    if args.lr == 0:  # use pre-defined learning rate
        return min(
            max(1.0 / math.sqrt(args.d_model * i), 5e-5),
            i / (args.warmup * math.sqrt(args.d_model * args.warmup)))
    # manually define the learning rate (the same as fairseq-py)
    peak_lr = args.lr
    initial_lr = 1e-7
    if i < args.warmup:
        # linear ramp from initial_lr to peak_lr over args.warmup steps
        return initial_lr + i * ((peak_lr - initial_lr) / args.warmup)
    # inverse-sqrt decay from the peak
    return (peak_lr * args.warmup**0.5) * (i**-0.5)
def train_model(args,
watcher,
model,
train,
dev,
save_path=None,
maxsteps=None,
decoding_path=None,
names=None):
# optimizer
all_opt = [
Adam(
param, betas=(0.9, 0.98), eps=1e-9, weight_decay=args.weight_decay)
for param in model.module.trainable_parameters()
]
opt = all_opt[0]
# if resume training
if (args.load_from != 'none') and (args.resume):
with torch.cuda.device(args.local_rank): # very important.
offset, opt_states = torch.load(
args.workspace_prefix + '/models/' + args.load_from +
'.pt.states',
map_location=lambda storage, loc: storage.cuda())
opt.load_state_dict(opt_states)
else:
offset = 0
iters, best_i = offset, 0
# confirm the saving path
if save_path is None:
save_path = args.model_name
# setup a watcher
param_to_watch = ['corpus_bleu']
watcher.set_progress_bar(args.eval_every)
watcher.set_best_tracker(model, opt, save_path, args.local_rank,
*param_to_watch)
if args.tensorboard and (not args.debug):
watcher.set_tensorboard('{}/runs/{}'.format(args.workspace_prefix,
args.prefix + args.hp_str))
watcher.add_tensorboard(
'Hyperparameters', ' \n'.join([
'{}:\t{}'.format(a, b)
for a, b in sorted(args.__dict__.items(), key=lambda x: x[0])
]), 0, 'text')
train_iter = [iter(t) for t in train]
while True:
def check(every=0, k=0):
if every <= 0:
return False
return iters % every == k
# whether we only train LM or not ---> task
if ((iters < args.lm_steps) or (args.lm_only)):
task = 'lm_only'
else:
task = None
# --- saving --- #
if check(args.save_every) and (
args.local_rank == 0): # saving only works for local-rank=0
watcher.info('save (back-up) checkpoints at iter={}'.format(iters))
with torch.cuda.device(args.local_rank):
torch.save(watcher.best_tracker.model.state_dict(),
'{}_iter={}.pt'.format(args.model_name, iters))
torch.save(
[iters, watcher.best_tracker.opt.state_dict()],
'{}_iter={}.pt.states'.format(args.model_name, iters))
# --- validation --- #
if check(args.eval_every): # and (args.local_rank == 0):
if args.local_rank == 0:
call(["hostname"])
call([
"nvidia-smi", "--format=csv",
"--query-gpu=memory.used,memory.free"
])
watcher.close_progress_bar()
if not args.no_valid:
with torch.no_grad():
outputs_data = [
valid_model(
args,
watcher,
model,
d,
print_out=True,
dataflow=['src', 'trg']) for d in dev
]
if args.tensorboard and (not args.debug):
for outputs in outputs_data:
for name, value in outputs['tb_data']:
watcher.add_tensorboard(name, value, iters)
if not args.debug:
if len(outputs_data) == 1: # single pair MT
avg_corpus_bleu = outputs_data[0]['corpus_bleu']
requires_tracking = [0]
sources = outputs_data[0]['src']
decodes = outputs_data[0]['dec']
targets = outputs_data[0]['trg']
else:
# for multilingual training, we need to compute the overall BLEU
# which is merge the dataset and re-evaluate
if args.track_best is not None:
requires_tracking = [
int(a) for a in args.track_best.split(',')
]
else:
requires_tracking = list(range(len(dev)))
sources, decodes, targets = [], [], []
for i in requires_tracking:
sources += outputs_data[i]['src']
decodes += outputs_data[i]['dec']
targets += outputs_data[i]['trg']
avg_corpus_bleu = np.mean([
outputs_data[i]['corpus_bleu']
for i in requires_tracking
])
if args.tensorboard and (not args.debug):
if len(outputs_data) > 1:
watcher.add_tensorboard('dev/average_BLEU',
avg_corpus_bleu, iters)
L = len(sources)
txt = ''
for i in range(10, L, L // 8):
txt += 'source: ' + ' '.join(
sources[i]) + ' \n'
txt += 'target: ' + ' '.join(
targets[i]) + ' \n'
txt += 'decode: ' + ' '.join(
decodes[i]) + ' \n'
txt += '----------- \n'
watcher.add_tensorboard(
'Translation sample', txt, iters, dtype='text')
watcher.acc_best_tracker(iters, avg_corpus_bleu)
if args.test_src is not None:
test_srcs, test_trgs = args.test_src.split(
','), args.test_trg.split(',')
else:
test_srcs, test_trgs = args.src.split(
','), args.trg.split(',')
watcher.info('tracking for language pairs: {}'.format(
'/'.join([
'{}-{}'.format(test_srcs[i], test_trgs[i])
for i in requires_tracking
])))
watcher.info(
'the best model is achieved at {}, corpus BLEU={}'.
format(watcher.best_tracker.i,
watcher.best_tracker.corpus_bleu))
if args.local_rank == 0:
if watcher.best_tracker.i > best_i:
best_i = watcher.best_tracker.i
watcher.info('model:' + args.prefix + args.hp_str)
if args.valid_ppl:
with torch.no_grad():
outputs_data = [
valid_model_ppl(
args,
watcher,
model,
d,
dataflow=['src', 'trg'],
task=task) for d in dev
]
if args.tensorboard and (not args.debug):
for outputs in outputs_data:
for name, value in outputs['tb_data']:
watcher.add_tensorboard(name, value, iters)
watcher.info('model:' + args.prefix + args.hp_str)
# ---set-up a new progressor---
watcher.set_progress_bar(args.eval_every)
if maxsteps is None:
maxsteps = args.maximum_steps
if iters > maxsteps:
watcher.info('reach the maximum updating steps.')
break
# --- training --- #
iters += 1
model.train()
info_str = 'training step = {}, lr={:.7f}, '.format(
iters, opt.param_groups[0]['lr'])
info = defaultdict(lambda: [])
pairs = []
with Timer() as train_timer:
opt.param_groups[0]['lr'] = get_learning_rate(
args, iters) # (args.model == 'AutoTransformer2'))
opt.zero_grad()
# prepare the data
for inter_step in range(args.inter_size):
def sample_a_training_set(train, prob):
if (prob is None) or (
len(prob) == 0
): # not providing probability, sample dataset uniformly.
prob = [1 / len(train) for _ in train]
train_idx = np.random.choice(np.arange(len(train)), p=prob)
return next(train[train_idx])
def merge_training_sets(train):
return merge_batches([next(train_i) for train_i in train])
if len(train) == 1: # single-pair MT:
batch = next(train_iter[0])
else:
if args.sample_a_training_set:
batch = sample_a_training_set(train_iter,
args.sample_prob)
else:
batch = merge_training_sets(train_iter)
# --- attention visualization --- #
if (check(args.att_plot_every, 1) and (inter_step == 0)
and (args.local_rank == 0)):
model.module.attention_flag = True
# -- search optimal paths (for training insertable transformer) -- #
if (args.order == 'random') or (args.order == 'optimal'):
if args.search_with_dropout:
model.train()
else:
model.eval()
with torch.no_grad():
infob_ = model(
batch,
mode='path',
dataflow=['src', 'trg'],
step=iters)
for t in infob_:
info[t] += [item(infob_[t])]
# training with dropout
model.train() # open drop-out
DIV = args.inter_size * args.sub_inter_size
for batch_ in split_batch(batch, args.sub_inter_size):
mode = 'search_train' if args.order == 'search_optimal' else 'train'
info_ = model(
batch_, mode=mode, dataflow=['src', 'trg'], step=iters)
info_['loss'] = info_['loss'] / DIV
info_['loss'].backward()
pairs.append(batch.task + batch.message)
for t in info_:
info[t] += [item(info_[t])]
# multiple steps, one update
grad_norm = opt.clip_grad_norm(args.grad_clip)
opt.step()
if args.distributed: # gather information from other workers.
gather_dict(info)
for t in info:
try:
info[t] = sum(info[t])
except TypeError:
continue
if check(args.print_every) and (args.order != 'fixed'):
watcher.info("--------" * 15)
for s in range(min(3, len(info['src']))):
watcher.info("{}:\t{}".format('source', info['src'][s]))
watcher.info("{}:\t{}".format('target', info['trg'][s]))
if 'reorder' in info:
watcher.info("{}:\t{}".format('reorder',
info['reorder'][s]))
watcher.info("--------" * 15)
# basic infomation
info_str += '#sentence={}, #token={}, '.format(
int(info['sents']), format(info['tokens'], 'k'))
if 'full_tokens' in info:
info_str += '#token(F)={}, '.format(
format(info['full_tokens'], 'k'))
info_str += 'gn={:.4f}, speed={} t/s | BEST={} | '.format(
grad_norm, format(info['tokens'] / train_timer.elapsed_secs, 'k'),
watcher.best_tracker.corpus_bleu)
for keyword in info:
if keyword[:2] == 'L@':
info_str += '{}={:.3f}, '.format(
keyword, info[keyword] / args.world_size / DIV)
if args.tensorboard and (not args.debug):
watcher.add_tensorboard(
'train/{}'.format(keyword),
info[keyword] / args.world_size / DIV, iters)
if args.tensorboard and (not args.debug):
watcher.add_tensorboard('train/LR', opt.param_groups[0]['lr'],
iters)
# -- attention visualization -- #
if (model.module.attention_maps is
not None) and (args.local_rank == 0):
watcher.info('Attention visualization at Tensorboard')
with Timer() as visualization_timer:
for name, value in model.module.attention_maps:
watcher.add_tensorboard(name, value, iters, 'figure')
model.module.attention_maps = None
watcher.info('Attention visualization cost: {}s'.format(
visualization_timer.elapsed_secs))
watcher.step_progress_bar(info_str=info_str)
|
# coding: utf-8
import csv
import pandas as pd
import requests
import time
import json
import datetime
from InstagramAPI import InstagramAPI
from sklearn.externals import joblib
import datetime
import random
import traceback
import os
import sys
from config import user, password
def print_m(message):
    """Print a timestamped log line and append it to data/log_InstaBot.csv."""
    log_dir = os.path.abspath("data") + "/"
    # build the line once so stdout and the CSV log stay identical
    entry = "Время {0}\tСообщение {1}".format(datetime.datetime.now(), str(message))
    print(entry)
    with open(log_dir + 'log_InstaBot.csv', 'a', newline='') as logfile:
        csv.writer(logfile).writerow([entry])
class InstaBot:
    """Thin wrapper over InstagramAPI: login plus paginated helpers for
    followers, followings and a user's feed. Every method hits the live API;
    responses are read back from ``api.LastJson`` (the library stores the
    last response there as a side effect)."""
    def __init__(self, user, password):
        self.user = user
        self.password = password
        # logs in over the network and caches (client, own user id)
        self.api, self.user_id = self.insta_login()
    def insta_login(self):
        """Log in to Instagram and return (api_client, own_user_id)."""
        api = InstagramAPI(self.user, self.password)
        api.login()
        time.sleep(5)
        api.getSelfUsernameInfo()
        # LastJson holds the response of the previous API call
        user_id = api.LastJson.get('user')['pk']
        return api, user_id
    def get_total_followers(self, user_id):
        """Return the list of follower ids ('pk') of user_id, paging until
        the API returns an empty next_max_id."""
        followers = []
        next_max_id = True
        while next_max_id:
            # first iteration hack
            if next_max_id is True:
                next_max_id = ''
            try:
                _ = self.api.getUserFollowers(user_id, maxid=next_max_id)
                followers.extend(self.api.LastJson.get('users', []))
                next_max_id = self.api.LastJson.get('next_max_id', '')
            except json.decoder.JSONDecodeError:
                # transient bad response: wait a second and retry the same page once
                time.sleep(1)
                _ = self.api.getUserFollowers(user_id, maxid=next_max_id)
                followers.extend(self.api.LastJson.get('users', []))
                next_max_id = self.api.LastJson.get('next_max_id', '')
        true_followers = []
        for follower in followers:
            true_followers.append(follower['pk'])
        return true_followers
    def get_total_following(self, user_id):
        """Return the list of ids ('pk') that user_id follows, paging until
        the API returns an empty next_max_id."""
        following = []
        next_max_id = True
        while next_max_id:
            # first iteration hack
            if next_max_id is True:
                next_max_id = ''
            try:
                _ = self.api.getUserFollowings(user_id, maxid=next_max_id)
                following.extend(self.api.LastJson.get('users', []))
                next_max_id = self.api.LastJson.get('next_max_id', '')
            except json.decoder.JSONDecodeError:
                # transient bad response: wait a second and retry the same page once
                time.sleep(1)
                _ = self.api.getUserFollowings(user_id, maxid=next_max_id)
                following.extend(self.api.LastJson.get('users', []))
                next_max_id = self.api.LastJson.get('next_max_id', '')
        true_following = []
        for user in following:
            true_following.append(user['pk'])
        return true_following
    def get_user_posts(self, user_id):
        """Return the user's feed items, paging until next_max_id is empty."""
        user_posts = []
        sucsess = True
        next_max_id = ''
        while sucsess:
            try:
                _ = self.api.getUserFeed(user_id, maxid=next_max_id)
                user_posts.extend(self.api.LastJson.get('items', ''))
                next_max_id = self.api.LastJson.get('next_max_id', '')
                if next_max_id == '':
                    sucsess = False
            except json.decoder.JSONDecodeError:
                # transient bad response: wait a second and retry the same page once
                time.sleep(1)
                _ = self.api.getUserFeed(user_id, maxid=next_max_id)
                user_posts.extend(self.api.LastJson.get('items', ''))
                next_max_id = self.api.LastJson.get('next_max_id', '')
                if next_max_id == '':
                    sucsess = False
        return user_posts
class InstaBase(InstaBot):
    """InstaBot extended with CSV work queues under ./data, a pickled
    follow-prediction model (model.pkl) and extended per-user statistics."""
    def __init__(self, user, password):
        self.folder_data = os.path.abspath("data") + "/"
        self.user = user
        self.password = password
        # pre-trained classifier consumed by predict_user_follow()
        self.clf = joblib.load('model.pkl')
        self.api, self.user_id = self.insta_login()
        self.start_time = datetime.datetime.now()
        self.time_work = self.change_time_work()
    def change_time_work(self):
        """Recompute, cache and return hours elapsed since start_time (2 dp)."""
        # BUGFIX: was `.seconds`, which wraps around every 24 hours;
        # total_seconds() gives the true elapsed time.
        time_work = round((datetime.datetime.now() - self.start_time).total_seconds()/60/60, 2)
        self.time_work = time_work
        return time_work
    def get_info_about_user(self, user_id):
        """Fetch a user's public profile info.

        Returns a stats row (see get_row) on success, or [0] when the user
        does not exist or the request failed. Retries once after 60s on 429.
        """
        url = 'https://i.instagram.com/api/v1/users/{0}/info/'.format(user_id)
        r = requests.get(url)
        if r.status_code == 200:
            row = self.get_row(r)
        elif r.status_code == 404:
            # BUGFIX: this log line used to sit after `return` and was unreachable
            print_m("User not found")
            return [0]
        elif r.status_code == 500:
            print_m("Internal Server Error")
            time.sleep(5)
            row = [0]
        elif r.status_code == 429:
            print_m("Too Many Requests. Wait 60 second")
            time.sleep(60)
            row = self.get_info_about_user(user_id)
        else:
            print_m(r.status_code)
            row = [0]
        return row
    @staticmethod
    def get_row(r):
        """Flatten a profile-info HTTP response into a stats row."""
        answer = r.json()
        user_id = answer['user']['pk']
        username = answer['user']['username']
        follower_count = answer['user']['follower_count']
        following_count = answer['user']['following_count']
        media_count = answer['user']['media_count']
        usertags_count = answer['user']['usertags_count']
        is_private = answer['user']['is_private']
        return [user_id, username, follower_count, following_count, media_count, usertags_count, is_private]
    def get_first_row(self, file):
        """Return the first row of a CSV file in the data folder."""
        with open(str(self.folder_data) + str(file), newline='') as f:
            reader = csv.reader(f)
            row = next(reader)
        return row
    def delete_first_row(self, file):
        """Drop the first row of a CSV file in the data folder."""
        df = pd.read_csv(str(self.folder_data) + str(file), header=None, index_col=None)
        df.loc[1:].to_csv(str(self.folder_data) + str(file), index=False, columns=None, header=False)
        return 0
    def write_to_end(self, file, row):
        """Append one row to a CSV file in the data folder."""
        with open(str(self.folder_data) + str(file), 'a', newline='') as csvfile:
            spamwriter = csv.writer(csvfile)
            spamwriter.writerow(row)
        return 0
    def len_file(self, file):
        """Return the number of rows in a CSV file (0 for an empty file)."""
        try:
            df = pd.read_csv(str(self.folder_data) + str(file), header=None, index_col=None)
            return len(df)
        except pd.errors.EmptyDataError:
            return 0
    def get_users_from_file(self, file):
        """Return the unique values of the first CSV column ([] when empty)."""
        try:
            df = pd.read_csv(str(self.folder_data) + str(file), header=None, index_col=None)
            return df[0].unique()
        except pd.errors.EmptyDataError:
            return []
    def get_user_stat(self, user_id):
        """Return the extended statistics row fed to the prediction model.

        NOTE(review): when get_info_about_user fails it returns [0] and this
        method still appends the extra stats to it — verify downstream code
        tolerates such short rows.
        """
        # basic profile stats
        row = self.get_info_about_user(user_id)
        # the user's posts
        posts = self.get_user_posts(user_id)
        # total likes / comments across all posts (missing keys count as 0)
        total_like_count = 0
        total_comment_count = 0
        for post in posts:
            try:
                like_count = post['like_count']
            except KeyError:
                like_count = 0
            try:
                comment_count = post['comment_count']
            except KeyError:
                comment_count = 0
            total_like_count += like_count
            total_comment_count += comment_count
        row.append(total_like_count)
        row.append(total_comment_count)
        # mutual / one-way follow counts
        reciprocal_follower_count = 0
        unreciprocal_follower_count = 0
        unreciprocal_following_count = 0
        followers = self.get_total_followers(user_id)
        followings = self.get_total_following(user_id)
        # PERF: sets turn the O(n^2) list-membership scans into O(n)
        followers_set = set(followers)
        followings_set = set(followings)
        for follower in followers:
            if follower in followings_set:
                # mutual follow
                reciprocal_follower_count += 1
            else:
                unreciprocal_follower_count += 1
        for following in followings:
            if following not in followers_set:
                unreciprocal_following_count += 1
        row.append(reciprocal_follower_count)
        row.append(unreciprocal_follower_count)
        row.append(unreciprocal_following_count)
        # how many of *my* followers this user follows
        followers_me = self.get_total_followers(self.user_id)
        count_d_follow = 0
        for follower in followers_me:
            if follower in followings_set:
                count_d_follow += 1
        row.append(count_d_follow)
        return row
    def predict_user_follow(self, user_id):
        """Predict whether the user will follow us back.

        Returns the classifier's prediction array (callers use index [0]).
        """
        stat = self.get_user_stat(user_id)
        columns = ['user_id', 'username', 'follower_count', 'following_count', 'media_count', 'usertags_count',
                   'is_private', 'like_count', 'comment_count', 'reciprocal_follower_count',
                   'unreciprocal_follower_count', 'unreciprocal_following_count', 'count_d_follow']
        df = pd.DataFrame([stat], columns=columns)
        # 'username' is dropped: the model was trained on numeric features only
        X = df[['user_id', 'follower_count', 'following_count', 'media_count', 'usertags_count', 'is_private',
                'like_count', 'comment_count', 'reciprocal_follower_count', 'unreciprocal_follower_count',
                'unreciprocal_following_count', 'count_d_follow']]
        return self.clf.predict(X)
def main(user, password):
    # Bot main loop: follow users the model approves, harvest new candidates
    # when the queue empties, and unfollow once the follow count is too high.
    # NOTE(review): several log messages disagree with the adjacent conditions
    # (e.g. "менее 20 часов" printed for `<= 100`), and index [3] of the
    # profile row is following_count while messages talk about followers —
    # kept as-is, confirm intent before changing thresholds.
    IB = InstaBase(user, password)
    while 1:
        try:
            print_m("Программа работает {0} часов".format(IB.change_time_work()))
            if IB.change_time_work() <= 100:
                print_m("Программа работает менее 20 часов")
                # the program is under its working-time limit
                if IB.get_info_about_user(IB.user_id)[3] < 4900:
                    print_m("Количество подписчиков менее 4900")
                    # we follow fewer than 4900 accounts
                    if IB.len_file('to_follow.csv') > 0:
                        print_m("В списке на подписку есть пользователи")
                        # the "to follow" queue still has users:
                        # take the first queued user
                        row = IB.get_first_row('to_follow.csv')
                        print(IB.user_id)
                        # extract the user's id
                        user_id = row[0]
                        print_m("Запускаем процесс подписки на пользователя {0}".format(user_id))
                        if user_id != '0':
                            # users we already followed
                            else_follow = IB.get_users_from_file('follow.csv')
                            # users the model rejected
                            not_follow = IB.get_users_from_file('not_follow.csv')
                            # users we already tried to follow
                            data_follow = IB.get_users_from_file('data.csv')
                            # only act if the user is in none of those lists
                            if user_id not in else_follow and user_id not in data_follow and user_id not in not_follow:
                                print_m("Пользователь {0} не находится в списках подписанных и отписанных".format(user_id))
                                # check the current relationship with the user
                                result = IB.api.userFriendship(user_id)
                                # a truthy result means the user exists
                                if result == True:
                                    # do I follow them
                                    followed_by = IB.api.LastJson['followed_by']
                                    # do they follow me
                                    following = IB.api.LastJson['following']
                                    # short pause between API calls
                                    time.sleep(2)
                                    if followed_by == False and following == False:
                                        print_m("Пользователь {0} не подписан на меня и я на него".format(user_id))
                                        # no relationship yet: ask the model
                                        result = IB.predict_user_follow(user_id)[0]
                                        if result == 1:
                                            print_m("Пользователь {0} вероятнее всего подпишется на нас".format(user_id))
                                            print_m("Подписываемся на пользователя {0}".format(user_id))
                                            IB.api.follow(int(user_id))
                                            # remove from the "to follow" queue
                                            IB.delete_first_row('to_follow.csv')
                                            # append to the followed list
                                            IB.write_to_end('follow.csv', row)
                                            # rest (~60-80s, randomized)
                                            time.sleep(80 + random.randint(0, 1)*-1*random.randint(0, 20))
                                        else:
                                            print_m("Пользователь {0} вероятнее всего не подпишется на нас".format(user_id))
                                            # remove from the "to follow" queue
                                            IB.delete_first_row('to_follow.csv')
                                            # append to the rejected list
                                            IB.write_to_end('not_follow.csv', row)
                                    else:
                                        print_m("Пользователь {0} либо уже подписан на нас, либо мы на него".format(user_id))
                                        # remove from the "to follow" queue
                                        IB.delete_first_row('to_follow.csv')
                                        # append to the followed list
                                        IB.write_to_end('follow.csv', row)
                                else:
                                    # otherwise the user has been deleted
                                    print_m("Пользователь {0} удален".format(user_id))
                                    # remove from the "to follow" queue
                                    IB.delete_first_row('to_follow.csv')
                            else:
                                # already present in one of the other lists:
                                # just drop it from the "to follow" queue
                                print_m("Удаляем пользователя {0} из to_follow.csv".format(user_id))
                                IB.delete_first_row('to_follow.csv')
                        else:
                            # placeholder id '0':
                            # just drop it from the "to follow" queue
                            print_m("Удаляем пользователя {0} из to_follow.csv".format(user_id))
                            IB.delete_first_row('to_follow.csv')
                    else:
                        print_m("В списке на подписку нет пользователей")
                        # the "to follow" queue is empty: harvest candidates
                        count = 0
                        # users we already followed
                        else_follow = IB.get_users_from_file('follow.csv')
                        # users we already tried to follow
                        data_follow = IB.get_users_from_file('data.csv')
                        # users the model rejected
                        not_follow = IB.get_users_from_file('not_follow.csv')
                        # users queued during this harvesting run
                        new_follow = []
                        print_m("Запускаем достань инфу бота")
                        while IB.len_file('to_follow.csv') < 4000 or count <= len(else_follow):
                            # still under the working-time limit
                            if IB.change_time_work() <= 100:
                                print_m("Программа работает менее 20 часов")
                                # take the n-th user from the followed list
                                # and mine their audience for candidates
                                user_id = else_follow[count]
                                # fetch that user's followers
                                followers = IB.get_total_followers(user_id)
                                print_m("Получили список всех пользователей пользователя {0}".format(user_id))
                                print_m("Их количество {0}".format(len(followers)))
                                # examine every follower of that user
                                for follower in followers:
                                    # skip followers already present in any list
                                    if follower not in else_follow and follower not in data_follow \
                                        and follower not in new_follow and follower not in not_follow:
                                        # unseen user: run the model
                                        # NOTE(review): this scores user_id, not
                                        # follower — looks like a bug; confirm.
                                        result = IB.predict_user_follow(user_id)[0]
                                        row = IB.get_info_about_user(follower)
                                        if result == 1:
                                            print_m("Добавили в список на подписку пользователя {0}".format(follower))
                                            IB.write_to_end('to_follow.csv', row)
                                            new_follow.append(follower)
                                            time.sleep(2)
                                        else:
                                            # append to the rejected list
                                            IB.write_to_end('not_follow.csv', row)
                                count += 1
                            else:
                                print_m("Отдых")
                                # over the working-time limit: rest 6 hours
                                time.sleep(60*60*6)
                                # reset the program's working time
                                IB.start_time = datetime.datetime.now()
                else:
                    print_m("У нас больше 4900 подписок, включаем отписка бота")
                    # too many follows: switch to the auto-unfollow bot
                    while IB.get_info_about_user(IB.user_id)[3] > 900:
                        # still under the working-time limit
                        if IB.change_time_work() <= 20:
                            print_m("Программа работает менее 20 часов")
                            # while we still follow more than 900 accounts:
                            # take the first user from the followed list
                            row = IB.get_first_row('follow.csv')
                            # extract the user's id
                            user_id = row[0]
                            print_m("Запускаем процесс отписки от пользователя {0}".format(user_id))
                            # collect the extended statistics for the user
                            row = IB.get_user_stat(user_id)
                            # check the current relationship with the user
                            result = IB.api.userFriendship(user_id)
                            time.sleep(1)
                            # a truthy result means the user exists
                            if result == True:
                                # do I follow them
                                followed_by = IB.api.LastJson['followed_by']
                                # do they follow me
                                following = IB.api.LastJson['following']
                                # label: 1 if they follow me...
                                if followed_by == True:
                                    row.append(1)
                                else:
                                    # ...0 otherwise
                                    row.append(0)
                                if following == True:
                                    # I follow them:
                                    # unfollow
                                    print_m("Отписываемся от пользователя {0}".format(user_id))
                                    IB.api.unfollow(int(user_id))
                                    # remove from the followed list
                                    IB.delete_first_row('follow.csv')
                                    # append to the training-data file
                                    IB.write_to_end('data.csv', row)
                                    # rest (~60-80s, randomized)
                                    time.sleep(80 + random.randint(0, 1)*-1*random.randint(0, 20))
                                else:
                                    print_m("Мы и так не подписаны на пользователя {0}".format(user_id))
                                    # remove from the followed list
                                    IB.delete_first_row('follow.csv')
                                    print_m("Удаляем пользователя {0} из списка подписанных".format(user_id))
                                    # append to the training-data file
                                    IB.write_to_end('data.csv', row)
                            else:
                                print_m("Пользователь удален {0}".format(user_id))
                                # remove from the followed list
                                IB.delete_first_row('follow.csv')
                                print_m("Отписываемся от пользователя {0}".format(user_id))
                                IB.api.unfollow(int(user_id))
                        else:
                            print_m("Отдых")
                            # over the working-time limit: rest 6 hours
                            time.sleep(60*60*6)
                            # reset the program's working time
                            IB.start_time = datetime.datetime.now()
            else:
                print_m("Отдых")
                # over the working-time limit: rest 6 hours
                time.sleep(60*60*6)
                # reset the program's working time
                IB.start_time = datetime.datetime.now()
        except KeyboardInterrupt:
            print_m("Прекращено пользователем")
            sys.exit()
        except:
            # NOTE(review): bare except deliberately keeps the bot alive on
            # any other error; it logs the traceback and backs off 60s.
            print_m("Ошибка")
            traceback.print_exc()
            time.sleep(60)
if __name__ == "__main__":
    # credentials come from config.py (user, password imported at the top)
    main(user, password)
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
# module author tag (project convention)
__author__ = 'jdroot'
from mako.exceptions import TemplateLookupException
from mako import exceptions
from pylons import config
class TemplateManager:
    """Caches Mako templates resolved through the Pylons app-globals lookup.

    NOTE: this file is Python 2 (uses the `print` statement below).
    """
    def __init__(self):
        # cache: "<name>.<style>" -> mako template object
        self.templates = {}
    def get(self, name, style='html'):
        """Return the template for "<name>.<style>" (case-insensitive name).

        Looks in the local cache first, then the Mako lookup; caches hits.
        Raises AttributeError when the template cannot be found.
        """
        key = "%s.%s" % (name.lower(), style)
        template = self.templates.get(key)
        if not template:
            try:
                template = config['pylons.app_globals'].mako_lookup.get_template(key)
                if template:
                    self.templates[key] = template
            except TemplateLookupException:
                # dump the Mako error page to stdout for debugging
                print exceptions.html_error_template().render()
        if not template:
            raise AttributeError('template for %s not found' % name)
        return template
|
import requests
from requests.models import Response
import os
import json
from mockito import when, mock, unstub
from form_reader import FormReader
# GitHub Enterprise auth headers shared by the request stubs below.
# NOTE(review): raises KeyError at import time if GITHUBTOKEN is unset.
headers = {
    'content-type': 'application/json',
    'authorization': 'token ' + os.environ["GITHUBTOKEN"]
}
# canned endpoints used by the mocks below
form_url = "https://github.ncsu.edu/test/HW0-510"
url = "https://api.github.ncsu.edu/repos/test/HW0-510/commits/master"
another_url = "https://api.github.ncsu.edu/repos/test/HW0-510/issues"
def stop_mocking():
    """Remove all mockito stubs installed by the start_mocking_* helpers."""
    unstub()
def start_mocking_forms():
    """Stub FormReader.get_data to return a single canned (email, repo) row."""
    canned_rows = [["test@ncsu.edu", form_url]]
    when(FormReader).get_data("http://test.com").thenReturn(canned_rows)
def start_mocking_git():
    """Stub requests.get for the commit and issues endpoints with canned JSON."""
    # build both canned responses first, then install the stubs
    commit_resp = Response()
    commit_resp._content = b'{"commit": {"committer": {"date": "2019-09-06T08:20:34Z"}}}'
    issues_resp = Response()
    issues_resp._content = b'[{"url": "https://abc.com"}, {"url": "https://bcd.com"}]'
    when(requests).get(url, headers=headers).thenReturn(commit_resp)
    when(requests).get(another_url, headers=headers).thenReturn(issues_resp)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class Student(object):
    """A minimal student record holding only a display name."""

    def __init__(self, name):
        # the student's display name
        self.name = name

    def __str__(self):
        return '(name:%s)' % (self.name,)

    # repr mirrors str so the object prints identically in containers too
    __repr__ = __str__
# smoke test: exercises Student.__str__ via print
print(Student('tangdu'))
from django.conf.urls import url
from cms import views
urlpatterns = [
    # Articles
    url(r'^article/$', views.article_list, name='article_list'),  # list
    url(r'^article/add/$', views.article_edit, name='article_add'),  # create
    url(r'^article/mod/(?P<article_id>\d+)/$',
        views.article_edit, name='article_mod'),  # edit
    url(r'^article/del/(?P<article_id>\d+)/$',
        views.article_del, name='article_del'),  # delete
]
|
#coding:utf-8
#!/usr/bin/env python
from django.shortcuts import render
from gclib.DBConnection import DBConnection
from django.http import HttpResponse
from gclib.json import json
from gclib.config import config
from excel_import import excel_import
def index(request):
    """Render the admin landing page."""
    return render(request, 'index.html', {})
# Each view below is a one-line wrapper delegating to
# generalConfigRequestProcess(request, '<table>') for its config table.
def dungeon(request):
    return generalConfigRequestProcess(request, 'dungeon')
def level(request):
    return generalConfigRequestProcess(request, 'level')
def monster(request):
    return generalConfigRequestProcess(request, 'monster')
def game(request):
    return generalConfigRequestProcess(request, 'game')
def skill(request):
    return generalConfigRequestProcess(request, 'skill')
def skill_effect(request):
    return generalConfigRequestProcess(request, 'skill_effect')
def skill_level(request):
    return generalConfigRequestProcess(request, 'skill_level')
def pet(request):
    return generalConfigRequestProcess(request, 'pet')
def pet_level(request):
    return generalConfigRequestProcess(request, 'pet_level')
def prompt(request):
    return generalConfigRequestProcess(request, 'prompt')
def garcha(request):
    return generalConfigRequestProcess(request, 'garcha')
def equipment(request):
    return generalConfigRequestProcess(request, 'equipment')
def strength_probability(request):
    return generalConfigRequestProcess(request, 'strength_probability')
def strength_price(request):
    return generalConfigRequestProcess(request, 'strength_price')
def luckycat_bless(request):
    return generalConfigRequestProcess(request, 'luckycat_bless')
def luckycat_fortune(request):
    return generalConfigRequestProcess(request, 'luckycat_fortune')
def luckycat_profit(request):
    return generalConfigRequestProcess(request, 'luckycat_profit')
def luck(request):
    return generalConfigRequestProcess(request, 'luck')
def language(request):
    return generalConfigRequestProcess(request, 'language')
def stone(request):
    return generalConfigRequestProcess(request, 'stone')
def stone_probability(request):
    return generalConfigRequestProcess(request, 'stone_probability')
def stone_level(request):
    return generalConfigRequestProcess(request, 'stone_level')
def trp_price(request):
    return generalConfigRequestProcess(request, 'trp_price')
def trp_probability(request):
    return generalConfigRequestProcess(request, 'trp_probability')
def trp(request):
    return generalConfigRequestProcess(request, 'trp')
def educate(request):
    return generalConfigRequestProcess(request, 'educate')
def educate_grade(request):
    return generalConfigRequestProcess(request, 'educate_grade')
def almanac_combination(request):
    return generalConfigRequestProcess(request, 'almanac_combination')
def reborn(request):
    return generalConfigRequestProcess(request, 'reborn')
def ladder_score(request):
    return generalConfigRequestProcess(request, 'ladder_score')
def arena_loot(request):
    return generalConfigRequestProcess(request, 'arena_loot')
def name(request):
    return generalConfigRequestProcess(request, 'name')
def drop(request):
    return generalConfigRequestProcess(request, 'drop')
def dialog(request):
    return generalConfigRequestProcess(request, 'dialog')
def drama(request):
    return generalConfigRequestProcess(request, 'drama')
def quest(request):
    return generalConfigRequestProcess(request, 'quest')
def item(request):
    return generalConfigRequestProcess(request, 'item')
def signin(request):
    return generalConfigRequestProcess(request, 'signin')
def levelup(request):
    return generalConfigRequestProcess(request, 'levelup')
def open_award(request):
    return generalConfigRequestProcess(request, 'open_award')
def tower_monster(request):
    return generalConfigRequestProcess(request, 'tower_monster')
def tower_markup(request):
    return generalConfigRequestProcess(request, 'tower_markup')
def tower_award(request):
    return generalConfigRequestProcess(request, 'tower_award')
def medal(request):
    return generalConfigRequestProcess(request, 'medal')
def medal_loot(request):
    return generalConfigRequestProcess(request, 'medal_loot')
def medal_level(request):
    return generalConfigRequestProcess(request, 'medal_level')
def mall_price(request):
    return generalConfigRequestProcess(request, 'mall_price')
def practice_property(request):
    return generalConfigRequestProcess(request, 'practice_property')
def practice_level(request):
    return generalConfigRequestProcess(request, 'practice_level')
def slotmachine(request):
    return generalConfigRequestProcess(request, 'slotmachine')
def vip(request):
    return generalConfigRequestProcess(request, 'vip')
def potential(request):
    return generalConfigRequestProcess(request, 'potential')
def potential_price(request):
    return generalConfigRequestProcess(request, 'potential_price')
def email(request):
    return generalConfigRequestProcess(request, 'email')
def gift(request):
    return generalConfigRequestProcess(request, 'gift')
def invite(request):
    return generalConfigRequestProcess(request, 'invite')
def infection_battle(request):
    return generalConfigRequestProcess(request, 'infection_battle')
def infection_prestige_price(request):
    return generalConfigRequestProcess(request, 'infection_prestige_price')
def infection_damage_award(request):
    return generalConfigRequestProcess(request, 'infection_damage_award')
def infection_prestige_award(request):
    return generalConfigRequestProcess(request, 'infection_prestige_award')
def infection_exploit_price(request):
    return generalConfigRequestProcess(request, 'infection_exploit_price')
def explore_award(request):
    return generalConfigRequestProcess(request, 'explore_award')
def generalConfigRequestProcess(request, confname):
    """Render or update the named config table.

    POST: try to save the submitted config string; on success show the index
    page, on failure re-render the form with the rejected value so the user
    can correct it.
    GET: show the stored config, creating an empty one on first access.
    """
    if request.method == 'POST':
        confstr = request.POST['config']
        try:
            config.setConfig(confname, confstr)
            return render(request, 'index.html', {})
        except Exception:
            # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed, keeping the best-effort re-render.
            return render(request, confname + '.html', {'config': confstr})
    else:
        confstr = config.getConfigStr(confname)
        if confstr is not None:
            return render(request, confname + '.html', {'config': confstr})
        else:
            # first access: create an empty config and show a blank form
            config.createConfig(confname)
            return render(request, confname + '.html', {'config': ''})
# Each view below is a one-line wrapper delegating the Excel upload for its
# table to the corresponding excel_import handler.
def monster_import(request):
    return excel_import.monster_import(request)
def level_import(request):
    return excel_import.level_import(request)
def dungeon_import(request):
    return excel_import.dungeon_import(request)
def skill_import(request):
    return excel_import.skill_import(request)
def skill_effect_import(request):
    return excel_import.skill_effect_import(request)
def skill_level_import(request):
    return excel_import.skill_level_import(request)
def pet_import(request):
    return excel_import.pet_import(request)
def pet_level_import(request):
    return excel_import.pet_level_import(request)
def garcha_import(request):
    return excel_import.garcha_import(request)
def equipment_import(request):
    return excel_import.equipment_import(request)
def strength_price_import(request):
    return excel_import.strength_price_import(request)
def luckycat_bless_import(request):
    return excel_import.luckycat_bless_import(request)
def luckycat_profit_import(request):
    return excel_import.luckycat_profit_import(request)
def luck_import(request):
    return excel_import.luck_import(request)
def language_import(request):
    return excel_import.language_import(request)
def stone_import(request):
    return excel_import.stone_import(request)
def stone_probability_import(request):
    return excel_import.stone_probability_import(request)
def stone_level_import(request):
    return excel_import.stone_level_import(request)
def trp_import(request):
    return excel_import.trp_import(request)
def trp_price_import(request):
    return excel_import.trp_price_import(request)
def trp_probability_import(request):
    return excel_import.trp_probability_import(request)
def educate_import(request):
    return excel_import.educate_import(request)
def educate_grade_import(request):
    return excel_import.educate_grade_import(request)
def almanac_combination_import(request):
    return excel_import.almanac_combination_import(request)
def reborn_import(request):
    return excel_import.reborn_import(request)
def ladder_score_import(request):
    return excel_import.ladder_score_import(request)
def name_import(request):
    return excel_import.name_import(request)
def arena_loot_import(request):
    return excel_import.arena_loot_import(request)
def drop_import(request):
    return excel_import.drop_import(request)
def dialog_import(request):
    return excel_import.dialog_import(request)
def drama_import(request):
    return excel_import.drama_import(request)
def quest_import(request):
    return excel_import.quest_import(request)
def item_import(request):
    return excel_import.item_import(request)
def tower_monster_import(request):
    return excel_import.tower_monster_import(request)
def tower_markup_import(request):
    return excel_import.tower_markup_import(request)
def tower_award_import(request):
    return excel_import.tower_award_import(request)
def medal_import(request):
    return excel_import.medal_import(request)
def medal_loot_import(request):
    return excel_import.medal_loot_import(request)
def medal_level_import(request):
    return excel_import.medal_level_import(request)
def mall_price_import(request):
    return excel_import.mall_price_import(request)
def practice_level_import(request):
    return excel_import.practice_level_import(request)
def practice_property_import(request):
    return excel_import.practice_property_import(request)
def slotmachine_import(request):
    return excel_import.slotmachine_import(request)
def vip_import(request):
    return excel_import.vip_import(request)
def potential_price_import(request):
    return excel_import.potential_price_import(request)
def email_import(request):
    return excel_import.email_import(request)
def gift_import(request):
    return excel_import.gift_import(request)
def infection_battle_import(request):
    return excel_import.infection_battle_import(request)
def infection_prestige_price_import(request):
    return excel_import.infection_prestige_price_import(request)
def infection_damage_award_import(request):
    return excel_import.infection_damage_award_import(request)
def infection_prestige_award_import(request):
    return excel_import.infection_prestige_award_import(request)
def infection_exploit_price_import(request):
    return excel_import.infection_exploit_price_import(request)
def explore_award_import(request):
    return excel_import.explore_award_import(request)
from django.contrib.auth.models import User
from django.utils import timezone
from locations.models import Page
from django.conf import settings
from django.db import models
import datetime
class Question(models.Model):
    """A quiz question attached to a Page (the quiz container)."""
    objects = models.Manager()
    # The quiz (Page) this question belongs to; deleting the page deletes its questions.
    quiz = models.ForeignKey(Page, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True, help_text="The date&time this question was created. Automatically generated when the model saves.")
    modified = models.DateTimeField(auto_now=True, help_text="The date&time this question was updated. Automatically generated when the model updates.")
    # Whether more than one choice may be selected as an answer.
    multiple = models.BooleanField(default=False, help_text="Is this a multiple choice answer")
    points = models.IntegerField(default=10, help_text="number of points given for getting the correct answer to this question")
    content = models.CharField(max_length=200, default="Write the content of your question here.")
    def __str__(self):
        return self.content
class Choice(models.Model):
    """One answer choice belonging to a Question."""
    objects = models.Manager()
    # The question this choice answers; deleting the question deletes its choices.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    created = models.DateTimeField(auto_now_add=True, help_text="The date&time this question was created. Automatically generated when the model saves.")
    modified = models.DateTimeField(auto_now=True, help_text="The date&time this question was updated. Automatically generated when the model updates.")
    content = models.CharField(max_length=200, default="Write the content of your question here.",)
    correct = models.BooleanField(default=False, help_text="Is this the correct answer")
    def was_published_recently(self):
        """Return True if this choice was created within the last 24 hours."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.created <= now
    # Admin list-display configuration for was_published_recently.
    was_published_recently.admin_order_field = 'created'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
    def __str__(self):
        return self.content
|
# Plot the per-game percent-change log of the trading game and print its sum.
import pandas as pd
import numpy as np
from random import randint
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pylab as pylab
from scipy import stats
# you need at least 2000 games to get a good result
log = pd.read_csv("/home/andras/PycharmProjects/TradingGame/logs/percentChange.csv", sep=",", index_col=0)
percentChange = log.percentChange
summedPercent = sum(percentChange)
print("SUM: ", summedPercent)
fig = plt.figure()
# AX 1 -
ax1 = fig.add_subplot(111)
ax1.plot(percentChange, "-", color='g', linewidth=1)
#plt.axhline(0, color='black', linewidth=0.5)
ax1.set_ylim([-10, 10])
plt.title("Guess Success")
fig.suptitle('Percent Change')  # or plt.suptitle('Main title')
#ax1.legend()
fig.tight_layout()
#fileName = "/home/andras/PycharmProjects/TradingGame/lab/img_" + imageName + ".png"
#fig.savefig(fileName)
plt.show()
# NOTE(review): the string below is a leftover subplot-number scratchpad
# (dead code; it is an expression statement with no effect).
'''
321
322
323
324
325
326
'''
# -*- coding: utf-8 -*-
# This file is part of the pyMor project (http://www.pymor.org).
# Copyright Holders: Felix Albrecht, Rene Milk, Stephan Rave
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import numpy as np
from algorithms.timestepping import imex_euler
from pymor.discretizations.basic import DiscretizationBase
from pymor.operators.constructions import VectorOperator
from pymor.vectorarrays.interfaces import VectorArrayInterface
from pymor.tools.arguments import method_arguments
from pymor.operators.interfaces import OperatorInterface
class InstationaryImexDiscretization(DiscretizationBase):
    """Instationary discretization solved with an IMEX (implicit-explicit) Euler
    time stepper: `explicit_operator` is treated explicitly, `implicit_operator`
    implicitly.  Note: this file targets Python 2 (`from __future__ import ...`
    at the top; `viewkeys` below is the Python 2 dict API).
    """
    def __init__(self, T, nt, initial_data, explicit_operator, implicit_operator, rhs=None, mass=None, num_values=None,
                 products=None, parameter_space=None, estimator=None, visualizer=None, cache_region='disk',
                 name=None):
        """T: final time; nt: number of time steps; initial_data: vector or
        vector-valued operator; rhs/mass: optional linear functional/operator."""
        assert isinstance(initial_data, (VectorArrayInterface, OperatorInterface))
        assert not isinstance(initial_data, OperatorInterface) or initial_data.source.dim == 1
        assert isinstance(explicit_operator, OperatorInterface)
        assert isinstance(implicit_operator, OperatorInterface)
        assert rhs is None or isinstance(rhs, OperatorInterface) and rhs.linear
        assert mass is None or isinstance(mass, OperatorInterface) and mass.linear
        # Wrap a raw vector as a constant operator so both input kinds are handled uniformly.
        if isinstance(initial_data, VectorArrayInterface):
            initial_data = VectorOperator(initial_data, name='initial_data')
        assert explicit_operator.source == explicit_operator.range == implicit_operator.source == implicit_operator.range == initial_data.range
        assert rhs is None or rhs.source == explicit_operator.source and rhs.range.dim == 1
        assert mass is None or mass.source == mass.range == explicit_operator.source
        operators = {'explicit_operator': explicit_operator, 'implicit_operator': implicit_operator, 'mass': mass}
        functionals = {'rhs': rhs}
        vector_operators = {'initial_data': initial_data}
        super(InstationaryImexDiscretization, self).__init__(operators=operators, functionals=functionals,
                                                             vector_operators=vector_operators,
                                                             products=products, estimator=estimator,
                                                             visualizer=visualizer, cache_region=cache_region, name=name)
        self.T = T
        self.solution_space = explicit_operator.source
        self.nt = nt
        self.initial_data = initial_data
        self.explicit_operator = explicit_operator
        self.implicit_operator = implicit_operator
        self.rhs = rhs
        self.mass = mass
        self.num_values = num_values
        # '_t' is provided by this discretization itself (current time during stepping).
        self.build_parameter_type(inherits=(initial_data, explicit_operator, implicit_operator, rhs, mass), provides={'_t': 0})
        self.parameter_space = parameter_space
    # Keyword names accepted by with_(); derived from __init__'s signature.
    with_arguments = frozenset(method_arguments(__init__)).union({'operators', 'functionals', 'vector_operators'})
    def with_(self, **kwargs):
        """Return a copy with the given constructor arguments replaced."""
        assert set(kwargs.keys()) <= self.with_arguments
        assert 'operators' not in kwargs or kwargs['operators'].viewkeys() <= {'explicit_operator', 'implicit_operator', 'mass'}
        assert 'functionals' not in kwargs or kwargs['functionals'].viewkeys() <= {'rhs'}
        assert 'vector_operators' not in kwargs or kwargs['vector_operators'].viewkeys() <= {'initial_data'}
        assert 'operators' not in kwargs or not set(kwargs['operators']).intersection(kwargs.viewkeys())
        assert 'functionals' not in kwargs or not set(kwargs['functionals']).intersection(kwargs.viewkeys())
        assert 'vector_operators' not in kwargs or not set(kwargs['vector_operators']).intersection(kwargs.viewkeys())
        assert 'time_stepper_nt' not in kwargs or 'time_stepper' not in kwargs
        # Flatten the container arguments into individual constructor arguments.
        if 'operators' in kwargs:
            kwargs.update(kwargs.pop('operators'))
        if 'functionals' in kwargs:
            kwargs.update(kwargs.pop('functionals'))
        if 'vector_operators' in kwargs:
            kwargs.update(kwargs.pop('vector_operators'))
        if 'time_stepper_nt' in kwargs:
            kwargs['time_stepper'] = self.time_stepper.with_(nt=kwargs.pop('time_stepper_nt'))
        return self._with_via_init(kwargs)
    def _solve(self, mu=None):
        """Solve for parameter `mu` by IMEX Euler stepping from t=0 to t=T."""
        mu = self.parse_parameter(mu).copy()
        # explicitly checking if logging is disabled saves the expensive str(mu) call
        if not self.logging_disabled:
            # NOTE(review): self.linear / self.operator are not set in this class;
            # presumably provided by DiscretizationBase -- confirm.
            if self.linear:
                pt = 'sparsity unknown' if getattr(self.operator, 'sparse', None) is None \
                    else ('sparse' if self.operator.sparse else 'dense')
            else:
                pt = 'nonlinear'
            self.logger.info('Solving {} ({}) for {} ...'.format(self.name, pt, mu))
        mu['_t'] = 0
        U0 = self.initial_data.as_vector(mu)
        return imex_euler(self.explicit_operator, self.implicit_operator, self.rhs, U0, 0, self.T, self.nt, mu)
|
# I pledge my honor that I have abided by the Stevens Honor System - Owen Gresham
def isDateValid(date):
    """Validate a "month/day/year" date string and print the verdict.

    Prints "Date is valid" for a well-formed date and "Invalid date"
    otherwise.  Returns None in all cases (the verdict is the printed text).

    Fixes over the original:
    - non-numeric components used to crash with ValueError; now reported
      as an invalid date;
    - February 29 is now accepted in leap years (Gregorian rule).
    """
    daysPerMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    dateVals = date.split("/")
    if len(dateVals) != 3:
        print("Invalid date")
        return
    try:
        month = int(dateVals[0])
        day = int(dateVals[1])
        year = int(dateVals[2])
    except ValueError:
        # e.g. "a/b/c" -- previously an unhandled crash.
        print("Invalid date")
        return
    if month < 1 or month > 12:
        print("Invalid date")
        return
    if year < 0:
        print("Invalid date")
        return
    # Gregorian leap year: divisible by 4, except centuries not divisible by 400.
    maxDay = daysPerMonth[month - 1]
    if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        maxDay = 29
    if day < 1 or day > maxDay:
        print("Invalid date")
        return
    print("Date is valid")
def main():
    """Prompt the user for a date and report whether it is valid."""
    print("This program accepts a date in the format month/day/year and outputs whether the date is valid.")
    isDateValid(input("Enter a date in month/day/year format: "))
main()
|
import setuptools
import coin
def get_package_description() -> str:
    """Return the long package description read from README.md.

    The file is decoded as UTF-8 explicitly so the result does not depend on
    the platform's default locale encoding (e.g. cp1252 on Windows, which
    would break on non-ASCII characters in the README).
    """
    with open("README.md", "r", encoding="utf-8") as stream:
        return stream.read()
# Package metadata; the version is taken from the coin package itself so it
# is defined in exactly one place.
setuptools.setup(
    name="coin",
    version=coin.__version__,
    author="Colin Manko",
    author_email="",
    description="",
    long_description=get_package_description(),
    long_description_content_type="text/markdown",
    url="https://github.com/big-c-note/",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.8",
)
|
#
# Runs two NerTagger's models on given PostgreSQL collection.
# Finds differences between NE annotations produced by two models.
# The collection must have input_layers required by NerTagger.
#
# Outputs summarized statistics about differences, and writes all differences into a
# file. The output will be written into a directory named 'diff_' + collection's name.
#
import os, sys, re
import os.path
import argparse
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
from estnltk import logger
from estnltk.storage.postgres import PostgresStorage
from estnltk.storage.postgres import IndexQuery
from estnltk.taggers import DiffTagger
from estnltk.layer_operations import flatten
from estnltk.taggers.standard_taggers.diff_tagger import iterate_diff_conflicts
from conf_utils import pick_random_doc_ids
from conf_utils import create_ner_tagger_from_model
from conf_utils import flip_ner_input_layer_names
from conf_utils import find_ner_dependency_layers
from conf_utils import load_in_doc_ids_from_file
from ner_diff_utils import NerDiffFinder
from ner_diff_utils import NerDiffSummarizer
from ner_diff_utils import write_formatted_diff_str_to_file
if __name__ == '__main__':
    # Fixes over the original:
    #  * --filename_key help text claimed "(default: 'fname')" while the actual
    #    default is 'file'; the help now matches the code.
    #  * the no-op "except: raise" clause was dropped (try/finally is enough).
    parser = argparse.ArgumentParser(description=
       "Runs two NerTagger models on given PostgreSQL collection. "+
       "Finds differences between NE annotations produced by two models. "+
       "Outputs summarized statistics about differences, and writes all differences into a "+
       "file. By default, the output will be written into a directory named 'diff_' + "+
       "collection's name. ")
    # 1) Specification of the evaluation settings #1
    parser.add_argument('collection', type=str, \
                        help='name of the collection on which the evaluation will be performed. '+
                             'the collection must have input_layers required by NerTagger.')
    parser.add_argument('first_model', type=str, \
                        help='location of the first NE model to be compared (the "old layer"). '+
                             'must be a directory containing files "model.bin" and "settings.py".')
    parser.add_argument('second_model', type=str, \
                        help='location of the second NE model to be compared against (the "new layer"). '+
                             'must be a directory containing files "model.bin" and "settings.py".')
    # 2) Database access & logging parameters
    parser.add_argument('--pgpass', dest='pgpass', action='store', \
                        default='~/.pgpass', \
                        help='name of the PostgreSQL password file (default: ~/.pgpass). '+\
                             'the format of the file should be: hostname:port:database:username:password ')
    parser.add_argument('--schema', dest='schema', action='store',\
                        default='public',\
                        help='name of the collection schema (default: public)')
    parser.add_argument('--role', dest='role', action='store',
                        help='role used for accessing the collection. the role must have a read access. (default: None)')
    parser.add_argument('--logging', dest='logging', action='store', default='info',\
                        choices=['debug', 'info', 'warning', 'error', 'critical'],\
                        help='logging level (default: info)')
    # 3) Specification of the evaluation settings #2
    parser.add_argument('--old_ner_layer', dest='old_ner_layer', action='store', default='old_named_entities',\
                        help="name of the NE layer created by the first_model; assumingly the \"old layer\". "+
                             "(default: 'old_named_entities')")
    parser.add_argument('--new_ner_layer', dest='new_ner_layer', action='store', default='new_named_entities',\
                        help="name of the NE layer created by the second_model; assumingly the \"new layer\". "+
                             "(default: 'new_named_entities')")
    parser.add_argument('--old_ne_attr', dest='old_ne_attr', action='store', default='nertag',\
                        help="name of the attribute containing NE type label in the old "+
                             "layer. (default: 'nertag')")
    parser.add_argument('--new_ne_attr', dest='new_ne_attr', action='store', default='nertag',\
                        help="name of the attribute containing NE type label in the new "+
                             "layer. (default: 'nertag')")
    parser.add_argument('--in_prefix', dest='in_prefix', action='store', default='',\
                        help="prefix for filtering collection layers suitable as NerTagger's input layers."+\
                             " if the collection contains multiple candidates for an input layer (e.g. multiple "+\
                             " 'words' layers), then only layers with the given prefix will be used as input layers. "+\
                             "(default: '')" )
    parser.add_argument('--in_suffix', dest='in_suffix', action='store', default='',\
                        help="suffix for filtering collection layers suitable as NerTagger's input layers."+\
                             " if the collection contains multiple candidates for an input layer (e.g. multiple "+\
                             " 'words' layers), then only layers with the given suffix will be used as input layers. "+\
                             "(default: '')" )
    parser.add_argument('--filename_key', dest='file_name_key', action='store', default='file',\
                        help="name of the key in text object's metadata which conveys the original file "+\
                             "name. if the key is specified and corresponding keys are available in "+\
                             "metadata (of each text object), then each of the collection's document will be "+\
                             "associated with its corresponding file name (that is: the file name will be the "+\
                             "identifier of the document in the output). Otherwise, the identifier of the document "+\
                             "in the output will be 'doc'+ID, where ID is document's numeric index in "+\
                             "the collection. "+\
                             "(default: 'file')" )
    parser.add_argument('--textcat_key', dest='text_cat_key', action='store', default='subcorpus',\
                        help="name of the key in text object's metadata which conveys subcorpus "+\
                             "or text category name. if the key is specified and corresponding keys are "+\
                             "available in metadata (of each text object), then the evaluation / difference "+\
                             "statistics will be recorded / collected subcorpus wise. Otherwise, no subcorpus "+\
                             "distinction will be made in difference statistics and output. "+\
                             "(default: 'subcorpus')" )
    parser.add_argument('-r', '--rand_pick', dest='rand_pick', action='store', type=int, \
                        help="integer value specifying the amount of documents to be randomly chosen for "+\
                             "difference evaluation. if specified, then the given amount of documents will be "+\
                             "processed (instead of processing the whole corpus). if the amount exceeds the "+\
                             "corpus size, then the whole corpus is processed. (default: None)" )
    parser.add_argument('-f', '--file_pick', dest='file_pick', action='store', type=str, \
                        help="name of the file containing indexes of the documents that need to be processed "+\
                             "in the difference evaluation. if specified, then only documents listed in the "+\
                             "file will be processed (instead of processing the whole corpus). note: each "+\
                             "document id must be on a separate line in the index file. (default: None)" )
    parser.add_argument('--out_dir_prefix', dest='out_dir_prefix', action='store', default='diff_',\
                        help="a prefix that will be added to the output directory name. the output directory "+\
                             " name will be: this prefix concatenated with the name of the collection. "+\
                             "(default: 'diff_')" )
    args = parser.parse_args()
    logger.setLevel( (args.logging).upper() )
    log = logger
    storage = PostgresStorage(pgpass_file=args.pgpass,
                              schema=args.schema,
                              role=args.role)
    try:
        # Check model dirs and layer names
        if args.first_model == args.second_model:
            log.error("(!) Invalid model dictories: first_model cannot be identical to second_model: {!r}".format(args.first_model))
            exit(1)
        if args.old_ner_layer == args.new_ner_layer:
            log.error("(!) Indistinguishable layer names: old_ner_layer cannot be identical to new_ner_layer: {!r}".format(args.old_ner_layer))
            exit(1)
        collection = storage.get_collection( args.collection )
        if not collection.exists():
            log.error(' (!) Collection {!r} does not exist...'.format(args.collection))
            exit(1)
        else:
            docs_in_collection = len( collection )
            log.info(' Collection {!r} exists and has {} documents. '.format( args.collection,
                                                                              docs_in_collection ))
            log.info(' Collection {!r} has layers: {!r} '.format( args.collection,
                                                                  collection.layers ))
            chosen_doc_ids = []
            if args.rand_pick is not None and args.rand_pick > 0:
                # Pick a random sample (instead of the whole corpus)
                chosen_doc_ids = pick_random_doc_ids( args.rand_pick, storage, args.schema, args.collection, logger )
                log.info(' Random sample of {!r} documents chosen for processing.'.format( len(chosen_doc_ids) ))
            elif args.file_pick is not None:
                # Or load target document indexes from the file
                chosen_doc_ids = load_in_doc_ids_from_file( args.file_pick, storage, args.schema, args.collection, logger)
                log.info(' {!r} document indexes loaded from {!r} for processing.'.format( len(chosen_doc_ids), args.file_pick ))
            # Create ner_taggers
            first_ner_tagger, first_ner_input_layers_mapping = \
                  create_ner_tagger_from_model( args.old_ner_layer, args.first_model, collection, log,
                                                incl_prefix=args.in_prefix, incl_suffix=args.in_suffix )
            second_ner_tagger, second_ner_input_layers_mapping = \
                  create_ner_tagger_from_model( args.new_ner_layer, args.second_model, collection, log,
                                                incl_prefix=args.in_prefix, incl_suffix=args.in_suffix )
            assert first_ner_input_layers_mapping == second_ner_input_layers_mapping
            ner_diff_finder = NerDiffFinder( args.old_ner_layer, args.new_ner_layer,
                                             old_layer_attr=args.old_ne_attr,
                                             new_layer_attr=args.new_ne_attr )
            ner_diff_summarizer = NerDiffSummarizer( args.old_ner_layer, args.new_ner_layer )
            startTime = datetime.now()
            # Create output directory name
            output_dir = args.out_dir_prefix + args.collection
            if not os.path.exists(output_dir):
                os.mkdir(output_dir)
            # Timestamp for output files
            output_file_prefix = os.path.splitext(sys.argv[0])[0]
            assert os.path.sep not in output_file_prefix
            output_file_suffix = startTime.strftime('%Y-%m-%dT%H%M%S')
            eval_layers = list(first_ner_input_layers_mapping.values())
            data_iterator = None
            if chosen_doc_ids:
                data_iterator = collection.select( IndexQuery(keys=chosen_doc_ids), progressbar='ascii', layers=eval_layers )
            else:
                data_iterator = collection.select( progressbar='ascii', layers=eval_layers )
            for key, text in data_iterator:
                # 0) Fetch document and subcorpus' identifiers
                fname_stub = 'doc' + str(key)
                if args.file_name_key is not None:
                    if args.file_name_key in text.meta.keys() and text.meta[args.file_name_key] is not None:
                        fname_stub = text.meta[ args.file_name_key ]+f'({key})'
                text_cat = 'corpus'
                if args.text_cat_key is not None:
                    if args.text_cat_key in text.meta.keys() and text.meta[args.text_cat_key] is not None:
                        text_cat = text.meta[ args.text_cat_key ]
                # 1) Add new NE annotations
                # Hack: we need to flip input layer names, because NerTagger
                # currently does not allow customizing input layer names
                flip_ner_input_layer_names( text, first_ner_input_layers_mapping )
                first_ner_tagger.tag( text )
                second_ner_tagger.tag( text )
                # 2) Find differences between old and new layers
                #    Get differences within their respective contexts (as string)
                #    Get number of grouped differences (diff_gaps)
                diff_layer, formatted_str, diff_gaps = ner_diff_finder.find_difference( text, fname_stub, text_cat )
                # 3) Record difference statistics
                ner_diff_summarizer.record_from_diff_layer( 'named_entities', diff_layer, text_cat )
                # 4) Output NE-s that have differences in annotations along with their contexts
                if formatted_str is not None and len(formatted_str) > 0:
                    fpath = os.path.join(output_dir, f'_{output_file_prefix}__ann_diffs_{output_file_suffix}.txt')
                    write_formatted_diff_str_to_file( fpath, formatted_str )
            summarizer_result_str = ner_diff_summarizer.get_diffs_summary_output( show_doc_count=True )
            log.info( os.linesep+os.linesep+'TOTAL DIFF STATISTICS:'+os.linesep+summarizer_result_str )
            time_diff = datetime.now() - startTime
            log.info('Total processing time: {}'.format(time_diff))
            # Write summarizer's results to output dir
            fpath = os.path.join(output_dir, f'_{output_file_prefix}__stats_{output_file_suffix}.txt')
            with open(fpath, 'w', encoding='utf-8') as out_f:
                out_f.write( 'TOTAL DIFF STATISTICS:'+os.linesep+summarizer_result_str )
                out_f.write( 'Total processing time: {}'.format(time_diff) )
    finally:
        # Always release the DB connection, whatever happened above.
        storage.close()
|
"""System module."""
from webapp import myapp
app = myapp()
client = app.test_client()
def test_root():
    """The root endpoint must answer with the literal greeting."""
    response = client.get("http://localhost:5000/")
    print(response.data)
    assert response.data == b'Hello World!'
def test_health():
    """The /health endpoint must respond with HTTP 200."""
    # Test 2 - Health endpoint
    assert client.get("http://localhost:5000/health").status_code == 200
def test_metadata():
    """The /metadata endpoint must respond with HTTP 200."""
    # Test 2 - Test metadata
    assert client.get("http://localhost:5000/metadata").status_code == 200
|
Python 3.8.5 (tags/v3.8.5:580fbb0, Jul 20 2020, 15:43:08) [MSC v.1926 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> #I pledge my honor that I have abided by the Stevens Honor System -Kyriacos Petrou
>>> #valid date code
>>> def main():
date = input("Enter the date in mm/dd/2001 format: ")
month, day, year = date.split('/')
month=int(month)
day=int(day)
year=int(year)
if month == 4 or month == 6 or month == 9 or month == 11:
if 1<= day <= 30 and year > 0:
print("Date is valid. ")
else:
print("Date is invalid. ")
if month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12:
if 1 <= day <= 31 and year > 0:
print("Date is valid. ")
else:
print("Date is invalid. ")
if month == 2:
if 1 <= day <= 28 and year > 0:
print("Date is valid. ")
else:
print("Date is invalid. ")
>>> main()
Enter the date in mm/dd/2001 format: 9/7/2001
Date is valid.
>>> |
import cv2
import tensorflow as tf
import sys
DATADIR = "../ruap_data/test/"
IMG_SIZE = 350
IMG_NAME = sys.argv[1] if len(sys.argv) > 1 else "somevalue"
# image preparation for testing
def prepare(filepath):
    """Load an image as grayscale, resize it to IMG_SIZE x IMG_SIZE and
    return it shaped (1, IMG_SIZE, IMG_SIZE, 1) for model.predict().

    Raises FileNotFoundError when the file is missing or unreadable:
    cv2.imread signals failure by returning None, which previously
    surfaced only as an opaque error inside cv2.resize.
    """
    img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    if img_array is None:
        raise FileNotFoundError("Could not read image: " + filepath)
    new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
# load the trained model
model = tf.keras.models.load_model("64x3-CNN.model")
# run prediction on the prepared test image
prediction = model.predict([prepare(DATADIR + IMG_NAME)])
print(prediction)
# print the prediction - in some cases an array is returned whose max value is
# 0.9999..., so max() is used to pick the winning class for the printout
if prediction[0][0] == max(prediction[0]):
    print("Piće na slici je Jager")
elif prediction[0][1] == max(prediction[0]):
    print("Piće na slici je Stock")
elif prediction[0][2] == max(prediction[0]):
    print("Piće na slici je viski")
|
# -*- coding : utf-8 -*-
# Walk the project root, collect every .java file, and run the file updater
# on each one, logging progress along the way.
import path_helper as ph
import file_updater as fu
import log
root_path = ph.get_root_path()
all_java_files = ph.list_all_java_file(root_path)
# Log how many files will be touched before starting.
log.log(len(all_java_files))
for java_file in all_java_files:
    log.i("Modifying: ", java_file)
    fu.update_file(java_file)
import matplotlib.pyplot as plt
def set_size(width=17, height=4):
    """Reshape the default notebook figure box to *width* x *height* inches."""
    plt.rcParams.update({'figure.figsize': [width, height]})
def set_style(style='seaborn-deep'):
    """Apply a matplotlib style sheet, raising ValueError for unknown names."""
    try:
        plt.style.use(style)
    except OSError:
        message = (
            f"Style {style} not available"
            ": Check plt.style.available for list of available styles"
        )
        raise ValueError(message)
|
#input="taco cat"
def palindromePermutation(string):
    """Return True if some permutation of the letters in *string* is a palindrome.

    Non-letter characters (spaces, digits, punctuation) are ignored; letter
    comparison is case-sensitive, as in the original.  A multiset of letters
    can be rearranged into a palindrome iff at most one letter has an odd count.

    Bug fixed: the original upper-case range test was ord(i) <= 92, which
    wrongly counted '[' (91) and '\\' (92) as letters -- 'Z' is 90.
    """
    # Frequency table over ASCII letters only (A-Z is 65-90, a-z is 97-122).
    freq = {}
    for ch in string:
        code = ord(ch)
        if (65 <= code <= 90) or (97 <= code <= 122):
            freq[ch] = freq.get(ch, 0) + 1
    # At most one odd letter count is allowed (the palindrome's middle char).
    odd = 0
    for count in freq.values():
        if count % 2 != 0:
            odd += 1
            if odd > 1:
                return False
    return True
string="taco aat"
result=palindromePermutation(string)
print ("Is the string a permutation of Palindrome:",result)
|
# Run a trained ResNet-v2-50 classifier over the test set and write
# "AreaID<TAB>classID" predictions to result.txt (TF1 graph-mode code).
from data.DataGenerator import ImageDataGenerator
from core import resnet_v2
import tensorflow as tf
import math
slim = tf.contrib.slim
result_txt_file = "D:\\pycharm_program\\UrbanFunctionClassification\\result.txt"
DATASET_DIR = "D:\\competition\\data\\test\\test\\"
CHECKPOINT_DIR = 'D:\\pycharm_program\\UrbanFunctionClassification\\checkpoint\\'
NUM_CLASSES = 9
BATCHSIZE = 1
##################### get the input pipline ############################
with tf.name_scope("input"):
    DataGenerator = ImageDataGenerator(DATASET_DIR, mode="testing")
    # get the dataset statistics
    test_set_length = DataGenerator.test_set_length
    print("test_set_length:%d" % test_set_length)
    TestDataset = DataGenerator.getBatchData(type="testing", batch_size=BATCHSIZE)
    iterator = TestDataset.make_one_shot_iterator()
    next_batch = iterator.get_next()
##################### get the input pipline ############################
##################### setup the network ################################
x = tf.placeholder(tf.float32, shape=(None, 100, 100, 3))
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
    net_output, end_points = resnet_v2.resnet_v2_50(x, NUM_CLASSES, is_training=False)
# evaluation op: index of the highest-scoring class
with tf.name_scope("test"):
    pre = tf.argmax(net_output, 1)
##################### setup the network ################################
with tf.Session() as sess:
    # initial variables
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    # restore the latest checkpoint if one exists
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored .....")
    # inference loop over the test set
    print("testing start")
    test_batches_of_epoch = int(math.ceil(test_set_length/BATCHSIZE))
    result = []
    # NOTE(review): this loop runs a fixed 20 steps; test_batches_of_epoch is
    # computed above but never used -- this looks like a debugging leftover
    # and probably should be range(test_batches_of_epoch). Confirm.
    for step in range(20):
        img_batch, AreaID_batch = sess.run(next_batch)
        classIDs = sess.run(pre, feed_dict={x: img_batch})
        # class indices are 0-based; submission labels are 1-based, zero-padded
        classID = (classIDs[0] + 1)
        dict_result = {"AreaID": AreaID_batch[0].decode('UTF-8'), "classID": "00" + str(classID)}
        result.append(dict_result)
    with open(result_txt_file, "w") as f:
        for item in result:
            f.write(item["AreaID"] + "\t" + item["classID"] + "\n")
from CallBackOperator import CallBackOperator
from ConnectionPackage.ConnectionParameters import ConnectionParameters
class ComboBoxOperator(CallBackOperator):
    """Routes the serial-connection widgets of *window* (combo boxes and the
    COM-port spin box) into a ConnectionParameters instance via Qt signals."""

    def __init__(self, window, model=None, value_range=None):
        super().__init__(window, model, value_range)
        self.ConnectionParameters = ConnectionParameters()

    def ConnectCallBack(self):
        """Wire every connection-settings widget to its setter method."""
        widgets = self.window
        widgets.BaudRatecomboBox.currentIndexChanged.connect(self.SetBaudRate)
        widgets.COMPortspinBox.valueChanged.connect(self.SetCOMPort)
        widgets.ProtocolcomboBox.currentIndexChanged.connect(self.SetProtocol)
        widgets.ByteSizecomboBox.currentIndexChanged.connect(self.SetByteSize)
        widgets.ParitycomboBox.currentIndexChanged.connect(self.SetParity)
        widgets.StopBitscomboBox.currentIndexChanged.connect(self.SetStopBits)

    # overridden: combo boxes need no line-edit initialization
    def init_line_edit(self):
        pass

    # overridden: combo boxes need no slider initialization
    def init_slider(self):
        pass

    def SetProtocol(self):
        text = self.window.ProtocolcomboBox.currentText()
        if text:
            self.ConnectionParameters.SetProtocol(text)

    def SetByteSize(self):
        text = self.window.ByteSizecomboBox.currentText()
        if text:
            self.ConnectionParameters.SetByteSize(int(text))

    def SetParity(self):
        text = self.window.ParitycomboBox.currentText()
        if text:
            self.ConnectionParameters.SetParity(text)

    def SetStopBits(self):
        text = self.window.StopBitscomboBox.currentText()
        if text:
            self.ConnectionParameters.SetStopBits(int(text))

    def SetBaudRate(self):
        text = self.window.BaudRatecomboBox.currentText()
        if text:
            self.ConnectionParameters.SetBaudRate(int(text))

    def SetCOMPort(self):
        # Spin box always holds an int; no emptiness check needed.
        self.ConnectionParameters.SetCOMPort(self.window.COMPortspinBox.value())

    def value_changed(self, val):
        pass
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 2 17:34:38 2017
@author: InfiniteJest
"""
def chunkize_query(chunksize, data):
    """Yield one SQL fragment per *chunksize*-sized slice of *data*.

    NOTE(review): the template below contains no ``{column_data}``
    placeholder, so ``.format()`` returns it unchanged and the quoted
    column values are effectively discarded -- the query text appears to
    have been stripped from this file; confirm against the original.
    """
    fmt = '''
    '''
    for start in range(0, len(data), chunksize):
        chunk = data[start:start + chunksize]
        quoted = ', '.join("'" + str(item) + "'" for item in chunk)
        print('M FOR MINI')
        yield fmt.format(column_data=quoted)
def chunkize_query_percent(no_chunks, percent):
    """Yield *no_chunks* WHERE-clause fragments, each pseudo-randomly sampling
    percent/no_chunks percent of rows via BINARY_CHECKSUM bucketing.

    Bug fixed: the original accumulated floats (``chunk_run += chunk``) until
    reaching *percent*, which could yield an extra chunk whenever
    percent/no_chunks is not exactly representable (e.g. no_chunks=3,
    percent=1 produced 4 chunks, sampling 4/3 of the requested amount).
    Iterating a fixed count avoids the float-accumulation error.
    """
    amount = percent / no_chunks
    fmt = '''
    Where (ABS(CAST((BINARY_CHECKSUM(*) * RAND()) as int)) % 100) < {amount}
    '''
    for _ in range(no_chunks):
        print('W FOR WUMBO')
        yield fmt.format(amount=amount)
def write_sqlchunks_to_file(file_name, chunks, with_header=True, delimiter=',', quotechar='"', quoting=None):
    """Execute each SQL chunk from *chunks* and append all result rows to
    *file_name* as CSV.

    When *with_header* is true, a header row built from the first chunk's
    cursor description is written before any data.  *quoting* defaults to
    csv.QUOTE_NONNUMERIC (resolved lazily -- see below).

    Bugs fixed:
    - ``csv`` was never imported, so the original default argument
      ``quoting=csv.QUOTE_NONNUMERIC`` raised NameError when the function was
      defined; the module header has no import block to extend, so the import
      is function-local and the default is a None sentinel.
    - the output file and cursors leaked on any exception; ``with``/``finally``
      now guarantee cleanup.
    - ``next(chunks)`` required *chunks* to be an iterator; a plain list now
      works too.
    """
    import csv  # local: this file has no top-level import block to extend
    if quoting is None:
        quoting = csv.QUOTE_NONNUMERIC
    # NOTE(review): `conn` must be an open DB-API connection available at
    # module level before this is called -- it is not defined in this file;
    # confirm where it comes from.
    with open(file_name, 'a', newline='', encoding='utf-8') as ofile:
        csv_writer = csv.writer(ofile, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
        first_chunk = True
        for chunk in chunks:
            cur = conn.cursor()
            try:
                cur.execute(chunk)
                if first_chunk and with_header:
                    csv_writer.writerow([field[0] for field in cur.description])
                for row in cur.fetchall():
                    csv_writer.writerow(row)
            finally:
                cur.close()
            first_chunk = False
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from sklearn import linear_model
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
# 定义自变量x
x = np.array([[1], [2], [3], [4]], dtype=np.float32)
# 定义因变量y
y = np.array([6, 5, 7, 10], dtype=np.float32)
# 加载scikit-learn的线性模型
linear = linear_model.LinearRegression()
# 通过x和y来建立线性模型
linear.fit(x, y)
# 查看模型系数β2
print(linear.coef_) # [ 1.39999998]
# 查看模型的截距β1
print(linear.intercept_) # 3.5000000596
|
# see https://blog.alexandruioan.me/2017/01/31/the-2017-university-of-bristol-arm-hackathon for more details
import math
from http.server import BaseHTTPRequestHandler, HTTPServer, urllib
from sys import argv
import sys
import serial
import threading
import queue
import numpy as np
import time
# Work queue feeding the serial-writer thread below.
q = queue.Queue()
# Arm segment length used by the inverse-kinematics grid (units: presumably
# the same as the POSTed point coordinates -- confirm against the firmware).
radius = 250
# Byte-string commands understood by the firmware: pen servo positions,
# initial joint angles, and the "move" trigger.
DOWN = b'c130'
UP = b'c70'
INIT_A = b'a128'
INIT_B = b'b84'
MOVE = b'm'
# serial flag for debugging
SEND = True
if SEND:
    # Open the serial link and drive the arm to its home pose, pen raised.
    ser = serial.Serial('/dev/ttyACM0', 115200)
    ser.write(UP)
    ser.write(INIT_A)
    ser.write(INIT_B)
    ser.write(MOVE)
# separate thread for sending data to serial
def serial_send(q):
    """Worker loop: forever take queued byte strings and write them to the
    serial port (skipping the hardware write when SEND is off)."""
    while True:
        payload = q.get()
        if SEND:
            print("Write start")
            ser.write(payload)
            print("Write end")
        q.task_done()
# Start the serial-writer thread as a daemon so it dies with the process.
# NOTE(review): Thread.setDaemon() is deprecated in modern Python; the
# attribute form `thr.daemon = True` is the current spelling.
thrSend = threading.Thread(target=serial_send, args=(q,))
thrSend.setDaemon(True)
thrSend.start()
def serial_read():
    # Echo everything the firmware sends back, line by line, forever.
    while True:
        print(ser.readline())
thrRecv = threading.Thread(target=serial_read)
thrRecv.setDaemon(True)
thrRecv.start()
class req_handler(BaseHTTPRequestHandler):
    """HTTP handler that receives POSTed stroke points and queues servo
    angle commands for the serial-writer thread.

    Bug fixed: the angle computation called ``calculateANgle`` (typo), which
    raised NameError at runtime -- the function defined below is
    ``calculateAngle``.
    """
    # Last drawn point, shared across requests (class attributes).
    prevX = -1
    prevY = -1
    # this runs when points are POSTed to the server
    # it triggers the calculation of the angles of the servos
    # and puts them in a queue
    # the thread at the other end of the queue sends the data over serial
    def do_POST(self):
        length = int(self.headers['Content-Length'])
        post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        count = int((post_data['count'])[0])
        q.put_nowait(UP)
        for i in range(count):
            pointX = float((post_data['p' + str(i) + 'x'])[0])
            pointY = float((post_data['p' + str(i) + 'y'])[0])
            # Sentinel points (0,-1)/(-1,0) encode pen down / pen up commands.
            if pointX == 0 and pointY == -1:  # pen down
                q.put_nowait(DOWN)
                print("pen down")
                continue
            elif pointX == -1 and pointY == 0:  # pen up
                q.put_nowait(UP)
                print("pen up")
                continue
            # don't draw points that are too close
            if (req_handler.prevX, req_handler.prevY) != (-1, -1):
                if math.sqrt((req_handler.prevX - pointX)**2 + (req_handler.prevY - pointY)**2) < 0.5:
                    continue
            # Inverse kinematics: point -> pair of servo angles.
            # (was calculateANgle -- NameError at runtime)
            (theta1, theta2) = calculateAngle(pointX, pointY)
            (a, b) = (int(round(theta1)), int(round(theta2)))
            to_send = ('a' + str(a) + 'b' + str(b) + 'm' + '\n').encode('ascii')
            q.put_nowait(to_send)
            # save the current position
            (req_handler.prevX, req_handler.prevY) = (pointX, pointY)
        # TODO return to center?
        q.put_nowait(UP)
# https://uk.mathworks.com/help/fuzzy/examples/modeling-inverse-kinematics-in-a-robotic-arm.html
# Pre-generate a forward-kinematics lookup grid for the two-link arm (both
# links of length `radius`), sampling each joint angle over [0, pi) in 0.01 steps.
theta1range = np.arange(0, math.pi, 0.01)
theta2range = np.arange(0, math.pi, 0.01)
THETA1, THETA2 = np.meshgrid(theta1range, theta2range)
# End-effector (x, y) position for every (theta1, theta2) pair on the grid.
X_pred = radius * np.cos(THETA1) + radius * np.cos(THETA1 + THETA2)
Y_pred = radius * np.sin(THETA1) + radius * np.sin(THETA1 + THETA2)
# Stacked (N, N, 2) array of candidate points used by calculateAngle's
# nearest-neighbour search.
grid = np.dstack((X_pred, Y_pred))
def calculateAngle(pointX, pointY):
    """Inverse kinematics by nearest-neighbour lookup on the precomputed grid.

    Finds the (theta1, theta2) joint-angle pair whose forward-kinematics
    end-effector position is closest (Euclidean) to the target point, and
    returns both angles in degrees.
    """
    # Broadcast the target point against the module-level (N, N, 2) `grid`
    # instead of tiling it with np.repeat and rebuilding the grid on every
    # call — identical distances, no intermediate copies.
    target = np.array([pointX, pointY])
    dists = np.linalg.norm(grid - target, axis=2)
    # Grid indices of the point closest to the target.
    idx1, idx2 = np.unravel_index(dists.argmin(), dists.shape)
    # Look up the corresponding joint angles and convert to degrees.
    return (math.degrees(THETA1[idx1, idx2]), math.degrees(THETA2[idx1, idx2]))
# start the HTTP server, binding it to all addresses, port 1180
def run(server_class = HTTPServer, handler_class = req_handler, port = 1180):
    """Bind an HTTP server to all interfaces on *port* and serve forever."""
    httpd = server_class(('0.0.0.0', port), handler_class)
    print('Starting httpd...')
    httpd.serve_forever()
def main():
    """Run the server; an optional command-line argument selects the port."""
    kwargs = {}
    if len(argv) == 2:
        kwargs['port'] = int(argv[1])
    try:
        run(**kwargs)
    except KeyboardInterrupt:
        print("Exiting...")
        sys.exit()
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.