text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/4/25 下午7:46
# @Author : ZHZ
# @Description : 根据num_days去划分数据集并图表显示,默认值是14
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator,FormatStrFormatter
# --- Module-level configuration and data loading (Python 2 script) ---
# Tick locator shared by all plots.
# NOTE(review): the second assignment is redundant (same value assigned twice).
xmajorLocator = MultipleLocator(1)
xmajorLocator = MultipleLocator(1)
num_days = 14        # aggregation window size in days (default two weeks)
sum_flag_temp = 0
days_20141009 = datetime.datetime(2014, 10, 9)  # reference date for day offsets
item_id_dict = {}    # original item_id -> compact sequential id (filled by transItemID)
all_item_sum = []
kid_item_sum = []
count1 = []
count2 = []
new_father_kid_item_x = []  # accumulates aggregated window rows across the transforms
'''生成新的days_20141009列'''
# Add a days_20141009 column: number of days elapsed since 2014-10-09.
# The yyyymmdd integer is split with Python 2 integer division (x / 10000 etc.).
filtered_outlier_if = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_if1_filtered_3std.csv");
filtered_outlier_if['days_20141009'] = filtered_outlier_if['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
filtered_outlier_isf = pd.read_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/Data/OutputData/1_isf1_filtered_3std.csv");
filtered_outlier_isf['days_20141009'] = filtered_outlier_isf['date'].\
    map(lambda x:(datetime.datetime(x / 10000, x / 100 % 100, x % 100) - days_20141009).days)
def countByDays_if(dataframe, start_day, end_day):
    """Aggregate qty_alipay_njhs over the inclusive day window [start_day, end_day].

    Returns a dict with the window index, the date range label, the remapped
    item id and the quantity normalised to a full num_days window, or None
    when the window is invalid or contains no rows.
    """
    if start_day > end_day:
        return None
    # Restrict to rows whose day offset falls inside the window.
    dataframe = dataframe[dataframe['days_20141009']>=start_day]
    dataframe = dataframe[dataframe['days_20141009']<=end_day]
    print dataframe[['date','qty_alipay_njhs']]
    if len(dataframe)<=0:
        return None
    # Scale factor so partial windows are normalised to num_days days.
    per = float(num_days)/float(end_day-start_day+1)
    temp = {}
    temp['twoWeek'] = (end_day-1)/num_days  # window index (Python 2 integer division)
    temp['date'] = str(start_day)+"_"+str(end_day)
    # Remap to the compact id built by transItemID(); assumes all rows in the
    # window share one item_id, so mean() recovers it.
    temp['item_id'] = item_id_dict[int(dataframe.item_id.mean())]
    temp['qty'] = dataframe.qty_alipay_njhs.sum()*per
    print temp
    return temp
def TransferDataByDays_if():
    """Aggregate each item's daily series into num_days windows, newest first.

    Walks each item's day range backwards in num_days steps, aggregates every
    window via countByDays_if, plots the per-item series, and writes all rows
    to if_all_14.csv. Returns the aggregated DataFrame.
    """
    flag = 0
    for i,father_kid_item in filtered_outlier_if.groupby(['item_id']):
        first_day = father_kid_item.days_20141009.min()
        last_day = father_kid_item.days_20141009.max()
        flag_day = last_day
        father_kid_item = father_kid_item.sort_values('days_20141009')
        new_father_kid_item_temp=[]
        while(flag_day>=first_day):
            flag_day = flag_day - num_days
            if (flag_day<=first_day):
                # Oldest (possibly partial) window at the start of the range.
                ff = countByDays_if(father_kid_item, first_day, flag_day+num_days)
            else:
                ff = countByDays_if(father_kid_item, flag_day+1, flag_day+num_days)
            if ff == None:
                print "这里有个None"
                continue
            new_father_kid_item_x.append(ff)       # global accumulator (saved below)
            new_father_kid_item_temp.append(ff)    # per-item rows for plotting
        showGraph_all(new_father_kid_item_temp)
        # return
        new_father_kid_item_temp = []
        flag = flag+1
    print new_father_kid_item_x
    dataframe = pd.DataFrame(new_father_kid_item_x,columns=['twoWeek','date','item_id','qty'])
    dataframe.to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                     "Data/if_all_14.csv",index = None,columns=None)
    return dataframe
def TransferDataByDays_isf():
    """Same as TransferDataByDays_if but grouped by (item_id, store_code).

    NOTE(review): appends to the same global list new_father_kid_item_x and
    writes the same output path "Data/if_all_14.csv" as the _if variant, so
    when run after it (as at the bottom of this script) the saved CSV contains
    both passes' rows and overwrites the _if output — an isf-specific filename
    was probably intended; confirm.
    """
    flag = 0
    for i,father_kid_item in filtered_outlier_isf.groupby(['item_id','store_code']):
        first_day = father_kid_item.days_20141009.min()
        last_day = father_kid_item.days_20141009.max()
        flag_day = last_day
        father_kid_item = father_kid_item.sort_values('days_20141009')
        new_father_kid_item_temp=[]
        while(flag_day>=first_day):
            flag_day = flag_day - num_days
            if (flag_day<=first_day):
                # Oldest (possibly partial) window at the start of the range.
                ff = countByDays_if(father_kid_item, first_day, flag_day+num_days)
            else:
                ff = countByDays_if(father_kid_item, flag_day+1, flag_day+num_days)
            if ff == None:
                print "这里有个None"
                continue
            new_father_kid_item_x.append(ff)
            new_father_kid_item_temp.append(ff)
        showGraph_kid(new_father_kid_item_temp,i[1])  # i[1] is the store_code
        new_father_kid_item_temp = []
        flag = flag+1
        # if flag>10:
        #     return
    print new_father_kid_item_x
    dataframe = pd.DataFrame(new_father_kid_item_x,columns=['twoWeek','date','item_id','qty'])
    dataframe.to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                     "Data/if_all_14.csv",index = None,columns=None)
    return dataframe
def showGraph_kid(dataframe, store_code):
    """Plot qty per window for one (item, store) group and save it as a JPEG."""
    # Groups with fewer than four aggregated windows are not worth plotting.
    if len(dataframe) < 4:
        return
    frame = pd.DataFrame(dataframe, columns=['twoWeek', 'date', 'item_id', 'qty'])
    plt.plot(frame.twoWeek, frame.qty)
    plt.grid()
    axis = plt.gca()
    axis.xaxis.set_major_locator(xmajorLocator)
    axis.tick_params(labelright=True)
    out_path = '/Users/zhuohaizhen/Desktop/figure/'+str(frame.item_id.max())+"_"+str(store_code)+".jpg"
    plt.savefig(out_path)
    plt.close('all')
def showGraph_all(dataframe):
    """Plot qty per window for one item (all stores) and save it as a JPEG."""
    # Skip series with too few aggregated windows.
    if len(dataframe) < 4:
        return
    frame = pd.DataFrame(dataframe, columns=['twoWeek', 'date', 'item_id', 'qty'])
    plt.plot(frame.twoWeek, frame.qty)
    plt.grid()
    axis = plt.gca()
    axis.xaxis.set_major_locator(xmajorLocator)
    axis.tick_params(labelright=True)
    out_path = '/Users/zhuohaizhen/Desktop/figure/'+str(frame.item_id.max())+"_all"+".jpg"
    plt.savefig(out_path)
    plt.close('all')
def transItemID():
    """Build a compact item_id -> new_id mapping and persist it to CSV.

    Ids are assigned in ascending order of item frequency; the mapping is
    also stored in the module-level item_id_dict for later lookups.
    """
    item_ids = filtered_outlier_if.item_id.value_counts().sort_values().index
    rows = []
    for new_id, original_id in enumerate(item_ids):
        item_id_dict[original_id] = new_id
        rows.append({'item_id': original_id, 'new_id': new_id})
    pd.DataFrame(rows, columns=['item_id','new_id']).to_csv("/Users/zhuohaizhen/PycharmProjects/Tianchi_Python/"
                                                            "Data/item_id.csv",index = None,columns=None)
# Build the compact id mapping first; the aggregators look ids up in it.
transItemID()
TransferDataByDays_if()
TransferDataByDays_isf() |
import requests
response = requests.get('https://api.github.com')
# Check specific status codes.
if response.status_code == 200:
    print("Success")
elif response.status_code == 404:
    print('Not Found')
# More general check: a Response is truthy for any status in [200, 400).
if response:
    print('Success')
else:
    print('An error has occured')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
import hashlib
import pprint
import sys
import logging
import traceback
import getopt
import re
from web.wsgiserver import CherryPyWSGIServer
import json
import threading
import time
import sqlite3
from wechatpy import WeChatClient
from wechatpy import parse_message
from wechatpy.replies import TextReply
# TLS certificate/key for the CherryPy WSGI server (HTTPS endpoint).
CherryPyWSGIServer.ssl_certificate = "D:\\src\\git projs\\horizonadn-ah\\src\\wechat-server.crt"
CherryPyWSGIServer.ssl_private_key = "D:\\src\\git projs\\horizonadn-ah\\src\\wechat-server.key"
AHSERVER_DB_NAME = "ahserver.db"  # SQLite file holding device <-> wx-user bindings
# WeChat public-platform settings, filled from the command line below.
APPID = None
APPSEC = None
TPL_ID = None       # template id used for push notifications
TOKEN = None        # token for validating messages from the WeChat platform
ACCESSTOKEN = None  # pre-issued access token (alternative to APPID/APPSEC)
DEVICES = {}        # device_id -> state dict (conn_st, last_conn_time, GCMD)
AHDEV_WXUSER_BINDS = []  # cached rows from the AHDEVICE_WXUSERS table
# Parse options starting at argv[2]; argv[1] is the listen port (see usage()).
try:
    opts, args = getopt.getopt(sys.argv[2:], "hu:p:t:e:a:", ["help", "username=", "password=", "token=", "template=", "accesstoken="])
    for name, value in opts:
        if name in ('-h', '--help'):
            usage()
            sys.exit(1)
        elif name in ('-u', '--username'):
            APPID = value
        elif name in ('-p', '--password'):
            APPSEC = value
        elif name in ('-t', '--token'):
            TOKEN = value
        elif name in ('-e', '--template'):
            TPL_ID = value
        elif name in ('-a', '--accesstoken'):
            ACCESSTOKEN = value
        else:
            usage()
            sys.exit(1)
except:
    # NOTE(review): this bare except also catches the SystemExit raised above.
    usage()
    sys.exit(1)
# TOKEN and TPL_ID are mandatory; credentials are APPID+APPSEC or ACCESSTOKEN.
if not (TOKEN and TPL_ID and ((APPID and APPSEC) or ACCESSTOKEN)):
    usage()
    sys.exit(1)
def do_log(string):
    """Log *string* to stdout and to the shared web.LOG file logger."""
    print string
    web.LOG.info(string)
# URL routing table for the web.py application.
urls = (
    '/info', 'HandleInfo',
    '/wxaction', 'HandleWxAction', # msg from tencent wx public platform click template msg
    '/wx', 'HandleWxMsg', # msg from tencent wx public platform
    '/', 'HandleAHMsg', # msg from ah devices
)
render = web.template.render('templates')  # HTML template directory
class HandleInfo(object):
    """web.py handler: renders the device/binding status page at /info."""
    def GET(self):
        DEV = web.DEVICES
        return render.devinfo(DEV.items(), web.AHDEV_WXUSER_BINDS)
class HandleWxAction(object):
    """web.py handler for template-message click-throughs (/wxaction).

    Currently only logs the query parameters and returns a placeholder.
    """
    def GET(self):
        user_data = web.input()
        do_log(user_data)
        return "hello HandleWxAction"
def ah_msg_handler(command):
    """Handle one JSON message from an AH device.

    Updates the device registry, broadcasts WeChat template notifications on
    "ahup"/"ahdown" events, and answers "hello" heartbeats (optionally
    instructing the device to run a queued global command).
    Returns the reply dict to be serialised back to the device.
    """
    msgret = {}
    wxclient = web.WXCLIENT
    ip = command.get("ip", "").strip()
    protocol = command.get("protocol", "").strip()
    port = str(command.get("port", ""))
    comment = command.get("comment", "").strip()
    ahaction = command.get("ahaction", "").strip().lower()
    paras = command.get("paras", {})
    device_id = command.get("device_id", "").strip()
    time_dev = command.get("time_dev", "")
    public_server = command.get("public_server", "")
    # Messages without a device id or an action get an empty reply.
    if not device_id:
        return msgret
    if not ahaction:
        return msgret
    DEV = web.DEVICES
    # Register/refresh the device record on every message.
    devinfo = DEV.setdefault(device_id, {})
    devinfo["last_conn_time"] = time.time()
    devinfo["conn_st"] = "connected"
    msgret["ahactionback"] = ahaction
    if ahaction in ["ahup", "ahdown"]:
        is_up = (ahaction == "ahup")
        do_log("rcvah <=== %s" % pprint.pformat(command))
        followers = wxclient.user.get_followers()
        do_log("got followers %s" % pprint.pformat(followers))
        # Template-message payload: coloured fields shown to subscribers.
        p = {
            "ip": {
                "value": "%s:%s %s" % (ip, port,protocol),
                "color": "#173177"
            },
            "comment":{
                "value": comment,
                "color": "#173177"
            },
            "state": {
                "value": "UP" if is_up else "DOWN",
                "color": "#228B22" if is_up else "#ff0000"  # green for up, red for down
            },
            "time": {
                "value": time_dev,
                "color": "#173177"
            }
        }
        if is_up:
            jumpurl = ""
        else:
            jumpurl = ""
            # jumpurl = "%s/wxaction?ip=%s&port=%s&protocol=%s" % (public_server, ip, port, protocol)
        # Notify every follower of the public account; failures are per-user.
        for open_id in followers["data"]["openid"]:
            # to me only
            # if open_id != "olzKBwKSad_SdyWSN-hiz2BfNE2w":
            #     continue
            try:
                ret = wxclient.message.send_template(open_id, TPL_ID, jumpurl, "", p)
            except Exception as e:
                do_log("send msg to %s err %s" % (open_id, e))
            else:
                do_log("send msg to %s ok ret %s" % (open_id, pprint.pformat(ret)))
    elif ahaction == "hello":
        # Heartbeat: if a global command was queued by wx_msg_handler, tell
        # the device to execute it and clear the flag.
        if devinfo.get("GCMD"):
            msgret["ahactionback"] = "do_ah_all"
            devinfo.pop("GCMD", 0)
    return msgret
def bind_dev_wxuser_db(dev_id, from_openid):
    """Bind a device id to a WeChat openid in SQLite (upsert semantics).

    Updates the existing row for *from_openid* when one exists, otherwise
    inserts a new binding. Returns 0 on success, -1 on any DB error.
    """
    conn = None
    try:
        conn = sqlite3.connect(AHSERVER_DB_NAME)
        # fix: count in SQL instead of fetching every matching row into
        # Python just to count it.
        cur = conn.execute('select count(1) from AHDEVICE_WXUSERS where WX_OPENID=?;', (from_openid, ))
        cnt = cur.fetchone()[0]
        with conn:  # commits on success, rolls back on exception
            if cnt:
                conn.execute('update AHDEVICE_WXUSERS set AH_DEVICEID=?,BIND_TIME=datetime("now", "localtime") where WX_OPENID=?;', (dev_id, from_openid))
            else:
                conn.execute('insert into AHDEVICE_WXUSERS (AH_DEVICEID,WX_OPENID,BIND_TIME) values (?,?,datetime("now", "localtime"));', (dev_id, from_openid))
    except Exception:
        do_log(traceback.format_exc())
        return -1
    finally:
        if conn:
            conn.close()
    return 0
def unbind_wxuser_db(from_openid):
    """Delete every device binding stored for *from_openid*.

    Returns 0 on success, -1 when the database operation fails.
    """
    conn = None
    try:
        conn = sqlite3.connect(AHSERVER_DB_NAME)
        # The connection context manager commits the delete (or rolls back).
        with conn:
            conn.execute('delete from AHDEVICE_WXUSERS where WX_OPENID=?;', (from_openid,))
    except Exception:
        do_log(traceback.format_exc())
        return -1
    finally:
        if conn:
            conn.close()
    return 0
def has_chinese(check_str):
    """Return a match object when *check_str* contains any CJK character, else None."""
    # re caches compiled patterns, so searching directly is equivalent to
    # compiling first and then searching.
    return re.search(u'[\u4e00-\u9fa5]+', check_str)
def wx_msg_handler(msg):
    """Dispatch a WeChat text message to the matching command handler.

    Supported commands (Chinese): 自愈 "self-heal", 绑定 <id> "bind device",
    解绑 "unbind", 查询绑定 "list bindings". Anything else gets the help text.
    Returns a TextReply ready to be rendered back to the platform.
    """
    DEV = web.DEVICES
    # Help text: "Commands: send '自愈' to try to heal all failed services."
    helpmsg = "操作指令:\n发送\"自愈\": 尝试自愈所有处于故障状态的业务。"
    # helpmsg = "内部错误"
    reply = TextReply(message=msg)
    reply.content = helpmsg
    from_openid = msg.source
    if msg.type == "text":
        # "自愈": queue a reboot command on every connected device; devices
        # pick it up on their next "hello" heartbeat (see ah_msg_handler).
        ah_do = re.match(u"\s*自愈\s*", msg.content)
        if ah_do:
            reply.content = "自愈命令已下发..."
            for devid, devinfo in DEV.items():
                if devinfo["conn_st"] == "connected":
                    devinfo["GCMD"] = "reboot"
            return reply
        # "绑定 <device_id>": bind this WeChat user to an online device.
        ah_do = re.match(u"\s*绑定\s*(\S+)\s*", msg.content)
        if ah_do:
            dev_id = ah_do.groups()[0]
            do_log("wx bind req [%s] [%s]" % (dev_id, from_openid))
            if has_chinese(dev_id):
                reply.content = "设备ID非法。"  # "illegal device id"
                return reply
            if DEV.get(dev_id, {}).get("conn_st") != "connected":
                reply.content = "设备%s不在线,无法绑定。" % str(dev_id)
                return reply
            if bind_dev_wxuser_db(str(dev_id), str(from_openid)) < 0:
                reply.content = "设备%s绑定失败。" % str(dev_id)
                return reply
            reply.content = "设备%s绑定成功。" % str(dev_id)
            refresh_dev_wx_binds()
            return reply
        # "解绑": remove all bindings for this user.
        ah_do = re.match(u"\s*解绑\s*", msg.content)
        if ah_do:
            do_log("wx unbind user [%s]" % (from_openid))
            if unbind_wxuser_db(from_openid) < 0:
                # NOTE(review): "解绑绑定。" reads like a typo for an
                # unbind-failed message (e.g. "解绑失败。") — confirm before changing.
                reply.content = "解绑绑定。"
                return reply
            reply.content = "解绑成功。"
            refresh_dev_wx_binds()
            return reply
        # "查询绑定": list the devices bound to this user.
        ah_do = re.match(u"\s*查询绑定\s*", msg.content)
        if ah_do:
            dev = get_bound_dev_from_wx_openid(from_openid)
            if dev:
                msg = "本微信已绑定设备:\n"
                for d in dev:
                    msg += str(d)
                    msg += "\n"
            else:
                msg = "无绑定设备。"
            reply.content = msg
            return reply
    return reply
class wxTokenThread(threading.Thread):
    """Background thread that rebuilds web.WXCLIENT once per hour.

    Uses the fixed ACCESSTOKEN when one was supplied on the command line,
    otherwise creates the client from APPID/APPSEC.
    """
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        while True:
            if ACCESSTOKEN:
                # Fixed token supplied via -a/--accesstoken.
                web.WXCLIENT = WeChatClient(None, None, access_token=ACCESSTOKEN)
                do_log("WX new token [%s] %s" % (ACCESSTOKEN, web.WXCLIENT))
            else:
                web.WXCLIENT = WeChatClient(APPID, APPSEC)
                do_log("WX new token %s %s %s" % (APPID, APPSEC, web.WXCLIENT))
            time.sleep(3600)
class HandleAHMsg(object):
    """web.py handler for JSON messages posted by AH devices at '/'."""
    def GET(self):
        # Simple liveness probe.
        return "hello"
    def POST(self):
        """Parse the JSON body, delegate to ah_msg_handler, reply as JSON.

        Errors are logged and echoed back in the "msg" field instead of
        failing the HTTP request.
        """
        msgret = {}
        try:
            bodystr = web.data()
            command = json.loads(bodystr)
            msgret = ah_msg_handler(command)
        except Exception as e:
            do_log("%s" % traceback.format_exc())
            msgret["msg"] = str(e)
        return json.dumps(msgret)
class HandleWxMsg(object):
    """web.py handler for WeChat platform messages and URL verification (/wx)."""

    def __check_msg(self):
        """Verify the WeChat signature: 0 = valid, -1 = mismatch, -2 = bad request."""
        try:
            data = web.input()
            signature = data.signature
            timestamp = data.timestamp
            nonce = data.nonce
            token = TOKEN  # fill in per the official-account console basic config
            # Signature = sha1 over the sorted (token, timestamp, nonce) triple.
            # NOTE: `list` shadows the builtin, and map() only drives the
            # updates eagerly under Python 2 (this file's dialect) — on
            # Python 3 map is lazy and nothing would be hashed.
            list = [token, timestamp, nonce]
            list.sort()
            sha1 = hashlib.sha1()
            map(sha1.update, list)
            hashcode = sha1.hexdigest()
            if hashcode != signature:
                return -1
        except Exception as e:
            return -2
        return 0

    def POST(self):
        """Receive a platform message, dispatch it, and return the reply XML.

        Invalid or failing messages are acknowledged with "success" so the
        platform does not retry indefinitely.
        """
        if self.__check_msg():
            do_log("rcv invalid wx msg.")
            return "success"
        try:
            xml = web.data()
            msg = parse_message(xml)
            do_log("rcvwx <=== %s" % msg)
            reply = wx_msg_handler(msg)
            xml = reply.render()
            return xml
        except Exception as e:
            do_log(traceback.format_exc())
            return "success"

    def GET(self):
        """Answer the WeChat server-URL verification handshake (echostr)."""
        try:
            data = web.input()
            if len(data) == 0:
                return "hello, this is handle view"
            signature = data.signature
            timestamp = data.timestamp
            nonce = data.nonce
            echostr = data.echostr
            token = TOKEN  # fill in per the official-account console basic config
            # Same sha1-over-sorted-triple scheme as __check_msg above.
            list = [token, timestamp, nonce]
            list.sort()
            sha1 = hashlib.sha1()
            map(sha1.update, list)
            hashcode = sha1.hexdigest()
            do_log("rcv valid msg %s %s" % (hashcode, signature))
            if hashcode == signature:
                return echostr
            else:
                return ""
        except Exception, Argument:
            # Python 2-only except syntax; returns the exception as the body.
            return Argument
def main_loop():
    """Periodic housekeeping over the shared device registry.

    Devices silent for more than 3600s are dropped; more than 30s are marked
    "disconnected"; more than 15s are marked "connecting".
    """
    DEV = web.DEVICES
    now = time.time()
    # fix: snapshot the items before iterating — we pop entries from DEV in
    # the loop, and mutating a dict while iterating its items() view is a
    # RuntimeError on Python 3 (on Python 2 items() already copied).
    for devid, devinfo in list(DEV.items()):
        diff = now - devinfo.get("last_conn_time", 0)
        if diff > 3600:
            DEV.pop(devid, 0)
        elif diff > 30:
            devinfo["conn_st"] = "disconnected"
        elif diff > 15:
            devinfo["conn_st"] = "connecting"
class mainLoopThread(threading.Thread):
    """Runs main_loop() every 5 seconds, logging (not propagating) errors."""
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        while True:
            try:
                main_loop()
            except Exception as e:
                do_log(traceback.format_exc())
            time.sleep(5)
def get_bound_dev_from_wx_openid(openid):
    """Return the list of device ids currently bound to *openid*."""
    return [bind["dev"] for bind in web.AHDEV_WXUSER_BINDS if bind["openid"] == openid]
def refresh_dev_wx_binds():
    """Reload device<->openid bindings from SQLite into web.AHDEV_WXUSER_BINDS.

    Returns 0 on success, -1 on a DB error (the cache is left untouched then).
    """
    ret = []
    # fix: initialise conn before the try — if sqlite3.connect() raised, the
    # finally block referenced an unbound 'conn' and masked the real error
    # with a NameError.
    conn = None
    try:
        conn = sqlite3.connect(AHSERVER_DB_NAME)
        for row in conn.execute('select AH_DEVICEID,WX_OPENID from AHDEVICE_WXUSERS;'):
            ret.append({"dev":row[0],"openid":row[1]})
    except Exception:
        do_log(traceback.format_exc())
        return -1
    finally:
        if conn:
            conn.close()
    web.AHDEV_WXUSER_BINDS = ret
    return 0
def init_ahserver_db():
    """Create the SQLite bindings table if missing; 0 on success, -1 on error."""
    conn = None
    try:
        conn = sqlite3.connect(AHSERVER_DB_NAME)
    except Exception as e:
        do_log("failed to create ah db %s" % e)
        return -1
    if not conn:
        do_log("failed to open ah db.")
        return -1
    try:
        # The connection context manager commits the DDL (or rolls back).
        with conn:
            conn.execute('''CREATE TABLE if not exists AHDEVICE_WXUSERS (
            ID INTEGER PRIMARY KEY AUTOINCREMENT,
            AH_DEVICEID TEXT NOT NULL,
            WX_OPENID TEXT NOT NULL,
            BIND_TIME DATETIME NOT NULL
            );''')
    except Exception as e:
        do_log("failed to create tables %s" % e)
        return -1
    finally:
        conn.close()
    return 0
def usage():
print "ah-pbulic-server-all.py <listen port> -u <username> -p <passowrd> -a <access token> -t <token> -e <template id>"
print "-u user name of wx public platform"
print "-p password of wx public platform"
print "-a access token of wx public platform, without -u -p"
print "-t token of wx public platform"
print "-e template id to send ah msg via wx public platform"
if __name__ == '__main__':
    # File logger, shared through the web module so handlers can call do_log().
    LOG = logging.getLogger("AHSRV")
    LOG.setLevel(logging.DEBUG)
    fh = logging.FileHandler("ah-server.log")
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(message)s')
    fh.setFormatter(formatter)
    LOG.addHandler(fh)
    web.LOG = LOG
    do_log("===== AH server start on %s =====" % sys.argv[1])
    if init_ahserver_db() < 0:
        sys.exit(-1)
    # Share mutable state via the web module (read by all handler classes).
    web.DEVICES = DEVICES
    web.AHDEV_WXUSER_BINDS = AHDEV_WXUSER_BINDS
    # Background workers: hourly token refresh and 5s device housekeeping.
    t = wxTokenThread()
    t.start()
    t = mainLoopThread()
    t.start()
    refresh_dev_wx_binds()
    # web.py reads the listen port from sys.argv[1].
    app = web.application(urls, globals())
    app.run()
|
from django.contrib.auth import authenticate, login, logout, get_user_model
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse
from .form import LoginForm, RegisterForm, Panel
from .models import Profile
def login_page(request):
    """Render the login form; authenticate on POST and redirect on success.

    A successful login honours the ?next= query parameter, falling back to
    the 'homepage' URL; a failed login re-renders the form with an error.
    """
    form = LoginForm(request.POST or None)
    next_url = request.GET.get('next')
    context = {
        "form": form,
    }
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is None:
            # Persian: "No user was found with these credentials!"
            context = {
                "form": form,
                "error": "کاربری با این مشخصات یافت نشد!",
            }
        else:
            login(request, user)
            destination = next_url if next_url else reverse('homepage')
            return HttpResponseRedirect(destination)
    return render(request, 'accounts/login.html', context)
def logout_view(request):
    """Terminate the session and send the user back to the site root."""
    logout(request)
    return redirect('/')
User = get_user_model()
def register(request):
    """Create a new account (plus an empty Profile), log in, redirect home.

    Authenticated visitors are bounced straight to '/'.
    NOTE(review): the credentials are read from request.POST rather than
    form.cleaned_data, so RegisterForm's cleaning output is bypassed even
    though is_valid() gates the branch — confirm this is intentional.
    """
    form = RegisterForm(request.POST or None)
    if request.user.is_authenticated:
        return redirect('/')
    context = {'form': form}
    if form.is_valid():
        username, email, password = request.POST.get('username'), request.POST.get('email'), request.POST.get(
            'password')
        new_user = User.objects.create_user(username=username, password=password, email=email)
        # Every account gets an associated (empty) profile row.
        Profile(user=new_user).save()
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('/')
        return redirect('/')
    return render(request, 'accounts/register.html', context)
def panel(request):
    """Render the user panel page (always 'accounts/user_complete.html').

    NOTE(review): a valid POST does not persist the Panel form anywhere —
    Profile.objects.get_queryset() builds an unused queryset (see the TODO).
    """
    form = Panel(request.POST or None)
    context = {
        'form': form,
        'header': 'panel',
    }
    # print(phone)
    # print(address)
    # print(birth)
    # print(gender)
    if request.method == "POST":
        if form.is_valid():
            Profile.objects.get_queryset() # TODO just fix this
    return render(request, 'accounts/user_complete.html', context)
|
from flask import Flask, render_template
from threading import Thread
from datetime import datetime
import logging
import os
import discord
from discord.ext import commands, tasks
import requests
import json
import aiohttp
import urllib.request
# Silence per-request logging from Flask's development server.
logging.getLogger('werkzeug').disabled = True
os.environ['WERKZEUG_RUN_MAIN'] = 'true'
t_start = 0  # replaced with a datetime by run(); see NOTE below
app = Flask('')
@app.route('/')
def main():
    """Keep-alive endpoint reporting the bot's uptime."""
    # NOTE(review): if this route is hit before run() sets t_start, the
    # subtraction is datetime - int and raises TypeError.
    return 'SET Hacks Bot is online - Uptime: {}'.format(datetime.utcnow() -
                                                         t_start)
def run():
    """Record the launch time, then start the keep-alive Flask server (blocking)."""
    global t_start
    t_start = datetime.utcnow()
    app.run(host="0.0.0.0", port=8080)
def keep_alive():
    """Launch the keep-alive web server on a background thread."""
    Thread(target=run).start()
def shutdown():
    # NOTE(review): this is broken — it creates a brand-new, never-started
    # thread and calls the private Thread._stop() on it, which neither stops
    # the server started by keep_alive() nor works on Python 3 (where _stop
    # takes no arguments / is an internal Event).
    server = Thread(target=run)
    server._stop()
# for repl.it
class SelfPing(commands.Cog):
    """Discord cog that pings the bot's own web endpoint every minute.

    Keeps the repl.it-hosted process awake; the body of the most recent
    successful response is stored and exposed through the !ping command.
    """
    def __init__(self, bot):
        self.bot = bot
        self.lastping = None
        self.response = None      # decoded body of the last successful self-ping
        self._last_member = None
        self.selfping.start()     # begin the 1-minute background loop

    def cog_unload(self):
        # Stop the background loop when the cog is removed.
        self.selfping.cancel()

    @tasks.loop(minutes=1.0)
    async def selfping(self):
        """Fetch the keep-alive URL once and remember the response body."""
        user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
        headers={'User-Agent':user_agent}
        url = 'https://SET-Bot--itchono.repl.co'
        try:
            request = urllib.request.Request(url,None,headers)
            response = urllib.request.urlopen(request)
            self.response = response.read().decode("utf-8")
        except:
            # NOTE(review): bare except hides the actual failure reason.
            print("ERROR pinging self!")

    @commands.command(name="ping")
    async def ping(self, ctx : commands.Context):
        """Report the body returned by the most recent self-ping."""
        await ctx.send("Last Response:`{}`".format(self.response))

    @selfping.before_loop
    async def before_selfping(self):
        # Delay the first ping until the bot session is fully ready.
        await self.bot.wait_until_ready()
        print("Self ping routine started.")
|
from django.shortcuts import render
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from .forms import StudentForm
from .models import *
# Create your views here.
def home(request):
    """Render the static landing page."""
    return render(request, 'home.html', {})
def form(request):
    """Render the student sign-up page with an unbound StudentForm."""
    return render(request, 'form.html', {'form': StudentForm()})
def about(request):
    """Render the static about page."""
    return render(request, 'about.html', {})
def submit(request):
    """Persist a Student entry from the submitted form, then show the thanks page.

    fix: the original called form.is_valid() but ignored its result — on
    invalid input, cleaned_data lacked the required keys and the view crashed
    with a KeyError. Invalid submissions now re-render the form page with the
    bound form so field errors are shown.
    """
    form = StudentForm(request.POST)
    if not form.is_valid():
        return render(request, 'form.html', {'form': form})
    clean = form.cleaned_data
    # Case is normalised on write so lookups in result() match by construction.
    entry = Student(university = clean['university'].lower(), first_name = clean['first_name'], last_name = clean['last_name'],
                    email = clean['email'], department = clean['department'].upper(), course_number = clean['course_number'].upper(),
                    course_type = clean['course_type'].lower(), section = clean['section'], bio = clean['bio'])
    entry.save()
    return render(request, 'submit.html', {})
def search(request):
    """Render the search page; reuses StudentForm for the query fields."""
    return render(request, 'search.html', {'form': StudentForm()})
def result(request):
    """Show students matching the searched university/department/course.

    NOTE(review): form.is_valid()'s return value is ignored — invalid input
    raises a KeyError when cleaned_data is indexed below.
    """
    form = StudentForm(request.POST)
    form.is_valid()
    clean = form.cleaned_data
    # Case normalisation mirrors submit(), so stored and queried values match.
    students = Student.objects.filter(university=clean['university'].lower()).filter(department=clean['department'].upper()).filter(course_number=clean['course_number'].upper())
    # NOTE(review): `'course_type' in clean` is True whenever the field is
    # declared on the form, even when left blank — a truthiness check on the
    # value (clean.get('course_type')) was probably intended. Same for section.
    if 'course_type' in clean:
        students = students.filter(course_type=clean['course_type'].lower())
    if 'section' in clean:
        students = students.filter(section=clean['section'])
    return render(request, 'result.html', {'students': students})
#Importing necessary libraries
import os
import shutil
import random
import numpy as np
import cv2
import efficientnet.tfkeras as efn
from PIL import Image, ImageOps
import tensorflow as tf
from keras.models import load_model
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import streamlit as st
# Suppress the legacy file-uploader encoding deprecation warning.
st.set_option('deprecation.showfileUploaderEncoding',False)
# Page title of the web app.
st.title("Upload + Classification Example")
# Cache the loaded model so it is not re-read from disk on every rerun.
@st.cache(allow_output_mutation=True)
def load_our_model():
    """Load the diabetic-retinopathy Keras model from the fixed Colab path."""
    model = tf.keras.models.load_model('/content/DR B3.h5')
    return model
# Load the model once at startup.
model = load_our_model()
# Accept a PNG upload and convert it to a numpy array.
uploaded_file = st.file_uploader("Choose an image...", type="png")
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    image = np.array(image)
    # NOTE(review): PIL yields RGB, so applying COLOR_BGR2RGB here actually
    # swaps the array into BGR order — confirm the model expects that.
    img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    new_array = cv2.resize(img, (128, 128))
    new_array = new_array/255  # scale pixel values to [0, 1]
    st.image(image, caption='Uploaded Image.', use_column_width=True)
    st.write("")
    st.write("Classifying...")
    from keras.preprocessing.image import load_img
    from keras.preprocessing.image import img_to_array
    # Predict the class and map the argmax index to a human-readable label.
    x = np.expand_dims(new_array, axis=0)
    y = model.predict(x)
    y_classes = y.argmax(axis=-1)
    # NOTE(review): index 0 falls through to "Severe"; confirm this matches
    # the class order the model was trained with.
    if y_classes == 1:
        label = "Mild"
    elif y_classes == 2:
        label = "Moderate"
    elif y_classes == 3:
        label = "No_DR"
    elif y_classes == 4:
        label = "Proliferate_DR"
    else:
        label = "Severe"
    st.write(label)
|
from keras.layers import Dense, Activation, Dropout, Bidirectional
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.applications.vgg16 import VGG16
from keras.optimizers import SGD
from keras.optimizers import Adam
from keras import backend as K
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint
import os
import numpy as np
from ..library.utility.frame_extractors.vgg16_feature_extractor import extract_vgg16_features_live, \
scan_and_extract_vgg16_features
# Training hyperparameters shared by both classifiers below.
BATCH_SIZE = 4            # samples per training/validation batch
NUM_EPOCHS = 50
VERBOSE = 1
HIDDEN_UNITS = 32         # LSTM hidden size
MAX_ALLOWED_FRAMES = 20
EMBEDDING_SIZE = 100
# Channels-last ("tf") image ordering for the VGG16 feature extractor.
K.set_image_dim_ordering('tf')
def generate_batch(x_samples, y_samples):
    """Yield (x, y) mini-batches of BATCH_SIZE forever, in sample order.

    Trailing samples that do not fill a whole batch are never yielded.
    """
    num_batches = len(x_samples) // BATCH_SIZE
    while True:
        for batch_idx in range(num_batches):
            lo = batch_idx * BATCH_SIZE
            hi = lo + BATCH_SIZE
            yield np.array(x_samples[lo:hi]), y_samples[lo:hi]
class VGG16BidirectionalLSTMVideoClassifier(object):
    """Video classifier: per-frame VGG16 features fed to a bidirectional LSTM.

    fit() extracts (or reuses cached) VGG16 features per video, fixes every
    sequence to the mean frame count, trains the LSTM head, and persists
    config/architecture/weights. load_model() restores the pipeline and
    predict() classifies a single video file.
    """
    model_name = 'vgg16-bidirectional-lstm'

    def __init__(self):
        self.num_input_tokens = None   # length of one frame's feature vector
        self.nb_classes = None         # number of target labels
        self.labels = None             # label -> class index
        self.labels_idx2word = None    # class index -> label
        self.model = None              # trained LSTM classification head
        self.vgg16_model = None        # VGG16 feature extractor
        self.expected_frames = None    # fixed sequence length used by the head
        self.vgg16_include_top = True
        self.config = None

    def create_model(self):
        """Build and compile the (untrained) bidirectional-LSTM head."""
        model = Sequential()
        model.add(Bidirectional(LSTM(units=HIDDEN_UNITS, return_sequences=True),
                                input_shape=(self.expected_frames, self.num_input_tokens)))
        model.add(Bidirectional(LSTM(2)))
        model.add(Dense(32, activation='relu'))
        model.add(Dropout(0.7))
        model.add(Dense(self.nb_classes))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        return model

    @staticmethod
    def get_config_file_path(model_dir_path, vgg16_include_top=None):
        """Return the .npy config path ('hi-dim' variant when include_top is False)."""
        if vgg16_include_top is None:
            vgg16_include_top = True
        if vgg16_include_top:
            return model_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-config.npy'
        else:
            return model_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-hi-dim-config.npy'

    @staticmethod
    def get_weight_file_path(model_dir_path, vgg16_include_top=None):
        """Return the .h5 weights path ('hi-dim' variant when include_top is False)."""
        if vgg16_include_top is None:
            vgg16_include_top = True
        if vgg16_include_top:
            return model_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-weights.h5'
        else:
            return model_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-hi-dim-weights.h5'

    @staticmethod
    def get_architecture_file_path(model_dir_path, vgg16_include_top=None):
        """Return the .json architecture path ('hi-dim' variant when include_top is False)."""
        if vgg16_include_top is None:
            vgg16_include_top = True
        if vgg16_include_top:
            return model_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-architecture.json'
        else:
            return model_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-hi-dim-architecture.json'

    def load_model(self, config_file_path, weight_file_path):
        """Restore config and weights, then rebuild both VGG16 and the head."""
        if os.path.exists(config_file_path):
            print('loading configuration from ', config_file_path)
        else:
            raise ValueError('cannot locate config file {}'.format(config_file_path))
        # NOTE(review): NumPy >= 1.16.3 requires allow_pickle=True for this
        # load — confirm against the pinned NumPy version.
        config = np.load(config_file_path).item()
        self.num_input_tokens = config['num_input_tokens']
        self.nb_classes = config['nb_classes']
        self.labels = config['labels']
        self.expected_frames = config['expected_frames']
        self.vgg16_include_top = config['vgg16_include_top']
        self.labels_idx2word = dict([(idx, word) for word, idx in self.labels.items()])
        self.config = config
        self.model = self.create_model()
        if os.path.exists(weight_file_path):
            print('loading network weights from ', weight_file_path)
        else:
            raise ValueError('cannot local weight file {}'.format(weight_file_path))
        self.model.load_weights(weight_file_path)
        print('build vgg16 with pre-trained model')
        vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
        vgg16_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
        self.vgg16_model = vgg16_model

    def predict(self, video_file_path):
        """Classify one video file and return its predicted label string."""
        x = extract_vgg16_features_live(self.vgg16_model, video_file_path)
        frames = x.shape[0]
        # Truncate or zero-pad the frame sequence to the trained length.
        if frames > self.expected_frames:
            x = x[0:self.expected_frames, :]
        elif frames < self.expected_frames:
            temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
            temp[0:frames, :] = x
            x = temp
        predicted_class = np.argmax(self.model.predict(np.array([x]))[0])
        predicted_label = self.labels_idx2word[predicted_class]
        return predicted_label

    def fit(self, data_dir_path, model_dir_path, vgg16_include_top=True, data_set_name='UCF-101', test_size=0.3,
            random_state=42):
        """Extract features, train the LSTM head, save artifacts, return History."""
        self.vgg16_include_top = vgg16_include_top
        config_file_path = self.get_config_file_path(model_dir_path, vgg16_include_top)
        weight_file_path = self.get_weight_file_path(model_dir_path, vgg16_include_top)
        architecture_file_path = self.get_architecture_file_path(model_dir_path, vgg16_include_top)
        self.vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
        self.vgg16_model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
        feature_dir_name = data_set_name + '-VGG16-Features'
        if not vgg16_include_top:
            feature_dir_name = data_set_name + '-VGG16-HiDimFeatures'
        max_frames = 0
        self.labels = dict()
        # Features are cached on disk under feature_dir_name between runs.
        x_samples, y_samples = scan_and_extract_vgg16_features(data_dir_path,
                                                               output_dir_path=feature_dir_name,
                                                               model=self.vgg16_model,
                                                               data_set_name=data_set_name)
        self.num_input_tokens = x_samples[0].shape[1]
        # Fix the sequence length to the mean frame count over the data set.
        frames_list = []
        for x in x_samples:
            frames = x.shape[0]
            frames_list.append(frames)
            max_frames = max(frames, max_frames)
        self.expected_frames = int(np.mean(frames_list))
        print('max frames: ', max_frames)
        print('expected frames: ', self.expected_frames)
        # Truncate or zero-pad every sample to expected_frames.
        for i in range(len(x_samples)):
            x = x_samples[i]
            frames = x.shape[0]
            if frames > self.expected_frames:
                x = x[0:self.expected_frames, :]
                x_samples[i] = x
            elif frames < self.expected_frames:
                temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
                temp[0:frames, :] = x
                x_samples[i] = temp
        # Assign class indices in first-seen order, then one-hot encode.
        for y in y_samples:
            if y not in self.labels:
                self.labels[y] = len(self.labels)
        print(self.labels)
        for i in range(len(y_samples)):
            y_samples[i] = self.labels[y_samples[i]]
        self.nb_classes = len(self.labels)
        y_samples = np_utils.to_categorical(y_samples, self.nb_classes)
        config = dict()
        config['labels'] = self.labels
        config['nb_classes'] = self.nb_classes
        config['num_input_tokens'] = self.num_input_tokens
        config['expected_frames'] = self.expected_frames
        config['vgg16_include_top'] = self.vgg16_include_top
        self.config = config
        np.save(config_file_path, config)
        model = self.create_model()
        open(architecture_file_path, 'w').write(model.to_json())
        Xtrain, Xtest, Ytrain, Ytest = train_test_split(x_samples, y_samples, test_size=test_size,
                                                        random_state=random_state,shuffle=True)
        train_gen = generate_batch(Xtrain, Ytrain)
        test_gen = generate_batch(Xtest, Ytest)
        train_num_batches = len(Xtrain) // BATCH_SIZE
        test_num_batches = len(Xtest) // BATCH_SIZE
        print("debug")
        print(train_num_batches)
        print(test_num_batches)
        print("debug end")
        # The checkpoint keeps only the best validation weights during training.
        checkpoint = ModelCheckpoint(filepath=weight_file_path, save_best_only=True)
        history = model.fit_generator(generator=train_gen, steps_per_epoch=train_num_batches,
                                      epochs=NUM_EPOCHS,
                                      verbose=1, validation_data=test_gen, validation_steps=test_num_batches,
                                      callbacks=[checkpoint])
        model.save_weights(weight_file_path)
        return history
class VGG16LSTMVideoClassifier(object):
model_name = 'vgg16-lstm'
def __init__(self):
self.num_input_tokens = None
self.nb_classes = None
self.labels = None
self.labels_idx2word = None
self.model = None
self.vgg16_model = None
self.expected_frames = None
self.vgg16_include_top = None
self.config = None
@staticmethod
def get_config_file_path(model_dir_path, vgg16_include_top=None):
if vgg16_include_top is None:
vgg16_include_top = True
if vgg16_include_top:
return model_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-config.npy'
else:
return model_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-hi-dim-config.npy'
@staticmethod
def get_weight_file_path(model_dir_path, vgg16_include_top=None):
if vgg16_include_top is None:
vgg16_include_top = True
if vgg16_include_top:
return model_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-weights.h5'
else:
return model_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-hi-dim-weights.h5'
@staticmethod
def get_architecture_file_path(model_dir_path, vgg16_include_top=None):
if vgg16_include_top is None:
vgg16_include_top = True
if vgg16_include_top:
return model_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-architecture.json'
else:
return model_dir_path + '/' + VGG16LSTMVideoClassifier.model_name + '-hi-dim-architecture.json'
def create_model(self):
model = Sequential()
model.add(
LSTM(units=HIDDEN_UNITS, input_shape=(None, self.num_input_tokens), return_sequences=False, dropout=0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(self.nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
return model
def load_model(self, config_file_path, weight_file_path):
config = np.load(config_file_path).item()
self.num_input_tokens = config['num_input_tokens']
self.nb_classes = config['nb_classes']
self.labels = config['labels']
self.expected_frames = config['expected_frames']
self.vgg16_include_top = config['vgg16_include_top']
self.labels_idx2word = dict([(idx, word) for word, idx in self.labels.items()])
self.model = self.create_model()
self.model.load_weights(weight_file_path)
vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
vgg16_model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
self.vgg16_model = vgg16_model
def predict(self, video_file_path):
    """Classify a single video file and return its predicted label string."""
    features = extract_vgg16_features_live(self.vgg16_model, video_file_path)
    num_frames = features.shape[0]
    # Fit the feature sequence to the fixed length the LSTM was trained on:
    # truncate long videos, zero-pad short ones.
    if num_frames > self.expected_frames:
        features = features[0:self.expected_frames, :]
    elif num_frames < self.expected_frames:
        padded = np.zeros(shape=(self.expected_frames, features.shape[1]))
        padded[0:num_frames, :] = features
        features = padded
    class_probs = self.model.predict(np.array([features]))[0]
    return self.labels_idx2word[np.argmax(class_probs)]
def fit(self, data_dir_path, model_dir_path, vgg16_include_top=True, data_set_name='UCF-101', test_size=0.3, random_state=42):
    """Extract VGG16 features from the video data set, then train the LSTM classifier.

    :param data_dir_path: root directory of the video data set
    :param model_dir_path: directory where config/weights/architecture files are written
    :param vgg16_include_top: whether the VGG16 feature extractor keeps its top layers
    :param data_set_name: data-set name (also used to name the feature cache dir)
    :param test_size: fraction of samples held out for validation
    :param random_state: seed for the train/test split
    :return: the Keras training ``History`` object
    """
    self.vgg16_include_top = vgg16_include_top

    config_file_path = self.get_config_file_path(model_dir_path, vgg16_include_top)
    weight_file_path = self.get_weight_file_path(model_dir_path, vgg16_include_top)
    architecture_file_path = self.get_architecture_file_path(model_dir_path, vgg16_include_top)

    vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
    vgg16_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
    self.vgg16_model = vgg16_model

    feature_dir_name = data_set_name + '-VGG16-Features'
    if not vgg16_include_top:
        feature_dir_name = data_set_name + '-VGG16-HiDimFeatures'
    max_frames = 0
    self.labels = dict()
    x_samples, y_samples = scan_and_extract_vgg16_features(data_dir_path,
                                                          output_dir_path=feature_dir_name,
                                                          model=self.vgg16_model,
                                                          data_set_name=data_set_name)
    self.num_input_tokens = x_samples[0].shape[1]

    # Use the mean frame count across samples as the fixed LSTM sequence length.
    frames_list = []
    for x in x_samples:
        frames = x.shape[0]
        frames_list.append(frames)
        max_frames = max(frames, max_frames)
    self.expected_frames = int(np.mean(frames_list))
    print('max frames: ', max_frames)
    print('expected frames: ', self.expected_frames)

    # Truncate long samples / zero-pad short ones to expected_frames.
    for i in range(len(x_samples)):
        x = x_samples[i]
        frames = x.shape[0]
        print(x.shape)
        if frames > self.expected_frames:
            x = x[0:self.expected_frames, :]
            x_samples[i] = x
        elif frames < self.expected_frames:
            temp = np.zeros(shape=(self.expected_frames, x.shape[1]))
            temp[0:frames, :] = x
            x_samples[i] = temp

    # Build the label -> index mapping, then one-hot encode the targets.
    for y in y_samples:
        if y not in self.labels:
            self.labels[y] = len(self.labels)
    print(self.labels)
    for i in range(len(y_samples)):
        y_samples[i] = self.labels[y_samples[i]]
    self.nb_classes = len(self.labels)
    y_samples = np_utils.to_categorical(y_samples, self.nb_classes)

    config = dict()
    config['labels'] = self.labels
    config['nb_classes'] = self.nb_classes
    config['num_input_tokens'] = self.num_input_tokens
    config['expected_frames'] = self.expected_frames
    config['vgg16_include_top'] = self.vgg16_include_top
    self.config = config

    np.save(config_file_path, config)

    model = self.create_model()
    # BUG FIX: the file handle was previously leaked (open(...).write(...));
    # the with-statement guarantees it is flushed and closed.
    with open(architecture_file_path, 'w') as architecture_file:
        architecture_file.write(model.to_json())

    Xtrain, Xtest, Ytrain, Ytest = train_test_split(x_samples, y_samples, test_size=test_size,
                                                    random_state=random_state)

    train_gen = generate_batch(Xtrain, Ytrain)
    test_gen = generate_batch(Xtest, Ytest)

    train_num_batches = len(Xtrain) // BATCH_SIZE
    test_num_batches = len(Xtest) // BATCH_SIZE

    checkpoint = ModelCheckpoint(filepath=weight_file_path, save_best_only=True)
    history = model.fit_generator(generator=train_gen, steps_per_epoch=train_num_batches,
                                  epochs=NUM_EPOCHS,
                                  verbose=1, validation_data=test_gen, validation_steps=test_num_batches,
                                  callbacks=[checkpoint])
    model.save_weights(weight_file_path)

    return history
|
'''Write a program to accept a number "n" from the user; then display the sum of the following series:
1/2^3 + 1/3^3 + 1/4^3 + ...... + 1/n^3'''


def series_sum(n):
    """Return 1/2**3 + 1/3**3 + ... + 1/n**3, rounded to 2 decimals.

    BUG FIX: the original loop ran over range(1, n), which wrongly
    included the 1/1**3 term and excluded the final 1/n**3 term.
    """
    return round(sum(1 / i ** 3 for i in range(2, n + 1)), 2)


if __name__ == "__main__":
    n = int(input("enter a number"))
    print(series_sum(n))
|
def show_extremes(text):
    """Print and return the (max, min) characters of *text*.

    BUG FIX: the original read the input into ``aa`` but then called
    ``max(a)`` / ``min(a)``, raising NameError on every run.
    """
    biggest = max(text)
    smallest = min(text)
    print("max value is", biggest)
    print("min value is", smallest)
    return biggest, smallest


if __name__ == "__main__":
    aa = input("enter the string")
    show_extremes(aa)
from .AdminModelView import AdminModelView
from .BotSettingsView import BotSettingsView
from .CKEditorModelView import CKEditorModelView
from .HiddenModelView import HiddenModelView
from .OrdersModelView import OrderModelView
from .TextsModelView import TextsModelView

# BUG FIX: __all__ must hold the *names* of public objects as strings;
# listing the class objects themselves breaks `from package import *`
# (Python raises TypeError on non-string __all__ entries).
__all__ = ["BotSettingsView", "OrderModelView", "CKEditorModelView", "HiddenModelView"]
|
# Demonstration of tuples: an immutable sequence (name, age, flag, address).
t=('yyt',19,True,'浙江温州江南皮革城')
print(t)
# Elements can be read by index...
print(t[0])
print(t[3])
# ...or iterated over.
for member in t:
    print(member)
# Unlike a list, items cannot be reassigned by position - that raises TypeError,
# e.g. t[0]='杨狗蛋'
# Rebinding t to a new tuple; the old tuple becomes eligible for garbage collection.
t = ('杨二狗', 20, True, '浙江温州倒闭了的江南皮革厂')
print(t)
# A tuple can be converted to a list...
person=list(t)
print(person)
# ...and once it is a list its elements can be modified.
person[0]='杨一天'
person[1]=19
# A list can be converted back to a tuple.
person_tuple=tuple(person)
print(person_tuple)
|
from pymodm import connect
from pymodm import MongoModel, fields

# Connects to the hosted MongoDB instance at import time.
# SECURITY NOTE(review): the username/password are hard-coded in the URI;
# they should live in environment variables or a secrets store — TODO confirm.
connect(
    "mongodb://heart-rate-db:GODUKE10@ds159263.mlab.com:59263/bme590heartdata")


class Patient(MongoModel):
    """Heart-rate record for a single patient."""
    # unique patient identifier, used as the document primary key
    patient_id = fields.IntegerField(primary_key=True)
    # attending physician's e-mail address
    attending_email = fields.EmailField()
    user_age = fields.IntegerField()
    # recorded heart rates; presumably parallel to h_r_times — TODO confirm
    heart_rate = fields.ListField(field=fields.IntegerField())
    # timestamps of the heart-rate measurements
    h_r_times = fields.ListField(field=fields.DateTimeField())
|
import csv
# NOTE(review): the file handle is never closed, and csv.reader is a one-shot
# iterator — after one function loops over it, every later call sees no rows.
file = csv.reader(open('DATABASE-Table 1new.csv'))
def main():
    """Show the feature menu and dispatch to the matching recommender."""
    print("Welcome to car bot!")
    print("I will recommend a car or list of cars to you based on what is most important to you.")
    print("Pick which feature is most important to you when buying a car out of the options presented: ")
    print("1 - luxury")
    print("2 - fuel economy")
    print("3 - body type")
    print("4 - drivetrain")
    print("5 - transmission")
    print("6 - money")
    x = int(input("Enter the number of your most important feature: "))
    # map each menu number to its handler instead of an if/elif ladder
    handlers = {1: luxury, 2: fuel, 3: body, 4: drivetrain, 5: transmission, 6: money}
    if x in handlers:
        return handlers[x]()
    print("this option is invalid")
def luxury():
    """Print every car in the database costing more than $45,000."""
    print("I define luxury cars as any car which costs over 45 thousand dollars!")
    print("These are the cars I recommend considering which match your selections: ")
    # column 5 holds the price
    for row in file:
        if int(row[5]) > 45000:
            print(row)
def fuel():
    """Print every car whose fuel-economy figure (column 24) exceeds 50."""
    print("This feature will give you the most fuel efficiant car from the data base")
    print("These are the cars I recommend considering which match your selections: ")
    for row in file:
        if float(row[24]) > 50:
            print(row)
def body():
    """Ask for a body style and print every matching car."""
    print("Select your preferred body style: ")
    print("1 - sedan")
    print("2 - station wagon")
    print("3 - convertible")
    print("4 - hatchback")
    print("5 - pickup truck")
    b = int(input("Enter the number which corresponds with your choice of body style: "))
    print("These are the cars I recommend considering which match your selections: ")
    # menu number -> value stored in column 7 of the database
    styles = {1: "Sedan", 2: "Wagon", 3: "Convertible", 4: "Hatchback", 5: "Truck"}
    if b not in styles:
        print("This entry isn't valid")
        return
    wanted = styles[b]
    for row in file:
        if str(row[7]) == wanted:
            print(row)
def drivetrain():
    """Map the user's driving profile to a drivetrain and print matching cars."""
    print("This feature will determine what drivetrain you need and will give you a list of vehicle options")
    print("Pick which mostly describes you")
    print("1 - I live where it snows sometimes")
    print("2 - I go heavy off-roading")
    print("3 - I live in a place where it rains sometimes but it barely ever snows so I don't need extra capability")
    print("4 - I like to race")
    d = int(input("Enter the number of the choice which best describes you: "))
    print("These are the cars I recommend considering which match your selections: ")
    # profile number -> drivetrain string stored in column 19
    drivetrains = {
        1: "all wheel drive",
        2: "four wheel drive",
        3: "front wheel drive",
        4: "rear wheel drive",
    }
    if d not in drivetrains:
        print("This option was not valid")
        return
    wanted = drivetrains[d]
    for row in file:
        if str(row[19]) == wanted:
            print(row)
def transmission():
    """Ask for a transmission preference and print every matching car.

    BUG FIX: the original tested ``x == "a" or "b" or ...``; everything
    after the first comparison is a non-empty string and therefore always
    truthy, so every row was printed regardless of its transmission.
    Membership tests against sets fix that.
    """
    print("This feature will give you a list of cars sorted by your transmission preferance")
    print("1 - automatic")
    print("2 - I'm a pro and want to shift myself (manual)")
    t = int(input("Enter the number which corresponds with your choice: "))
    print("These are the cars I recommend considering which match your selections: ")
    automatics = {
        "8-speed shiftable automatic", "7-speed automated manual",
        "4-speed automatic", "6-speed automatic", "6-speed shiftable automatic",
        "10-speed shiftable automatic", "continuously variable-speed automatic",
    }
    manuals = {"5-speed manual", "6-speed manual"}
    if t == 1:
        for line in file:
            if str(line[20]) in automatics:
                print(line)
    elif t == 2:
        for line in file:
            if str(line[20]) in manuals:
                print(line)
    else:
        print("this option was not valid")
def money():
    """Print every car priced under $25,000 (column 5 is the price)."""
    print("This feature automatically finds the cheapest cars from the database")
    print("These are the cars I recommend considering which match your selections: ")
    for row in file:
        if int(row[5]) < 25000:
            print(row)
# Guard the entry point so the module can be imported without launching
# the interactive bot.
if __name__ == "__main__":
    main()
|
from __future__ import unicode_literals
from django.apps import AppConfig
class InternalKeyConfig(AppConfig):
    """Django application configuration for the internal_key app."""
    name = 'internal_key'
|
import json
import boto3
BUCKET = 'seektube'


def lambda_handler(event, context):
    """AWS Lambda entry point: report the status of an AWS Transcribe job.

    The job name is taken from the ``id`` query-string parameter and the
    response is a CORS-enabled JSON body ``{"status": ...}``.
    """
    # NOTE(review): ACCESS_KEY_ID / ACCESS_SECRET_KEY are not defined anywhere
    # in this module; they must be injected by the deployment environment.
    # Prefer the Lambda execution role (no explicit keys) — TODO confirm.
    transcribe = boto3.client("transcribe", aws_access_key_id=ACCESS_KEY_ID,
                              aws_secret_access_key=ACCESS_SECRET_KEY,
                              region_name='us-east-1')
    job_name = event["queryStringParameters"]['id']
    try:
        response = transcribe.get_transcription_job(TranscriptionJobName=job_name)
        message = response["TranscriptionJob"]["TranscriptionJobStatus"]
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; the message typo ("Not exsist") is fixed as well.
    except Exception:
        message = 'Job does not exist'
    return {
        "statusCode": 200,
        "body": json.dumps({"status": message}),
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Content-Type": "application/json"
        }
    }
|
def convert_f_to_c(f):
    """Convert *f* degrees Fahrenheit to Celsius; print and return the result.

    Returning the value (the original returned None) is backward-compatible
    and makes the function usable programmatically.
    """
    c = (f - 32) * 5 / 9
    print(f'{f}°F = {c:.3f}°C')
    return c
def time_converter(second):
    """Split *second* into whole minutes and leftover seconds; print and return them.

    :param second: total number of seconds
    :return: tuple ``(minutes, remaining_seconds)`` (the original returned None;
             returning the pair is backward-compatible)
    """
    m = second // 60
    sec = second % 60
    print(f'{second} seconds is {m} minutes {sec} seconds')
    return m, sec
def list_info(elements):
    """Print the length, first element and fourth element of *elements*.

    Assumes the list has at least four items (raises IndexError otherwise).
    """
    length = len(elements)
    first, fourth = elements[0], elements[3]
    print('The length of list is', length)
    print('The first element of list is', first)
    print('The fourth element of list is', fourth)
def list_operations(elements):
    """Demonstrate pop/insert/remove on *elements* in place, printing after each step."""
    del elements[-1]  # same effect as pop(): drop the last element
    print('Pop operation')
    print(elements)
    elements.insert(0, 1)  # put the value 1 at the front (index 0)
    print('\n Insert operation')
    print(elements)
    # remove the first occurrence of whatever currently sits at index 1
    elements.remove(elements[1])
    print('\n Remove operation')
    print(elements)
def main():
    """Loop the menu until the user chooses Exit, dispatching each choice."""
    while True:
        print('1. Convert Farenheit to Celcius.')
        print('2. Convert Seconds into minutes and seconds')
        print('3. Display the length, first value and fourth value of the list')
        print('4. Perform various list operations')
        print('5. Exit')
        choice = int(input('Enter a choice: '))
        if choice == 5:
            break
        if choice == 1:
            temp_f = float(input('Enter temperature in farenheit: '))
            convert_f_to_c(temp_f)
        elif choice == 2:
            seconds = float(input('Enter time in second: '))
            time_converter(seconds)
        elif choice in (3, 4):
            # both list options share the same element-reading prompt
            count = int(input('Enter number of elements for the list: '))
            values = [int(input()) for _ in range(count)]
            if choice == 3:
                list_info(values)
            else:
                list_operations(values)
        else:
            print('Wrong Choice')
if __name__ == "__main__":
main() |
# Generated by Django 2.1.2 on 2019-01-02 08:33
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration altering two CharFields in the products app."""

    dependencies = [
        ('products', '0032_auto_20181227_1252'),
    ]

    operations = [
        # Product.name: optional, indexed; help text warns "+" is replaced by "-"
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(blank=True, db_index=True, help_text='Символи "+" будуть замінені на "-"!', max_length=128, null=True, verbose_name='Назва'),
        ),
        # Type.category_plus_type: generated automatically, not editable
        migrations.AlterField(
            model_name='type',
            name='category_plus_type',
            field=models.CharField(blank=True, editable=False, help_text='Генерується автоматично!', max_length=50, null=True, verbose_name='Категорія+Тип продукту:'),
        ),
    ]
|
import sympy
from typing import Any
from sympy.core.numbers import Float, pi
# YES THIS SHOULD BE PUT IN A JSON FILE
# NO THERE CAN'T BE MORE JS CODE THAN PYTHON CODE IN MY REPO
# Relative atomic masses (amu) keyed by element symbol, as listed in the
# IB chemistry data booklet; looked up by Ar() below.
ATOMIC_MASSES = {  # taken from IB chemistry data booklet
    'H': 1.01,  # Hydrogen
    'He': 4.0,  # Helium
    'Li': 6.94,  # Lithium
    'Be': 9.01,  # Beryllium
    'B': 10.81,  # Boron
    'C': 12.01,  # Carbon
    'N': 14.01,  # Nitrogen
    'O': 16.0,  # Oxygen
    'F': 19.0,  # Fluorine
    'Ne': 20.18,  # Neon
    'Na': 22.99,  # Sodium
    'Mg': 24.3,  # Magnesium
    'Al': 26.98,  # Aluminium
    'Si': 28.09,  # Silicon
    'P': 30.97,  # Phosphorus
    'S': 32.06,  # Sulfur
    'Cl': 35.45,  # Chlorine
    'Ar': 39.95,  # Argon
    'K': 39.1,  # Potassium
    'Ca': 40.08,  # Calcium
    'Sc': 44.96,  # Scandium
    'Ti': 47.87,  # Titanium
    'V': 50.94,  # Vanadium
    'Cr': 52.0,  # Chromium
    'Mn': 54.94,  # Manganese
    'Fe': 55.85,  # Iron
    'Co': 58.93,  # Cobalt
    'Ni': 58.69,  # Nickel
    'Cu': 63.55,  # Copper
    'Zn': 65.38,  # Zinc
    'Ga': 69.72,  # Gallium
    'Ge': 72.63,  # Germanium
    'As': 74.92,  # Arsenic
    'Se': 78.97,  # Selenium
    'Br': 79.9,  # Bromine
    'Kr': 83.8,  # Krypton
    'Rb': 85.47,  # Rubidium
    'Sr': 87.62,  # Strontium
    'Y': 88.91,  # Yttrium
    'Zr': 91.22,  # Zirconium
    'Nb': 92.91,  # Niobium
    'Mo': 95.95,  # Molybdenum
    'Tc': 98,  # Technetium
    'Ru': 101.07,  # Ruthenium
    'Rh': 102.91,  # Rhodium
    'Pd': 106.42,  # Palladium
    'Ag': 107.87,  # Silver
    'Cd': 112.41,  # Cadmium
    'In': 114.82,  # Indium
    'Sn': 118.71,  # Tin
    'Sb': 121.76,  # Antimony
    'Te': 127.6,  # Tellurium
    'I': 126.9,  # Iodine
    'Xe': 131.29,  # Xenon
    'Cs': 132.91,  # Cesium
    'Ba': 137.33,  # Barium
    'La': 138.91,  # Lanthanum
    'Ce': 140.12,  # Cerium
    'Pr': 140.91,  # Praseodymium
    'Nd': 144.24,  # Neodymium
    'Pm': 145,  # Promethium
    'Sm': 150.36,  # Samarium
    'Eu': 151.96,  # Europium
    'Gd': 157.25,  # Gadolinium
    'Tb': 158.93,  # Terbium
    'Dy': 162.5,  # Dysprosium
    'Ho': 164.93,  # Holmium
    'Er': 167.26,  # Erbium
    'Tm': 168.93,  # Thulium
    'Yb': 173.05,  # Ytterbium
    'Lu': 174.97,  # Lutetium
    'Hf': 178.49,  # Hafnium
    'Ta': 180.95,  # Tantalum
    'W': 183.84,  # Tungsten
    'Re': 186.21,  # Rhenium
    'Os': 190.23,  # Osmium
    'Ir': 192.22,  # Iridium
    'Pt': 195.08,  # Platinum
    'Au': 196.97,  # Gold
    'Hg': 200.59,  # Mercury
    'Tl': 204.38,  # Thallium
    'Pb': 207.21,  # Lead
    'Bi': 208.98,  # Bismuth
    'Po': 209,  # Polonium
    'At': 210,  # Astatine
    'Rn': 222,  # Radon
    'Fr': 223,  # Francium
    'Ra': 226,  # Radium
    'Ac': 227,  # Actinium
    'Th': 232.04,  # Thorium
    'Pa': 231.04,  # Protactinium
    'U': 238.03,  # Uranium
    'Np': 237,  # Neptunium
    'Pu': 244,  # Plutonium
    'Am': 243,  # Americium
    'Cm': 247,  # Curium
    'Bk': 247,  # Berkelium
    'Cf': 251,  # Californium
    'Es': 252,  # Einsteinium
    'Fm': 257,  # Fermium
    'Md': 258,  # Mendelevium
    'No': 259,  # Nobelium
    'Lr': 266,  # Lawrencium
    'Rf': 267,  # Rutherfordium
    'Db': 268,  # Dubnium
    'Sg': 269,  # Seaborgium
    'Bh': 270,  # Bohrium
    'Hs': 269,  # Hassium
    'Mt': 278,  # Meitnerium
    'Ds': 281,  # Darmstadtium
    'Rg': 282,  # Roentgenium
    'Cn': 285,  # Copernicium
    'Uut': 286,  # ununtrium (temporary)
    'Nh': 286,  # Nihonium
    'Uuq': 289,  # ununquadium (temporary)
    'Fl': 289,  # Flerovium
    'Uup': 289,  # ununpentium (temporary)
    'Mc': 289,  # Moscovium
    'Uuh': 293,  # ununhexium (temporary)
    'Lv': 293,  # Livermorium
    'Uus': 294,  # ununseptium (temporary)
    'Ts': 294,  # Tennessine
    'Uuo': 294,  # ununoctium (temporary)
    'Og': 294,  # Oganesson
}
def Ar(element: Any) -> Float:
    """Retrieve the relative atomic mass of the element.
    :param element: Any object that provides a __str__ or __repr__ method
    that gives a string of the symbol of an element.
    :return: The relative atomic mass of the element (in amu).
    :raise LookupError: Raised when the element symbol can not be found in data.
    >>> Ar('H')
    1.01
    >>> Ar(sympy.Symbol('H'))
    1.01
    >>> class Foo:
    ...     def __repr__(self):
    ...         return 'H'
    >>> Ar(Foo())
    1.01
    >>> Ar('Ha')  # not an element
    Traceback (most recent call last):
    ...
    LookupError: unknown element symbol 'Ha'
    """
    sym = str(element)
    try:
        mass = ATOMIC_MASSES[sym]
    except KeyError:
        # suppress the KeyError context so callers see a clean LookupError
        raise LookupError(f"unknown element symbol '{sym}'") from None
    return Float(mass, 3)
# Physical constants (IB data booklet values), wrapped as sympy numbers.
CONSTANTS = {
    constant_name: sympy.S(constant_value) for constant_name, constant_value in
    {
        "L": 6.02e23,  # Avogadro's constant / mol^{-1}
        "N_A": 6.02e23,  # Avogadro's constant / mol^{-1}
        "R": 8.31,  # Gas constant / J K^{-1} mol^{-1}
        "c": 3.00e8,  # Speed of light in vacuum / m s^{-1}
        "c_water": 4.18,  # Specific heat capacity of water / kJ kg^{-1} K^{-1} / J g^{-1} K^{-1}
        "h": 6.63e-34,  # Planck's constant / J s  (BUG FIX: was 6.63e-32)
        "F": 9.65e4,  # Faraday's constant / C mol^{-1}
        "K_w": 1.00e-14,  # Ionic product of water at 298K / mol^2 dm^{-6}
        "g": 9.81,  # Acceleration of free fall (Earth's surface) / m s^{-2}
        "G": 6.67e-11,  # Gravitational constant / N m^2 kg^{-2}
        "k_B": 1.38e-23,  # Boltzmann constant / J K^{-1}
        "sigma": 5.67e-8,  # Stefan-Boltzmann constant / W m^{-2} K^{-4}
        "k": 8.99e9,  # Coulomb constant / N m^2 C^{-2}
        "epsilon_0": 8.85e-12,  # Permittivity of free space / C^2 N^{-1} m^{-2}
        "mu_0": 4e-7 * pi,  # Permeability of free space / T m A^{-1}
        "e": 1.60e-19,  # Elementary charge / C
        "m_e": 9.110e-31,  # Electron rest mass / kg
        "m_p": 1.673e-27,  # Proton rest mass / kg
        "m_n": 1.675e-27,  # Neutron rest mass / kg
        "S": 1.36e3,  # Solar constant / W m^{-2}
        "R_0": 1.20e-15  # Fermi radius / m
    }.items()
}
# Expose Ar() alongside the numeric constants so formula evaluators can call it.
CONSTANTS['Ar'] = Ar
|
# Smoke-test: exercise asserter.f from chapter 34 with a sample argument.
from partVII.chapter34 import asserter

asserter.f(1)
|
from sql import *
def UpdateAccounts(account_numbers, acc_cards, acc_bal):
    """Apply card/balance updates to every account in *account_numbers*.

    :param account_numbers: iterable of account numbers to update
    :param acc_cards: card data passed through to update_account
    :param acc_bal: balance data passed through to update_account
    """
    conn, cursor = create_connection('./accounts.db')
    try:
        for account_no in account_numbers:
            update_account(conn, cursor, account_no, acc_cards, acc_bal)
    finally:
        # BUG FIX: close the connection even if an update raises,
        # so the database handle is never leaked.
        conn.close()
|
import re
import string
# Translation tables built once at import time: every punctuation /
# whitespace character maps to a single space.
PUNCTUATION_TRANS = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
WHITESPACE_TRANS = str.maketrans(string.whitespace, ' ' * len(string.whitespace))
def preprocess_text(text):
    """Method for pre-processing the given response text.
    It:
    * replaces all punctuations with spaces
    * replaces all whitespace characters (tab, newline etc) with spaces
    * removes trailing and leading spaces
    * removes double spaces
    * changes to lowercase
    :param text: the text to be cleaned
    :return: cleaned text
    """
    # one combined table: punctuation and whitespace both become spaces
    to_space = str.maketrans(
        string.punctuation + string.whitespace,
        ' ' * (len(string.punctuation) + len(string.whitespace)),
    )
    collapsed = text.translate(to_space).strip().lower()
    return re.sub(' +', ' ', collapsed)
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = 'jimit'
__CreateAt__ = '2019\2\24 10:31'

from selenium import webdriver

# Functional smoke-test: open the local dev server's index page in Firefox
# and check that the page title mentions Django.
# NOTE(review): the browser is never closed (browser.quit()) — TODO confirm intended.
browser = webdriver.Firefox()
browser.get("http://127.0.0.1:8000/index")
assert "Django" in browser.title
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Monte-Carlo study of the periodogram PSD estimator on white Gaussian noise.

For each signal length in N, Nexp realizations of N(mu, var) noise are
generated, their periodograms computed, and the bias and variance of the
estimator are plotted and tabulated.

Created on Mon Nov 11 11:21:51 2019
@author: nico
"""
import sys
sys.path.append('/home/nico/Documentos/facultad/6to_nivel/pds/git/pdstestbench')
import os
import matplotlib.pyplot as plt
import numpy as np
#import seaborn as sns
from pdsmodulos.signals import spectral_estimation as sp
import pandas as pd

os.system("clear")  # limpia la terminal de python
plt.close("all")    # cierra todos los graficos

# Simular para los siguientes tamaños de señal.
# BUG FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
N = np.array([10, 50, 100, 250, 500, 1000, 5000], dtype=int)
fs = 1000   # Hz
Nexp = 200  # realizations per signal length
mu = 0      # media (mu)
var = 2     # varianza

sesgo = []     # estimator bias, one entry per signal length
varianza = []  # estimator variance, one entry per signal length

# The original duplicated this pipeline seven times (signal0..signal6,
# Sper0..Sper6, ...); a single loop over N performs the same work.
for n in N:
    n = int(n)
    label = str(n)

    # Nexp realizations of white noise, one per column
    signal = np.vstack(np.transpose(
        [np.random.normal(mu, np.sqrt(var), n) for _ in range(Nexp)]))

    # Periodogram of every realization
    Sper = sp.periodogram(signal, exp=Nexp, ax=0)

    # Sample mean across realizations, then bias and variance of the estimator
    valor_medio_muestreal = np.mean(Sper, axis=1)
    valor_medio = np.mean(valor_medio_muestreal, axis=0)
    sesgo.append(np.abs(valor_medio - var))
    varianza.append(np.mean(np.var(Sper, axis=1), axis=0))

    # Time axis and normalized-frequency axis
    tt = np.linspace(0, (n - 1) / fs, n)
    ff = np.linspace(0, (n - 1) * fs / n, n) / fs

    # Realizations in time
    plt.figure("Gráfico de realizaciones de ruido blanco con N= " + label,
               constrained_layout=True)
    plt.title("Gráfico de realizaciones de ruido blanco con N= " + label)
    plt.plot(tt, signal)
    plt.xlabel("tiempo [S]")
    plt.ylabel("Amplitud")
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.grid()

    # Periodograms and their average
    plt.figure("Periodogramas de ruido blanco con N= " + label,
               constrained_layout=True)
    plt.subplot(1, 2, 1)
    plt.title("Estimador Periodograma con N= " + label)
    plt.plot(ff, Sper, marker='.')
    plt.xlabel('frecuecnia normalizada f/fs [Hz]')
    plt.ylabel("Amplitud")
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.grid()

    plt.subplot(1, 2, 2)
    plt.title("Promedio del Periodogramas con N= " + label)
    plt.plot(ff, valor_medio_muestreal, marker='.')
    plt.xlabel('frecuecnia normalizada f/fs [Hz]')
    plt.ylabel("Amplitud")
    plt.axhline(0, color="black")
    plt.axvline(0, color="black")
    plt.ylim(min(valor_medio_muestreal) - 0.01, max(valor_medio_muestreal) + 0.01)
    plt.grid()
    plt.tight_layout()

# Consistency of the estimator: bias and variance as a function of N
plt.figure("Consistencia del estimador", constrained_layout=True)
plt.subplot(1, 2, 1)
plt.title("Sesgo")
plt.plot(N, sesgo, marker='.')
plt.xlabel('número de ventanas K')
plt.ylabel("Sesgo")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.ylim(min(sesgo) - 0.01, max(sesgo) + 0.01)
plt.grid()

plt.subplot(1, 2, 2)
plt.title("Varianza ")
plt.plot(N, varianza, marker='.')
plt.xlabel('número de ventanas K')
plt.ylabel("Varianza")
plt.axhline(0, color="black")
plt.axvline(0, color="black")
plt.ylim(min(varianza) - 0.01, max(varianza) + 0.01)
plt.grid()
plt.tight_layout()

# Tabulate bias and variance per N
tus_resultados_per = [[s, v] for s, v in zip(sesgo, varianza)]
df = pd.DataFrame(tus_resultados_per, columns=['$s_P$', '$v_P$'],
                  index=N)
print(df)
|
import unittest

from katas.kyu_7.split_the_bill import split_the_bill


class SplitTheBillTestCase(unittest.TestCase):
    """Checks that each person's result equals what they paid minus the equal share."""

    def test_equals(self):
        # total 45, share 15 -> A overpaid 5, C underpaid 5
        self.assertEqual(split_the_bill({'A': 20, 'B': 15, 'C': 10}),
                         {'A': 5, 'B': 0, 'C': -5})

    def test_equals_2(self):
        # total 75, share 25
        self.assertEqual(split_the_bill({'A': 40, 'B': 25, 'X': 10}),
                         {'A': 15, 'B': 0, 'X': -15})
|
#!/usr/bin/python3
# NOTE(review): no custom `sum` is imported or defined here, so these calls hit
# the builtin sum(), which raises TypeError for these argument forms (e.g. x=3).
# Presumably a module defining sum(x, y) with defaults was meant to be
# imported above -- TODO confirm.
sum(1, 1)
sum(3, 6)
sum(x=3)
sum(y=9)
sum(y=1, x=10)
|
import torch.nn as nn
from .utils import repeat_module, LayerNorm, SublayerConnection
class Decoder(nn.Module):
    """A stack of N identical decoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super().__init__()
        # Clone the prototype layer N times; normalize the final output.
        self.layers = repeat_module(layer, N)
        self.norm = LayerNorm(layer.model_dim)

    def forward(self, x, mem, tgt_mask, mem_mask):
        """Thread x through every decoder layer, then apply the final norm."""
        out = x
        for decoder_layer in self.layers:
            out = decoder_layer(out, mem, tgt_mask, mem_mask)
        return self.norm(out)
class DecoderLayer(nn.Module):
    """ causal self attention + cross attention + FC """

    def __init__(self, model_dim, self_attn, cross_attn, fc_net, dropout):
        super().__init__()
        self.model_dim = model_dim
        self.self_attn = self_attn
        self.cross_attn = cross_attn
        self.fc_net = fc_net
        # One residual/dropout wrapper per sub-block: self-attn, cross-attn, FC.
        self.sublayers = repeat_module(SublayerConnection(model_dim, dropout), 3)

    def forward(self, x, mem, tgt_mask, mem_mask):
        """Masked self-attention, then cross-attention over mem, then the FC net."""
        def run_self_attn(inp):
            return self.self_attn(inp, inp, inp, tgt_mask)

        def run_cross_attn(inp):
            return self.cross_attn(inp, mem, mem, mem_mask)

        x = self.sublayers[0](x, run_self_attn)
        x = self.sublayers[1](x, run_cross_attn)
        return self.sublayers[2](x, self.fc_net)
year = int(input("Enter a year to check if it's a leap year: "))

def leap_year(year):
    """Print whether *year* is a Gregorian leap year.

    A year is a leap year when it is divisible by 4, except century years,
    which must also be divisible by 400. The triple-nested conditionals of
    the original are flattened into one boolean expression.
    """
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        print(year, "is a leap year.")
    else:
        print(year, "isn't a leap year.")

leap_year(year)
class Solution:
    """Excel sheet column title -> column number (LeetCode 171)."""

    def code(self, ch):
        """Return the 1-based alphabet position of uppercase letter *ch*."""
        return ord(ch) - ord('A') + 1

    def titleToNumber(self, s):
        """Convert a column title such as 'AB' to its column number (28).

        :type s: str
        :rtype: int

        The docstring was unreachable after `return` in the original; the
        per-character `pow(26, ...)` is replaced with base-26 accumulation.
        """
        col_num = 0
        for ch in s:
            col_num = col_num * 26 + self.code(ch)
        return col_num
|
# Edgar Flores
#Password Generator
## 24 October, 2017
import random
'''
functions
'''
def second_part(m):
    """Return *m* with password-style look-alike substitutions applied.

    i/I -> '1', s/S -> '$', o/O -> '0'; every other character is unchanged.
    Replaces the six-branch if/elif chain with a single C-level
    str.translate pass.
    """
    return m.translate(str.maketrans("iIsSoO", "11$$00"))
# main: read a word list, then print `how_many` passwords of the form
# word + SUBSTITUTED_UPPER_WORD + word + 4-digit-number.
file = input("Enter the name of the file that contains the word list:")
filvar= open(file,"r")
how_many = int(input("How many passwords do you want to create?"))
print("Here are the passwords:")
words = []
for line in filvar: # read the word list, one word per line
    line = line.strip()
    words.append(line)
for i in range (how_many):
    first = random.choice(words).strip() # first part of password
    second = second_part(random.choice(words)).upper().strip() # second part: substituted + uppercased
    third = random.choice(words).strip() # third part of password
    num = str(random.randint(1000,9999)).strip() # random 4-digit suffix
    password = (first+second+third+num).strip() # final password
    print("%s" % password)
# NOTE(review): the file stays open if an exception occurs above — a
# `with open(...)` block would be safer.
filvar.close() # closing the file
|
import imp
from app import db
from migrate.versioning import api
from config import Config

# Create and apply the next sqlalchemy-migrate migration script.
# Bug fix: the URI/repo values live on the Config class; the bare names
# SQLALCHEMY_DATABASE_URI / SQLALCHEMY_MIGRATE_REPO were never defined
# and raised NameError at the first api call.
v = api.db_version(Config.SQLALCHEMY_DATABASE_URI, Config.SQLALCHEMY_MIGRATE_REPO)
migration = Config.SQLALCHEMY_MIGRATE_REPO + ('\\versions\\%03d_migration.py' % (v + 1))
# Materialize the model as currently stored in the DB into a scratch module
# so it can be diffed against the live db.metadata.
tmp_module = imp.new_module('old_module')
old_module = api.create_model(Config.SQLALCHEMY_DATABASE_URI, Config.SQLALCHEMY_MIGRATE_REPO)
exec(old_module, tmp_module.__dict__)
script = api.make_update_script_for_model(Config.SQLALCHEMY_DATABASE_URI, Config.SQLALCHEMY_MIGRATE_REPO,
                                          tmp_module.meta, db.metadata)
# `with` guarantees the migration script is flushed and closed.
with open(migration, "wt") as fp:
    fp.write(script)
api.upgrade(Config.SQLALCHEMY_DATABASE_URI, Config.SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(Config.SQLALCHEMY_DATABASE_URI, Config.SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
|
from math import pow
from random import randint
print(pow(2, 3)) # usable directly, without the module prefix
# NOTE(review): math.pow shadows the builtin pow and returns a float (8.0);
# `randint` is imported above but never used here.
from math import factorial, sqrt # import several members at once
print(factorial(10))
print(sqrt(16))
from math import * # import every public member of math
# NOTE(review): wildcard imports can silently shadow earlier names.
|
import argparse
import os
import utils
class BaseOptions():
    """Shared CLI options for training and testing.

    Subclasses call initialize() to add their own flags and must set
    self.isTrain before parse() is called (parse() reads it).
    """
    def __init__(self):
        # Parser is built lazily; initialize() fills it on first parse().
        self.parser = argparse.ArgumentParser()
        self.initialized = False
    def initialize(self):
        """Register the options common to both train and test runs."""
        self.parser.add_argument(
            '--dataroot', required=False, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        self.parser.add_argument(
            '--batchSize', type=int, default=1, help='input batch size')
        self.parser.add_argument(
            '--loadSize', type=int, default=286, help='scale images to this size')
        self.parser.add_argument(
            '--fineSize', type=int, default=256, help='then crop to this size')
        self.parser.add_argument(
            '--input_nc', type=int, default=3, help='# of input image channels')
        self.parser.add_argument(
            '--output_nc', type=int, default=3, help='# of output image channels')
        self.parser.add_argument(
            '--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        self.parser.add_argument(
            '--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--which_model_netD', type=str,
                                 default='basic', help='selects model to use for netD')
        self.parser.add_argument('--which_model_netG', type=str,
                                 default='resnet_9blocks', help='selects model to use for netG')
        self.parser.add_argument(
            '--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        self.parser.add_argument(
            '--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2')
        self.parser.add_argument('--name', type=str, default='rarepepes',
                                 help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--align_data', action='store_true',
                                 help='if True, the datasets are loaded from "test" and "train" directories and the data pairs are aligned')
        self.parser.add_argument(
            '--which_direction', type=str, default='AtoB', help='AtoB or BtoA')
        self.parser.add_argument(
            '--nThreads', default=2, type=int, help='# threads for loading data')
        self.parser.add_argument('--checkpoints_dir', type=str,
                                 default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--norm', type=str, default='instance',
                                 help='instance normalization or batch normalization')
        self.parser.add_argument('--serial_batches', action='store_true',
                                 help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument(
            '--display_winsize', type=int, default=256, help='display window size')
        self.parser.add_argument(
            '--display_id', type=int, default=1, help='window id of the web display')
        self.parser.add_argument('--identity', type=float, default=0.0,
                                 help='use identity mapping. Setting identity other than 1 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set optidentity = 0.1')
        self.parser.add_argument(
            '--use_dropout', action='store_true', help='use dropout for the generator')
        self.parser.add_argument('--max_dataset_size', type=int, default=float(
            "inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        self.initialized = True
    def parse(self):
        """Parse known CLI args, normalize gpu_ids, and return the options."""
        if not self.initialized:
            self.initialize()
        # parse_known_args ignores unrecognized flags instead of erroring.
        self.opt, _ = self.parser.parse_known_args()
        self.opt.isTrain = self.isTrain  # train or test; set by the subclass
        # Convert the comma-separated gpu_ids string into a list of ints.
        str_ids = self.opt.gpu_ids.split(',')
        self.opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if id >= 0:
                self.opt.gpu_ids.append(id)
        # NOTE(review): `args` is collected but never used — dead assignment.
        args = vars(self.opt)
        return self.opt
class TrainOptions(BaseOptions):
    """Training-only options layered on top of BaseOptions."""
    def initialize(self):
        """Add training flags (logging frequency, LR schedule, GAN losses)."""
        BaseOptions.initialize(self)
        self.parser.add_argument('--display_freq', type=int, default=100,
                                 help='frequency of showing training results on screen')
        self.parser.add_argument('--print_freq', type=int, default=100,
                                 help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq', type=int,
                                 default=5000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=5,
                                 help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--continue_train', action='store_true',
                                 help='continue training: load the latest model')
        self.parser.add_argument(
            '--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest',
                                 help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument(
            '--niter', type=int, default=100, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=100,
                                 help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument(
            '--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument(
            '--lr', type=float, default=0.0002, help='initial learning rate for adam')
        self.parser.add_argument('--no_lsgan', action='store_true',
                                 help='do *not* use least square GAN, if false, use vanilla GAN')
        self.parser.add_argument(
            '--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
        self.parser.add_argument(
            '--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
        self.parser.add_argument('--pool_size', type=int, default=50,
                                 help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--no_html', action='store_true',
                                 help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--no_flip', action='store_true',
                                 help='if specified, do not flip the images for data argumentation')
        # NOT-IMPLEMENTED self.parser.add_argument('--preprocessing', type=str,
        # default='resize_and_crop', help='resizing/cropping strategy')
        # BaseOptions.parse() reads this flag to mark the run as training.
        self.isTrain = True
class TestOptions(BaseOptions):
    """Inference/testing options layered on top of BaseOptions."""
    def initialize(self):
        """Add test flags (result paths, sample counts, aspect ratio)."""
        BaseOptions.initialize(self)
        self.parser.add_argument(
            '--ntest', type=int, default=float("inf"), help='# of test examples.')
        self.parser.add_argument(
            '--results_dir', type=str, default='./results/', help='saves results here.')
        self.parser.add_argument(
            '--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        self.parser.add_argument(
            '--phase', type=str, default='test', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest',
                                 help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument(
            '--how_many', type=int, default=3, help='how many test images to run')
        # BaseOptions.parse() reads this flag to mark the run as inference.
        self.isTrain = False
|
import sys
input = sys.stdin.readline

# readline() keeps the trailing '\n', so len(thing) was one too large
# whenever 'java' was absent; strip the newline before measuring.
thing = input().rstrip('\n')
# Print the index of the first occurrence of 'java', or the text length.
print(thing.index('java') if 'java' in thing else len(thing))
|
from unittest import TestCase
from src import module_helpers
import json
class TestRequestsHelpers(TestCase):
    """Live-network smoke tests for module_helpers.RequestsHelpers."""

    h = module_helpers.RequestsHelpers()

    def test_request_json(self):
        url = 'http://echo.jsontest.com/key1/key-val/key2/key-val2'
        msg, val = self.h.request_json(url, return_code=200)
        if not val:
            self.fail()
        self.assertDictEqual({'key2': 'key-val2', 'key1': 'key-val'},
                             json.loads(msg))

    def test_request_content(self):
        msg, val = self.h.request_content('https://httpbin.org/ip')
        if not val:
            self.fail()
        # Parsing is the assertion: invalid JSON raises and fails the test.
        json.loads(msg)

    def test_request_raw(self):
        response, val = self.h.request_raw('https://httpbin.org/ip')
        if not val:
            self.fail()
        json.loads(response.content)
|
def min_max(lst):
    """Return (shortest_length, longest_length) over the strings in *lst*.

    Uses min()/max() in a single O(n) pass instead of fully sorting the
    lengths (O(n log n)) just to read the two extremes.
    """
    lengths = [len(s) for s in lst]
    return min(lengths), max(lengths)
def mxdiflg(a1, a2):
    """Maximum |len(x) - len(y)| over x in a1 and y in a2, or -1 if
    either list is empty.

    Self-contained rewrite: computes the extreme lengths directly rather
    than sorting via a helper. The maximum absolute difference is always
    attained at the extremes, so max(max1 - min2, max2 - min1) suffices
    (one of the two terms is non-negative and dominates).
    """
    if not a1 or not a2:
        return -1
    lens1 = [len(s) for s in a1]
    lens2 = [len(s) for s in a2]
    return max(max(lens1) - min(lens2), max(lens2) - min(lens1))
|
#!/usr/bin/env python3
'''This program takes the syslog.log file as input and searches for 'ticky' errors.
the output files are user_statistics.csv and error_message.csv
'''
import re
import operator
import sys
# Aggregation tables: error message -> count, username -> [info, error].
error_dict = {}
user_dict = {}
# INFO lines look like: "ticky: INFO <message> [#id] (<user>)"
info_pattern = r"ticky: INFO ([a-zA-Z0-9 ']*) (\[.*)"
# ERROR lines look like: "ticky: ERROR <message> (<user>)".
# Fixed character class: the original '[a-zA-Z0-0 '']' only matched the
# digit zero (and doubled the quote), so any error message containing
# digits 1-9 was silently dropped.
error_pattern = r"ticky: ERROR ([a-zA-Z0-9 ']*) (\(.*)"
# Username is the parenthesised token at the very end of the line.
name_pattern = r"\((\S*)\)$"
def get_errors(input_file):
    """Scan *input_file* line by line, updating the global user/error tables."""
    with open(input_file) as log:
        for record in log:
            user = get_username(record)
            # Records without a trailing (user) token are skipped entirely.
            if user:
                update_user_table(user, record)
                update_error_table(user, record)
def get_username(log_record):
    """Extract the trailing '(user)' token from a log line.

    Registers first-seen users in the global table with zeroed counts.
    Returns the username, or False when the line carries none.
    """
    match = re.search(name_pattern, log_record)
    if match is None:
        return False
    name = match.group(1)
    # setdefault registers the user only if not already present.
    user_dict.setdefault(name, [0, 0])
    return name
def update_user_table(username, log_record):
    """Increment *username*'s INFO count when the record is an INFO line.

    Fix: removed the leftover debug `print(log_record)` that dumped every
    matching INFO record to stdout on each run.
    """
    if re.search(info_pattern, log_record) is not None:
        user_dict[username][0] += 1
def update_error_table(username, log_record):
    """Count an ERROR record against both the user and error-message tables."""
    match = re.search(error_pattern, log_record)
    if match is None:
        return
    user_dict[username][1] += 1
    message = match.group(1)
    # dict.get with a default replaces the explicit membership check.
    error_dict[message] = error_dict.get(message, 0) + 1
def sort_dict(in_dict, itemno=0, revOrder=False):
    """Return in_dict's items as a list of pairs sorted by element *itemno*.

    itemno=0 sorts by key, itemno=1 by value; revOrder=True reverses.
    """
    return sorted(in_dict.items(), key=lambda pair: pair[itemno], reverse=revOrder)
def write_user_report(filename, dictlist):
    """Write (username, [info_count, error_count]) pairs to *filename* as CSV."""
    rows = ["Username,INFO,ERROR\n"]
    rows.extend("{},{},{}\n".format(user, counts[0], counts[1])
                for user, counts in dictlist)
    # `with` closes the file even if a write fails.
    with open(filename, "w") as out:
        out.writelines(rows)
def write_error_report(filename, dictlist):
    """Write (error_message, count) pairs to *filename* as CSV with a header."""
    with open(filename, "w") as out:
        out.write("Error,Count\n")
        for error, count in dictlist:
            out.write("{},{}\n".format(error, count))
# Script entry point: parse the log, then emit the two CSV reports.
# An alternative log file may be supplied as argv[1].
infile='syslog.log'
if len(sys.argv) > 1:
    infile = sys.argv[1]
get_errors(infile)
# Users sorted alphabetically by name; errors sorted by count, descending.
write_user_report("user_statistics.csv", sort_dict(user_dict, 0, False))
write_error_report("error_message.csv", sort_dict(error_dict, 1, True))
|
from linked_list import LinkedList
class Stack(LinkedList):
    """LIFO stack layered on LinkedList.

    NOTE(review): relies on LinkedList providing `prepend`, `head` and
    `tail`; the assertions below suggest push returns a NEW stack
    (persistent structure) rather than mutating — confirm against
    linked_list.py.
    """
    def push(self, val):
        ''' O(1) '''
        # Wrap val in a one-element Stack and prepend it to this one.
        return self.prepend(Stack(val))
    def pop(self):
        ''' O(1) '''
        # The rest of the stack, i.e. everything below the top element.
        return self.tail
# Smoke tests: build 0, push 1, push 2, then inspect the top and pop twice.
s = Stack(0).push(1).push(2)
assert s == Stack(2, Stack(1, Stack(0)))
assert s.head == 2
assert s.pop().head == 1
assert s.pop().pop().head == 0
|
"""
Models to cluster seuqences of articles by topic.
"""
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.nn import softmax
from sklearn.preprocessing import MinMaxScaler
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
class Cluster():
    """
    Class that creates an object for a single cluster of data.
    Stores its dataframe, the topic words describing it, a wordcloud,
    and per-article sentiment scores computed at construction time.
    Parameters
    ----------
    cluster : df
        DataFrame of an individual cluster
    topic : list
        List of words describing cluster
    wordcloud : wordcloud object
        Wordcloud object ready to be shown with matplotlib
    model_name : string (default: kmeans)
        Type of model used for clustering
    """
    def __init__(self, cluster, topic, wordcloud, **kwargs):
        # Keep only the display columns, with a clean 0..n-1 index.
        self.df = cluster[['title', 'url', 'publishedAt', 'author', 'source']].reset_index().drop(columns='index')
        self.topic = topic
        self.wordcloud = wordcloud
        # Reuse a caller-supplied tokenizer/model pair when provided;
        # otherwise download the default SST-2 sentiment model.
        if 'tokenizer' in kwargs:
            tokenizer = kwargs.get('tokenizer')
            model = kwargs.get('sa_model')
        else:
            tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
            model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
        texts = list(cluster["pre_processed_text"])
        encoded_input = tokenizer(texts,
                                return_tensors='tf',
                                padding=True,
                                max_length=500, # NOTE(review): may need tuning for longer articles
                                truncation=True)
        output = model(encoded_input)
        # Softmax over the two logits gives [P(negative), P(positive)] rows.
        my_array = softmax(output.logits).numpy()
        df = pd.DataFrame(my_array, columns = ['Negative','Positive'])
        # Signed score in [-1, 1]: positive minus negative probability.
        df['SA'] = df['Positive'] - df['Negative']
        # Optional Scaling (we may find out that news are not mostly negatively biased)
        self.scaler = MinMaxScaler(feature_range=(-1, 1)) # Instantiate scaler
        self.scaler.fit(df[['SA']]) # Fit scaler to data
        df['norm_SA'] = self.scaler.transform(df[['SA']]) # Use scaler to transform data
        # NOTE(review): only the raw 'SA' column is kept on self.df; the
        # scaled 'norm_SA' is computed but discarded — confirm intent.
        self.df = pd.concat([self.df,df[['SA']]],axis=1)
    def show_wordcloud(self, size=8):
        """
        Shows wordcloud using matplotlib
        """
        # NOTE(review): `size` is currently unused — the figure size is left at default.
        plt.imshow(self.wordcloud)
        plt.tight_layout(pad = 0)
        plt.show()
|
# -*- coding: utf-8 -*-
import codecs
import os
import pickle
import random
import re
import sys
import unittest
from tensorflow.keras import Model
import numpy as np
from sklearn.utils.validation import NotFittedError
try:
from seq2seq_lstm import Seq2SeqLSTM
from seq2seq_lstm.seq2seq_lstm import TextPairSequence
except:
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from seq2seq_lstm import Seq2SeqLSTM
from seq2seq_lstm.seq2seq_lstm import TextPairSequence
class TestSeq2SeqLSTM(unittest.TestCase):
def setUp(self):
self.data_set_name = os.path.join(os.path.dirname(__file__), '..', 'data', 'eng_rus_for_testing.txt')
self.model_name = os.path.join(os.path.dirname(__file__), '..', 'data', 'seq2seq_lstm.pkl')
def tearDown(self):
if os.path.isfile(self.model_name):
os.remove(self.model_name)
def test_creation(self):
seq2seq = Seq2SeqLSTM(batch_size=256, epochs=200, latent_dim=500, validation_split=0.1,
grad_clipping=50.0, lr=0.01, weight_decay=0.0001, lowercase=False, verbose=True)
self.assertIsInstance(seq2seq, Seq2SeqLSTM)
self.assertTrue(hasattr(seq2seq, 'batch_size'))
self.assertEqual(seq2seq.batch_size, 256)
self.assertTrue(hasattr(seq2seq, 'epochs'))
self.assertEqual(seq2seq.epochs, 200)
self.assertTrue(hasattr(seq2seq, 'latent_dim'))
self.assertEqual(seq2seq.latent_dim, 500)
self.assertTrue(hasattr(seq2seq, 'validation_split'))
self.assertAlmostEqual(seq2seq.validation_split, 0.1)
self.assertTrue(hasattr(seq2seq, 'grad_clipping'))
self.assertAlmostEqual(seq2seq.grad_clipping, 50.0)
self.assertTrue(hasattr(seq2seq, 'lr'))
self.assertAlmostEqual(seq2seq.lr, 0.01)
self.assertTrue(hasattr(seq2seq, 'weight_decay'))
self.assertAlmostEqual(seq2seq.weight_decay, 0.0001)
self.assertTrue(hasattr(seq2seq, 'lowercase'))
self.assertFalse(seq2seq.lowercase)
self.assertTrue(hasattr(seq2seq, 'verbose'))
self.assertTrue(seq2seq.verbose)
self.assertTrue(hasattr(seq2seq, 'random_state'))
self.assertIsNone(seq2seq.random_state)
def test_fit_positive01(self):
""" Input and target texts for training are the Python tuples. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(verbose=True, lr=1e-2)
res = seq2seq.fit(tuple(input_texts_for_training), tuple(target_texts_for_training))
self.assertIsInstance(res, Seq2SeqLSTM)
self.assertTrue(hasattr(res, 'input_token_index_'))
self.assertIsInstance(res.input_token_index_, dict)
self.assertTrue(hasattr(res, 'target_token_index_'))
self.assertIsInstance(res.target_token_index_, dict)
self.assertTrue(hasattr(res, 'reverse_target_char_index_'))
self.assertIsInstance(res.reverse_target_char_index_, dict)
self.assertTrue(hasattr(res, 'max_encoder_seq_length_'))
self.assertIsInstance(res.max_encoder_seq_length_, int)
self.assertGreater(res.max_encoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'max_decoder_seq_length_'))
self.assertIsInstance(res.max_decoder_seq_length_, int)
self.assertGreater(res.max_decoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'encoder_model_'))
self.assertIsInstance(res.encoder_model_, Model)
self.assertTrue(hasattr(res, 'decoder_model_'))
self.assertIsInstance(res.decoder_model_, Model)
def test_fit_positive02(self):
""" Input and target texts for training are the 1-D numpy arrays. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(lr=1e-2)
res = seq2seq.fit(np.array(input_texts_for_training), np.array(target_texts_for_training))
self.assertIsInstance(res, Seq2SeqLSTM)
self.assertTrue(hasattr(res, 'input_token_index_'))
self.assertIsInstance(res.input_token_index_, dict)
self.assertTrue(hasattr(res, 'target_token_index_'))
self.assertIsInstance(res.target_token_index_, dict)
self.assertTrue(hasattr(res, 'reverse_target_char_index_'))
self.assertIsInstance(res.reverse_target_char_index_, dict)
self.assertTrue(hasattr(res, 'max_encoder_seq_length_'))
self.assertIsInstance(res.max_encoder_seq_length_, int)
self.assertGreater(res.max_encoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'max_decoder_seq_length_'))
self.assertIsInstance(res.max_decoder_seq_length_, int)
self.assertGreater(res.max_decoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'encoder_model_'))
self.assertIsInstance(res.encoder_model_, Model)
self.assertTrue(hasattr(res, 'decoder_model_'))
self.assertIsInstance(res.decoder_model_, Model)
def test_fit_positive03(self):
""" Input and target texts for training are the Python lists. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(lr=1e-2)
res = seq2seq.fit(input_texts_for_training, target_texts_for_training)
self.assertIsInstance(res, Seq2SeqLSTM)
self.assertTrue(hasattr(res, 'input_token_index_'))
self.assertIsInstance(res.input_token_index_, dict)
self.assertTrue(hasattr(res, 'target_token_index_'))
self.assertIsInstance(res.target_token_index_, dict)
self.assertTrue(hasattr(res, 'reverse_target_char_index_'))
self.assertIsInstance(res.reverse_target_char_index_, dict)
self.assertTrue(hasattr(res, 'max_encoder_seq_length_'))
self.assertIsInstance(res.max_encoder_seq_length_, int)
self.assertGreater(res.max_encoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'max_decoder_seq_length_'))
self.assertIsInstance(res.max_decoder_seq_length_, int)
self.assertGreater(res.max_decoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'encoder_model_'))
self.assertIsInstance(res.encoder_model_, Model)
self.assertTrue(hasattr(res, 'decoder_model_'))
self.assertIsInstance(res.decoder_model_, Model)
def test_fit_positive04(self):
""" Early stopping is not used in the training process. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None, lr=1e-2)
res = seq2seq.fit(input_texts_for_training, target_texts_for_training)
self.assertIsInstance(res, Seq2SeqLSTM)
self.assertTrue(hasattr(res, 'input_token_index_'))
self.assertIsInstance(res.input_token_index_, dict)
self.assertTrue(hasattr(res, 'target_token_index_'))
self.assertIsInstance(res.target_token_index_, dict)
self.assertTrue(hasattr(res, 'reverse_target_char_index_'))
self.assertIsInstance(res.reverse_target_char_index_, dict)
self.assertTrue(hasattr(res, 'max_encoder_seq_length_'))
self.assertIsInstance(res.max_encoder_seq_length_, int)
self.assertGreater(res.max_encoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'max_decoder_seq_length_'))
self.assertIsInstance(res.max_decoder_seq_length_, int)
self.assertGreater(res.max_decoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'encoder_model_'))
self.assertIsInstance(res.encoder_model_, Model)
self.assertTrue(hasattr(res, 'decoder_model_'))
self.assertIsInstance(res.decoder_model_, Model)
def test_fit_positive05(self):
""" Prepared evaluation set is used in the early stopping criterion. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None, lr=1e-2)
res = seq2seq.fit(input_texts_for_training[:-20], target_texts_for_training[:-20],
eval_set=(input_texts_for_training[-20:], target_texts_for_training[-20:]))
self.assertIsInstance(res, Seq2SeqLSTM)
self.assertTrue(hasattr(res, 'input_token_index_'))
self.assertIsInstance(res.input_token_index_, dict)
self.assertTrue(hasattr(res, 'target_token_index_'))
self.assertIsInstance(res.target_token_index_, dict)
self.assertTrue(hasattr(res, 'reverse_target_char_index_'))
self.assertIsInstance(res.reverse_target_char_index_, dict)
self.assertTrue(hasattr(res, 'max_encoder_seq_length_'))
self.assertIsInstance(res.max_encoder_seq_length_, int)
self.assertGreater(res.max_encoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'max_decoder_seq_length_'))
self.assertIsInstance(res.max_decoder_seq_length_, int)
self.assertGreater(res.max_decoder_seq_length_, 0)
self.assertTrue(hasattr(res, 'encoder_model_'))
self.assertIsInstance(res.encoder_model_, Model)
self.assertTrue(hasattr(res, 'decoder_model_'))
self.assertIsInstance(res.decoder_model_, Model)
def test_fit_negative01(self):
""" Object with input texts is not one of the basic sequence types. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM()
true_err_msg = re.escape(f'`{type({1, 2})}` is wrong type for `X`.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(set(input_texts_for_training), target_texts_for_training)
def test_fit_negative02(self):
""" Object with target texts is not one of the basic sequence types. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM()
true_err_msg = re.escape(f'`{type({1, 2})}` is wrong type for `y`.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training, set(target_texts_for_training))
def test_fit_negative03(self):
""" Number of input texts does not equal to number of target texts. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM()
true_err_msg = re.escape(f'`X` does not correspond to `y`! {len(input_texts_for_training)} != {len(target_texts_for_training) - 1}.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training, target_texts_for_training[:-1])
def test_fit_negative04(self):
""" Some parameter of the `Seq2SeqLSTM` object is wrong. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(batch_size=0)
true_err_msg = re.escape('`batch_size` must be a positive number! 0 is not positive.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training, target_texts_for_training)
def test_fit_negative05(self):
""" Special evaluation set is neither list nor tuple. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None)
true_err_msg = re.escape(f'`eval_set` must be `{type((1, 2))}` or `{type([1, 2])}`, not `{type({1: "a", 2: "b"})}`!')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training[:-20], target_texts_for_training[:-20],
eval_set={'X': input_texts_for_training[-20:], 'y': target_texts_for_training[-20:]})
def test_fit_negative06(self):
""" Special evaluation set is not a two-element tuple. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None)
true_err_msg = re.escape('`eval_set` must be a two-element sequence! 3 != 2')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training[:-20], target_texts_for_training[:-20],
eval_set=(input_texts_for_training[-20:], target_texts_for_training[-20:], [3, 4]))
def test_fit_negative07(self):
""" Object with input texts in the special evaluation set is not one of the basic sequence types. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM()
true_err_msg = re.escape(f'`{type({1, 2})}` is wrong type for `X_eval_set`.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training[:-20], target_texts_for_training[:-20],
eval_set=(set(input_texts_for_training[-20:]), target_texts_for_training[-20:]))
def test_fit_negative08(self):
""" Object with target texts in the special evaluation set is not one of the basic sequence types. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM()
true_err_msg = re.escape(f'`{type({1, 2})}` is wrong type for `y_eval_set`.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training[:-20], target_texts_for_training[:-20],
eval_set=(input_texts_for_training[-20:], set(target_texts_for_training[-20:])))
def test_fit_negative09(self):
""" Number of input texts does not equal to number of target texts in the special evaluation set. """
input_texts_for_training, target_texts_for_training = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM()
true_err_msg = re.escape('`X_eval_set` does not correspond to `y_eval_set`! 20 != 19.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
seq2seq.fit(input_texts_for_training[:-20], target_texts_for_training[:-20],
eval_set=(input_texts_for_training[-20:], target_texts_for_training[-19:]))
def test_predict_positive001(self):
""" Part of correctly predicted texts must be greater than 0.1. """
input_texts, target_texts = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None, epochs=200, lr=1e-2, verbose=True, lowercase=False)
predicted_texts = seq2seq.fit_predict(input_texts, target_texts)
self.assertIsInstance(predicted_texts, list)
self.assertEqual(len(predicted_texts), len(input_texts))
indices = list(range(len(predicted_texts)))
random.shuffle(indices)
print('')
print('Some predicted texts:')
for ind in range(min(5, len(predicted_texts))):
print(' True: ' + self.detokenize_text(target_texts[indices[ind]]) +
'\t Predicted: ' + self.detokenize_text(predicted_texts[indices[ind]]))
self.assertGreater(self.estimate(predicted_texts, target_texts), 0.0001)
def test_predict_negative001(self):
""" Usage of the seq2seq model for prediction without training. """
input_texts_for_testing, _ = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None, epochs=20)
with self.assertRaises(NotFittedError):
_ = seq2seq.predict(input_texts_for_testing)
def test_predict_negative002(self):
""" Input texts for prediction are wrong. """
input_texts_for_testing, target_texts_for_testing = self.load_text_pairs(self.data_set_name)
seq2seq = Seq2SeqLSTM(validation_split=None, epochs=20)
seq2seq.fit(input_texts_for_testing, target_texts_for_testing)
true_err_msg = re.escape(f'`{type({1, 2})}` is wrong type for `X`.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
_ = seq2seq.predict(set(input_texts_for_testing))
def test_check_X_negative001(self):
""" All texts must be a string and have a `split` method. """
texts = ['123', 4, '567']
true_err_msg = re.escape('Sample 1 of `X` is wrong! This sample have not the `split` method.')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
Seq2SeqLSTM.check_X(texts, 'X')
def test_check_X_negative002(self):
""" If list of texts is specified as the NumPy array, then it must be a 1-D array. """
texts = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])
true_err_msg = re.escape('`X` must be a 1-D array!')
try:
checking_method = self.assertRaisesRegex
except:
checking_method = self.assertRaisesRegexp
with checking_method(ValueError, true_err_msg):
Seq2SeqLSTM.check_X(texts, 'X')
    def test_serialize_untrained(self):
        """ An untrained model must survive a pickle round trip with every hyper-parameter intact. """
        seq2seq = Seq2SeqLSTM(batch_size=256, epochs=200, latent_dim=500, validation_split=0.1,
                              grad_clipping=50.0, lr=0.01, weight_decay=0.0001, lowercase=False, verbose=True,
                              random_state=42)
        with open(self.model_name, 'wb') as fp:
            pickle.dump(seq2seq, fp)
        with open(self.model_name, 'rb') as fp:
            another_seq2seq = pickle.load(fp)
        self.assertIsInstance(another_seq2seq, Seq2SeqLSTM)
        # Each constructor argument must be restored exactly as it was set above.
        self.assertTrue(hasattr(another_seq2seq, 'batch_size'))
        self.assertEqual(another_seq2seq.batch_size, 256)
        self.assertTrue(hasattr(another_seq2seq, 'epochs'))
        self.assertEqual(another_seq2seq.epochs, 200)
        self.assertTrue(hasattr(another_seq2seq, 'latent_dim'))
        self.assertEqual(another_seq2seq.latent_dim, 500)
        self.assertTrue(hasattr(another_seq2seq, 'validation_split'))
        self.assertAlmostEqual(another_seq2seq.validation_split, 0.1)
        self.assertTrue(hasattr(another_seq2seq, 'grad_clipping'))
        self.assertAlmostEqual(another_seq2seq.grad_clipping, 50.0)
        self.assertTrue(hasattr(another_seq2seq, 'lr'))
        self.assertAlmostEqual(another_seq2seq.lr, 0.01)
        self.assertTrue(hasattr(another_seq2seq, 'weight_decay'))
        self.assertAlmostEqual(another_seq2seq.weight_decay, 0.0001)
        self.assertTrue(hasattr(another_seq2seq, 'lowercase'))
        self.assertFalse(another_seq2seq.lowercase)
        self.assertTrue(hasattr(another_seq2seq, 'verbose'))
        self.assertTrue(another_seq2seq.verbose)
        self.assertTrue(hasattr(another_seq2seq, 'random_state'))
        self.assertEqual(another_seq2seq.random_state, 42)
    def test_serialize_trained(self):
        """ A trained model must predict identically before and after a pickle round trip. """
        input_texts, target_texts = self.load_text_pairs(self.data_set_name)
        # Hold out a random 20% of the pairs for testing; train on the rest.
        indices = list(range(len(input_texts)))
        random.shuffle(indices)
        n = int(round(0.2 * len(indices)))
        input_texts_for_training = []
        target_texts_for_training = []
        for ind in indices[:-n]:
            input_texts_for_training.append(input_texts[ind])
            target_texts_for_training.append(target_texts[ind])
        input_texts_for_testing = []
        target_texts_for_testing = []
        for ind in indices[-n:]:
            input_texts_for_testing.append(input_texts[ind])
            target_texts_for_testing.append(target_texts[ind])
        seq2seq = Seq2SeqLSTM(validation_split=None, epochs=10, lr=1e-3)
        seq2seq.fit(input_texts_for_training, target_texts_for_training,
                    eval_set=(input_texts_for_testing, target_texts_for_testing))
        predicted_texts_1 = seq2seq.predict(input_texts_for_testing)
        with open(self.model_name, 'wb') as fp:
            pickle.dump(seq2seq, fp)
        # Delete the original so the comparison below can only use the reloaded model.
        del seq2seq
        with open(self.model_name, 'rb') as fp:
            another_seq2seq = pickle.load(fp)
        predicted_texts_2 = another_seq2seq.predict(input_texts_for_testing)
        self.assertEqual(predicted_texts_1, predicted_texts_2)
def test_tokenize_text_positive01(self):
""" Tokenization with saving of the characters register. """
src = 'a\t B c Мама мыла \n\r раму 1\n'
dst_true = ['a', 'B', 'c', 'Мама', 'мыла', 'раму', '1']
dst_predicted = Seq2SeqLSTM.tokenize_text(src, lowercase=False)
self.assertEqual(dst_predicted, dst_true)
def test_tokenize_text_positive02(self):
""" Tokenization with bringing the resulting tokens to lowercase. """
src = 'a\t B c Мама мыла \n\r раму 1\n'
dst_true = ['a', 'b', 'c', 'мама', 'мыла', 'раму', '1']
dst_predicted = Seq2SeqLSTM.tokenize_text(src, lowercase=True)
self.assertEqual(dst_predicted, dst_true)
@staticmethod
def load_text_pairs(file_name):
input_texts = list()
target_texts = list()
line_idx = 1
with codecs.open(file_name, mode='r', encoding='utf-8', errors='ignore') as fp:
cur_line = fp.readline()
while len(cur_line) > 0:
prep_line = cur_line.strip()
if len(prep_line) > 0:
err_msg = f'File "{file_name}": line {line_idx} is wrong!'
line_parts = prep_line.split('\t')
assert len(line_parts) == 2, err_msg
new_input_text = line_parts[0].strip()
new_target_text = line_parts[1].strip()
assert (len(new_input_text) > 0) and (len(new_target_text) > 0), err_msg
input_texts.append(TestSeq2SeqLSTM.tokenize_text(new_input_text))
target_texts.append(TestSeq2SeqLSTM.tokenize_text(new_target_text))
cur_line = fp.readline()
line_idx += 1
return input_texts, target_texts
@staticmethod
def tokenize_text(src):
tokens = list()
for cur in src.split():
tokens += list(cur)
tokens.append('<space>')
return ' '.join(tokens[:-1])
@staticmethod
def detokenize_text(src):
new_text = ''
for cur_token in src.split():
if cur_token == '<space>':
new_text += ' '
else:
new_text += cur_token
return new_text.strip()
@staticmethod
def estimate(predicted_texts, true_texts):
n_corr = 0
n_total = len(predicted_texts)
for i in range(n_total):
cur_predicted = TestSeq2SeqLSTM.detokenize_text(predicted_texts[i]).lower()
cur_true = TestSeq2SeqLSTM.detokenize_text(true_texts[i]).lower()
if cur_predicted == cur_true:
n_corr += 1
return n_corr / float(n_total)
class TestTextPairSequence(unittest.TestCase):
    """ Tests for the `TextPairSequence` batch generator. """
    def test_generate_data_for_training(self):
        """ Five text pairs with batch_size=2 must be one-hot encoded into three batches.

        NOTE: the third expected batch re-uses the first text pair as its second
        sample, i.e. the generator wraps around so every batch stays full.
        """
        input_texts = [
            'a b c',
            'a c',
            '0 1 b',
            'b a',
            'b c'
        ]
        target_texts = [
            'а б а 2',
            '2 3',
            'а б а',
            'б а',
            'б 3'
        ]
        batch_size = 2
        max_encoder_seq_length = 3
        max_decoder_seq_length = 6
        input_token_index = {'0': 0, '1': 1, 'a': 2, 'b': 3, 'c': 4}
        target_token_index = {'\t': 0, '\n': 1, '2': 2, '3': 3, 'а': 4, 'б': 5}
        # Hand-computed expectations.  Each batch is a pair
        # ([encoder_input_data, decoder_input_data], decoder_target_data) of
        # one-hot encoded tokens; shorter sequences are padded with zero rows.
        # Decoder input starts with '\t' (index 0); decoder target is the same
        # sequence shifted left by one step and terminated by '\n' (index 1).
        true_batches = [
            (
                [
                    np.array([
                        [
                            [0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0]
                        ],
                        [
                            [0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0]
                        ]
                    ]),
                    np.array([
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
                        ],
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        ]
                    ])
                ],
                np.array([
                    [
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                    ],
                    [
                        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                    ]
                ])
            ),
            (
                [
                    np.array([
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0, 0.0]
                        ],
                        [
                            [0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0]
                        ]
                    ]),
                    np.array([
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                        ],
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                        ]
                    ])
                ],
                np.array([
                    [
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                    ],
                    [
                        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                    ]
                ])
            ),
            (
                [
                    np.array([
                        [
                            [0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0]
                        ],
                        [
                            [0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0]
                        ]
                    ]),
                    np.array([
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                        ],
                        [
                            [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                            [0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
                        ]
                    ])
                ],
                np.array([
                    [
                        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                        [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                    ],
                    [
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
                        [0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                        [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
                        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                    ]
                ])
            )
        ]
        training_set_generator = TextPairSequence(
            input_texts=input_texts, target_texts=target_texts, batch_size=batch_size,
            max_encoder_seq_length=max_encoder_seq_length, max_decoder_seq_length=max_decoder_seq_length,
            input_token_index=input_token_index, target_token_index=target_token_index, lowercase=False
        )
        # All constructor arguments must be exposed as attributes, unchanged
        # (and for mutable arguments, as the very same objects).
        self.assertIsInstance(training_set_generator, TextPairSequence)
        self.assertTrue(hasattr(training_set_generator, 'input_texts'))
        self.assertTrue(hasattr(training_set_generator, 'target_texts'))
        self.assertTrue(hasattr(training_set_generator, 'batch_size'))
        self.assertTrue(hasattr(training_set_generator, 'max_encoder_seq_length'))
        self.assertTrue(hasattr(training_set_generator, 'max_decoder_seq_length'))
        self.assertTrue(hasattr(training_set_generator, 'input_token_index'))
        self.assertTrue(hasattr(training_set_generator, 'target_token_index'))
        self.assertTrue(hasattr(training_set_generator, 'lowercase'))
        self.assertTrue(hasattr(training_set_generator, 'n_text_pairs'))
        self.assertTrue(hasattr(training_set_generator, 'n_batches'))
        self.assertIs(training_set_generator.input_texts, input_texts)
        self.assertIs(training_set_generator.target_texts, target_texts)
        self.assertEqual(training_set_generator.batch_size, batch_size)
        self.assertEqual(training_set_generator.max_encoder_seq_length, max_encoder_seq_length)
        self.assertEqual(training_set_generator.max_decoder_seq_length, max_decoder_seq_length)
        self.assertIs(training_set_generator.input_token_index, input_token_index)
        self.assertIs(training_set_generator.target_token_index, target_token_index)
        self.assertFalse(training_set_generator.lowercase)
        self.assertIsInstance(training_set_generator.n_text_pairs, int)
        self.assertEqual(training_set_generator.n_text_pairs, len(input_texts))
        self.assertIsInstance(training_set_generator.n_batches, int)
        self.assertEqual(training_set_generator.n_batches, len(true_batches))
        # Every generated batch must match the hand-computed expectation exactly.
        for batch_ind in range(len(true_batches)):
            predicted_batch = training_set_generator[batch_ind]
            self.assertIsInstance(predicted_batch, tuple, msg=f'batch_ind={batch_ind}')
            self.assertEqual(len(predicted_batch), 2, msg=f'batch_ind={batch_ind}')
            self.assertIsInstance(predicted_batch[0], list, msg=f'batch_ind={batch_ind}')
            self.assertIsInstance(predicted_batch[1], np.ndarray, msg=f'batch_ind={batch_ind}')
            self.assertEqual(len(predicted_batch[0]), 2, msg=f'batch_ind={batch_ind}')
            self.assertIsInstance(predicted_batch[0][0], np.ndarray, msg=f'batch_ind={batch_ind}')
            self.assertIsInstance(predicted_batch[0][1], np.ndarray, msg=f'batch_ind={batch_ind}')
            self.assertTrue(np.array_equal(predicted_batch[0][0], true_batches[batch_ind][0][0]),
                            msg=f'batch_ind={batch_ind}, encoder_input_data')
            self.assertTrue(np.array_equal(predicted_batch[0][1], true_batches[batch_ind][0][1]),
                            msg=f'batch_ind={batch_ind}, decoder_input_data')
            self.assertTrue(np.array_equal(predicted_batch[1], true_batches[batch_ind][1]),
                            msg=f'batch_ind={batch_ind}, decoder_target_data')
if __name__ == '__main__':
    # Run the full suite, printing each test name as it executes.
    unittest.main(verbosity=2)
|
from django.shortcuts import render, redirect, reverse
from django.contrib import messages
from .models import Users
# render
def index( request ):
    """Render the login page; users who already have a session go straight to the book list."""
    logged_in = 'login_id' in request.session
    if not logged_in:
        return render( request, "login/index.html" )
    return redirect( "book_reviews:index" )
# action
def register( request ):
    """Handle the registration form: on success store the user in the session,
    on validation failure flash the errors and return to the login page."""
    if request.method != "POST":
        # Direct GET access: nothing to process.
        return redirect( reverse( "login:index" ) )
    db_result = Users.objects.register( request.POST )
    if not db_result['status']:
        messages.add_message( request, messages.INFO, "Unable to register!" )
        for error_text in db_result['errors']:
            messages.add_message( request, messages.INFO, "- " + error_text )
        return redirect( reverse( "login:index" ) )
    new_user = db_result['user']
    request.session['login_id'] = new_user.id
    request.session['email'] = new_user.email
    request.session['name'] = new_user.name
    request.session['alias'] = new_user.alias
    request.session['new_registration'] = 1
    return redirect( reverse( "book_reviews:index" ) )
# action
def login( request ):
    """Handle the login form: on success store the user in the session,
    on failure flash the errors and return to the login page."""
    if request.method != "POST":
        # Direct GET access: nothing to process.
        return redirect( reverse( "login:index" ) )
    db_result = Users.objects.login( request.POST )
    if not db_result['status']:
        messages.add_message( request, messages.INFO, "Unable to login!" )
        for error_text in db_result['errors']:
            messages.add_message( request, messages.INFO, "- " + error_text )
        return redirect( reverse ( "login:index" ) )
    logged_user = db_result['user']
    request.session['login_id'] = logged_user.id
    request.session['email'] = logged_user.email
    request.session['name'] = logged_user.name
    request.session['alias'] = logged_user.alias
    return redirect( reverse( "book_reviews:index" ) )
# action
def logout( request ):
    """Log the user out and return to the login page.

    Uses session.flush() instead of clear(): flush() deletes the session data
    from the store AND rotates the session key, so the old cookie cannot be
    replayed after logout (clear() only empties the in-memory dict).
    """
    request.session.flush()
    return redirect( reverse( "login:index" ) )
# # redirect
# def catcher( request ):
# return redirect( reverse( "login:index" ) )
|
import os
import os.path
import zipfile
from urllib import request
import requests
from win32com.client import Dispatch
def get_version_via_com(filename):
    """Return the Windows file-version string of *filename* via the
    Scripting.FileSystemObject COM interface."""
    file_system = Dispatch("Scripting.FileSystemObject")
    return file_system.GetFileVersion(filename)
def download_from_web(dir_path, chrome_drive_version):
    """Download the chromedriver matching the installed Chrome major version into *dir_path*.

    Looks up the exact driver release for *chrome_drive_version*, removes any
    existing chromedriver.exe, downloads and extracts the win32 zip, deletes the
    archive and appends *dir_path* to PATH.
    """
    # HTTPS instead of the original plain-HTTP URL; same Google Storage bucket.
    link = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_" + chrome_drive_version
    f = requests.get(link)
    chromedriver_version = f.text
    driver_path = os.path.join(dir_path, 'chromedriver.exe')
    if os.path.isfile(driver_path):
        try:
            os.remove(driver_path)
        except OSError:
            raise OSError(f'Cannot delete file chromedriver.exe from {dir_path}')
    # os.path.join instead of hand-built backslash paths.
    archive_path = os.path.join(dir_path, 'chromedriver_win32.zip')
    request.urlretrieve(
        f"https://chromedriver.storage.googleapis.com/{chromedriver_version}/chromedriver_win32.zip",
        archive_path)
    # Context manager guarantees the archive handle is closed before deletion
    # (the original leaked it on an extraction error).
    with zipfile.ZipFile(archive_path, "r") as zip_ref:
        zip_ref.extractall(dir_path)
    if os.path.isfile(archive_path):
        os.remove(archive_path)
    os.environ['PATH'] += os.pathsep + dir_path
    print(f"New Chrome Driver downloaded into {dir_path} folder")
def download_chrome_driver(driver_dir):
    """Refresh the local chromedriver if its major version differs from the installed Chrome.

    :param driver_dir: directory holding chromedriver.exe; None means ./drivers next to this file.
    """
    dir_path = driver_dir
    if driver_dir is None:
        dir_path = os.path.join(os.path.dirname(__file__), 'drivers')
    # Sentinels chosen so that 0 != 1 forces a download when either Chrome or
    # the driver cannot be inspected.
    chrome_drive_version = 0
    curr_chrome_drive_version = 1
    # path = r"C:\Program Files\Google\Chrome\Application\chrome.exe"
    path = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"
    if os.path.isfile(path):
        chrome_version = get_version_via_com(path)
        # Keep only the major.minor.build part (strip the last ".patch").
        chrome_drive_version = chrome_version[:chrome_version.rfind(".")]
    driver_path = os.path.join(dir_path, 'chromedriver.exe')
    if os.path.isfile(driver_path):
        my_cmd = f'{driver_path} --version'
        # os.system(my_cmd)
        # `chromedriver --version` prints e.g. "ChromeDriver 89.0.4389.23 (...)";
        # extract the text between the first space and the last dot.
        with os.popen(my_cmd) as proc:
            full_curr_chrome_drive_version = proc.read()
        curr_chrome_drive_version = full_curr_chrome_drive_version[full_curr_chrome_drive_version.find(' ')+1:full_curr_chrome_drive_version.rfind(".")]
    # print(f'Existing Chrome Driver dir:{dir_path}')
    print(f'Chrome Driver Version New ####{chrome_drive_version}#### Existing ####{curr_chrome_drive_version}####')
    if chrome_drive_version != curr_chrome_drive_version:
        download_from_web(dir_path, chrome_drive_version)
|
from common.run_method import RunMethod
import allure
@allure.step("极运营/工作台/在读学员数据")
def staging_queryDirectorReportForStageRead_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST wrapper for the "workbench / enrolled students" report endpoint.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "极运营/工作台/在读学员数据"
    url = f"/service-crm/staging/queryDirectorReportForStageRead"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/工作台/流失学员数据")
def staging_queryDirectorReportForStageRefund_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST wrapper for the "workbench / churned students" report endpoint.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "极运营/工作台/流失学员数据"
    url = f"/service-crm/staging/queryDirectorReportForStageRefund"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/CRM/班主任/单个校区-招生人次")
def staging_geek_data_person_time_count_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST wrapper for the "single campus - enrollment headcount" endpoint.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "极运营/CRM/班主任/单个校区-招生人次"
    url = f"/service-crm/staging/geek/data/person-time-count"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/CRM/班主任/单个区域-招生人次列表")
def staging_geek_data_schools_person_time_count_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST wrapper for the "single region - enrollment headcount list" endpoint.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "极运营/CRM/班主任/单个区域-招生人次列表"
    url = f"/service-crm/staging/geek/data/schools-person-time-count"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极运营/CRM/班主任/单个区域-招生人次区域")
def staging_geek_get_areas_get(params=None, header=None, return_json=True, **kwargs):
    '''
    GET wrapper for the "enrollment headcount - regions" lookup endpoint.

    :param params: query-string parameters appended to the URL
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "极运营/CRM/班主任/单个区域-招生人次区域"
    url = f"/service-crm/staging/geek/get-areas"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("CRM/工作台/提成人次")
def staging_geek_data_commissionPersonTimeCount_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST wrapper for the "workbench / commission headcount" endpoint.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "CRM/工作台/提成人次"
    url = f"/service-crm/staging/geek/data/commissionPersonTimeCount"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("CRM/工作台/校区提成人次")
def staging_geek_data_schoolCommissionPersonTimeCount_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    POST wrapper for the "workbench / campus commission headcount" endpoint.

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: whether to return the response parsed as JSON (default: yes)
    :param header: request headers
    :param kwargs: extra options forwarded to RunMethod.run_request (e.g. target host/environment)
    :return: JSON-parsed response by default; the raw response when return_json=False
    '''
    name = "CRM/工作台/校区提成人次"
    url = f"/service-crm/staging/geek/data/schoolCommissionPersonTimeCount"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
# Generated by Django 2.1.2 on 2019-01-07 11:01
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: constrain `Type.direction_type` to a unique integer in [0, 999]."""
    dependencies = [
        ('products', '0035_type_direction_type'),
    ]
    operations = [
        migrations.AlterField(
            model_name='type',
            name='direction_type',
            field=models.IntegerField(help_text='Порядок показу типу(відносно категорії): менше значить вище', unique=True, validators=[django.core.validators.MaxValueValidator(999), django.core.validators.MinValueValidator(0)], verbose_name='Пріорітет показу'),
        ),
    ]
|
import tweepy, markovify, random
# Keys and Tokens to access @DJTweetbot1
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# Get random book number, with better naming convention this will change
num = random.randint(1, 900)
# Set the filename, hopefully I can find a clearer naming convention soon
openFile = "sources\\" + str(num) + '.txt'
# Explicit encoding so the script does not depend on the Windows locale
# (assumes the source books are UTF-8 — confirm against the corpus).
with open(openFile, encoding='utf-8') as f:
    file = f.read()
text = markovify.Text(file)
# Make the tweet; num prefixes it so we know which book was used.
# BUG FIX: make_sentence() returns None when it cannot build a valid sentence,
# which previously crashed the `str + None` concatenation — retry instead.
tweet = None
while tweet is None or len(tweet) > 280 or len(tweet) < 95:
    sentence = text.make_sentence()
    if sentence is None:
        continue
    tweet = str(num) + ': ' + sentence
# Update @DJTweetbot1 status, print what was sent
api.update_status(tweet)
print(tweet)
|
import torch
import torch.nn as nn
import numpy as np
from src.data_processing.transform_data import Augment
class MixMatch(object):
    """Implements the MixMatch semi-supervised batch construction:
    augment labeled and unlabeled images, guess and sharpen labels for the
    unlabeled ones, then MixUp everything against a shuffled pool.
    """
    def __init__(self, model, batch_size, device, T=0.5, K=2, alpha=0.75):
        self.T = T                      # sharpening temperature
        self.K = K                      # number of augmentations per unlabeled image
        self.batch_size = batch_size
        self.alpha = alpha              # Beta(alpha, alpha) parameter for MixUp
        self.softmax = nn.Softmax(dim=1)
        self.model = model
        self.device = device
        self.n_labels = 10 # Warning! hardcoded
        self.beta = torch.distributions.beta.Beta(alpha, alpha)
    def run(self, x_imgs, x_labels, u_imgs):
        """Build one MixMatch training batch from labeled (x) and unlabeled (u) images.

        Returns ((x_prime, p_prime), (u_prime, q_prime)): mixed labeled and
        unlabeled images with their (soft) labels.
        """
        # One hot encoding
        x_labels = self.one_hot_encoding(x_labels)
        # NOTE(review): the result of .to() is discarded; harmless here because
        # one_hot_encoding already allocates on self.device, but confirm.
        x_labels.to(self.device)
        # Augment
        augment_once = Augment(K=1)
        augment_k = Augment(K=self.K)
        x_hat = augment_once(x_imgs) # shape (1, batch_size, 3, 32, 32)
        u_hat = augment_k(u_imgs) # shape (K, batch_size, 3, 32, 32)
        # Generate guessed labels (averaged over the K augmentations, then sharpened)
        q_bar = self.guess_label(u_hat)
        q = self.sharpen(q_bar) # shape (K, batch_size, 10)
        x_hat = x_hat.reshape((-1, 3, 32, 32)) # shape (batch_size, 3, 32, 32)
        u_hat = u_hat.reshape((-1, 3, 32, 32)) # shape (K*batch_size, 3, 32, 32)
        # Every augmentation of an unlabeled image shares the same guessed label.
        q = q.repeat(self.K, 1, 1).reshape(-1, 10) # shape (K*batch_size, 10)
        # Concat and shuffle: W is the mixing pool for MixUp.
        w_imgs = torch.cat((x_hat, u_hat))
        w_labels = torch.cat((x_labels, q))
        w_imgs, w_labels = self.shuffle_matrices(w_imgs, w_labels)
        # Apply MixUp: labeled images mix with the first batch_size pool entries,
        # unlabeled ones with the rest.
        x_prime, p_prime = self.mixup(x_hat, w_imgs[:self.batch_size], x_labels, w_labels[:self.batch_size])
        u_prime, q_prime = self.mixup(u_hat, w_imgs[self.batch_size:], q, w_labels[self.batch_size:])
        return (x_prime, p_prime), (u_prime, q_prime)
    def mixup(self, x1, x2, p1, p2):
        """Convex-combine images (x1, x2) and labels (p1, p2) with a per-sample
        lambda >= 0.5, so the result stays closer to the first argument."""
        n_samples = x1.shape[0]
        lambda_rand = self.beta.sample([n_samples, 1, 1, 1]).to(self.device) # one lambda per sample
        lambda_prime = torch.max(lambda_rand, 1 - lambda_rand).to(self.device)
        x_prime = lambda_prime * x1 + (1 - lambda_prime) * x2
        lambda_prime = lambda_prime.reshape(-1, 1)
        p_prime = lambda_prime * p1 + (1 - lambda_prime) * p2
        return x_prime, p_prime
    def sharpen(self, q_bar):
        """Lower the entropy of the guessed label distribution by temperature T
        (power then renormalize along the class axis)."""
        #q_bar = q_bar.numpy()
        q = torch.pow(q_bar, 1 / self.T) / torch.sum(torch.pow(q_bar, 1 / self.T), dim=1)[:, np.newaxis]
        return q
    def guess_label(self, u_hat):
        """Average the model's softmax predictions over the K augmentations."""
        # Do not change model to eval mode! label guessing must be done in train mode
        with torch.no_grad():
            q_bar = torch.zeros([self.batch_size, self.n_labels], device=self.device)
            for k in range(self.K):
                q_bar += self.softmax(self.model(u_hat[k]))
            q_bar /= self.K
        return q_bar
    def one_hot_encoding(self, labels):
        """Return a (len(labels), n_labels) float32 one-hot matrix on self.device."""
        shape = (labels.shape[0], self.n_labels)
        one_hot = torch.zeros(shape, dtype=torch.float32, device=self.device)
        rows = torch.arange(labels.shape[0])
        one_hot[rows, labels] = 1
        return one_hot
    # shuffles along the first axis (axis 0)
    def shuffle_matrices(self, m1, m2):
        """Apply the same random row permutation to both tensors."""
        n_samples = m1.shape[0]
        rand_indexes = torch.randperm(n_samples)
        m1 = m1[rand_indexes]
        m2 = m2[rand_indexes]
        return m1, m2
|
import pytest
from ethereum.tools.tester import TransactionFailed
from plasma_core.constants import NULL_ADDRESS
def test_token_adding(token, root_chain):
    # A token can be registered exactly once: hasToken flips to True after
    # addToken, and a second addToken for the same address must revert.
    assert not root_chain.hasToken(token.address)
    root_chain.addToken(token.address)
    assert root_chain.hasToken(token.address)
    with pytest.raises(TransactionFailed):
        root_chain.addToken(token.address)
def test_token_adding_gas_cost(ethtester, root_chain):
    # Two arbitrary distinct 20-byte token addresses.
    ADDRESS_A = b'\x00' * 19 + b'\x01'
    ADDRESS_B = b'\x00' * 19 + b'\x02'
    # Informational benchmark only (no assertions): report the gas used by the
    # first and second addToken call.  The message wording suggests addToken
    # deploys a PriorityQueue per token — confirm against the RootChain contract.
    root_chain.addToken(ADDRESS_A)
    gas = ethtester.chain.last_gas_used()
    print("PriorityQueue first deployment costs {} gas".format(gas))
    root_chain.addToken(ADDRESS_B)
    gas = ethtester.chain.last_gas_used()
    print("PriorityQueue second deployment costs {} gas".format(gas))
def test_token_adding_eth_token_should_fail(root_chain):
    # NULL_ADDRESS (ETH) is registered out of the box, so adding it again must revert.
    assert root_chain.hasToken(NULL_ADDRESS)
    with pytest.raises(TransactionFailed):
        root_chain.addToken(NULL_ADDRESS)
|
from enum import Enum
class Light(Enum):
    """
    Light is either RED or GREEN
    """
    RED = 0    # stop state
    GREEN = 1  # go state
|
from HighAI.High_AI.envs.high_env import HighEnv
from ray import tune
from ray.rllib.agents.dqn import DQNTrainer
from ray.tune.registry import register_env
#import High_AI
# Make the custom environment resolvable by name from the Tune config below.
register_env("high_v1", HighEnv)
config = {
    "env": "high_v1"
}
# Stop the experiment after 100k trained environment steps.
stop = {
    "info/num_steps_trained": 100000
}
# NOTE(review): DQNTrainer is imported but tune.run receives the string "DQN";
# the import looks unused — confirm before removing.
tune.run("DQN", config=config, stop=stop)
|
# -*- encoding: utf-8 -*-
import tempfile
import os
import urllib.request
from threading import Thread
try:
from .functions import *
except SystemError as e:
from functions import *
__all__ = ["clear_cache", "ImageManagerError", "ImageLoader", "ImageManager"]
TIMEOUT = 20 # seconds
CACHE_FILE = os.path.join(tempfile.gettempdir(), 'SublimeTextMarkdownInlineImages.cache.txt')
CACHE_LINE_SEPARATOR = '-%-CACHE-SEPARATOR-%-'
def clear_cache():
    """Delete the on-disk base64 image cache; a missing cache file is not an error."""
    try:
        os.remove(CACHE_FILE)
    except FileNotFoundError:
        # Nothing was cached yet — nothing to clear.
        pass
class ImageManagerError(Exception):
    """Raised by ImageManager, e.g. when an image is requested while it is already loading."""
    pass
class ImageLoader(Thread):
    """Background thread that reads a local file or fetches a remote URL and
    passes the raw bytes — or the raised exception — to *callback*."""
    def __init__(self, url, callback):
        Thread.__init__(self)
        self.url = url
        self.callback = callback
    def run(self):
        is_remote = self.url.startswith(('http://', 'https://'))
        if not is_remote:
            # Local path: read directly; errors propagate as in the original.
            with open(self.url, 'rb') as source:
                self.callback(source.read())
            return
        try:
            response = urllib.request.urlopen(self.url, None, TIMEOUT)
        except Exception as error:
            # Hand the exception itself to the callback instead of raising
            # inside the thread.
            self.callback(error)
        else:
            self.callback(response.read())
class ImageManager:
    """Loads a local or remote image, converts it to base64, saves it to a cache file and runs the
    user callback"""
    # url -> ImageLoader thread, for images currently being fetched.
    currently_loading_images = {}
    @classmethod
    def get_cache_for(cls, url):
        # Return the cached base64 payload for *url*, or None when not cached.
        if not os.path.exists(CACHE_FILE):
            return
        with open(CACHE_FILE) as fp:
            for line in fp.read().splitlines():
                # Cache line format: <url><CACHE_LINE_SEPARATOR><base64>.
                line = line.split(CACHE_LINE_SEPARATOR, 1)
                if line[0] == url:
                    return line[1]
    @classmethod
    def get_callback_for(cls, url, user_callback):
        # Wrap *user_callback* so successful downloads are appended to the
        # cache file before being delivered.
        def callback(image_content):
            del cls.currently_loading_images[url]
            if isinstance(image_content, Exception):
                # Load failed: forward the exception, do not cache it.
                return user_callback(url, image_content)
            base64 = convert_to_base64(image_content)
            with open(CACHE_FILE, 'a') as fp:
                fp.write('\n' + url + CACHE_LINE_SEPARATOR + base64)
            user_callback(url, base64)
        return callback
    @classmethod
    def get(cls, url, user_callback):
        """url can be a local path too
        user_callback is a function that takes the path, and the base64 content
        """
        if url in cls.currently_loading_images:
            raise ImageManagerError("Currently loading the image '{}'".format(url))
        # Serve from cache synchronously when possible.
        cache = cls.get_cache_for(url)
        if cache:
            return user_callback(url, cache)
        # actually load the image
        # _callback will save the image to the cache AND run the user callback with the base64 image
        callback = cls.get_callback_for(url, user_callback)
        cls.currently_loading_images[url] = ImageLoader(url, callback)
        cls.currently_loading_images[url].start()
if __name__ == '__main__':
    # Manual smoke test: fetch two remote images and print a prefix of each base64 payload.
    callback = lambda path, base64: print("Got '{}', '{}'".format(path, base64[:50]))
    ImageManager.get('https://i.ytimg.com/vi/C2O7lM0bU0g/maxresdefault.jpg', callback)
    ImageManager.get("http://2017.animationdingle.com/wp-content/uploads/2016/08/hello_world.gif", callback)
|
#/usr/bin/env python
#coding=utf-8
import jieba
import sys
jieba.load_userdict('../Jieba/dict.txt')
# Filter: keep only sentence pairs whose token-overlap similarity is >= 0.5.
def process(inpath, outpath):
    """Copy lines from *inpath* to *outpath* when the two tab-separated
    sentences share at least half of their (jieba-segmented) tokens.

    Input line format: lineno<TAB>sentence1<TAB>sentence2<TAB>label
    """
    with open(inpath, 'r', encoding='utf-8') as fin, open(outpath, 'w', encoding='utf-8') as fout:
        for line in fin:
            # BUG FIX: lines read from a file always contain '\n', so the
            # original `len(line) == 0` never triggered; strip() correctly
            # skips blank lines that would crash the unpacking below.
            if not line.strip():
                continue
            lineno, sen1, sen2, label = line.strip().split('\t')
            words1 = [w for w in jieba.cut(sen1) if w.strip()]
            words2 = [w for w in jieba.cut(sen2) if w.strip()]
            union = words1 + words2
            # Set membership is O(1) vs the original O(n) list scans;
            # the counted value (union tokens present in both sides) is unchanged.
            set1, set2 = set(words1), set(words2)
            same_num = sum(1 for w in union if w in set1 and w in set2)
            sim = int(same_num / len(union) * 10)
            if sim >= 5:
                fout.write(line)
# Input/output locations for the ATEC similarity corpus (Windows paths).
inpath = 'E:\\学习资料\\自然语言处理\\forToolLearn\\data\\ATEC\\Origin\\atec_nlp_sim_train_all.csv'
outpath = 'E:\\学习资料\\自然语言处理\\forToolLearn\\data\\ATEC\\Filter\\sim.csv'
process(inpath,outpath)
import pandas as pd
import numpy as np
# df = pd.read_csv('customer_data(filtered)_generated.csv', encoding = 'cp949')
# # print(df.head())
from konlpy.tag import Twitter
twitter = Twitter()
def tokenize(text):
    """Return the noun and adjective tokens of *text*, POS-tagged with konlpy's Twitter tagger.

    Used as the TfidfVectorizer tokenizer below.
    """
    # Iterate the (token, tag) pairs directly instead of indexing by range().
    return [token for token, tag in twitter.pos(text) if tag in ('Noun', 'Adjective')]
# tagged = twitter.pos(df['Review'][0])
# for i in range (0, len(tagged)):
#     if (tagged[i][1]=='Noun') :
#         print(tagged[i])
from sklearn.feature_extraction.text import TfidfVectorizer
# text_data_list = df['Review'].astype(str).tolist()
# text_data_arr = np.array([''.join(text) for text in text_data_list])
# TF-IDF features over noun/adjective tokens; min_df=2 drops hapax terms.
vectorizer = TfidfVectorizer(min_df=2, tokenizer=tokenize, norm='l2')
# text_data = vectorizer.fit_transform(text_data_arr)
# df_tfidf = pd.DataFrame(text_data.A, columns=vectorizer.get_feature_names())
# print(df_tfidf.head())
#%%
# import matplotlib.pyplot as plt
# import seaborn as sns
# from matplotlib import font_manager, rc
# font_name = font_manager.FontProperties(fname="c:/Windows/Fonts/malgun.ttf").get_name()
# rc('font', family=font_name)
# g = sns.factorplot('SNS', data=df, kind='count', size=5)
# g.set_xlabels()
# g = sns.factorplot('Addr', data=df, kind='count', size=5)
# g.set_xticklabels(rotation=90)
# g.set_xlabels()
# g = sns.factorplot('Score', data=df, kind='count', size=5)
# g.set_xlabels()
# df = df.dropna(subset=['Score']) #점수가 없는 데이터 제거
# df.index = range(0,len(df))
# df['Score2'] = ''
# for i in range(0,len(df)) : # 1,2:bad, 3:normal, 4,5:good
# if(df['Score'][i] < 3) :
# df['Score2'][i] = 'bad'
# elif (df['Score'][i] > 3) :
# df['Score2'][i] = 'good'
# elif (df['Score'][i] == 3) :
# df['Score2'][i] = 'normal'
# print(df.head())
# g = sns.factorplot('Score2', data=df, kind='count', size=5)
# g.set_xlabels()
# df.to_csv('customer_data(filtered)_generated2.csv')
#%%
# ---- Analysis: 80/20 split, TF-IDF features, classifier comparison ----
df = pd.read_csv('customer_data(filtered)_generated2.csv', encoding='utf-8')
# df.head()
review_data = df['Review'].astype(str).tolist()
review_label = df['Score2'].astype(str).tolist()
trainset_size = int(round(len(review_data)*0.80))
x_train = np.array([''.join(data) for data in review_data[0:trainset_size]])
y_train = np.array([data for data in review_label[0:trainset_size]])
# BUG FIX: the original sliced from trainset_size+1, silently dropping the
# sample at index trainset_size from both train and test sets.
x_test = np.array([''.join(data) for data in review_data[trainset_size:]])
y_test = np.array([data for data in review_label[trainset_size:]])
X_train = vectorizer.fit_transform(x_train)
X_test = vectorizer.transform(x_test)
# BUG FIX: df_per was only created in a commented-out line, so every
# `df_per.loc[...]` below raised NameError.  Create the results table here.
df_per = pd.DataFrame(columns=['Classifier', 'F-Measure', 'Accuracy'])
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
# Naive Bayes baseline: weighted F1 and accuracy, rounded to 2 decimals.
nb_classifier = MultinomialNB().fit(X_train, y_train)
nb_pred = nb_classifier.predict(X_test)
fm = round(f1_score(y_test, nb_pred, average='weighted'), 2)
ac = round(accuracy_score(y_test, nb_pred, normalize=True), 2)
df_per.loc[len(df_per)] = ['Naive Bayes', fm, ac]
print(df_per)
#Decision Tree
from sklearn.tree import DecisionTreeClassifier
dt_classifier = DecisionTreeClassifier().fit(X_train, y_train)
dt_pred = dt_classifier.predict(X_test)
print('\n Confusion Matrix \n')
print(confusion_matrix(y_test, dt_pred))
print('\n Classification Report \n')
print(classification_report(y_test, dt_pred))
print('\n Accuracy \n')
print(round(accuracy_score(y_test, dt_pred, normalize=True),2))
# Record weighted F1 and accuracy in the comparison table.
# NOTE(review): 'Decison Tree' is a typo in the recorded label; left as-is
# because downstream code may key on this string.
fm = round(f1_score(y_test, dt_pred, average='weighted'), 2)
ac = round(accuracy_score(y_test, dt_pred, normalize=True), 2)
df_per.loc[len(df_per)] = ['Decison Tree', fm, ac]
# df_per
#Random Forest
from sklearn.ensemble import RandomForestClassifier
rf_classifier = RandomForestClassifier(n_estimators=100)
rf_classifier.fit(X_train, y_train)
rf_pred = rf_classifier.predict(X_test)
print('\n Confusion Matrix \n')
print(confusion_matrix(y_test, rf_pred))
print('\n Classification Report \n')
print(classification_report(y_test, rf_pred))
print('\n Accuracy \n')
print(round(accuracy_score(y_test, rf_pred, normalize=True),2))
# BUG FIX: fm was never recomputed for Random Forest, so the Decision Tree's
# F-measure was recorded in this row by mistake.
fm = round(f1_score(y_test, rf_pred, average='weighted'), 2)
ac = round(accuracy_score(y_test, rf_pred, normalize=True), 2)
df_per.loc[len(df_per)] = ['Random Forest', fm, ac]
# df_per
#SVM
from sklearn.svm import LinearSVC
svm_classifier = LinearSVC().fit(X_train, y_train)
svm_pred = svm_classifier.predict(X_test)
print('\n Confusion Matrix \n')
print(confusion_matrix(y_test, svm_pred))
print('\n Classification Report \n')
print(classification_report(y_test, svm_pred))
print('\n Accuracy \n')
print(round(accuracy_score(y_test, svm_pred, normalize=True),2))
fm = round(f1_score(y_test, svm_pred, average='weighted'), 2)
ac = round(accuracy_score(y_test, svm_pred, normalize=True), 2)
df_per.loc[len(df_per)] = ['Support Vector Machine', fm, ac]
df_per
df_per_1 = df_per.set_index('Classifier')
df_per_1
ax = df_per_1[['F-Measure','Accuracy']].plot(kind='bar', title ='Performance', figsize=(10, 7), legend=True, fontsize=12)
ax.set_xlabel('Classifier', fontsize=12)
plt.show()
|
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
from celery import shared_task
from websites.models import Website
@shared_task
def retrieve_websites():
    """Download the Alexa top-1M CSV (zipped) and create one Website row per line.

    Each CSV line is `rank,url`.  Returns True on completion.
    """
    url = 'http://s3.amazonaws.com/alexa-static/top-1m.csv.zip'
    resp = urlopen(url)
    try:
        zipfile = ZipFile(BytesIO(resp.read()))
    finally:
        resp.close()
    for contained_file in zipfile.namelist():
        # Stream the member line-by-line instead of materializing all ~1M
        # lines into an intermediate list first (the previous implementation
        # buffered everything in `temp` and then iterated it a second time).
        with zipfile.open(contained_file) as member:
            for line in member:
                rank, site_url = line.decode().strip().split(',')
                Website.objects.create(
                    url=site_url,
                    alexa_rank=rank
                )
    return True
import unittest
from katas.kyu_7.selective_fear_of_numbers import am_I_afraid
class AmIAfraidTestCase(unittest.TestCase):
    """Unit tests for am_I_afraid(day_name, number).

    Covers both afraid (True) and not-afraid (False) day/number combinations.
    """

    def test_true(self):
        self.assertTrue(am_I_afraid('Sunday', -666))

    def test_true_2(self):
        self.assertTrue(am_I_afraid('Tuesday', 965))

    def test_true_3(self):
        self.assertTrue(am_I_afraid('Friday', 2))

    def test_false(self):
        self.assertFalse(am_I_afraid('Monday', 13))

    def test_false_2(self):
        self.assertFalse(am_I_afraid('Tuesday', 2))
|
from torchfly.training.callbacks import Events, handle_event, Callback
class BatchHandler(Callback):
    """Trainer callback invoked at the beginning of every batch."""

    @handle_event(Events.BATCH_BEGIN)
    def process_batch(self, trainer):
        # BUG FIX: removed a leftover `breakpoint()` debugging hook that would
        # drop every training run into the debugger on each batch.  The
        # callback is currently a no-op placeholder.
        pass
from .object import Object
class IO(Object):
    """Marker base class for I/O-related object types (no behavior of its own)."""
    pass
# slashed file path on disk
class Path(IO):
    """Marker type: a slashed file path on disk."""
    pass
# directory
class Dir(IO):
    """Marker type: a directory."""
    pass
# generic file
class File(IO):
    """Marker type: a generic file."""
    pass
|
from sklearn.pipeline import FeatureUnion
from transformers import *
from sklearn import svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer,TfidfTransformer
from collections import Counter
class SexClassifiers():
    """Per-language gender ("sex") classifier factories.

    Each train* method fits a scikit-learn Pipeline (a FeatureUnion of text
    features feeding a linear SVM) on that language's training data and
    returns the fitted pipeline.

    NOTE(review): assumes `self.<language>TrainData` and `self.getYlabels`
    are provided elsewhere (mixin/subclass) -- confirm against callers.
    """

    def trainEnglishSexClassifier(self):
        """Fit and return the English gender-classification pipeline."""
        # get correct labels from dictionary in trainY and testY
        trainX = self.englishTrainData[0]
        trainY = self.getYlabels(self.englishTrainData[1], 'sex')
        combined_features = FeatureUnion([
            ("tfidf", TfidfVectorizer()),
            ("ngrams", TfidfVectorizer(ngram_range=(3, 3), analyzer="char")),
            ("hashtags", CountHashtags()),
            ("mentions", CountMentions()),
            ("english", English()),
            ("countRepeatingLetters", RepeatingLetters()),
            ("capitals", CountWordCaps()),
        ], transformer_weights={
            # NOTE(review): "capitals" has no weight entry -- presumably it
            # keeps the default weight; confirm intended.
            'english': 1,
            'tfidf': 3,
            'ngrams': 2,
            'hashtags': 1,
            'mentions': 1,
            'countRepeatingLetters': 1
        })
        classifier = svm.LinearSVC(C=1.0)
        pipeline = Pipeline([("features", combined_features), ("classifier", classifier)])
        pipeline.fit(trainX, trainY)
        return pipeline

    def trainDutchSexClassifier(self):
        """Fit and return the Dutch gender-classification pipeline."""
        # get correct labels from dictionary in trainY and testY
        trainX = self.dutchTrainData[0]
        trainY = self.getYlabels(self.dutchTrainData[1], 'sex')
        combined_features = FeatureUnion([
            ("tfidf", TfidfVectorizer()),
            ("ngrams", TfidfVectorizer(ngram_range=(3, 3), analyzer="char")),
            ("counts", CountVectorizer()),
        ], transformer_weights={
            'tfidf': 2,
            'counts': 1,
            'ngrams': 2,
        })
        # FIX: removed the dead `X_features = combined_features.fit(...).transform(...)`
        # line -- its result was never used and the pipeline refits the features below.
        classifier = svm.LinearSVC()
        pipeline = Pipeline([("features", combined_features), ("classifier", classifier)])
        pipeline.fit(trainX, trainY)
        return pipeline

    def trainItalianSexClassifier(self):
        """Fit and return the Italian gender-classification pipeline."""
        # get correct labels from dictionary in trainY and testY
        trainX = self.italianTrainData[0]
        trainY = self.getYlabels(self.italianTrainData[1], 'sex')
        combined_features = FeatureUnion([
            ("tfidf", TfidfVectorizer()),
            ("ngrams", TfidfVectorizer(ngram_range=(3, 3), analyzer="char")),
            ("counts", CountVectorizer()),
            ("latin", Latin()),
        ], transformer_weights={
            'latin': 1,
            'tfidf': 2,
            'ngrams': 2,
            'counts': 1,
        })
        # FIX: removed the dead X_features pre-fit (duplicate work; unused result).
        classifier = svm.LinearSVC()
        pipeline = Pipeline([("features", combined_features), ("classifier", classifier)])
        pipeline.fit(trainX, trainY)
        return pipeline

    def trainSpanishSexClassifier(self):
        """Fit and return the Spanish gender-classification pipeline."""
        # get correct labels from dictionary in trainY and testY
        trainX = self.spanishTrainData[0]
        trainY = self.getYlabels(self.spanishTrainData[1], 'sex')
        combined_features = FeatureUnion([
            ("tfidf", TfidfVectorizer()),
            ("ngrams", TfidfVectorizer(ngram_range=(3, 3), analyzer="char")),
            ("counts", CountVectorizer()),
            ("latin", Latin()),
        ], transformer_weights={
            'latin': 1,
            'tfidf': 2,
            'counts': 1,
            'ngrams': 2,
        })
        classifier = svm.LinearSVC()
        pipeline = Pipeline([("features", combined_features), ("classifier", classifier)])
        pipeline.fit(trainX, trainY)
        return pipeline
'''Age classifiers'''
class AgeClassifiers():
    """Per-language age classifier factories (SVC over FeatureUnion text features).

    NOTE(review): assumes `self.<language>TrainData` and `self.getYlabels`
    are provided elsewhere (mixin/subclass) -- confirm against callers.
    """

    def trainEnglishAgeClassifier(self):
        """Fit and return the English age-classification pipeline."""
        # get correct labels from dictionary in trainY and testY
        trainX = self.englishTrainData[0]
        trainY = self.getYlabels(self.englishTrainData[1], 'age')
        combined_features = FeatureUnion([
            ("tfidf", TfidfVectorizer(sublinear_tf=True, max_df=0.05)),
            ("repeatingLetters", RepeatingLetters()),
            ("countsWordCaps", CountWordCaps())
        ])
        classifier = svm.SVC(kernel='rbf', C=1.0, gamma=0.9)
        pipeline = Pipeline([("features", combined_features), ("classifier", classifier)])
        pipeline.fit(trainX, trainY)
        return pipeline

    def trainSpanishAgeClassifier(self, classifier):
        """Fit and return the Spanish age-classification pipeline.

        The `classifier` argument is wrapped by the Classifier() transformer and
        used as a *feature*; the final estimator is a fresh linear-kernel SVC
        (the local name is rebound below -- original behavior preserved).
        """
        # get correct labels from dictionary in trainY and testY
        trainX = self.spanishTrainData[0]
        trainY = self.getYlabels(self.spanishTrainData[1], 'age')
        combined_features = FeatureUnion([
            ("capitals", CountCaps()),
            ("repeatingLetters", RepeatingLetters()),
            ("countsWordCaps", CountWordCaps()),
            ("tfidf", TfidfVectorizer()),
            ("latin", Latin()),
            ("classifier", Classifier(classifier))
        ])
        # FIX: removed the dead `X_features = combined_features.fit(...).transform(...)`
        # line -- its result was never used and the pipeline refits the features below.
        classifier = svm.SVC(kernel='linear')
        pipeline = Pipeline([("features", combined_features), ("classifier", classifier)])
        pipeline.fit(trainX, trainY)
        return pipeline
# Практика на Python: Расстояние редактирования
import random
def edit_distance(s1, s2):
    """Return the Levenshtein (edit) distance between s1 and s2.

    The distance is the minimum number of single-character insertions,
    deletions and substitutions needed to turn s1 into s2.  Previously this
    was a stub returning 0, which failed the module's own test() assertions.
    Uses the classic two-row dynamic-programming scheme: O(len(s1)*len(s2))
    time, O(min(len(s1), len(s2))) extra space.
    """
    # Keep the shorter string as the DP row to minimize memory.
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    previous = list(range(len(s2) + 1))
    for i, c1 in enumerate(s1, 1):
        current = [i]
        for j, c2 in enumerate(s2, 1):
            current.append(min(
                previous[j] + 1,               # deletion
                current[j - 1] + 1,            # insertion
                previous[j - 1] + (c1 != c2),  # substitution (free on match)
            ))
        previous = current
    return previous[-1]
def main():
    """Read two strings from stdin and print their edit distance."""
    first = input()
    second = input()
    print(edit_distance(first, second))
def test(n_iter=100):
    """Sanity-check edit_distance with random binary strings plus fixed cases."""
    for _ in range(n_iter):
        size = random.randint(0, 64)
        sample = "".join(random.choice("01") for _ in range(size))
        # Distance to the empty string is the string length, in both orders.
        assert edit_distance(sample, "") == edit_distance("", sample) == len(sample)
        # A string is at distance zero from itself.
        assert edit_distance(sample, sample) == 0
    assert edit_distance("ab", "ab") == 0
    assert edit_distance("short", "ports") == 3
if __name__ == '__main__':
    # Run the self-checks (not main) when executed as a script.
    test()
from PIL import Image, ImageDraw
import random, os, time
import numpy as np
def randomImage():
    """Return a white 500x500 RGB canvas with 'image.png' pasted at a random spot.

    The random top-left corner is kept at least (dx, dy) pixels away from the
    right/bottom edges.  NOTE(review): assumes the sprite is no larger than
    dx x dy so it stays fully inside the canvas -- confirm.
    """
    img = Image.open('image.png', 'r')
    image = Image.new('RGB', (500, 500), (255, 255, 255, 255))
    bg_w, bg_h = image.size
    dx = dy = 100
    x = random.randint(0, bg_w - dx - 1)
    y = random.randint(0, bg_h - dy - 1)
    # FIX: removed the unused `offset`, `img_w`, `img_h` locals -- `paste`
    # was (and is) called with (x, y), so `offset` was dead code.
    image.paste(img, (x, y))
    return image
def cropImage(image, agent_x, agent_y):
    """Return a 46x46 crop centred on (agent_x, agent_y) as an ndarray.

    Returns -1 when the crop rectangle would fall outside the image bounds.
    """
    # Crop is a fixed rectangle
    x_0 = agent_x + 23
    x_1 = agent_x - 23
    y_0 = agent_y + 23
    y_1 = agent_y - 23
    # BUG FIX: the original compared `(x_0, y_0) > image.size`, which is a
    # *lexicographic* tuple comparison -- e.g. a crop overflowing only the
    # image height slipped through.  Check each axis independently instead.
    if x_0 > image.size[0] or y_0 > image.size[1]:
        return -1
    if x_1 < 0 or y_1 < 0:
        return -1
    area = (x_1, y_1, x_0, y_0)
    cropped_img = image.crop(area)
    return np.array(cropped_img)
def drawRect(image, agent_x, agent_y):
    """Outline a 46x46 square centred on (agent_x, agent_y), then save to 'lala' as PNG."""
    half = 23
    left, top = agent_x - half, agent_y - half
    right, bottom = agent_x + half, agent_y + half
    pen = ImageDraw.Draw(image)
    pen.rectangle(((left, top), (right, bottom)), outline="Black")
    image.save("lala", "PNG")
|
# Per-unit prices (yen, tax excluded) and the consumption-tax rate.
apple_price = 100
orange_price = 200
grape_price = 300
tax = 0.08

# Basket subtotals; multipliers are the purchased quantities.
a_total_price = 1 * apple_price + 2 * orange_price + 3 * grape_price  # 1400
b_total_price = 0 * apple_price + 2 * orange_price + 3 * grape_price  # 1300
c_total_price = 2 * apple_price + 4 * orange_price + 0 * grape_price  # 1000

# Tax-included totals.
a_tax_price = a_total_price * (1 + tax)  # 1512
b_tax_price = b_total_price * (1 + tax)  # 1404
c_tax_price = c_total_price * (1 + tax)  # 1080

print(f"Aさん;税別:{a_total_price}円, 税込:{a_tax_price:.0f}円")
print(f"Bさん;税別:{b_total_price}円, 税込:{b_tax_price:.0f}円")
print(f"Cさん;税別:{c_total_price}円, 税込:{c_tax_price:.0f}円")
|
#!/usr/bin/python3
"""
Goal:
- perform a dig against a hostname
- parse the A records from the dig
- print A records to screen
- number of records
"""
import subprocess
def get_a_record(hostname):
    """Run `dig <hostname>` and return its raw stdout as bytes."""
    completed = subprocess.run(['dig', hostname], stdout=subprocess.PIPE)
    return completed.stdout
def parse_out(data):
    """Return the first A-record value from raw `dig` output bytes, or None.

    Decodes the bytes as UTF-8 and scans for the first tab-separated answer
    line of record type A (e.g. `google.com.\\t300\\tIN\\tA\\t1.2.3.4`),
    returning its last field (the IP address).

    FIX: the original printed debug output (`type(...)` dumps and every line)
    and blindly indexed line 13, which breaks whenever dig's header length
    changes; scanning for the `IN ... A` fields is robust to that.
    """
    text = data.decode('utf-8')
    for line in text.split('\n'):
        fields = line.split('\t')
        if len(fields) >= 5 and fields[-3] == 'IN' and fields[-2] == 'A':
            return fields[-1]
    return None
def print_output(data):
    """Print the resolved address in a human-readable sentence."""
    message = 'The IP Address of Google is {}'.format(data)
    print(message)
# Script driver: resolve google.com via dig and report the first A record.
g = get_a_record('google.com')
parsed = parse_out(g)
print_output(parsed)
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# Load training data; the first CSV column is the row index.
train = pd.read_csv('data/train.csv', index_col=0)
train.head()
# In[3]:
# Split into feature matrix X and regression target y.
X = train.drop('target', axis=1)
y = train.target
# In[4]:
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_regression
# Keep the 500 features with the highest mutual information with the target.
# NOTE(review): fit_transform returns a plain array, so column names are lost.
X = SelectKBest(mutual_info_regression, k=500).fit_transform(X, y)
X.shape
# In[28]:
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
# In[29]:
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
# Baseline model: ordinary least squares.
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
# In[30]:
def rmsle_metric(y_pred, y_test):
    """Root mean squared logarithmic error between predictions and targets."""
    assert len(y_test) == len(y_pred)
    log_diff = np.log1p(y_pred) - np.log1p(y_test)  # log1p(x) == log(1 + x)
    return np.sqrt(np.mean(np.square(log_diff)))
#y_pred = y_pred.astype('float64')
#y_test = y_test.values.astype('float64')
# RMSLE of the linear-regression baseline.
rmsle = rmsle_metric(y_pred, y_test)
#msle = mean_squared_log_error(y_test, y_pred)
rmsle
# ### SVM
# In[31]:
from sklearn.svm import SVR
# Support-vector regression with default hyperparameters, same metric.
svr = SVR()
svr.fit(X_train, y_train)
y_pred = svr.predict(X_test)
print(rmsle_metric(y_pred, y_test))
# In[10]:
1.7021190276421692 - 1.702119314596429
# ### TPOT Test
# In[48]:
from tpot import TPOTRegressor
# AutoML search; small generations/cv for a quick run.
model = TPOTRegressor(
    generations=5,
    population_size=100,
    #scoring='root_mean_squared_log_error',
    n_jobs=1,
    verbosity=2,
    cv=2
    #max_time_mins=180,
    #early_stop=3
)
model.fit(X_train, y_train.values)
# In[49]:
y_pred = model.predict(X_test)
print(rmsle_metric(y_pred, y_test))
# ### Checkpoint
# In[50]:
# NOTE(review): `sklearn.externals.joblib` was removed in scikit-learn 0.23+;
# on modern versions this import must become `import joblib`.
from sklearn.externals import joblib
joblib.dump(model.fitted_pipeline_, 'selectkbest_tpot_163.pkl')
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import hashlib
import os.path
from collections import deque
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable, Mapping
from pants.backend.go.util_rules import cgo, coverage
from pants.backend.go.util_rules.assembly import (
AssembleGoAssemblyFilesRequest,
FallibleAssembleGoAssemblyFilesResult,
FallibleGenerateAssemblySymabisResult,
GenerateAssemblySymabisRequest,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.cgo import CGoCompileRequest, CGoCompileResult, CGoCompilerFlags
from pants.backend.go.util_rules.coverage import (
ApplyCodeCoverageRequest,
ApplyCodeCoverageResult,
BuiltGoPackageCodeCoverageMetadata,
FileCodeCoverageMetadata,
)
from pants.backend.go.util_rules.embedcfg import EmbedConfig
from pants.backend.go.util_rules.goroot import GoRoot
from pants.backend.go.util_rules.import_config import ImportConfig, ImportConfigRequest
from pants.backend.go.util_rules.sdk import GoSdkProcess, GoSdkToolIDRequest, GoSdkToolIDResult
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.engine.engine_aware import EngineAwareParameter, EngineAwareReturnType
from pants.engine.fs import (
EMPTY_DIGEST,
AddPrefix,
CreateDigest,
Digest,
DigestEntries,
DigestSubset,
FileContent,
FileEntry,
MergeDigests,
PathGlobs,
)
from pants.engine.process import FallibleProcessResult, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.resources import read_resource
from pants.util.strutil import path_safe
class BuildGoPackageRequest(EngineAwareParameter):
    """Request to build one Go package, with its direct dependencies as nested requests.

    Instances form a structure-shared DAG (see `__init__`); hash and equality
    cover every field, with the hashcode pre-computed once.
    """

    def __init__(
        self,
        *,
        import_path: str,
        pkg_name: str,
        digest: Digest,
        dir_path: str,
        build_opts: GoBuildOptions,
        go_files: tuple[str, ...],
        s_files: tuple[str, ...],
        direct_dependencies: tuple[BuildGoPackageRequest, ...],
        import_map: Mapping[str, str] | None = None,
        # NOTE: keyword-only and *required* (no default), despite following defaulted params.
        minimum_go_version: str | None,
        for_tests: bool = False,
        embed_config: EmbedConfig | None = None,
        with_coverage: bool = False,
        cgo_files: tuple[str, ...] = (),
        cgo_flags: CGoCompilerFlags | None = None,
        c_files: tuple[str, ...] = (),
        header_files: tuple[str, ...] = (),
        cxx_files: tuple[str, ...] = (),
        objc_files: tuple[str, ...] = (),
        fortran_files: tuple[str, ...] = (),
        prebuilt_object_files: tuple[str, ...] = (),
        pkg_specific_compiler_flags: tuple[str, ...] = (),
        pkg_specific_assembler_flags: tuple[str, ...] = (),
        is_stdlib: bool = False,
    ) -> None:
        """Build a package and its dependencies as `__pkg__.a` files.

        Instances of this class form a structure-shared DAG, and so a hashcode is pre-computed for
        the recursive portion.
        """
        # Fail fast on an inconsistent request: coverage needs a coverage config.
        if with_coverage and build_opts.coverage_config is None:
            raise ValueError(
                "BuildGoPackageRequest.with_coverage is set but BuildGoPackageRequest.build_opts.coverage_config is None!"
            )
        self.import_path = import_path
        self.pkg_name = pkg_name
        self.digest = digest
        self.dir_path = dir_path
        self.build_opts = build_opts
        self.go_files = go_files
        self.s_files = s_files
        self.direct_dependencies = direct_dependencies
        # Normalized to a FrozenDict so the instance stays hashable.
        self.import_map = FrozenDict(import_map or {})
        self.minimum_go_version = minimum_go_version
        self.for_tests = for_tests
        self.embed_config = embed_config
        self.with_coverage = with_coverage
        self.cgo_files = cgo_files
        self.cgo_flags = cgo_flags
        self.c_files = c_files
        self.header_files = header_files
        self.cxx_files = cxx_files
        self.objc_files = objc_files
        self.fortran_files = fortran_files
        self.prebuilt_object_files = prebuilt_object_files
        self.pkg_specific_compiler_flags = pkg_specific_compiler_flags
        self.pkg_specific_assembler_flags = pkg_specific_assembler_flags
        self.is_stdlib = is_stdlib
        # Pre-compute the hash once; hashing recursively through
        # `direct_dependencies` on every lookup would repeat work across the DAG.
        self._hashcode = hash(
            (
                self.import_path,
                self.pkg_name,
                self.digest,
                self.dir_path,
                self.build_opts,
                self.go_files,
                self.s_files,
                self.direct_dependencies,
                self.import_map,
                self.minimum_go_version,
                self.for_tests,
                self.embed_config,
                self.with_coverage,
                self.cgo_files,
                self.cgo_flags,
                self.c_files,
                self.header_files,
                self.cxx_files,
                self.objc_files,
                self.fortran_files,
                self.prebuilt_object_files,
                self.pkg_specific_compiler_flags,
                self.pkg_specific_assembler_flags,
                self.is_stdlib,
            )
        )

    def __repr__(self) -> str:
        # NB: We must override the default `__repr__` so that `direct_dependencies` does not
        # traverse into transitive dependencies, which was pathologically slow.
        return (
            f"{self.__class__}("
            f"import_path={repr(self.import_path)}, "
            f"pkg_name={self.pkg_name}, "
            f"digest={self.digest}, "
            f"dir_path={self.dir_path}, "
            f"build_opts={self.build_opts}, "
            f"go_files={self.go_files}, "
            f"s_files={self.s_files}, "
            f"direct_dependencies={[dep.import_path for dep in self.direct_dependencies]}, "
            f"import_map={self.import_map}, "
            f"minimum_go_version={self.minimum_go_version}, "
            f"for_tests={self.for_tests}, "
            f"embed_config={self.embed_config}, "
            f"with_coverage={self.with_coverage}, "
            f"cgo_files={self.cgo_files}, "
            f"cgo_flags={self.cgo_flags}, "
            f"c_files={self.c_files}, "
            f"header_files={self.header_files}, "
            f"cxx_files={self.cxx_files}, "
            f"objc_files={self.objc_files}, "
            f"fortran_files={self.fortran_files}, "
            f"prebuilt_object_files={self.prebuilt_object_files}, "
            f"pkg_specific_compiler_flags={self.pkg_specific_compiler_flags}, "
            f"pkg_specific_assembler_flags={self.pkg_specific_assembler_flags}, "
            f"is_stdlib={self.is_stdlib}"
            ")"
        )

    def __hash__(self) -> int:
        # Constant time: returns the hashcode computed in __init__.
        return self._hashcode

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Cheap field comparisons first; the recursive `direct_dependencies`
        # comparison is deliberately last.
        return (
            self._hashcode == other._hashcode
            and self.import_path == other.import_path
            and self.pkg_name == other.pkg_name
            and self.digest == other.digest
            and self.dir_path == other.dir_path
            and self.build_opts == other.build_opts
            and self.import_map == other.import_map
            and self.go_files == other.go_files
            and self.s_files == other.s_files
            and self.minimum_go_version == other.minimum_go_version
            and self.for_tests == other.for_tests
            and self.embed_config == other.embed_config
            and self.with_coverage == other.with_coverage
            and self.cgo_files == other.cgo_files
            and self.cgo_flags == other.cgo_flags
            and self.c_files == other.c_files
            and self.header_files == other.header_files
            and self.cxx_files == other.cxx_files
            and self.objc_files == other.objc_files
            and self.fortran_files == other.fortran_files
            and self.prebuilt_object_files == other.prebuilt_object_files
            and self.pkg_specific_compiler_flags == other.pkg_specific_compiler_flags
            and self.pkg_specific_assembler_flags == other.pkg_specific_assembler_flags
            and self.is_stdlib == other.is_stdlib
            # TODO: Use a recursive memoized __eq__ if this ever shows up in profiles.
            and self.direct_dependencies == other.direct_dependencies
        )

    def debug_hint(self) -> str | None:
        # Human-readable identifier for this request in engine output.
        return self.import_path
@dataclass(frozen=True)
class FallibleBuildGoPackageRequest(EngineAwareParameter, EngineAwareReturnType):
    """Request to build a package, but fallible if determining the request metadata failed.

    When creating "synthetic" packages, use `GoPackageRequest` directly. This type is only intended
    for determining the package metadata of user code, which may fail to be analyzed.
    """

    # None when metadata analysis failed; see exit_code / stderr for details.
    request: BuildGoPackageRequest | None
    import_path: str
    exit_code: int = 0
    stderr: str | None = None
    # True when the failure originated in a dependency rather than this package.
    dependency_failed: bool = False

    def level(self) -> LogLevel:
        # Only a direct failure is an ERROR; dependency-caused failures stay at DEBUG.
        return (
            LogLevel.ERROR if self.exit_code != 0 and not self.dependency_failed else LogLevel.DEBUG
        )

    def message(self) -> str:
        # One-line status, with stderr appended when present.
        message = self.import_path
        message += (
            " succeeded." if self.exit_code == 0 else f" failed (exit code {self.exit_code})."
        )
        if self.stderr:
            message += f"\n{self.stderr}"
        return message

    def cacheable(self) -> bool:
        # Failed compile outputs should be re-rendered in every run.
        return self.exit_code == 0
@dataclass(frozen=True)
class FallibleBuiltGoPackage(EngineAwareReturnType):
    """Fallible version of `BuiltGoPackage` with error details."""

    # None when compilation failed; see exit_code / stdout / stderr for details.
    output: BuiltGoPackage | None
    import_path: str
    exit_code: int = 0
    stdout: str | None = None
    stderr: str | None = None
    # True when the failure originated in a dependency rather than this package.
    dependency_failed: bool = False

    def level(self) -> LogLevel:
        # Only a direct failure is an ERROR; dependency-caused failures stay at DEBUG.
        return (
            LogLevel.ERROR if self.exit_code != 0 and not self.dependency_failed else LogLevel.DEBUG
        )

    def message(self) -> str:
        # One-line status, with stdout/stderr appended when present.
        message = self.import_path
        message += (
            " succeeded." if self.exit_code == 0 else f" failed (exit code {self.exit_code})."
        )
        if self.stdout:
            message += f"\n{self.stdout}"
        if self.stderr:
            message += f"\n{self.stderr}"
        return message

    def cacheable(self) -> bool:
        # Failed compile outputs should be re-rendered in every run.
        return self.exit_code == 0
@dataclass(frozen=True)
class BuiltGoPackage:
    """A package and its dependencies compiled as `__pkg__.a` files.

    The packages are arranged into `__pkgs__/{path_safe(import_path)}/__pkg__.a`.
    """

    digest: Digest
    # Maps each import path to the path of its `__pkg__.a` file.
    import_paths_to_pkg_a_files: FrozenDict[str, str]
    coverage_metadata: BuiltGoPackageCodeCoverageMetadata | None = None
@dataclass(frozen=True)
class RenderEmbedConfigRequest:
    """Request to render an (optional) embed configuration."""

    embed_config: EmbedConfig | None
@dataclass(frozen=True)
class RenderedEmbedConfig:
    """Result of rendering an embed configuration."""

    digest: Digest
    # Fixed path of the rendered config file.
    PATH = "./embedcfg"
@dataclass(frozen=True)
class GoCompileActionIdRequest:
    """Request for the compile action ID of a build request."""

    build_request: BuildGoPackageRequest
@dataclass(frozen=True)
class GoCompileActionIdResult:
    """The compile action ID string for a build request."""

    action_id: str
# TODO(#16831): Merge this rule helper and the AssemblyPostCompilationRequest.
async def _add_objects_to_archive(
    input_digest: Digest,
    pkg_archive_path: str,
    obj_file_paths: Iterable[str],
) -> ProcessResult:
    """Append the given object files to a package archive via `go tool pack r`."""
    # Use `go tool asm` tool ID since `go tool pack` does not have a version argument.
    asm_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("asm"))
    pack_result = await Get(
        ProcessResult,
        GoSdkProcess(
            input_digest=input_digest,
            command=(
                "tool",
                "pack",
                "r",
                pkg_archive_path,
                *obj_file_paths,
            ),
            env={
                # NOTE(review): presumably included so the cache key varies
                # with the SDK's asm tool version -- confirm.
                "__PANTS_GO_ASM_TOOL_ID": asm_tool_id.tool_id,
            },
            description="Link objects to Go package archive",
            output_files=(pkg_archive_path,),
        ),
    )
    return pack_result
@dataclass(frozen=True)
class SetupAsmCheckBinary:
    """The compiled assembly-format check helper binary."""

    digest: Digest
    # Relative path to the binary within `digest`.
    path: str
# Due to the bootstrap problem, the asm check binary cannot use the `LoadedGoBinaryRequest` rules since
# those rules call back into this `build_pkg` package. Instead, just invoke `go build` directly which is fine
# since the asm check binary only uses the standard library.
@rule
async def setup_golang_asm_check_binary() -> SetupAsmCheckBinary:
    """Compile the bundled `asm_check.go` source into a helper binary."""
    src_file = "asm_check.go"
    content = read_resource("pants.backend.go.go_sources.asm_check", src_file)
    if not content:
        raise AssertionError(f"Unable to find resource for `{src_file}`.")
    sources_digest = await Get(Digest, CreateDigest([FileContent(src_file, content)]))
    binary_name = "__go_asm_check__"
    compile_result = await Get(
        ProcessResult,
        GoSdkProcess(
            command=("build", "-o", binary_name, src_file),
            input_digest=sources_digest,
            output_files=(binary_name,),
            # cgo disabled: the helper only uses the standard library.
            env={"CGO_ENABLED": "0"},
            description="Build Go assembly check binary",
        ),
    )
    return SetupAsmCheckBinary(compile_result.output_digest, f"./{binary_name}")
# Check whether the given files looks like they could be Golang-format assembly language files.
@dataclass(frozen=True)
class CheckForGolangAssemblyRequest:
    """Request to check whether `s_files` look like Go-format assembly."""

    digest: Digest
    dir_path: str
    s_files: tuple[str, ...]
@dataclass(frozen=True)
class CheckForGolangAssemblyResult:
    """True if any checked file may be Go-format assembly."""

    maybe_golang_assembly: bool
@rule
async def check_for_golang_assembly(
    request: CheckForGolangAssemblyRequest,
    asm_check_setup: SetupAsmCheckBinary,
) -> CheckForGolangAssemblyResult:
    """Return true if any of the given `s_files` look like it could be a Golang-format assembly
    language file.

    This is used by the cgo rules as a heuristic to determine if the user is passing Golang assembly
    format instead of gcc assembly format.
    """
    # Make both the sources and the checker binary visible to the process.
    input_digest = await Get(Digest, MergeDigests([request.digest, asm_check_setup.digest]))
    result = await Get(
        ProcessResult,
        Process(
            argv=(
                asm_check_setup.path,
                *(os.path.join(request.dir_path, s_file) for s_file in request.s_files),
            ),
            input_digest=input_digest,
            level=LogLevel.DEBUG,
            description="Check whether assembly language sources are in Go format",
        ),
    )
    # Any stdout from the checker binary indicates a (possible) Go-format file.
    return CheckForGolangAssemblyResult(len(result.stdout) > 0)
# Copy header files to names which use platform independent names. For example, defs_linux_amd64.h
# becomes defs_GOOS_GOARCH.h.
#
# See https://github.com/golang/go/blob/1c05968c9a5d6432fc6f30196528f8f37287dd3d/src/cmd/go/internal/work/exec.go#L867-L892
# for particulars.
async def _maybe_copy_headers_to_platform_independent_names(
    input_digest: Digest,
    dir_path: str,
    header_files: tuple[str, ...],
    goroot: GoRoot,
) -> Digest | None:
    """Return a digest of platform-suffixed headers copied to GOOS/GOARCH names.

    Only the renamed copies are in the returned digest; returns None when no
    header matched any of the `_<goos>_<goarch>` / `_<goos>` / `_<goarch>` suffixes.
    """
    goos_goarch = f"_{goroot.goos}_{goroot.goarch}"
    goos = f"_{goroot.goos}"
    goarch = f"_{goroot.goarch}"
    digest_entries = await Get(DigestEntries, Digest, input_digest)
    digest_entries_by_path: dict[str, FileEntry] = {
        entry.path: entry for entry in digest_entries if isinstance(entry, FileEntry)
    }
    new_digest_entries: list[FileEntry] = []
    for header_file in header_files:
        header_file_path = PurePath(dir_path, header_file)
        entry = digest_entries_by_path.get(str(header_file_path))
        if not entry:
            # Header not present in the digest; skip it.
            continue
        stem = header_file_path.stem
        new_stem: str | None = None
        # Most specific suffix first: _GOOS_GOARCH, then _GOOS, then _GOARCH.
        if stem.endswith(goos_goarch):
            new_stem = stem[0 : -len(goos_goarch)] + "_GOOS_GOARCH"
        elif stem.endswith(goos):
            new_stem = stem[0 : -len(goos)] + "_GOOS"
        elif stem.endswith(goarch):
            new_stem = stem[0 : -len(goarch)] + "_GOARCH"
        if new_stem:
            new_header_file_path = PurePath(dir_path, f"{new_stem}{header_file_path.suffix}")
            new_digest_entries.append(dataclasses.replace(entry, path=str(new_header_file_path)))
    if new_digest_entries:
        digest = await Get(Digest, CreateDigest(new_digest_entries))
        return digest
    else:
        return None
# Gather transitive prebuilt object files for Cgo. Traverse the provided dependencies and lifts `.syso`
# object files into a single `Digest`.
async def _gather_transitive_prebuilt_object_files(
    build_request: BuildGoPackageRequest,
) -> tuple[Digest, frozenset[str]]:
    """Return (merged digest, relative file paths) of all prebuilt objects in the DAG."""
    prebuilt_objects: list[tuple[Digest, list[str]]] = []
    # Breadth-first walk of the dependency DAG.
    # NOTE(review): nodes shared in the DAG may be enqueued more than once.
    queue: deque[BuildGoPackageRequest] = deque([build_request])
    while queue:
        pkg = queue.popleft()
        queue.extend(pkg.direct_dependencies)
        if pkg.prebuilt_object_files:
            prebuilt_objects.append(
                (
                    pkg.digest,
                    [
                        os.path.join(pkg.dir_path, obj_file)
                        for obj_file in pkg.prebuilt_object_files
                    ],
                )
            )
    object_digest = await Get(Digest, MergeDigests([digest for digest, _ in prebuilt_objects]))
    object_files = set()
    for _, files in prebuilt_objects:
        object_files.update(files)
    return object_digest, frozenset(object_files)
# NB: We must have a description for the streaming of this rule to work properly
# (triggered by `FallibleBuiltGoPackage` subclassing `EngineAwareReturnType`).
@rule(desc="Compile with Go", level=LogLevel.DEBUG)
async def build_go_package(
    request: BuildGoPackageRequest, go_root: GoRoot
) -> FallibleBuiltGoPackage:
    """Compile one Go package into a `__pkg__.a` archive after building its direct
    dependencies.

    Handles coverage instrumentation, Cgo compilation, assembly files (symabis +
    assembly), and linking loose object files into the final archive. Returns a
    `FallibleBuiltGoPackage` so that compile failures (including dependency failures)
    propagate as data instead of exceptions.
    """
    maybe_built_deps = await MultiGet(
        Get(FallibleBuiltGoPackage, BuildGoPackageRequest, build_request)
        for build_request in request.direct_dependencies
    )

    import_paths_to_pkg_a_files: dict[str, str] = {}
    dep_digests = []
    for maybe_dep in maybe_built_deps:
        if maybe_dep.output is None:
            # Propagate the dependency failure under this package's import path.
            return dataclasses.replace(
                maybe_dep, import_path=request.import_path, dependency_failed=True
            )
        dep = maybe_dep.output
        # First mapping wins so that an earlier dependency's archive takes precedence.
        for dep_import_path, pkg_archive_path in dep.import_paths_to_pkg_a_files.items():
            if dep_import_path not in import_paths_to_pkg_a_files:
                import_paths_to_pkg_a_files[dep_import_path] = pkg_archive_path
        dep_digests.append(dep.digest)

    merged_deps_digest, import_config, embedcfg, action_id_result = await MultiGet(
        Get(Digest, MergeDigests(dep_digests)),
        Get(
            ImportConfig,
            ImportConfigRequest(
                FrozenDict(import_paths_to_pkg_a_files),
                build_opts=request.build_opts,
                import_map=request.import_map,
            ),
        ),
        Get(RenderedEmbedConfig, RenderEmbedConfigRequest(request.embed_config)),
        Get(GoCompileActionIdResult, GoCompileActionIdRequest(request)),
    )

    unmerged_input_digests = [
        merged_deps_digest,
        import_config.digest,
        embedcfg.digest,
        request.digest,
    ]

    # If coverage is enabled for this package, then replace the Go source files with versions modified to
    # contain coverage code.
    go_files = request.go_files
    cgo_files = request.cgo_files
    s_files = list(request.s_files)
    go_files_digest = request.digest
    cover_file_metadatas: tuple[FileCodeCoverageMetadata, ...] | None = None
    if request.with_coverage:
        coverage_config = request.build_opts.coverage_config
        assert coverage_config is not None, "with_coverage=True but coverage_config is None!"
        coverage_result = await Get(
            ApplyCodeCoverageResult,
            ApplyCodeCoverageRequest(
                digest=request.digest,
                dir_path=request.dir_path,
                go_files=go_files,
                cgo_files=cgo_files,
                cover_mode=coverage_config.cover_mode,
                import_path=request.import_path,
            ),
        )
        go_files_digest = coverage_result.digest
        unmerged_input_digests.append(go_files_digest)
        go_files = coverage_result.go_files
        cgo_files = coverage_result.cgo_files
        cover_file_metadatas = coverage_result.cover_file_metadatas

    # Track loose object files to link into final package archive. These can come from Cgo outputs, regular
    # assembly files, or regular C files.
    objects: list[tuple[str, Digest]] = []

    # Add any prebuilt object files (".syso" extension) to the list of objects to link into the package.
    if request.prebuilt_object_files:
        objects.extend(
            (os.path.join(request.dir_path, prebuilt_object_file), request.digest)
            for prebuilt_object_file in request.prebuilt_object_files
        )

    # Process any Cgo files.
    cgo_compile_result: CGoCompileResult | None = None
    if cgo_files:
        # Check if any assembly files contain gcc assembly, and not Go assembly. Raise an exception if any are
        # likely in Go format since in cgo packages, assembly files are passed to gcc and must be in gcc format.
        #
        # Exception: When building runtime/cgo itself, only send `gcc_*.s` assembly files to GCC as
        # runtime/cgo has both types of files.
        if request.is_stdlib and request.import_path == "runtime/cgo":
            gcc_s_files = []
            new_s_files = []
            for s_file in s_files:
                if s_file.startswith("gcc_"):
                    gcc_s_files.append(s_file)
                else:
                    new_s_files.append(s_file)
            s_files = new_s_files
        else:
            asm_check_result = await Get(
                CheckForGolangAssemblyResult,
                CheckForGolangAssemblyRequest(
                    digest=request.digest,
                    dir_path=request.dir_path,
                    s_files=tuple(s_files),
                ),
            )
            if asm_check_result.maybe_golang_assembly:
                raise ValueError(
                    f"Package {request.import_path} is a cgo package but contains Go assembly files."
                )
            gcc_s_files = s_files
            s_files = []  # Clear s_files since assembly has already been handled in cgo rules.

        # Gather all prebuilt object files transitively and pass them to the Cgo rule for linking into the
        # Cgo object output. This is necessary to avoid linking errors.
        # See https://github.com/golang/go/blob/6ad27161f8d1b9c5e03fb3415977e1d3c3b11323/src/cmd/go/internal/work/exec.go#L3291-L3311.
        transitive_prebuilt_object_files = await _gather_transitive_prebuilt_object_files(request)

        assert request.cgo_flags is not None
        cgo_compile_result = await Get(
            CGoCompileResult,
            CGoCompileRequest(
                import_path=request.import_path,
                pkg_name=request.pkg_name,
                digest=go_files_digest,
                build_opts=request.build_opts,
                dir_path=request.dir_path,
                cgo_files=cgo_files,
                cgo_flags=request.cgo_flags,
                c_files=request.c_files,
                s_files=tuple(gcc_s_files),
                cxx_files=request.cxx_files,
                objc_files=request.objc_files,
                fortran_files=request.fortran_files,
                is_stdlib=request.is_stdlib,
                transitive_prebuilt_object_files=transitive_prebuilt_object_files,
            ),
        )
        assert cgo_compile_result is not None
        unmerged_input_digests.append(cgo_compile_result.digest)
        objects.extend(
            [
                (obj_file, cgo_compile_result.digest)
                for obj_file in cgo_compile_result.output_obj_files
            ]
        )

    # Copy header files with platform-specific values in their name to platform independent names.
    # For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
    copied_headers_digest = await _maybe_copy_headers_to_platform_independent_names(
        input_digest=request.digest,
        dir_path=request.dir_path,
        header_files=request.header_files,
        goroot=go_root,
    )
    if copied_headers_digest:
        unmerged_input_digests.append(copied_headers_digest)

    # Merge all of the input digests together.
    input_digest = await Get(
        Digest,
        MergeDigests(unmerged_input_digests),
    )

    # If any assembly files are present, generate a "symabis" file containing API metadata about those files.
    # The "symabis" file is passed to the Go compiler when building Go code so that the compiler is aware of
    # any API exported by the assembly.
    #
    # Note: The assembly files cannot be assembled at this point because a similar process happens from Go to
    # assembly: The Go compiler generates a `go_asm.h` header file with metadata about the Go code in the package.
    symabis_path: str | None = None
    # BUG FIX: this was `tuple(*a, *b)`, which raises `TypeError` whenever the two flag
    # sequences contain two or more items in total (`tuple()` accepts at most one
    # argument) and would char-split a single string flag. A tuple display is correct.
    extra_assembler_flags = (
        *request.build_opts.assembler_flags,
        *request.pkg_specific_assembler_flags,
    )
    if s_files:
        symabis_fallible_result = await Get(
            FallibleGenerateAssemblySymabisResult,
            GenerateAssemblySymabisRequest(
                compilation_input=input_digest,
                s_files=tuple(s_files),
                import_path=request.import_path,
                dir_path=request.dir_path,
                extra_assembler_flags=extra_assembler_flags,
            ),
        )
        symabis_result = symabis_fallible_result.result
        if symabis_result is None:
            return FallibleBuiltGoPackage(
                None,
                request.import_path,
                symabis_fallible_result.exit_code,
                stdout=symabis_fallible_result.stdout,
                stderr=symabis_fallible_result.stderr,
            )
        input_digest = await Get(
            Digest, MergeDigests([input_digest, symabis_result.symabis_digest])
        )
        symabis_path = symabis_result.symabis_path

    # Build the arguments for compiling the Go code in this package.
    compile_args = [
        "tool",
        "compile",
        "-buildid",
        action_id_result.action_id,
        "-o",
        "__pkg__.a",
        "-pack",
        "-p",
        request.import_path,
        "-importcfg",
        import_config.CONFIG_PATH,
    ]

    # See https://github.com/golang/go/blob/f229e7031a6efb2f23241b5da000c3b3203081d6/src/cmd/go/internal/work/gc.go#L79-L100
    # for where this logic comes from.
    go_version = request.minimum_go_version or "1.16"
    if go_root.is_compatible_version(go_version):
        compile_args.extend(["-lang", f"go{go_version}"])

    if request.is_stdlib:
        compile_args.append("-std")

    compiling_runtime = request.is_stdlib and request.import_path in (
        "internal/abi",
        "internal/bytealg",
        "internal/coverage/rtcov",
        "internal/cpu",
        "internal/goarch",
        "internal/goos",
        "runtime",
        "runtime/internal/atomic",
        "runtime/internal/math",
        "runtime/internal/sys",
        "runtime/internal/syscall",
    )

    # From Go sources:
    # runtime compiles with a special gc flag to check for
    # memory allocations that are invalid in the runtime package,
    # and to implement some special compiler pragmas.
    #
    # See https://github.com/golang/go/blob/245e95dfabd77f337373bf2d6bb47cd353ad8d74/src/cmd/go/internal/work/gc.go#L107-L112
    if compiling_runtime:
        compile_args.append("-+")

    if symabis_path:
        compile_args.extend(["-symabis", symabis_path])

    # If any assembly files are present, request the compiler write an "assembly header" with API metadata
    # about the Go code that can be used by assembly files.
    asm_header_path: str | None = None
    if s_files:
        if os.path.isabs(request.dir_path):
            asm_header_path = "go_asm.h"
        else:
            asm_header_path = os.path.join(request.dir_path, "go_asm.h")
        compile_args.extend(["-asmhdr", asm_header_path])

    if embedcfg.digest != EMPTY_DIGEST:
        compile_args.extend(["-embedcfg", RenderedEmbedConfig.PATH])

    if request.build_opts.with_race_detector:
        compile_args.append("-race")

    if request.build_opts.with_msan:
        compile_args.append("-msan")

    if request.build_opts.with_asan:
        compile_args.append("-asan")

    # If there are no loose object files to add to the package archive later or assembly files to assemble,
    # then pass -complete flag which tells the compiler that the provided Go files constitute the entire package.
    if not objects and not s_files:
        # Exceptions: a few standard packages have forward declarations for
        # pieces supplied behind-the-scenes by package runtime.
        if request.import_path not in (
            "bytes",
            "internal/poll",
            "net",
            "os",
            "runtime/metrics",
            "runtime/pprof",
            "runtime/trace",
            "sync",
            "syscall",
            "time",
        ):
            compile_args.append("-complete")

    # Add any extra compiler flags after the ones added automatically by this rule.
    if request.build_opts.compiler_flags:
        compile_args.extend(request.build_opts.compiler_flags)
    if request.pkg_specific_compiler_flags:
        compile_args.extend(request.pkg_specific_compiler_flags)

    # Remove -N if compiling runtime:
    # It is not possible to build the runtime with no optimizations,
    # because the compiler cannot eliminate enough write barriers.
    if compiling_runtime:
        compile_args = [arg for arg in compile_args if arg != "-N"]

    go_file_paths = (
        str(PurePath(request.dir_path, go_file)) if request.dir_path else f"./{go_file}"
        for go_file in go_files
    )
    generated_cgo_file_paths = cgo_compile_result.output_go_files if cgo_compile_result else ()

    # Put the source file paths into a file and pass that to `go tool compile` via a config file using the
    # `@CONFIG_FILE` syntax. This is necessary to avoid command-line argument limits on macOS. The arguments
    # may end up to exceed those limits when compiling standard library packages where we append a very long GOROOT
    # path to each file name or in packages with large numbers of files.
    go_source_file_paths_config = "\n".join([*go_file_paths, *generated_cgo_file_paths])
    go_sources_file_paths_digest = await Get(
        Digest, CreateDigest([FileContent("__sources__.txt", go_source_file_paths_config.encode())])
    )
    input_digest = await Get(Digest, MergeDigests([input_digest, go_sources_file_paths_digest]))
    compile_args.append("@__sources__.txt")

    compile_result = await Get(
        FallibleProcessResult,
        GoSdkProcess(
            input_digest=input_digest,
            command=tuple(compile_args),
            description=f"Compile Go package: {request.import_path}",
            output_files=("__pkg__.a", *([asm_header_path] if asm_header_path else [])),
            env={"__PANTS_GO_COMPILE_ACTION_ID": action_id_result.action_id},
        ),
    )
    if compile_result.exit_code != 0:
        return FallibleBuiltGoPackage(
            None,
            request.import_path,
            compile_result.exit_code,
            stdout=compile_result.stdout.decode("utf-8"),
            stderr=compile_result.stderr.decode("utf-8"),
        )

    compilation_digest = compile_result.output_digest

    # TODO: Compile any C files if this package does not use Cgo.

    # If any assembly files are present, then assemble them. The `compilation_digest` will contain the
    # assembly header `go_asm.h` in the object directory.
    if s_files:
        # Extract the `go_asm.h` header from the compilation output and merge into the original compilation input.
        assert asm_header_path is not None
        asm_header_digest = await Get(
            Digest,
            DigestSubset(
                compilation_digest,
                PathGlobs(
                    [asm_header_path],
                    glob_match_error_behavior=GlobMatchErrorBehavior.error,
                    description_of_origin="the `build_go_package` rule",
                ),
            ),
        )
        assembly_input_digest = await Get(Digest, MergeDigests([input_digest, asm_header_digest]))
        assembly_fallible_result = await Get(
            FallibleAssembleGoAssemblyFilesResult,
            AssembleGoAssemblyFilesRequest(
                input_digest=assembly_input_digest,
                s_files=tuple(sorted(s_files)),
                dir_path=request.dir_path,
                import_path=request.import_path,
                extra_assembler_flags=extra_assembler_flags,
            ),
        )
        assembly_result = assembly_fallible_result.result
        if assembly_result is None:
            return FallibleBuiltGoPackage(
                None,
                request.import_path,
                assembly_fallible_result.exit_code,
                stdout=assembly_fallible_result.stdout,
                stderr=assembly_fallible_result.stderr,
            )
        objects.extend(assembly_result.assembly_outputs)

    # If there are any loose object files, link them into the package archive.
    if objects:
        assembly_link_input_digest = await Get(
            Digest,
            MergeDigests(
                [
                    compilation_digest,
                    *(digest for obj_file, digest in objects),
                ]
            ),
        )
        assembly_link_result = await _add_objects_to_archive(
            input_digest=assembly_link_input_digest,
            pkg_archive_path="__pkg__.a",
            obj_file_paths=sorted(obj_file for obj_file, digest in objects),
        )
        compilation_digest = assembly_link_result.output_digest

    path_prefix = os.path.join("__pkgs__", path_safe(request.import_path))
    import_paths_to_pkg_a_files[request.import_path] = os.path.join(path_prefix, "__pkg__.a")
    output_digest = await Get(Digest, AddPrefix(compilation_digest, path_prefix))
    merged_result_digest = await Get(Digest, MergeDigests([*dep_digests, output_digest]))

    # Include the modules sources in the output `Digest` alongside the package archive if the Cgo rules
    # detected a potential attempt to link against a static archive (or other reference to `${SRCDIR}` in
    # options) which necessitates the linker needing access to module sources.
    if cgo_compile_result and cgo_compile_result.include_module_sources_with_output:
        merged_result_digest = await Get(
            Digest, MergeDigests([merged_result_digest, request.digest])
        )

    coverage_metadata = (
        BuiltGoPackageCodeCoverageMetadata(
            import_path=request.import_path,
            cover_file_metadatas=cover_file_metadatas,
            sources_digest=request.digest,
            sources_dir_path=request.dir_path,
        )
        if cover_file_metadatas
        else None
    )

    output = BuiltGoPackage(
        digest=merged_result_digest,
        import_paths_to_pkg_a_files=FrozenDict(import_paths_to_pkg_a_files),
        coverage_metadata=coverage_metadata,
    )
    return FallibleBuiltGoPackage(output, request.import_path)
@rule
def required_built_go_package(fallible_result: FallibleBuiltGoPackage) -> BuiltGoPackage:
    """Unwrap a `FallibleBuiltGoPackage`, raising if the build did not produce output."""
    if fallible_result.output is None:
        raise Exception(
            f"Failed to compile {fallible_result.import_path}:\n"
            f"{fallible_result.stdout}\n{fallible_result.stderr}"
        )
    return fallible_result.output
@rule
async def render_embed_config(request: RenderEmbedConfigRequest) -> RenderedEmbedConfig:
    """Serialize the embed config (if any) into a digest holding the embedcfg file."""
    if not request.embed_config:
        # Nothing to embed: return an empty digest.
        return RenderedEmbedConfig(EMPTY_DIGEST)
    digest = await Get(
        Digest,
        CreateDigest(
            [FileContent(RenderedEmbedConfig.PATH, request.embed_config.to_embedcfg())]
        ),
    )
    return RenderedEmbedConfig(digest)
# Compute a cache key for the compile action. This computation is intended to capture similar values to the
# action ID computed by the `go` tool for its own cache.
# For details, see https://github.com/golang/go/blob/21998413ad82655fef1f31316db31e23e0684b21/src/cmd/go/internal/work/exec.go#L216-L403
@rule
async def compute_compile_action_id(
    request: GoCompileActionIdRequest, goroot: GoRoot
) -> GoCompileActionIdResult:
    """Return a SHA-256 hex digest identifying this compile action.

    NOTE: the order of the `h.update(...)` calls below is significant — it mirrors the
    order in which the `go` tool hashes the corresponding fields, so do not reorder.
    """
    bq = request.build_request

    h = hashlib.sha256()

    # All Go action IDs have the full version (as returned by `runtime.Version()` in the key.
    # See https://github.com/golang/go/blob/master/src/cmd/go/internal/cache/hash.go#L32-L46
    h.update(goroot.full_version.encode())

    h.update("compile\n".encode())
    if bq.minimum_go_version:
        h.update(f"go {bq.minimum_go_version}\n".encode())
    h.update(f"goos {goroot.goos} goarch {goroot.goarch}\n".encode())
    h.update(f"import {bq.import_path}\n".encode())
    # TODO: Consider what to do with this information from Go tool:
    # fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
    # TODO: Inject cgo-related values here.
    # TODO: Inject cover mode values here.
    # TODO: Inject fuzz instrumentation values here.
    compile_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("compile"))
    h.update(f"compile {compile_tool_id.tool_id}\n".encode())
    # TODO: Add compiler flags as per `go`'s algorithm. Need to figure out
    if bq.s_files:
        # Assembly is involved, so the assembler's identity must be part of the key too.
        asm_tool_id = await Get(GoSdkToolIDResult, GoSdkToolIDRequest("asm"))
        h.update(f"asm {asm_tool_id.tool_id}\n".encode())
        # TODO: Add asm flags as per `go`'s algorithm.
    # TODO: Add micro-architecture into cache key (e.g., GOAMD64 setting).
    if "GOEXPERIMENT" in goroot._raw_metadata:
        h.update(f"GOEXPERIMENT={goroot._raw_metadata['GOEXPERIMENT']}".encode())
    # TODO: Maybe handle go "magic" env vars: "GOCLOBBERDEADHASH", "GOSSAFUNC", "GOSSADIR", "GOSSAHASH" ?
    # TODO: Handle GSHS_LOGFILE compiler debug option by breaking cache?

    # Note: Input files are already part of cache key. Thus, this algorithm omits incorporating their
    # content hashes into the action ID.

    return GoCompileActionIdResult(h.hexdigest())
def rules():
    """Expose this module's rules plus the cgo and coverage helper rules."""
    return (*collect_rules(), *cgo.rules(), *coverage.rules())
|
'''My first shot at Python and Tornado.'''
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
# Port the HTTP server listens on; override with --port on the command line.
define("port", default=8888, help="run on the given port", type=int)
# Directory containing the .mp3 files and .txt playlist files served by this app.
FILE = os.path.join(os.path.dirname(__file__), "static", "songs")
class IndexHandler(tornado.web.RequestHandler):
    '''Serve the music list page.

    Without arguments, lists every .mp3 (song) and .txt (playlist) file in the songs
    directory. With ?playlist=<name>, lists only the .mp3 entries found in that
    playlist file.
    '''
    def get(self):
        all_list = os.listdir(FILE)
        playlist = self.get_argument("playlist", "none")
        if playlist != 'none':
            # SECURITY FIX: `playlist` is user-controlled and was joined directly into
            # a filesystem path, allowing path traversal (e.g. ?playlist=../../secret).
            # Strip any directory components and require the name to be an actual entry
            # of the songs directory.
            playlist = os.path.basename(playlist)
            if playlist not in all_list:
                raise tornado.web.HTTPError(404)
            # `with` ensures the playlist file handle is closed (was leaked before).
            with open(os.path.join(FILE, playlist), 'r') as file_:
                songlist = file_.readlines()
            __songs = [x.strip() for x in songlist if x.find(".mp3") >= 0]
            __sizes = [os.path.getsize(os.path.join(FILE, i)) for i in __songs]
            __playlists = []
            self.render('music.html',
                        songs=__songs,
                        sizes=__sizes,
                        playlists=__playlists)
        else:
            __songs = [x for x in all_list if x.find(".mp3") >= 0]
            __playlists = [x for x in all_list if x.find(".txt") >= 0]
            __sizes = [os.path.getsize(os.path.join(FILE, i)) for i in __songs]
            self.render('music.html',
                        songs=__songs,
                        sizes=__sizes,
                        playlists=__playlists)
if __name__ == '__main__':
    tornado.options.parse_command_line()
    # Both / and /music.html are handled by the same index handler.
    application = tornado.web.Application(
        handlers=[(r'/', IndexHandler), (r'/music.html', IndexHandler)],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static")
    )
    server = tornado.httpserver.HTTPServer(application)
    server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
|
import numpy as np
import re
import itertools
from collections import Counter
"""
Original taken from https://github.com/dennybritz/cnn-text-classification-tf
"""
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{1,}", " ", string)
return string.strip().lower()
def cat_map():
    """Map each distinct category label in the local "cat" file to an integer id.

    Returns a dict {category: id} with ids 0..k-1. The original implementation
    leaked the file handle and shadowed the builtin `id`; both are fixed here.
    """
    with open("cat") as f:
        # A set de-duplicates repeated labels, as before. NOTE: set iteration order
        # is arbitrary, so the id assignment is only stable within a single run.
        categories = {line.strip() for line in f}
    return {category: idx for idx, category in enumerate(categories)}
def load_data_and_labels():
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    def _read_tokenized(path):
        # One example per line; tokens are space-separated.
        # `with` closes the file handle (the original leaked three handles).
        with open(path) as f:
            return [line.strip().split(" ") for line in f]

    # Load data from files
    cnn_train = _read_tokenized("cnn")
    lstm_train = _read_tokenized("lstm")

    # Convert category names to integer labels via the mapping from cat_map().
    catmap = cat_map()
    with open("cat") as f:
        Y_train = [catmap[line.strip()] for line in f]
    return [cnn_train, lstm_train, Y_train]
def pad_sentences(cnn_train, lstm_train, padding_word="<PAD/>"):
    """Pad every sentence in each corpus to that corpus's maximum length.

    Returns [padded_cnn_sentences, padded_lstm_sentences].
    """
    cnn_max_len = max(len(sentence) for sentence in cnn_train)
    lstm_max_len = max(len(sentence) for sentence in lstm_train)
    cnn_padded_sentences = [
        sentence + [padding_word] * (cnn_max_len - len(sentence)) for sentence in cnn_train
    ]
    lstm_padded_sentences = [
        sentence + [padding_word] * (lstm_max_len - len(sentence)) for sentence in lstm_train
    ]
    return [cnn_padded_sentences, lstm_padded_sentences]
def build_vocab(cnn_padded_sentences, lstm_padded_sentences):
    """Build frequency-ordered vocabularies for both corpora.

    Returns [cnn_vocab, cnn_vocab_inv, lstm_vocab, lstm_vocab_inv] where *_vocab maps
    token -> index and *_vocab_inv is the index -> token list.
    """
    def _vocab_of(sentences):
        # Most frequent tokens get the smallest indices (ties keep first-seen order).
        counts = Counter(itertools.chain(*sentences))
        inverse = [token for token, _ in counts.most_common()]
        return {token: i for i, token in enumerate(inverse)}, inverse

    cnn_vocabulary, cnn_vocabulary_inv = _vocab_of(cnn_padded_sentences)
    lstm_vocabulary, lstm_vocabulary_inv = _vocab_of(lstm_padded_sentences)
    return [cnn_vocabulary, cnn_vocabulary_inv, lstm_vocabulary, lstm_vocabulary_inv]
def build_input_data(cnn_padded_sentences, Y_train, cnn_vocabulary, lstm_padded_sentences, lstm_vocabulary):
    """Convert padded token sentences into integer index arrays using the vocabularies."""
    def _to_indices(sentences, vocabulary):
        return np.array([[vocabulary[token] for token in sentence] for sentence in sentences])

    cnn_train = _to_indices(cnn_padded_sentences, cnn_vocabulary)
    lstm_train = _to_indices(lstm_padded_sentences, lstm_vocabulary)
    return [cnn_train, lstm_train, np.array(Y_train)]
def load_data():
    """Full pipeline: read raw corpora, pad, build vocabularies and index the data."""
    cnn_raw, lstm_raw, labels = load_data_and_labels()
    cnn_padded, lstm_padded = pad_sentences(cnn_raw, lstm_raw)
    cnn_vocabulary, cnn_vocabulary_inv, lstm_vocabulary, lstm_vocabulary_inv = build_vocab(
        cnn_padded, lstm_padded)
    cnn_train, lstm_train, Y_train = build_input_data(
        cnn_padded, labels, cnn_vocabulary, lstm_padded, lstm_vocabulary)
    return [cnn_train, lstm_train, Y_train,
            cnn_vocabulary, cnn_vocabulary_inv, lstm_vocabulary, lstm_vocabulary_inv]
def batch_iter(data, batch_size, num_epochs):
    """
    Generates a batch iterator for a dataset.

    Yields shuffled batches of at most `batch_size` items, reshuffling at the start
    of every epoch.
    """
    data = np.array(data)
    data_size = len(data)
    # BUG FIX: the original `int(len(data)/batch_size) + 1` yielded a spurious empty
    # final batch whenever data_size was an exact multiple of batch_size. Use ceiling
    # division instead.
    num_batches_per_epoch = (data_size + batch_size - 1) // batch_size
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        shuffle_indices = np.random.permutation(np.arange(data_size))
        shuffled_data = data[shuffle_indices]
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            yield shuffled_data[start_index:end_index]
|
import sublime

version = sublime.version()

__all__ = ["Jfile"]

# Sublime Text 3+ runs Python 3 and requires package-relative imports. Compare the
# version numerically: the original string comparison (version >= '3000') would break
# for version strings of a different length (e.g. a hypothetical '10000').
if int(version) >= 3000:
    from .jfile import Jfile
else:
    from jfile import Jfile
|
from panda3d.core import GeomVertexFormat, Vec4, Point3, GeomVertexWriter
from .Geometry import Geometry
from .PolygonView import PolygonView
# A flat rectangle with color
class Rect(Geometry):
    """Axis-aligned rectangle in the XZ plane drawn with a single uniform color."""

    def __init__(self):
        Geometry.__init__(self, "rect", GeomVertexFormat.getV3c4())
        self.color = Vec4(1, 1, 1, 1)
        self.mins = Point3(0)
        self.maxs = Point3(0)

    def addView(self, primitiveType, drawMask, viewHpr = None, state = None):
        # Wrap this rect in a polygon view and register it with the base Geometry.
        view = PolygonView(self, primitiveType, drawMask, viewHpr, state)
        return Geometry.addView(self, view)

    def setMinMax(self, mins, maxs):
        """Set the rectangle extents and regenerate its vertices."""
        self.mins = mins
        self.maxs = maxs
        self.generateVertices()

    def setColor(self, color):
        """Set the vertex color and regenerate the vertices."""
        self.color = color
        self.generateVertices()

    def generateVertices(self):
        # Write the four corners (same emission order as before), each with the
        # shared color, then let the base class finish the geometry.
        self.vertexBuffer.setNumRows(4)
        vwriter = GeomVertexWriter(self.vertexBuffer, "vertex")
        cwriter = GeomVertexWriter(self.vertexBuffer, "color")
        corners = (
            (self.mins.x, self.mins.z),
            (self.mins.x, self.maxs.z),
            (self.maxs.x, self.maxs.z),
            (self.maxs.x, self.mins.z),
        )
        for corner_x, corner_z in corners:
            vwriter.setData3f(corner_x, 0, corner_z)
            cwriter.setData4f(self.color)
        Geometry.generateVertices(self)
|
from parser.constants import NUM_SPECIAL_TOKENS
class PaperConfiguration(object):
    """
    Configurations and hyper-parameter values used in the original paper.
    """
    # NOTE(review): presumably 1.0 means "keep everything" (no dropout) — confirm against usage.
    dropout = 1.0
    learning_rate = 0.001
    # NOTE(review): presumably the gradient-clipping threshold — confirm against usage.
    max_gradient_norm = 40.
    hidden_size = 25  # d/2
    batch_size = 32

    # (N) Number of tokens in vocabulary, including special ones.
    vocab_size = 4000 + NUM_SPECIAL_TOKENS

    sent_size = 25  # (j) Number of tokens in input.
    num_tokens_left = 12  # Number of tokens to be used from the left.
    num_tokens_right = 13  # Number of tokens to be used from the right.
    num_epochs = 50
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
from scipy.signal import argrelmin
from tqdm import tqdm
class HyFeature(object):
"""
Utility class for representing and fitting individual or multiple absorption features.
"""
    def __init__(self, name, pos, width, depth=1, data=None, color='g'):
        """
        Create a new feature:

        *Arguments*:
         - name = a name for this feature.
         - pos = the position of this feature (in nm).
         - width = the width of this feature (in nm).
         - depth = the depth of this feature (max to min). Default is 1.
         - data = a real spectra associated with this feature (e.g. for feature fitting or from reference libraries).
                  Should be a numpy array such that data[0,:] gives wavelength and data[1,:] gives reflectance.
         - color = a matplotlib color used when plotting this feature. Default is 'g'.
        """
        self.name = name
        self.pos = pos
        self.width = width
        self.depth = depth
        self.color = color
        self.data = data
        self.mae = -1  # mean absolute error of the last fit; -1 until a fit has been performed
        self.strength = -1  # feature strength of the last fit; -1 until a fit has been performed
        self.components = None  # presumably sub-features from a multi-feature fit — TODO confirm
        self.endmembers = None  # presumably endmember spectra associated with this feature — TODO confirm
def get_start(self):
"""
Get start of feature.
Return:
returns feature position - 0.5 * feature width.
"""
return self.pos - self.width * 0.5
    def get_end(self):
        """
        Get approximate end of feature

        Return:
         returns feature position + 0.5 * feature width.
        """
        return self.pos + self.width * 0.5
######################
## Feature models
######################
@classmethod
def lorentzian(cls, x, pos, width, depth, offset=1.0):
"""
Static function for evaluating a Lorentzian feature model
*Arguments*:
- x = wavelengths (nanometres) to evaluate the feature over
- pos = the position of the features (nanometres)
- width = parameter controlling the width of the feature.
Scaled such that pos - width / 2 -> pos + width / 2 contains ~99% of an equivalent
gaussian feature.
- depth = the depth of the feature (max to min)
- offset = the vertical offset of the feature (i.e. value where no absorption exists). Default is 1.0.
"""
width = width / 6 # conversion so that width contains ~99% of (gaussian) feature
return offset - (depth * width ** 2 / width) * width / ((x - pos) ** 2 + width ** 2)
@classmethod
def gaussian(cls, x, pos, width, depth, offset=1.0):
"""
Static function for evaluating a Gaussian feature model
*Arguments*:
- x = wavelengths (nanometres) to evaluate the feature over
- pos = the position of the features (nanometres)
- width = parameter controlling the width of the feature (= standard deviation / 6).
Scaled such that pos - width / 2 -> pos + width / 2 contains ~99% of the feature.
- depth = the depth of the feature (max to min)
- offset = the vertical offset of the feature (i.e. value where no absorption exists). Default is 1.0.
"""
width = width / 6 # conversion so that width contains ~99% of (gaussian) feature
return offset - depth * np.exp(-(x - pos) ** 2 / (2 * width ** 2))
@classmethod
def multi_lorentz(cls, x, pos, width, depth, offset=1.0):
"""
Static function for evaluating a multi-Lorentzian feature model
*Arguments*:
- x = wavelengths (nanometres) to evaluate the feature over
- pos = a list of positions for each individual lorentzian function (nanometres)
- width = a list of widths for each individual lorentzian function.
- depth = a list of depths for each individual lorentzian function (max to min)
- offset = the vertical offset of the functions. Default is 1.0.
"""
y = np.zeros_like(x)
for p, w, d in zip(pos, width, depth):
y += cls.lorentzian(x, p, w, d, 0)
return y + offset
@classmethod
def multi_gauss(cls, x, pos, width, depth, offset=1.0):
"""
Static function for evaluating a multi-gaussian feature model
*Arguments*:
- x = wavelengths (nanometres) to evaluate the feature over
- pos = a list of positions for each individual gaussian function (nanometres)
- width = a list of widths for each individual gaussian function.
- depth = a list of depths for each individual gaussian function (max to min)
- offset = the vertical offset of the functions. Default is 1.0.
"""
y = cls.gaussian(x, pos[0], width[0], depth[0], 0)
if len(pos) > 1:
for p, w, d in zip(pos[1:], width[1:], depth[1:]):
y += cls.gaussian(x, p, w, d, 0)
return y + offset
############################
## Feature fitting
############################
    @classmethod
    def _lsq(cls, params, func, x, y, n=1):
        """
        Calculate error for least squares optimization
        """
        # `n` is unused here; it exists so this function shares a call signature with
        # `_lsq_multi`. Non-finite residuals are replaced with a huge penalty value so
        # the optimiser steers away from parameter regions that produce them.
        return np.nan_to_num( y - func(x, *params), nan=99999999999999,
                            posinf=99999999999999,
                            neginf=99999999999999 )
    @classmethod
    def _lsq_multi(cls, params, func, x, y, n):
        """
        Calculate error for least squares optimization of multi-gauss or multi-lorentz
        """
        # `params` is a flat vector: the first n entries are positions, the next n are
        # widths and the final n are depths. Non-finite residuals are replaced with a
        # huge penalty value so the optimiser steers away from them.
        return np.nan_to_num( y - func(x, params[0:n], params[n:(2*n)], params[(2*n):]), nan=99999999999999,
                            posinf=99999999999999,
                            neginf=99999999999999 )
@classmethod
def fit(cls, wav, refl, method='lorentz', n=1, vb=True, ftol=1e-4, order=3):
"""
Fit a hyperspectral feature(s) to a (detrended) spectra.
*Arguments*:
- wav = the wavelength of the spectral subset to fit to.
- refl = the reflectance spectra to fit to.
- method = the spectra type to fit. Options are: 'minmax' (quick but rough), 'lorentz' or 'gauss'.
- n = the number of features to fit. Default is 1.
- verbose = True if a progress bar should be created when fitting to multiple spectra (as this can be slow).
- ftol = the stopping criterion for the least squares optimization. Default is 1e-4.
- order = the order of local minima detection. Default is 3. Smaller numbers return smaller local minima but are more
sensitive to noise.
*Returns*: a HyData, or list of HyData instances (n>1) describing each features:
- pos = the optimised feature position ( number or array depending on shape of refl)
- width = the optimised feature width
- depth = the optimised feature depth
- strength = the feature strength (reduction in variance compared to no feature)
"""
# get list of spectra and check it is the correct shape
X = np.array(refl)
if len(X.shape) == 1:
vb = False
X = X[None, :]
assert len(X.shape) == 2, "Error - refl must be an Nxm array of N spectra over m wavelenghts."
assert X.shape[1] == len(wav), "Error - inconsistent lengths; reflectance data must match provided wavelengths."
assert np.isfinite(X).all(), "Error - input spectra contain nans"
X /= max(1.0, np.max(X)) # ensure max of refl is 1
# calculate initial guesses
if n == 1:
idx = np.argmin(X, axis=1)
pos = wav[idx]
depth = 1.0 - X[range(X.shape[0]), idx]
width = 0.5 * (wav[-1] - wav[0]) # TODO; is there a better way to estimate width?
X0 = np.vstack([pos, [width] * len(pos), depth, depth]).T
else:
idx, minima = argrelmin( X, axis=1, order=order ) # get all local minima
width = 0.5 * (wav[-1] - wav[0]) # TODO; is there a better way to estimate width?
midp = int( len(wav) / 2 ) # index of middle of search domain (used as default for pos).
X0 = np.zeros( (X.shape[0], 3*n + 1) )
loop = range(X.shape[0])
if vb: loop = tqdm(loop, desc="Extracting local minima", leave=False)
for i in loop:
# get indices of local minima
_idx = minima[ idx==i ]
if _idx.shape[0] == 0:
continue # no minima
if _idx.shape[0] < n: # ensure we have an index for each feature we want to fit
_idx = np.hstack([_idx,[midp] * (n-len(_idx))])
# get depths
d = 1 - X[i, _idx]
# too many features?
if _idx.shape[0] > n:
srt = np.argsort(d)[::-1][0:n] # keep deepest features
d = d[ srt ]
_idx = _idx[ srt ]
# sort by depth
#srt = np.argsort(d)[::-1]
#d = d[srt]
#_idx = _idx[srt]
# get position and build width prior
p = wav[ _idx ]
w = [ width ] * _idx.shape[0]
# store
X0[i] = np.hstack( [p,w,d,0] )
# quick and dirty and done already!
if 'minmax' in method.lower():
out = X0
else: # loop through all spectra (sloooooooow!)
X0 = X0[:,:-1] # drop last value (faked strengths) from X0
# choose model and associated optimisation function
if 'lorentz' in method.lower():
if n == 1:
fmod = cls.lorentzian
lsq = cls._lsq
else:
fmod = cls.multi_lorentz
lsq = cls._lsq_multi
elif 'gauss' in method.lower():
if n == 1:
fmod = cls.gaussian
lsq = cls._lsq
else:
fmod = cls.multi_gauss
lsq = cls._lsq_multi
else:
assert False, "Error: %s is an unknown method" % method
# calculate bounds constraints
if n == 1:
mn = [wav[0] - 1, (wav[1] - wav[0]) * 5, 0] # min pos, width, depth
mx = [wav[-1] + 1, (wav[-1] - wav[0]) * 2, 1] # max pos, width, depth
else:
mn = np.array([wav[0] - 1] * n + [(wav[1] - wav[0]) * 5] * n + [0] * n) # min pos, width, depth
mx = np.array([wav[-1] + 1] * n + [(wav[-1] - wav[0]) * 2] * n + [1] * n ) # max pos, width, depth
bnds = [mn,mx]
x0 = X0[0, :] # prior x0 for first feature, after this we test x0 from previous spectra
out = np.zeros((X.shape[0], n*3 + 1)) # output array
loop = range(X.shape[0])
if vb:
loop = tqdm(loop, desc="Fitting features", leave=False)
for i in loop:
# check if opt values from previous spectra are a better initial guess
# (as the spectra are probably very similar!).
#if np.sum(lsq(X0[i], fmod, wav, X[i],n)**2) < np.sum(lsq(x0, fmod, wav, X[i],n) ** 2):
# x0 = X0[i]
x0 = X0[i]
# check x0 is feasible
if not ((x0 > bnds[0]).all() and (x0 < bnds[1]).all()):
continue # skip
# do optimisation
fit = least_squares(lsq, x0=x0, args=(fmod, wav, X[i], n), bounds=bnds, ftol=ftol)
#if n > 1: # multi-feature - sort by depth
#idx = np.argsort( fit.x[2*n : 3*n] )[::-1]
#out[i, :] = (*fit.x[0:n][idx], *fit.x[n:(2 * n)][idx], *fit.x[2*n:3*n][idx],
# max(0, np.std(1 - refl) - np.std(fit.fun)))
#else:
# out[i,:] = (*fit.x, max(0, np.std(1 - refl) - np.std(fit.fun)))
# store output
out[i, :] = (*fit.x, max(0, np.std(1 - refl) - np.std(fit.fun)))
#x0 = fit.x
# resolve out into pos, width, depth and strength
if out.shape[0] == 1: # run on a single spectra - return HyFeature instances
if out.shape[1] == 4: # single feature
feat = cls('est', out[0, 0], out[0, 1], out[0, 2], data=np.array([wav, X[0, :]]), color='r')
feat.strength = out[0, 3]
return feat
else:
feat = []
for i in range(n):
feat.append(cls('est', out[0, 0+i], out[0, n+i], out[0, (2*n+i)], data=np.array([wav, X[0, :]]), color='r'))
mf = MixedFeature('mix', feat, data=np.array([wav, X[0, :]]), color='r' )
mf.strength = out[0,-1]
return mf
else:
# resolve out into pos, width, depth and strength
pos = out[:, 0:n]
width = out[:, n:(n*2)]
depth = out[:, (n*2):(n*3)]
strength = out[:, -1]
return pos, width, depth, strength
# noinspection PyDefaultArgument
def quick_plot(self, method='gauss', ax=None, label='top', lab_kwds={}, **kwds):
"""
Quickly plot this feature.
*Arguments*:
- method = the method used to represent this feature. Options are:
- 'gauss' = represent using a gaussian function
- 'lorentz' = represent using a lorentzian function
- 'range' = draw vertical lines at pos - width / 2 and pos + width / 2.
- 'fill' = fill a rectangle in the region dominated by the feature with 'color' specifed in kwds.
- 'line' = plot a (vertical) line at the position of this feature.
- 'all' = plot with all of the above methods.
- ax = an axis to add the plot to. If None (default) a new axis is created.
- label = Label this feature (using it's name?). Options are None (no label), 'top', 'middle' or 'lower'. Or,
if an integer is passed, odd integers will be plotted as 'top' and even integers as 'lower'.
- lab_kwds = Dictionary of keywords to pass to plt.text( ... ) for controlling labels.
*Keywords*: Keywords are passed to ax.axvline(...) if method=='range' or ax.plot(...) otherwise.
*Returns*:
- fig = the figure that was plotted to
- ax = the axis that was plotted to
"""
if ax is None:
fig, ax = plt.subplots()
# plot reference spectra and get _x for plotting
if self.data is not None:
_x = self.data[0, : ]
ax.plot(_x, self.data[1, :], color='k', **kwds)
else:
_x = np.linspace(self.pos - self.width, self.pos + self.width)
# set color
if 'c' in kwds:
kwds['color'] = kwds['c']
del kwds['c']
kwds['color'] = kwds.get('color', self.color)
# get _x for plotting
if 'range' in method.lower() or 'all' in method.lower():
ax.axvline(self.pos - self.width / 2, **kwds)
ax.axvline(self.pos + self.width / 2, **kwds)
if 'line' in method.lower() or 'all' in method.lower():
ax.axvline(self.pos, color='k', alpha=0.4)
if 'gauss' in method.lower() or 'all' in method.lower():
if self.components is None: # plot single feature
_y = HyFeature.gaussian(_x, self.pos, self.width, self.depth)
else:
_y = HyFeature.multi_gauss(_x, [c.pos for c in self.components],
[c.width for c in self.components],
[c.depth for c in self.components] )
ax.plot(_x, _y, **kwds)
if 'lorentz' in method.lower() or 'all' in method.lower():
if self.components is None: # plot single feature
_y = HyFeature.lorentzian(_x, self.pos, self.width, self.depth)
else:
_y = HyFeature.multi_lorentz(_x, [c.pos for c in self.components],
[c.width for c in self.components],
[c.depth for c in self.components] )
ax.plot(_x, _y, **kwds)
if 'fill' in method.lower() or 'all' in method.lower():
kwds['alpha'] = kwds.get('alpha', 0.25)
ax.axvspan(self.pos - self.width / 2, self.pos + self.width / 2, **kwds)
# label
if not label is None:
# calculate label position
rnge = ax.get_ylim()[1] - ax.get_ylim()[0]
if isinstance(label, int):
if label % 2 == 0:
label = 'top' # even
else:
label = 'low' # odd
if 'top' in label.lower():
_y = ax.get_ylim()[1] - 0.05 * rnge
va = lab_kwds.get('va', 'top')
elif 'mid' in label.lower():
_y = ax.get_ylim()[0] + 0.5 * rnge
va = lab_kwds.get('va', 'center')
elif 'low' in label.lower():
_y = ax.get_ylim()[0] + 0.05 * rnge
va = lab_kwds.get('va', 'bottom')
else:
assert False, "Error - invalid label position '%s'" % label.lower()
# plot label
lab_kwds['rotation'] = lab_kwds.get('rotation', 90)
lab_kwds['alpha'] = lab_kwds.get('alpha', 0.5)
ha = lab_kwds.get('ha', 'center')
if 'ha' in lab_kwds: del lab_kwds['ha']
if 'va' in lab_kwds: del lab_kwds['va']
lab_kwds['bbox'] = lab_kwds.get('bbox', dict(boxstyle="round",
ec=(0.2, 0.2, 0.2),
fc=(1., 1., 1.),
))
ax.text(self.pos, _y, self.name, va=va, ha=ha, **lab_kwds)
return ax.get_figure(), ax
class MultiFeature(HyFeature):
    """
    A spectral feature with variable position due to a solid solution between known end-members.
    """

    def __init__(self, name, endmembers):
        """
        Create this multifeature from known end-members.

        *Arguments*:
         - endmembers = a list of HyFeature objects representing each end-member.
        """
        # init this feature so that it ~ covers all of its 'sub-features'
        minw = min([e.pos - e.width / 2 for e in endmembers])
        maxw = max([e.pos + e.width / 2 for e in endmembers])
        depth = np.mean([e.depth for e in endmembers])
        super().__init__(name, pos=(minw + maxw) / 2, width=maxw - minw, depth=depth, color=endmembers[0].color)
        # store endmembers
        self.endmembers = endmembers

    def count(self):
        """Return the number of end-members in this multifeature."""
        return len(self.endmembers)

    def quick_plot(self, method='fill+line', ax=None, suplabel=None, sublabel=('alternate', {}), **kwds):
        """
        Quickly plot this feature.

        *Arguments*:
         - method = the method used to represent this feature. Options are:
                     - 'gauss' = represent using a gaussian function at each endmember.
                     - 'lorentz' = represent using a lorentzian function at each endmember.
                     - 'range' = draw vertical lines at pos - width / 2 and pos + width / 2.
                     - 'fill' = fill a rectangle in the region dominated by the feature with 'color' specifed in kwds.
                     - 'line' = plot a (vertical) line at the position of each feature.
                     - 'all' = plot with all of the above methods.
                    default is 'fill+line'.
         - ax = an axis to add the plot to. If None (default) a new axis is created.
         - suplabel = Label position for this feature. Default is None (no labels). Options are 'top', 'middle' or 'lower'.
         - sublabel = Label positions for endmembers. Options are None (no labels), 'top', 'middle', 'lower' or 'alternate'.
                      Or, if an integer is passed then it will be used to initialise an alternating pattern
                      (even = top, odd = lower).
        *Keywords*: Keywords are passed to ax.axvline(...) if method=='range' or ax.plot(...) otherwise.
        *Returns*:
         - fig = the figure that was plotted to
         - ax = the axis that was plotted to
        """
        if ax is None:
            fig, ax = plt.subplots()

        # plot using the requested representation(s)
        if 'range' in method.lower() or 'all' in method.lower():
            super().quick_plot(method='range', ax=ax, label=None, **kwds)
        if 'line' in method.lower() or 'all' in method.lower():
            for e in self.endmembers:  # plot line for each end-member
                e.quick_plot(method='line', ax=ax, label=None, **kwds)
        if 'gauss' in method.lower() or 'all' in method.lower():
            for e in self.endmembers:  # plot gaussian for each end-member
                e.quick_plot(method='gauss', ax=ax, label=None, **kwds)
            if isinstance(sublabel, int): sublabel += 1
        if 'lorentz' in method.lower() or 'all' in method.lower():
            for e in self.endmembers:  # plot lorentzian for each end-member
                e.quick_plot(method='lorentz', ax=ax, label=None, **kwds)
            if isinstance(sublabel, int): sublabel += 1
        if 'fill' in method.lower() or 'all' in method.lower():
            super().quick_plot(method='fill', ax=ax, label=None, **kwds)

        # and do labels
        if suplabel is not None:
            if not isinstance(suplabel, tuple): suplabel = (suplabel, {})
            # pass a copy of the text kwargs: HyFeature.quick_plot mutates lab_kwds
            super().quick_plot(method='label', ax=ax, label=suplabel[0], lab_kwds=dict(suplabel[1]))
        if sublabel is not None:
            if not isinstance(sublabel, tuple): sublabel = (sublabel, {})
            # copy the kwargs dict so the shared mutable default above is never
            # mutated downstream (HyFeature.quick_plot writes into lab_kwds)
            sublabel = (sublabel[0], dict(sublabel[1]))
            if isinstance(sublabel[0], str) and 'alt' in sublabel[0].lower():
                sublabel = (1, sublabel[1])  # alternate labelling
            for e in self.endmembers:
                e.quick_plot(method='label', ax=ax, label=sublabel[0], lab_kwds=sublabel[1])
                sublabel = (sublabel[0] + 1, sublabel[1])
        return ax.get_figure(), ax
class MixedFeature(HyFeature):
    """
    A spectral feature resulting from a mixture of known sub-features.
    """

    def __init__(self, name, components, **kwds):
        """
        Create this mixed feature from known components.

        *Arguments*:
         - components = a list of HyFeature objects representing each end-member.
        *Keywords*:
         - keywords are passed to HyFeature.init()
        """
        # Size this feature so it roughly spans all of its sub-features.
        lower = min(c.pos - c.width / 2 for c in components)
        upper = max(c.pos + c.width / 2 for c in components)
        mean_depth = np.mean([c.depth for c in components])
        # Default to the first component's colour unless one was given.
        kwds.setdefault('color', components[0].color)
        super().__init__(name, pos=(lower + upper) / 2, width=upper - lower, depth=mean_depth, **kwds)
        # keep the component features for later inspection/plotting
        self.components = components

    def count(self):
        """Return the number of component sub-features."""
        return len(self.components)
#!/usr/bin/env python
import sys
import os
# Make the package importable when the script is run from the source tree.
sys.path += [os.getcwd()]
from setuptools import setup, find_packages
import re
import imp
# Distribution/package name and fallback short description.
PKG = 'digest'
DESCRIPTION = 'Calculate message digests of files or standard input'
def load_info():
    """Collect package metadata from ``digest/__init__.py``.

    Scans the package's ``__init__.py`` for dunder assignments such as
    ``__version__ = '...'`` and also imports the file to capture its module
    docstring as the long description.

    *Returns*: dict mapping each dunder name to its string value, plus a
    'long_description' entry (module docstring, or DESCRIPTION on failure).
    """
    result = {}
    # Look for identifiers beginning with "__" at the beginning of the line.
    pattern = re.compile(r'^(__\w+__)\s*=\s*[\'"]([^\'"]*)[\'"]')
    here = os.path.dirname(os.path.abspath(sys.argv[0]))
    init_path = os.path.join(here, PKG, '__init__.py')
    with open(init_path, 'r') as fh:
        for line in fh:
            match = pattern.match(line)
            if match:
                result[match.group(1)] = match.group(2)
    sys.path = [here] + sys.path
    try:
        # `imp` is deprecated and removed in Python 3.12; importlib is the
        # supported way to load a module from an explicit path.
        import importlib.util
        spec = importlib.util.spec_from_file_location(PKG, init_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        result['long_description'] = module.__doc__
    except Exception:
        # Fall back to the short description if the module cannot be executed.
        result['long_description'] = DESCRIPTION
    return result
# Gather metadata from the package's __init__.py once, at import time.
info = load_info()
# Now the setup stuff.
setup (name = PKG,
       version = info['__version__'],
       description = DESCRIPTION,
       long_description = info['long_description'],
       long_description_content_type = 'text/x-rst',
       packages = find_packages(),
       url = info['__url__'],
       license = info['__license__'],
       author = info['__author__'],
       author_email = info['__email__'],
       # expose `digest` as a command-line tool
       entry_points = {
           'console_scripts' : 'digest=digest:main'
       },
       classifiers = [
           'Intended Audience :: Developers',
           'Intended Audience :: System Administrators',
           'License :: OSI Approved :: Apache Software License',
           'Programming Language :: Python',
           'Topic :: Text Processing :: Filters',
           'Topic :: Utilities',
       ]
       )
|
#!/usr/bin/env python3
# Collects the files under dirpath into a list (memory/IO experiment).
import os
dirpath = '/usr/share'
# slice size: how many of the collected files to actually read
z = 160000
c = 5  # number of repetitions of the whole scan
def file_reader(path):
    """Return the raw bytes of *path*, or None if the file cannot be read.

    Any OS-level failure (missing file, permission denied, broken link, ...)
    is treated as "no content" rather than an error.
    """
    try:
        with open(path, 'br') as file:
            return file.read()
    except OSError:
        # FileNotFoundError is a subclass of OSError, so a single handler
        # covers both of the cases the original listed separately.
        return None
# Repeatedly build the full file list and read the first z files, then drop
# all references, to observe allocation behaviour across iterations.
for i in range(c):
    print(i + 1, '/', c)
    path_list = []
    for d, dirs, files in os.walk(dirpath):
        for f in files:
            path = os.path.join(d, f)
            path_list.append(path)
    x = []
    # NOTE(review): this inner loop variable shadows the outer `i`;
    # harmless as written, but confirm it was intentional.
    for i in path_list[:z]:
        x.append(file_reader(i))
    del x
    del path_list
|
import os
import urllib.request
from collections import Counter
import re
# data provided
tmp = os.getenv("TMP", "/tmp")
stopwords_file = os.path.join(tmp, 'stopwords')
harry_text = os.path.join(tmp, 'harry')
# NOTE: these downloads run at import time and need network access.
urllib.request.urlretrieve(
    'https://bites-data.s3.us-east-2.amazonaws.com/stopwords.txt',
    stopwords_file
)
urllib.request.urlretrieve(
    'https://bites-data.s3.us-east-2.amazonaws.com/harry.txt',
    harry_text
)
def get_harry_most_common_word():
    """Return the (word, count) pair of the most common non-stopword.

    Reads the downloaded stopword list and book text, lowercases each word,
    strips non-word characters, drops stopwords and single-character tokens,
    and returns the top entry from a Counter.
    """
    # Use `with` so file handles are closed, and a set so membership tests
    # in the filter below are O(1) instead of scanning a list.
    with open(stopwords_file) as f:
        stop_words = {line.strip() for line in f}
    with open(harry_text) as f:
        _txt = [
            re.sub(r'\W+', r'', word)
            for line in f
            for word in line.strip().lower().split()
        ]
    word_list = [
        word
        for word in _txt
        if word not in stop_words and len(word) > 1
    ]
    return Counter(word_list).most_common()[0]
#!/usr/bin/env jython
import os
import re
import requests
import socket
import threading
from burp import IBurpExtender
from burp import IContextMenuFactory
from javax.swing import JMenuItem
from java.util import List, ArrayList
from java.net import URL
# to use this, you need to set the bing api env var
bing_api_key = os.environ.get('BING_API_KEY')  # None when BING_API_KEY is unset
print('welcome to ctlfish blackhatpython bing BurpExtender')
class BurpExtender(IBurpExtender, IContextMenuFactory):
    """Burp Suite (Jython) extension adding a "Send to Bing" context-menu item.

    For the selected host it runs Bing searches by IP (and by domain when the
    selection is a hostname) and adds any returned page URLs to Burp's scope.
    """

    def registerExtenderCallbacks(self, callbacks):
        # Burp entry point: keep the callbacks/helpers and register ourselves
        # as a context-menu factory.
        self._callbacks = callbacks
        self._helpers = callbacks.getHelpers()
        self.context = None
        # set up extension
        self._callbacks.setExtensionName('ctlfish blackhatpython bing')
        callbacks.registerContextMenuFactory(self)
        return

    def createMenuItems(self, context_menu):
        # Called by Burp whenever a context menu opens; offer our menu entry.
        self.context = context_menu
        menu_list = ArrayList()
        menu_list.add(JMenuItem("Send to Bing", actionPerformed=self.bing_menu))
        return menu_list

    def bing_menu(self, event):
        # get details of users clicked item
        http_traffic = self.context.getSelectedMessages()
        print('{} requests highlighted'.format(len(http_traffic)))
        for traffic in http_traffic:
            http_service = traffic.getHttpService()
            host = http_service.getHost()
            print('User selected host: {}'.format(host))
            self.bing_search(host)
        return

    def bing_search(self, host):
        # check if we have ip or hostname
        is_ip = re.match(r'((?:\d+\.){3}\d+)', host)
        if is_ip:
            ip_address = host
            domain = False
        else:
            # NOTE(review): gethostbyname can raise socket.gaierror for
            # unresolvable hosts — currently unhandled; confirm desired behaviour.
            ip_address = socket.gethostbyname(host)
            domain = True
        # queries run on daemon threads so the Burp UI stays responsive
        bing_query_string = 'ip:{}'.format(ip_address)
        t = threading.Thread(target=self.bing_query, args=(bing_query_string,))
        t.daemon = True
        t.start()
        #self.bing_query(bing_query_string)
        if domain:
            bing_query_string = 'domain:{}'.format(host)
            t = threading.Thread(target=self.bing_query, args=(bing_query_string,))
            t.daemon = True
            t.start()
            #self.bing_query(bing_query_string)
        return

    def bing_query(self, bing_query_string):
        # FYI: you *must* set the lib path for python addons correctly and install requests there
        # while testing, I just pointed to the lib64 version of site-packages within my venv
        # todo: csuttles fix this to use the burp libs to send the requests
        print('Performing Bing search for: {}'.format(bing_query_string))
        bing_url = 'https://api.cognitive.microsoft.com/bing/v7.0/search'
        headers = {'user-agent': 'ctlfish/blackhatpython/0.0.1', "Ocp-Apim-Subscription-Key": bing_api_key}
        params = {"q": bing_query_string, "textDecorations": True, "textFormat": "HTML"}
        resp = requests.get(bing_url, params=params, headers=headers)
        #return resp
        try:
            rjson = resp.json()
            # add every returned page URL to Burp's scope (unless already there)
            for page in rjson['webPages']['value']:
                print('*' * 80)
                print('page url: {}'.format(page["url"]))
                print('page id: {}'.format(page["id"]))
                print('page name: {}'.format(page["name"]))
                j_url = URL(page['url'])
                print('page in scope: {}'.format(self._callbacks.isInScope(j_url)))
                if not self._callbacks.isInScope(j_url):
                    self._callbacks.includeInScope(j_url)
                    print('added {} to Burp Scope'.format(j_url))
                else:
                    print('url {} already in Burp Scope'.format(j_url))
        except Exception as ex:
            # a missing 'webPages' key (no results) lands here too
            print('caught exception {}:{}'.format(ex.__class__.__name__, ex))
            print('no results from Bing')
            pass
        return
|
#!/usr/bin/python3
# Drive a Spark job that dumps a MySQL table, configured via environment
# variables (with sensible defaults) and two CLI arguments:
#   argv[1] = table name (also the output subdirectory)
#   argv[2] = name of the mapper script to install as mysql_5_dump_mapper.py
import os, sys, time, multiprocessing

# Connection settings, overridable through the environment.
mysql_host = os.environ.get("MYSQL_HOST", "localhost")
mysql_port = int(os.environ.get("MYSQL_PORT", "3306"))
mysql_user = os.environ.get("MYSQL_USER", "root")
mysql_password = os.environ.get("MYSQL_PASSWORD", "root")
jdbc_url = "jdbc:mysql://{}:{}?user={}&password={}".format(mysql_host, mysql_port, mysql_user, mysql_password)

# Working/output locations and Spark tuning knobs.
work_dir = os.environ.get("WORK_DIR", "/work")
output_dir = "{}/{}".format(os.environ.get("OUTPUT_DIR", "/output"), sys.argv[1])
execute_repartition = int(os.environ.get("EXECUTE_REPARTITION", multiprocessing.cpu_count()))
result_repartition = int(os.environ.get("RESULT_REPARTITION", multiprocessing.cpu_count()))
spark_args = os.environ.get("SPARK_ARGS", "")
table_name = sys.argv[1]

# Shell commands: stage the runner + mapper, clear old output, run Spark.
commands = [
    "mkdir /mysql-dump-run/ && cp -rf /script/mysql-5-spark-dump-runner.py /mysql-dump-run/ && cp -rf {}/* /mysql-dump-run/".format(work_dir),
    "mv /mysql-dump-run/{}.py /mysql-dump-run/mysql_5_dump_mapper.py".format(sys.argv[2]),
    "rm -rf {}".format(output_dir),
    "cd /mysql-dump-run/ && /spark/bin/spark-submit --packages=mysql:mysql-connector-java:5.1.48 {} mysql-5-spark-dump-runner.py '{}' '{}' '{}' '{}' '{}'".format(spark_args, jdbc_url, table_name, execute_repartition, result_repartition, output_dir)
]

# Print the whole plan first, then execute each command in order.
for command in commands:
    print("OS Execute: {}".format(command))
for command in commands:
    os.system(command)
import pytest
import tscribe
import os
import pandas
from uuid import uuid4
from pathlib import Path
import sqlite3
from docx import Document
import webvtt
import glob
import logging
# Log everything to log.txt, truncating it at the start of each test run.
logging.basicConfig(filename="log.txt", level=logging.DEBUG, filemode="w")
# All AWS Transcribe sample fixtures, in a stable (sorted) order.
sample_files = sorted(glob.glob("sample_material/*.json"))
@pytest.mark.parametrize("sample", sample_files)
def test_sample_files(sample):
    """Confirm test files accessible and safe"""
    logging.info("test_sample_files")
    assert Path(sample).is_file(), "Sample file should exist"
    assert Path(sample).suffix == ".json", "Sample files should be json files"
    # fixtures must be scrubbed of any real AWS account id
    data = tscribe.load_json_as_dict(sample)
    assert data["accountId"] == "XXXXXXXXXXXX"
@pytest.mark.parametrize(
    "time_stamp,expected",
    [
        ("1.0", "0:00:01"),
        ("2.5", "0:00:02"),
        ("60.0", "0:01:00"),
        ("3600", "1:00:00"),
    ],
)
def test_convert_time_stamp(time_stamp, expected):
    """
    Test timestamp conversion utility function
    GIVEN a float of seconds as data type str
    WHEN calling convert_time_stamp(...)
    THEN convert the float of seconds to a H:MM:SS format
    """
    logging.info("test_convert_time_stamp")
    # GIVEN a float of seconds as data type str (supplied by parametrize)
    # WHEN calling convert_time_stamp(...)
    result = tscribe.convert_time_stamp(time_stamp)
    # THEN convert the float of seconds to a H:MM:SS format
    assert result == expected, f"Result of {time_stamp} should be {expected}"
    # Cross-check: converting H:MM:SS back to seconds should match the
    # (truncated) original input.
    hours, minutes, seconds = (int(part) for part in result.split(":"))
    total_seconds = seconds + minutes * 60 + hours * 60 * 60
    assert (
        int(float(time_stamp)) == total_seconds
    ), f"Reverse calculation of {time_stamp} should be {total_seconds}"
@pytest.mark.parametrize("input_file", sample_files)
def test_load_json_as_dict(input_file):
    """
    Test json to dict function
    GIVEN a sample json file
    WHEN calling tscribe.load_json_as_dict(...)
    THEN return a dict
    """
    logging.info("test_load_json_as_dict")
    # GIVEN a sample json file
    # provided through parametrize
    # WHEN calling tscribe.load_json_as_dict(...)
    data = tscribe.load_json_as_dict(input_file)
    # THEN return a dict
    assert isinstance(data, dict), "Data should be of dict type"
@pytest.mark.parametrize("input_file", sample_files)
def test_calculate_confidence_statistics(input_file):
    """
    Test confidence stats data modeling
    GIVEN a data dict
    WHEN calling calculate_confidence_statistics(...)
    THEN return the data model with the right components
    """
    logging.info("test_calculate_confidence_statistics")
    # GIVEN a data dict
    data = tscribe.load_json_as_dict(input_file)
    # WHEN calling calculate_confidence_statistics(...)
    stats = tscribe.calculate_confidence_statistics(data)
    # THEN return the data model with the right components
    assert isinstance(stats, dict), "Stats should be of dict type"
    assert "timestamps" in stats, "Data model should include timestamps"
    # every confidence bucket must be present in the model
    for bucket in ("9.8", "9", "8", "7", "6", "5", "4", "3", "2", "1", "0"):
        assert bucket in stats, f"Data model should include {bucket}"
@pytest.mark.parametrize("input_file", sample_files)
def test_make_graph_png(input_file):
    """
    Test function for creating graphs from confidence stats
    GIVEN confidence stats from an input file
    WHEN calling make_graph_png(...)
    THEN produce chart.png
    """
    logging.info("test_make_graph_png")
    filepath = Path("chart.png")
    # Ensure blank slate
    if filepath.is_file():
        os.remove(filepath)
    # GIVEN confidence stats from an input file
    data = tscribe.load_json_as_dict(input_file)
    stats = tscribe.calculate_confidence_statistics(data)
    # WHEN calling make_graph_png(...)
    tscribe.make_graph_png(stats, "./")
    # THEN produce chart.png
    assert filepath.is_file(), "chart.png should be created"
    # Teardown: remove the generated chart
    os.remove(filepath)
@pytest.mark.parametrize("input_file", sample_files)
def test_decode_transcript_to_dataframe(input_file):
    """
    Test transcript decoding function
    GIVEN a data dict
    WHEN calling decode_transcript_to_dataframe(...)
    THEN return a four-column pandas DataFrame sized to the transcript
    """
    logging.info("test_decode_transcript_to_dataframe")
    # GIVEN a data dict
    data = tscribe.load_json_as_dict(input_file)
    # WHEN calling decode_transcript_to_dataframe(...)
    df = tscribe.decode_transcript_to_dataframe(data)
    # THEN
    assert isinstance(
        df, pandas.DataFrame
    ), "decode_transcript_to_dataframe should return a Pandas Data Frame"
    rows, cols = df.shape
    assert cols == 4, "Dataframe should have four columns"
    if input_file == "sample_single.json":
        # TODO: add a row-count expectation for the single-speaker sample
        pass
    if input_file == "sample_multiple.json":
        # multi-speaker transcripts produce one row per speaker segment
        assert rows == len(
            data["results"]["speaker_labels"]["segments"]
        ), "Rows should match number of segments"
@pytest.mark.parametrize("input_file", sample_files)
def test_write_to_docx(input_file):
    """
    Test production of docx output
    GIVEN an input file
    WHEN writing to docx
    THEN check output exists and contains content
    """
    logging.info("test_write_to_docx")
    # GIVEN an input file
    # WHEN writing to docx (random name avoids clashes between parallel runs)
    output_filename = Path(f"{uuid4().hex}.docx")
    tscribe.write(input_file, save_as=output_filename, format="docx")
    # THEN check output exists and contains content
    assert output_filename.is_file(), "Output file should exist"
    document = Document(output_filename)
    assert (
        len(document.tables) == 2
    ), "Document should contain two tables, stats and transcript"
    # table 0: confidence statistics
    t_conf = document.tables[0].cell(0, 0).text
    t_count = document.tables[0].cell(0, 1).text
    t_perc = document.tables[0].cell(0, 2).text
    assert (t_conf, t_count, t_perc) == (
        "Confidence",
        "Count",
        "Percentage",
    ), "First table should be stats headers"
    assert len(document.tables[0].rows) == 12, "Stats table should hold 12 rows"
    # table 1: the transcript itself
    t_time = document.tables[1].cell(0, 0).text
    t_speaker = document.tables[1].cell(0, 1).text
    t_content = document.tables[1].cell(0, 2).text
    assert (t_time, t_speaker, t_content) == (
        "Time",
        "Speaker",
        "Content",
    ), "Second table should be transcript headers"
    data = tscribe.load_json_as_dict(input_file)
    df = tscribe.decode_transcript_to_dataframe(data)
    assert (
        len(document.tables[1].rows) == len(df) + 1
    ), "Second table should be length of dataframe + headers"
    # the confidence chart image is embedded in the document body
    assert (
        "chart.png" in document.paragraphs[6]._p.xml
    ), "Chart should be in paragraph six"
    # Teardown
    os.remove(output_filename)
@pytest.mark.parametrize("input_file", sample_files)
def test_write_to_csv(input_file):
    """
    Test production of csv output
    GIVEN an input file
    WHEN writing to csv
    THEN check output exists and contains content
    """
    logging.info("test_write_to_csv")
    # GIVEN an input file
    # WHEN writing to csv
    output_filename = Path(f"{uuid4().hex}.csv")
    tscribe.write(input_file, save_as=output_filename, format="csv")
    # THEN check output exists and contains content
    assert output_filename.is_file(), "Output file should exist"
    with open(output_filename, "r") as file:
        lines = file.readlines()
    data = tscribe.load_json_as_dict(input_file)
    df = tscribe.decode_transcript_to_dataframe(data)
    # one line per transcript row plus the header line
    assert len(lines) == len(df) + 1, "CSV should be length of dataframe + headers"
    # Teardown
    os.remove(output_filename)
@pytest.mark.parametrize("input_file", sample_files)
def test_write_to_sqlite(input_file):
    """
    Test production of sqlite output
    GIVEN an input file
    WHEN writing to sqlite
    THEN check output exists and contains content
    """
    logging.info("test_write_to_sqlite")
    # GIVEN an input file
    # WHEN writing to sqlite
    output_filename = Path(f"{uuid4().hex}.db")
    tscribe.write(input_file, save_as=output_filename, format="sqlite")
    # THEN check output exists and contains content
    assert output_filename.is_file(), "Output file should exist"
    conn = sqlite3.connect(str(output_filename))
    c = conn.cursor()
    c.execute("SELECT * FROM transcript")
    query = c.fetchall()
    # Close the connection before deleting the file: leaving it open leaks the
    # handle and makes os.remove fail on Windows (file still locked).
    conn.close()
    data = tscribe.load_json_as_dict(input_file)
    df = tscribe.decode_transcript_to_dataframe(data)
    assert len(query) == len(df), "Database table should be length of dataframe"
    # Teardown
    os.remove(output_filename)
@pytest.mark.parametrize("input_file", sample_files)
def test_write_to_vtt(input_file):
    """
    Test production of vtt format
    GIVEN an input file
    WHEN writing to vtt
    THEN check output exists and contains content
    """
    logging.info("test_write_to_vtt")
    # GIVEN an input file
    # WHEN writing to vtt
    output_filename = Path(f"{uuid4().hex}.vtt")
    tscribe.write(input_file, save_as=output_filename, format="vtt")
    # THEN check output exists and contains content
    vtt = webvtt.read(output_filename)
    data = tscribe.load_json_as_dict(input_file)
    df = tscribe.decode_transcript_to_dataframe(data)
    assert len(vtt.captions) == len(
        df
    ), "vtt file should have equal captions to df rows"
    for caption in vtt.captions:
        assert hasattr(caption, "start"), "each caption should have a start_time"
        assert hasattr(caption, "end"), "each caption should have a end_time"
        assert hasattr(caption, "text"), "each caption should have text"
        # caption text is wrapped to at most 80 characters per line
        assert (
            len(caption.lines) >= len(caption.text) / 80
        ), "text should be split into max 80 long lines"
        # the single-speaker sample is exempt from the identifier check
        if input_file != "sample_single.json":
            assert hasattr(
                caption, "identifier"
            ), "each caption should have an identifier"
    # Teardown
    os.remove(output_filename)
@pytest.mark.parametrize("input_file", sample_files)
def test_write_to_default(input_file):
    """
    Test production of default output
    GIVEN an input file
    WHEN not specifying output
    THEN check output is the default format (docx next to the input)
    """
    logging.info("test_write_to_default")
    # GIVEN an input file
    # WHEN not specifying output
    tscribe.write(input_file)
    # the default output is a .docx alongside the input json
    expected_filename = input_file.replace(".json", ".docx")
    output_filename = Path(expected_filename)
    # THEN check output exists and contains content
    assert output_filename.is_file(), "Output file should exist"
    # Teardown
    os.remove(output_filename)
@pytest.mark.parametrize("input_file", sample_files)
@pytest.mark.parametrize("output_format", ["docx", "csv", "sqlite"])
@pytest.mark.parametrize("location", [".", "output"])
def test_save_as(input_file, output_format, location):
    """
    Test saving of supported formats to locations
    GIVEN locations of current or specific folder
    WHEN writing transcript in any supported format
    THEN check output exists
    """
    logging.info("test_save_as")
    if not Path("output").is_dir():
        os.mkdir("output")
    # GIVEN locations of current or specific folder
    # (target keeps the input's basename, with the chosen format's extension)
    output_filename = Path(location) / Path(Path(input_file).parts[-1]).with_suffix(
        f".{output_format}"
    )
    # WHEN writing transcript in any supported format
    tscribe.write(input_file, format=output_format, save_as=output_filename)
    # THEN check output exists
    assert output_filename.is_file()
    # Teardown
    os.remove(output_filename)
@pytest.mark.parametrize("input_file", sample_files)
@pytest.mark.xfail
def test_depricated_tmp_dir(input_file):
    """
    Test that using tmp_dir fails
    GIVEN an input file
    WHEN calling tscribe with tmp_dir
    THEN receive warning and fail
    """
    # NOTE(review): the function name keeps the historical "depricated"
    # spelling; renaming it would change which tests pytest collects.
    logging.info("test_deprecated_tmp_dir")
    # GIVEN an input file
    # WHEN calling tscribe with tmp_dir
    # THEN receive warning and fail
    tscribe.write(input_file, tmp_dir=".")
@pytest.mark.parametrize("input_file", sample_files)
@pytest.mark.xfail
def test_unrecognised_output_format(input_file):
    """
    Test for exception when given unrecognised output format
    GIVEN an input file and an unrecognised output format
    WHEN calling tscribe.write(...)
    THEN xfail
    """
    # GIVEN an input file and an unrecognised output format
    unrecognised_format = "unrecognised"
    # WHEN calling tscribe.write(...)
    # THEN xfail (tscribe.write is expected to raise for unknown formats)
    tscribe.write(input_file, format=unrecognised_format)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AutoEncoder(nn.Module):
    """Convolutional autoencoder.

    Two conv+pool stages followed by two linear layers compress a
    single-channel image into a `code_size` latent vector; two linear layers
    decode it back to a (1, width, height) image squashed into [0, 1].

    NOTE(review): enc_linear_1 hard-codes 80 input features, so the encoder
    only works for input sizes whose conv/pool output flattens to 80
    (e.g. 20x20 single-channel images) — confirm against callers.
    """

    def __init__(self, code_size, imgsize, height, width):
        super().__init__()
        self.code_size = code_size
        self.imgsize = imgsize  # flattened decoder output size; must equal width * height
        self.height = height
        self.width = width

        # Encoder specification
        self.enc_cnn_1 = nn.Conv2d(1, 10, kernel_size=5)
        self.enc_cnn_2 = nn.Conv2d(10, 20, kernel_size=5)
        self.enc_linear_1 = nn.Linear(80, 50)
        self.enc_linear_2 = nn.Linear(50, self.code_size)

        # Decoder specification
        self.dec_linear_1 = nn.Linear(self.code_size, 160)
        self.dec_linear_2 = nn.Linear(160, self.imgsize)

    def forward(self, images):
        """Return (reconstruction, latent code) for a batch of images."""
        code = self.encode(images)
        out = self.decode(code)
        return out, code

    def encode(self, images):
        """Map a (N, 1, H, W) batch to its (N, code_size) latent code."""
        code = self.enc_cnn_1(images)
        code = F.selu(F.max_pool2d(code, 2))
        code = self.enc_cnn_2(code)
        code = F.selu(F.max_pool2d(code, 2))
        code = code.view([images.size(0), -1])  # flatten to (N, 80)
        code = F.selu(self.enc_linear_1(code))
        code = self.enc_linear_2(code)
        return code

    def decode(self, code):
        """Map a (N, code_size) latent code back to a (N, 1, width, height) image."""
        out = F.selu(self.dec_linear_1(code))
        # torch.sigmoid replaces F.sigmoid, which is deprecated and removed
        # in recent PyTorch releases (same function, same output).
        out = torch.sigmoid(self.dec_linear_2(out))
        out = out.view([code.size(0), 1, self.width, self.height])
        return out
|
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    # NOTE(review): url('', ...) is unanchored, so the social app's URLconf is
    # consulted for every request path — presumably intentional; verify.
    url('', include('social.apps.django_app.urls', namespace='social')),
    url(r'^diary/', include('diary.urls', namespace='diary')),
    url(r'^admin/', admin.site.urls),
    url(r'^member/', include('member.urls', namespace='member')),
    url(r'^index/$', TemplateView.as_view(template_name='index.html'), name='index'),
]
# Serve user-uploaded media through Django only in development.
if settings.DEBUG:
    urlpatterns += static(
        settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
    )
|
#!/usr/bin/env python3
import os
from setuptools import setup, find_packages
import sys
sys.path.append(os.getcwd())
import build_cffi
def add_pkg_data_dirs(pkg, dirs):
    """Collect package-relative paths for every file under the given subdirs.

    Walks each of *dirs* (relative to package directory *pkg*) recursively
    and returns the file paths expressed relative to *pkg*, in the form
    expected by setuptools' ``package_data``.
    """
    collected = []
    for subdir in dirs:
        top = os.path.join(pkg, subdir)
        for dirpath, _, filenames in os.walk(top):
            rel_dir = os.path.relpath(dirpath, pkg)
            collected.extend(os.path.join(rel_dir, fname) for fname in filenames)
    return collected
# PyPI long description comes straight from the repository README.
with open('README.md') as f:
    long_description = f.read()
# Package metadata. The SLEIGH processor definitions and docs are shipped as
# package data; the native extension is built via cffi (see build_cffi.py).
setup(name='pypcode',
      version='1.0.2',
      description='Python bindings to Ghidra\'s SLEIGH library',
      long_description=long_description,
      long_description_content_type='text/markdown',
      author='Matt Borgerson',
      author_email='contact@mborgerson.com',
      url='https://github.com/angr/pypcode',
      packages=['pypcode'],
      package_data={'pypcode': add_pkg_data_dirs('pypcode', ['processors', 'docs'])},
      setup_requires=['cffi'],
      install_requires=['cffi'],
      cffi_modules=['build_cffi.py:ffibuilder'],
      # Custom build_ext pre-builds the ffi module before compiling.
      cmdclass={'build_ext': build_cffi.FfiPreBuildExtension},
      python_requires='>=3.6'
      )
|
import sys
sys.path.append('../lib')
import requests
import logging
import time
from core.env import Environment
from core.event_hub import EventHub
class VkPoller:
    """Long-polls the VK Bots Long Poll API for a group and forwards events.

    ``loop()`` runs forever: it fetches batches of events, marks incoming
    messages as read, enriches each event with the sender's display name and
    hands the batch to the :class:`EventHub`.
    """
    def __init__(self, vk_group_id: int, vk_token: str, event_hub: EventHub):
        """
        :param vk_group_id: id of the VK group whose messages are polled
        :param vk_token: VK API access token
        :param event_hub: sink that receives enriched event batches
        """
        self.vk_group_id = vk_group_id
        self.vk_token = vk_token
        self.event_hub = event_hub
        # user_id -> "First Last". Was previously a *class* attribute, i.e.
        # a mutable dict shared (and mutated) by every VkPoller instance;
        # per-instance state avoids cross-instance leakage.
        self.vk_usernames_cache = {}
    def loop(self):
        """Poll forever; on any polling error, re-establish the connection."""
        self.__establish_connection_until_success()
        while True:
            try:
                events = self.__poll_events()
                for event in events:
                    vk_user_id = event['object']['user_id']
                    self.__mark_as_read(vk_user_id, event['object']['id'])
                    event['object']['user_name'] = self.__get_username(vk_user_id)
                if events:
                    self.event_hub.from_vk(events)
            except Exception as e:
                # Typically transient network failures (e.g. DNS resolution);
                # log and rebuild the long-poll session.
                logging.error(f'Error occured during vk events polling: {e}')
                self.__establish_connection_until_success()
    def __get_user(self, user_id):
        """Fetch a user profile via the VK ``users.get`` method."""
        return self.__execute_method('users.get', {
            'user_ids': f'{user_id}',
        })
    def __mark_as_read(self, peer_id, start_message_id):
        """Mark the conversation with *peer_id* as read from *start_message_id*."""
        self.__execute_method('messages.markAsRead', {
            'peer_id': peer_id,
            'start_message_id': start_message_id,
            'group_id': self.vk_group_id
        })
    def __poll_events(self):
        """Run one long-poll request; returns the list of new updates.

        Handles the VK ``failed`` codes: 1 means our ``ts`` is stale (resync
        it), anything else means the session key/server expired (reconnect);
        in both cases the poll is retried once.
        """
        def poll_impl():
            return requests.get(self._server, params={
                'act': 'a_check',
                'key': self._sessionKey,
                'ts': self._eventId,
                'wait': 25
            }).json()
        response = poll_impl()
        failed_code = response.get('failed')
        if failed_code:
            logging.debug(f'Failed polling attempt encountered. Response: {response}')
            if failed_code == 1:
                # Event history is outdated: resume from the server's ts.
                self._eventId = response['ts']
            else:
                # Key expired or session info lost: get a fresh server/key.
                self.__establish_connection_until_success()
            response = poll_impl()
        self._eventId = response['ts']
        return response['updates']
    def __establish_connection(self):
        """Obtain a fresh long-poll server, session key and event cursor."""
        data = self.__execute_method('groups.getLongPollServer', {
            'group_id': self.vk_group_id
        })['response']
        self._server = data['server']
        self._sessionKey = data['key']
        self._eventId = data['ts']
        logging.debug(f'Established vk api connection. Response: {data}')
    def __establish_connection_until_success(self):
        """Retry connection establishment every 20s until it succeeds."""
        while True:
            try:
                logging.info('Establishing vk connection')
                self.__establish_connection()
                break
            except Exception as e:
                logging.warning(f'Error occured during vk connection establishment: {e}')
                time.sleep(20)
    def __execute_method(self, method, params):
        """Call an arbitrary VK API *method* with *params*; returns parsed JSON."""
        return requests.get(f'https://api.vk.com/method/{method}', params={
            'access_token': self.vk_token,
            'v': '5.103',
            **params
        }).json()
    def __get_username(self, vk_user_id):
        """Resolve a user's display name, memoized in the per-instance cache."""
        if vk_user_id in self.vk_usernames_cache:
            return self.vk_usernames_cache[vk_user_id]
        user = self.__get_user(vk_user_id)
        name = f'{user["response"][0]["first_name"]} {user["response"][0]["last_name"]}'
        self.vk_usernames_cache[vk_user_id] = name
        return name
# Entry point: wire the poller to the environment-configured group/token
# and the shared event hub, then block forever in the polling loop.
if (__name__ == '__main__'):
    env = Environment()
    vk_group_id = env.vk_group_id
    vk_token = env.vk_token
    event_hub = EventHub(env)
    poller = VkPoller(vk_group_id, vk_token, event_hub)
    poller.loop()
|
# Copyright 2019 Adobe
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it. If you have received this file from a source other than Adobe,
# then your use, modification, or distribution of it requires the prior
# written permission of Adobe.
#
import gzip
import httplib
import json
import logging
import socket
import time
import zlib
# import traceback
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
from protector.proxy.http_request import HTTPRequest
from protector.query.query import OpenTSDBQuery, OpenTSDBResponse
class ProxyRequestHandler(BaseHTTPRequestHandler):
    """HTTP proxy handler in front of an OpenTSDB-style backend (Python 2).

    GET requests are forwarded as-is, except the local /metrics endpoint
    which serves Prometheus metrics. POST requests to /api/query are parsed
    as OpenTSDB queries, checked against the Protector rule set and possibly
    rejected before being proxied.
    """
    # Both are injected onto the class by server setup code before requests
    # are handled.
    protector = None
    backend_address = None
    def __init__(self, *args, **kwargs):
        self.http_request = HTTPRequest()
        self.tsdb_query = None
        # Address to time series backend
        backend_host, backend_port = self.backend_address
        self.backend_netloc = "{}:{}".format(backend_host, backend_port)
        self.scheme = "http"
        self.path = None
        self.connection = None
        self.rfile = None
        self.wfile = None
        self.close_connection = 0
        #self.timeout = 12
        # BaseHTTPRequestHandler.__init__ dispatches the request immediately,
        # so all attributes above must be initialised before this call.
        BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
    def log_error(self, log_format, *args):
        # Suppress "Request timed out: timeout('timed out',)"
        # if isinstance(args[0], socket.timeout):
        # logging.error("{}".format(traceback.format_exc()))
        # logging.error(pprint.pprint(args))
        self.log_message(log_format, *args)
    def log_message(self, format, *args):
        """Log an arbitrary message.
        This is used by all other logging functions. Override
        it if you have specific logging wishes.
        The first argument, FORMAT, is a format string for the
        message to be logged. If the format string contains
        any % escapes requiring parameters, they should be
        specified as subsequent arguments (it's just like
        printf!).
        The client ip address and current date/time are prefixed to every
        message.
        """
        # Include proxy-relevant request headers when available so log lines
        # can be attributed to the original client / Grafana org.
        if self.headers:
            xff = self.headers.getheader('X-Forwarded-For', '-')
            xgo = self.headers.getheader('X-Grafana-Org-Id', '-')
            ua = self.headers.getheader('User-Agent', '-')
            logging.info("%s - - [%s] %s [X-Forwarded-For: %s, X-Grafana-Org-Id: %s, User-Agent: %s]" %
                         (self.client_address[0], self.log_date_time_string(), format % args, xff, xgo, ua))
        else:
            logging.info("%s - - [%s] %s" %
                         (self.client_address[0], self.log_date_time_string(), format % args))
    def do_GET(self):
        """Serve /metrics locally; proxy every other GET to the backend."""
        if self.path == "/metrics":
            data = generate_latest()
            self.send_response(200)
            self.send_header("Content-Type", CONTENT_TYPE_LATEST)
            self.send_header("Content-Length", str(len(data)))
            self.send_header('Connection', 'close')
            self.end_headers()
            self.wfile.write(data)
        else:
            # Rewrite Host so the backend sees itself as the target.
            self.headers['Host'] = self.backend_netloc
            self.filter_headers(self.headers)
            self._handle_request(self.scheme, self.backend_netloc, self.path, self.headers)
        self.finish()
        self.connection.close()
    def do_POST(self):
        """Proxy POSTs; /api/query payloads are vetted by Protector first."""
        length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(length)
        self.headers['Host'] = self.backend_netloc
        self.filter_headers(self.headers)
        # Only process query requests, everything else should pass through
        if self.path == "/api/query":
            self.tsdb_query = OpenTSDBQuery(post_data)
            self.headers['X-Protector'] = self.tsdb_query.get_id()
            # Check the payload against the Protector rule set
            result = self.protector.check(self.tsdb_query)
            if not result.is_ok():
                self.protector.REQUESTS_BLOCKED.labels(self.protector.safe_mode, result.value["rule"]).inc()
                # In safe mode the violation is only counted, not enforced.
                if not self.protector.safe_mode:
                    logging.warning("OpenTSDBQuery blocked: %s. Reason: %s", self.tsdb_query.get_id(), result.value["msg"])
                    self.send_error(httplib.FORBIDDEN, result.value["msg"])
                    return
            # Re-serialize the (possibly normalized) query and fix the length.
            post_data = self.tsdb_query.to_json()
            self.headers['Content-Length'] = str(len(post_data))
        status = self._handle_request(self.scheme, self.backend_netloc, self.path, self.headers, body=post_data, method="POST")
        #['method', 'path', 'return_code']
        self.protector.REQUESTS_COUNT.labels('POST', self.path, status).inc()
        self.finish()
        self.connection.close()
    def send_error(self, code, message=None):
        """
        Send and log plain text error reply.
        :param code:
        :param message:
        """
        message = message.strip()
        self.log_error("code %d, message: %s", code, message)
        self.send_response(code)
        self.send_header("Content-Type", "application/json")
        self.send_header('Connection', 'close')
        self.end_headers()
        if message:
            # Grafana style
            j = {'message': message, 'error': message}
            self.wfile.write(json.dumps(j))
    def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"):
        """
        Run the actual request
        """
        # Returns the upstream HTTP status (or a gateway error code) so the
        # caller can record it in the request-count metric.
        backend_url = "{}://{}{}".format(scheme, netloc, path)
        startTime = time.time()
        try:
            response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers))
            respTime = time.time()
            duration = respTime - startTime
            self.protector.TSDB_REQUEST_LATENCY.labels(response.status).observe(duration)
            self._return_response(response, method, duration)
            return response.status
        except socket.timeout, e:
            respTime = time.time()
            duration = respTime - startTime
            # Record the timed-out query so Protector can learn about it.
            self.protector.save_stats_timeout(self.tsdb_query, duration)
            self.protector.TSDB_REQUEST_LATENCY.labels(httplib.GATEWAY_TIMEOUT).observe(duration)
            self.send_error(httplib.GATEWAY_TIMEOUT, "Query timed out. Configured timeout: {}s".format(20))
            return httplib.GATEWAY_TIMEOUT
        except Exception as e:
            respTime = time.time()
            duration = respTime - startTime
            #logging.error("{}".format(traceback.format_exc()))
            err = "Invalid response from backend: '{}'".format(e)
            logging.debug(err)
            self.protector.TSDB_REQUEST_LATENCY.labels(httplib.BAD_GATEWAY).observe(duration)
            self.send_error(httplib.BAD_GATEWAY, err)
            return httplib.BAD_GATEWAY
    def _process_response(self, payload, encoding, duration):
        """
        :param payload: JSON
        :param encoding: Content Encoding
        """
        # Best-effort stats collection; a malformed payload must not break
        # the proxying of the response itself.
        try:
            resp = OpenTSDBResponse(self.decode_content_body(payload, encoding))
            self.protector.save_stats(self.tsdb_query, resp, duration)
        except Exception as e:
            err = "Skip: {}".format(e)
            logging.debug(err)
    def _process_bad_request(self, payload, encoding):
        """
        :param payload: JSON
        :param encoding: Content Encoding
        """
        # Re-package the error json for Grafana
        j = json.loads(self.decode_content_body(payload, encoding))
        err = j.get('error', None)
        b = {}
        if err:
            b = {'message': err.get('message', '?'), 'error': err.get('details', '?')}
        return self.encode_content_body(json.dumps(b), encoding)
    def _return_response(self, response, method, duration):
        """
        :param response: HTTPResponse
        """
        # Relay the upstream response to the client, re-computing
        # Content-Length because the body may be rewritten below.
        self.filter_headers(response.msg)
        #cl = response.msg["content-length"]
        if "content-length" in response.msg:
            del response.msg["content-length"]
        self.send_response(response.status, response.reason)
        for header_key, header_value in response.msg.items():
            self.send_header(header_key, header_value)
        body = response.read()
        if method == "POST":
            if response.status == httplib.OK:
                # Process the payload
                self._process_response(body, response.getheader('content-encoding'), duration)
            if response.status == httplib.BAD_REQUEST:
                body = self._process_bad_request(body, response.getheader('content-encoding'))
        self.send_header('Content-Length', str(len(body)))
        self.send_header('Connection', 'close')
        self.end_headers()
        self.wfile.write(body)
    # HEAD and OPTIONS are proxied exactly like GET.
    do_HEAD = do_GET
    do_OPTIONS = do_GET
    @staticmethod
    def filter_headers(headers):
        # http://tools.ietf.org/html/rfc2616#section-13.5.1
        # Strip hop-by-hop headers that must not be forwarded by a proxy.
        hop_by_hop = (
            'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers',
            'transfer-encoding', 'upgrade'
        )
        for k in hop_by_hop:
            if k in headers:
                del headers[k]
    @staticmethod
    def encode_content_body(text, encoding):
        """Compress *text* according to the given Content-Encoding token."""
        if encoding == 'identity':
            return text
        if encoding in ('gzip', 'x-gzip'):
            io = StringIO()
            with gzip.GzipFile(fileobj=io, mode='wb') as f:
                f.write(text)
            return io.getvalue()
        if encoding == 'deflate':
            return zlib.compress(text)
        raise Exception("Unknown Content-Encoding: %s" % encoding)
    @staticmethod
    def decode_content_body(data, encoding):
        """Decompress *data* according to the given Content-Encoding token."""
        if encoding == 'identity':
            return data
        if encoding in ('gzip', 'x-gzip'):
            io = StringIO(data)
            with gzip.GzipFile(fileobj=io) as f:
                return f.read()
        if encoding == 'deflate':
            return zlib.decompress(data)
        raise Exception("Unknown Content-Encoding: %s" % encoding)
|
from django.conf.urls import patterns, include, url
from django.contrib.auth.views import password_reset,password_reset_done,password_reset_confirm,password_reset_complete
from web.accountviews import *
from web.homeviews import *
#from web.authviews import *
from web.permissionviews import *
from web.filehandler import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Register all ModelAdmin classes with the admin site.
admin.autodiscover()
# Legacy `patterns()`-style URL table (Django < 1.10). Patterns are matched
# top to bottom, so the root login route comes first.
urlpatterns = patterns('',
    url(r'^$', login),
    url(r'^media/(?P<path>.*)$','django.views.static.serve',{'document_root':'media/'}),
    url(r'^home/files',files),
    url(r'^filelist',filelist),
    url(r'^login$',login),
    url(r'^login_do$',login_do),
    url(r'^register$',register),
    url(r'^register_do$',register_do),
    # Password-reset flow: form -> email -> done page -> confirm -> complete.
    url(r'^password_reset$',password_reset,{'template_name':'accounts/password_reset_form.html',\
'email_template_name':'accounts/password_reset_email.html',\
'post_reset_redirect':'/password_reset_done',\
}),
    url(r'^password_reset_done$',password_reset_done,{'template_name':'accounts/password_reset_done.html'}),
    url(r'^password_reset_confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',password_reset_confirm,\
        {'template_name': 'accounts/password_reset_confirm.html','post_reset_redirect':'/password_done'}),
    url(r'^password_done$',password_reset_complete,{'template_name':'accounts/password_reset_complete.html'}),
    url(r'^logout$',logout),
    # File management endpoints.
    url(r'^uploadhandler$',uploadhandler),
    url(r'^upload',uploadfile),
    url(r'^download_file',download_file),
    url(r'^delete_file',delete_file),
    url(r'^rename_file',rename_file),
    url(r'^batchDownload',batch_download),
    url(r'^new_folder',new_folder),
    # Examples:
    # url(r'^$', 'ThuCloudDisk.views.home', name='home'),
    # url(r'^ThuCloudDisk/', include('ThuCloudDisk.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
|
import os
import sys
import json
import tempfile
import subprocess
# Directory containing this spec runner.
SPEC = os.path.split(__file__)[0]
# Fixture directory with .mustache/.js/.partial/.txt test files.
FILES = os.path.join(SPEC, '_files')
ROOT = os.path.join(SPEC, '..')
# The second assignment deliberately overrides the hard-coded path with the
# `node` binary found on PATH.
NODE_PATH = '/home/fafhrd/opt/bin/node'
NODE_PATH = 'node'
# Hogan.js runtime source, embedded into every generated test script.
HOGAN = open(os.path.join(SPEC, 'hogan-2.0.0.common.js'), 'rb').read();
sys.path.append(ROOT)
import pyhogan
def load_test(name):
    """Load the fixture files for one named test case.

    Returns ``[template, view, partial, expect]`` where *partial* is the
    empty string when no ``.partial`` fixture exists. All files are read as
    bytes and, unlike before, properly closed (the old code leaked the file
    handles).
    """
    def _read(path):
        # Small helper so every fixture is opened via a context manager.
        with open(path, 'rb') as f:
            return f.read()
    template = _read(os.path.join(FILES, "%s.mustache" % name))
    view = _read(os.path.join(FILES, "%s.js" % name))
    partial_file = os.path.join(FILES, "%s.partial" % name)
    partial = _read(partial_file) if os.path.isfile(partial_file) else ''
    expect = _read(os.path.join(FILES, "%s.txt" % name))
    return [template, view, partial, expect]
def run_js(js):
    """Write *js* to a temp file, execute it with node and return its stdout.

    The old version closed the file twice and leaked the temp file when the
    subprocess failed; the try/finally guarantees cleanup either way.
    """
    fd, name = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w+b') as runner_file:
            runner_file.write(js)
        res = subprocess.check_output((NODE_PATH, name))
    finally:
        os.unlink(name)
    return res
# Script template: render the pyhogan-compiled template via node and print
# the result (errors are printed instead of raised so stdout is comparable).
JS = """%(hogan)s
try {
    var TEMPLATE = %(tmpl)s;
    %(view)s
    var partial = {partial: %(partial)s};
    console.log(TEMPLATE.render(%(name)s, partial));
} catch (e) {
    console.log(e);
}
"""
# Debug variant: also prints the reference Hogan.js compilation output for
# comparison when specific tests are requested on the command line.
JS2 = """%(hogan)s
try {
    var TEMPLATE = %(tmpl)s;
    %(view)s
    var partial = {partial: %(partial)s};
    console.log(Hogan.compile(TEMPLATE, {asString: true}), "\\n");
    console.log(Hogan.compile(TEMPLATE).render(%(name)s, partial));
} catch (e) {
    console.log(e);
}
"""
# Optional list of test names to restrict the run to (Python 2 script).
tests = sys.argv[1:]
total = 0
success = 0
failure = 0
# For every .mustache fixture: compile with pyhogan, render through node and
# compare the output against the expected .txt fixture.
for name in sorted(os.listdir(FILES)):
    if name.endswith('.mustache'):
        name = name[:-9]
        if tests and name not in tests:
            continue
        template, view, partial, expect = load_test(name)
        js = JS%{
            'name': name,
            'tmpl': pyhogan.compile(template, verbose=bool(tests)),
            'view': view,
            'partial': json.dumps(partial),
            'hogan': HOGAN
        }
        res = run_js(js).strip()
        expect = expect.strip()
        print name, '...',
        if res == expect:
            print 'passed'
            success += 1
        else:
            print 'FAILED'
            failure += 1
            # In targeted runs, also show the reference Hogan.js output.
            if tests:
                js = JS2%{
                    'name': name,
                    'tmpl': json.dumps(template),
                    'view': view,
                    'partial': json.dumps(partial),
                    'hogan': HOGAN
                }
                print run_js(js).strip()
                print '=============================='
        total += 1
        # Verbose diff of compiled form, actual and expected output.
        if tests:
            print pyhogan.compile(template)
            print 'got: -------------------------'
            print res
            print 'expect: ----------------------'
            print expect
            print '=========================='
print 'Total: %s, success: %s, failures: %s'%(total, success, failure)
|
from braindecode.veganlasagne.batch_norm import batch_norm
from lasagne.layers import Conv2DLayer
import lasagne
from lasagne.layers.special import NonlinearityLayer, ExpressionLayer
from lasagne.layers.merge import ElemwiseSumLayer
from lasagne.layers.shape import PadLayer
from braindecode.veganlasagne.layers import StrideReshapeLayer
from braindecode.veganlasagne.random_switch import RandomSwitchLayer
import theano.tensor as T
from lasagne.layers.pool import Pool2DLayer
# create a residual learning building block with two stacked 3x3 convlayers as in paper
def residual_block(l, batch_norm_alpha, batch_norm_epsilon,
        nonlinearity, survival_prob, add_after_nonlin,
        reduction_method, reduction_pool_mode,
        increase_units_factor=None, half_time=False, projection=False,
        ):
    """Build one residual block (two stacked conv layers plus a shortcut).

    :param l: input lasagne layer
    :param batch_norm_alpha: alpha for the batch-norm running averages
    :param batch_norm_epsilon: epsilon added to batch-norm variance
    :param nonlinearity: activation used inside the block
    :param survival_prob: keep-probability in [0, 1]; values != 1 wrap the
        block in a RandomSwitchLayer (appears to implement stochastic-depth
        style random switching — confirm against RandomSwitchLayer docs)
    :param add_after_nonlin: if True, apply the nonlinearity to the conv
        stack *before* the elementwise sum instead of after it
    :param reduction_method: 'conv' or 'pool' — how temporal reduction is done
    :param reduction_pool_mode: pool mode used when reduction_method=='pool'
    :param increase_units_factor: optional factor to grow the channel count
    :param half_time: if True, halve the temporal dimension via stride-reshape
    :param projection: use a 1x1 projection shortcut (option B in the ResNet
        paper) instead of zero-padding (option A)
    :return: output lasagne layer of the block
    """
    assert survival_prob <= 1 and survival_prob >= 0
    input_num_filters = l.output_shape[1]
    if increase_units_factor is not None:
        out_num_filters = int(input_num_filters*increase_units_factor)
        assert (out_num_filters - input_num_filters) % 2 == 0, ("Need even "
            "number of extra channels in order to be able to pad correctly")
    else:
        out_num_filters = input_num_filters
    if (not half_time) or (reduction_method == 'conv'):
        stack_1 = batch_norm(Conv2DLayer(l, num_filters=out_num_filters, filter_size=(3,3),
            stride=(1,1), nonlinearity=nonlinearity, pad='same',
            W=lasagne.init.HeNormal(gain='relu')),
            epsilon=batch_norm_epsilon,
            alpha=batch_norm_alpha)
    else:
        assert half_time and reduction_method == 'pool'
        stack_1 = Pool2DLayer(l, pool_size=(3,1), stride=(1,1), pad=(1,0),
            mode=reduction_pool_mode)
        # 1x1 conv here, therefore can do stride later without problems
        # otherwise would have to do stride here before
        # and make extra if condition later (only reshape with stride
        # in case of reduction method conv)...
        stack_1 = batch_norm(Conv2DLayer(stack_1, num_filters=out_num_filters, filter_size=(1,1),
            stride=(1,1), nonlinearity=nonlinearity, pad='same',
            W=lasagne.init.HeNormal(gain='relu')),
            epsilon=batch_norm_epsilon,
            alpha=batch_norm_alpha)
    if half_time:
        stack_1 = StrideReshapeLayer(stack_1,n_stride=2)
    # Second conv has no nonlinearity; it is applied after the sum (or before,
    # depending on add_after_nonlin).
    stack_2 = batch_norm(Conv2DLayer(stack_1, num_filters=out_num_filters, filter_size=(3,3),
        stride=(1,1), nonlinearity=None, pad='same',
        W=lasagne.init.HeNormal(gain='relu')),
        epsilon=batch_norm_epsilon,
        alpha=batch_norm_alpha)
    # add shortcut connections
    shortcut = l
    if half_time:
        # note since we are only reshaping
        # this is ok both for later identity and later projection
        # 1x1 conv of projection is same if we do it before or after this reshape
        # (would not be true if it was anything but 1x1 conv(!))
        shortcut = StrideReshapeLayer(shortcut,n_stride=2)
    if increase_units_factor is not None:
        if projection:
            # projection shortcut, as option B in paper
            shortcut = batch_norm(Conv2DLayer(shortcut,
                num_filters=out_num_filters,
                filter_size=(1,1), stride=(1,1), nonlinearity=None,
                pad='same', b=None),
                epsilon=batch_norm_epsilon,
                alpha=batch_norm_alpha)
        else:
            # identity shortcut, as option A in paper
            n_extra_chans = out_num_filters - input_num_filters
            shortcut = PadLayer(shortcut, [n_extra_chans//2,0,0], batch_ndim=1)
    if add_after_nonlin:
        stack_2 = NonlinearityLayer(stack_2)
        block = ElemwiseSumLayer([stack_2, shortcut])
    else:
        block = NonlinearityLayer(ElemwiseSumLayer([stack_2, shortcut]),
            nonlinearity=nonlinearity)
    if survival_prob != 1:
        # Hack to make both be broadcastable along empty third dim
        # Otherwise I get an error that they are of different type:
        # shortcut: TensorType(False,False,False,True)
        # block: TensorType4d(32) or sth
        shortcut = ExpressionLayer(shortcut, lambda x: T.addbroadcast(x, 3))
        block = ExpressionLayer(block, lambda x: T.addbroadcast(x, 3))
        block = RandomSwitchLayer(block, shortcut, survival_prob)
    return block
|
#!/usr/bin/env python3
from gevent import monkey; monkey.patch_all()
from arago.actors import Actor, Monitor, Root, RESUME
import arago.actors.pattern_matching as matching
from arago.common.logging import getCustomLogger
import gevent
import random
# Shared logger for this demo script.
logger = getCustomLogger(level="DEBUG")
class Echo(Actor):
    """Actor whose behaviour is selected by pattern-matching on the message.

    The repeated `handle` definitions are intentional: the @matching.match
    decorator dispatches by message pattern, so each def registers one case.
    """
    # "crash" deliberately raises NameError (`undefined`) to exercise the
    # monitor's RESUME policy.
    @matching.match(msg = "crash")
    def handle(self, msg, payload, sender): return undefined
    # "stop" terminates the actor.
    @matching.match(msg = "stop")
    def handle(self, msg, payload, sender): self.stop()
    # Any other string is echoed back.
    @matching.match(msg = matching.isoftype(str))
    def handle(self, msg, payload, sender): return "{me} replies: {msg}".format(me=self, msg=msg)
def send(target):
    """Drive *target* with a scripted mix of echo/crash/stop messages.

    Sends five "hello"s, ten random "hello"/"crash" picks, one "stop", then
    three more "hello"s, logging every reply or raised exception.
    """
    script = (5 * ["hello"]
              + random.choices(["hello", "crash"], weights=[2, 1], k=10)
              + ["stop"]
              + 3 * ["hello"])
    for message in script:
        try:
            logger.info("Sending {msg} to {target}".format(msg=message, target=target))
            reply = target.wait_for(message)
            logger.info(reply)
        except Exception as error:
            logger.warning("Target raised an exception: {e}".format(e=error))
# Wire up one echo actor under a RESUME-policy monitor and run the scripted
# sender in a greenlet until it finishes.
echo = Echo(name="echo")
monitor = Monitor(name="monitor", policy=RESUME, children=[echo])
gevent.spawn(send, echo).join()
|
from flask import url_for
from flask_testing import TestCase
from application.models import Todos
from application import app, db
class TestBase(TestCase):
    """Common fixture: in-memory-style app config plus three seeded todos."""

    def create_app(self):
        # Flask-Testing hook: configure the application for test runs.
        app.config.update(
            SQLALCHEMY_DATABASE_URI="sqlite:///data.db",
            SECRET_KEY="TEST_SECRET_KEY",
            DEBUG=True,
            WTF_CSRF_ENABLED=False
        )
        return app

    def setUp(self):
        # Fresh schema plus three known incomplete todos per test.
        db.create_all()
        seed_tasks = (
            "Test the application",
            "Take out the trash",
            "Be a real cool dude",
        )
        for task in seed_tasks:
            db.session.add(Todos(task=task, complete=False))
        db.session.commit()

    def tearDown(self):
        # Drop everything so tests stay independent.
        db.session.remove()
        db.drop_all()
class TestViews(TestBase):
    """Smoke-test the plain GET endpoints."""

    def test_index_get(self):
        # Index must render and list all three seeded tasks.
        resp = self.client.get(url_for("index"))
        self.assertEqual(resp.status_code, 200)
        for snippet in (b'Test the application', b'Take out the trash', b'Be a real cool dude'):
            self.assertIn(snippet, resp.data)

    def test_add_get(self):
        self.assertEqual(self.client.get(url_for("add")).status_code, 200)

    def test_update_get(self):
        self.assertEqual(self.client.get(url_for("update", id=1)).status_code, 200)
class TestOrder(TestBase):
    """POST each supported (and one unsupported) ordering option to the index."""

    def _order(self, option):
        # Submit the ordering form and follow the redirect back to the index.
        return self.client.post(
            url_for("index"),
            data=dict(order_with=option),
            follow_redirects=True,
        )

    def test_index_order_by_id(self):
        self.assertEqual(self._order("id").status_code, 200)

    def test_index_order_by_complete(self):
        self.assertEqual(self._order("complete").status_code, 200)

    def test_index_order_by_incomplete(self):
        self.assertEqual(self._order("incomplete").status_code, 200)

    def test_index_order_by_other(self):
        # Unknown option should still render, not error out.
        self.assertEqual(self._order("other").status_code, 200)
class TestAdd(TestBase):
    """Adding todos: a new task appears; a duplicate task is rejected."""

    def _add(self, task):
        # Submit the add form and follow the redirect to the rendered page.
        return self.client.post(
            url_for("add"),
            data=dict(task=task),
            follow_redirects=True,
        )

    def test_add_post(self):
        self.assertIn(b'123456789', self._add("123456789").data)

    def test_add_existing_post(self):
        # "Test the application" is seeded in setUp, so this is a duplicate.
        self.assertIn(b'You already added this Todo', self._add("Test the application").data)
class TestComplete(TestBase):
    """Marking a todo as complete flips its `complete` flag to True."""

    def test_complete(self):
        self.client.get(
            url_for("complete", id=1),
            follow_redirects=True
        )
        # assertEquals is a deprecated alias that was removed in Python 3.12;
        # use assertEqual. The unused `response` local was also dropped.
        self.assertEqual(Todos.query.filter_by(id=1).first().complete, True)
class TestIncomplete(TestBase):
    """Marking a todo as incomplete leaves/returns its `complete` flag False."""

    def test_incomplete(self):
        self.client.get(
            url_for("incomplete", id=1),
            follow_redirects=True
        )
        # assertEquals is a deprecated alias that was removed in Python 3.12;
        # use assertEqual. The unused `response` local was also dropped.
        self.assertEqual(Todos.query.filter_by(id=1).first().complete, False)
class TestUpdate(TestBase):
    """Updating a todo's text shows the new text on the rendered page."""

    def test_update(self):
        resp = self.client.post(
            url_for("update", id=1),
            data=dict(task="Updated task"),
            follow_redirects=True,
        )
        self.assertIn(b'Updated task', resp.data)
class TestDelete(TestBase):
    """Deleting a todo removes it from the rendered index page."""

    def test_delete(self):
        resp = self.client.get(
            url_for("delete", id=1),
            follow_redirects=True,
        )
        self.assertNotIn(b'Test the application', resp.data)
import model
import load
import data
import args
def test(train_x_filename,
         train_y_filename,
         test_x_filename,
         output_filename,
         model_filename,
         model_name):
    """Run inference with a saved model and write Kaggle-style predictions.

    :param train_x_filename: training features file (used by the loader for
        normalization statistics)
    :param train_y_filename: training labels file
    :param test_x_filename: test features file
    :param output_filename: CSV output path ("id,Value" header)
    :param model_filename: path of the saved model parameters
    :param model_name: 'LogisticRegression' or 'GaussianNaiveBayes'
    :raises ValueError: if *model_name* is not a supported model
    """
    data_loader = load.DataLoader()
    test_x = data_loader.read_testing_data(
        train_x_filename,
        train_y_filename,
        test_x_filename)
    feature_num = test_x.shape[1]
    if model_name == 'LogisticRegression':
        trainer = model.LogisticRegression(
            feature_num,
            train=False)
        trainer.load_model(model_filename)
        pred_y = trainer.forward(test_x)
        # Threshold the sigmoid outputs at 0.5 to get hard 0/1 labels.
        pred_y[pred_y >= 0.5] = 1
        pred_y[pred_y < 0.5] = 0
    elif model_name == 'GaussianNaiveBayes':
        trainer = model.GaussianNaiveBayes()
        trainer.load_model(model_filename)
        pred_y = trainer.predict(test_x)
    else:
        # Previously an unknown name fell through silently and crashed later
        # with NameError on pred_y; fail fast with a clear message instead.
        raise ValueError('Unknown model name: %s' % model_name)
    with open(output_filename, 'w') as f:
        f.write('id,Value\n')
        for i, pred in enumerate(pred_y):
            f.write('id_%d,%d\n' % (i, int(pred)))
# CLI entry point: parse the inference arguments and run prediction.
# NOTE(review): this rebinds the module name `args` to the parsed namespace,
# shadowing the imported `args` module — harmless here but easy to trip over.
if __name__ == '__main__':
    args = args.get_args(train=False)
    test(args.train_x_filename,
         args.train_y_filename,
         args.test_x_filename,
         args.output,
         args.model_filename,
         args.model)
|
#digital signature and verification of a byte table
#
from crypto2 import encryptSignature,decryptSignature
from crypto4 import sha
#Digital Signature
#@parameters:
# byte table (string)
# Private RSA key (int)
#@return:
# text hashed(sha)
# signature-byte table (string)
#
def signature(text, n, d):
    """Sign *text*: hash it with SHA-256, then encrypt the digest with the
    private RSA key (n, d).

    :return: (digest string, encrypted-signature byte table)
    """
    digest = "".join(sha(text, 256))
    encrypted_digest = encryptSignature(digest, n, d)
    return digest, encrypted_digest
#Signature Verification
#@parameters:
# signature-byte table
# text to verify (SHA256)
# Public RSA key (int)
#@return:
# Boolean (TRUE/FALSE)
#
def verify(signature, text, e, n):
    """Verify a signature: decrypt it with the public RSA key (e, n) and
    compare against the expected digest *text*.

    :return: True iff the decrypted signature equals *text* exactly.

    Fix: the old loop iterated only over the decrypted string, so a
    decrypted value that was a *prefix* of *text* (or empty) verified as
    True, and one longer than *text* raised IndexError. Full equality
    closes both holes.
    """
    decrypted = "".join(chr(i) for i in decryptSignature(signature, e, n))
    return decrypted == text
|
def ficha(nome='<desconhecido>', gols=0):
    """Print a one-line championship report for player *nome* with *gols* goals."""
    mensagem = f'O jogador {nome} fez {gols} gol(s) no campeonato.'
    print(mensagem)
# Read the player's name and goal count, then call ficha() with only the
# values that were actually provided (empty name -> default name, non-numeric
# goals -> default 0). input() already returns str, so the old str() wrappers
# and `== False`/`== True` comparisons were redundant.
nome = input('Nome: ').strip()
gols = input('Gols: ').strip()
if gols.isnumeric():
    if nome:
        ficha(nome, int(gols))
    else:
        ficha(gols=int(gols))
else:
    if nome:
        ficha(nome)
    else:
        ficha()
#!/usr/bin/python
# Copyright 2015 Jason Edelman <jason@networktocode.com>
# Network to Code, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r"""
---
module: ntc_save_config
short_description: Save the running config locally and/or remotely
description:
- Save the running configuration as the startup configuration or to a file on the network device.
Optionally, save the running configuration to this computer.
- Supported platforms include Cisco Nexus switches with NX-API, Cisco IOS switches or routers, Arista switches with eAPI.
notes:
- This module is not idempotent.
author: Jason Edelman (@jedelman8)
version_added: 1.9.2
requirements:
- pyntc
extends_documentation_fragment:
- networktocode.netauto.netauto
options:
remote_file:
description:
- Name of remote file to save the running configuration. If omitted it will be
saved to the startup configuration.
required: false
default: null
type: str
local_file:
description:
- Name of local file to save the running configuration. If omitted it won't be locally saved.
required: false
default: null
type: str
global_delay_factor:
description:
- Sets delay between operations.
required: false
default: 1
type: int
delay_factor:
description:
- Multiplication factor for timing delays
required: false
default: 1
type: int
"""
EXAMPLES = r"""
- hosts: all
vars:
nxos_provider:
host: "{{ inventory_hostname }}"
username: "ntc-ansible"
password: "ntc-ansible"
platform: "cisco_nxos_nxapi"
connection: local
- networktocode.netauto.ntc_save_config:
platform: cisco_nxos_nxapi
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
- networktocode.netauto.ntc_save_config:
ntc_host: n9k1
- networktocode.netauto.ntc_save_config:
platform: arista_eos_eapi
host: "{{ inventory_hostname }}"
username: "{{ username }}"
password: "{{ password }}"
remote_file: running_config_copy.cfg
transport: https
# You can get the timestamp by setting get_facts to True, then you can append it to your filename.
- networktocode.netauto.ntc_save_config:
provider: "{{ nxos_provider }}"
local_file: config_{{ inventory_hostname }}_{{ ansible_date_time.date | replace('-','_') }}.cfg
"""
RETURN = r"""
local_file:
description: The local file path of the saved running config.
returned: success
type: str
sample: '/path/to/config.cfg'
remote_file:
description: The remote file name of the saved running config.
returned: success
type: str
sample: 'config_backup.cfg'
remote_save_successful:
description: Whether the remote save was successful.
May be false if a remote save was unsuccessful because a file with same name already exists.
returned: success
type: bool
sample: true
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.networktocode.netauto.plugins.module_utils.args_common import (
CONNECTION_ARGUMENT_SPEC,
MUTUALLY_EXCLUSIVE,
NETMIKO_BACKEND,
REQUIRED_ONE_OF,
)
# pyntc is an optional runtime dependency; defer the failure to main() so
# Ansible can report a clean error message instead of an ImportError.
try:
    HAS_PYNTC = True
    from pyntc import ntc_device, ntc_device_by_name
except ImportError:
    HAS_PYNTC = False
def main():  # pylint: disable=too-many-locals,too-many-branches,too-many-statements
    """Main execution.

    Builds the module argument spec, resolves connection parameters
    (direct params override the optional ``provider`` dict), connects to
    the device via pyntc — either by inventory name (``ntc_host``) or by
    explicit platform/host/credentials — then saves the running config
    remotely and/or to a local backup file.
    """
    base_argument_spec = dict(
        global_delay_factor=dict(default=1, required=False, type="int"),
        delay_factor=dict(default=1, required=False, type="int"),
        remote_file=dict(required=False, type="str"),
        local_file=dict(required=False, type="str"),
    )
    argument_spec = base_argument_spec
    argument_spec.update(CONNECTION_ARGUMENT_SPEC)
    argument_spec["provider"] = dict(required=False, type="dict", options=CONNECTION_ARGUMENT_SPEC)

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=MUTUALLY_EXCLUSIVE,
        required_one_of=[REQUIRED_ONE_OF],
        supports_check_mode=False,
    )

    if not HAS_PYNTC:
        module.fail_json(msg="pyntc is required for this module.")

    provider = module.params["provider"] or {}
    # allow local params to override provider
    for param, pvalue in provider.items():
        if module.params.get(param) is not False:
            module.params[param] = module.params.get(param) or pvalue

    platform = module.params["platform"]
    host = module.params["host"]
    username = module.params["username"]
    password = module.params["password"]

    ntc_host = module.params["ntc_host"]
    ntc_conf_file = module.params["ntc_conf_file"]

    transport = module.params["transport"]
    port = module.params["port"]
    global_delay_factor = int(module.params["global_delay_factor"])
    delay_factor = int(module.params["delay_factor"])
    secret = module.params["secret"]

    if ntc_host is not None:
        device = ntc_device_by_name(ntc_host, ntc_conf_file)
    else:
        # BUGFIX: validate the required connection parameters *before*
        # constructing the device (the original checked after, so a missing
        # host surfaced as an unfriendly pyntc exception), and only on this
        # branch — the ntc_host flow needs none of them (see the EXAMPLES
        # entry that passes ntc_host alone).
        argument_check = {"host": host, "username": username, "platform": platform, "password": password}
        for key, val in argument_check.items():
            if val is None:
                module.fail_json(msg=str(key) + " is required")

        kwargs = {}
        if transport is not None:
            kwargs["transport"] = transport
        if port is not None:
            kwargs["port"] = port
        if secret is not None:
            kwargs["secret"] = secret
        # Netmiko-backed platforms additionally honour the delay factors.
        if platform in NETMIKO_BACKEND:
            if global_delay_factor is not None:
                kwargs["global_delay_factor"] = global_delay_factor
            if delay_factor is not None:
                kwargs["delay_factor"] = delay_factor

        device_type = platform
        device = ntc_device(device_type, host, username, password, **kwargs)

    remote_file = module.params["remote_file"]
    local_file = module.params["local_file"]

    device.open()

    # Save to an explicit remote file, or to the platform default
    # (startup config) when none is given.
    if remote_file:
        remote_save_successful = device.save(remote_file)
    else:
        remote_save_successful = device.save()

    changed = remote_save_successful
    if local_file:
        device.backup_running_config(local_file)
        changed = True

    device.close()

    # Label the default destination for the module result.
    remote_file = remote_file or "(Startup Config)"
    module.exit_json(
        changed=changed, remote_save_successful=remote_save_successful, remote_file=remote_file, local_file=local_file
    )
if __name__ == "__main__":
main()
|
# ------------------------------------------------------------------------ Importing packages ----------------------------------------------------------------------------
import tkinter as tk
from tkinter import messagebox as mb
import tkinter.font as f
import random as r
import winsound
import shelve as sh
op = sh.open('Movies.bd') # Persistent shelve file holding the movie list
La = op['MOVIE-LIST'] # In-memory copy of the stored movie list
AB = 0 # Game play GUI window (created later in StartPlay)
EndGame = False # Becomes True once all wrong attempts are used up
count = 0 # Number of wrong guesses made so far
attempts = 7 # Total allowed wrong guesses per movie
# ------------------------------------------------------------------------- Window alignment function --------------------------------------------------------------------
def alignment(root):
    """Position *root* on screen using the window's requested size.

    Keeps the original offset formula (screen/2 minus the full requested
    width/height) so placement is unchanged.
    """
    # root.iconbitmap('Images/ic.ico')
    req_w = root.winfo_reqwidth()
    req_h = root.winfo_reqheight()
    x_off = int(root.winfo_screenwidth() / 2 - req_w)
    y_off = int(root.winfo_screenheight() / 2 - req_h)
    root.geometry("+{}+{}".format(x_off, y_off))
# ------------------------------------------------------------------------ Loading of first window -------------------------------------------------------------------
def load():
    """Populate the main window with the Add / Start / Quit buttons."""
    helv = f.Font(family = 'Helvetica' )

    # Add Movie Button
    btn_add = tk.Button(GUI, text = "Add Movie", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = add)
    btn_add['font'] = helv
    btn_add.place(x = 11, y = 100)
    btn_add.config(width = 20, height = 2)

    # Start Game Button
    btn_start = tk.Button(GUI, text = "Start Game", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = lambda : StartPlay(0))
    btn_start['font'] = helv
    btn_start.place(x = 210, y = 100)
    btn_start.config(width = 20, height = 2)

    # Quit Button
    btn_quit = tk.Button(GUI, text = "Quit", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = q)
    btn_quit.place(x = 100, y = 200)
    btn_quit['font'] = helv
    btn_quit.config(width = 20, height = 2)
# ---------------------------------------------------------------------------- Add Movie Dialog Box ----------------------------------------------------------------------
def add():
    """Open the 'add a movie' dialog window, replacing the main window."""
    global GUI
    GUI.withdraw()
    GUI = tk.Tk()
    GUI.geometry("400x100")
    GUI.title("Movie Name")
    GUI.resizable(0, 0)
    alignment(GUI)

    prompt = tk.Label(GUI, text = "Add Movie Name : ")
    prompt.place(x = 10, y = 30)                # Add Movie Label

    movie_name = tk.StringVar()                 # Input
    name_entry = tk.Entry(GUI, textvariable = movie_name)
    name_entry.place(x = 120, y = 30)           # Entry Label

    def delete():
        # Confirm the typed movie, then clear the entry widget.
        confirm(name_entry.get())
        name_entry.delete(0, len(str(name_entry.get())))

    confirm_bt = tk.Button(GUI, text = "Confirm", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = delete)
    confirm_bt.place(x = 260, y = 30)           # Add Movie Button
    start_bt = tk.Button(GUI, text = "Start Game", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = lambda : StartPlay(0))
    start_bt.place(x = 320, y = 30)             # Start Game Button
    GUI.mainloop()
# ------------------------------------------------------------------------ Adding Movie to List -------------------------------------------------------------------------
def confirm(a):
    """Add movie *a* to the session list (and optionally to the shelve).

    Spaces are encoded as '--' to match the stored format; duplicates are
    detected case-insensitively and rejected.
    """
    global op
    n1 = str(a).replace(" ", "--")   # Entered movie, spaces encoded
    # Case-insensitive membership test against the current list.
    if any(existing.lower() == n1.lower() for existing in La):
        s = "Name already exists"
    else:
        La.append(n1)   # Session list (stores all movies)
        # Optionally persist the new movie into the shelve file too.
        ans = mb.askquestion("Notification", "Do you want to add this movie permenantly to your list!?")
        if ans == 'yes':
            tempLa = list(op['MOVIE-LIST'])   # Stored list
            tempLa.append(n1)
            op['MOVIE-LIST'] = tempLa
            del(tempLa)
            # Reopen the shelve so later reads observe the update.
            op.close()
            op = sh.open('Movies.bd')
        s = "Added successfully"
    mb.showinfo("Notification", s)
# --------------------------------------------------------------------------- Quit function ------------------------------------------------------------------------------
def q():
    """Confirm with the user, then exit the program."""
    answer = mb.askquestion("Notification", "Do you want to leave us soo soon!?")
    if answer == "yes":
        mb.showinfo("Quit", "Thank you! See you real soon!")
        quit()
# ------------------------------------------------------------------------------ Game Play -------------------------------------------------------------------------
def StartPlay(n):
    """Launch (or relaunch) the game window and wire up the guessing logic.

    n == 0: called from the main menu / add dialog (hides GUI).
    n == 1: called from the 'Try new Movie' button (resets counters, hides AB).
    """
    global AB
    global EndGame
    global count
    global attempts
    # ------------ Close Previous windows -----------
    if n == 0:
        GUI.withdraw()
    if n == 1:
        count = 0 # Number of wrong attempts
        attempts = 7
        AB.withdraw()
    AB = tk.Tk()
    AB.geometry("520x375")
    AB.title("Let's Start!🎉")
    AB.resizable(0, 0)
    alignment(AB)
    # ----------- Generating random number -----------
    # NOTE(review): 'a' is reset to 0 on every call, so this loop only
    # re-rolls while x == 0 — the movie at index 0 can never be picked and
    # the 'previous movie' memory (a = x at the end) has no effect across
    # calls.  TODO confirm intended behaviour.
    a = 0
    x = 0
    while a == x:
        x = r.randint(0, (len(La)-1))
    st = La[x] # Movie name in the x position of the list
    Letters = ['a', 'e', 'i', 'o', 'u'] # Vowels start revealed; guesses are appended
    entry = "" # The character entered by the user
    finalString = "" # Variable storing the movie name in the format to be displayed
    # ----------- Formatting the movie name -----------
    for i in range(len(st)):
        if st[i].lower() in Letters: # If character encountered present in letters list
            finalString = finalString + str(st[i]) + " "
        elif st[i] == '-': # If the character encountered is a space
            finalString = finalString + " "
        else: # If the character encountered not present
            finalString = finalString + '_' + " "
    finalString = "Guess the movie name : \n" + finalString # Final formatted string to be displayed
    Label_movie = tk.Label(AB)
    Label_movie.place(x = 11, y = 10)
    Label_movie.config( text = finalString, width = 55, height = 5, bg = "black", fg = "white", bd = 5, font=("Helvetica", 12))
    Label_movie.pack(side = "top") # Display for the formatted movie name
    Options = tk. StringVar() # Entry of the character by the user
    Label_input = tk.Label(AB, text = "Enter the letter : ")
    Label_input.place(x = 140, y = 120)
    Label_input.config(font=("Helvetica", 12)) # 'Enter the letter' display
    Entry_option = tk.Entry(AB, textvariable = Options)
    Entry_option.place(x = 260, y = 123)
    Entry_option.config(width = 5) # Entry of the character by the user
    text = "Hello! Okay then let's start!"
    text1 = "more chances!"
    Label_result = tk.Label(AB)
    Label_result.place(x = 100, y = 150)
    Label_result.config(height = 2, width = 30, bd = 10) # Display if the answer is correct or not
    canvas_image = tk.Label(AB, width = 1, height = 1, bd = 10)
    canvas_image.place(x = 170, y = 192) # Display of the 'number of attempts'
    canvas_attempts = tk.Label(AB, width = 12, height = 2)
    canvas_attempts.place(x = 203, y = 202)
    canvas_attempts.config(text = text1, font=("Helvetica", 12)) # Display of attempts left
    Game_over = tk.Label(AB)
    Game_over.place(x = 107, y = 250) # Display 'game over'
    New_movie_bt = tk.Button(AB, text = "Try new Movie", activebackground = "grey", activeforeground = "black", bg = "black", fg = "white", command = lambda : StartPlay(1))
    New_movie_bt.place(x = 15, y = 300)
    New_movie_bt['font'] = f.Font(family = 'Helvetica' )
    New_movie_bt.config(width = 22, height = 2) # New Movie Button
    quit_bt = tk.Button(AB, text = "End Game", activebackground = "grey", activeforeground = "black", bg = "black", fg = "white", command = q)
    quit_bt.place(x = 255, y = 300)
    quit_bt['font'] = f.Font(family = 'Helvetica' )
    quit_bt.config(width = 22, height = 2) # Quit Button
    # ---------------- Change of text in both Number of attempts and if the input is correct or not ----------------
    Label_result.config(text = text, font=("Helvetica", 12))
    canvas_image.config(text = attempts, font=("Helvetica", 25))
    # ------------------------------------------------------- Check of the input character --------------------------------------------------------------------
    def check():
        """Validate the typed letter, update the board, attempts and status text."""
        global count
        global attempts
        global EndGame
        f = False # 'movie still incomplete' flag (locally shadows the tkinter.font alias 'f')
        entry = str(Entry_option.get()) # Input character
        # ------------ Check if the only character entered is an alphabet or not ------------
        if not entry.isalpha(): # If the entered character is not an alphabet
            mb.showerror("Error", "Not a valid character")
            Entry_option.delete(0, len(entry))
        elif len(entry) > 1: # If 2 or more characters are entered by the user
            mb.showerror("Error", "Too many arguments")
            Entry_option.delete(0, len(entry))
        else: # if the character entered as no exceptional errors
            finalString = ""
            Entry_option.delete(0, len(entry))
            # ------------------------------------------------------------------------------------------- If the input character is present in the movie name
            if entry.lower() in st.lower():
                if not entry.lower() in Letters: # The letter entered is not present is in letters list
                    Letters.append(entry.lower())
                    text = "Hurray!! Try picking another letter."
                    winsound.Beep(10000, 250)
                else:
                    text = "Letter already attempted!." # The letter entered is present is in letters list
                    for i in range(2):
                        winsound.Beep(5000, 250)
            # ------------------------------------------------------------------------------------------- If the input character is not present in the movie name
            else:
                if not entry.lower() in Letters: # The letter entered is not present is in letters list
                    Letters.append(entry.lower())
                    text = "Oops! No match, guess another letter."
                    count = count + 1
                    for i in range(3):
                        winsound.Beep(1000, 500)
                else: # The letter entered is present is in letters list
                    text = "Letter already attempted!."
                    for i in range(2):
                        winsound.Beep(5000, 250)
            # ------------ Editing movie name after user entry ------------
            for i in range (len(st)):
                if st[i].lower() in Letters: # if the character is present in Letters list
                    finalString = finalString + str(st[i]) + " "
                elif st[i] == '-': # if the character is a word divider
                    finalString = finalString + " "
                else: # if the character is not yet revealed
                    finalString = finalString + '_' + " "
            # ------------------------ Scan movie name ------------------------
            for i in range(len(finalString)):
                if finalString[i] == '_':
                    f = True
                    break
            if f == True: # Yet the movie name should be completed
                finalString = "Guess the movie name : \n" + finalString
            else: # The movie name is completed
                finalString = "Yayy!! Done"
            res = attempts - count # Remaining attempts left
            # ------------------------------------------------------------------------------------------- Attempted even after the game is over
            if res < 0:
                if EndGame == True:
                    mb.showerror("Error", "Chances over! Try a new game.")
            # ------------------------------------------------------------------------------------------- if the game over
            elif res == 0:
                t1 = "Game over! Try again. "
                text = "Sorry! No match"
                EndGame = True
                Label_result.config(text = text)
                canvas_image.config(text = res, font=("Helvetica", 25))
                Game_over.config(text = t1, font=("Helvetica", 20))
            # ------------------------------------------------------------------------------------------- Still attempts are left
            else:
                Label_result.config(text = text)
                Label_movie.config(text = finalString)
                canvas_image.config(text = res, font=("Helvetica", 25))
    sButton = tk.Button(AB, text = "Check!", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = check)
    sButton.place(x = 310, y = 120) # Check button for user input
    a = x # previous movie name index (local — has no effect, see NOTE above)
# ------------------------------------------------------------------------ The entry window design -------------------------------------------------------------------
# Build the landing window with the single 'Yayy! Hangman' button and
# enter the Tk main loop.
GUI = tk.Tk()
GUI.geometry("450x275")
GUI.resizable(0, 0)
GUI.title("Play Hangman")
alignment(GUI)
ButtonPlay = tk.Button(GUI, text = "Yayy! Hangman", bg = "black", fg = "white", activebackground = "grey", activeforeground = "black", command = load)
ButtonPlay['font'] = f.Font(family = 'Helvetica' )
ButtonPlay.place(x = 11, y = 10)
ButtonPlay.config(width = 38, height = 3) # Play button
GUI.mainloop()
|
# Read a length (presumably centimetres — TODO confirm units) and print the
# whole-metre part.  int(x / 100) truncates toward zero, which differs from
# floor division for negative inputs; kept as-is.
L_sm=int(input("L_sm= "))
L_m=int(L_sm/100)
print(L_m)
import time

# Interactively collect n integers, then report how many of them equal 4.
list_item = []
n = int(input("How many items you want to put in the list : "))
for _ in range(n):
    list_item.append(int(input("Please enter the integer input :")))
print("creating list ...")
time.sleep(1)
print("List has been created and now we will check number of 4's in it ")
time.sleep(1)
print("Checking ...")
time.sleep(1)
result = list_item.count(4)
print("Total number of 4's is :", result)
|
# coding: utf-8
# ## 图片重复检测神经网络
# In[29]:
import concurrent.futures
from scipy import misc
import numpy as np
import pickle
from PIL import Image
import requests
from io import StringIO
from io import BytesIO
import threading
import _thread
import codecs
import json
import multiprocessing
from multiprocessing import Process
from multiprocessing import Manager
from multiprocessing import Queue
import matplotlib.pyplot as plt
import tensorflow as tf
import sklearn
from sklearn import cross_validation
import random
# ## 全局变量
# In[31]:
tfrecords_file = "/home/recsys/hzwangjian1/data/train.tfrecords"
file_to_write = "/home/recsys/hzwangjian1/data/imagepath_pair_duplabel.data"
# In[32]:
# 获取《图片一路径,图片二路径,标记》数据对
def load_data():
    """Load (image-one path, image-two path, duplicate-label) triples.

    Reads the tab-separated pair file and returns three parallel lists:
    first-image paths, second-image paths and integer labels.  Malformed
    lines (fewer than 3 fields) are reported and skipped.
    """
    file_to_write = "/home/recsys/hzwangjian1/data/imagepath_pair_duplabel.data"
    image_one_path_list = []
    image_two_path_list = []
    label_list = []
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original opened it and never closed it).
    with open(file_to_write, 'r') as reader_handler:
        for line in reader_handler:
            elems = line.split("\t")
            if len(elems) < 3:
                print("len(elems) < 3:" + line)
                continue
            image_one_path = elems[0].strip()
            image_two_path = elems[1].strip()
            label = int(elems[2].strip())
            image_one_path_list.append(image_one_path)
            image_two_path_list.append(image_two_path)
            label_list.append(label)
    print(len(image_one_path_list))
    print(len(image_two_path_list))
    print(len(label_list))
    return image_one_path_list, image_two_path_list, label_list
# ## map多线程、多进程
# In[33]:
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
def creat_thumbnail(image_path):
    """Read the image at *image_path* and return it as a numpy array.

    NOTE(review): scipy.misc.imread is deprecated/removed in modern SciPy
    (imageio.imread is the usual replacement) — confirm the environment.
    """
    return np.asarray(misc.imread(image_path))
def load_image_with_path_list(image_path_list):
    """Load every image in *image_path_list* concurrently (30 worker threads)."""
    worker_pool = ThreadPool(30)
    images = worker_pool.map(creat_thumbnail, image_path_list)
    worker_pool.close()
    worker_pool.join()
    return images
# In[34]:
# Reset the default graph so re-running this (notebook-exported) cell
# starts from a clean slate.
tf.reset_default_graph()

# Training hyper-parameters
learning_rate = 0.001
training_iters = 20000
batch_size = 50
display_step = 10

# Network parameters: fixed-size 256x256 RGB inputs.
image_width = 256
image_height = 256
image_channel = 3
n_input = image_width * image_height

# tf Graph input: the two images of a candidate pair plus the duplicate label.
with tf.name_scope('input_data') as scope:
    X1 = tf.placeholder(tf.float32, [None, image_width, image_height, image_channel], name='image_one')
    X2 = tf.placeholder(tf.float32, [None, image_width, image_height, image_channel], name='image_two')
    y = tf.placeholder(tf.float32, [None], name='label')
    keep_prob = tf.placeholder(tf.float32, shape=(), name='drop_out') #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    """Conv2D wrapper: SAME-padded convolution, bias add, then ReLU."""
    conv = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    return tf.nn.relu(tf.nn.bias_add(conv, b))
def maxpool2d(x, k=2):
    """MaxPool2D wrapper: k x k pooling with stride k, SAME padding."""
    window = [1, k, k, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Weight/bias variables shared by the two (siamese) conv_net towers.
variables_dict = {
    "W_conv1":tf.Variable(tf.random_normal(shape=[11,11, image_channel,32]), name='weight'),
    "b_conv1":tf.Variable(tf.random_normal(shape=[1,32]), name='bias'),
    "W_conv2":tf.Variable(tf.random_normal(shape=[5,5,32,64]), name='weight'),
    "b_conv2":tf.Variable(tf.random_normal( shape=[1,64]), name='bias'),
    "W_conv3":tf.Variable(tf.random_normal(shape=[3,3,64,64]), name='weight'),
    # NOTE(review): here name='bias' is passed to tf.random_normal, not to
    # tf.Variable (parenthesis placement differs from the entries above) —
    # likely a typo; confirm before relying on variable names.
    "b_conv3":tf.Variable(tf.random_normal(shape=[1,64], name='bias')),
    "W_full":tf.Variable(tf.random_normal(shape=[8 * 8 * 64, 1024]), name='weight'),
    # NOTE(review): 'bias' is passed positionally here, landing on
    # tf.Variable's second parameter (trainable), not name= — likely a typo.
    "b_full":tf.Variable(tf.random_normal(shape=[1, 1024]), 'bias')
}
# Create model
def conv_net(x,dropout):
    """Three conv+pool layers followed by one fully connected layer.

    Returns the 1024-d fc1 representation of the input batch.

    NOTE(review): the 'dropout' argument is never used — the dropout tensor
    is built from the module-level keep_prob placeholder, and its result
    (drop_out) is computed but NOT returned (the function returns fc1), so
    dropout is effectively not applied.  norm_fc1 is also unused.  Confirm
    whether this is intended before changing it.
    """
    with tf.name_scope('model') as scope:
        # Reshape input picture
        x = tf.reshape(x, shape=[-1, image_width, image_height, image_channel])
        # Layer 1: 11x11 conv -> ReLU -> 4x4 max-pool
        with tf.name_scope('layer1') as scope:
            W_conv1 = variables_dict["W_conv1"]
            b_conv1 = variables_dict["b_conv1"]
            convOne = tf.nn.conv2d(x, W_conv1, strides=[1,1,1,1], padding="SAME")
            reluOne = tf.nn.relu(convOne + b_conv1)
            conv1 = tf.nn.max_pool(reluOne, ksize=[1,4,4,1],strides=[1,4,4,1],padding="SAME")
        # Layer 2: 5x5 conv -> ReLU -> 4x4 max-pool
        with tf.name_scope('layer2') as scope:
            W_conv2 = variables_dict["W_conv2"]
            b_conv2 = variables_dict["b_conv2"]
            convTwo = tf.nn.conv2d(conv1, W_conv2, strides=[1,1,1,1], padding="SAME")
            reluTwo = tf.nn.relu(convTwo + b_conv2)
            conv2 = tf.nn.max_pool(reluTwo, ksize=[1,4,4,1], strides=[1,4,4,1],padding="SAME")
        # Layer 3: 3x3 conv -> ReLU -> 2x2 max-pool
        with tf.name_scope('layer3') as scope:
            W_conv3 = variables_dict["W_conv3"]
            b_conv3 = variables_dict["b_conv3"]
            convThree = tf.nn.conv2d(conv2, W_conv3, strides=[1,1,1,1], padding="SAME")
            reluThree = tf.nn.relu(convThree + b_conv3)
            conv3 = tf.nn.max_pool(reluThree, ksize=[1,2,2,1], strides=[1,2,2,1],padding="SAME")
        # Fully connected layer on the flattened 8x8x64 feature map.
        with tf.name_scope('full_connect') as scope:
            W_full = variables_dict["W_full"]
            b_full = variables_dict["b_full"]
            input_flat=tf.reshape(conv3, shape=[-1, 8 * 8 * 64])
            fc1 = tf.nn.relu(tf.matmul(input_flat, W_full) + b_full)
        # Dropout (computed but unused — see docstring).
        with tf.name_scope('dropout_layer') as scope:
            drop_out = tf.nn.dropout(fc1,keep_prob)
        norm_fc1 = tf.reduce_sum(tf.mul(fc1,fc1),reduction_indices=1)
        return fc1
# Siamese towers + cosine similarity between the two representations,
# followed by an L2-regularised sigmoid cross-entropy loss and a
# sign-agreement accuracy metric.
with tf.name_scope('whole_model') as scope:
    img_one_rep = conv_net(X1, dropout=keep_prob)
    img_two_rep = conv_net(X2, dropout=keep_prob)
    # Cosine similarity = dot(a, b) / (|a| * |b|), per batch row.
    norm_one_rep = tf.sqrt(tf.reduce_sum(tf.mul(img_one_rep,img_one_rep),reduction_indices=1))
    norm_two_rep = tf.sqrt(tf.reduce_sum(tf.mul(img_two_rep, img_two_rep),reduction_indices=1))
    norm_mul = tf.mul(norm_one_rep, norm_two_rep)
    cos_rep = tf.div(tf.reduce_sum(tf.mul(img_one_rep, img_two_rep),reduction_indices=1), norm_mul)
with tf.name_scope('result') as scope:
    with tf.name_scope('norm_W'):
        # Frobenius norms of all weight matrices, summed for regularisation.
        norm_W1 = tf.sqrt(tf.reduce_sum(tf.mul(variables_dict["W_conv1"], variables_dict["W_conv1"])))
        norm_W2 = tf.sqrt(tf.reduce_sum(tf.mul(variables_dict["W_conv2"], variables_dict["W_conv2"])))
        norm_W3 = tf.sqrt(tf.reduce_sum(tf.mul(variables_dict["W_conv3"], variables_dict["W_conv3"])))
        norm_full = tf.sqrt(tf.reduce_sum(tf.mul(variables_dict["W_full"], variables_dict["W_full"])))
        norm_W = norm_W1 + norm_W2 + norm_W3 + norm_full
    # cross_entropy_cnn = -(y * tf.nn.log_softmax(cos_rep) + (1-y) * tf.nn.log_softmax(1 - cos_rep))
    cross_entropy_cnn = tf.nn.sigmoid_cross_entropy_with_logits(logits=cos_rep, targets=y)
    cost =tf.reduce_sum(cross_entropy_cnn, name='cost') + 0.01 * norm_W
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
    # Prediction is "duplicate" when cosine similarity > 0; compare with label > 0.
    cos_rep_gt_zero = tf.greater(cos_rep, 0)
    label_gt_zero = tf.greater(y, 0)
    correct_pred = tf.equal(cos_rep_gt_zero, label_gt_zero)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
# ## 主入口
# In[35]:
# Load the <image-one, image-two, label> path pairs; label is 0 or +1.
image_one_path_list, image_two_path_list, label_list = load_data()
# 80/20 train/test split (sklearn.cross_validation is long deprecated —
# model_selection is the modern module; kept for this legacy environment).
img_one_train, img_one_test, img_two_train, img_two_test, label_train, label_test = cross_validation.train_test_split(image_one_path_list, image_two_path_list, label_list, test_size= 0.2)
print(len(img_one_test))
print(len(img_two_test))
print(len(label_test))
# 获取图片数据
# print("获取图片数据")
# images_one = load_image_with_path_list(img_one_train)
# print(len(images_one))
# images_two = load_image_with_path_list(img_two_train)
# print(len(images_two))
# images_test_one = load_image_with_path_list(img_one_test)
# images_test_two = load_image_with_path_list(img_two_test)
# In[36]:
def get_image(image_path):
    """Read and decode the JPEG image at image_path.

    Args:
        image_path: tf.string tensor holding the path of the image file.

    Returns:
        The decoded image as a 3-channel tensor (decode_jpeg output;
        no cast is performed here).
    """
    content =tf.read_file(image_path)
    return tf.image.decode_jpeg(content, channels=3)
## RunnerQueue: shuffle the path/label triples, decode each image, and
## assemble shuffled training batches with 16 reader threads.
train_input_queue = tf.train.slice_input_producer( [img_one_train, img_two_train, label_train], shuffle=True, capacity=4 * batch_size)
img_one_queue = get_image(train_input_queue[0])
img_two_queue = get_image(train_input_queue[1])
label_queue = train_input_queue[2]
batch_img_one, batch_img_two, batch_label = tf.train.shuffle_batch([img_one_queue, img_two_queue, label_queue], batch_size=batch_size,capacity = 10 + 3* batch_size, min_after_dequeue = 10,num_threads=16, shapes=[(image_width, image_height, image_channel), (image_width, image_height, image_channel),()])
# In[37]:
# TensorBoard summaries for loss, accuracy and the weight norm.
cost_summary = tf.scalar_summary("cost", cost)
accuracy_summary = tf.scalar_summary("accuracy", accuracy)
norm_summary = tf.scalar_summary("norm_W", norm_W)
# Initializing the variables ('init' itself is never run — initialization
# happens via the sess.run call below).
init = tf.initialize_all_variables()
sess = tf.Session()
# with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
# img_one_sample_tf = tf.reshape(tf.cast(images_one[8], tf.uint8), shape=[-1, image_width, image_height, image_channel])
# image_summary_op = tf.image_summary("img_one_sample", img_one_sample_tf)
image_summary_one_op = tf.image_summary("img_one_sample", tf.reshape(batch_img_one, shape=[-1, image_width, image_height, image_channel]))
summary_op = tf.merge_summary([cost_summary, accuracy_summary, norm_summary])
summary_writer = tf.train.SummaryWriter('/home/recsys/hzwangjian1/tensorboard/test', graph_def=sess.graph)
# Start the queue-runner threads that feed the shuffle_batch pipeline.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)
for i in range(100):
    batch_img_one_val, batch_img_two_val, label = sess.run([batch_img_one, batch_img_two,batch_label])
    print(batch_img_one_val.shape)
    print(label)
    if i% 10 == 0:
        # Every 10 steps: evaluate training accuracy (dropout off) and log summaries.
        train_accuracy = accuracy.eval(feed_dict={X1:batch_img_one_val, X2:batch_img_two_val, y:label, keep_prob:1.0},session=sess)
        print ("step "+ str(i) +", training accuracy :"+ str(train_accuracy))
        cross_entropy_val = cross_entropy_cnn.eval({X1:batch_img_one_val, X2:batch_img_two_val, y:label, keep_prob:1.0},session=sess)
        summary_str = sess.run(summary_op, feed_dict={X1:batch_img_one_val, X2:batch_img_two_val, y:label, keep_prob:(1.0)})
        summary_writer.add_summary(summary_str, i)
    # One optimisation step with dropout keep-probability 0.75.
    sess.run([optimizer,summary_op], feed_dict={X1:batch_img_one_val, X2:batch_img_two_val, y:label, keep_prob:0.75})
    # fig = plt.figure()
    # fig.add_subplot(1,2,1)
    # plt.imshow(batch_img_one_val[1])
    # fig.add_subplot(1,2,2)
    # plt.imshow(batch_img_two_val[1])
    # plt.show()
# print("test accuracy :" + str(accuracy.eval(feed_dict={X1:images_test_one[1:500], X2:images_test_two[1:500] ,y:label_test[1:500], keep_prob:1.0},session=sess)))
coord.request_stop()
coord.join(threads)
sess.close()
summary_writer.close()
print("all done")
# In[ ]:
# del images_one
# del images_two
# NOTE(review): images_one / images_two / images_test_* below are only
# defined in code that is commented out above — this visualisation section
# raises NameError as written; it looks like leftover notebook cells.
index = 98
print(images_one[index].shape)
fig = plt.figure()
fig.add_subplot(1,2,1)
plt.imshow(images_one[index])
fig.add_subplot(1,2,2)
plt.imshow(images_two[index])
plt.show()
print(len(images_one[1:5]))
print(label_train[index])
index_test = 98
fig = plt.figure()
fig.add_subplot(1,2,1)
plt.imshow(images_test_one[index_test])
fig.add_subplot(1,2,2)
plt.imshow(images_test_two[index_test])
plt.show()
print(label_test[index_test])
print( random.randint(0,len(img_one_train) - batch_size))
|
import random

# BUGFIX: the original mixed Python 2 print statements with Python 3 calls
# ('print random.randint(1,4)' is a SyntaxError on Python 3).  The
# single-argument call form below behaves identically on both versions.
print("Hello World")
print(random.randint(1, 4))
# NOTE(review): (1, 10) iterates the two-element tuple, printing 1 then 10.
# If a range 1..9 was intended, this should be range(1, 10) — TODO confirm.
for x in (1, 10):
    print(x)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scanner runner script test."""
import pickle
import mock
import anytree
import unittest
import yaml
from tests.unittest_utils import ForsetiTestCase
from google.cloud.security.scanner.scanners import groups_scanner
from tests.scanner.test_data import fake_groups_scanner_data as fake_data
class GroupsScannerTest(ForsetiTestCase):
    """Unit tests for the groups scanner (tree build, rules, violations)."""

    def _pickle_dump(self, obj, filename):
        """Dump object to pickle file.

        Args:
            obj: Any object to be pickled.
            filename: String of the pickle filename.

        Returns:
            None
        """
        # BUGFIX: close the file handle (the original leaked it).
        with open('tests/scanner/test_data/' + filename, 'wb') as pickle_file:
            pickle.dump(obj, pickle_file)

    def _pickle_load(self, filename):
        """Loads a pickle file.

        Args:
            filename: String of the pickle filename to load.

        Returns:
            The object that was pickled.
        """
        # BUGFIX: close the file handle (the original leaked it).
        with open('tests/scanner/test_data/' + filename, 'rb') as pickle_file:
            return pickle.load(pickle_file)

    def _render_ascii(self, starting_node, attr):
        """Render an ascii representation of the tree structure.

        Args:
            starting_node: The starting node to render the ascii.
            attr: String of the attribute to render.

        Returns:
            String of the rendered tree.
        """
        return anytree.RenderTree(
            starting_node,
            style=anytree.AsciiStyle()).by_attr(attr)

    def setUp(self):
        pass

    @mock.patch('google.cloud.security.scanner.scanners.groups_scanner.group_dao.GroupDao', spec=True)
    def test_build_group_tree(self, mock_dao):
        mock_dao.get_all_groups.return_value = fake_data.ALL_GROUPS
        mock_dao.get_group_members.side_effect = fake_data.ALL_GROUP_MEMBERS

        scanner = groups_scanner.GroupsScanner({}, {}, '', '')
        scanner.dao = mock_dao
        root = scanner._build_group_tree('')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(fake_data.EXPECTED_MEMBERS_IN_TREE,
                         self._render_ascii(root, 'member_email'))

    @mock.patch('google.cloud.security.scanner.scanners.groups_scanner.group_dao.GroupDao', spec=True)
    def test_apply_rule(self, mock_dao):
        root = self._pickle_load('expected_root_without_rules.pickle')
        with open('tests/scanner/test_data/fake_group_rules.yaml', 'r') as f:
            # safe_load: the rules file contains plain YAML, and yaml.load
            # without a Loader is deprecated and unsafe.
            rules = yaml.safe_load(f)

        scanner = groups_scanner.GroupsScanner({}, {}, '', '')
        root_with_rules = scanner._apply_all_rules(root, rules)
        self.assertEqual(fake_data.EXPECTED_MEMBERS_IN_TREE,
                         self._render_ascii(root_with_rules, 'member_email'))
        self.assertEqual(fake_data.EXPECTED_RULES_IN_TREE,
                         self._render_ascii(root_with_rules, 'rules'))

    @mock.patch('google.cloud.security.scanner.scanners.groups_scanner.group_dao.GroupDao', spec=True)
    def test_find_violations(self, mock_dao):
        root = self._pickle_load('expected_root_with_rules.pickle')
        scanner = groups_scanner.GroupsScanner({}, {}, '', '')
        all_violations = scanner._find_violations(root)
        self.assertEqual(3, len(all_violations))
        for violation in all_violations:
            self.assertEqual('christy@gmail.com', violation.member_email)
if __name__ == '__main__':
unittest.main()
|
class node:
    """Singly-linked-list cell holding a value and a pointer to the next cell."""

    def __init__(self, val=None, next=None):
        self.val = val
        self.next = next

    def __del__(self):
        # Announce destruction so list teardown can be observed.
        print("I will delete myself")
def createlist():
    """Build and return the two-node list 0 -> 1."""
    return node(0, node(1))
def printList(nHead):
    """Print every value in the list on one line, then the teardown markers.

    Drops the local reference to each visited cell (as the original did)
    so freed cells can announce their destruction.
    """
    cursor = nHead
    while cursor is not None:
        print(cursor.val, end=" ")
        successor = cursor.next
        del cursor          # unbind the local name before moving on
        cursor = successor
    print("")
    print("delete end")
# Build the 0 -> 1 list, print it (freeing cells along the way), then exit.
nHead=createlist();
printList(nHead)
print("the program will die")
import pika, os
from apiRequest.main import *
from prediction.Match.PredictMatch import *
from prediction.Tournament import *
from flask import Flask, request
from flask_cors import CORS, cross_origin
# Access the CLODUAMQP_URL environment variable and parse it (fallback to localhost)
# SECURITY NOTE(review): the fallback URL embeds real-looking credentials in
# source control; they should be rotated and supplied only via CLOUDAMQP_URL.
url = os.environ.get('CLOUDAMQP_URL', 'amqp://migmnpgf:D0y8DZ6E25ziu6A4Aa3igR9UxdBZjBGT@squid.rmq.cloudamqp.com/migmnpgf')
params = pika.URLParameters(url)
connection = pika.BlockingConnection(params)
channel = connection.channel() # start a channel
# Flask app with permissive CORS for the prediction endpoints.
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/")
@cross_origin()
def index():
    """Root endpoint: attach the queue consumers and start consuming.

    NOTE(review): channel.start_consuming() blocks this request handler
    indefinitely, so the return statement is only reached after the consumer
    stops.  'predict_tournament' is consumed but never declared here —
    presumably declared by the producer side; confirm.
    """
    print("serveur chargé")
    channel.queue_declare(queue='predict_match')
    channel.basic_consume(queue="predict_match", on_message_callback=predictMatchCallBack, auto_ack=True)
    channel.basic_consume(queue="predict_tournament", on_message_callback=predictTournamentCallBack, auto_ack=True)
    channel.start_consuming()
    return "<h1>Weclome to the client-lourd"
def postPredictMatchResponse(result):
    """Publish *result* onto the predict_match_response queue."""
    response_queue = 'predict_match_response'
    channel.queue_declare(queue=response_queue)  # Declare a queue (idempotent)
    channel.basic_publish(exchange='', routing_key=response_queue, body=result)
def postPredictTournamentResponse(result):
    """Log *result* and publish it onto the predict_tournament_response queue."""
    print(result)
    response_queue = 'predict_tournament_response'
    channel.queue_declare(queue=response_queue)  # Declare a queue (idempotent)
    channel.basic_publish(exchange='', routing_key=response_queue, body=result)
def jsonL(request_json):
    """Resolve home/away short names in-place, falling back to the raw ids.

    Looks each side's API id up via getShortNameByApiId; when no short name
    is found the id itself is used as the display name.
    """
    for side in ("home", "away"):
        short_name = getShortNameByApiId(request_json[side + "_id"])
        if short_name is None:
            short_name = request_json[side + "_id"]
        request_json[side + "_name"] = short_name
    return request_json
def requesToResponse(request):
    """Turn a raw predict-match request into a response string and publish it.

    Parses the JSON body, resolves team names, runs both predictors and
    publishes the assembled response.  Returns the response string, or
    "Error" when the request is malformed or prediction fails.
    """
    response_json = None
    print(request)
    try:
        request_json = jsonL(json.loads(request))
        response = predictFromTeamIds(request_json['home_id'], request_json['away_id'], request_json['id'])
        predictionIA = predictionBetweenTwoTeams(request_json["home_name"], request_json["away_name"])
        responseString = ", 'home_goal_prediction':'{}', 'away_goal_prediction':'{}'".format(predictionIA.item(0), predictionIA.item(1))
        response_json = response + responseString + "}"
        postPredictMatchResponse(response_json)
    except KeyError as e:
        print(e)
        print("key error")
        response_json = "Error"
    except Exception as e:
        # BUGFIX: was a bare 'except:' that silently swallowed everything
        # (including SystemExit/KeyboardInterrupt); narrow it and log.
        print(e)
        response_json = "Error"
    return response_json
def callback(ch, method, properties, body):
    """Fallback/debug consumer callback: just dump the raw message body."""
    message = body
    print(message)
def predictMatchCallBack(ch, method, properties, body):
    """RabbitMQ callback for 'predict_match': log, then process the request.

    The response is published by requesToResponse itself; its return value
    is deliberately ignored here.
    """
    print(body)
    requesToResponse(body)
def predictTournamentCallBack(ch, method, properties, body):
    """RabbitMQ callback for 'predict_tournament' requests.

    Parses the message, runs the tournament prediction and publishes a
    JSON-ish string (or the parse error message) on the response queue.
    """
    response = None
    try:
        request = json.loads(body)
        shortNameList = teamAPIIdToShortName(request["id_team_list"])
        response = PredictTournament(shortNameList)
        response = '"id": "{}", "response" : {}'.format(str(request["id"]), str(response))
        response = "{" + response + "}"
    except ValueError as e:
        # Bug fix: `str(ValueError)` stringified the exception CLASS
        # ("<class 'ValueError'>"); report the raised instance instead.
        response = str(e)
    postPredictTournamentResponse(response)
# NOTE(review): initViews() is presumably defined earlier in this file; it
# runs at import time, before the dev server starts — confirm intent.
initViews()
if __name__ == '__main__':
    # Flask development server (production deployments should use a WSGI host).
    app.run()
import unittest
import os
import sys
import tempfile
import shutil
import importlib
import traceback
from contextlib import contextmanager
from gmc.conf import settings, ENVIRONMENT_VARIABLE
class TestCase(unittest.TestCase):
    """Base test case that builds a throw-away dataset/brain directory tree.

    setUpClass snapshots the current gmc settings, creates a dummy dataset
    (one directory per genre, each with 5 empty .wav files), writes a
    settings.py pointing at the temp dirs, and redirects the live settings
    there. tearDownClass removes everything and restores the snapshot.
    """
    @classmethod
    def setUpClass(cls):
        super(TestCase, cls).setUpClass()
        # Snapshot every current setting so tearDownClass can restore them.
        cls._old_settings = {}
        for setting in settings:
            cls._old_settings[setting] = getattr(settings, setting)
        cls.dataset_dir = os.path.realpath(os.path.join(
            tempfile.gettempdir(),
            cls.__name__,
            'dummy_dataset',
        ))
        cls.brain_dir = os.path.realpath(os.path.join(
            tempfile.gettempdir(),
            cls.__name__,
            'dummy_brain',
        ))
        if not os.path.exists(cls.dataset_dir):
            os.makedirs(cls.dataset_dir)
        if not os.path.exists(cls.brain_dir):
            os.makedirs(cls.brain_dir)
        # Create one sub-directory per genre, each holding 5 empty .wav files.
        for gen_dir in settings.GENRES:
            dir_path = os.path.join(cls.dataset_dir, gen_dir)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            for i in range(5):
                with open(os.path.join(dir_path, '%s%d.wav' % (gen_dir, i)), 'w'):
                    pass
        # Write a settings.py that points at the temp dirs (its parent dir
        # already exists: it is the one created for dataset_dir/brain_dir).
        cls.settings_file = os.path.realpath(os.path.join(
            tempfile.gettempdir(),
            cls.__name__,
            'settings.py',
        ))
        with open(cls.settings_file, 'w') as set_file:
            set_file.write("DATASET_DIR = '%s'\n" % cls.dataset_dir)
            set_file.write("BRAIN_DIR = '%s'" % cls.brain_dir)
        settings.modify({
            'DATASET_DIR': cls.dataset_dir,
            'BRAIN_DIR': cls.brain_dir,
        })

    @classmethod
    def tearDownClass(cls):
        """Delete the temp tree and restore the original settings."""
        os.remove(cls.settings_file)
        shutil.rmtree(cls.dataset_dir)
        shutil.rmtree(cls.brain_dir)
        settings.modify(cls._old_settings)
        super(TestCase, cls).tearDownClass()
    # Removed ~20 lines of dead, commented-out subTest()/traceback helpers
    # that duplicated unittest.TestCase.subTest.
import theano.tensor as T
import numpy as np
import theano
from theano.tensor.nnet import neighbours
from lasagne.layers import Layer
# adapted from https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py#L375
def logsumexp(arr, axis=0):
    """Numerically stable symbolic log(sum(exp(arr), axis)).

    Subtracting the max before exponentiating keeps the exp in range, as
    with the log this is what accumulates the fewest errors.
    """
    vmax = arr.max(axis=axis, keepdims=True)
    # Bug fix: use the symbolic T.exp — np.exp cannot evaluate a Theano
    # expression graph.
    out = T.log(T.sum(T.exp(arr - vmax), axis=axis))
    # Bug fix: add the max back with the REDUCED shape. The keepdims `vmax`
    # (e.g. (n, 1)) added to the reduced sum (shape (n,)) would broadcast
    # to an (n, n) matrix instead of the intended (n,) vector.
    out += arr.max(axis=axis)
    return out
def compute_log_probs_gaussian_mixture(trials, means_sym, covars_sym, weights_sym):
    """Symbolic log-probability of each trial under a diagonal Gaussian mixture.

    trials: (location, features); means_sym/covars_sym: (gaussian, features).
    NOTE(review): weights_sym is *added* to the log-probs, so it appears to
    be expected in log-domain (unlike the linear-domain weights in
    compute_probs_gaussian_mixture) — confirm with callers.
    Returns a (location,) vector; the constant sqrt(2pi ** n_features)
    term is deliberately omitted for stability.
    """
    diffs = trials.dimshuffle(0,'x',1) - means_sym.dimshuffle('x',0,1)
    # now location x gaussian x features
    scaled_diffs = T.sqr(diffs / covars_sym.dimshuffle('x',0,1))
    # still location x gaussian x features
    exponent = -T.sum(scaled_diffs, axis=2) / 2.0
    # now location x gaussian
    # Bug fix: the log of a product of per-feature scales is the SUM of the
    # logs, not the log of the sum. This now matches
    # compute_probs_gaussian_mixture's T.prod(T.sqrt(covars_sym), axis=1).
    log_denominator = T.sum(T.log(covars_sym), axis=1) / 2.0
    log_prob_per_location_per_gaussian = (exponent -
        log_denominator.dimshuffle('x', 0))
    log_weighted = log_prob_per_location_per_gaussian + (
        weights_sym.dimshuffle('x', 0))
    # still location x gaussian
    # sums over the gaussian axis, so logsumexp is required
    log_prob_per_location = logsumexp(log_weighted, axis=1)
    return log_prob_per_location
def compute_probs_gaussian_mixture(trials, means_sym, covars_sym, weights_sym):
    """Per-location mixture probability, computed directly in probability space.

    Warning: usually failed ... possibly numerical problems? Prefer the
    log-domain variant (compute_log_probs_gaussian_mixture).
    """
    # (location, 1, features) - (1, gaussian, features): broadcasted deltas
    deltas = trials.dimshuffle(0, 'x', 1) - means_sym.dimshuffle('x', 0, 1)
    scaled_sq = T.sqr(deltas / covars_sym.dimshuffle('x', 0, 1))
    # Collapse the feature axis -> (location, gaussian)
    exp_arg = -T.sum(scaled_sq, axis=2) / 2.0
    numerator = T.exp(exp_arg)
    # Constant sqrt(2pi ** n_features) deliberately dropped for stability(?)
    norm_const = T.prod(T.sqrt(covars_sym), axis=1)
    per_gaussian = numerator / norm_const.dimshuffle('x', 0)
    weighted = per_gaussian * weights_sym.dimshuffle('x', 0)
    # Mixture probability: sum over the gaussian axis -> (location,)
    return T.sum(weighted, axis=1)
def img_2_neibs_with_chans(inputs_sym, patch_size):
    """Extract all overlapping (stride-1) patches from a batch of images,
    flattening each patch together with all its channels.

    inputs_sym: symbolic 4D tensor, presumably (batch, channels, rows, cols)
        — TODO confirm layout against callers.
    patch_size: (patch_height, patch_width).
    Returns a 2D tensor of shape
    (batch * out_rows * out_cols, channels * patch_h * patch_w).
    """
    # images2neibs yields one flat row per (image, channel) patch.
    flat_patches = neighbours.images2neibs(inputs_sym, patch_size, (1,1))
    # Recover the topological layout:
    # (batch, channel, out_row, out_col, patch_h, patch_w).
    topo_flat_patches = T.reshape(flat_patches,(inputs_sym.shape[0],
                                            inputs_sym.shape[1],
                                            inputs_sym.shape[2]-patch_size[0]+1,
                                            inputs_sym.shape[3]-patch_size[1]+1,
                                            patch_size[0],
                                            patch_size[1]))
    # Move channels next to the per-patch pixels:
    # (batch, out_row, out_col, channel, patch_h, patch_w).
    flat_patches = topo_flat_patches.dimshuffle(0,2,3,1,4,5)
    # Flatten to (n_patches, patch_features).
    flat_patches = T.reshape(flat_patches, (T.prod(flat_patches.shape[:3]),
                                            T.prod(flat_patches.shape[3:])))
    return flat_patches
def create_neibs_fn(patch_size):
    """Compile a Theano function mapping a float32 image batch to its
    flattened stride-1 patches (see img_2_neibs_with_chans)."""
    images = T.ftensor4()
    patches = img_2_neibs_with_chans(images, patch_size)
    return theano.function([images], patches)
def get_patch_size(layer):
    """Return a layer's spatial patch size: its conv filter size when it has
    one, otherwise its pooling size."""
    return layer.filter_size if hasattr(layer, 'filter_size') else layer.pool_size
class GaussianMixtureSimilarityLayer(Layer):
    """Lasagne layer scoring each input image by the log-likelihood of its
    patches under a fixed (non-trainable) diagonal Gaussian mixture.

    The GMM parameters are baked in as Theano constants; `pool_func`
    aggregates the per-patch log-probs into one score per example
    (default: T.sum).
    """
    def __init__(self, incoming, means, covariances, weights, patch_size,
            pool_func=T.sum, **kwargs):
        self.means = T.constant(means, dtype=theano.config.floatX)
        self.covariances = T.constant(covariances, dtype=theano.config.floatX)
        self.weights = T.constant(weights, dtype=theano.config.floatX)
        self.patch_size = patch_size
        self.pool_func = pool_func
        super(GaussianMixtureSimilarityLayer,self).__init__(incoming, **kwargs)

    def get_output_shape_for(self, input_shape):
        # One scalar score per example. Bug fix: Lasagne shapes are tuples;
        # returning a list breaks helpers that concatenate shapes.
        return (input_shape[0],)

    def get_output_for(self, input, **kwargs):
        flat_patches = img_2_neibs_with_chans(input, self.patch_size)
        log_prob_per_location = compute_log_probs_gaussian_mixture(flat_patches,
            self.means, self.covariances, self.weights)
        # Bug fix: Theano's reshape takes the new shape as ONE tuple; the
        # old call passed -1 as reshape's `ndim` argument instead.
        log_prob_per_input = log_prob_per_location.reshape(
            (input.shape[0], -1))
        return self.pool_func(log_prob_per_input, axis=1)
#!/usr/bin/env python3
class Node():
    """A single node of a doubly linked list: a payload plus links to the
    previous and next nodes (None at either end)."""

    def __init__(self, data, prev, next_):
        # Instance attributes only; the original class-level defaults
        # (data, prev, next_ = None, None, None) were redundant because
        # __init__ always overwrites them.
        self.data = data
        self.prev = prev
        self.next_ = next_

    def __str__(self):
        return str(self.data)
class DoublyLinkedList():
    """A doubly linked list with O(1) insertion/removal at both ends.

    Tracks head, tail and size; positional operations are O(n).
    """

    def __init__(self):
        # Bug fix: state is now per-instance instead of class-level
        # attributes shared as defaults by every list.
        self.size = 0
        self.head = None
        self.tail = None

    def is_empty(self):
        """Return True when the list holds no elements."""
        return self.size == 0

    def add(self, elem):
        """Add a node to the tail of the linked list, O(1)."""
        self.add_last(elem)

    def add_last(self, elem):
        """Append an element at the tail, O(1)."""
        if self.is_empty():
            self.head = self.tail = Node(elem, None, None)
        else:
            self.tail.next_ = Node(elem, self.tail, None)
            self.tail = self.tail.next_
        self.size += 1

    def add_first(self, elem):
        """Prepend an element at the head, O(1)."""
        if self.is_empty():
            self.head = self.tail = Node(elem, None, None)
        else:
            self.head.prev = Node(elem, None, self.head)
            self.head = self.head.prev
        self.size += 1

    def peek_first(self):
        """Return (without removing) the first element, O(1)."""
        if self.is_empty():
            raise RuntimeError("Empty linked list")
        return self.head.data

    def peek_last(self):
        """Return (without removing) the last element, O(1)."""
        if self.is_empty():
            raise RuntimeError("Empty linked list")
        return self.tail.data

    def remove_first(self):
        """Remove and return the element at the head, O(1)."""
        if self.is_empty():
            raise RuntimeError("Empty linked list")
        data = self.head.data
        self.head = self.head.next_
        self.size -= 1
        if self.is_empty():
            self.tail = None
        else:
            self.head.prev = None  # unlink the removed node
        return data

    def remove_last(self):
        """Remove and return the element at the tail, O(1)."""
        if self.is_empty():
            raise RuntimeError("Empty linked list")
        data = self.tail.data
        self.tail = self.tail.prev
        self.size -= 1
        if self.is_empty():
            self.head = None
        else:
            self.tail.next_ = None  # unlink the removed node
        return data

    def _remove(self, node):
        """Unlink an arbitrary interior node and return its data, O(1)."""
        # Head/tail removals are delegated to the O(1) end operations.
        if node.prev is None:
            return self.remove_first()
        if node.next_ is None:
            return self.remove_last()
        # Make the adjacent nodes' pointers skip over `node`.
        node.next_.prev = node.prev
        node.prev.next_ = node.next_
        data = node.data
        # Bug fix: removed a leftover debug print(node, data).
        node.data = None
        node.prev = node.next_ = node = None
        self.size -= 1
        return data

    def remove_at(self, index):
        """Remove and return the element at position `index`, O(n)."""
        if index < 0 or index >= self.size:
            raise IndexError("Illegal index")
        trav = self.head
        for _ in range(index):
            trav = trav.next_
        return self._remove(trav)

    def remove(self, elem):
        """Remove the first node whose data equals `elem` (None allowed);
        return True on success, False if not found, O(n).

        Bug fix: uses `==` rather than elem.__eq__(trav.data) — __eq__ can
        return NotImplemented (which is truthy) for mismatched types, so
        the original could remove the wrong node.
        """
        trav = self.head
        while trav is not None:
            if trav.data == elem:
                self._remove(trav)
                return True
            trav = trav.next_
        return False

    def index_of(self, obj):
        """Return the index of the first node whose data equals `obj`,
        or -1 if absent, O(n). (Same `==` fix as remove().)"""
        trav, i = self.head, 0
        while trav is not None:
            if trav.data == obj:
                return i
            trav = trav.next_
            i += 1
        return -1

    def __contains__(self, elem):
        return self.index_of(elem) != -1

    def __len__(self):
        return self.size

    def __str__(self):
        # Bug fix: str() each element — the original did `s += trav.data`,
        # which raised TypeError for any non-string payload.
        parts = []
        trav = self.head
        while trav is not None:
            parts.append(str(trav.data))
            trav = trav.next_
        return "[" + ", ".join(parts) + "]"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.