text stringlengths 8 6.05M |
|---|
"""处理 okex ws 数据
"""
import json
import logging
import aioredis
from interceptor.interceptor import Interceptor, execute
from okws.ws.okex.decode import decode
from .candle import config as candle
from .normal import config as normal
logger = logging.getLogger(__name__)
class App(Interceptor):
    """Interceptor that decodes okex websocket frames and republishes them on redis.

    Lifecycle signals (READY/CONNECTED/DISCONNECTED/EXIT) are turned into
    messages on the `okex/<name>/event` channel; data frames are published
    per table and persisted through the `normal` / `candle` writers.
    """

    # Bug fix: the original read `MAX_ARRAY_LENGTH = 100,` — the trailing
    # comma made this a one-element tuple instead of an int.
    MAX_ARRAY_LENGTH = 100

    def __init__(self, name, exchange_params=None, redis_url="redis://localhost"):
        # `exchange_params` defaults to None instead of a mutable `{}` so a
        # single default dict is never shared between instances.
        self.name = name
        self.redis_url = redis_url
        self.redis = None
        self.decode = decode({} if exchange_params is None else exchange_params)

    async def __call__(self, ctx):
        # Run the decode step first, then this object's own enter() handler.
        return await execute(ctx, [self.decode, self])

    async def enter(self, request):
        """Dispatch on the connection signal carried in ``request['_signal_']``."""
        if request['_signal_'] == 'READY' and self.redis is None:
            self.redis = await aioredis.create_redis_pool(self.redis_url)
        elif request['_signal_'] == 'CONNECTED':
            await self.redis.publish(f"okex/{self.name}/event", json.dumps({'op': 'CONNECTED'}))
            logger.debug(f"{self.name} 已连接")
        elif request['_signal_'] == 'DISCONNECTED':
            await self.redis.publish(f"okex/{self.name}/event", json.dumps({'op': 'DISCONNECTED'}))
            # Bug fix: this branch used to log "已连接" (connected) —
            # a copy/paste error from the CONNECTED branch.
            logger.debug(f"{self.name} 已断开")
        elif request['_signal_'] == 'EXIT':
            await self.redis.publish(f"okex/{self.name}/event", json.dumps({'op': 'EXIT'}))
            logger.info(f"{self.name} 退出")
            await self.close()
        elif request['_signal_'] == 'ON_DATA':
            logger.debug(request['DATA'])
            if "table" in request['DATA']:
                # Re-publish the raw frame on a per-table channel.
                await self.redis.publish(f"okex/{self.name}/{request['DATA']['table']}", request['_data_'])
                # save to redis
                await execute({"data": request['DATA'], "redis": self.redis, "name": self.name},
                              [normal['write'], candle['write']])
            elif "event" in request['DATA']:
                await self.redis.publish(f"okex/{self.name}/event", request['_data_'])
                if request['DATA']['event'] == 'error':
                    logger.warning(f"{self.name} 收到错误信息:{request['DATA']}")
                else:
                    logger.info(f"{self.name} :{request['DATA']}")
            else:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning(f"{self.name} 收到未知数据:{request['_data_']}")

    async def close(self):
        """Close the redis pool and forget it so a later READY can recreate it."""
        if self.redis is not None:
            self.redis.close()
            await self.redis.wait_closed()
            self.redis = None

    def __del__(self):
        # Best-effort cleanup; wait_closed() cannot be awaited in a destructor.
        if self.redis is not None:
            self.redis.close()
|
""""Classifies images using the dataset to train the model (features to be used mus be stored into files X,y)."""
import numpy as np
from skimage.io import imshow
from skimage.io import imread
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier
from pickle import dump, load
from library.utils import medial_axis_skeleton
from library.utils import skeleton_lines
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import scale
from library.data_utils import load_labels
from library.feature_extraction import extract_features
from library.feature_extraction import preprocess_image
from sys import argv
if __name__ == '__main__':
    # Ground-truth class labels for the training database.
    labels = load_labels("database", "classes.csv")
    # load model: the feature matrix X and label vector y were pickled to
    # files named "X" and "y" by the training step.
    X = np.array([])
    y = np.array(labels)
    X = load(open("X", "rb"))
    y = load(open("y", "rb"))
    # Check arguments
    if len(argv) < 2:
        print("Error: no pgm file given")
        exit()
    else:
        # Process image
        # NOTE(review): `as_grey` is the pre-0.19 scikit-image spelling; newer
        # releases renamed it to `as_gray` — confirm the pinned version.
        image = imread(argv[1], as_grey=True)
        p_image = preprocess_image(image)
        features = extract_features(p_image)
        # resize the features to "normalize" them compared the feature dataset:
        # append the new sample to X, scale everything together, then split again.
        large_X = np.concatenate((X, [features]))
        large_X = scale(large_X)
        # re-extract now it's scaled
        features = large_X[-1]
        X = large_X[:-1]
        # Build the classifier from binary data loaded
        gamma = 0.1
        C = 10
        clf = OneVsRestClassifier(
            SVC(kernel='rbf', C=C, gamma=gamma, probability=True), n_jobs=4)
        clf.fit(X, y)
        # Classifies the image
        proba = clf.predict_proba([features])
        # Print one probability per class, one per line.
        for p in proba[0]:
            print("%.13f" % p)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Time-function demo (Python 2: uses the print statement throughout).
print "############################## 1. time时间模块"
# Time intervals are floating-point numbers measured in seconds.
# A timestamp is the number of seconds elapsed since midnight,
# January 1st 1970 (the epoch).
import time  # bring in the time module
a = time.time()
print '当前时间戳:' + str(a)
print "############################## 2. 时间元组"
# struct_time: a 9-field tuple representation of a point in time.
localtime = time.localtime(time.time())
print "本地时间为 :", localtime
print "############################## 3. 格式化时间"
# asctime() renders a struct_time in a fixed human-readable form.
localtime = time.asctime( time.localtime(time.time()) )
print "本地时间为 :", localtime
# time.strftime(format[, t]) formats a struct_time with a pattern.
# e.g. "2016-03-20 11:45:39"
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# e.g. "Sat Mar 28 22:24:24 2016"
print time.strftime("%a %b %d %H:%M:%S %Y", time.localtime())
# Convert a formatted string back into a timestamp
# (strptime parses, mktime converts the tuple to seconds).
a = "Mon Sep 18 06:32:40 2017"
print time.mktime(time.strptime(a, "%a %b %d %H:%M:%S %Y"))
print "############################## 4. 获取日历 Calendar"
import calendar  # bring in the calendar module
cal = calendar.month(2017, 9)
print "以下输出2017年9月份的日历:"
print cal
print "############################## 5. datetime模块"
print "############################## 6. pytz模块"
print "############################## 7. dateutil模块"
|
from pathlib import Path

# Directory containing this file; used as the project's base path.
BASE_DIR = Path(__file__).parent
|
#Esse script devolve a corrente média de um csv gerado a partir do log de voo gerado pela pixhawk e obtido pelo qGroundControl
def corrente_media_de_um_logPX4_csv(nome_do_arquivo_ponto_csv, coluna=3):
    """Print and return the mean current from a PX4 flight-log CSV.

    Parameters:
        nome_do_arquivo_ponto_csv: path of the CSV exported from QGroundControl.
        coluna: zero-based index of the current column (default 3, the
            position used by the original hard-coded script).

    Returns:
        The mean of the selected column as a float (the original returned
        None and only printed the value).
    """
    import pandas as pd
    import numpy as np
    df = pd.read_csv(nome_do_arquivo_ponto_csv)
    print(df.head())
    correntes = df.iloc[:, coluna].values
    print(correntes)
    media = float(np.mean(correntes))
    print("Corrente_media = " + str(np.mean(correntes)) + " A")
    return media
# Example invocation: expects 'bateria.csv' in the current working directory.
corrente_media_de_um_logPX4_csv('bateria.csv')
|
// NOTE(review): this fragment starts mid-method — `in` (a Scanner), `T`
// (the test-case count) and `sum` (the cost accumulator, presumably a long)
// are declared outside the visible region.
for(int a0 = 0; a0 < T; a0++){
    int L = in.nextInt();
    long A = in.nextInt();
    int N = in.nextInt();
    int D = in.nextInt();
    // No valid arrangement exists for these inputs.
    if (N<D || N>L || A<D){
        System.out.println("SAD");
        continue;
    }
    // Deal with the special case
    if (D==1){
        System.out.println(L*A);
        continue;
    }
    // The number of accessories
    // A : a1
    // A-1 to A-n: a2
    // A-n-1: a3
    long max = 0;
    int a2Max = (N-1)/(D-1);
    // Loop starting from the maximum a2
    for (int a2=a2Max;a2>=1; a2--){
        // Calculate a1, a3, and n by a2
        long a1 = N + (a2-1) - a2*(D-1);
        long n = (L-a1)/a2;
        long a3 = (L-a1)%a2;
        // Break when the type of accessories (A) is not enough
        if (n>A-1 || (n==A-1 && a3 > 0)){
            break;
        }
        // Calculate cost: a1 copies of the best type, an arithmetic-series
        // contribution for the middle types, plus the remainder a3.
        sum = A*a1 + (A-1+A-n)*n/2*a2 + a3 * (A-n-1);
        // Break when cost starts decreasing
        if (sum<=max){
            break;
        }
        max = sum;
    }
    System.out.println(max==0?"SAD":max);
}
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 3 18:42:01 2018
@author: lenovo
"""
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from pandas import DataFrame
import math
import pandas as pd
class OperateHuanbao(object):
    """Scraper for environmental-protection (环保) filing announcements.

    Downloads filing lists from the Shandong EPA announcement site, filters
    them for China Telecom Jinan records, saves them as Excel files and
    matches them against base-station names.
    """

    def __init__(self, dir_out='huanbao\\'):
        self.dir_out = dir_out
        self.web_out = self.dir_out + 'web\\'
        # City name -> administrative area code.
        self.citys_id = {
            '济南': '370100',
            '青岛': '370200',
            '淄博': '370300',
            '枣庄': '370400',
            '东营': '370500',
            '烟台': '370600',
            '潍坊': '370700',
            '济宁': '370800',
            '泰安': '370900',
            '威海': '371000',
            '日照': '371100',
            '莱芜': '371200',
            '临沂': '371300',
            '德州': '371400',
            '聊城': '371500',
            '滨州': '371600',
            '菏泽': '371700'
        }
        # Jinan county/district name -> administrative area code.
        self.jncountys_id = {
            '历下': '370102',
            '市中': '370103',
            '槐荫': '370104',
            '天桥': '370105',
            '高新': '37010001',
            '历城': '370112',
            '长清': '370113',
            '平阴': '370124',
            '济阳': '370125',
            '商河': '370126',
            '章丘': '370181',
            '': ''}
        # Counties scanned by default in getJnteleWebInfo().
        self.jncountys_name = [
            '平阴',
            '历下',
            '市中',
            '槐荫',
            '天桥',
            '高新',
            '历城',
            '长清',
            '济阳',
            '商河',
            '章丘']
        self.url = 'http://58.56.98.90/REG/f/announcement/announcementShow'
        # POST form defaults for the announcement search endpoint.
        self.ses_datas = {
            'buildCity': '',
            'buildCounty': '',
            'buildProvince': '370000',
            'orderBy': '',
            'pageNo': '1',
            'pageSize': '10',
            'projectName': '',
            'recordNumber': ''
        }
        self.ses_header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0'}
        self.session = requests.Session()

    def private_get_web_num(self, city_name='济南', county_name='', key=''):
        """[private] Return the total number of filings matching the query."""
        print('================================================')
        print('---> 开始获取%s%s环保申报的总条数:' % (city_name, county_name))
        if key != '':
            print('---->关键字为:%s' % (key))
        self.ses_datas['buildCity'] = self.citys_id[city_name]
        self.ses_datas['buildCounty'] = self.jncountys_id[county_name]
        sres = self.session.post(self.url, data=self.ses_datas, headers=self.ses_header)
        html_text = sres.text
        bs = BeautifulSoup(html_text, "lxml")
        body_tag = bs.body
        div_tag = body_tag.find('div', {'class': "pagination", 'style': "text-align: right;"})
        # The pagination footer reads "共 N 条"; cut out the number between
        # the two markers.
        tmp_index = ((div_tag.text).find('共')) + 2
        tmp_str = (div_tag.text)[tmp_index:]
        tmp_index = tmp_str.find('条') - 1
        nums = int(tmp_str[:tmp_index])
        print('---> %s%s%s共获取%d条环保申报' % (city_name, county_name, key, nums))
        print('================================================')
        return nums

    def private_get_detail(self, html_text):
        """[private] Extract [contact, build-scope] cells from a detail page."""
        bs = BeautifulSoup(html_text, "lxml")
        body_tag = bs.body
        table_tag = body_tag.find('table')
        tbody_tag = table_tag.find('tbody')
        flag1 = 0
        tmp = []
        # Rows 3 and 8 of the detail table carry the values of interest;
        # within each row the second cell holds the text.
        for tr_tag in tbody_tag.findAll('tr'):
            if flag1 == 3:
                flag2 = 0
                for cell in tr_tag.findAll('td'):
                    if flag2 == 1:
                        tmp.append(cell.string)
                    flag2 += 1
            elif flag1 == 8:
                flag2 = 0
                for cell in tr_tag.findAll('td'):
                    if flag2 == 1:
                        tmp.append(cell.string)
                    flag2 += 1
            flag1 += 1
        return tmp

    def private_getJnteleByConty(self, city_name='济南', county_name='',
                                 company='中国电信股份有限公司济南分公司',
                                 date='', ifAll=False, key=''):
        """[private] Fetch Jinan Telecom filings for one county.

        Parameters:
            date: start date ('%Y-%m-%d'); empty string fetches everything.
            ifAll: also fetch the per-filing detail page when True.
            key: optional keyword, forwarded to the count query and used in
                log messages and the output filename.

        Bug fix: `key` used to be read as an undefined name here (it only
        existed as a global when the module was run as a script), raising
        NameError for library callers; it is now an explicit parameter.
        """
        print('================================================')
        if (ifAll):
            if_all = '需要获取详细信息'
        else:
            if_all = '不需要获取详细信息'
        if date == '':
            print('-->开始获取系统所有申报信息:%s' % (if_all))
        else:
            print('-->开始获取申报信息:%s,起始日期:%s' % (if_all, date))
            date_end = datetime.strptime(date, '%Y-%m-%d')
        shenbao_num = self.private_get_web_num(city_name, county_name, key)
        # Use float division so math.ceil rounds up correctly even if this
        # module is ever run under Python 2 (integer / would floor first).
        shanbao_pages = int(math.ceil(shenbao_num / 100.0))
        self.ses_datas['pageSize'] = '100'
        print('-->%s%s%s共%d页环保申报信息' % (city_name, county_name, key, shanbao_pages))
        num_id = 0
        num_ok = 0
        all_infos = []
        for shanbao_page in range(1, shanbao_pages + 1):
            print('---->开始获取第%d页信息...' % shanbao_page)
            self.ses_datas['pageNo'] = '%d' % shanbao_page
            sres = self.session.post(self.url, data=self.ses_datas, headers=self.ses_header)
            html_text = sres.text
            bs = BeautifulSoup(html_text, "lxml")
            body_tag = bs.body
            table_tag = body_tag.find('table', {'id': 'index_table'})
            tbody_tag = table_tag.find('tbody')
            url_all = 'http://58.56.98.90'
            ok_flag = True      # current row belongs to the target company
            get_date = False    # reached a filing older than `date`
            for tr_tag in tbody_tag.findAll('tr'):
                tmp = []
                n = 0
                num_id += 1
                for cell in tr_tag.findAll('td'):
                    if ok_flag:
                        if n == 2:
                            # Record number; prefix keeps Excel from
                            # mangling it into a number.
                            tmp.append('ID' + cell.string.strip())
                        elif n == 3:
                            # Builder column: keep only the target company.
                            if cell.string.strip() == company:
                                tmp.append(cell.string.strip())
                                print('--> 共%d信息,第%d信息:%s,ok' % (shenbao_num, num_id, cell.string.strip()))
                                num_ok = num_ok + 1
                            else:
                                print('--> 共%d信息,第%d信息:%s,Pass' % (shenbao_num, num_id, cell.string.strip()))
                                ok_flag = False
                                n += 1
                                continue
                        elif n == 5:
                            # Publication date; stop once older than `date`.
                            if date != '':
                                date_now = datetime.strptime(cell.string.strip(), '%Y-%m-%d')
                                if date_now > date_end:
                                    tmp.append(cell.string.strip())
                                else:
                                    get_date = True
                                    break
                            else:
                                tmp.append(cell.string.strip())
                        elif n == 6:
                            # Link to the filing detail page; store its id.
                            info = (cell.findAll('a')[0])['href']
                            tmp.append(info[(info.find('id=') + 3):])
                            if ifAll:
                                res = self.session.get(url_all + info, headers=self.ses_header)
                                print('--> 获取%d详细信息:%s' % (num_id, res))
                                tmp_all = self.private_get_detail(res.text)
                                tmp.append(tmp_all[0])
                                tmp.append(tmp_all[1])
                        else:
                            tmp.append(cell.string.strip())
                    n += 1
                if get_date:
                    break
                if ok_flag:
                    all_infos.append(tmp)
                else:
                    ok_flag = True
            if get_date:
                break
            print('---->第%d页信息获取完成...' % shanbao_page)
        cols_name = ['序号', '项目名称', '备案号', '建设单位/个人', '建设地点', '公示日期', '环评文件']
        if ifAll:
            cols_name.append('联系人')
            cols_name.append('建设内容及规模')
        print('---> 已收入%d条申报' % len(all_infos))
        list_df = DataFrame(all_infos, columns=cols_name)
        list_df = list_df[list_df['建设单位/个人'] == company]
        file_out = city_name + county_name + key + datetime.now().strftime("%Y%m%d") + '.xlsx'
        list_df.to_excel(self.web_out + file_out, header=True, index=False)
        print('-->申报信息已获取完成,共%d条,数据已保存入%s' % (len(all_infos), file_out))
        print('================================================')
        return file_out

    def getJnteleWebInfo(self, countys='', date='', ifAll=False):
        """Fetch filings for every county and merge them into one Excel file."""
        if countys == '':
            countys = self.jncountys_name
        dats = DataFrame()
        print('****************************************************')
        print('--> 开始获取济南电信环保申报信息:(各区县数据单独保存)')
        for county in countys:
            tmp = self.private_getJnteleByConty(county_name=county, date=date, ifAll=ifAll)
            dats_tmp = pd.read_excel(self.web_out + tmp)
            dats = pd.concat([dats, dats_tmp], axis=0)
        file_out = 'ALL' + datetime.now().strftime("%Y%m%d") + '.xlsx'
        dats.to_excel(self.web_out + file_out, header=True, index=False)
        print('--> 济南电信环保申报信息已保存,合计共%d条' % (dats.shape[0]))
        print('****************************************************')
        return dats

    def matchJnteleInfo(self, key='', file_in='', stations_in='stations.xlsx'):
        """Match base-station names against the downloaded filing data."""
        print('****************************************************')
        print('--> 开始将基站名与环保申报数据相匹配')
        if file_in == '':
            file_in = 'ALL' + datetime.now().strftime("%Y%m%d") + '.xlsx'
        dats = pd.read_excel(self.web_out + file_in)
        if key == '':
            print('---> 无项目关键词过滤')
        else:
            print('---> 已根据项目关键词【%s】进行过滤' % (key))
            dats = dats[[key in project_name for project_name in dats.loc[:, '项目名称']]]
        stations = pd.read_excel(self.dir_out + stations_in)
        print('---> 共%d站点信息' % (stations.shape[0]))
        df_ok = DataFrame(columns=['区县', '站点设计名', '申报编码'])
        num = 0
        for county_name in self.jncountys_name:
            print('---> 开始匹配' + county_name + '环保申报信息>>>>>>>>>>>>>>')
            stations_county = stations[stations['区县'] == county_name]
            print('----> 共获得' + county_name + '站点数:【%d】' % (stations_county.shape[0]))
            dats_county = dats[[county_name in position_name for position_name in dats.loc[:, '建设地点']]]
            print('----> 共获得' + county_name + '申报数:【%d】' % (dats_county.shape[0]))
            for station in stations_county.loc[:, '设计名称'].unique():
                # Stations appear in the filing text as ",<name>基站".
                station_info = ',' + station + '基站'
                for infos, huanbao_id in zip(dats_county['建设内容及规模'], dats_county['备案号']):
                    if station_info in infos:
                        df_ok.loc[num] = [county_name, station, huanbao_id]
                        num = num + 1
            print('---> %s-%s共有%d的申请,实际匹配到%s站' % (key, county_name, dats_county.shape[0], df_ok[df_ok['区县'] == county_name].shape[0]))
        writer = pd.ExcelWriter('%s%s-ok.xlsx' % (self.dir_out, key))
        df_ok.to_excel(writer, 'OK', header=True, index=False)
        print('--> 处理完成,共获取%d个基站的环保信息' % (df_ok.shape[0]))
if __name__ == '__main__':
    # Ad-hoc manual driver: the calls below were run selectively while
    # developing and are kept commented out.
    city_name = '济南'
    county_name = '平阴'
    key = ''
    company = '中国电信股份有限公司济南分公司'
    huanbao_operate = OperateHuanbao()
    # huanbao_file = huanbao_operate.getJnteleWebInfo(date='2018-4-1',ifAll=True)
    # huanbao_operate.private_getJnteleByConty(county_name='平阴',date='2018-4-1')
    # huanbao_operate.session.close()
    # huanbao_operate.matchJnteleInfo(file_in='ALL20180420.xlsx',stations_in='stations2.xlsx')
    # dateend = '2018-4-1'
    # datenow = '2018-4-3'
    # tmp = datestr.split('-')
    # date_time = datetime.strptime(dateend,'%Y-%m-%d')
|
def partition(data, pivot_index, first_index, last_index):
    """Partition data[first_index..last_index] in place around the pivot.

    Moves the value at ``pivot_index`` to its final sorted position within
    the sub-array and returns that position; everything left of it is
    <= pivot and everything right of it is >= pivot (the contract the
    quicksort caller relies on when it excludes the returned index).

    Bug fix: the original Hoare-style scan swapped elements equal to the
    pivot without advancing either index, which loops forever on duplicate
    pivot values (e.g. partition([1, 1], 0, 0, 1) never returned). This
    Lomuto-style rewrite keeps the same contract and terminates.
    """
    # Park the pivot at the end of the range.
    data[pivot_index], data[last_index] = data[last_index], data[pivot_index]
    pivot_value = data[last_index]
    store_index = first_index
    # Sweep once, moving every element smaller than the pivot to the front.
    for i in range(first_index, last_index):
        if data[i] < pivot_value:
            data[i], data[store_index] = data[store_index], data[i]
            store_index += 1
    # Drop the pivot into its final slot.
    data[store_index], data[last_index] = data[last_index], data[store_index]
    return store_index
def quicksort(data, begin, end):
    """Recursively sort data[begin..end] in place using partition()."""
    if begin < end:
        split_at = partition(data, begin, begin, end)
        quicksort(data, begin, split_at - 1)
        quicksort(data, split_at + 1, end)
if __name__ == '__main__':
data = [1, 5, 3, 7, 2, 9, 6]
print quick(data, 0, len(data) - 1)
print data
|
# -*- coding: utf-8 -*-
import os
import sys
from elftools.elf.elffile import *
from elftools.common.exceptions import ELFError
import modules
from helper import Helper
class BinAnalyzer:
    # Walks a file or directory tree, collects ELF binaries and runs the
    # selected analysis modules over them.
    # (Python 2 codebase: `except Error, e` syntax and the bare `print`
    # statement are used below.)

    def __init__(self, args):
        self.args = args
        self.filelist = []               # absolute paths of ELF files to scan
        self.mod_list = modules.__all__  # all available module classes
        self.mods_choice = None          # module names chosen via args.mods
        # NOTE(review): bare attribute access with no effect — presumably a
        # leftover from a refactor; safe to delete.
        args.no_color
        self.helper = Helper(not args.no_color)
        if args.mods != None:
            self.mods_choice = args.mods.split(',')
        if args.list:
            # --list mode: print the available modules and exit immediately.
            self.helper.print_title("Modules availables")
            for m in self.mod_list:
                mod = m()
                s = " %s - %s\n" %(mod.name.ljust(20), mod.desc)
                self.helper.print_normal(s)
            sys.exit(0)

    def isELF(self, filename):
        # Return (is_valid, text): whether `filename` parses as ELF, plus a
        # short status string (the e_machine field, "NOT ELF" or "IO ERROR").
        is_valid = True
        try:
            with open(filename, "rb") as f:
                try:
                    elffile = ELFFile(f)
                    text = elffile["e_machine"]
                except ELFError, e:
                    is_valid = False
                    text = "NOT ELF"
        except IOError, e:
            is_valid = False
            text = "IO ERROR"
        return is_valid, text

    def create_list_of_binaries(self):
        # Populate self.filelist from a single file or a directory walk.
        if os.path.isfile(self.args.file):
            self.helper.print_title("Analysing standalone binary")
            self.include_file_list('',self.args.file)
        elif os.path.isdir(self.args.file):
            self.helper.print_title("Creating list of binaries")
            for (folder, _, files) in os.walk(self.args.file):
                for f in files:
                    self.include_file_list(folder,f)
        self.helper.print_normal("\n Found %d binaries\n\n" %(len(self.filelist)))

    def include_file_list(self, folder, f):
        # Add `folder/f` to the scan list if it is a valid ELF binary,
        # optionally echoing the per-file verdict in verbose mode.
        path = os.path.abspath(os.path.join(folder, f))
        iself, text = self.isELF(path)
        if self.args.verbose > 0:
            self.helper.print_normal("  %s|" %(path.ljust(60)))
        if iself:
            self.filelist.append(path)
            if self.args.verbose > 0:
                self.helper.print_good(text)
                self.helper.print_normal("\n")
        else:
            if self.args.verbose > 0:
                self.helper.print_bad(text)
                self.helper.print_normal("\n")

    def create_out_dir(self):
        # Create the output directory, or warn that prior results are reused.
        out_dir = os.path.abspath(self.args.out_dir)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        else:
            self.helper.print_warning("%s exists, using previous results" %(out_dir))
            self.helper.print_normal("\n")

    def pre_scan(self):
        # Build the binary list, prepare the output directory and narrow
        # self.mod_list according to the include/exclude module selection.
        self.create_list_of_binaries()
        if self.args.out_dir:
            self.create_out_dir()
        temp_list = list(self.mod_list)
        if self.mods_choice != None:
            if self.args.exclude:
                # --exclude: drop the named modules.
                for m in temp_list:
                    mod = m()
                    if mod.name in self.mods_choice:
                        self.mod_list.remove(m)
                    del mod
            else:
                # Default: keep only the named modules.
                for m in temp_list:
                    mod = m()
                    if mod.name not in self.mods_choice:
                        self.mod_list.remove(m)
                    del mod
        else:
            self.mod_list = []
            self.helper.print_warning("select modules to run")
            self.helper.print_normal("\n")

    def scan(self):
        # Run every remaining module over the collected file list.
        for m in self.mod_list:
            mod = m()
            mod.run(self.helper, self.filelist, self.args)
            print
|
# Number of ways to climb n steps taking 1 or 2 at a time, modulo 10007.
n = int(input())
# DP table sized at least 3 so the base-case writes below never go out of
# range (the original crashed with IndexError for n == 0).
ans = [0] * (max(n, 2) + 1)
ans[1] = 1
if n >= 2:
    ans[2] = 2
for i in range(3, n + 1):
    # Reduce modulo 10007 as we go to keep the integers small; the final
    # result is unchanged.
    ans[i] = (ans[i - 1] + ans[i - 2]) % 10007
print(ans[n] % 10007)
'''
1 2 3 5 8 13 21 34 55
'''
# Collect the odd numbers from 1 to X (inclusive) and print them as a list.
X = int(input("Digite um número"))
lista = [numero for numero in range(1, X + 1) if numero % 2 != 0]
print(lista)
|
from common.base import Base
from selenium import webdriver
import unittest
from time import sleep
login_url = "http://127.0.0.1:81/zentao/user-login-L3plbnRhby8=.html"
class LoginPage(Base):
    """Page object for the ZenTao login page."""

    # Element locators as (strategy, value) pairs.
    loc_user = ("id", "account")
    loc_pwd = ("name", "password")
    loc_button = ("id", "submit")
    loc_keep = ("id", "keepLoginon")
    loc_forget_pwd = ("link text", "忘记密码")

    def input_user(self, text=""):
        """Type the account name into the user field."""
        self.sendKeys(self.loc_user, text)

    def input_pwd(self, text=""):
        """Type the password into the password field."""
        self.sendKeys(self.loc_pwd, text)

    def click_keep(self):
        """Toggle the "keep me logged in" checkbox.

        Bug fix: the original called sendKeys() on the checkbox; a checkbox
        is clicked, not typed into.
        """
        self.click(self.loc_keep)

    def click_forget_pwd(self):
        """Open the "forgot password" link."""
        self.click(self.loc_forget_pwd)

    # Backward-compatible alias for callers using the original misspelled
    # method name.
    clivk_forget_pwd = click_forget_pwd

    def click_login_button(self):
        """Submit the login form."""
        self.click(self.loc_button)

    # def get_login_name(self):
    #     user = self.get_text()
# if __name__ == '__main__':
# unittest.main()
# from time import sleep
# driver = webdriver.Chrome()
# login_page = LoginPage(driver)
# driver.get(login_url)
# login_page.input_user("admin")
# login_page.input_pwd("123456")
# login_page.click_login_button()
# sleep(2)
# driver.quit()
|
from models.reviews import ReviewSubmittal
from models.reviews import Review
import fastapi
from typing import Optional, List
from models.location import BeerPlace, Location
from services import review_service, beerplace_service
router = fastapi.APIRouter()


@router.get('/api/random_beerplace')
async def random_beerplace():
    """Return a randomly chosen beer place, or a 500 with the error text."""
    try:
        return await beerplace_service.get_random_beerplace_async()
    except Exception as x:
        # Bug fix: was `fastapi.Resoponse` (typo), which raised
        # AttributeError on every error path instead of returning a 500.
        return fastapi.Response(content=str(x), status_code=500)


@router.get('/api/beerplaces', name='all_beerplaces')
async def beerplaces_get() -> List[BeerPlace]:
    """List every known beer place."""
    return await beerplace_service.get_beerplaces_async()


@router.post('/api/beerplaces', name='add_beerplaces', status_code=201)
async def beerplaces_post(location: Location, name: str) -> BeerPlace:
    """Create a new beer place.

    Route fix: the path was 'api/beerplaces' (missing leading '/'),
    inconsistent with the GET route and registering the wrong URL.
    """
    return await beerplace_service.add_beerplace(location, name)


@router.get('/api/reviews', name='all_reviews')
async def reports_get() -> List[Review]:
    """List every review."""
    return await review_service.get_reviews_async()


@router.post('/api/reviews', name='add_review', status_code=201)
async def reviews_post(review_subittal: ReviewSubmittal) -> Review:
    """Create a review from the submitted payload.

    Route fix: leading '/' added, as for the beerplaces POST route.
    """
    description = review_subittal.description
    beerplace = review_subittal.beerplace
    rating = review_subittal.rating
    return await review_service.add_review(description, beerplace, rating)
from grpc_tools import protoc
# Compile todo.proto into Python message classes (todo_pb2.py) and gRPC
# service stubs (todo_pb2_grpc.py), both written next to the .proto file.
# The leading '' stands in for argv[0], which protoc.main expects and ignores.
protoc.main((
    '',
    '-I./todo/proto',
    '--python_out=./todo/proto',
    '--grpc_python_out=./todo/proto',
    './todo/proto/todo.proto',
))
)) |
from openspending.model import Dataset
from openspending.test import DatabaseTestCase, helpers as h
class MockEntry(dict):
    """Dict-backed stand-in for an entry record with a fixed name and label."""

    name = "testentry"
    label = "An Entry"

    def __init__(self):
        # Mirror the class attributes into the mapping itself.
        self.update(name=self.name, label=self.label)
def make_dataset():
    """Create (but do not save) a Dataset fixture named 'testdataset'."""
    return Dataset(name='testdataset')
class TestDataset(DatabaseTestCase):
    # Unit tests for the Dataset model (Python 2 codebase: note the
    # `.next()` iterator call and two-argument super()).

    def setup(self):
        # Fresh, saved dataset for every test.
        super(TestDataset, self).setup()
        self.dat = make_dataset()
        self.dat.save()

    def test_dataset_properties(self):
        assert self.dat.name == 'testdataset'

    def test_get_regions(self):
        # A new dataset starts with no regions.
        assert self.dat.get_regions() == []

    def test_add_region(self):
        self.dat.add_region("region 1")
        assert self.dat.get_regions() == ["region 1"]

    def test_add_region_ignores_duplicates(self):
        self.dat.add_region("region 1")
        self.dat.add_region("region 1")
        assert self.dat.get_regions() == ["region 1"]

    def test_distinct_regions(self):
        # Regions are deduplicated across datasets.
        b = make_dataset()
        self.dat.add_region("region 1")
        self.dat.add_region("region 2")
        b.add_region("region 1")
        self.dat.save()
        b.save()
        assert Dataset.distinct_regions() == ["region 1", "region 2"]

    def test_find_by_region(self):
        self.dat.add_region("region 1")
        self.dat.save()
        # find_by_region returns an iterator; take its first element.
        assert Dataset.find_by_region("region 1").next() == self.dat

    def test_entry_custom_html(self):
        # The custom HTML snippet round-trips through save/find.
        assert self.dat.entry_custom_html is None
        self.dat.entry_custom_html = '<span>custom html</span>'
        self.dat.save()
        assert Dataset.find_one().entry_custom_html == '<span>custom html</span>'

    def test_render_entry_custom_html_none(self):
        # No template set -> rendering yields None.
        h.assert_equal(self.dat.render_entry_custom_html(MockEntry()), None)

    def test_render_entry_custom_html_plain_text(self):
        # Text without template markup is passed through unchanged.
        self.dat.entry_custom_html = 'No templating.'
        self.dat.save()
        h.assert_equal(self.dat.render_entry_custom_html(MockEntry()),
                       'No templating.')

    def test_render_entry_custom_html_genshi_template(self):
        # Genshi-style ${...} expressions are expanded from the entry.
        self.dat.entry_custom_html='${entry.name}: ${entry.label}'
        self.dat.save()
        h.assert_equal(self.dat.render_entry_custom_html(MockEntry()),
                       'testentry: An Entry')
|
import os

print("asdfsadf")
# Run sqlmap against every URL listed in sqlmap_list.txt, one per line.
with open("sqlmap_list.txt") as f:
    for line in f:
        # Bug fix: lines read from a file keep their trailing newline, which
        # corrupted the assembled command; strip it (and skip blank lines).
        url = line.strip()
        if not url:
            continue
        print(url)
        # SECURITY NOTE: building a shell command from file contents allows
        # command injection if the list is not trusted; prefer
        # subprocess.run([...], shell=False) with an argument list.
        os.system("python sqlmap\sqlmap.py " + url + ' --timeout=2')
#munchkin_invaders.py
import sys
import pygame
from pygame.sprite import Group
#import class 'Settings' from settings.py
from settings import Settings
from game_stats import GameStats
from scoreboard import Scoreboard
from button import Button
#import class 'Ship' from ship.py
from ship import Ship
from alien import Alien
#import game_functions.py
import game_functions as gf
def run_game():
    """Initialize game and create a screen object, then run the main loop."""
    pygame.init()
    infrompy_settings = Settings()
    # Screen size comes from the settings object; the argument is a tuple.
    screen = pygame.display.set_mode((infrompy_settings.screen_width, infrompy_settings.screen_height))
    pygame.display.set_caption("Munchkin Invaders")
    # make a play button
    play_button = Button(infrompy_settings, screen, "Play")
    # Create an instance to store game stats and create a scoreboard
    stats = GameStats(infrompy_settings)
    sb = Scoreboard(infrompy_settings, screen, stats)
    # Make a ship, a group of bullets and a group of aliens.
    # Create the ship before the main loop so that we don't create a new
    # ship on each pass.
    ship = Ship(infrompy_settings, screen)
    # Make a group to store bullets in
    bullets = Group()
    # Aliens also live in a sprite Group; individual Alien instances are
    # created by gf.create_fleet below.
    aliens = Group()
    # create fleet of aliens
    gf.create_fleet(infrompy_settings, screen, ship, aliens)
    # Start main loop for the game.
    while True:
        # Event handling was refactored out into game_functions.py.
        gf.check_events(infrompy_settings, screen, stats, sb, play_button, ship, aliens, bullets)
        if stats.game_active:
            ship.update()
            gf.update_bullets(infrompy_settings, screen, stats, sb, ship, aliens, bullets)
            gf.update_aliens(infrompy_settings, stats, sb, screen, ship, aliens, bullets)
        # Redraw every frame, even while the game is inactive (so the Play
        # button stays visible).
        gf.update_screen(infrompy_settings, screen, stats, sb, ship, aliens, bullets, play_button)


# Kick off the game when the module is executed.
run_game()
|
from django.http import HttpResponse
from dialog_manager.serivices import get_all_intent
def index(request):
    """Render every known intent as a plain HTTP response."""
    return HttpResponse(get_all_intent())
|
from random import randint
import string
import requests
class Game:
    """A random 9-letter grid with word validation against an online dictionary."""

    # [:27] is a no-op slice: ascii_uppercase only has 26 letters.
    LETTERS = string.ascii_uppercase[:27]

    def __init__(self):
        # Draw nine random uppercase letters.
        self.grid = [self.LETTERS[randint(0, 25)] for _ in range(0, 9)]

    def is_valid(self, grid):
        """Return True if `grid` is an uppercase word of <= 7 letters found
        in the online dictionary; False otherwise."""
        if any(letter not in self.LETTERS for letter in grid):
            return False
        if len(grid) > 7:
            return False
        return self.__check_dictionary(grid)

    def __check_dictionary(self, word):
        # Remote lookup; returns the 'found' flag from the JSON payload.
        r = requests.get(f"https://wagon-dictionary.herokuapp.com/{word}")
        response = r.json()
        return response['found']
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-12-19 04:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `item_id` CharField to `httptest`.
    # `preserve_default=False` because the default (0) is only used to
    # back-fill existing rows while the migration runs.

    dependencies = [
        ('nova', '0063_auto_20171219_1153'),
    ]

    operations = [
        migrations.AddField(
            model_name='httptest',
            name='item_id',
            field=models.CharField(default=0, max_length=20, verbose_name='item_id'),
            preserve_default=False,
        ),
    ]
|
import gym
from gym import wrappers, logger
import gridworld2
import numpy as np
import matplotlib
matplotlib.use("TkAgg")
from collections import defaultdict
''' init env'''
env = gym.make('gridworld-v1')
outdir = 'gridworld-v1/random-agent-results'
env = wrappers.Monitor(env, directory=outdir, force=True, video_callable=False)
# Plan file plus a reward map: cell type -> reward.
env.setPlan("gridworldPlans/plan5.txt", {0: -0.001, 3: 1, 4: 1, 5: -1, 6: -1})
''' init values '''
# Q-table: state -> 4-element action-value vector, zero-initialised on demand.
qvalues = defaultdict(lambda : np.zeros(4))
rsum = 0
pause = 0.01
nb_actions = 4
''' hyperparameters '''
alpha = 0.001        # learning rate
gamma = 0.99         # discount factor
tau = 0.999          # softmax temperature, decayed each episode
episode_count = 10000
# Q-learning with Boltzmann (softmax) exploration.
for i in range(episode_count):
    current_state = env.reset()
    if(i):
        # Decay the temperature; stop once exploration becomes degenerate.
        tau *= 0.999
        if(tau<=0.01):
            print('Temperature too small, have to stop \n End at episode : {}'.format(str(i)))
            break
    rsum,reward,j = 0,0,0
    env.verbose = (i % 100 == 0 and i > 0)
    # NOTE(review): the next line makes the assignment above dead code —
    # rendering is always disabled; presumably left over from debugging.
    env.verbose = False
    if env.verbose:
        env.render(pause)
    while(True):
        current_reward = reward
        j += 1
        # Softmax over the Q-values gives the action distribution.
        proba = np.exp(np.array(qvalues[current_state])/tau)
        proba = proba/np.sum(proba)
        action = np.random.choice(nb_actions,p=proba)
        obs,reward,done,_ = env.step(action)
        rsum += reward
        # Standard Q-learning (off-policy TD) update.
        qvalues[current_state][action] += alpha * (reward + gamma*np.max(qvalues[obs]) - qvalues[current_state][action])
        current_state = obs
        if env.verbose:
            env.render(pause)
        if(done):
            print("Episode : " + str(i) + " rsum=" + str(rsum) + ", " + str(j) + " actions")
            break
print("done")
env.close()
|
from .Node import Node
from .Stack import Stack
from .Queue import Queue
from .Link import Link
from .Bag import Bag
# Names exported by `from <package> import *` — the conventional spelling
# for this list is __all__, which Python actually honours.
__all__ = ['Node', 'Stack', 'Queue', 'Link', 'Bag']
# Historical alias kept for backwards compatibility with existing callers.
__about__ = __all__
|
#!/usr/bin/env python
from setuptools import setup
# Trove classifiers describing maturity, audience, license and platforms.
CLASSIFIERS = """\
Development Status :: 4 - Beta
Intended Audience :: Science/Research
License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
Operating System :: MacOS
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Programming Language :: Python
Topic :: Scientific/Engineering
"""

# Package metadata; the classifier list drops the blank line left by the
# trailing newline of CLASSIFIERS.
setup(
    name="ensalada",
    version="0.0.1",
    author="James Priestley, Zhenrui Liao",
    author_email="zhenrui.liao@columbia.edu, jbp2150@columbia.edu",
    description=("Supervised latent Dirichlet allocation for neural data analysis"),
    license="GNU GPLv2",
    keywords="supervised topic model neural ensemble",
    packages=['ensalada'],
    classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
    install_requires=[],
)
|
import pytest
import common
import sync
class TestSync:
    """Tests for sync.lambda_handler against mocked Box and DynamoDB clients."""

    @pytest.fixture(autouse=True)
    def monkeypatch_clients(self, monkeypatch, mock_ddb_table, mock_box_client):
        # Route the client factories in `common` to the test doubles.
        monkeypatch.setattr(common, "get_ddb_table", lambda: mock_ddb_table)
        monkeypatch.setattr(common, "get_box_client", lambda: (mock_box_client, "some-webhook-key"))

    def test_sync_empty(self, ddb_items):
        # Syncing an empty world leaves the table empty.
        sync.lambda_handler({}, None)
        assert len(ddb_items) == 0

    def test_sync(
        self,
        ddb_items,
        create_folder,
        create_file,
        create_shared_file,
        create_shared_folder,
        managed_folder,
        create_shared_link,
        mock_box_client,
    ):
        # supposed to exist after sync
        shared_folder = create_shared_folder(parent_folder=managed_folder)
        correct_file = create_shared_file(parent_folder=shared_folder)
        ddb_items.append(common.make_ddb_item(correct_file))
        # supposed to exist after sync
        missing_file = create_shared_file(parent_folder=shared_folder)
        # not supposed to exist after sync
        no_longer_shared_file = create_file(parent_folder=managed_folder)
        ddb_items.append(
            {
                "filepath": common.get_filepath(no_longer_shared_file),
                "box_file_id": no_longer_shared_file.id,
                "download_url": "some-bogus-download-url",
            }
        )
        # not supposed to exist after sync
        ddb_items.append(
            {
                "filepath": "some/deleted/file.dat",
                "box_file_id": "123456789",
                "download_url": "some-other-bogus-download-url",
            }
        )
        # file in a shared folder that's missing from ddb
        # supposed to exist after sync
        unshared_file = create_file(parent_folder=shared_folder)
        # shared file in an unshared folder, not supposed to exist after sync
        # NOTE(review): the two calls below use `parent=` where the calls
        # above use `parent_folder=` — confirm both keyword spellings are
        # accepted by the fixtures, or this is a latent TypeError.
        unshared_folder = create_folder(parent=managed_folder)
        shared_file = create_shared_file(parent=unshared_folder)
        sync.lambda_handler({}, None)
        assert len(ddb_items) == 3
        file_ids = {i["box_file_id"] for i in ddb_items}
        assert file_ids == {correct_file.id, missing_file.id, unshared_file.id}
        assert common.is_box_object_public(shared_file) is False

    def test_sync_ddb_paging(self, ddb_items):
        # Insert more defunct items than fit in one scan page so the sync
        # must paginate; all of them should be removed.
        # NOTE(review): the 5 presumably mirrors the mock table's page size —
        # confirm against the fixture.
        for i in range(5 * 2 + 1):
            ddb_items.append(
                {
                    "filepath": f"some/defunct/file-{i}.dat",
                    "box_file_id": f"123456{i}",
                    "download_url": f"some-defunct-download-url-{i}",
                }
            )
        sync.lambda_handler({}, None)
        assert len(ddb_items) == 0
|
import web
import app
import application.models.model_main as model_main
render = web.template.render('application/views/carrito/', base="master.html")
class Pagar():
    """web.py controller for the payment ("pagar") page."""

    def GET(self):
        # Render the payment template; on failure return the error text
        # instead of raising, keeping the page responsive while debugging.
        try:
            return render.pagar()
        except Exception as e:
            return "Error Pagar Controller" + str(e.args)

    def POST(self):
        # POST renders the same template as GET.
        try:
            return render.pagar()
        except Exception as e:
            return "Error Pagar Controller POST: " + str(e.args)
|
def count_vowels_consonants(word):
    """Count the vowels and consonants in `word`.

    Y/y is treated as a vowel. Non-alphabetic characters (spaces, digits,
    punctuation) are ignored — the original counted every non-vowel
    character, including punctuation, as a consonant.

    Returns a dict: {"vowels": int, "consonants": int}.
    """
    vowels = "aeiouyAEIOUY"
    result_dictionary = {"vowels": 0, "consonants": 0}
    for char in word:
        if char in vowels:
            result_dictionary["vowels"] += 1
        elif char.isalpha():
            result_dictionary["consonants"] += 1
    return result_dictionary
# Demo: expected output is {'vowels': 4, 'consonants': 4}.
print(count_vowels_consonants("aaaAcccD"))
|
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from KnowledgeEngine import KnowledgeEngine
import nltk
import pprint
from practnlptools.tools import Annotator
from NLPEngine import NaturalLanguageProcessor
from AnswerEngine import AnswerEngine
from Engine import Engine
"Engine main class"
class MainEngine(Engine):
    """Top-level conversational engine wiring NLP, knowledge and answering."""

    def __init__(self):
        print("Engine started")
        self.Knowledge = KnowledgeEngine()
        self.NLP = NaturalLanguageProcessor();
        self.AnswerEngine = AnswerEngine();

    def reply(self, inputText):
        # Gets a user response and returns a reply:
        # parse the input, refresh knowledge for the parsed state, feed both
        # into the answer engine and return its generated response.
        self.NLP.mainProcedure(inputText)
        self.Knowledge.resetKnowledge()
        self.Knowledge.requestKnowledge(self.NLP.getCurrentState())
        self.AnswerEngine.injectKnowledge(self.Knowledge.getKnowledge())
        self.AnswerEngine.injectNLP(self.NLP.getCurrentState())
        return(self.AnswerEngine.generateResponse())

    def startConversation(self):
        # Fixed opening line used to start a new conversation.
        reply = 'Hello there, what would you like to talk about?'
        return reply
|
import sys
input = sys.stdin.readline
#N個の変数v_1, ..., v_n
#Q個のクエリ
#各クエリはv_aにwを加えるという操作
#answerは各クエリに対してv_1 + ... + v_aを求める
#クエリごとにどんどんv_1, ..., v_nは更新される
#x-1となっているのは、リストが0番目からになっているから。
#x&(-x)で2進数で表した場合の最も下位にある1の位置を取り出すことができる。
#例えば,10 = 1010なら10を返し、7 = 111なら1を返す。
def main():
    """Answer Q point-update / range-sum queries with a Fenwick tree (BIT).

    Input: N Q, then N initial values, then Q queries. A query "0 p x" adds
    x to element p (0-indexed); a query "1 l r" reports the sum over [l, r).
    """
    N, Q = map( int, input().split())
    A = list( map( int, input().split()))
    bit = [0]*N

    def add(bit, a, w):
        # Add w to element a (1-indexed) of the tree.
        x = a
        while x <= N:
            bit[x-1] += w
            # x & (-x) isolates the lowest set bit of x:
            # e.g. 10 = 0b1010 yields 0b10 (= 2); 7 = 0b111 yields 1.
            x += x&(-x)

    def sums(bit, a):
        # Prefix sum of the first a elements (1-indexed).
        x = a
        S = 0
        while x > 0:
            S += bit[x-1]
            x -= x&(-x)
        return S

    # Load the initial values into the tree.
    for i, a in enumerate(A):
        add(bit,i+1,a)
    # Note: Q is rebound from the query count to the list of queries here.
    Q = [ tuple( map( int, input().split())) for _ in range(Q)]
    ANS = []
    for q,p,x in Q:
        if q == 0:
            # Point update: +x at 0-indexed position p.
            add(bit,p+1,x)
        else:
            # Range sum over the half-open interval [p, x).
            ANS.append( sums(bit,x)-sums(bit,p))
    print( "\n".join( map( str, ANS)))


if __name__ == '__main__':
    main()
|
def count_letters_1_to_1000():
    """Project Euler 17: total letters writing 1..1000 in British English.

    Spaces and hyphens are not counted; "and" is used as in
    "three hundred and forty-two". Ported from Python 2: the original used
    a `print` statement and bare `map` objects, which under Python 3 are
    single-use iterators that cannot be summed twice or sliced.
    """
    ones = ['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
            'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
            'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen']
    tens = ['twenty', 'thirty', 'forty', 'fifty', 'sixty', 'seventy',
            'eighty', 'ninety']
    ones_len = [len(w) for w in ones]   # lists, so they can be reused/sliced
    tens_len = [len(w) for w in tens]
    h_len = len('hundred')
    a_len = len('and')

    # 1..19
    a = sum(ones_len)
    # 20..99: each tens word appears 10 times; each unit word ("one".."nine")
    # appears once in each of the 8 tens groups (twenty..ninety).
    b = sum(tens_len) * 10 + sum(ones_len[0:9]) * 8
    # 100..999
    c = sum(ones_len[0:9]) * 100   # leading digit word, 100 numbers each
    c += h_len * 100 * 9           # "hundred" in every three-digit number
    c += 99 * 9 * a_len            # "and" in all but the round hundreds
    c += (a + b) * 9               # the trailing 1..99 part per hundred block
    # 1000
    d = len('one') + len('thousand')
    return a + b + c + d


print(count_letters_1_to_1000())
|
# Read a square's side length (cm) and print its area, perimeter, and the
# circumference of a circle with the same radius.
# (All user-facing strings are intentionally in Czech.)
a=float(input("zadej delku strany v cm: "))
# True when the entered length is negative, i.e. invalid.
nepravdiva_hodnota=a<0
if nepravdiva_hodnota:
    print("debile")
elif a==0:
    print("Nulova delka? Rly?")
else:
    print("Obsah ctverce se stranou", a, "je", a*a, "cm2.")
    print("Obvod ctverce se stranou", a, "je", 4*a, "cm.")
    print("Kdyby to byl kruh, tak s polomerem", a, "by mel obvod", 2*3.1415*a, "cm.")
# Printed regardless of the branch taken above.
print("A todle se tady ukaze vzdycky.")
|
from RatVenture_functions import *
from RatVenture_classes import *
import sys
#Default values
v_filename = "save.txt"          # save-game file name
v_location="0,0"                 # player position encoded as an "x,y" string
v_day = 1                        # in-game day counter
v_rat_encounter = False          # True once the open-field rat has been fought
v_town_locations = ["0,0", "3,1", "5,2", "1,3", "4,6"]  # coordinates of towns
v_orb_location = setOrbLocation(v_town_locations)       # town hiding the orb
v_rat_king_alive = True          # the game ends when the Rat King dies
#Display Main Menu
while(True):
mainMenu()
option = int(input("Enter your option: "))
if(option == 1):
#Creates a new game using newGame() function and receives player object
player = newGame()
break
elif(option == 2):
player, v_location, v_day = resumeGame(v_filename)
break
elif(option == 3):
print("The game will now exit.")
exitGame()
else:
print("Invalid option. Please enter again.")
#Main program
while(True):
# If player is in a town
if(checkLocation(v_location, v_town_locations) == "You are in a town"):
# If orb in town
if (v_location == v_orb_location and player.orb == False):
player = pickOrb(player)
townMenu(v_day)
choice = int(input("Enter your choice: "))
# View Character
if(choice == 1):
viewCharacter(player)
continue
# View Map
elif(choice == 2):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
continue
# Move
elif(choice == 3):
while(True):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("W = up; A = left; S = down; D = right")
direction = input("Your Move: ")
if(move(v_location, direction, v_day) != 0):
v_location, v_day = move(v_location, direction, v_day)
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("Day " + str(v_day) + " " + checkLocation(v_location, v_town_locations))
break
# Rest
elif(choice == 4):
v_day, player.health = rest(v_day, player.health)
continue
# Save Game
elif(choice == 5):
saveGame(player.health, v_location, v_day)
continue
# Exit Game
elif(choice == 6):
exitGame()
#User inputs invalid option
else:
print("Invalid option")
continue
# Rat encounter
elif(checkLocation(v_location, v_town_locations) == "You are in the open" and v_rat_encounter == False):
enemy = Entity('Rat', 10, '1-3', 1)
in_combat = True
while(in_combat):
combatMenu(enemy)
combatChoice = input("Enter Choice: ")
# Attack
if(combatChoice == '1'):
player, enemy, status = attack(player, enemy, player.orb)
if(status == 2):
continue
elif(status == 0):
print('The rat is dead! You are victorious!')
in_combat = False
v_rat_encounter = True
elif(status == 1):
print('You died. Game over.')
exitGame()
# Run
elif(combatChoice == '2'):
run()
outdoorMenu()
outdoorChoice = input("Enter choice: ")
# View Character
if(outdoorChoice == '1'):
viewCharacter(player)
# Rat encounter (Health is reset)
enemy = Entity('Rat', 10, '1-3', 1)
# View Map
elif(outdoorChoice == '2'):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
# Rat encounter (Health is reset)
enemy = Entity('Rat', 10, '1-3', 1)
# Move
elif(outdoorChoice == '3'):
in_combat = False
while(True):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("W = up; A = left; S = down; D = right")
direction = input("Your Move: ")
if(move(v_location, direction, v_day) != 0):
v_location, v_day = move(v_location, direction, v_day)
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("Day " + str(v_day) + ". " + checkLocation(v_location, v_town_locations))
break
# Exit Game
elif(outdoorChoice == '4'):
exitGame()
else:
print("Invalid option. Please try again.")
else:
print("Invalid option. Please try again.")
continue
# Rat King encounter
elif(checkLocation(v_location, v_town_locations) == "You see the Rat King!" and v_rat_king_alive == True):
enemy = Entity('Rat King', 25, '8-12', 5)
in_combat = True
while(in_combat):
combatMenu(enemy)
combatChoice = input("Enter Choice: ")
# Attack
if(combatChoice == '1'):
player, enemy, status = attack(player, enemy, player.orb)
if(status == 2):
continue
elif(status == 0):
print('The Rat King is dead! You are victorious!')
in_combat = False
v_rat_king_alive = False
exitGame()
elif(status == 1):
print('You died. Game over.')
exitGame()
# Run
elif(combatChoice == '2'):
run()
outdoorMenu()
outdoorChoice = input("Enter choice: ")
# View Character
if(outdoorChoice == '1'):
viewCharacter(player)
# Rat encounter (Health is reset)
enemy = Entity('Rat King', 25, '8-12', 5)
# View Map
elif(combatChoice == '2'):
print(viewMap(v_location))
# Rat encounter (Health is reset)
enemy = Entity('Rat King', 25, '8-12', 5)
# Move
elif(combatChoice == '3'):
in_combat = False
while(True):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("W = up; A = left; S = down; D = right")
direction = input("Your Move: ")
if(move(v_location, direction, v_day) != 0):
v_location, v_day = move(v_location, direction, v_day)
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("Day " + str(v_day) + ". " + checkLocation(v_location, v_town_locations))
break
# Exit Game
elif(outdoorChoice == '4'):
exitGame()
else:
print("Invalid option. Please try again.")
else:
print("Invalid option. Please try again.")
continue
# If player is in the open and has already encountered a rat
elif(checkLocation(v_location, v_town_locations) == "You are in the open"):
outdoorMenu()
outdoorChoice = input("Enter choice: ")
# View Character
if(outdoorChoice == '1'):
viewCharacter(player)
# View Map
elif(outdoorChoice == '2'):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
# Move
elif(outdoorChoice == '3'):
while(True):
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("W = up; A = left; S = down; D = right")
direction = input("Your Move: ")
if(move(v_location, direction, v_day) != 0):
v_location, v_day = move(v_location, direction, v_day)
print(viewMap(v_location, v_town_locations, v_orb_location, player.orb))
print("Day " + str(v_day) + ". " + checkLocation(v_location, v_town_locations))
break
# Exit Game
elif(outdoorChoice == '4'):
exitGame()
else:
print("Invalid option. Please try again.")
|
"""
Server to guess what ... serving ludos model.
This service is running a zmq client/server interface
to run the inference.
To access it, just connect to the server using
```
socket = context.socket(zmq.REQ)
socket.connect("tcp://IP_OF_THE_SERVER:PORT_OF_THE_SERVER")
```
The server expected request format and serialized in a specifc way.
The request should be a dict with three keys
1. model_id which reference the model to use
2. predict_method which store the name of the method you want to run
3. kwargs which store the argument of the method
Then this request should be pickled/compressed using
```
req = pickle.dumps(request, protocol)
req = zlib.compress(req)
```
before being sent to the
"""
import hashlib
import json
import logging
import pickle
import traceback
import zlib
import box
from box import Box
import zmq
from ludos.models import common
def get_logger():
    """Return the module logger, configured to emit DEBUG to stderr.

    The stream handler is attached only on the first call; the original
    added a new handler on every call, duplicating each log line.
    """
    log = logging.getLogger(__name__)
    if not log.handlers:
        ch = logging.StreamHandler()
        formatter = logging.Formatter(
            '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
        ch.setFormatter(formatter)
        ch.setLevel(logging.DEBUG)
        log.addHandler(ch)
    log.setLevel(logging.DEBUG)
    return log
class RequestFormatError(ValueError):
    """Raised when an incoming request is missing a required key."""
# HTTP-like status codes reported back to the client for each outcome.
CODES = {
    'success': 200,
    'exceptions': {
        common.ModelLoadingError: 401,    # model could not be loaded
        ValueError: 404,
        AttributeError: 404,
        TypeError: 404,
        RuntimeError: 404,
        box.exceptions.BoxKeyError: 401,  # unknown model_id in self.models
        RequestFormatError: 402           # malformed request payload
    }
}
class LudosServer(object):
    """
    Simple server exposing models inference via a client/server zeroMQ interface.

    The server expects requests serialized in a specific way: a dict with
    three keys
      1. model_id: Name of the model in the registry
      2. predict_method: which store the name of the method you want to run
      3. predict_kwargs: which store the argument of the method
    The request must be pickled then zlib-compressed before being sent;
    the response is likewise decompressed and unpickled on the client.

    Full client side workflow below:
    ```
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://IP_OF_THE_SERVER:PORT_OF_THE_SERVER")
    req = zlib.compress(pickle.dumps(request, protocol))
    socket.send(req,flags = 0)
    response = socket.recv(flags = 0)
    response = pickle.loads(zlib.decompress(response))
    ```
    """
    def __init__(self, host: str = '*', port: int = 5555):
        """
        Args:
            host (str): IP address of the host ('*' binds all interfaces)
            port (int): Port to access the server
        """
        self.context = zmq.Context()
        # REP socket: strict request/reply pairing with the client's REQ.
        self.socket = self.context.socket(zmq.REP)
        self.socket.bind("tcp://{}:{}".format(host, port))
        # Loaded models keyed by model_id (Box allows attribute-style access).
        self.models = Box()
        self.host = host
        self.port = port
        self.logger = get_logger()

    def send_response(self,
                      payload,
                      status_code: int,
                      protocol: int = 2,
                      flags: int = 0):
        """
        Send the response to the client.

        Args:
            payload (obj): output of the model
            status_code (int): exit status (see CODES)
            protocol (int): Protocol to pickle the msg. Use 2
                to talk to python2
            flags (int): zmq send flags, forwarded verbatim
        """
        response = dict()
        response['payload'] = payload
        response['status_code'] = status_code
        p = pickle.dumps(response, protocol)
        z = zlib.compress(p)
        return self.socket.send(z, flags=flags)

    def receive_request(self, flags: int = 0):
        """
        Receive request.

        A request is made of three attributes
            request = dict(model_id='cctv_expert',
                           predict_method='predict_prob',
                           predict_kwargs=...)

        Raises:
            RequestFormatError: if any of the three required keys is missing.

        Returns:
            Deserialized request sent by the client.
        """
        z = self.socket.recv(flags)
        p = zlib.decompress(z)
        # SECURITY NOTE(review): pickle.loads on network data is unsafe with
        # untrusted clients; this server must only be exposed to trusted peers.
        request = pickle.loads(p, encoding='latin1')
        for key in ['model_id', 'predict_method', 'predict_kwargs']:
            if key not in request.keys():
                self.logger.error('Missing key {}'.format(key))
                raise RequestFormatError()
        return request

    def load_model(self, model_id: str, model_task: str, model_name: str,
                   expname: str):
        """
        Load a model from the registry and cache it under *model_id*.

        Args:
            model_id (str): model key
            model_task (str): Task for the model
            model_name (str): Name of the model
            expname (str): Name of the experiment

        Returns:
            model
        """
        self.logger.info('Loading model {}/{}/{}'.format(
            model_task, model_name, expname))
        self.models[model_id] = common.load_model(model_task=model_task,
                                                  model_name=model_name,
                                                  expname=expname)
        self.logger.info('Succesfully load model {}/{}/{}'.format(
            model_task, model_name, expname))

    def start(self):
        """
        Start the server loop (blocks forever).

        Each iteration receives one request, runs the requested predict
        method, and always answers the client — REP sockets require exactly
        one send per recv, hence the send_response in every except branch.
        """
        self.logger.info('Server started on http://{}:{}'.format(
            self.host, self.port))
        while True:
            try:
                self.logger.info('Waiting new request')
                request = self.receive_request()
                self.logger.info('Running inference')
                out = getattr(
                    self.models[request['model_id']],
                    request['predict_method'])(**request['predict_kwargs'])
            except (common.ModelLoadingError, RequestFormatError, ValueError,
                    TypeError, RuntimeError, AttributeError,
                    box.exceptions.BoxKeyError) as e:
                # Known failure modes map to specific status codes.
                trace = traceback.format_exc()
                code_status = CODES['exceptions'][e.__class__]
                self.logger.error('Error with status: {}'.format(code_status))
                self.logger.error('Traceback: {}'.format(trace))
                self.send_response(payload='', status_code=code_status)
                continue
            except Exception as e:
                # Anything unexpected is reported as a generic 404.
                trace = traceback.format_exc()
                self.logger.error('Traceback: {}'.format(trace))
                code_status = 404
                self.send_response(payload='', status_code=code_status)
                continue
            self.send_response(payload=out, status_code=CODES['success'])
            self.logger.info('Succesfully run inference')
|
def binary_to_decimal(binary_string):
    """Parse a base-2 numeral string (e.g. "1011") into an int."""
    return int(binary_string, 2)
|
import math
def get_x_value(x):
    """Convert *x* from degrees to radians."""
    radians = (x / 180) * math.pi
    return radians
def get_factorial(degree):
    """Return degree! (factorial).

    Uses math.prod over range(1, degree + 1), which — like the original
    accumulation loop — yields 1 for degree < 1 (empty product).
    """
    return math.prod(range(1, degree + 1))
def get_sh_function_in_sequence(x, degree):
    """Approximate sinh(x) with the first *degree* odd-power Taylor terms:
    sum of x**(2i-1) / (2i-1)! for i = 1..degree.
    """
    total = 0.0
    for term_index in range(1, degree + 1):
        power = 2 * term_index - 1
        total += x ** power / math.factorial(power)
    return total
def horner_method(x, value_sequence, i, max):
    """Recursively evaluate the polynomial whose coefficients are
    value_sequence[i..max] (lowest power first) at *x*, Horner-style.
    """
    if i + 1 >= max:
        # Base case: fold in the last two coefficients.
        return value_sequence[max] * x + value_sequence[i]
    return horner_method(x, value_sequence, i + 1, max) * x + value_sequence[i]
# Initialize.
sequence_degree = 5
x1 = 55
x2 = 97
# Horner coefficient list: zeros at even powers, 1/(2i-1)! at odd powers.
list_of_x = [0]
for term in range(1, sequence_degree + 1):
    list_of_x.append(1 / get_factorial(2 * term - 1))
    list_of_x.append(0)


def _report(label, angle):
    # Evaluate both implementations at *angle* degrees and print them plus
    # their relative difference.
    standard = get_sh_function_in_sequence(get_x_value(angle), sequence_degree)
    horner = horner_method(get_x_value(angle), list_of_x, 0, len(list_of_x) - 1)
    print(label)
    print(standard)
    print(horner)
    print("%s: %f%s" % ("Difference", standard / horner - 1, "%"))


# Print X1 result.
_report("X1: ", x1)
print("----------------------------------------")
# Print X2 result
_report("X2: ", x2)
from django import forms
from django.forms import ModelForm
from .models import Article
class Form(ModelForm):
    """ModelForm for Article exposing file_obj plus two extra integer inputs."""
    x = forms.IntegerField()  # extra form-only field, not stored on the model
    y = forms.IntegerField()  # extra form-only field, not stored on the model
    class Meta:
        model = Article
        fields = ['file_obj']
import sys
# This is a "glue" module
class Arguments:
    """Container for command-line info and the interpreter's constant tables."""

    def __init__(self):
        # Number of command-line arguments (including the script name).
        self.argnum = len(sys.argv)
        self.pdefault = 1        # default value of p
        self.pdef_unknown = 0.5
        # Granularity for p: pgranu+1 discrete values starting from 0.
        self.pgranu = 4
        # pclas is the matrix for class reasoning:
        # C(%1,%2)p1 and %X(%2,%3)p2 -> %x(%2,%3)pclas, indexed pclas[p2][p1].
        self.pclas = [
            [0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1],
            [0, 1, 1, 2, 2],
            [0, 1, 2, 2, 3],
            [0, 1, 2, 3, 4],
        ]
        # Relation mnemonic -> numeric code.
        self.rcode = {
            "X": -1, "W": 1, "S": 2, "D": 3, "C": 4, "F": 5,
            "Q": 6, "A": 7, "I": 8, "R": 9, "T": 10,
            "P": 11, "M": 12, "IM": 13, "N": 14, "V": 15,
            "AND": 16, "NOT": 17, "OR": 18, "XOR": 19,
        }
        # Inverse mapping, derived from rcode so the tables cannot drift apart.
        self.rcodeBack = {code: name for name, code in self.rcode.items()}
class Logging:
    """Minimal line-oriented file logger."""

    def __init__(self, fname="logfile.txt"):
        """Open *fname* for writing; on failure report and leave logf unset."""
        try:
            self.logf = open(fname, "w")
        except OSError:
            # Narrowed from a bare except: only file-open errors are expected,
            # and a bare except would also swallow KeyboardInterrupt.
            print("ERROR: Logging: log file could not be opened")

    def add_log(self, what):  # what must be iterable
        """Write every item of *what* (stringified) followed by a newline."""
        try:
            for item in what:
                self.logf.write(str(item))
            self.logf.write("\n")
        except Exception:
            # logf may be missing (failed open) or *what* may not be iterable.
            print("ERROR: Logging: log file not present or called incorrectly", str(what))


if __name__ == "__main__":
    print("This is a module file, run natlan.py instead")
|
# AoC 2020 day 11 (part 1): read the seating grid as a list of char lists.
Plane = [list(line.strip()) for line in open("input_11.txt").readlines()]
# Plane = [
#     "L.LL.LL.LL",
#     "LLLLLLL.LL",
#     "L.L.L..L..",
#     "LLLL.LL.LL",
#     "L.LL.LL.LL",
#     "L.LLLLL.LL",
#     "..L.L.....",
#     "LLLLLLLLLL",
#     "L.LLLLLL.L",
#     "L.LLLLL.LL",
# ]
W = len(Plane[0])  # grid width (columns)
L = len(Plane)     # grid height (rows)
Changed = True     # set by the simulation loop when any seat flips
def newSeat(plane, row, col):
    """Return (new_value, changed) for one cell of the seating grid.

    Rules: an empty seat 'L' with no occupied neighbours becomes '#';
    an occupied seat '#' with 4 or more occupied neighbours becomes 'L';
    floor '.' never changes.

    Fixes: the original declared `global Changed` but never assigned it
    (dead statement), and read grid dimensions from module globals W/L —
    they are now derived from *plane* itself, so the function works on any
    rectangular grid while behaving identically for the module's own grid.
    """
    n_rows = len(plane)
    n_cols = len(plane[0])
    if plane[row][col] == ".":
        return ".", False
    # Count occupied seats among the up-to-8 in-bounds neighbours.
    occupied = 0
    for r in (row - 1, row, row + 1):
        for c in (col - 1, col, col + 1):
            if (r, c) == (row, col):
                continue
            if 0 <= r < n_rows and 0 <= c < n_cols and plane[r][c] == "#":
                occupied += 1
    if plane[row][col] == "#":
        if occupied >= 4:
            return "L", True
        return "#", False
    if plane[row][col] == "L":
        if occupied == 0:
            return "#", True
        return "L", False
    # Unknown cell value; mirrors the original's diagnostic fallback.
    print("ERROR!!!!!!!!!!!!!!")
# Iterate the seating rules until the grid reaches a fixed point, then
# report the number of occupied seats.
while True:
    Changed = False
    NewP = []
    for row in range(L):
        new_row = []
        for col in range(W):
            value, flipped = newSeat(Plane, row, col)
            new_row.append(value)
            if flipped:
                Changed = True
        NewP.append(new_row)
    Plane = NewP
    if not Changed:
        break
print(sum(row.count("#") for row in Plane))
|
from django.apps import AppConfig
class MovieraterConfig(AppConfig):
    """Django application configuration for the movieRater app."""
    name = 'movieRater'
|
from django.contrib import admin
from .models import Profile, Board, Photo, Reservation
# Register your models here.
# Give each model default CRUD pages in the Django admin.
for _model in (Profile, Board, Photo, Reservation):
    admin.site.register(_model)
|
import requests, sys
proxies = []
iter_proxies = []
def get_proxies(path="proxies.txt"):
    """Load proxies from *path* (one per line) into the module-level list.

    Accepted line formats:
      * "none"                  -> kept as the literal string 'none'
      * "host:port"             -> "host:port"
      * "host:port:user:pass"   -> "user:pass@host:port"

    Returns the parsed list (also stored in the module global ``proxies``).
    Exits the process on any read/parse error, like the original.

    Fixes: the 'none' check previously compared the raw line (which still
    carried its trailing newline, so it never matched except possibly on the
    last line), and the wrong-format branch called ``x.join(":")`` — an
    AttributeError on a list — which tripped the blanket handler and exited.
    """
    global proxies
    try:
        with open(path, 'r') as file:
            contents = file.readlines()
        for i, raw in enumerate(contents):
            line = raw.strip()
            if line.lower() == 'none':
                contents[i] = 'none'
                continue
            if line.count(":") > 2:
                parts = line.rsplit(":", 3)
            else:
                parts = line.rsplit(":", 1)
            # print(parts)
            if len(parts) == 4:
                contents[i] = parts[2] + ":" + parts[3] + "@" + parts[0] + ":" + parts[1]
            elif len(parts) == 2:
                contents[i] = parts[0] + ":" + parts[1]
            else:
                print("wrong format:", ":".join(parts))
        # so this saves the proxies in here and also returns it
        proxies = contents
        return contents
    except Exception as e:
        print("error in processing proxies: ", e)
        sys.exit()
def get_proxy():
    """
    linear proxy rotation: pop proxies in order, refilling the working
    copy from the full list once it is exhausted.
    """
    global proxies, iter_proxies
    if not iter_proxies:
        iter_proxies = list(proxies)
    return iter_proxies.pop(0)
def get_session(headers=None, proxy=None):
    """Build a requests.Session with browser-like defaults.

    Args:
        headers: optional header dict used instead of the defaults.
        proxy: optional proxy spec ('none', 'socks5://host:port', or an
            HTTP proxy string) — same formats as change_proxy.

    The proxy-format handling is delegated to change_proxy so the logic
    lives in exactly one place (it was previously duplicated here).
    """
    s = requests.Session()
    #check if header is specified, if not then use default
    if headers:
        try:
            s.headers.update(headers)
        except Exception:
            # Narrowed from a bare except (which also caught KeyboardInterrupt).
            raise Exception("get_session error: wrong header format")
    else:
        s.headers.update({
            'accept' : '*/*',
            'accept-language' : 'en-US,en;q=0.9',
            'dnt' : '1',
            'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
        })
    if proxy:
        change_proxy(s, proxy)
    return s
def change_proxy(s, proxy):
    """Point session *s* at *proxy* and return it.

    'none' leaves the session untouched; 'socks5://host:port' is mapped to
    socks5h (proxy-side DNS resolution); anything else is treated as an
    HTTP proxy. A bad *proxy* value (e.g. None) raises exactly as before.

    The original wrapped this body in a try/except that immediately
    re-raised and had an unreachable ``return s`` after the raise; the
    wrapper added nothing, so it was removed.
    """
    if "none" in proxy:
        pass
    elif "socks5://" in proxy:
        s.proxies.update({
            'http' : 'socks5h://'+ proxy.replace("socks5://",""),
            'https': 'socks5h://'+ proxy.replace("socks5://","")
        })
    else:
        s.proxies.update({
            'http' : 'http://'+ proxy.replace("http://",""),
            'https': 'https://'+ proxy.replace("http://","")
        })
    return s
|
from renderer_strategy import WindowRendererStratgey
from map_registry import map_registry
from state import State
from constants import MessageID
from entity import create_entity_by_id
import pygame
class World(State):
    """Overworld game state: owns the player, the current map and rendering."""
    def __init__(self, context, stack, map):
        super().__init__()
        self.context = context
        self.stack = stack
        self.map = map
        self.message_handler = self.context.message_handler
        # When True, render() omits the player sprite.
        self.hide_player = False
        # Player entity spawned at tile (3, 3).
        self.player = create_entity_by_id("player", context, 3, 3)
    def notify(self, message):
        # GET_MAP messages are answered by stuffing the current map into
        # the message's response slot.
        message.response = self.map
    def on_use_action(self):
        # Fire the trigger (if any) on the tile the player is facing.
        x, y = self.player.get_faced_tile_coords()
        trigger = self.map.get_trigger(x, y)
        if trigger is not None:
            trigger.on_use(
                trigger,
                self.player,
                x,
                y,
            )
    def enter(self, data):
        # Called when this state becomes active: set up rendering and start
        # answering GET_MAP queries.
        self.set_render_units()
        self.message_handler.subscribe(MessageID.GET_MAP, self)
    def exit(self):
        # Mirror of enter(): stop answering GET_MAP queries.
        self.message_handler.unsubscribe(MessageID.GET_MAP, self)
    def switch_map(self, identifier):
        # Replace the current map with a freshly built one from the registry.
        self.map = map_registry[identifier]()
        self.set_render_units()
    def set_render_units(self):
        # Reset the renderer to window-strategy sized to the current map.
        renderer = self.context.renderer
        renderer.strategy = WindowRendererStratgey()
        renderer.reset(
            self.map.map_width,
            self.map.map_height
        )
    def handle_event(self, event):
        self.player.controller.handle_event(event)
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Space interacts with whatever the player is facing.
                self.on_use_action()
    def update(self, dt):
        # Advance the player and every NPC controller by dt.
        self.player.controller.update(dt)
        for npc in self.map.npcs:
            npc.controller.update(dt)
        return False
    def render(self, renderer):
        player = self.player
        if self.hide_player:
            player = None
        self.map.render(renderer, player)
|
from django.conf import settings
from apps.tests.utils import get_test_forms
def site(request):
    """Template context processor: expose settings, the matched URL name
    and the shared test form/formset on every page."""
    form, formset = get_test_forms(request)
    context = {
        'url_name': request.resolver_match.url_name,
        'settings': settings,
        'test_form': form,
        'test_formset': formset,
    }
    return context
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import sys
import io
import re
import math
import itertools
import collections
import bisect
#sys.stdin=file('input.txt')
#sys.stdout=file('output.txt','w')
#10**9+7
mod=1000000007          # common competitive-programming prime modulus
#mod=1777777777
pi=3.141592653589       # pi to 12 decimal places
IS=float('inf')
xy=[(1,0),(-1,0),(0,1),(0,-1)]    # 4-neighbour (orthogonal) grid offsets
bs=[(-1,-1),(-1,1),(1,1),(1,-1)]  # diagonal grid offsets
def gcd(a,b):
    """Greatest common divisor by Euclid's algorithm."""
    return a if b==0 else gcd(b,a%b)

def lcm(a,b):
    """Least common multiple. Uses // so the result stays an int on
    Python 3 (the original '/' produced a float there); identical on
    Python 2 for integer inputs."""
    return a*b//gcd(a,b)

def euclid_dis(x1,y1,x2,y2):
    """Euclidean distance between (x1,y1) and (x2,y2)."""
    return ((x1-x2)**2+(y1-y2)**2)**0.5

def choco(xa,ya,xb,yb,xc,yc,xd,yd):
    """Return 1 if segment AB is perpendicular to segment CD (their
    direction vectors' dot product is ~0 within 1e-10), else 0."""
    return 1 if abs((yb-ya)*(yd-yc)+(xb-xa)*(xd-xc))<1.e-10 else 0
# Per-digit 4-lamp pattern: 'o' = lamp lit, 'x' = lamp dark.
d={0:'oxxx',1:'xoox',2:'xooo',3:'xxoo',4:'ooox',5:'oooo',6:'oxoo',7:'ooxx',8:'oooo',9:'oxxo'}
# Ported from Python 2: raw_input/print-statement -> input()/print().
n=int(input())
l=input()
# For each lamp position, count digits whose lamp is dark; the answer is
# non-zero only if every position has at least one dark lamp.
ans=1
for i in range(4):
    chk=0
    for j in l:
        if d[int(j)][i]=='x':
            chk+=1
    ans=min(ans,chk)
print('YES' if ans else 'NO')
|
#coding:utf-8
from selenium import webdriver
import time
# Launch a single browser instance. The original called webdriver.Chrome()
# twice, leaking the first (never-closed) browser window.
driver = webdriver.Chrome()
driver.set_window_size(1080, 800)
driver.implicitly_wait(20)
driver.get('http://www.scholat.com/login.html')
#username
time.sleep(1)
#driver.find_element_by_id("login_user").click()
driver.find_element_by_id("j_username").send_keys("523786283@qq.com")
# NOTE(review): credentials hard-coded in source; move to config/env vars.
driver.find_element_by_id("j_password_ext").send_keys("**********")
driver.find_element_by_id("login").click()
time.sleep(1)
driver.find_element_by_css_selector(u"#t7 > p").click()
driver.find_element_by_link_text(u"邀请注册").click()
#ChineseName
driver.find_element_by_id("ChineseName").send_keys(u"董浩业")
driver.find_element_by_id("workUnit").send_keys(u"广东财经大学")
driver.find_element_by_id("workEmail").send_keys(u"donghy@mail.sysu.edu.cn")
#driver.find_element_by_link_text(u"邀请好友注册").click()
|
# Copyright (C) 2009 Kevin Ollivier All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Helper functions for the WebKit build.
import commands
import glob
import os
import platform
import re
import shutil
import sys
import urllib
import urlparse
def get_output(command):
    """
    Windows-compatible function for getting output from a command.

    Python 2 only: relies on the removed `commands` module on non-Windows
    platforms.
    """
    if sys.platform.startswith('win'):
        # commands.getoutput is POSIX-only; fall back to os.popen on Windows.
        f = os.popen(command)
        return f.read().strip()
    else:
        return commands.getoutput(command)
def get_excludes(root, patterns):
    """
    Get a list of exclude patterns going down several dirs.
    TODO: Make this fully recursive.
    """
    excludes = []
    wildcard = os.sep + '*'
    for pattern in patterns:
        # Search exactly 1, 2 and 3 directory levels below root.
        for depth in (1, 2, 3):
            search = root + wildcard * depth + os.sep + pattern
            excludes.extend(os.path.basename(match) for match in glob.glob(search))
    return excludes
def get_dirs_for_features(root, features, dirs):
    """
    Find which directories to include in the list of build dirs based upon the
    enabled port(s) and features.

    Note: appends to (and returns) the very *dirs* list passed in, and the
    iteration deliberately covers entries appended along the way.
    """
    outdirs = dirs
    for base in dirs:
        for feature in features:
            candidate = os.path.join(base, feature)
            if os.path.exists(os.path.join(root, candidate)) and candidate not in outdirs:
                outdirs.append(candidate)
    return outdirs
def download_if_newer(url, destdir):
    """
    Checks if the file on the server is newer than the one in the user's tree,
    and if so, downloads it.

    Returns the filename of the downloaded file if downloaded, or None if
    the existing file matches the one on the server.

    Python 2 only (urlparse/urllib modules, long(), print statement).
    "Newer" is approximated by a Content-Length mismatch, not a timestamp.
    """
    obj = urlparse.urlparse(url)
    filename = os.path.basename(obj.path)
    destfile = os.path.join(destdir, filename)
    urlobj = urllib.urlopen(url)
    # Server-reported size, used as a cheap freshness proxy below.
    size = long(urlobj.info().getheader('Content-Length'))
    def download_callback(downloaded, block_size, total_size):
        # urlretrieve reporthook: echo progress on one self-rewriting line.
        downloaded = block_size * downloaded
        if downloaded > total_size:
            downloaded = total_size
        sys.stdout.write('%s %d of %d bytes downloaded\r' % (filename, downloaded, total_size))
    # NB: We don't check modified time as Python doesn't yet handle timezone conversion
    # properly when converting strings to time objects.
    if not os.path.exists(destfile) or os.path.getsize(destfile) != size:
        urllib.urlretrieve(url, destfile, download_callback)
        print ''
        return destfile
    return None
def update_wx_deps(conf, wk_root, msvc_version='msvc2008'):
    """
    Download and update tools needed to build the wx port.

    Fetches waf and the SWIG python module; on Windows additionally unpacks
    the prebuilt dependency archive, optionally substituting ICU / libcurl /
    libjpeg from WEBKIT_*_REPLACEMENT environment variables. Python 2 only.
    """
    import Logs
    Logs.info('Ensuring wxWebKit dependencies are up-to-date.')
    wklibs_dir = os.path.join(wk_root, 'WebKitLibraries')
    waf = download_if_newer('http://wxwebkit.kosoftworks.com/downloads/deps/waf', os.path.join(wk_root, 'WebKitTools', 'wx'))
    if waf:
        # TODO: Make the build restart itself after an update.
        Logs.warn('Build system updated, please restart build.')
        sys.exit(1)
    # since this module is still experimental
    wxpy_dir = os.path.join(wk_root, 'WebKit', 'wx', 'bindings', 'python')
    swig_module = download_if_newer('http://wxwebkit.kosoftworks.com/downloads/deps/swig.py.txt', wxpy_dir)
    if swig_module:
        shutil.copy2(os.path.join(wxpy_dir, 'swig.py.txt'), os.path.join(wxpy_dir, 'swig.py'))
    if sys.platform.startswith('win'):
        Logs.info('downloading deps package')
        archive = download_if_newer('http://wxwebkit.kosoftworks.com/downloads/deps/wxWebKitDeps-%s.zip' % msvc_version, wklibs_dir)
        if archive and os.path.exists(archive):
            os.system('unzip -o %s -d %s' % (archive, os.path.join(wklibs_dir, msvc_version)))
        dest_lib_dir = os.path.join(wklibs_dir, msvc_version, 'win', 'lib')
        dest_include_dir = os.path.join(wklibs_dir, msvc_version, 'win', 'include')
        # substitute libcurl.lib if necessary
        icu_repl = os.environ.get('WEBKIT_ICU_REPLACEMENT', None)
        if icu_repl is not None:
            Logs.info('WEBKIT_ICU_REPLACEMENT is %r' % icu_repl)
            if not os.path.isdir(icu_repl):
                conf.fatal('directory does not exist: %r' % icu_repl)
            for l in 'icudt icuin icuio icule iculx icutu icuuc'.split():
                libfile = os.path.join(icu_repl, 'lib', l + '.lib')
                shutil.copy2(libfile, dest_lib_dir)
            inc_dir = os.path.join(icu_repl, 'include', 'unicode')
            if not os.path.isdir(inc_dir):
                conf.fatal('not a directory: %r' % inc_dir)
            for inc in os.listdir(inc_dir):
                shutil.copy2(os.path.join(inc_dir, inc),
                             os.path.join(dest_include_dir, 'unicode'))
        curl_libfile = os.environ.get('WEBKIT_CURL_REPLACEMENT', None)
        if curl_libfile is not None:
            Logs.info('WEBKIT_CURL_REPLACEMENT is %r' % curl_libfile)
            if not os.path.isfile(curl_libfile):
                conf.fatal('file does not exist: WEBKIT_CURL_REPLACEMENT %r' % curl_libfile)
            shutil.copy2(curl_libfile, os.path.join(dest_lib_dir, 'libcurl.lib'))
        jpeg_dir = os.environ.get('WEBKIT_JPEG_REPLACEMENT', None)
        if jpeg_dir is not None:
            Logs.info('WEBKIT_JPEG_REPLACEMENT is %r' % jpeg_dir)
            jpeglib = os.path.join(jpeg_dir, 'libjpeg.lib')
            if not os.path.isfile(jpeglib):
                conf.fatal('file does not exist: %r' % jpeglib)
            shutil.copy2(jpeglib, dest_lib_dir)
            jpegheaders = '''\
cderror.h
cdjpeg.h
jconfig.h
jdct.h
jerror.h
jinclude.h
jmemsys.h
jmorecfg.h
jpegint.h
jpeglib.h
jversion.h
transupp.h'''.split()
            for h in jpegheaders:
                header_file = os.path.join(jpeg_dir, h)
                if not os.path.isfile(header_file):
                    conf.fatal('file does not exist: %r' % header_file)
                shutil.copy2(header_file, dest_include_dir)
    elif sys.platform.startswith('darwin'):
        # export the right compiler for building the dependencies
        if platform.release().startswith('10'): # Snow Leopard
            os.environ['CC'] = conf.env['CC'][0]
            os.environ['CXX'] = conf.env['CXX'][0]
        os.system('%s/WebKitTools/wx/install-unix-extras' % wk_root)
def includeDirsForSources(sources):
    """Collect the unique dirnames of every file in *sources* (a list of
    source groups), preserving first-seen order."""
    include_dirs = []
    for group in sources:
        for source in group:
            parent = os.path.dirname(source)
            if parent not in include_dirs:
                include_dirs.append(parent)
    return include_dirs
def flattenSources(sources):
    """Flatten a list of source groups into one flat list."""
    return [source for group in sources for source in group]
def git_branch_name():
    """Return '.<branch>' for the current git branch, or '' if it cannot
    be determined.

    Python 2 only (removed `commands` module).
    """
    try:
        branches = commands.getoutput("git branch --no-color")
        # The checked-out branch is the line marked with '* '.
        match = re.search('^\* (.*)', branches, re.MULTILINE)
        if match:
            return ".%s" % match.group(1)
    except:
        pass
    return ""
def get_config(wk_root):
    """Return the build configuration, defaulting to 'Debug' unless a
    WebKitBuild/Configuration file under *wk_root* says otherwise."""
    config_file = os.path.join(wk_root, 'WebKitBuild', 'Configuration')
    if not os.path.exists(config_file):
        return 'Debug'
    return open(config_file).read()
def svn_revision():
    """Return the SVN revision of this checkout, or '' if none is found.

    Prefers git-svn metadata when the tree is a git-svn clone, falling back
    to plain `svn info`. Python 2 only (removed `commands` module).
    """
    # os.system returns the exit status; 0 means git-svn metadata exists.
    if os.system("git-svn info") == 0:
        info = commands.getoutput("git-svn info ../..")
    else:
        info = commands.getoutput("svn info")
    for line in info.split("\n"):
        if line.startswith("Revision: "):
            return line.replace("Revision: ", "").strip()
    return ""
|
#! /usr/bin/python3
import matplotlib.pyplot as pylab
def isfetc_1D(L_list, min_spacing, rel_perm_list, salt_conc, pH, site_density, p_doping_density, n_doping_density, req_char, T, pK1, pK2, steric):
    """Find the fluid bias producing a requested semiconductor sheet charge.

    Runs Newton's method (finite-difference Jacobian, step clamped to
    +/-0.2 V) on the fluid bias until the sheet charge reported by the
    1-D ISFET solver matches req_char (C/cm^2), then returns the complete
    solver state at the converged bias, plus the bias itself.
    """
    # The steric and non-steric solver variants expose the same isfet_1D API.
    if steric==True:
        import ISFETsolverSteric as isfs
    else:
        import ISFETsolver as isfs
    tol = 1e-9   # convergence tolerance on the sheet-charge residual
    err = 1.0
    print("Looking for fluid bias for sheet charge density of", req_char,"C/cm2 at salt concentration",salt_conc,"M, pH",pH,"and surface site density",site_density,"1/cm2")
    # Convert the target from C/cm^2 to C/m^2 (factor 1e4).
    req_charge = req_char*1e4
    def sheet_charge_eval(rbias):
        # Runs the full solver at *rbias*; publishes the latest state as
        # module globals so the return statement below can hand it all back.
        global space_grid, end_s, end_o, V_grid, E, D, conc_h, conc_e, conc_H, conc_OH, conc_posI, conc_negI, conc_SiO, conc_SiOH, conc_SiOH2, rho_SC
        space_grid, end_s, end_o, V_grid, E, D, conc_h, conc_e, conc_H, conc_OH, conc_posI, conc_negI, conc_SiO, conc_SiOH, conc_SiOH2, rho_SC = isfs.isfet_1D(L_list, min_spacing, rel_perm_list, salt_conc, pH, site_density, p_doping_density, n_doping_density, rbias, T, pK1, pK2)
        return rho_SC
    rbias = 0.0
    delta_b = 0.001  # finite-difference step for the Jacobian estimate
    while abs(err)>tol:
        bb2 = sheet_charge_eval(rbias+delta_b)
        bb1 = sheet_charge_eval(rbias)
        jac = (bb2-bb1)/delta_b
        y = (bb1-req_charge)*(1.0/jac)
        # Clamp the Newton step to keep the iteration stable.
        y = min(max(y,-0.2),0.2)
        rbias = rbias - y
        err = bb1-req_charge
        print("Current Sheet Charge difference=",err)
    print("Calculated fluid bias for",req_char,"C/cm2 sheet charge is", rbias)
    print("Sheet charge in Semiconductor at calculated potential =",bb1*1e-4)
    return space_grid, end_s, end_o, V_grid, E, D, conc_h, conc_e, conc_H, conc_OH, conc_posI, conc_negI, conc_SiO, conc_SiOH, conc_SiOH2, rho_SC, rbias
|
def get_whole_play_n(l):
    """Return the total play count over a list of (index, plays) pairs."""
    return sum(plays for _, plays in l)
def solution(genres, plays):
    """Build a best-album playlist.

    Genres are ordered by their total play count (descending); within
    each genre at most the two most-played songs are kept, most-played
    first. Returns the selected song indices.
    """
    by_genre = {}
    for idx, play_count in enumerate(plays):
        by_genre.setdefault(genres[idx], []).append((idx, play_count))

    answer = []
    genre_total = lambda songs: sum(count for _, count in songs)
    for songs in sorted(by_genre.values(), key=genre_total, reverse=True):
        top_two = sorted(songs, key=lambda item: item[1], reverse=True)[:2]
        answer.extend(idx for idx, _ in top_two)
    return answer
# Demo run: print the best-album playlist for five sample songs.
print(solution(["classic", "pop", "classic",
                "classic", "pop"], [500, 600, 150, 800, 2500]))
|
# How to use colors in a terminal from Python
# using the ANSI escape code '\033[m
# Style (0, 1, 4, 7)
# Text color (30 to 37)
# Background color (40 to 47)
print('Olá mundo!')
print('\033[31mOlá mundo!')
print('\033[32;43mOlá mundo!')
print('\033[1;34;43mOlá mundo!')
print('\033[1;35;43mOlá mundo!\033[m')
print('\033[7;30;45mOlá mundo!\033[m')# style 7 inverts foreground/background
print('\033[7;30mOlá mundo!\033[m')
n1 = 4
n2 = 5
print('a soma de \033[35m{}\033[m + \033[36m{}\033[m ??? '.format(n1, n2))
## one option is to inject the escape codes through format()
nome = 'Lucas'
print('Olá! Muito prazer {}{}{}!!'.format('\033[4;35m',nome, '\033[m'))
# another option is a lookup dictionary of formatting codes
cores = {'limpa':'\033[m',
         'Azul':'\033[34m',
         'Amarelo':'\033[33m',
         'pretoecinza':'\033[7;30m'}
print('Olá! Muito prazer {}{}{}!!'.format(cores['Azul'],nome,cores['limpa']))
|
"""
We will use this script to teach Python to absolute beginners
The script is an example of salary calculation implemented in Python
The salary calculator:
Net_Income = Gross_Income - Taxable_Due
Taxable_Due = taxable_income + Social_security + Medicare_tax
Taxable_Income = Gross_Income -120,00
Social_security = 6.2% of Gross_Income
Medicare_Tax = 1.45 % of Gross_Income
Federal tax brackets
10% $0 to $9,525
12% $9,526 to $38,700
22% $38,701 to $82,500
24% $82,501 to $157,500
32% $157,501 to $200,000
35% $200,001 to $500,000
37% $500,001 or more
"""
# Enter the gross income
print("Enter the gross income")
gross_income = float(raw_input())
# Taxable income will be reduced from gross income
taxable_deduction = 12000
# Taxable income
taxable_income = gross_income-taxable_deduction
# This list contains the list of salary taxable brackets
tax_bracket = [9525,29175,43799,74999,42499,299999,500000]
# This list contains the percentage of tax for the taxable brackets
tax_rate = [10,12,22,24,32,35,37]
sigma_of_federal_tax = 0
# If else loop to check in which category the employee will come for the tax calculation
if taxable_income >= 500001:
for i in range(6):
federal_tax_bracket = ((tax_rate[i]*tax_bracket[i])/100)
sigma_of_federal_tax = federal_tax_bracket + sigma_of_federal_tax
federal_tax = sigma_of_federal_tax + (((taxable_income-500001)*37)/100)
elif taxable_income > 200001 and taxable_income <= 500000:
for i in range(5):
federal_tax_bracket = ((tax_rate[i]*tax_bracket[i])/100)
sigma_of_federal_tax = federal_tax_bracket + sigma_of_federal_tax
federal_tax = sigma_of_federal_tax + (((taxable_income-200001)*35)/100)
elif taxable_income > 157501 and taxable_income <= 200000:
for i in range(4):
federal_tax_bracket = ((tax_rate[i]*tax_bracket[i])/100)
sigma_of_federal_tax = federal_tax_bracket + sigma_of_federal_tax
federal_tax = sigma_of_federal_tax + (((taxable_income-157501)*32)/100)
elif taxable_income > 82501 and taxable_income <= 157500:
for i in range(3):
federal_tax_bracket = ((tax_rate[i]*tax_bracket[i])/100)
sigma_of_federal_tax = federal_tax_bracket + sigma_of_federal_tax
federal_tax = sigma_of_federal_tax + (((taxable_income-82501)*24)/100)
elif taxable_income > 38701 and taxable_income <= 82500:
for i in range(2):
federal_tax_bracket = ((tax_rate[i]*tax_bracket[i])/100)
sigma_of_federal_tax = federal_tax_bracket + sigma_of_federal_tax
federal_tax = sigma_of_federal_tax + (((taxable_income-38701)*22)/100)
elif taxable_income >= 9525 and taxable_income <= 38700:
for i in range(1):
federal_tax_bracket = ((tax_rate[i]*tax_bracket[i])/100)
sigma_of_federal_tax = federal_tax_bracket + sigma_of_federal_tax
federal_tax = sigma_of_federal_tax + (((taxable_income-9526)*12)/100)
elif taxable_income >0 and taxable_income < 9525:
federal_tax = ((taxable_income *10)/100)
else:
federal_tax =0
print("The employee no need to pay the federal tax")
print("The employee federal tax is " , federal_tax)
# 6.2% of gross income is social security
social_security = ((gross_income *6.2)/100)
if social_security >= 7960.80:
social_security = 7960
print("The employee social security is",social_security)
# 1.45% of gross income is medicare tax
medicare_tax = gross_income *(1.45/100)
print("The employee medicare tax is",medicare_tax)
# Taxable due
taxable_due = federal_tax + social_security + medicare_tax
# Net income
net_income = gross_income - taxable_due
print("The employee take home salary is : ", net_income) |
# Generated by Django 3.2.3 on 2021-08-03 16:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a non-null `tagline` CharField (max 150) to the movie model.

    preserve_default=False: the default 'Put on a happy face' is used only
    to back-fill existing rows and is dropped from the field definition.
    """

    dependencies = [
        ('movie', '0009_alter_backgroundphoto_image'),
    ]

    operations = [
        migrations.AddField(
            model_name='movie',
            name='tagline',
            field=models.CharField(default='Put on a happy face', max_length=150),
            preserve_default=False,
        ),
    ]
|
from .djdt_flamegraph import FlamegraphPanel
|
#!/usr/bin/env python
from numpy.lib.npyio import save
import rospy
import rospkg
import cv2
import time
import numpy as np
import sys
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
import os
def on_new_image(msg):
    """ROS subscriber callback: stash the most recent lidar image message."""
    global frame_msg
    frame_msg = msg
def create_input_label(frame):
    """Interactively build a training label image for *frame*.

    The user draws rectangles in the "Select output" window; everything
    outside the selected regions stays black. The label is shown briefly
    and returned.
    """
    img_label = np.zeros(frame.shape, frame.dtype)
    cv2.namedWindow("Select output", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Select output", (800, 800))
    rois = cv2.selectROIs("Select output", frame, False)
    # Copy regions of interest to new empty image.
    for roi in rois:
        # selectROIs returns (x, y, width, height) -- so x2/y2 here are
        # sizes, not far corners, hence the y1:y1+y2 slicing below.
        x1 = roi[0]
        y1 = roi[1]
        x2 = roi[2]
        y2 = roi[3]
        img_label[y1:y1+y2, x1:x1+x2] = frame[y1:y1+y2, x1:x1+x2]
    # Show this image for some time.
    cv2.namedWindow("Label", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("Label", (800, 800))
    cv2.imshow("Label", img_label)
    cv2.waitKey(10)
    time.sleep(1.5)
    # Destroy windows associated with this data sample.
    cv2.destroyWindow("Select output")
    cv2.destroyWindow("Label")
    return img_label
def save_sample(input_img, output_img, input_folder, output_folder):
    """Write an (input, label) image pair under matching numeric filenames.

    Advances the persistent counter past any existing files so previously
    collected samples are never overwritten; the label image gets the
    same index in the output folder.
    """
    # Scan forward from the counter until an unused input filename is found.
    # (Uses os.path.join instead of manual "/" concatenation.)
    while True:
        input_path = os.path.join(input_folder, "{}.jpg".format(save_sample.cntr))
        if not os.path.isfile(input_path):
            break
        save_sample.cntr += 1
    # Generate path for output image too.
    output_path = os.path.join(output_folder, "{}.jpg".format(save_sample.cntr))
    # Save both images of the pair.
    cv2.imwrite(input_path, input_img)
    cv2.imwrite(output_path, output_img)
    # File written so increase the counter for the next sample.
    save_sample.cntr += 1

# Persistent sample counter, stored as a function attribute.
save_sample.cntr = 1
if __name__ == "__main__":
# Init ROS.
rospy.init_node('lidar_fconv_dataset_collector')
# Load params.
statek_name = rospy.get_param("~statek_name", "statek")
dataset_folder = rospy.get_param(
"~dataset_folder", "/home/" + os.environ.get("USER") + "/datasets/lidar_dataset")
dataset_inputs = dataset_folder + "/inputs"
dataset_outputs = dataset_folder + "/outputs"
# Init lidar image subscriber.
rospy.Subscriber("/" + statek_name + "/laser/scan_img",
Image, on_new_image, queue_size=1)
# Create folders for dataset if necessary.
try:
os.makedirs(dataset_inputs)
os.makedirs(dataset_outputs)
except:
pass
frame_msg = []
bridge = CvBridge()
# Window to show live lidar measurements.
cv2.namedWindow("Lidar live", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Lidar live", (800, 800))
print("Press space to assign label or ESC to exit.")
while True:
# Show lidar live.
if frame_msg != []:
frame = bridge.imgmsg_to_cv2(
frame_msg, desired_encoding='passthrough')
cv2.imshow("Lidar live", frame)
key = cv2.waitKey(33)
if key == ord(' '): # Space.
output = create_input_label(frame)
save_sample(frame, output, dataset_inputs, dataset_outputs)
if key == 27: # ESC.
cv2.destroyWindow("Lidar live")
sys.exit(0)
|
# Generated by Django 2.2.11 on 2020-04-12 15:02
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the wishlistmodel field `product` to `name`."""

    dependencies = [
        ('wishlist', '0002_auto_20200412_1759'),
    ]

    operations = [
        migrations.RenameField(
            model_name='wishlistmodel',
            old_name='product',
            new_name='name',
        ),
    ]
|
import numpy as np
def mean_absolute_percentage_error(y_true, y_pred):
    """Return the MAPE (in percent) between two series."""
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return relative_errors.mean() * 100
def mean_absolute_error(y_true, y_pred):
    """Return the mean absolute error between two series."""
    actual = np.array(y_true)
    predicted = np.array(y_pred)
    return np.abs(actual - predicted).mean()
# In[] code
import pandas as pd
from pandas import datetime
from matplotlib import pyplot as plt
import numpy as np
def parser(x):
    """Parse a 'dd/mm/YYYY HH:MM:SS' timestamp string into a datetime.

    Fix: uses the stdlib datetime class via a local import. The module
    previously relied on `from pandas import datetime`, an alias that was
    deprecated in pandas 0.25 and removed in pandas 1.0; the behavior is
    identical since the alias was the stdlib class.
    """
    from datetime import datetime
    return datetime.strptime(x, '%d/%m/%Y %H:%M:%S')
# Load the ER admissions log and the 2018 holiday calendar.
# NOTE(review): absolute user-specific paths -- runs only on the author's machine.
df = pd.read_excel('/Users/bounouamustapha/Desktop/data/Passage.xlsx', parse_dates=[2, 3], date_parser=parser)
holidays = pd.read_excel('/Users/bounouamustapha/Desktop/data/holidays2018.xlsx')
# Keep only the arrival timestamp; drop identifiers and triage codes.
data = df.drop(columns=['NUM_SEJOUR', 'CODE', 'DATE_SORTIE', 'CCMU', 'GEMSA'], axis=1)
data.index = data['DATE_ARRIVEE']
del data['DATE_ARRIVEE']
# One row per arrival: summing 'nb' after daily resampling gives arrival counts.
data['nb'] = 1
daily = data.resample('D').sum()
import matplotlib.pyplot as plt
daily.plot(title="Le nombre des arrivés par jour pour l'année 2018")
plt.show()
# In[] test
def sarimax(data, testdate, horizon, nbjourtest, seasonal, seasonality, useexog):
    """Backtest an auto-ARIMA forecast of the daily arrival series.

    data        -- daily series indexed by date
    testdate    -- first forecast day as a 'd/m/Y' string
    horizon     -- number of days to forecast
    nbjourtest  -- length of the training window in days
    seasonal, seasonality -- accepted but unused; NOTE(review): auto_arima
                   is always called with seasonality=False
    useexog     -- if True, add the day-of-week/holiday exogenous regressors
    """
    from pyramid.arima import auto_arima
    from datetime import timedelta
    test_date_time = datetime.strptime(testdate, '%d/%m/%Y')
    end_test = test_date_time + timedelta(days=horizon - 1)
    end_train = test_date_time - timedelta(1)
    start_train = test_date_time - timedelta(days=nbjourtest)
    train = data[start_train:end_train]
    test = data[test_date_time:end_test]
    if useexog:
        print('------------ variables exogene --------------------------')
        train_exogene = getexplanatoryvariables(train)
        test_exogene = getexplanatoryvariables(test)
    print('training set :' + str(start_train) + ' au ' + str(end_train))
    print('test set :' + str(test_date_time) + ' au ' + str(end_test))
    # Let auto_arima search the (p, d, q) orders on the training window.
    if useexog:
        arima_model = auto_arima(train, exogenous=train_exogene, seasonality=False, error_action='ignore',
                                 trace=1, stepwise=True)
    else:
        arima_model = auto_arima(train, seasonality=False,
                                 error_action='ignore',
                                 trace=1, stepwise=True)
    if useexog:
        prevision = arima_model.predict(horizon, exogenous=test_exogene)
    else:
        prevision = arima_model.predict(horizon)
    precision = mean_absolute_percentage_error(test, prevision)
    print(arima_model.summary())
    print('-----------------------------------------------------------------------------')
    print('--------Mape : --------' + str(precision) + '--------------------------------------')
    # Plot observations over the whole window plus the forecast.
    # NOTE(review): reads the module-level `daily`, not the `data` argument.
    x = daily[start_train:end_test]
    plt.plot(x.index, x)
    plt.plot(test.index, prevision)
    plt.legend(['observation', 'prevision'])
    plt.title('La prevision sur un horizon de :' + str(horizon))
    plt.show()
# In[]
def sarima_prim(data, p, d, q, P, D, Q, s):
    """Fit a SARIMAX(p,d,q)(P,D,Q,s) model on *data* and return the fitted results."""
    import statsmodels.api as sm
    nonseasonal_order = (p, d, q)
    seasonal_order = (P, D, Q, s)
    fitted = sm.tsa.statespace.SARIMAX(data, order=nonseasonal_order,
                                       seasonal_order=seasonal_order).fit(disp=-1)
    return fitted
# In[]
def getexplanatoryvariables(data):
    """Build a 0/1 exogenous-regressor frame for each day in *data*.

    Columns: Monday..Saturday dummies (Sunday is the implicit baseline)
    plus a holiday flag.
    NOTE(review): `holidays` is read by positional counter i, which
    assumes the holidays sheet is aligned row-for-row with *data* --
    verify when the training window changes.
    """
    days_week = []
    init = [0, 0, 0, 0, 0, 0, 0]
    i = 0
    for index, item in data.iterrows():
        day = np.array(init)
        # weekday(): Monday=0 .. Sunday=6; Sunday gets no dummy column.
        if index.weekday() < 6:
            day[index.weekday()] = 1
        print('tes===' + str(holidays['holiday'][i]))
        if holidays['holiday'][i]:
            day[6] = 1
        days_week.append(day)
        i += 1
    # Transpose so each regressor becomes one column vector.
    x = np.transpose(days_week)
    return pd.DataFrame({'lundi': x[0], 'Mardi': x[1], 'Mercredi': x[2], 'Jeudi': x[3], 'Vendredi': x[4], 'Samedi': x[5], 'holiday': x[6]}, index=data.index)
# In[]:
# Backtests: a 15-day horizon without exogenous variables, then a short
# 4-day horizon using the day-of-week/holiday regressors.
sarimax(daily, '1/7/2018', 15, 180, False, 180, False)
# In[]:
sarimax(daily, '28/1/2018', 4, 20, False, 30, True)
# In[]:
# In[]:
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
import numpy as np
# Compare mean daily arrivals on holidays vs over all days for 2018.
da = daily['1/1/2018':'31/12/2018']
x = getexplanatoryvariables(da)
i = 0
z = s = k = 0
for index, row in da.iterrows():
    if (x['holiday'].loc[index]):
        print("ok")
        s += row['nb']  # holiday arrivals total
        i += 1          # holiday day count
    z += row['nb']      # overall arrivals total
    k += 1              # overall day count
print('moyenne en vacance :' + str(s / i))
# NOTE(review): the label says "sans vacance" but z/k averages over ALL days.
print('moyenne sans vacance :' + str(z / k))
# In[] test
def tsplot(y, lags=None, figsize=(12, 7), style='bmh'):
    """Plot a time series with its ACF/PACF and a Dickey-Fuller p-value."""
    import statsmodels.api as sm
    import statsmodels.tsa.api as smt
    from matplotlib import pyplot as plt
    """
    Plot time series, its ACF and PACF, calculate Dickey–Fuller test
    y - timeseries
    lags - how many lags to include in ACF, PACF calculation
    """
    if not isinstance(y, pd.Series):
        y = pd.Series(y)
    with plt.style.context(style):
        fig = plt.figure(figsize=figsize)
        # Series plot on top, ACF and PACF side by side below.
        layout = (2, 2)
        ts_ax = plt.subplot2grid(layout, (0, 0), colspan=2)
        acf_ax = plt.subplot2grid(layout, (1, 0))
        pacf_ax = plt.subplot2grid(layout, (1, 1))
        y.plot(ax=ts_ax)
        # Augmented Dickey-Fuller stationarity test; adfuller()[1] is the p-value.
        p_value = sm.tsa.stattools.adfuller(y)[1]
        ts_ax.set_title('Time Series Analysis Plots\n Dickey-Fuller: p={0:.5f}'.format(p_value))
        smt.graphics.plot_acf(y, lags=lags, ax=acf_ax)
        smt.graphics.plot_pacf(y, lags=lags, ax=pacf_ax)
        plt.tight_layout()
    plt.show()
# In[] test
# Fit SARIMA(3,0,2)(0,1,1)_7 on daily counts and inspect the residuals.
model = sarima_prim(daily['nb'], 3, 0, 2, 0, 1, 1, 7)
# In[] test
tsplot(model.resid, 20)
# In[] test
def testarima(data, testdate, horizon, nbjourtest, p, d, q, P, D, Q, s):
    """Backtest a fixed-order SARIMA(p,d,q)(P,D,Q,s) model.

    Trains on the nbjourtest days before *testdate*, forecasts *horizon*
    days, prints the MAPE and plots observation vs forecast.
    """
    # Fix: import datetime from the stdlib; `from pandas import datetime`
    # was an alias removed in pandas 1.0 (behavior identical).
    from datetime import timedelta, datetime
    import matplotlib.pyplot as plt
    print(str(nbjourtest))
    print(str(horizon))
    test_date_time = datetime.strptime(testdate, '%d/%m/%Y')
    end_test = test_date_time + timedelta(days=horizon - 1)
    end_train = test_date_time - timedelta(1)
    start_train = test_date_time - timedelta(days=nbjourtest)
    train = data[start_train:end_train]
    test = data[test_date_time:end_test]
    # Bug fix: the original called the undefined name `sarima`; the
    # fitting helper defined in this module is sarima_prim.
    arima_model = sarima_prim(train, p, d, q, P, D, Q, s)
    prevision = arima_model.predict(horizon)
    precision = mean_absolute_percentage_error(test, prevision)
    print(arima_model.summary())
    print('-----------------------------------------------------------------------------')
    print('-------- Mape : --------' + str(precision) + '--------------------------------------')
    plt.plot(test.index, test)
    print('-------- test : --------' + str(len(test)))
    print('-------- horizon : --------' + str(horizon))
    print('-------- prevision : --------' + str(len(prevision)))
    # Bug fix: the x-axis was hard-coded as np.arange(358), which crashes
    # for any horizon != 358; plot the forecast against the test dates.
    plt.plot(test.index[:len(prevision)], prevision)
    plt.legend(['observation', 'prevision'])
    plt.title('La prevision sur un horizon de :' + str(horizon))
    plt.show()
# In[
def optimizeSARIMA(data, parameters_list, d, D, s):
    """Grid-search SARIMA (p, q, P, Q) orders by AIC.

    parameters_list -- iterable of (p, q, P, Q) tuples
    d, D, s         -- fixed differencing orders and seasonal period
    Returns a DataFrame of (parameters, aic) rows sorted by AIC ascending.
    """
    import statsmodels.api as sm
    results = []
    best_aic = float("inf")
    i = 0
    for param in parameters_list:
        print("----------------------------------------------------")
        # NOTE(review): `i` is never incremented, so this progress counter
        # always prints 1/N.
        print("--" + str(i + 1) + "/" + str(len(parameters_list)))
        print("ARIMA " + "(" + str(param[0]) + "," + str(d) + "," + str(param[1]) + ") (" + str(param[2]) + "," + str(D) + "," + str(param[3]) + ")" + str(s))
        try:
            model = sm.tsa.statespace.SARIMAX(data, order=(param[0], d, param[1]), seasonal_order=(param[2], D, param[3], s)).fit(disp=-1)
            print("fitting")
            print("----------------------------------------------------")
        except:
            # Combinations that fail to fit are skipped entirely.
            print("Infini")
            print("----------------------------------------------------")
            continue
        aic = model.aic
        # saving best model, AIC and parameters
        # NOTE(review): best_model / best_param are tracked but never returned.
        if aic < best_aic:
            best_model = model
            best_aic = aic
            best_param = param
        results.append([param, model.aic])
    result_table = pd.DataFrame(results)
    result_table.columns = ['parameters', 'aic']
    # sorting in ascending order, the lower AIC is - the better
    result_table = result_table.sort_values(by='aic', ascending=True).reset_index(drop=True)
    return result_table
# In[
# Candidate order grid: (p, q, P, Q) combinations with d=0, D=1 fixed.
ps = range(0, 4)
d = 0
qs = range(0, 4)
Ps = range(0, 3)
D = 1
Qs = range(0, 3)
from itertools import product
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
# Grid-search one year of data with weekly (s=7) seasonality.
aa = daily['1/7/2017':'30/6/2018']
optimizeSARIMA(aa, parameters_list, d, D, 7)
# In[
# Backtest a 100-day forecast starting 1 July 2018 (365-day training window).
aa = daily['1/7/2017':'31/12/2018']
testarima(aa, '1/7/2018', 100, 365, 2, 0, 2, 0, 1, 1, 7)
|
#Import necessary packages
import cv2
import math, operator
import functools
def sendmail():
    """Alert the control room of a detected accident (stub: prints only)."""
    alert = "Accident: Send message to Control Room"
    print(alert)
#Function to find difference in frames
def diffImg(t0, t1, t2):
    """Motion mask from three consecutive grayscale frames.

    A pixel is kept only when it changed in both frame-to-frame
    differences, which suppresses single-frame noise.
    """
    newer_diff = cv2.absdiff(t2, t1)
    older_diff = cv2.absdiff(t1, t0)
    return cv2.bitwise_and(newer_diff, older_diff)
#Import video from webcam
# NOTE(review): despite the comment, this opens a pre-recorded file, not a webcam.
cam = cv2.VideoCapture("C:/Users/Sam Christian/Desktop/Guvi Hack/rem.mkv")
#Creating window to display
winName = "Accident Detector"
cv2.namedWindow(winName)
cv2.namedWindow("Video")
#Reading frames at multiple instances from webcam to different variables
t_minus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
t = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
# Baseline frame written once; the loop compares later motion masks against it.
cv2.imwrite("C:/Users/Sam Christian/Desktop/Guvi Hack/shotp.jpg",t)
# Haar cascade files for car, bus and two-wheeler detection.
cascade_src = 'cars.xml'
bus_src='Bus_front.xml'
motorbike='two_wheeler.xml'
video_src = 'C:/Users/Sam Christian/Desktop/Guvi Hack/road.mp4'
cap = cv2.VideoCapture(video_src)
car_cascade = cv2.CascadeClassifier(cascade_src)
bus_cascade = cv2.CascadeClassifier(bus_src)
mb_cascade = cv2.CascadeClassifier(motorbike)
# Main loop: motion-difference accident detection on `cam` plus Haar-cascade
# vehicle detection on `cap`, until the stream ends or ESC is pressed.
from PIL import Image  # hoisted: was re-imported on every loop iteration
while True:
    # Display video out through the window we created
    cv2.imshow(winName, diffImg(t_minus, t, t_plus))
    cv2.imshow("Video", t)
    # Motion mask computed from three consecutive frames.
    p = diffImg(t_minus, t, t_plus)
    # Writing 'p' to a directory
    cv2.imwrite("C:/Users/Sam Christian/Desktop/Guvi Hack/shot.jpg", p)
    # Open both snapshots and compare their histograms.
    h1 = Image.open("C:/Users/Sam Christian/Desktop/Guvi Hack/shotp.jpg").histogram()
    h2 = Image.open("C:/Users/Sam Christian/Desktop/Guvi Hack/shot.jpg").histogram()
    # Root-mean-square difference between the two histograms.
    rms = math.sqrt(functools.reduce(operator.add, map(lambda a, b: (a - b) ** 2, h1, h2)) / len(h1))
    # If the RMS value of the images are under our limit
    print(rms)
    if (rms < 3160):
        print("Accident")
        sendmail()
    # Updates the frames
    t_minus = t
    t = t_plus
    t_plus = cv2.cvtColor(cam.read()[1], cv2.COLOR_RGB2GRAY)
    ret, img = cap.read()
    if (type(img) == type(None)):
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cars = car_cascade.detectMultiScale(gray, 1.1, 1)
    # Bug fix: buses and bikes previously reused car_cascade, so the bus
    # and two-wheeler classifiers loaded above were never used.
    buses = bus_cascade.detectMultiScale(gray, 1.1, 1)
    bikes = mb_cascade.detectMultiScale(gray, 1.1, 1)
    # Draw detections: cars red, buses green, bikes blue (BGR).
    for (x, y, w, h) in cars:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    for (x, y, w, h) in buses:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    for (x, y, w, h) in bikes:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('video', img)
    # Destroys the windows after ESC key press
    key = cv2.waitKey(10)
    if key == 27:
        cv2.destroyAllWindows()
        break
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter course_listing.course (related_name) and schedule.title default.

    NOTE(review): b'schedule' is a bytes default -- a Python 2 artifact;
    on Python 3 a plain str is expected for a CharField default.
    """

    dependencies = [
        ('course_selection', '0014_auto_20150103_0906'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course_listing',
            name='course',
            field=models.ForeignKey(related_name='course_listings', to='course_selection.Course'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='schedule',
            name='title',
            field=models.CharField(default=b'schedule', max_length=100),
            preserve_default=True,
        ),
    ]
|
# -*- coding:utf-8 -*-
# @Time:2021/3/6 18:48
# @Author: explorespace
# @Email: cyberspacecloner@qq.com
# @File: SVMwithSMO.py
# software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs, make_circles, make_moons
from sklearn.preprocessing import StandardScaler
class SMO:
    """State container for Platt's Sequential Minimal Optimization solver.

    Holds the training data, hyperparameters and the mutable solver state
    (multipliers, bias, error cache) shared by take_step / examine_example
    / train.
    """

    def __init__(self, X, y, C, kernel, Lambdas, b, errors, eps, tol):
        self.X = X  # training data vector
        self.y = y  # class label vector
        self.C = C  # regularization parameter
        self.kernel = kernel  # kernel function
        self.Lambdas = Lambdas  # lagrange multiplier vector
        self.b = b  # scalar bias term
        self.errors = errors  # error cache
        self._obj = []  # record of objective function value
        self.m = len(self.X)  # store size of training set
        self.eps = eps  # multiplier-change tolerance used by take_step
        self.tol = tol  # KKT-violation tolerance used by examine_example
def linear_kernel(x, y, b=1):
    """Linear kernel: inner product of x and y plus a constant offset b."""
    return np.matmul(x, np.transpose(y)) + b
def gaussian_kernel(x, y, sigma=1):
    """RBF kernel exp(-||x - y||^2 / (2 sigma^2)).

    Handles vector/vector, matrix/vector (row-wise) and matrix/matrix
    (full pairwise Gram matrix) inputs.
    """
    denom = 2 * sigma ** 2
    x_dims, y_dims = np.ndim(x), np.ndim(y)
    if x_dims == 1 and y_dims == 1:
        # Single pair of vectors.
        sq_dist = np.linalg.norm(x - y, 2) ** 2
    elif (x_dims > 1 and y_dims == 1) or (x_dims == 1 and y_dims > 1):
        # One matrix, one vector: distance per row.
        sq_dist = np.linalg.norm(x - y, 2, axis=1) ** 2
    elif x_dims > 1 and y_dims > 1:
        # Two matrices: broadcast to all row pairs.
        sq_dist = np.linalg.norm(x[:, np.newaxis] - y[np.newaxis, :], 2, axis=-1) ** 2
    return np.exp(-sq_dist / denom)
def objective_function(Lambdas, target, kernel, X_train):
    """SVM dual objective: sum(l_i) - 1/2 sum_ij y_i y_j K_ij l_i l_j."""
    # np.outer builds the same pairwise products as [:, None] * [None, :].
    pairwise_labels = np.outer(target, target)
    pairwise_lambdas = np.outer(Lambdas, Lambdas)
    quadratic_term = np.sum(pairwise_labels * kernel(X_train, X_train) * pairwise_lambdas)
    return np.sum(Lambdas) - 0.5 * quadratic_term
def decision_function(Lambdas, target, kernel, X_train, x_test, b):
    """SVM decision value(s) for x_test: sum_i l_i y_i K(x_i, x_test) - b."""
    signed_weights = Lambdas * target
    kernel_values = kernel(X_train, x_test)
    return signed_weights @ kernel_values - b
def plot_decision_boundary(model, ax, resolution=100, colors=('b', 'k', 'r'), levels=(-1, 0, 1)):
    """Plots the model's decision boundary on the input axes object.

    Range of decision boundary grid is determined by the training data.
    Returns decision boundary grid and axes object (`grid`, `ax`).
    """
    # Generate coordinate grid of shape [resolution x resolution]
    # and evaluate the model over the entire space
    x_range = np.linspace(model.X[:, 0].min(), model.X[:, 0].max(), resolution)
    y_range = np.linspace(model.X[:, 1].min(), model.X[:, 1].max(), resolution)
    # One decision_function call per grid point (O(resolution^2) Python calls).
    grid = [[decision_function(model.Lambdas, model.y, model.kernel, model.X, np.array([xr, yr]), model.b) for xr in x_range] for yr in y_range]
    grid = np.array(grid).reshape(len(x_range), len(y_range))
    # Plot decision contours using grid and
    # make a scatter plot of training data
    ax.contour(x_range, y_range, grid, levels=levels, linewidths=(1, 1, 1),
               linestyles=('--', '-', '--'), colors=colors)
    ax.scatter(model.X[:, 0], model.X[:, 1],
               c=model.y, cmap=plt.cm.viridis, lw=0, alpha=0.25)
    # Plot support vectors (non-zero Lambdas)
    # as circled points (linewidth > 0)
    mask = np.round(model.Lambdas, decimals=2) != 0.0
    ax.scatter(model.X[mask, 0], model.X[mask, 1],
               c=model.y[mask], cmap=plt.cm.viridis, lw=1, edgecolors='k')
    return grid, ax
def take_step(i_1, i_2, model):
    """Attempt a joint optimization step on the multiplier pair (i_1, i_2).

    Inner loop of Platt's SMO: solves the two-variable sub-problem
    analytically, clips to the [L, H] box, updates the bias and the error
    cache in place. Returns (1, model) on a successful update and
    (0, model) when the pair cannot make progress.
    """
    # Skip if chosen Lambdas are the same
    if i_1 == i_2:
        return 0, model
    Lambda_1 = model.Lambdas[i_1]
    Lambda_2 = model.Lambdas[i_2]
    y_1 = model.y[i_1]
    y_2 = model.y[i_2]
    E_1 = model.errors[i_1]
    E_2 = model.errors[i_2]
    s = y_1 * y_2
    # Compute L & H, the bounds on new possible alpha values
    if (y_1 != y_2):
        L = max(0, Lambda_2 - Lambda_1)
        H = min(model.C, model.C + Lambda_2 - Lambda_1)
    elif (y_1 == y_2):
        L = max(0, Lambda_1 + Lambda_2 - model.C)
        H = min(model.C, Lambda_1 + Lambda_2)
    if (L == H):
        return 0, model
    # Compute kernel & 2nd derivative eta
    k_11 = model.kernel(model.X[i_1], model.X[i_1])
    k_12 = model.kernel(model.X[i_1], model.X[i_2])
    k_22 = model.kernel(model.X[i_2], model.X[i_2])
    eta = 2 * k_12 - k_11 - k_22
    # Compute new alpha 2 (Lamb_2) if eta is negative
    if (eta < 0):
        Lamb_2 = Lambda_2 - y_2 * (E_1 - E_2) / eta
        # Clip Lamb_2 based on bounds L & H
        if L < Lamb_2 < H:
            Lamb_2 = Lamb_2
        elif (Lamb_2 <= L):
            Lamb_2 = L
        elif (Lamb_2 >= H):
            Lamb_2 = H
    # If eta is non-negative, move new Lamb_2 to bound with greater objective function value
    else:
        Lambdas_adj = model.Lambdas.copy()
        Lambdas_adj[i_2] = L
        # objective function output with Lamb_2 = L
        Lobj = objective_function(Lambdas_adj, model.y, model.kernel, model.X)
        Lambdas_adj[i_2] = H
        # objective function output with Lamb_2 = H
        Hobj = objective_function(Lambdas_adj, model.y, model.kernel, model.X)
        if Lobj > (Hobj + model.eps):
            Lamb_2 = L
        elif Lobj < (Hobj - model.eps):
            Lamb_2 = H
        else:
            Lamb_2 = Lambda_2
    # Push Lamb_2 to 0 or C if very close
    if Lamb_2 < 1e-8:
        Lamb_2 = 0.0
    elif Lamb_2 > (model.C - 1e-8):
        Lamb_2 = model.C
    # If examples can't be optimized within model.epsilon (model.eps), skip this pair
    if (np.abs(Lamb_2 - Lambda_2) < model.eps * (Lamb_2 + Lambda_2 + model.eps)):
        return 0, model
    # Calculate new alpha 1 (Lamb_1); the equality constraint fixes it from Lamb_2.
    Lamb_1 = Lambda_1 + s * (Lambda_2 - Lamb_2)
    # Update threshold b to reflect newly calculated Lambdas
    # Calculate both possible thresholds
    b_1 = E_1 + y_1 * (Lamb_1 - Lambda_1) * k_11 + y_2 * (Lamb_2 - Lambda_2) * k_12 + model.b
    b_2 = E_2 + y_1 * (Lamb_1 - Lambda_1) * k_12 + y_2 * (Lamb_2 - Lambda_2) * k_22 + model.b
    # Set new threshold based on if Lamb_1 or Lamb_2 is bound by L and/or H
    if 0 < Lamb_1 and Lamb_1 < model.C:
        b_new = b_1
    elif 0 < Lamb_2 and Lamb_2 < model.C:
        b_new = b_2
    # Average thresholds if both are bound
    else:
        b_new = (b_1 + b_2) * 0.5
    # Update model object with new Lambdas & threshold
    model.Lambdas[i_1] = Lamb_1
    model.Lambdas[i_2] = Lamb_2
    # Update error cache
    # Error cache for optimized Lambdas is set to 0 if they're unbound
    for index, alph in zip([i_1, i_2], [Lamb_1, Lamb_2]):
        if 0.0 < alph < model.C:
            model.errors[index] = 0.0
    # Set non-optimized errors based on equation 12.11 in Platt's book
    non_opt = [n for n in range(model.m) if (n != i_1 and n != i_2)]
    model.errors[non_opt] = model.errors[non_opt] + \
        y_1 * (Lamb_1 - Lambda_1) * model.kernel(model.X[i_1], model.X[non_opt]) + \
        y_2 * (Lamb_2 - Lambda_2) * model.kernel(model.X[i_2], model.X[non_opt]) + model.b - b_new
    # Update model threshold
    model.b = b_new
    return 1, model
def examine_example(i_2, model):
    """Check example i_2 against the KKT conditions and try to optimize it.

    Uses Platt's second-choice heuristics to pick the partner index i_1:
    first the max-|error-difference| candidate, then the unbound
    multipliers from a random start, then all multipliers. Returns
    (1, model) when a step succeeded, else (0, model).
    """
    y_2 = model.y[i_2]
    Lambda_2 = model.Lambdas[i_2]
    E_2 = model.errors[i_2]
    r_2 = E_2 * y_2
    # Proceed if error is within specified tolerance (tol)
    if ((r_2 < -model.tol and Lambda_2 < model.C) or (r_2 > model.tol and Lambda_2 > 0)):
        if len(model.Lambdas[(model.Lambdas != 0) & (model.Lambdas != model.C)]) > 1:
            # Use 2nd choice heuristic is choose max difference in error
            if model.errors[i_2] > 0:
                i_1 = np.argmin(model.errors)
            elif model.errors[i_2] <= 0:
                i_1 = np.argmax(model.errors)
            step_result, model = take_step(i_1, i_2, model)
            if step_result:
                return 1, model
        # Loop through non-zero and non-C Lambdas, starting at a random point
        for i_1 in np.roll(np.where((model.Lambdas != 0) & (model.Lambdas != model.C))[0],
                           np.random.choice(np.arange(model.m))):
            step_result, model = take_step(i_1, i_2, model)
            if step_result:
                return 1, model
        # loop through all Lambdas, starting at a random point
        for i_1 in np.roll(np.arange(model.m), np.random.choice(np.arange(model.m))):
            step_result, model = take_step(i_1, i_2, model)
            if step_result:
                return 1, model
    # No progress possible for this example in this pass.
    return 0, model
def train(model):
    """Run the SMO outer loop until a full pass changes no multiplier.

    Alternates between sweeping every example and sweeping only unbound
    (0 < lambda < C) examples, per Platt's algorithm. Appends the dual
    objective to model._obj after each successful step and returns the
    trained model.
    """
    num_changed = 0
    examine_all = 1
    while (num_changed > 0) or (examine_all):
        num_changed = 0
        if examine_all:
            # loop over all training examples
            for i in range(model.Lambdas.shape[0]):
                examine_result, model = examine_example(i, model)
                num_changed += examine_result
                if examine_result:
                    obj_result = objective_function(model.Lambdas, model.y, model.kernel, model.X)
                    model._obj.append(obj_result)
        else:
            # loop over examples where Lambdas are not already at their limits
            for i in np.where((model.Lambdas != 0) & (model.Lambdas != model.C))[0]:
                examine_result, model = examine_example(i, model)
                num_changed += examine_result
                if examine_result:
                    obj_result = objective_function(model.Lambdas, model.y, model.kernel, model.X)
                    model._obj.append(obj_result)
        # One full sweep, then unbound-only sweeps until quiescent, repeat.
        if examine_all == 1:
            examine_all = 0
        elif num_changed == 0:
            examine_all = 1
    return model
if __name__ == '__main__':
    # Linear-kernel demo data: two separable blobs. The actual model run is
    # commented out below; only the shared tolerances eps/tol defined here
    # are used by the later kernel demos.
    X_train, y = make_blobs(n_samples=1000, centers=2,
                            n_features=2, random_state=1)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train, y)
    # SVM convention: labels must be in {-1, +1}.
    y[y == 0] = -1
    # Set model parameters and initial values
    C = 1000.0
    m = len(X_train_scaled)
    initial_Lambdas = np.zeros(m)
    initial_b = 0.0
    # Set tolerances
    tol = 0.01  # error tolerance
    eps = 0.01  # alpha tolerance
    # Instantiate model
    # model = SMO(X_train_scaled, y, C, linear_kernel,
    #             initial_Lambdas, initial_b, np.zeros(m), eps, tol)
    #
    # # Initialize error cache
    # initial_error = decision_function(model.Lambdas, model.y, model.kernel,
    #                                   model.X, model.X, model.b) - model.y
    # model.errors = initial_error
    # np.random.seed(0)
    # output = train(model)
    # fig, ax = plt.subplots()
    # grid, ax = plot_decision_boundary(output, ax)
    # plt.show()
def guass_kernel():
    """Demo: train SMO with the Gaussian kernel on concentric circles and plot.

    NOTE(review): reads eps/tol defined in the __main__ block, so this
    only works when the file runs as a script, not when imported.
    """
    X_train, y = make_circles(n_samples=500, noise=0.1,
                              factor=0.1,
                              random_state=1)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train, y)
    # SVM convention: labels must be in {-1, +1}.
    y[y == 0] = -1
    # Set model parameters and initial values
    C = 1.0
    m = len(X_train_scaled)
    initial_Lambdas = np.zeros(m)
    initial_b = 0.0
    # Instantiate model
    model = SMO(X_train_scaled, y, C, gaussian_kernel,
                initial_Lambdas, initial_b, np.zeros(m), eps, tol)
    # Initialize error cache
    initial_error = decision_function(model.Lambdas, model.y, model.kernel,
                                      model.X, model.X, model.b) - model.y
    model.errors = initial_error
    output = train(model)
    fig, ax = plt.subplots()
    grid, ax = plot_decision_boundary(output, ax)
    plt.show()
def moon():
    """Demo: train SMO with a narrow (sigma=0.5) Gaussian kernel on half-moons.

    NOTE(review): reads eps/tol defined in the __main__ block, so this
    only works when the file runs as a script, not when imported.
    """
    X_train, y = make_moons(n_samples=500, noise=0.1,
                            random_state=1)
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train, y)
    # SVM convention: labels must be in {-1, +1}.
    y[y == 0] = -1
    # Set model parameters and initial values
    C = 1.0
    m = len(X_train_scaled)
    initial_Lambdas = np.zeros(m)
    initial_b = 0.0
    # Instantiate model with a sigma-0.5 RBF kernel via a lambda wrapper.
    model = SMO(X_train_scaled, y, C, lambda x, y: gaussian_kernel(x, y, sigma=0.5),
                initial_Lambdas, initial_b, np.zeros(m), eps, tol)
    # Initialize error cache
    initial_error = decision_function(model.Lambdas, model.y, model.kernel,
                                      model.X, model.X, model.b) - model.y
    model.errors = initial_error
    output = train(model)
    fig, ax = plt.subplots()
    grid, ax = plot_decision_boundary(output, ax)
    plt.show()
# Run the half-moons demo (the circles demo is available via guass_kernel()).
# guass_kernel()
moon()
"""
test_djdt_flamegraph
----------------------------------
Tests for `djdt_flamegraph` module.
"""
import unittest
from djdt_flamegraph import djdt_flamegraph
from djdt_flamegraph import FlamegraphPanel
from djdt_flamegraph import flamegraph
class TestDjdtFlamegraph(unittest.TestCase):
    """Smoke tests for the djdt_flamegraph module."""

    def setUp(self):
        # No shared fixtures required.
        pass

    def test_subprocess(self):
        # stats_to_svg should render a collapsed-stack sample into SVG markup.
        stack = 'unix`_sys_sysenter_post_swapgs;genunix`close 5'
        res = flamegraph.stats_to_svg(stack)
        self.assertIn('svg version="1.1"', res)

    def test_000_something(self):
        # Placeholder kept from the project template.
        pass
|
from .resnet3d import Resnet3DBuilder
|
import sys
def countingValleys(n, s):
    """Count the valleys in a hike of n 'U'/'D' steps.

    A valley starts whenever a down-step leaves sea level (altitude 0);
    returns the number of such descents.
    """
    altitude = 0
    valley_count = 0
    for step in s[:n]:
        delta = 1 if step == 'U' else -1
        if delta == -1 and altitude == 0:
            valley_count += 1
        altitude += delta
    return valley_count
if __name__ == '__main__':
    # HackerRank-style stdin: first line is n, second line the step string.
    lines = [i.strip() for i in sys.stdin]
    n = int(lines[0])
    s = lines[1]
    result = countingValleys(n, s)
    print(result)
|
'''this is to test verious algorithms for determining the peice size of a torrent'''
def test(torrentSize):
#piece size = 2^(19+floor(max[Log[2, x] - 28, 0]/2)) # http://www.wolframalpha.com/input/?i=2%5E(19%2Bfloor(max%5BLog%5B2,+x%5D+-+28,+0%5D%2F2)),+x%3D1024*1024*1024*1024%2B1
#number of peices = ceil(x/2^(19+floor(max[Log[2, x] - 28, 0]/2))) # http://www.wolframalpha.com/input/?i=Plot%5Bceil(x%2F2%5E(19%2Bfloor(max%5BLog%5B2,+x%5D+-+28,+0%5D%2F2))),+%7Bx,+0,+1024*1024*1024*1024%7D%5D
'''Every time the torrentSize x4, the piece size x2'''
# min block size 512*1024
import math
pieceSize = 2**(19 + math.floor(max(math.log2(torrentSize) - 28, 0) // 2))
return pieceSize
def test2(torrentSize):
"""Takes a (int)size, returns a (int) peicessize"""
# http://www.wolframalpha.com/input/?i=Plot%5Bceil(x%2F2%5E(18%2Bfloor(max%5BLog%5B2,+x%5D+-+26,+0%5D%2F2))),+ceil(x%2F2%5E(19%2Bfloor(max%5BLog%5B2,+x%5D+-+28,+0%5D%2F2))),+%7Bx,+0,+1024*1024*1024*1024%7D%5D
# http://www.wolframalpha.com/input/?i=Plot%5Bceil(x%2F2%5E(18%2Bfloor(max%5BLog%5B2,+x%5D+-+26,+0%5D%2F2))),++x%2F2%5E(18%2B(log_2(x)-26)%2F2),+%7Bx,+0,+1024*1024*1024*1024%7D%5D
'''Every time the torrentSize x4, the piece size x2'''
# min block size 256*1024
import math
pieceSize = 2**(18 + math.floor(max(math.log2(torrentSize) - 26, 0) // 2))
return pieceSize
if __name__ == "__main__":
for i in range(0,64):
print(i, 2**i, test2(2**i), 2**i//test(2**i), 64*2**i//test(2**i))
|
import os
class FileHandler:
    """Thin wrapper around a single file path with get/set/delete helpers."""

    def __init__(self, path):
        # Path of the file this handler manages.
        self.path = path

    def get(self):
        """Return the file's entire contents as a string."""
        with open(self.path) as data_file:
            data = data_file.read()
        return data

    def set(self, data):
        """Overwrite the file with *data*.

        Bug fix: the file must be opened for writing -- the original used
        the default read-only mode, so write() always raised an error.
        """
        with open(self.path, 'w') as data_file:
            data_file.write(data)

    def delete(self):
        """Remove the file after an interactive 'y'/'yes' confirmation."""
        # NOTE(review): raw_input is Python 2 only; on Python 3 use input().
        confirm = str(raw_input('You are about to remove {0} are you sure?'.format(self.path)))
        if confirm == 'y' or confirm == 'yes':
            os.remove(self.path)

    def __str__(self):
        return str(self.path)
|
import datetime as dt
import time
import quick_start
import Events_storage as EVENTS_STORAGE
def add_events(events):
    """Register Google-Calendar events in the shared EVENTS_STORAGE dict.

    Keys are event start times as Unix timestamps; values hold the event
    summary plus a not-yet-pushed flag. Existing entries are left untouched.
    """
    for event in events:
        # Start looks like '2020-10-17T14:00:00+07:00'; drop the 6-char
        # UTC-offset suffix before parsing as a naive local datetime.
        start_iso = event['start']['dateTime'][0:-6]
        start_dt = dt.datetime.strptime(start_iso, "%Y-%m-%dT%H:%M:%S")
        start_ts = time.mktime(start_dt.timetuple())
        text_event = event['summary']
        print(text_event)
        # Only insert new events; re-adding would reset the isPush flag.
        if start_ts not in EVENTS_STORAGE.EVENTS_STORAGE.keys():
            EVENTS_STORAGE.EVENTS_STORAGE[start_ts] = {"msg": text_event, 'isPush': False}
from ninja import Router
router = Router()
# Simple liveness probe for this API router.
@router.get("/health")
def get_health(request):
    """Health-check endpoint: always reports the service as up."""
    return {"result": True}
|
from django.db import models
from major.models import Major
from teacher.models import Teacher
# Create your models here.
class Classes(models.Model):
    """A class (student group) belonging to one major and led by one teacher."""
    name = models.CharField(max_length = 20)
    # DO_NOTHING: deleting a Major/Teacher leaves references dangling;
    # NOTE(review): presumably integrity is handled elsewhere — confirm.
    major = models.ForeignKey(Major,on_delete=models.DO_NOTHING)
    teacher = models.ForeignKey(Teacher,on_delete=models.DO_NOTHING)
    def __str__(self):
        return self.name
    class Meta:
        # A class name must be unique within its major.
        unique_together = ['name','major']
        verbose_name = '班级'
        verbose_name_plural = '班级'
|
from django.db import models
# Create your models here.
class Check(models.Model):
    """One availability snapshot of a bookable slot at a venue/location."""
    venue = models.CharField(max_length=255, db_index=True)
    # When this snapshot was recorded.
    timestamp = models.DateTimeField(auto_now_add=True)
    location_id = models.CharField(max_length=255, default="", db_index=True)
    location = models.CharField(max_length=255, db_index=True)
    # Start time of the slot being checked.
    slot_time = models.DateTimeField()
    # -1 means "unknown" (count not reported by the source).
    open_seats = models.IntegerField(default=-1)
    taken_seats = models.IntegerField(default=-1)
    is_full = models.BooleanField(default=False)
from was import app, db
from was.models import *
from was.utils import token, smtp
from was.decorators import args, auth
from urllib.request import urljoin
from flask import request, jsonify, render_template
from pony.orm import db_session, core
from datetime import datetime, timedelta
@app.route('/account/registerNewUser', methods=['POST'])
@auth.is_valid_client()
@args.is_exists(body=['email', 'nickName', 'password'])
@db_session
def registerNewUser():
    """Start registration: store a pending user and email a confirmation link.

    Expects JSON body with email, nickName, password; responds with a JSON
    statusCode/result pair (200 on success, 400 on duplicate/invalid email).
    """
    try:
        # Generate an OTP that does not collide with any pending request.
        otp = token.generateOtp(lambda otp:PendingRequestUser.get(otpToken=otp))
        url = urljoin(request.base_url.split('account')[0], '/account/emailConfirm?otp=' + otp)
        parameters = request.get_json()
        parameters['otpToken'] = otp
        if User.get(email=parameters['email']):
            return jsonify({'statusCode': 400, 'result': 'Duplicated email: %s' % request.get_json()['email']})
        # Re-registering replaces any earlier pending request for this email.
        old_pending_user = PendingRequestUser.get(email=parameters['email'])
        if old_pending_user:
            old_pending_user.delete()
            db.commit()
        PendingRequestUser(**parameters)
        db.commit()
        smtp.sendConfirmMail(parameters['email'], parameters['nickName'], url)
        return jsonify({'statusCode': 200, 'result': 'Check your mailbox to email confirmation'})
    except ValueError:
        # NOTE(review): presumably raised by smtp/email validation — confirm source.
        return jsonify({'statusCode': 400, 'result': 'Invalid email: %s' % request.get_json()['email']})
@app.route('/account/emailConfirm', methods=['GET'])
# NOTE(review): decorator checks the body for 'otp' but the handler reads it
# from the query string — confirm args.is_exists also covers query params.
@args.is_exists(body=['otp'])
@db_session
def emailConfirm():
    """Finish registration: promote a pending user once the emailed OTP is used.

    Valid, unexpired OTP -> create User + UserConfiguration and render the
    welcome page; expired or unknown OTP -> JSON 400 response.
    """
    otp = request.args.get('otp')
    pending_user = PendingRequestUser.get(otpToken=otp)
    if pending_user and pending_user.endDate > datetime.now():
        parameters = pending_user.to_dict()
        # Strip bookkeeping fields before constructing the real User.
        del parameters['endDate']
        del parameters['createDate']
        del parameters['otpToken']
        user = User(**parameters)
        UserConfiguration(user=user.email)
        pending_user.delete()
        db.commit()
        return render_template('welcome.html')
    elif pending_user and pending_user.endDate < datetime.now():
        return jsonify({'statusCode': 400, 'result': 'Token is expired'})
    else:
        return jsonify({'statusCode': 400, 'result': 'Invalid otp'})
|
# Python program to make an introduction
name = input('My name is Maeve. What is your name?\n')
print ('Hi,', name)
|
from tkinter import ttk
from tkinter import *
from db_class import Profile
window = Tk()
window.geometry("1000x500")
# SQLite database file holding the saved profiles.
path = "Profiles.sql"
def View():
    """Load every profile from the database and append it to the treeview."""
    profiles = Profile(path)
    data = profiles.get_all_profiles()
    for profile in data:
        print(profile)  # echo each record to stdout for debugging
        tree.insert("", END, values=profile)
# Five-column table for the profile records; headings only, no tree column.
tree= ttk.Treeview(window, column=("column1", "column2", "column3", "column4", "column5" ), show='headings')
tree.heading("#1", text="ID")
tree.heading("#2", text="FIRST NAME")
tree.heading("#3", text="SURNAME")
tree.heading("#4", text="EMAIL")
tree.heading("#5", text="PASSWORD")
tree.pack()
# Button that (re-)populates the table from the database.
b2 = Button(text="view data", command=View)
b2.pack()
window.mainloop()
# Generated by Django 3.1.5 on 2021-04-18 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter notesinfo.date: make it nullable with a string default."""

    dependencies = [
        ('home', '0002_notesinfo_date'),
    ]
    operations = [
        migrations.AlterField(
            model_name='notesinfo',
            name='date',
            # NOTE(review): '00-00-00' is not a valid date value for a
            # DateField; saving a row that relies on this default will fail
            # validation — confirm whether the default should be None.
            field=models.DateField(default='00-00-00', null=True),
        ),
    ]
|
#ece366Project 2
import numpy as np
import array as ar
def hexdecode(char):
    """Return the integer value (0-15) of a single hexadecimal digit.

    Generalized to accept upper- as well as lower-case letters.
    Raises KeyError for any non-hex character (same contract as before).
    """
    return {
        '0': 0,
        '1': 1,
        '2': 2,
        '3': 3,
        '4': 4,
        '5': 5,
        '6': 6,
        '7': 7,
        '8': 8,
        '9': 9,
        'a': 10,
        'b': 11,
        'c': 12,
        'd': 13,
        'e': 14,
        'f': 15,
    }[char.lower()]
def hextobin(hexitem):
    """Decode a newline-separated hex dump into an array('I') of 32-bit words.

    Only the last 8 hex characters of each line are decoded (leading
    addresses/labels are ignored).
    """
    binary = ar.array('I', [0])
    # Keep only the trailing 8 characters of every line.
    hexterms = [term[-8:] for term in hexitem.split('\n')]
    k = 0
    for term in hexterms:
        if not term:
            # BUG FIX: a trailing newline produced an empty term that used to
            # inject a spurious all-zero instruction word.
            continue
        for c, char in enumerate(term):
            # Most-significant nibble first: position c contributes 16^(7-c).
            binary[k] += hexdecode(char) * 16 ** (7 - c)
        binary.append(0)
        k += 1
    # Drop the scratch slot appended after the final instruction.
    if binary[k] == 0:
        binary.pop()
    return binary
class mipsMachine:
    """Minimal MIPS-32 simulator over an array of decoded instruction words.

    Registers are numpy int32 (wrap-around arithmetic); memory is
    word-addressed starting at self.offset. self.count tallies executed
    instructions as [total, ALU, jump, branch, memory, other].
    """
    def __init__(self, arra):
        # arra: array('I') of instruction words as produced by hextobin().
        self.reg = np.int32([0]*32)
        self.mem = np.int32([0]*0x4c)
        self.code = arra
        self.pc = 0
        # Data segment base address: mem[0] models address 0x2000.
        self.offset = 0x2000
        #instruction count [total, ALU, jump, branch, memory, other]
        self.count = [0]*6
    def execute(self):
        """Run until pc falls off the end of the code or the halt word is hit."""
        bottom = len(self.code.tolist())
        while self.pc < bottom :
            command= self.code[self.pc]
            # Top 6 bits select the instruction format.
            types = command>>26
            # 268500991 == 0x1000ffff: beq $0,$0,-1 (branch-to-self) = halt.
            if command == 268500991:
                self.count[0]+=1
                self.count[3]+=1
                break
            elif types==0:
                #r-type
                self.rtype(command)
            elif types > 3:
                #i-type
                self.itype(command)
            else:
                self.jtype(command)
                #j-type
            self.pc += 1
            self.reg[0] = 0 #reg0 is always 0
            self.count[0]+=1
        self.result()
    # Logical (zero-fill) right shift emulated on Python's signed ints.
    def rshift(self, val, n): return val>>n if val >= 0 else (val+0x100000000)>> n
    def rtype(self,command):
        """Decode and execute an R-type (register/shift) instruction."""
        self.count[1]+=1
        #extract values from command
        rs = 0x1f & (command >> 21)
        rt = 0x1f & (command >> 16)
        rd = 0x1f & (command >> 11)
        sh = 0x1f & (command >> 6)
        func = 0x3f & command
        if func == 0x00:#sll
            self.reg[rd] = self.reg[rt] << sh
        elif func == 0x02:#srl
            self.reg[rd] = self.rshift(self.reg[rt], sh)
        elif func == 0x03:#sra
            # numpy int32 >> is arithmetic, so the sign bit is preserved.
            self.reg[rd] = self.reg[rt] >> sh
        elif func == 0x20:#add
            self.reg[rd]=self.reg[rs]+self.reg[rt]
        elif func == 0x21:#addu
            self.reg[rd]=self.reg[rs]+self.reg[rt]
            # addu/subu/slt/sltu are tallied as "other" instead of ALU.
            self.count[1]-=1
            self.count[5]+=1
        elif func == 0x22:#sub
            self.reg[rd]=self.reg[rs]-self.reg[rt]
        elif func == 0x23:#subu
            self.reg[rd]=self.reg[rs]-self.reg[rt]
            self.count[1]-=1
            self.count[5]+=1
        elif func == 0x24:#and
            self.reg[rd]=self.reg[rs]&self.reg[rt]
        elif func == 0x25:#or
            self.reg[rd]=self.reg[rs]|self.reg[rt]
        elif func == 0x26:#xor
            self.reg[rd]=self.reg[rs]^self.reg[rt]
        elif func == 0x27:#nor
            self.reg[rd]= ~(self.reg[rs]|self.reg[rt])
        elif func == 0x2a:#slt
            self.count[1]-=1
            self.count[5]+=1
            if self.reg[rs] < self.reg[rt]:
                self.reg[rd] = 1
            else:
                self.reg[rd] = 0
        elif func == 0x2b:#sltu
            self.count[1]-=1
            self.count[5]+=1
            # NOTE(review): this reuses the signed comparison; a true sltu
            # needs an unsigned compare — confirm against the assignment spec.
            if self.reg[rs] < self.reg[rt]:
                self.reg[rd] = 1
            else:
                self.reg[rd] = 0
    def itype(self,command):
        """Decode and execute an I-type (immediate/branch/memory) instruction."""
        #extract values from command
        opcode= 0x3f & (command >> 26)
        rs = 0x1f & (command >> 21)
        rt = 0x1f & (command >> 16)
        # Sign-extended 16-bit immediate (branches, addi, lw/sw).
        imm = np.int16(0xffff & command) #check this
        if opcode == 4:#beq
            if self.reg[rs] == self.reg[rt]:
                # pc is a word index here, so the offset is added directly.
                self.pc += imm
            self.count[3] += 1
        if opcode == 5:#bne
            if self.reg[rs] != self.reg[rt]:
                self.pc += imm
            self.count[3] += 1
        if opcode == 0xc: #andi
            # Logical immediates are zero-extended, not sign-extended.
            imm = (0xffff & command) #check this
            self.reg[rt] = self.reg[rs] & imm
            self.count[1] += 1
        if opcode == 0xd: #ori
            imm = (0xffff & command) #check this
            self.reg[rt] = self.reg[rs] | imm
            self.count[1] += 1
        if opcode == 8:#addi
            self.reg[rt]= self.reg[rs] + imm
            self.count[1] += 1
        if opcode == 0xf:#lui (comment fixed: this is load-upper-immediate, not addi)
            self.reg[rt]= imm << 16
            self.count[1] += 1
        if opcode ==0x2b:#sw
            # Byte address -> word index into the simulated data segment.
            self.mem[(self.reg[rs]+imm-self.offset)>>2] = self.reg[rt]
            self.count[4] += 1
        if opcode ==0x23:#lw
            self.reg[rt] = self.mem[(self.reg[rs]+imm-self.offset)>>2]
            self.count[4] += 1
    def jtype(self,command):
        """Decode and execute a J-type (jump) instruction."""
        self.count[2]+=1
        #extract values from command
        addr = 0x3ffffff & (command)
        opcode = 0x3f & (command >> 26)
        if opcode == 2:
            # Classic MIPS jump: keep the top 4 pc bits, splice in addr<<2,
            # then >>2 because this simulator keeps pc as a word index.
            self.pc = (((self.pc<<2)&0xf0000000)| addr<<2) >> 2
    def result(self):
        """Dump registers, memory, final pc, and the instruction-count stats."""
        print('Registers:')
        i = 0
        for thing in self.reg:
            print(str(i) + ': ' + str(hex(thing & 0xffffffff)))
            i += 1
        c=0
        # Report pc as a byte address.
        self.pc *= 4
        print('PC: ' + str(self.pc))
        print('Memory: Hex Decimal')
        while c < 0x15:
            print(str(format(self.offset+(c*4),'0x'))+': '+hex(self.mem[c]&0xffffffff)+' '+str(self.mem[c]))
            c+=1
        print('Total instructions run: '+str(self.count[0]))
        print('ALU instructions: '+str(self.count[1]))
        print('Jump Instructions: '+str(self.count[2]))
        print('Branch Instructions: '+str(self.count[3]))
        print('Memory Instructions: '+str(self.count[4]))
        print('Other Instructions: '+str(self.count[5]))
#end mipsMachine class
infile = open("in.txt", 'r')
mipshex = infile.read()
binary = hextobin(mipshex)
outfile = open('out.txt','w')
#to check my work
for instruction in binary:
outfile.write(format(instruction, '032b')+'\n')
outfile.close()
order66 = mipsMachine(binary)
order66.execute()
|
import zipfile
import os
# add more dirs ....
# Directories whose entire contents are bundled into the archive below.
dirs = ['icons']
def zipdir(path, ziph):
    """Recursively add every file under *path* to the open ZipFile *ziph*.

    Archive names are the paths as walked, i.e. relative to the current
    working directory when *path* is relative.
    """
    # BUG FIX: the walk tuple's directory list was named `dirs`, shadowing
    # the module-level `dirs` constant; renamed since it is unused here.
    for root, _subdirs, files in os.walk(path):
        for file in files:
            ziph.write(os.path.join(root, file))
# Build the extension bundle: all listed directories plus the loose files.
zf = zipfile.ZipFile('code.zip', 'w')
for d in dirs:
    zipdir(d, zf)
# if you want to add more file
zf.write('content.js')
zf.write('jquery.min.js')
zf.write('manifest.json')
zf.close()
|
import tarfile
import os
from collections import defaultdict
from pprint import pprint
import cPickle as pickle
import numpy as np
import string
##this can only be run on meez02, computes the document frequency of each hashtag, creates a dictionary object where each key is the #hastag and value is the df (number of tweets where that hashtag appears)
def computeHashTagIDF():
    """Compute the document frequency of every hashtag across the 2009 corpus.

    Python 2 only (cPickle); must run on meez02 where /e2/twitter/ lives.
    Writes a {hashtag: df} dict to hashtagDocumentFrequencies.p, where df is
    the number of tweets the hashtag appears in (deduplicated per tweet).
    """
    tweet_files = [i for i in os.listdir("/e2/twitter/") if i.endswith(".txt")]
    #read compressed tweets file, the is the whole twitter data set from 2009
    hashtag_df = defaultdict(lambda : 0)
    for tweet_file in tweet_files:
        # NOTE(review): the loop variable is rebound to the open handle and the
        # file is never closed — relies on GC to release descriptors.
        tweet_file = open("/e2/twitter/"+tweet_file,'r')
        num_hashtags = 0
        for index,line in enumerate(tweet_file):
            # 'W' lines carry the tweet text in the second tab field.
            if line.split("\t")[0]=="W":
                tweet = line.split("\t")[1].strip()
                # set() dedupes, so each hashtag counts once per tweet.
                hashtags = list(set([i for i in tweet.split() if (i[0]=="#" and len(i)>1)]))
                num_hashtags+=len(hashtags)
                for hashtag in hashtags:
                    hashtag_df[hashtag]+=1
        #print num_hashtags
    #print hashtag_df.items()
    df_dict_file = open("/home/mattburg/hashtagDocumentFrequencies.p",'w')
    pickle.dump(dict(hashtag_df.items()),(df_dict_file))
def getHashtagDict(lines):
    """Count, per hashtag, the number of tweets it occurs in.

    Each line is tab-separated with the tweet text in the third field.
    Hashtags are lower-cased and trailing punctuation is stripped; a
    hashtag is counted at most once per tweet.
    """
    counts = defaultdict(lambda : 0)
    for line in lines:
        tweet = line.split("\t")[2].strip()
        tags = {tok.lower().rstrip(string.punctuation)
                for tok in tweet.split()
                if (tok[0] == "#" and len(tok) > 1)}
        for tag in tags:
            counts[tag] += 1
    return dict(counts.items())
def getUserHashtagDict(lines):
    """Map each lower-cased hashtag to the number of distinct users tweeting it.

    Each line is tab-separated: field 2 is the user id, field 3 the tweet.
    Returns a defaultdict whose values have been replaced by integer counts.
    """
    user_tweet_tuples = [(i[1], i[2].strip()) for i in (j.split("\t") for j in lines)]
    # Removed an unused `users` list that was built and never read.
    hashtag_user_dict = defaultdict(lambda : [])
    for user, tweet in user_tweet_tuples:
        # Dedupe within a single tweet so one tweet adds a user once per tag.
        hashtags = list(set([i.lower() for i in tweet.split() if (i[0] == "#" and len(i) > 1)]))
        for hashtag in hashtags:
            hashtag_user_dict[hashtag] = hashtag_user_dict[hashtag] + [user]
    # Collapse each user list to its distinct-user count.
    for key in hashtag_user_dict.keys():
        hashtag_user_dict[key] = len(set(hashtag_user_dict[key]))
    return hashtag_user_dict
def getTopKHashtags():
    """Rank hashtags per training file by tf-idf and write the top 100 of each.

    Python 2 only (print statements). Reads the precomputed document
    frequencies, scores each file's hashtags, zeroes out boilerplate tags
    (#ff, #fb, ...), and appends one "<file>\t<tags>" line per input file.
    """
    NUM_TWEETS = 32342002204
    path = os.environ["CHOCOLATE"]+"/data/prelim_data/lists/training/"
    df_dict = pickle.load(open(os.environ["CHOCOLATE"]+"/data/temp_data/hashtagDocumentFrequencies.p",'r'))
    tweet_files = os.listdir(path)
    hashtag_outfile = open(os.environ["CHOCOLATE"]+"/data/temp_data/hashtags.txt",'w')
    for tweet_file in tweet_files:
        print tweet_file
        lines = open(path+tweet_file,'r').readlines()
        #hashtag_users_dict = getUserHashtagDict(lines)
        listFrequency_dict = getHashtagDict(lines)
        scores = []
        for hashtag in listFrequency_dict.keys():
            try:
                # Standard tf-idf; hashtags absent from the global df table
                # fall back to idf = log(NUM_TWEETS) below.
                tf_idf_score = listFrequency_dict[hashtag]*np.log(NUM_TWEETS/df_dict[hashtag])#*hashtag_users_dict[hashtag]
            except KeyError:
                tf_idf_score = listFrequency_dict[hashtag]*np.log(NUM_TWEETS)#*hashtag_users_dict[hashtag]
            scores.append((hashtag,tf_idf_score,listFrequency_dict[hashtag]))
        # Zero out meme/boilerplate tags (follow-friday etc.) so they never rank.
        for i,score in enumerate(scores):
            if "ff" in score[0] or "fb" in score[0] or "ww" in score[0] or "mm" in score[0]:
                scores[i]=(score[0],0)
        scores = sorted(scores,key = lambda x: x[1],reverse=True)
        print '\n\n'
        top_hashtags = [i[0] for i in scores[0:100]]
        hashtag_outfile.write("%s\t%s\n" % (tweet_file,' '.join(top_hashtags)))
    hashtag_outfile.close()
def main():
    # Entry point: only the tf-idf ranking step is run by default.
    getTopKHashtags()
if __name__ == "__main__":
    main()
|
import xml.etree.ElementTree as ET
import os
import re
from json import dumps
from pathlib import Path
import urllib.request, json
import time
import requests
from datetime import datetime
from datetime import date
# Gets the file path
def getFilePath():
    """Return the absolute directory containing this script.

    Removed the dead local `filePath`, which was assigned but never used.
    """
    return os.path.dirname(os.path.abspath(__file__))
# Loads data from the config file
def loadSet():
    """Read the Strava settings from cvConfig.xml in the working directory.

    Each value is returned as a single-element list of stripped text, in the
    fixed child order of the first element of the XML root.
    """
    cvConfigFiletree = ET.parse('cvConfig.xml')
    cvConfigFileroot = cvConfigFiletree.getroot()
    url = [cvConfigFileroot[0][0].text.strip()]
    clubID = [cvConfigFileroot[0][1].text.strip()]
    dataHeader = [cvConfigFileroot[0][2].text.strip()]
    distanceRun = [cvConfigFileroot[0][3].text.strip()]
    distanceWalk = [cvConfigFileroot[0][4].text.strip()]
    distanceBike = [cvConfigFileroot[0][5].text.strip()]
    # NOTE(review): child index 6 is intentionally skipped here — confirm
    # what that config entry holds.
    pageLines = [cvConfigFileroot[0][7].text.strip()]
    client_id = [cvConfigFileroot[0][8].text.strip()]
    client_secret = [cvConfigFileroot[0][9].text.strip()]
    search_term = [cvConfigFileroot[0][10].text.strip()]
    return url, clubID, dataHeader, distanceRun, distanceWalk, distanceBike, pageLines, client_id, client_secret, search_term
def returnTheDate():
    """Return today's date as '-D-M-YYYY' (no zero padding, leading dash)."""
    today = date.today()
    return "-{}-{}-{}".format(today.day, today.month, today.year)
def writeDataToFile(dWalk, dRun, dRide):
    """Write the walk/run/ride distance totals to a dated text file.

    The file is named 'myfile-D-M-YYYY.txt'; an existing file for today is
    overwritten. BUG FIX: the local variable `date` shadowed the imported
    datetime.date; renamed. Also switched to a context manager so the file
    is closed even if the write fails.
    """
    date_suffix = returnTheDate()
    file_name = 'myfile' + date_suffix + '.txt'
    print(file_name)
    # 'w' creates the file if missing and truncates (overwrites) otherwise.
    with open(file_name, "w") as out_file:
        dataToFile = "Walk:"+ str(dWalk)+" Run:"+str(dRun)+" Ride:"+str(dRide)
        out_file.write(dataToFile)
def outputTotals(dWalk, dRun, dRide):
    """Print the distance totals, first in metres/km-style, then in miles.

    Inputs are distances in metres; 0.621371 converts km to miles.
    BUG FIX: the miles "Total Run/Walk" used to evaluate
    dWalk + dRun/1000*0.621371 (precedence bug: dWalk was never converted);
    the sum is now parenthesized before conversion.
    """
    print("Totals --- Walk Distance: ",dWalk, " Run Distance: ",dRun," Total Run/Walk=",dWalk+dRun, "Ride Distance: ",dRide, " Total KM:", dWalk+dRun+dRide)
    print("Totals --- Walk Distance: ",(dWalk)/1000*0.621371, " Run Distance: ",dRun/1000*0.621371)
    print("Total Run/Walk=",(dWalk+dRun)/1000*0.621371, "Ride Distance: ",dRide/1000*0.621371, " Total Miles:", (dWalk+dRun+dRide)/1000*0.621371)
def refreshTokens(strava_tokens, access_token,client_secret,client_id):
    """Exchange the stored refresh token for a fresh Strava access token.

    Persists the full new token set to strava_tokens.json and returns the
    new access token. NOTE(review): the `access_token` parameter is never
    read, only overwritten — consider dropping it at the next API change.
    """
    # Make Strava auth API call with current refresh token
    response = requests.post(
        url = 'https://www.strava.com/oauth/token',
        data = {
            'client_id': client_id,
            'client_secret': client_secret,
            'grant_type': 'refresh_token',
            'refresh_token': strava_tokens['refresh_token']
        }
    )
    # Save response as json in new variable
    new_strava_tokens = response.json()
    # Save new tokens to file
    with open('strava_tokens.json', 'w') as outfile:
        json.dump(new_strava_tokens, outfile)
    # Use new Strava tokens from now
    strava_tokens = new_strava_tokens
    access_token=strava_tokens['access_token']
    return access_token
# Main program.
def main():
    """Fetch the club's activities from Strava and total distances by type.

    Flow: load config -> load/refresh OAuth tokens -> call the club
    activities endpoint -> sum Walk/Run/Ride distances for activities whose
    name matches the configured search term -> print and persist totals.
    """
    try:
        #load data from config file
        url, clubID, dataHeader, distanceRun, distanceWalk, distanceBike, pageLines, client_id, client_secret, search_term = loadSet()
        print (dataHeader)
        # Config values arrive as single-element string lists; unwrap them.
        dRun = float(distanceRun[0])
        dWalk = float(distanceWalk[0])
        dRide = float(distanceBike[0])
        pLines = str(pageLines[0])
        client_id = str(client_id[0])
        client_secret = str(client_secret[0])
        search_term = str(search_term[0])
        with open('strava_tokens.json') as json_file:
            strava_tokens = json.load(json_file)
        # NOTE(review): file_path is computed but never used below.
        file_path = getFilePath()
        access_token=strava_tokens['access_token']
        # If access_token has expired then use the refresh_token to get the new access_token
        if strava_tokens['expires_at'] < time.time():
            print("TOKEN - expired")
            try:
                #Refresh tokens
                access_token = refreshTokens(strava_tokens, access_token,client_secret,client_id)
            except:
                # NOTE(review): bare except hides the real failure; at minimum
                # log the exception before continuing with the stale token.
                print("Program was unable to refresh token.")
        # build the URL
        urlApi = url[0].strip()+clubID[0].strip()+"/activities?access_token="+access_token+"&per_page="+pLines
        # Grab the data using the URL
        # NOTE(review): `url` (the config list) is shadowed by the response
        # object here — rename one of them when next touching this code.
        with urllib.request.urlopen(urlApi) as url:
            data = json.loads(url.read().decode())
        #######################################
        # Write Data to File if/when needed
        #######################################
        #file=open("testDataFile.txt","w")
        ##Write race data to file
        #dataToFile = json.dumps(data)
        #file.write(dataToFile)
        ##Close the file
        #file.close()
        # Get/check number of lines fetched
        length = len(data)
        # Loop through the lines and add up distances for Walk, Run and Ride
        for x in range(0,length):
            if re.search(search_term,data[x]["name"]) or re.search(search_term.lower(),data[x]["name"]):
                #Check if Walk/Run/Ride and total up
                if re.search('Walk',data[x]["type"]):
                    print(" --- ", data[x]["athlete"]["firstname"], ", ",data[x]["name"], ", -Walk- " ,", Distance:",",", data[x]["distance"])
                    dWalk = dWalk + data[x]["distance"]
                if re.search('Run',data[x]["type"]):
                    print(" --- ", data[x]["athlete"]["firstname"], ", ",data[x]["name"], ", -Run- " ,", Distance:",",", data[x]["distance"])
                    dRun = dRun + data[x]["distance"]
                if re.search('Ride',data[x]["type"]):
                    print(" --- ", data[x]["athlete"]["firstname"], ", ",data[x]["name"], ", -Ride- " ,", Distance:",",", data[x]["distance"])
                    dRide = dRide + data[x]["distance"]
            # if the activity hasn't got required ID in the title it will be printed with ELSE
            #else:
            #    print(" -ELSE- ", "Name: ", data[x]["athlete"]["firstname"], " ", data[x]["name"], " ", data[x]["distance"])
        #Output totals
        outputTotals(dWalk,dRun,dRide)
        #Output to file
        writeDataToFile(dWalk,dRun,dRide)
    except:
        # NOTE(review): bare except swallows every error (config, network,
        # JSON shape) with one generic message — log the traceback here.
        print("Program was unable to run.")
if __name__ == "__main__":
main()
|
import os
import re
from typing import Tuple, List
class FileWriter:
    """Writes translated (java -> go) source files into an output directory."""

    def write(self, outdata: List[Tuple[str, str]], pathdir=None):
        '''Write every (filename, content) pair in *outdata* under *pathdir*.

        Filenames have every 'java' substring replaced with 'go'.
        BUG FIX: the default directory used to be evaluated once at class
        definition time (freezing os.getcwd()); it is now resolved per call.
        '''
        if pathdir is None:
            pathdir = os.getcwd() + "/tests/out/"
        pathdir = os.path.abspath(pathdir)
        if not os.path.isdir(pathdir):
            os.mkdir(pathdir)
        for name, content in outdata:
            # NOTE(review): replace() hits every 'java' occurrence in the
            # name, not just the extension — confirm that is intended.
            self.__writefile(pathdir + '/' + name.replace('java', 'go'), content)

    def __writefile(self, filepath: str, data: str):
        # BUG FIX: the file handle was opened and never closed; use a
        # context manager so the data is flushed deterministically.
        with open(filepath, 'w') as fout:
            fout.write(data)
|
import numpy as np
from textblob import TextBlob
import sqlite3
import flask
import requests
# i need asr code to generate speech and send it to a server with a timestamp along with the direction of sound at the time
#an endpoint to save it to a words table
#an endpoint to save it to a words table
@app.route('/word/', methods=['GET', 'POST'])
def log_word():
    """Receive one recognized word plus its sound direction.

    NOTE(review): this handler is unfinished — it opens the database but
    never inserts `w`/`d`, never closes the connection, and returns None
    (Flask will raise on a None response). `app` is also not defined in
    this module as shown.
    """
    w = request.form['word']
    d = request.form['direction']
    conn = sqlite3.connect('database.db')
#type of query
#query latest convo
#type of query
@app.route('/lastConvo/', methods=['GET', 'POST'])
def last_convo():
    """Summarize the sentiment of the most recent conversation.

    Pulls the latest rows from the words store, splits the conversation into
    "you" vs "them" by sound-direction angle, and returns a one-phrase
    sentiment ('positive' / 'negitive' / 'pretty neutral') for whichever
    side the 'type' form field ('i' or 'them') selects.

    BUG FIXES: `con.cursor()` -> `conn.cursor()`, `row[i]` -> `rows[i]`,
    removed the unused `interval`, bounded the scan so it cannot run past
    the result set, and used `.sentiment.polarity` (the raw Sentiment tuple
    cannot be compared to a float).
    """
    angleOfhead = 33
    angleTolerance = 5
    secondsToEndConvo = 100
    t = request.form['type']
    #pull last thousand words
    #parse for speaker and convo
    conn = sqlite3.connect('database.db')
    cur = conn.cursor()
    cur.execute("SELECT user_id, MAX(created_at) FROM objects GROUP BY user_id")
    rows = cur.fetchall()
    # Walk rows until the gap between words exceeds the conversation window.
    # NOTE(review): sqlite rows are plain tuples; the .time/.angle/.word
    # attribute access below needs a row factory — confirm the schema.
    convo = []
    i = 0
    lastTime = rows[0].time
    while i < len(rows) and (rows[i].time - lastTime < secondsToEndConvo):
        if abs(rows[i].angle - angleOfhead) > angleTolerance:
            lastTime = rows[i].time
        convo.append(rows[i])
        i = i + 1
    # Separate the words you said from the ones they said, by angle of incidence.
    you = []
    them = []
    for w in convo:
        if abs(w.angle - angleOfhead) > angleTolerance:
            you.append(w.word)
        else:
            them.append(w.word)
    #convert list to strings
    youStr = ""
    themStr = ""
    for w in you:
        youStr = youStr + " " + w
    for w in them:
        themStr = themStr + " " + w
    #return a word describing the sentiment
    if t == "i":
        p = TextBlob(youStr).sentiment.polarity
        if p > .25:
            return "positive"
        if p < -.25:
            return "negitive"
        return "pretty neutral"
    if t == "them":
        p = TextBlob(themStr).sentiment.polarity
        if p > .25:
            return "positive"
        if p < -.25:
            return "negitive"
        return "pretty neutral"
|
import numpy as np
import glob
from core.shelper import *
import logging
from model import sbss_net
from keras.utils import np_utils
import h5py
import os
def main(data_path, n_segments, data_save_path):
    """Extract superpixel patches from per-patient CT volumes into one HDF5 file.

    For every patient folder under *data_path* containing ct_data.npy and
    Heart.npy, slices are cropped, split into *n_segments* superpixels, and
    the resulting patches + labels are written to *data_save_path* as the
    'Patch' and 'Mask' datasets.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    data_path_folder_names = os.listdir(data_path)
    train_imgs = []
    train_label = []
    for folder_name in data_path_folder_names:
        print("在操作", folder_name)
        temp_img_path = os.path.join(data_path, folder_name, "ct_data.npy")
        temp_mask_path = os.path.join(data_path, folder_name, "Heart.npy")
        if os.path.exists(temp_mask_path) and os.path.exists(temp_img_path):
            imgs = np.load(temp_img_path)
            labels = np.load(temp_mask_path)
            # Keep only slices that actually contain heart data; drop the rest.
            imgs, labels = ExtractInfo(imgs, labels)
            for j in range(imgs.shape[0]):
                # Fixed crop around the heart region.
                # NOTE(review): crop bounds assume a fixed slice resolution — confirm.
                cut_img = imgs[j, :, :][100:300, 200:380]
                cut_label = labels[j, :, :][100:300, 200:380]
                PatchN, regions, superpixel, slice_colors = SuperpixelExtract(cut_img, n_segments, is_data_from_nii=0)
                labelvalue, patch_data, patch_coord, count, region_index, patch_liver_index = PatchExtract(regions, cut_img, cut_label)
                print("handle: {}, {}".format(str(j), str(len(patch_liver_index))))
                # Debug aid (disabled): re-render the superpixels to check
                # whether patch_liver_index matches expectations.
                # y_shape, x_shape = cut_label.shape[0], cut_label.shape[1]
                # whiteboard_region_2 = np.zeros((y_shape, x_shape))
                # for lindex3 in patch_liver_index:
                #     temp_region = regions[lindex3]
                #     for value in temp_region.coords:
                #         whiteboard_region_2[value[0], value[1]] = 1
                # ShowImage(1, whiteboard_region_2)
                if len(patch_data) > 0:
                    # patch_data.shape = [Number, 32, 32]; all slices must share
                    # the same shape for np.stack to work.
                    patch_data = np.stack(([_slice for _slice in patch_data]), axis=0)
                    train_imgs.append(patch_data)
                if len(labelvalue) > 0:
                    train_label.append(labelvalue)
        else:
            print("当前的{}数据缺失!".format(folder_name))
    train_imgs = np.concatenate(([_slice for _slice in train_imgs]), axis=0)
    train_label = np.concatenate(([_slice for _slice in train_label]), axis=0)
    print('start storing... ')
    # Persist patches and masks together in one HDF5 file.
    with h5py.File(data_save_path, 'w') as fwrite:
        fwrite.create_dataset('Patch', data=train_imgs)
        fwrite.create_dataset('Mask', data=train_label)
    print("Finish All")
if __name__ == '__main__':
    # Local Windows paths to the CT mask folders and the HDF5 output file.
    data_path = r'G:\data\heart_data\masks'
    data_save_path = r'G:\data\heart_data\masks\40patients_2000.h5'
    n_segments = 2000
    main(data_path, n_segments, data_save_path)
|
import time
import requests
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import json
import win32com.client
import pypandoc
mainUrl="https://www.1800petmeds.com"
options = Options()
options.add_argument('--headless')
options.add_argument('--disable-gpu') # Last I checked this was necessary.
driver = webdriver.Chrome("C:\CURSOS\pyhton raspa tela telegram bot\chromedriver.exe", options=options)
class HTML(object):
    """Accumulates HTML fragments and can export them as a Word document.

    Fixes: removed the redundant class-level `text = ""` (it was always
    shadowed by the instance attribute and risked shared state) and the
    unused `output` local in printWord().
    """

    def __init__(self, text):
        self.text = text

    def as_dict(self):
        """Return the accumulated markup as a plain dict."""
        return {'text': self.text}

    def addText(self, texto):
        """Append another HTML fragment to the buffer."""
        self.text += texto

    def printWord(self):
        """Convert the accumulated HTML to output.docx via pandoc."""
        pypandoc.convert_text(self.text, format='html', to='docx', outputfile='output.docx', extra_args=['-RTS'])
def getLinksPets():
    """Return the anchor tags of every pet-education folder on the site."""
    #getting the links for each page so we can access it later
    page = requests.get('https://www.1800petmeds.com/education')
    soup = BeautifulSoup(page.text, 'html.parser')
    links =soup.find_all("a", class_="link education-folder-link")
    return links
def accessLinksPets(listaPets):
    """Open each education link in the shared driver and scrape its content."""
    #with the links now we can acces each one of the pages
    for a in listaPets:
        # hrefs are site-relative, so prepend the main domain.
        concatenatedUrl=mainUrl + a['href']
        driver.get(concatenatedUrl)
        getPetData()
def getPetData():
    """Scrape the summary plus the Symptoms/Treatment tabs of the current page.

    Appends each section's outer HTML to the module-level fullHTML buffer.
    """
    #here we are going to get the data from the paragraphs we need
    #getting the first paragraph
    summary=driver.find_element_by_xpath("//div[@class='container content-container']//div[@class='content-asset']").get_attribute('outerHTML')
    fullHTML.addText(summary)
    #this try except is used because there are some links that dont have tabs
    try:
        #now to click on the other 3 tabs and get the other paragraphs is needed to close an ad
        #(try opening without 'options.add_argument('--headless')' and you will see it is blocking the buttons we need to click)
        driver.find_element_by_xpath("//div[@class='modal-content form-wrapper']//button").click()
        #with the ad closed we click on the other tabs and get the content inside them
        driver.find_element_by_link_text("Symptoms & Diagnosis").click()
        Symptoms=driver.find_element_by_xpath("//div[@class='container content-container']//div[@class='content-asset']").get_attribute('outerHTML')
        driver.find_element_by_link_text("Treatment").click()
        Treatment=driver.find_element_by_xpath("//div[@class='container content-container']//div[@class='content-asset']").get_attribute('outerHTML')
        #adding text to the fullHTML who is going to be the one we are going to output to word in the end
        fullHTML.addText(Symptoms)
        fullHTML.addText(Treatment)
    except:
        # NOTE(review): bare except also hides driver/session errors, not
        # just the expected "no tabs on this page" case.
        print("doesn't have other tabs")
#creating a object fullHTML who is going to receive all of the text from the pages
fullHTML=HTML("")
# Scrape every education page, then export the collected HTML to Word.
accessLinksPets(getLinksPets())
fullHTML.printWord()
driver.quit()
#Advanced HOUSE PRICE PREDICTIONS
# PART 1 :- Getting the Data
#Import the libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Import the Dataset
train_df=pd.read_csv('train.csv')
test_df=pd.read_csv('test.csv')
#PART 2:- #Exploratory Data Analysis
#sales price info
info_train=train_df.SalePrice.describe()
#to check skewness of target feature
print ("Skew is:", train_df.SalePrice.skew())
plt.hist(train_df.SalePrice, color='blue')
plt.show()
#log transform the target variable since its skewed
target = np.log(train_df.SalePrice)
print ("Skew is:", target.skew())
plt.hist(target, color='blue')
plt.show()
#get the numeric features from the dataset
numeric_features_train = train_df.select_dtypes(include=[np.number]) #include all numeric features
numeric_features_test = test_df.select_dtypes(include=[np.number])
#getting the categorical features and its description
categorical_features = train_df.select_dtypes(exclude=[np.number]) #exclude all numeric features
categorical_feature_description=categorical_features.describe()
#Check for missing values in train.csv and test.csv files
null_train = pd.DataFrame(numeric_features_train.isnull().sum().sort_values(ascending=False)[:80])
null_test = pd.DataFrame(numeric_features_test.isnull().sum().sort_values(ascending=False)[:80])
#Correlation matrix
corr_matrix=train_df.corr() #.abs()
corr_sales=corr_matrix['SalePrice'].sort_values(ascending=False)[:38] #correlated features with SalesPrice
#Get the heatmap
import seaborn as sns
sns.heatmap(corr_matrix)
#Get the boxplot to identify an outlier
sns.boxplot(x=train_df['MSSubClass'])
sns.boxplot(x=train_df['LotFrontage'])
# PART 3- Data Preprocessing
#Taking care of missing values
#train.columns.get_loc('LotFrontage') #to get the location/index
from sklearn.impute import SimpleImputer
imputer=SimpleImputer(missing_values=np.nan,strategy='mean')
# NOTE(review): positional column indices (3, 26, 59, ...) are brittle —
# they silently break if the CSV schema changes; prefer column names.
train_df.iloc[:,[3,26,59]]=imputer.fit_transform(train_df.iloc[:,[3,26,59]])
train_df.select_dtypes(include=[np.number]).isnull().sum()
test_df.iloc[:,[3,26,34,36,37,38,48,47,59,62,61]]=imputer.fit_transform(test_df.iloc[:,[3,26,34,36,37,38,48,47,59,62,61]])
test_df.select_dtypes(include=[np.number]).isnull().sum()
#Removing Outliers
from scipy import stats
def drop_numerical_outliers(numeric_features_train, z_thresh=3):
# Constrains will contain `True` or `False` depending on if it is a value below the threshold.
constrains = numeric_features_train.select_dtypes(include=[np.number]) \
.apply(lambda x: np.abs(stats.zscore(x)) < z_thresh, result_type='reduce') \
.all(axis=1)
# Drop (inplace) values set to be rejected
numeric_features_train.drop(numeric_features_train.index[~constrains], inplace=True)
# Remove outlier rows from the training data only (test rows must be kept).
drop_numerical_outliers(train_df)
#drop_numerical_outliers(test_df)
#Taking care of categorical data (Encoding)
#OneHotEncoding approach is not appropriate for multiple linear regression.
#1 Encoding-MSZoning
print (train_df.MSZoning.value_counts())
def encode(x):
    """Binary-encode MSZoning: 1 for 'RL', 0 for every other zone."""
    return int(x == 'RL')
# Apply the MSZoning indicator to both splits.
train_df['enc_MSZoning'] = train_df.MSZoning.apply(encode)
test_df['enc_MSZoning'] = test_df.MSZoning.apply(encode)
print (train_df.enc_MSZoning.value_counts())
#2 - Encoding Street
print (train_df.Street.value_counts())
def encode(x):
    """Binary-encode Street: 1 for 'Pave', 0 for every other surface."""
    return int(x == 'Pave')
# Apply the Street indicator to both splits.
train_df['enc_Street'] = train_df.Street.apply(encode)
test_df['enc_Street'] = test_df.Street.apply(encode)
print (train_df.enc_Street.value_counts())
#3 - Encoding LotShape
print (train_df.LotShape.value_counts())
def encode(x):
    """Binary-encode LotShape: 1 for 'Reg' (regular), 0 otherwise."""
    return int(x == 'Reg')
# Apply the LotShape indicator to both splits.
train_df['enc_LotShape'] = train_df.LotShape.apply(encode)
test_df['enc_LotShape'] = test_df.LotShape.apply(encode)
print (train_df.enc_LotShape.value_counts())
#4 -Encoding HouseStyle
print (train_df.HouseStyle.value_counts())
def encode(x):
    """Binary-encode HouseStyle: 0 for '1Story', 1 for everything else."""
    return int(x != '1Story')
# Apply the HouseStyle indicator to both splits.
train_df['enc_HouseStyle'] = train_df.HouseStyle.apply(encode)
test_df['enc_HouseStyle'] = test_df.HouseStyle.apply(encode)
print (train_df.enc_HouseStyle.value_counts())
#6 -Encoding GarageCond
print (train_df.GarageCond.value_counts())
def encode(x):
    """Binary-encode GarageCond: 1 for 'TA' (typical/average), 0 otherwise."""
    return int(x == 'TA')
# Apply the GarageCond indicator to both splits.
train_df['enc_GarageCond'] = train_df.GarageCond.apply(encode)
test_df['enc_GarageCond'] = test_df.GarageCond.apply(encode)
print (train_df.enc_GarageCond.value_counts())
#7- Encoding Central Air #when only 2 categories are present
print ("Original: \n")
print (train_df.CentralAir.value_counts(), "\n")
# get_dummies with drop_first keeps a single 0/1 column for the binary field.
train_df['enc_CentralAir'] = pd.get_dummies(train_df.CentralAir, drop_first=True)
test_df['enc_CentralAir'] = pd.get_dummies(test_df.CentralAir, drop_first=True)
print ('Encoded: \n')
print (train_df.enc_CentralAir.value_counts())
"""#Not the best method to take care of missing values
data = train.select_dtypes(include=[np.number]).interpolate().dropna()
sum(data.isnull().sum() != 0) #Check if the all of the columns have 0 null values."""
#Remove highly correlated features
corr_matrix=train_df.corr() #.abs()
#Select upper triangle of correlation matrix
#since every correlation matrix is symmetric
# BUG FIX: np.bool was removed in NumPy 1.24 — use the builtin bool.
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape),k=1).astype(bool))
# Find index of feature columns with correlation greater than 0.90
to_drop = [column for column in upper.columns if any(upper[column] > 0.90)] #List Comprehension
print(to_drop)
# Drop Marked Features
# BUG FIX: drop() expects column labels, not the sub-DataFrame train_df[to_drop].
train_df=train_df.drop(columns=to_drop)
#get the final numeric features from the dataset for modelling
train = train_df.select_dtypes(include=[np.number])
test = test_df.select_dtypes(include=[np.number])
#PART 4 - Build a linear model
#DV and IDV's
# Log-transform the target to reduce the right skew in SalePrice.
y = np.log(train.SalePrice)
#y=train.iloc[:,-7].values
X = train.drop(['SalePrice', 'Id'], axis=1)
#Building the optimal model using Backward Elimination
#import statsmodels.formula.api as sm
import statsmodels.regression.linear_model as sm
# Prepend an intercept column of 1's (statsmodels OLS does not add one).
# FIX: derive the row count from the data instead of hard-coding 1460 so the
# same code works for any train split size.
X=np.append(arr=np.ones((len(X),1)).astype(int),values=X,axis=1)
#np.ones create 1 column with only 1's
#axis=1 for column,0 for rows
#actual backward elimination
#Compare x and x_opt index
X_opt=X[:,0:43]
regressor_OLS=sm.OLS(endog=y ,exog=X_opt).fit()
regressor_OLS.summary()
"""X_opt=X[:,[0,1,2,3,4,5,6,7,8,9,10,
11,12,13,14,15,16,17,18,19,20,
21,22,23,24,25,26,27,28,29,30,
31,32,33,34,35,36,37,38,39,40,
41,42]]
regressor_OLS=sm.OLS(endog=y ,exog=X_opt).fit()
regressor_OLS.summary()"""
X_opt=X[:,[0,1,2,3,4,5,6,7,9,
12,13,14,17,18,19,
22,24,26,29,
32,33,35,38,
41,42]]
regressor_OLS=sm.OLS(endog=y ,exog=X_opt).fit()
regressor_OLS.summary()
"""X_opt=X[:,[0,1,2,3,4,5,6,7,
11,12,16,17,18,19,
22,24,26,29,
32,33,38,
41,42]]
regressor_OLS=sm.OLS(endog=y ,exog=X_opt).fit()
regressor_OLS.summary()"""
#removed 20 variables(p>0.05) by backward elimination
#The higher the t-statistic (and the lower the p-value), the more significant the predictor
#Final best variables for modelling
X=train.iloc[:,[0,1,2,3,4,5,6,7,9,
12,13,14,17,18,19,
22,24,26,29,
32,33,35,38,
41,42]].values
y=y
#Splitting the dataset into training set and test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=.33)
"""# Feature Scaling #FS is required in Dimension reduction
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)"""
# Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
model=regressor.fit(X_train, y_train)
"""import xgboost
from xgboost import XGBRegressor
regression = XGBRegressor()
model=regression.fit(X_train, y_train)"""
#Predicting the test set results
y_pred=regressor.predict(X_test) #predictions on the held-out split
# Applying k-Fold Cross Validation (model evaluation)
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = regressor, X = X_train, y = y_train, cv = 10)
accuracies.mean()
accuracies.std()
#Evaluate the model
print ("R^2 is: \n", model.score(X_test, y_test))
#higher r-squared value means a better fit.
#RMSE measures the distance between our predicted values and actual values.
from sklearn import metrics
print(metrics.mean_absolute_error(y_test,y_pred)) #MAE
print(metrics.mean_squared_error(y_test,y_pred)) #MSE
print(np.sqrt(metrics.mean_squared_error(y_test,y_pred))) #RMSE
#Plot predicted vs actual (log-scale) prices
actual_values = y_test
plt.scatter(y_pred, actual_values, alpha=.7,color='b') #alpha helps to show overlapping data
plt.xlabel('Predicted Price')
plt.ylabel('Actual Price')
plt.title('Linear Regression Model')
plt.show()
# Commented-out Ridge regularization sweep (alpha = 0.01 .. 100); kept for
# reference -- the original author found no substantial improvement.
"""
#Regularization
for i in range (-2, 3):
    alpha = 10**i
    rm = model.Ridge(alpha=alpha)
    ridge_model = rm.fit(X_train, y_train)
    preds_ridge = ridge_model.predict(X_test)
    plt.scatter(preds_ridge, actual_values, alpha=.75, color='b')
    plt.xlabel('Predicted Price')
    plt.ylabel('Actual Price')
    plt.title('Ridge Regularization with alpha = {}'.format(alpha))
    overlay = 'R^2 is: {}\nRMSE is: {}'.format(
                ridge_model.score(X_test, y_test),
                mean_squared_error(y_test, preds_ridge))
    plt.annotate(s=overlay,xy=(12.1,10.6),size='x-large')
    plt.show()
#adjusting the alpha did not substantially improve our model
"""
#Predicting the test set results
# Drop the columns that backward elimination removed from the training matrix
# so the test features line up with the fitted coefficients.
features = test.select_dtypes(include=[np.number]).drop(['Id','MasVnrArea',
            'BsmtFinSF2','BsmtUnfSF','LowQualFinSF','GrLivArea',
            'HalfBath','BedroomAbvGr',
            'TotRmsAbvGrd','GarageYrBlt','GarageArea',
            'WoodDeckSF','PoolArea', #poolarea/3ssnporch
            'ScreenPorch','MiscVal','YrSold',
            'enc_MSZoning','enc_LotShape','enc_HouseStyle'], axis=1)
# Prepend the intercept column, mirroring the training matrix.
# FIX: use the actual number of test rows instead of hard-coding 1459.
features=np.append(arr=np.ones((features.shape[0],1)).astype(int),values=features,axis=1)
#since i have added one column of 1's during backward elimination
predictions = model.predict(features)
# Undo the log transform applied to SalePrice during training.
final_predictions = np.exp(predictions)
#Getting a csv file for the Kaggle submission
# FIX: removed a stray trailing '|' (table-formatting residue) that made the
# last line a syntax error.
output=pd.DataFrame({'Id':test.Id, 'SalePrice':final_predictions})
output.to_csv('my_submission_FS10.csv', index=False)
# Solution to the round-robin "eating show" problem. (Per the original
# author's note, a later attempt to improve accuracy/efficiency failed, so
# this is the earlier working version.)
def solution(food, k):
    """Return the index the eater lands on after k seconds of round-robin
    eating (one unit per second, empty dishes skipped), or -1 when every
    dish can be finished within k seconds.

    NOTE: mutates *food* in place, like the original implementation.
    """
    # Everything fits in the available time -> nothing left afterwards.
    if sum(food) <= k:
        return -1
    n = len(food)
    # Pretend k // n full laps have already happened and deduct them up front.
    laps = k // n
    remaining = k
    for idx, amount in enumerate(food):
        eaten = amount if amount <= laps else laps
        remaining -= eaten
        food[idx] = amount - eaten
    # Walk the table one slot at a time, spending a second per non-empty dish.
    pos = 0
    while remaining >= 0:
        if food[pos] > 0:
            remaining -= 1
        pos = (pos + 1) % n
    if sum(food) <= 0:  # everything consumed exactly at the boundary
        return -1
    return pos
print(solution([3, 1, 2], 5)) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 8 16:10:13 2019
@author: kai
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 4 17:15:52 2019
@author: kai
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as pt
import time
import pandas as pd
def SAW(max_algs, X,Y, X_branches, Y_branches):
    """Run *max_algs* pivot moves of a self-avoiding-walk Monte Carlo update
    on a 4-arm star with 8 branches (1st-generation Cayley tree).

    Results are left in the module globals X2/Y2 (arm coordinates) and
    X2_branches/Y2_branches (branch coordinates); `rejections` counts
    discarded moves. Relies on the module-level `temp_length` and
    `b_length` being defined before the first call.
    """
    global neighbours,rejections, table, epsilon, points, branches
    length = temp_length
    branch_length = b_length
    neighbours = 0
    epsilon = 0.9
    points = 4      # number of arms radiating from the origin
    branches = 8    # two branches hang off the end of each arm
    #below are the 7 transformation matrices which can be applied
    # (rotations by 90/180/270 degrees plus four reflections)
    matrices = np.array([[[0,-1],[1,0]],[[-1,0],[0,-1]],
    [[0,1],[-1,0]], [[1,0],[0,-1]], [[-1,0], [0,1]], [[0,1], [1,0]], [[0,-1], [-1,0]]])
    global X2, Y2, X2_branches, Y2_branches
    X2, X2_branches = np.copy(X), np.copy(X_branches)
    Y2, Y2_branches = np.copy(Y), np.copy(Y_branches)
    #loop for number of times pivot applied
    rejections = 0
    table = {}
    for i in range(max_algs):
        # Keep the previous occupancy table and coordinates so a rejected
        # move can be rolled back.
        table2 = table
        X3, X3_branches = np.copy(X2), np.copy(X2_branches)
        Y3, Y3_branches = np.copy(Y2), np.copy(Y2_branches)
        # NOTE(review): np.random.randint's upper bound is exclusive, so the
        # last matrix in `matrices` is never selected -- confirm intended.
        rand_matrix = np.random.randint(0,len(matrices)-1)
        trans_matrix = matrices[rand_matrix]
        #loop for applying pivot to end of walk
        arm_or_branch = np.random.randint(0,2)  # 0: pivot an arm, 1: pivot a branch
        if arm_or_branch == 0:
            selector = np.random.randint(0,points)
            pivot = np.random.randint(1,length - 2)
            j = pivot + 1
            k = 0
            # Apply the transformation, about the pivot point, to the rest
            # of the chosen arm...
            while j < length:
                [X2[selector][j], Y2[selector][j]] = trans_matrix.dot(([X2[selector][j] - X2[selector][pivot], Y2[selector][j] - Y2[selector][pivot]])) + [X2[selector][pivot], Y2[selector][pivot]]
                j = j+1
            # ...and to both branches hanging off that arm's end.
            while k < branch_length:
                [X2_branches[2*selector][k], Y2_branches[2*selector][k]] = trans_matrix.dot(([X2_branches[2*selector][k] - X2[selector][pivot], Y2_branches[2*selector][k] - Y2[selector][pivot]])) + [X2[selector][pivot], Y2[selector][pivot]]
                [X2_branches[2*selector+1][k], Y2_branches[2*selector+1][k]] = trans_matrix.dot(([X2_branches[2*selector+1][k] - X2[selector][pivot], Y2_branches[2*selector+1][k] - Y2[selector][pivot]])) + [X2[selector][pivot], Y2[selector][pivot]]
                k = k+1
        if arm_or_branch == 1:
            # Pivot only the tail of a single branch.
            selector = np.random.randint(0, branches)
            pivot = np.random.randint(1, branch_length - 2)
            j = pivot + 1
            while j < branch_length:
                [X2_branches[selector][j], Y2_branches[selector][j]] = trans_matrix.dot(([X2_branches[selector][j] - X2_branches[selector][pivot], Y2_branches[selector][j] - Y2_branches[selector][pivot]])) + [X2_branches[selector][pivot], Y2_branches[selector][pivot]]
                j = j+1
        # Rebuild the site-occupancy table; duplicate coordinates collapse
        # into one key, so a short table signals a self-intersection.
        table = {}
        overlap = False
        for i in range(points):
            for j in range(length):
                table[X2[i][j],Y2[i][j]] = True
        for i in range(branches):
            for j in range(branch_length):
                table[X2_branches[i][j], Y2_branches[i][j]] = True
        # NOTE(review): the "- 11" slack presumably allows the legitimately
        # shared attachment sites (origin + arm/branch joints) -- confirm.
        if len(table) < points*length + branches*branch_length - 11:
            overlap = True
            table = table2
        '''if overlap == False:
            old_neighbours = neighbours
            neighbours = 0
            for (i, j) in table2:
                if (i+1, j) in table2:
                    neighbours = neighbours + 1
                if (i-1, j) in table2:
                    neighbours = neighbours + 1
                if (i, j+1) in table2:
                    neighbours = neighbours + 1
                if (i, j-1) in table2:
                    neighbours = neighbours + 1
            #neighbours = neighbours -2*points*end_length + 2*points
            #neighbours = neighbours/2
            if neighbours < old_neighbours:
                acc = np.random.rand()
                if acc > np.exp((epsilon*(neighbours-old_neighbours))):
                    overlap = True
                    neighbours = old_neighbours'''
        # Reject: restore the pre-move configuration.
        if overlap:
            X2, X2_branches = np.copy(X3), np.copy(X3_branches)
            Y2, Y2_branches = np.copy(Y3), np.copy(Y3_branches)
            rejections = rejections + 1
#Defining function to calculate radius of gyration
def Rg(x,y,x_b,y_b):
    """Return the radius of gyration of the whole tree (arms + branches).

    x/y and x_b/y_b are rectangular lists/arrays of coordinates (arms and
    branches respectively). As a side effect the centre of mass is stored
    in the module global ``R_cm`` (shape (2,)), which the driver script
    prints.
    """
    global R_cm
    # Flatten every arm and branch coordinate into one (N, 2) point array.
    xs = np.concatenate((np.ravel(x), np.ravel(x_b)))
    ys = np.concatenate((np.ravel(y), np.ravel(y_b)))
    R = np.column_stack((xs, ys)).astype(float)
    N = len(R)
    # Vectorised centre of mass and RMS distance replace the original
    # element-by-element accumulation loops.
    R_cm = R.sum(axis=0) / N
    deviations = R - R_cm
    return np.sqrt(np.sum(deviations ** 2) / N)
#Defining initial lengths of arms and branches
temp_length = 100
b_length = int(temp_length/5)
t0 = time.time()
# Initial straight configuration: 4 arms along the +/- x and y axes,
# with two perpendicular branches at the end of each arm.
zeros = np.zeros(temp_length)
line = np.arange(temp_length)
branch = np.arange(b_length)
b_loc1 = [temp_length-1]*b_length
b_loc2 = [-temp_length+1]*b_length
x_branches = [branch, -branch, b_loc1, b_loc1, branch, -branch, b_loc2, b_loc2]
y_branches = [b_loc1, b_loc1, branch, -branch, b_loc2, b_loc2, branch, -branch]
X0 = [zeros, line, zeros, -line]
Y0 = [line, zeros, -line, zeros]
'''SAW(3000, X0, Y0, x_branches, y_branches)
#Plotting initial configuration
pt.figure()
pt.title('1st Generation Cayley Tree', fontsize=15)
pt.xlabel('$x$',fontsize=11)
pt.ylabel('$y$', fontsize=11)
for i in range(points):
    pt.plot(X0[i],Y0[i])
for j in range(branches):
    pt.plot(x_branches[j], y_branches[j])
#Plotting chain after pivot steps
pt.figure()
pt.title('1st Generation Cayley Tree', fontsize=15)
pt.xlabel('$x$',fontsize=11)
pt.ylabel('$y$', fontsize=11)
for i in range(points):
    pt.plot(X2[i],Y2[i])
for j in range(branches):
    pt.plot(X2_branches[j], Y2_branches[j])
'''
#Equilibration
# NOTE(review): `overlap` below is a script-level flag; SAW assigns its own
# local `overlap`, so this one never changes and the else branch in the loop
# is effectively dead -- confirm intended.
overlap = False
rsq = []
x_axis = []
rg_in = Rg(X0,Y0, x_branches, y_branches)
print(rg_in)
x_axis.append(0)
rsq.append(rg_in)
# R_cm is set as a side effect of Rg.
print(R_cm)
for i in range(1,1000):
    if i%50 == 0:
        print(i)
    SAW(6*i, X0, Y0, x_branches, y_branches)
    x_axis.append(6*i)
    if overlap ==False:
        rsq.append(Rg(X2, Y2, X2_branches, Y2_branches))
    else:
        rsq.append(rsq[i-1])
print('finished')
#Plotting equilibration
pt.figure()
pt.title('Equilibrium of 1st Generation Cayley Tree')
pt.xlabel('Number of pivots applied')
pt.ylabel('$R_g$')
pt.plot(x_axis,rsq)
|
# Demo of PyQt5 QDateTime arithmetic (addDays/addSecs/addMonths), printed in
# ISO-8601 format.
# FIX: removed a stray trailing '|' (table-formatting residue) from the last line.
from PyQt5.QtCore import QDateTime,Qt
# NOTE(review): the name `datetime` shadows the stdlib module if it is
# imported later in the same script.
datetime = QDateTime.currentDateTime()
print("Today Date And Time Is:"+datetime.toString(Qt.ISODate))
print("Adding 12 Days To The Date: {0}".format(datetime.addDays(12).toString(Qt.ISODate)))
print("Subtracting 25 Days:{0}".format(datetime.addDays(-25).toString(Qt.ISODate)))
print("Adding 50 Seconds:{0}".format(datetime.addSecs(50).toString(Qt.ISODate)))
print("Adding 3 Months: {0}".format(datetime.addMonths(3).toString(Qt.ISODate)))
import subprocess
# Run the Consensus workflow for one donor and echo its captured stdout.
# BUG FIX: the API is subprocess.check_output, not subprocess.checkoutput
# (the old name raised AttributeError).
# NOTE(review): `donor` and `gnos_or_igcg` are not defined in this snippet --
# presumably supplied by an enclosing script; confirm before running.
print (subprocess.check_output(["run_workflow.sh", "Consensus", donor, gnos_or_igcg]))
|
import logging
import socket
import os
import sys
# Deployment coordinates, injected via environment variables with
# developer-friendly defaults.
HULU_ENV = os.environ.get("HULU_ENV", "dev")
HULU_DC = os.environ.get("HULU_DC", "els")
DONKI = os.getenv("DONKI", False)
SERVICE_NAME = "requestflow"
DOPPLER_NAME = "requestflow"
HOSTNAME = socket.gethostname()
BANC_PORT = os.getenv("BANC_PORT")
BANC_HOST = "127.0.0.1"
# NOTE(review): "els"/"iad" are datacenter names elsewhere in this file, yet
# HULU_ENV (an environment name: dev/test/stage/prod) is compared here --
# HULU_DC looks like the intended variable; confirm before relying on the
# non-local BANC hosts.
if HULU_ENV == "els":
    BANC_HOST = "bank.els.prod.hulu.com"
elif HULU_ENV == "iad":
    BANC_HOST = "bank.iad.prod.hulu.com"
LOGSTASH_URL = "http://kibana.prod.hulu.com:9200/logstash-*/_search"
# Doppler (log-ingestion) endpoint per (datacenter, environment); disabled
# for dev/test, and any other combination is a hard configuration error.
if (HULU_DC, HULU_ENV) == ("els", "prod"):
    DOPPLER_HOST = "doppler-ingest.els.prod.hulu.com"
elif (HULU_DC, HULU_ENV) == ("els", "stage"):
    DOPPLER_HOST = "doppler-ingest.els.staging.hulu.com"
elif (HULU_DC, HULU_ENV) == ("els", "dev") or (HULU_DC, HULU_ENV) == ("els", "test"):
    DOPPLER_HOST = None
else:
    raise Exception("Invalid Config Requested: HULU_DC = %s and HULU_ENV = %s" % (HULU_DC, HULU_ENV))
# Setup Stats #
from hpc.metrics_client import get_stats
# Metrics tracker; sends nothing in test/dev environments.
track_stats = get_stats(SERVICE_NAME, HULU_ENV, HULU_DC, BANC_HOST, BANC_PORT,
                        no_send=HULU_ENV in ["test", "dev"])
def setup_logging():
    """Install a stdout handler (and, when configured, a Doppler UDP
    handler) on the root logger.

    Idempotent: returns immediately if the root logger already has
    handlers, so repeated calls cannot duplicate output.
    """
    root_logger = logging.getLogger("")
    if len(root_logger.handlers) != 0: return
    from hpc.requeststore import ContextFilter
    # Every record carries host, pid and the per-request tracking id that
    # ContextFilter injects.
    format = logging.Formatter('%(asctime)s : {0} : <%(process)d> : %(name)-12s : Track-%(tracking_id)s : %(levelname)-8s : %(message)s'.format(HOSTNAME), datefmt="%Y-%m-%d %H:%M:%S")
    ctx_filter = ContextFilter()
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.addFilter(ctx_filter)
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(format)
    root_logger.addHandler(stream_handler)
    root_logger.setLevel(logging.NOTSET)
    if DOPPLER_HOST:
        from hpc.doppler import UDPHandler, get_flask_request_info_for_doppler
        from hpc.requeststore import get_trace
        # Additionally ship ERROR-and-above records to Doppler over UDP.
        doppler_udp_handler = UDPHandler(host=DOPPLER_HOST, port=50001,
                                         datacenter=HULU_DC, env=HULU_ENV,
                                         hostname=HOSTNAME, codebasename=DOPPLER_NAME,
                                         get_request_info=get_flask_request_info_for_doppler,
                                         get_trace=get_trace, emit_stdout=True)
        doppler_udp_handler.setLevel(level=logging.ERROR)
        root_logger.addHandler(doppler_udp_handler)
def setup_sso(app):
    """Attach Hulu SSO to the given Flask *app* and return the HuluSSO
    instance, or None when running in the test environment (no SSO)."""
    if HULU_ENV == "test":
        return None
    from hpc.sso.flask_client import HuluSSO
    sso_settings = {
        'SSO_LOGIN_GROUPS': ['Devs'],
        'SSO_HIJACK_PROTECTION': False,
        'SSO_VIA_IP_IS_CLIENTS_FAULT': True,
    }
    app.config.update(sso_settings)
    return HuluSSO(app)
|
""" -------------------------------------------------------------------------------------------------------------------
ITEC 136: Lab 03
Develop a program that asks the user to enter a text.
The program should analyze the text and print out unique letters,
in alphabetical order,
with the percentage information of each letter.
Case should be ignored.
Write a function to analyze the text string.
No global variables are used in the function.
Function parameters must be used.
@author: Dani Hooven
@version: 10/20/2020
-------------------------------------------------------------------------------------------------------------------- """
import turtle
def drawTurtle(t, c, height):
    """ Get turtle t to draw one bar, of height. """
    # Draws one filled 20-unit-wide bar for character c, with the character
    # written underneath, then leaves the pen positioned at the start of the
    # next bar's slot. Turtle call order is significant here.
    # -----------------------
    t.up()
    t.write(c) # write the character label under the bar
    t.left(90)
    t.forward(2)
    t.right(90)
    t.down()
    t.begin_fill() # start filling this shape
    t.left(90)
    t.forward(height)
    t.right(90)
    t.forward(20)
    t.right(90)
    t.forward(height)
    # t.left(90)
    t.end_fill() # stop filling this shape
    t.up()
    t.forward(2)
    t.left(90)
    t.down()
#text = input("Enter text: ")
# Fixed sample text used in place of interactive input while testing.
text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore " \
       "magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo " \
       "consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. " \
       "Excep zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz"
alpha = 'abcdefghijklmnopqrstuvwxyz'
text_lower = text.lower()
text_length = 0   # total count of alphabetic characters (non-letters ignored)
max_count = 0     # highest single-letter frequency (scales the y axis)
letter_count = {} # empty dictionary: letter -> occurrence count
for char in text_lower:
    if char in alpha:
        if char in letter_count:
            letter_count[char] = letter_count[char] + 1
            if (letter_count[char]) > max_count:
                max_count = letter_count[char]
        else:
            letter_count[char] = 1
        text_length = text_length + 1
window = turtle.Screen()
awesome = turtle.Turtle()
awesome.speed(100)
border = 2
window.bgcolor("lightblue")
# NOTE(review): the x extent scales with the total letter count rather than
# the number of unique letters actually drawn -- confirm this is intentional.
window.setworldcoordinates(0 - border, 0 - border, 20 * text_length + border + 20, max_count + border)
keys = letter_count.keys()
percentage = 0
# Draw the percentage scale up the left edge (0.0 .. 1.0 in tenths).
awesome.up()
awesome.left(90)
for i in range(11):
    awesome.write(percentage)
    awesome.forward(max_count * .1)
    awesome.goto(0, awesome.ycor())
    percentage = round(percentage + 0.1, 1)
awesome.right(90)
awesome.goto(0 - border, 0 - border)
awesome.forward(20)
# One bar per distinct letter, in alphabetical order.
for char in sorted(keys):
    drawTurtle(awesome, char, letter_count[char])
window.exitonclick()
|
import sys
def get_lower_and_upper_bound(n):
    """Bracket the cube root of n between consecutive powers of two.

    Returns (lower, upper) with lower**3 <= n <= upper**3; when n is the
    exact cube of a power of two, both entries are that value.
    """
    candidate = 1
    while candidate ** 3 < n:
        candidate *= 2
    if candidate ** 3 == n:
        return candidate, candidate
    return candidate // 2, candidate
def closest_to_n(n, lower, upper):
    """Return whichever of lower/upper has a cube closer to n (ties go to
    upper, matching the original comparison)."""
    return lower if n - lower ** 3 < upper ** 3 - n else upper
def search_approximate_cube_root(n, lower, upper):
    """Binary-search [lower, upper] for the integer whose cube is nearest n.

    Precondition: lower**3 <= n <= upper**3 (as produced by
    get_lower_and_upper_bound).
    """
    # Adjacent bounds: pick whichever endpoint is closest.
    if upper - lower == 1:
        return closest_to_n(n, lower, upper)
    mid = (lower + upper) // 2
    cube = mid ** 3
    if cube == n:
        return mid
    if cube < n:
        return search_approximate_cube_root(n, mid, upper)
    return search_approximate_cube_root(n, lower, mid)
def solve(n):
    """Parse *n* (string or int) and print its nearest integer cube root."""
    value = int(n)
    if value == 0:
        print(0)
        return
    lo, hi = get_lower_and_upper_bound(value)
    print(search_approximate_cube_root(value, lo, hi))
if __name__ == "__main__":
    # One integer per stdin line; prints its nearest integer cube root.
    for line in sys.stdin:
        solve(line.strip())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_dialog_information.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog_information(object):
    """pyuic5-generated layout for the Information dialog.

    Do not hand-edit; regenerate from ui_dialog_information.ui instead
    (per the generator warning at the top of this file).
    """
    def setupUi(self, Dialog_information):
        # Layout: a QTextEdit filling the dialog above an OK-only button box.
        Dialog_information.setObjectName("Dialog_information")
        Dialog_information.resize(740, 533)
        self.gridLayout = QtWidgets.QGridLayout(Dialog_information)
        self.gridLayout.setObjectName("gridLayout")
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog_information)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
        self.textEdit = QtWidgets.QTextEdit(Dialog_information)
        self.textEdit.setObjectName("textEdit")
        self.gridLayout.addWidget(self.textEdit, 0, 0, 1, 1)
        self.retranslateUi(Dialog_information)
        # OK accepts; Cancel (not shown) would reject.
        self.buttonBox.accepted.connect(Dialog_information.accept)
        self.buttonBox.rejected.connect(Dialog_information.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog_information)
    def retranslateUi(self, Dialog_information):
        # Centralised translatable strings (window title only here).
        _translate = QtCore.QCoreApplication.translate
        Dialog_information.setWindowTitle(_translate("Dialog_information", "Information"))
if __name__ == "__main__":
    # Manual smoke test: show the dialog standalone.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog_information = QtWidgets.QDialog()
    ui = Ui_Dialog_information()
    ui.setupUi(Dialog_information)
    Dialog_information.show()
    sys.exit(app.exec_())
|
import numpy
import json
import csv
import collections
import operator
import codecs
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Accumulators shared by the analysis helpers below, filled while streaming
# the provider JSONL files.
region=[]
list_of_specialities=[]
spec_patient_analysis = collections.defaultdict(dict)
analyzed_data=collections.defaultdict(dict)              # medicine -> speciality -> weighted score
medicine_array=collections.defaultdict(dict)             # medicine -> list of raw counts (see generate_medicine_median)
speciality_region_data=collections.defaultdict(dict)     # speciality -> region -> count
speciality_settlement_type_data=collections.defaultdict(dict)
speciality_gender_data=collections.defaultdict(dict)
medicine_median=collections.defaultdict(dict)            # medicine -> median prescription count
medicine_region_data=collections.defaultdict(dict)       # medicine -> region -> count
output_data=collections.defaultdict(dict)                # speciality -> accumulated prediction score
def generate_medicine_region_data(region,medicine):
    """Count one prescription of *medicine* in *region*.

    Mutates the module-level ``medicine_region_data`` mapping
    (medicine -> region -> count).
    """
    # dict.get with a default replaces the old check-then-set double lookup.
    counts = medicine_region_data[medicine]  # defaultdict: creates {} on first use
    counts[region] = counts.get(region, 0) + 1
def create_speciality_data(region,settlement_type,gender,speciality):
    """Tally one provider record into the per-speciality breakdowns.

    Increments the region, settlement-type and gender counters for
    *speciality* in the module-level ``speciality_region_data``,
    ``speciality_settlement_type_data`` and ``speciality_gender_data``
    defaultdicts (each maps speciality -> category -> count).
    """
    # dict.get with a default replaces the old check-then-set double lookups.
    by_region = speciality_region_data[speciality]
    by_region[region] = by_region.get(region, 0) + 1
    by_settlement = speciality_settlement_type_data[speciality]
    by_settlement[settlement_type] = by_settlement.get(settlement_type, 0) + 1
    by_gender = speciality_gender_data[speciality]
    by_gender[gender] = by_gender.get(gender, 0) + 1
def write_gender_data_as_csv(speciality_gender_data):
    """Dump per-speciality male/female counts to speciality_gender_data.csv.

    FIX: opens the file with newline='' (per the csv module docs) and
    encoding='utf-8', and writes str keys directly -- the old
    key.encode("utf-8") wrote b'...' reprs into the cells under Python 3.
    Missing genders are written as 0.
    """
    with open('speciality_gender_data.csv', 'w', newline='', encoding='utf-8') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(["speciality","Male","Female"])
        for speciality, counts in speciality_gender_data.items():
            male = counts.get("M", 0)
            female = counts.get("F", 0)
            writer.writerow([speciality, male, female])
def write_medicine_region_data_as_csv(medicine_region_data):
    """Dump per-medicine region counts to medicine_region_data.csv.

    FIX: opens with newline=''/utf-8 and writes str keys directly; the old
    key.encode() produced b'...' cells under Python 3. Missing regions are
    written as 0.
    NOTE(review): the first header cell says "speciality" although the rows
    are medicines -- kept as-is for compatibility with existing consumers.
    """
    with open('medicine_region_data.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["speciality", "South", "Midwest", "West", "Northeast"])
        for medicine, counts in medicine_region_data.items():
            writer.writerow([medicine,
                             counts.get("South", 0),
                             counts.get("Midwest", 0),
                             counts.get("West", 0),
                             counts.get("Northeast", 0)])
def write_region_data_as_csv(speciality_region_data):
    """Dump per-speciality region counts to speciality_region_data.csv.

    FIX: opens with newline=''/utf-8 and writes str keys directly; the old
    key.encode() produced b'...' cells under Python 3. Missing regions are
    written as 0. Each row is also echoed to stdout, as before.
    """
    with open('speciality_region_data.csv','w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["speciality","South", "Midwest", "West", "Northeast"])
        for speciality, counts in speciality_region_data.items():
            south = counts.get('South', 0)
            Midwest = counts.get('Midwest', 0)
            West = counts.get('West', 0)
            Northeast = counts.get('Northeast', 0)
            print(speciality,south,Midwest,West,Northeast)  # progress/debug echo
            writer.writerow([speciality,south,Midwest,West,Northeast])
def write_settlement_data_as_csv(speciality_settlement_type_data):
    """Dump per-speciality urban/non-urban counts to
    speciality_settlement_type_data.csv.

    FIX: data rows now follow the header order (Urban, then Non-Urban) --
    they used to be written swapped relative to the header. Also opens with
    newline=''/utf-8 and writes str keys directly (the old key.encode()
    produced b'...' cells under Python 3).
    """
    with open('speciality_settlement_type_data.csv', 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["speciality", "Urban","Non-Urban"])
        for speciality, counts in speciality_settlement_type_data.items():
            nonUrban = counts.get("non-urban", 0)
            urban = counts.get("urban", 0)
            print(speciality,nonUrban,urban)  # progress/debug echo (original order)
            writer.writerow([speciality,urban,nonUrban])
def write_medicine_median_as_csv(medicine_median):
    """Dump the medicine -> median mapping to medicine_median.csv.

    FIX: opens with newline='' (per the csv module docs, prevents blank
    rows on Windows) and utf-8 encoding.
    """
    with open("medicine_median.csv",'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["medicine","median"])
        for medicine, median in medicine_median.items():
            writer.writerow([medicine, median])
def write_medicine_speciality_probability_as_csv(analyzed_data):
    """Write one row per medicine with its score for every known speciality.

    Columns follow the module-level ``list_of_specialities`` order; 0 is
    written where a medicine has no score for a speciality.

    BUG FIXES: the inner lookup used the undefined global name `medicines`
    (NameError) instead of the loop key, and the header appended the whole
    speciality list as a single cell instead of one cell per speciality.
    Also opens with newline=''/utf-8 like the other writers.
    """
    with open("medicine_speciality_probability.csv",'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["medicine"] + list(list_of_specialities))
        for medicine, per_speciality in analyzed_data.items():
            row = [medicine] + [per_speciality.get(s, 0) for s in list_of_specialities]
            writer.writerow(row)
    print("Lakhai Gayu")
def generate_medicine_median(medicine,value):
    """Record *value* for *medicine* and refresh its running median.

    Mutates the module-level ``medicine_array`` (medicine -> list of raw
    values) and ``medicine_median`` (medicine -> median) mappings.
    """
    # NOTE: medicine_array is a defaultdict(dict); using .get here avoids
    # the defaultdict materialising an empty *dict* where a list is needed.
    values = medicine_array.get(medicine)
    if values is None:
        medicine_array[medicine] = [value]
        median = value
    else:
        values.append(value)
        median = numpy.median(values)
    medicine_median[medicine] = median
def insert_into_analysis(medicine,speciality,years_of_practicing,value):
    """Accumulate a weighted score for (medicine, speciality).

    The raw prescription count *value* is boosted by how far it sits above
    the medicine's median and by the prescriber's experience, then added to
    the module-level ``analyzed_data`` mapping.
    """
    # Convert once; medicine_median values may be str after a CSV reload.
    median = float(medicine_median[medicine])
    if value > median:
        median_factor = 1 + (value - median) / value
    else:
        median_factor = 1
    # Experience boost: +0.1 per full 5-year block beyond the first 5 years.
    if years_of_practicing > 5:
        extra_blocks = numpy.floor((years_of_practicing - 5) / 5)
        exp_factor = 1 + extra_blocks / 10
    else:
        exp_factor = 1
    analyzed_value = value * median_factor * exp_factor
    bucket = analyzed_data[medicine]  # defaultdict: creates {} lazily
    bucket[speciality] = bucket.get(speciality, 0) + analyzed_value
def read_spec_patient_from_csv(speciality,region):
    """Print patient/doctor statistics for *speciality* in *region* from
    spec.csv, plus a sufficiency verdict based on the recommended
    patients-per-doctor (P/D) ratio. Only the first matching row is used.
    """
    print(speciality)
    print(region)
    with open('spec.csv') as f:
        reader=csv.DictReader(f,delimiter=',')
        for line in reader:
            # print(line["Specialty"])
            # Substring match: callers pass quoted names like "'Acute Care'".
            if speciality in line["Specialty"]:
                # Column names in spec.csv embed the region name.
                patients="Patients in "+region
                doctors="Doctors in "+region
                ratio ="Actual P/D ratio in "+region
                recommended_ratio="recommended P/D value"
                print("Patients in "+region+": "+line[patients])
                print("Doctors in "+region+": "+line[doctors])
                print("Actual P/D ratio in"+region+": "+line[ratio])
                print("Recommended P/D value: "+line[recommended_ratio])
                print(line[ratio])
                print(line[recommended_ratio])
                # Fewer patients per doctor than recommended -> sufficient.
                if int(line[recommended_ratio]) > int(line[ratio]):
                    print("There are sufficient doctors")
                else:
                    print()
                    print()
                    print("There has to be more doctors as patients are more")
                    print("Possible solutions:")
                    print("1:Transfer doctors from one region to another")
                    print("2:Improve Medical Infrastructure of that region")
                    print("3:Promote similar courses in medical colleges")
                    print()
                    print()
                    print()
                break
def read_medicine_median_from_csv():
    """Load medicine -> median from medicine_median.csv into the
    module-level ``medicine_median`` mapping.

    Values are kept as strings; consumers (insert_into_analysis) convert
    them with float() at use time.
    """
    with open('medicine_median.csv') as f:
        reader=csv.DictReader(f, delimiter=',')
        for line in reader:
            medicine_median[line["medicine"]] = line["median"]
    # print(medicine_median)
    return
def generate_visualize_probability(x):
    """Normalise the module-level ``analyzed_data`` in place so each
    medicine's speciality scores sum to 1 (i.e. become probabilities).

    The *x* parameter is unused; kept for interface compatibility.
    """
    for per_speciality in analyzed_data.values():
        # Built-in sum replaces the original manual accumulation loop.
        total = sum(per_speciality.values())
        for speciality in per_speciality:
            per_speciality[speciality] /= total
def generate_output(medicine,x):
    """Merge *medicine*'s speciality scores into the module-level
    ``output_data`` accumulator, average by *x* (the number of medicines
    entered so far) and print the best speciality with its score.

    NOTE(review): every call divides the whole accumulator by x again, so
    earlier contributions get re-divided on subsequent calls -- confirm
    this compounding is intended.
    """
    print(output_data)
    print(analyzed_data[medicine])
    for key,value in analyzed_data[medicine].items():
        if key in output_data.keys():
            output_data[key]=output_data[key]+value
            # print(type(output_data[key]))
            # print(output_data[key])
        else:
            output_data[key]=value
    for key,value in output_data.items():
        output_data[key]=output_data[key]/x
    print(output_data)
    print("Predicted field & probability")
    # Fallback messages are printed when the medicine was unknown.
    predicted_field = "Please Enter Valid Medicine"
    probability="Do you want to try again?"
    if output_data.__len__()>0:
        # print("Below is ------------------------------")
        # print(sorted(output_data.iteritems(), key=lambda x: -x[1])[:5])
        predicted_field = max(output_data.items(), key=operator.itemgetter(1))[0]
        probability=max(output_data.items(), key=operator.itemgetter(1))[1]
    print(predicted_field)
    print(probability)
    return
def suggest_doctors(spec):
    """Print up to 11 providers whose specialty equals *spec*, streaming
    abc.jsonl (one provider JSON object per line)."""
    print(spec)
    with open("abc.jsonl") as f:
        x=0  # number of matches printed so far
        for line in f:
            # print(x)
            j_content = json.loads(line)
            speciality = j_content.get('provider_variables').get('specialty')
            region = j_content.get('provider_variables').get('region')
            settlement_type = j_content.get('provider_variables').get('settlement_type')
            years_of_practicing = j_content.get('provider_variables').get('years_practicing')
            gender=j_content.get('provider_variables').get('gender')
            # print(speciality)
            # print(spec)
            npi= j_content.get('npi')
            # print(npi)
            if speciality == spec:
                x = x + 1
                print("npi: ",npi," region: ",region," settlement_type: ",settlement_type," experience: ",years_of_practicing,"gender: ",gender)
            if x > 10:
                break
# Load the precomputed medians, then stream the prescription records and
# build the speciality-score table used by the interactive menu below.
read_medicine_median_from_csv()
with open("ok.jsonl") as f:
    # x=0
    for line in f:
        # print(x)
        # x=x+1
        j_content = json.loads(line)
        speciality = j_content.get('provider_variables').get('specialty')
        if speciality not in list_of_specialities:
            list_of_specialities.append(speciality)
        region = j_content.get('provider_variables').get('region')
        settlement_type = j_content.get('provider_variables').get('settlement_type')
        years_of_practicing = j_content.get('provider_variables').get('years_practicing')
        # create_speciality_data(region,settlement_type,years_of_practicing,speciality)
        #print(speciality)
        medicines = j_content.get('cms_prescription_counts')
        #print(medicines)
        # for key,value in medicines.items():
        #     if key not in list_of_medicines:
        #         list_of_medicines.append(key)
        for key, value in medicines.items():
            # if key not in list_of_medicines:
            insert_into_analysis(key,speciality,years_of_practicing,value)
            # generate_medicine_median(key,value)
            # generate_medicine_region_data(region,key)
# One-off data-generation / debugging calls, kept disabled:
# print(analyzed_data)
# generate_probability(analyzed_data)
# read_spec_patient_from_csv("'Acute Care'","Northeast")
# print(analyzed_data)
# print(medicine_median)
# print(value_data)
# print(speciality_region_data)
# print(speciality_gender_data)
# print(speciality_settlement_type_data)
# write_medicine_median_as_csv(medicine_median)
# write_gender_data_as_csv(speciality_gender_data)
# write_region_data_as_csv(speciality_region_data)
# write_settlement_data_as_csv(speciality_settlement_type_data)
# write_medicine_region_data_as_csv(medicine_region_data)
# write_medicine_speciality_probability_as_csv(analyzed_data)
# read_medicine_median_from_csv()
# print(freq_data)
# print(value_data)
x=0
choice=0
# Interactive menu; options are compared as strings since input() returns str.
while(choice!="4"):
    print("1. Predict speciality from prescription")
    print("2.Suggest Doctors")
    print("3.Show Statistics")
    print("4.Exit")
    choice = input("What do you want to do?")
    if choice =="2":
        spec=input("Enter speciality")
        suggest_doctors(spec)
    if choice == "1":
        # Accumulate scores for one or more medicines, then report the most
        # probable speciality.
        x = 1
        generate_output(input("Enter Medicine:"),x)
        add_more=input("Do you want to add more?")
        while(add_more!="N"):
            x=x+1
            generate_output(input("Enter Medicine:"), x)
            add_more = input("Do you want to add more?")
        if output_data.__len__()>0:
            predicted_field="'"+max(output_data.items(), key=operator.itemgetter(1))[0]+"'"
            print(predicted_field)
        else:
            print("You have not entered valid medicine")
        # print("1.Suggest Doctors")
        print("2.Show statistics")
        print("3.exit")
        ai=input("Enter input:")
        if(ai=="2"):
            print("1.South")
            print("2.Midwest")
            print("3.West")
            print("4.Northeast")
            region_input=input("Enter Region from above to show statistics and recommendation")
            if region_input == "1":
                region_input = "South"
            elif region_input == "2":
                region_input = "Midwest"
            elif region_input == "3":
                region_input = "West"
            else:
                region_input = "Northeast"
            # NOTE(review): predicted_field is only bound when output_data
            # was non-empty above; this branch can raise NameError otherwise.
            if predicted_field.__len__()>1:
                read_spec_patient_from_csv(predicted_field,region_input)
            else:
                print("Invalid medicine entered")
        else:
            # BUG FIX: was `choice=4` (int); the loop condition compares
            # against the string "4", so the menu never exited.
            choice="4"
    elif choice =="3":
        specialty=input("Enter Specialty:")
        print("1.South")
        print("2.Midwest")
        print("3.West")
        print("4.Northeast")
        region_input = input("Enter Region from above to show statistics and recommendation")
        if region_input == "1":
            region_input="South"
        elif region_input=="2":
            region_input="Midwest"
        elif region_input=="3":
            region_input="West"
        else:
            region_input="Northeast"
        read_spec_patient_from_csv(specialty,region_input)
# while(input("Do you want to predict more?")!="N"):
# x =x +1
# generate_output(input("Enter Medicine:"), x)
# print(output_data)
# generate_output(output_data)
# else:
# if speciality in key.items():
# key[speciality]+=value
# else:
# key[speciality]=value
#print(spec.dumps())
#print(spec.values())
# if spec not in region:
# region.append(spec)
# print("changed")
# print(region.__len__())
# print(region)
##to read region
# with open("roam_prescription_based_prediction.jsonl") as f:
# for line in f:
# j_content = json.loads(line)
# spec=j_content.get('provider_variables').get('region')
# if spec not in region:
# region.append(spec)
# print("changed")
# print(region.__len__())
# print(region)
|
import tensorflow as tf
import numpy as np
# for generator
def get_conv2d_weights(name, shape, mask=None):
    """Create a conv filter variable, optionally raster-masked.

    shape is (filter_h, filter_w, in_ch, out_ch). With mask='a' or 'b' the
    filter is zeroed right of / below the centre tap (PixelCNN-style
    masking -- presumably; confirm against the calling model); mask 'a'
    additionally zeroes the centre tap itself.
    """
    weights_initializer = tf.contrib.layers.xavier_initializer()
    W = tf.get_variable(name, shape=shape, dtype=np.float32, initializer=weights_initializer)
    if mask:
        mask_filter = np.ones(shape, dtype=np.float32)
        filter_mid_x = shape[0]//2
        filter_mid_y = shape[1]//2
        # Zero everything strictly right of centre on the centre row...
        mask_filter[filter_mid_x, filter_mid_y+1:, :, :] = 0.
        # ...and every row below the centre row.
        mask_filter[filter_mid_x+1:, :, :, :] = 0.
        if mask == 'a':
            mask_filter[filter_mid_x, filter_mid_y, :, :] = 0.
        # NOTE: this rebinds W to a multiply op (variable * constant mask);
        # it does not mutate the underlying variable.
        W *= mask_filter
    return W
def get_row_conv2d_weights(name, shape, direction='down', mask=None):
    """Create a row-masked conv filter variable.

    direction 'down' keeps rows at/above the centre row; any other value
    keeps rows at/below it. mask 'a' additionally zeroes the centre row.
    """
    W = tf.get_variable(
        name, shape=shape, dtype=np.float32,
        initializer=tf.contrib.layers.xavier_initializer())
    if mask:
        keep = np.ones(shape, dtype=np.float32)
        centre_x = shape[0] // 2
        if direction == 'down':
            keep[centre_x + 1:, :, :, :] = 0.
        else:
            keep[:centre_x, :, :, :] = 0.
        if mask == 'a':
            keep[centre_x, :, :, :] = 0.
        W *= keep
    return W
def get_col_conv2d_weights(name, shape, direction='right', mask=None):
    """Create a column-masked conv filter variable.

    direction 'right' keeps columns at/left-of the centre column; any other
    value keeps columns at/right-of it. mask 'a' additionally zeroes the
    centre column.
    """
    W = tf.get_variable(
        name, shape=shape, dtype=np.float32,
        initializer=tf.contrib.layers.xavier_initializer())
    if mask:
        keep = np.ones(shape, dtype=np.float32)
        centre_y = shape[1] // 2
        if direction == 'right':
            keep[:, centre_y + 1:, :, :] = 0.
        else:
            keep[:, :centre_y, :, :] = 0.
        if mask == 'a':
            keep[:, centre_y, :, :] = 0.
        W *= keep
    return W
def get_conv2d_bias(name, shape):
    """Return a zero-initialised bias variable with the given name/shape."""
    zeros = tf.zeros_initializer()
    return tf.get_variable(name, shape=shape, initializer=zeros)
# for encoder
def conv2d(inputs, num_outputs, kernel_size, scope, data_format):
    """SAME-padded, stride-1 convolution followed by batch-norm + ReLU."""
    # Bias is omitted on the conv because batch-norm's beta plays that role.
    conv_out = tf.contrib.layers.conv2d(
        inputs, num_outputs, kernel_size, stride=[1, 1], padding='SAME',
        scope=scope, data_format=data_format, activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        biases_initializer=None)
    return tf.contrib.layers.batch_norm(
        conv_out, decay=0.9, center=True, activation_fn=tf.nn.relu,
        updates_collections=None, epsilon=1e-5,
        scope=scope+'/batch_norm', data_format=data_format)
def pool2d(inputs, kernel_size, scope, data_format):
    """Stride-2 max pooling with SAME padding."""
    return tf.contrib.layers.max_pool2d(
        inputs, kernel_size, stride=[2,2], padding='SAME',
        scope=scope, data_format=data_format)
def fully_connected(inputs, num_outputs, batch_size, scope):
    """Flatten `inputs` to (batch_size, -1) and apply a dense layer.

    Fix: the dense layer's scope used `scope+'fully_connect'` with no '/'
    separator, producing names like "fc1fully_connect" — inconsistent with
    the `scope+'/reshape'` and `scope+'/batch_norm'` naming used throughout
    this file. NOTE(review): this changes the variable scope name, so
    checkpoints saved under the old name would need remapping.
    """
    flat = tf.reshape(inputs, [batch_size, -1], name=scope+'/reshape')
    return tf.contrib.layers.fully_connected(
        flat, num_outputs, scope=scope+'/fully_connect')
def deconv2d(inputs, num_outputs, kernel_size, scope, data_format):
    """Stride-2 transposed convolution (2x upsample) + batch-norm + ReLU."""
    # Bias-free transposed conv; batch-norm supplies the shift term.
    deconv_out = tf.contrib.layers.conv2d_transpose(
        inputs, num_outputs, kernel_size, stride=[2, 2], padding='SAME',
        scope=scope, data_format=data_format, activation_fn=None,
        weights_initializer=tf.contrib.layers.xavier_initializer(),
        biases_initializer=None)
    return tf.contrib.layers.batch_norm(
        deconv_out, decay=0.9, center=True, activation_fn=tf.nn.relu,
        updates_collections=None, epsilon=1e-5,
        scope=scope+'/batch_norm', data_format=data_format)
|
from django.shortcuts import render
from rest_framework import viewsets
from .models import Project, ProjectMember
from .serializers import ProjectSerializer, ProjectMemberSerializer
# Create your views here.
class ProjectViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoint exposing all Project rows via ProjectSerializer."""
    # NOTE(review): ProjectMember / ProjectMemberSerializer are imported in
    # this module but no viewset uses them — confirm whether one is missing.
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
|
class Solution:
    def longestValidParentheses(self, s: str) -> int:
        """Return the length of the longest well-formed parentheses substring.

        dp[i] is the length of the longest valid substring that ends exactly
        at index i; only a ')' can terminate a valid substring.
        """
        n = len(s)
        dp = [0] * n
        best = 0
        for i in range(1, n):
            if s[i] != ')':
                continue
            if s[i - 1] == '(':
                # "...()" — the pair extends whatever valid run ended before it.
                dp[i] = 2 + (dp[i - 2] if i >= 2 else 0)
            else:
                # "...))" — look for a matching '(' just before the inner run.
                j = i - dp[i - 1] - 1
                if j >= 0 and s[j] == '(':
                    dp[i] = dp[i - 1] + 2 + (dp[j - 1] if j >= 1 else 0)
            best = max(best, dp[i])
        return best
if __name__ == '__main__':
    # Quick manual check: longest valid substring of "()))" is "()" -> 2.
    solver = Solution()
    print(solver.longestValidParentheses("()))"))
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 2 10:52:55 2018
@author: lcristovao
"""
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_json
import numpy
import os
import pandas as pd
from sklearn import preprocessing
def categoricalToNumeric(array):
    """Map the labels in `array` to integer codes via sklearn's LabelEncoder."""
    encoder = preprocessing.LabelEncoder()
    # fit_transform is the documented one-call equivalent of fit + transform.
    return encoder.fit_transform(array)
def TurnDatasetToNumeric(dataset):
    """Label-encode every object-dtype column of `dataset` in place.

    Returns the same (mutated) DataFrame so calls can be chained.
    """
    column_count = len(dataset.dtypes)
    for col in range(column_count):
        if dataset.dtypes[col] == object:
            encoded = categoricalToNumeric(dataset.iloc[:, col].values)
            dataset.iloc[:, col] = encoded
    return dataset
#C:/Users/lcristovao/Documents/GitHub/Neuronal_Network_training/BasicNN2
# Train a small dense network on the WDBC breast-cancer dataset, report
# accuracy, demo one prediction, and save the model to disk.
# fix random seed for reproducibility
numpy.random.seed(7)
# Breast cancer dataset
# NOTE(review): downloaded over the network on every run — fails offline;
# consider caching the CSV locally.
dataset=pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data",header=None)
#Turn columns to numerical
TurnDatasetToNumeric(dataset)
##First columnis patient Id that does not matter
dataset=dataset.iloc[:,1:]
#change first column to last because its the class column
dataset=pd.concat((dataset.iloc[:,1:],dataset.iloc[:,0]),axis=1)
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine.
dataset.to_csv('C:/Users/lcristovao/Documents/GitHub/Neuronal_Network_training/BasicNN2/breast_cancer.txt',index=None)
dataset=dataset.values
# split into input (X) and output (Y) variables; the class label is now last
X = dataset[:,:-1]
Y = dataset[:,-1]
n_atributes=X.shape[1]#number of columns
# create model: dense 24 -> 12 -> 8 -> 1, sigmoid output for binary class
model = Sequential()
model.add(Dense(24, input_dim=n_atributes, activation='relu'))
model.add(Dense(12, activation='relu'))
model.add(Dense(8, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
# NOTE(review): trains and evaluates on the same data — accuracy is optimistic.
model.fit(X, Y, epochs=300, batch_size=10)
# evaluate the model
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# Show one sample and the model's rounded prediction for it
print("Input:",X[3,:],"->",Y[3])
prediction=model.predict(X[3:4,:])
print("predict:",round(prediction[0][0]))
#___________Save And Load________________________________________________________
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
'''
# later...
# load json and create model
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
score = loaded_model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
# Fit the model
loaded_model.fit(X, Y, epochs=150, batch_size=10)
# evaluate the model
scores = loaded_model.evaluate(X, Y)
print("\n%s: %.2f%%" % (loaded_model.metrics_names[1], scores[1]*100))
'''
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class ClientsGroup(models.Model):
    """Named group used to categorise clients (see Client.client_group)."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class ScootersGroup(models.Model):
    """Named group used to categorise scooters (see Scooter.scooter_group)."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class RateGroup(models.Model):
    """Named group used to categorise rates (see Rate.group)."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class Scooter(models.Model):
    """A rentable scooter with its telemetry and remote-control state."""
    scooter_name = models.CharField(max_length=50)
    # Lifecycle states of a scooter.
    status_choices = [
        ('ON', 'Online'),
        ('UR', 'Under repair'),
        ('RT', 'Rented'),
        ('BK', 'Booked')
    ]
    status = models.CharField(max_length=2, choices=status_choices, default='ON')
    alerts_choices = [
        ('OK', 'No alerts'),
        ('HJ', 'Hijacking'),
        ('LC', 'Lost connection'),
        ('LA', 'Leaving area'),
        ('LB', 'Low battery'),
    ]
    # Selectable speed limits (km/h).
    limits = [(10, '10 км/ч'), (15, '15 км/ч'), (25, '25 км/ч')]
    alert_status = models.CharField(max_length=2, choices=alerts_choices, default='OK')
    # NOTE(review): default of 40000 suggests mV or mAh rather than percent — confirm unit.
    battery = models.IntegerField(default=40000)
    latitude = models.FloatField(default=0.0)
    longitude = models.FloatField(default=0.0)
    description = models.TextField(blank=True, default="Без описания")
    # Fix: `default=[0]` removed — Django ignores `default` on a
    # ManyToManyField (check fields.W340), and a mutable list default is a
    # Python pitfall besides. Behaviour is unchanged.
    scooter_group = models.ManyToManyField(ScootersGroup, blank=True)
    photo = models.ImageField(default='images/image.png', upload_to='images/')
    tracker_id = models.CharField(default='0000000000000000', max_length=16)
    speed_limit = models.IntegerField(default=10, choices=limits)
    lamp = models.BooleanField(default=False)
    engine = models.BooleanField(default=False)
    lock = models.BooleanField(default=True)
    last_ping = models.DateTimeField(default=timezone.now)
    def __str__(self):
        return self.scooter_name + " " + str(self.id)
class Client(models.Model):
    """An end user of the scooter-rental service."""
    client_name = models.CharField(max_length=30, default='-')
    surname = models.CharField(max_length=30, default='-')
    status_choices = [
        ('AC', 'Active'),
        ('WV', 'Wait for verification'),
        ('BD', 'Blocked')
    ]
    status = models.CharField(max_length=2, choices=status_choices, default='WV')
    balance = models.DecimalField(max_digits=15, decimal_places=2, default=0.0)
    client_photo = models.ImageField(blank=True, default='images/image.png')
    # Fix: `default=0` removed — Django ignores `default` on a
    # ManyToManyField (check fields.W340). Behaviour is unchanged.
    client_group = models.ManyToManyField(ClientsGroup, blank=True)
    # Phone number doubles as the unique client identifier.
    phone = models.CharField(max_length=15, default="0", unique=True)
    def __str__(self):
        return self.client_name + " " + self.phone
class Rate(models.Model):
    """A pricing rate applicable to one or more RateGroup collections."""
    name = models.CharField(max_length=30, default="Default rate")
    # Price value; currency/units are not recorded on the model.
    rate = models.DecimalField(max_digits=15, decimal_places=2)
    group = models.ManyToManyField(RateGroup)
    def __str__(self):
        return self.name
class Alert(models.Model):
    """An alert event raised for a scooter, optionally linked to a client."""
    alerts_choices = [
        ('HJ', 'Hijacking'),
        ('LC', 'Lost connection'),
        ('LA', 'Leaving area'),
        ('LB', 'Low battery'),
        ('FP', 'Failed payment')
    ]
    # NOTE(review): default 'test' is not among the declared 2-char choices,
    # and max_length=6 exceeds them — confirm the intended default.
    alert_type = models.CharField(max_length=6, default='test', choices=alerts_choices)
    alert_owner = models.ForeignKey(Scooter, on_delete=models.CASCADE)
    # NOTE(review): blank=True without null=True on a ForeignKey lets forms
    # accept an empty value while the DB column still requires one — verify.
    alert_order = models.ForeignKey(Client, blank=True, on_delete=models.CASCADE)
    # Timestamp when the alert was received.
    gotten = models.DateTimeField(default=timezone.now)
    # True once an operator has acknowledged the alert.
    checked = models.BooleanField(default=False)
class Order(models.Model):
    """A single rental: one client on one scooter over a date/time span."""
    date = models.DateField()
    start_time = models.TimeField()
    finish_time = models.TimeField()
    scooter = models.ForeignKey(Scooter, on_delete=models.CASCADE)
    client = models.ForeignKey(Client, on_delete=models.CASCADE)
    cost = models.DecimalField(max_digits=15, decimal_places=2)
    is_paid = models.BooleanField(default=False)
    # Rate that was applied when the order was priced.
    rate = models.ForeignKey(Rate, on_delete=models.CASCADE)
    def __str__(self):
        return self.client.__str__() + " " + self.scooter.scooter_name + " " + str(self.date) + " " + str(self.id)
class Transaction(models.Model):
    """A payment record tied to an order."""
    date_time = models.DateTimeField(auto_now_add=True)
    cost = models.DecimalField(max_digits=15, decimal_places=2)
    # NOTE(review): OneToOneField permits only a single transaction per client
    # (ever) — a ForeignKey to Client looks more likely intended; verify.
    client = models.OneToOneField(Client, on_delete=models.CASCADE)
    order = models.OneToOneField(Order, on_delete=models.CASCADE)
    def __str__(self):
        return str(self.id)
class GeoZone(models.Model):
    """A geographic zone described by its associated GeoPoint vertices."""
    # Free-text zone kind (e.g. "Neutral"); not constrained by choices.
    zone_type = models.CharField(max_length=10, default="Neutral")
    def __str__(self):
        return str(self.id) + self.zone_type
class GeoPoint(models.Model):
    """One vertex of a GeoZone polygon; `number` orders the vertices."""
    lat = models.FloatField(default=0.0)
    lon = models.FloatField(default=0.0)
    zone = models.ForeignKey(GeoZone, on_delete=models.CASCADE)
    number = models.IntegerField(default=0)
    def __str__(self):
        return str(self.lat) + " " + str(self.lon)
class AlarmSettingsSingleton(models.Model):
    """Global alarm thresholds, intended to exist as a single row.

    NOTE(review): nothing in this model enforces the singleton — confirm how
    the single row is created/maintained elsewhere.
    """
    low_battery = models.FloatField(default=10.0)
    hijacking_speed = models.FloatField(default=10.0)
    leaving_area_time = models.FloatField(default=5.0)
    lost_track = models.FloatField(default=5.0)
    def __str__(self):
        return str(self.id)
|
# Split the SMS corpus into spam and ham message bodies (label is the first
# tab-separated field, the message is the rest).
# Fix: the file was opened twice and the handles were never closed; it is now
# read once through a context manager and each line is split a single time.
with open("../data/SMSSpamCollection") as corpus:
    _rows = [line.split("\t") for line in corpus]
spam_lines = [" ".join(row[1:]) for row in _rows if row[0] == "spam"]
ham_lines = [" ".join(row[1:]) for row in _rows if row[0] == "ham"]
print(" ".join(ham_lines))
|
import logSetup
# When executed as a script, configure logging before the scraper machinery
# below is imported, so import-time log output is captured.
if __name__ == "__main__":
    print("Initializing logging")
    logSetup.initLogging()
import TextScrape.TextScrapeBase
import readability.readability
import bs4
import webFunctions
class Scrape(TextScrape.TextScrapeBase.TextScraper):
    """Site-specific scraper configuration for yoraikun.wordpress.com.

    The crawling behaviour lives in the TextScraper base class; this
    subclass only supplies crawl parameters (start URL, URL filters, and
    DOM clean-up rules).
    """
    tableKey = 'yora'
    loggerPath = 'Main.Yor.Scrape'
    pluginName = 'YoraikunScrape'
    # Robust HTTP fetcher; logs under "<loggerPath>.Web".
    wg = webFunctions.WebGetRobust(logPath=loggerPath+".Web")
    threads = 4
    baseUrl = "https://yoraikun.wordpress.com/"
    # Domains from which linked files may be downloaded.
    fileDomains = set(['files.wordpress.com/'])
    startUrl = baseUrl
    # Any url containing any of the words in the `badwords` list will be ignored.
    badwords = [
        '/disclaimer/',
        '/about/',
        'like_comment=',
        'replytocom=',
        '?share=',
        '/comment-page-',
        'wp-login.php',
        'gravatar.com',
    ]
    # Elements removed from the DOM before link extraction.
    decomposeBefore = [
        {'name': 'likes-master'},  # Bullshit sharing widgets
        {'id': 'jp-post-flair'},
        {'class': 'commentlist'},  # Scrub out the comments so we don't try to fetch links from them
        {'id': 'comments'},
    ]
    # Elements removed from the page content that gets stored.
    decompose = [
        {'class': 'commentlist'},  # Scrub out the comments so we don't try to fetch links from them
        {'class': 'loggedout-follow-normal'},
        {'class': 'sd-content'},
        {'class': 'sd-title'},
        {'class': 'widget-area'},
        {'class': 'xoxo'},
        {'class': 'wpcnt'},
        {'id': 'calendar_wrap'},
        {'id': 'comments'},
        {'id': 'footer'},
        {'id': 'header'},
        {'id': 'entry-author-info'},
        {'id': 'jp-post-flair'},
        {'id': 'likes-other-gravatars'},
        {'id': 'nav-above'},
        {'id': 'nav-below'},
        {'id': 'primary'},
        {'id': 'secondary'},
        {'name': 'likes-master'},  # Bullshit sharing widgets
        {'style': 'display:none'},
    ]
    # Grab all images, ignoring host domain
    allImages = True
def test():
    """Construct the scraper and run a full crawl (manual entry point)."""
    scraper = Scrape()
    scraper.crawl()
# Run the crawl when executed directly.
if __name__ == "__main__":
    test()
|
# coding: utf-8
class Solution(object):
    def isAnagram(self, s, t):
        """
        Return True when t is an anagram of s.

        Counts the characters of s, then consumes the counts with t; any
        missing character or leftover count means "not an anagram".
        :type s: str
        :type t: str
        :rtype: bool
        """
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for ch in t:
            remaining = counts.get(ch, 0)
            if remaining == 0:
                return False
            counts[ch] = remaining - 1
        for remaining in counts.values():
            if remaining != 0:
                return False
        return True
a = Solution()
# Fix: use the function-call form of print so this demo runs under Python 3
# as well as Python 2 (the bare `print expr` statement is a SyntaxError on
# Python 3; with a single argument, print(expr) behaves identically on both).
print(a.isAnagram("anagram", "nagaram"))
print(a.isAnagram("ab", "b"))
|
# Módulo destinado a escribir las estructuras de datos que serán ser utilizadas
class Node:
    """Singly-linked-list node, ordered by its stored value."""

    def __init__(self, value=None):
        self.value = value
        self.next = None

    def __lt__(self, other):
        return self.value < other.value

    def __gt__(self, other):
        return self.value > other.value

    def __eq__(self, other):
        return self.value == other.value
class Iterator:
    """Forward iterator over a chain of nodes linked through `.next`.

    As seen at https://stackoverflow.com/questions/19721334/
    python-iterators-on-linked-list-elements
    """

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        node = self.current
        if node is None:
            raise StopIteration
        self.current = node.next
        return node.value
class Listirijilla:
    """Minimal singly linked list with indexing, concatenation, quicksort
    and value removal."""

    def __init__(self, arg=None):
        self.head = None
        self.tail = None
        # NOTE: a falsy initial value (0, '', ...) is not appended; this
        # mirrors the original behaviour of `if arg:`.
        if arg:
            self.append(arg)

    def append(self, value):
        """Append `value` at the tail in O(1)."""
        new = Node(value)
        if not self.head:
            self.head = new
            self.tail = self.head
        else:
            self.tail.next = new
            self.tail = self.tail.next

    def __getitem__(self, position):
        """Return the value at `position`; IndexError when out of range."""
        current_node = self.head
        for _ in range(position):
            if current_node:
                current_node = current_node.next
        if not current_node:
            raise IndexError("Posición fuera de rango")
        return current_node.value

    def __setitem__(self, position, value):
        """Overwrite the value at `position`; IndexError when out of range.

        As seen at: https://github.com/IIC2233/contenidos/blob/master/
        semana-03/01-arboles%20y%20listas%20ligadas.ipynb
        """
        # Fix: the bound check used `>`, so position == len(self) passed the
        # check and then silently did nothing; `>=` raises as intended.
        if position >= len(self):
            raise IndexError("Posición fuera de rango")
        current_node = self.head
        for _ in range(position):
            current_node = current_node.next
        current_node.value = value

    def __len__(self):
        total = 0
        current = self.head
        while current:
            current = current.next
            total += 1
        return total

    def __repr__(self):
        if not len(self):
            return "[]"
        rep = '['
        current = self.head
        while current:
            rep += '{} , '.format(current.value)
            current = current.next
        rep = rep[:-3]
        rep += "]"
        return rep

    def __iter__(self):
        return Iterator(self.head)

    def __add__(self, other):
        """Return a new list holding self's values followed by other's."""
        if not isinstance(other, Listirijilla):
            raise TypeError("Sólo puedes sumar una lista con otra")
        new = Listirijilla()
        for value in self:
            new.append(value)
        for value in other:
            new.append(value)
        return new

    def sort(self):
        """In-place quicksort (relies on Node's <, ==, > operators).

        As seen at https://stackoverflow.com/questions/18262306/
        quicksort-with-python
        """
        def quick(sub_lista):
            if len(sub_lista) > 1:
                less = Listirijilla()
                equal = Listirijilla()
                greater = Listirijilla()
                pivot = sub_lista.head
                current = sub_lista.head
                while current:
                    if current < pivot:
                        less.append(current.value)
                    if current == pivot:
                        equal.append(current.value)
                    if current > pivot:
                        greater.append(current.value)
                    current = current.next
                return quick(less) + equal + quick(greater)
            else:
                return sub_lista
        aux = quick(self)
        self.head = aux.head
        self.tail = aux.tail

    def popleft(self):
        """Remove and return the first value; IndexError on an empty list."""
        if len(self):
            aux = self.head
            self.head = self.head.next
            # Fix: keep `tail` consistent when the list becomes empty, so a
            # later append() does not attach nodes to the removed head.
            if self.head is None:
                self.tail = None
            return aux.value
        else:
            raise IndexError

    def remove(self, value):
        """Remove the first node holding `value`; no-op when absent."""
        if self.head is None:
            return
        if self.head.value == value:
            self.head = self.head.next
            if self.head is None:
                self.tail = None
            return
        anterior = self.head
        current = self.head.next
        # Fix: the loop condition was `current.value is not None`, which
        # raised AttributeError once `current` ran off the end (value not
        # found) and also broke lists that legitimately store None.
        while current is not None:
            if current.value == value:
                anterior.next = current.next
                # Fix: update `tail` when the last node is removed so
                # subsequent appends are not silently lost.
                if current is self.tail:
                    self.tail = anterior
                return
            anterior = current
            current = current.next
class SamePlayerError(Exception):
    """Raised when a player's affinity with himself is requested."""
    def __init__(self):
        super().__init__("No existe la afinidad de un jugador consigo mismo")
class NotConectedError(Exception):
    """Raised when two players are not directly (degree-one) related."""
    def __init__(self):
        super().__init__("Los jugadores no tienen grado uno de relación")
"""As seen at:
-https://stackoverflow.com/questions/39286116/
python-class-behaves-like-dictionary-or-list-data-like
-https://intelligentjava.wordpress.com/2016/10/19/
introduction-to-hash-tables/"""
# Esta tabla de hash asume hash perfecto.
class HashTable(object):
    """Hash table backed by two parallel Listirijilla lists (hashes/values).

    Assumes a perfect hash: equal hashes are treated as the same key, and
    collisions are not resolved.
    """

    def __init__(self):
        self._hashes = Listirijilla()
        self._values = Listirijilla()

    def __getitem__(self, key):
        target = hash(key)
        for index, stored in enumerate(self._hashes):
            if stored == target:
                return self._values[index]
        raise KeyError

    # Fast insertion path for keys known to be absent (skips the scan).
    def fast_set(self, key, value):
        self._hashes.append(hash(key))
        self._values.append(value)

    def __setitem__(self, key, value):
        target = hash(key)
        for index, stored in enumerate(self._hashes):
            if stored == target:
                self._values[index] = value
                return
        # Key not present: append a new hash/value pair.
        self._hashes.append(target)
        self._values.append(value)

    def __len__(self):
        return len(self._hashes)
# Odoo module manifest for the Delphinus customisation module.
{
    'name': 'Delphinus',
    'author': 'Optesis SA',
    'version': '1.4.0',
    'category': 'Tools',
    'description': """
permet de faire une descripotion ...
    """,
    'summary': 'Module de ...',
    'sequence': 9,
    'depends': ['base', 'account', 'account_accountant', 'purchase','sale','jt_amount_in_words'],
    # Data files are loaded in order at install/update time.
    'data': [
        'security/ir.model.access.csv',
        'views/account_move_view.xml',
        'views/optesis_pivot.xml',
        'views/purchase_order_view.xml',
        'views/sale_order_view.xml',
        'views/res_partner_view.xml',
        'views/optesis_header_footer.xml',
        'views/optesis_external_layout.xml',
        # Fix: 'views/optesis_report_delphinus.xml' was listed twice, which
        # made Odoo load its records a second time on every install/update.
        'views/optesis_report_delphinus.xml',
        'views/header_footer.xml',
        'views/external_view.xml',
        'views/bon_delphinus_1.xml',
        'views/bon_delphinus.xml',
        'report/report.xml',
        'report/optesis_custom_format.xml',
        'report/optesis_custom_format_facture.xml',
    ],
    'test': [],
    'installable': True,
    'application': True,
    'auto_install': False,
}
|
#!/usr/bin/env python
# coding: utf-8
import pkg_resources
# Sphinx configuration for the ircproto documentation.
# NOTE(review): pkg_resources is deprecated in modern setuptools;
# importlib.metadata is the replacement once Python >= 3.8 can be assumed.
extensions = [
    'sphinx.ext.autodoc',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'ircproto'
author = u'Alex Grönholm'
copyright = '2016, ' + author
# Derive version/release from the installed ircproto distribution so the
# docs cannot drift from the released package metadata.
v = pkg_resources.get_distribution('ircproto').parsed_version
version = v.base_version
release = v.public
language = None
exclude_patterns = ['_build']
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = 'classic'
html_static_path = ['_static']
htmlhelp_basename = 'ircprotodoc'
|
'''Shorty App Settings

Each value may be overridden in the project's Django settings module via the
corresponding SHORTY_* name; the second getattr argument is the default.
'''
from django.conf import settings
ADMIN_ENABLED = getattr(settings, 'SHORTY_ADMIN_ENABLED', True)
EXTERNAL_FLAG = getattr(settings, 'SHORTY_EXTERNAL_FLAG', False)
CANONICAL_DOMAIN = getattr(settings, 'SHORTY_CANONICAL_DOMAIN', None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.