blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8e3cf43e1d62308b1feeb537e76b3faca235f5c3 | Python | TheCodingHacker/SMSScare | /SmsScareV.7.py | UTF-8 | 11,713 | 2.859375 | 3 | [] | no_license | from twilio.rest import Client
from datetime import date
import random
import time
def main():
print("""
Welcome to Coder's and Geral's SMS Scare Software
1) Send an SMS
2) Credits
3) Exit
""")
choice=input("Enter a number: ")
if(choice == "1"):
sms()
if(choice == "2"):
credits()
if(choice == "3"):
exit(0)
def sms():
print("""
SMS Scare
1) Bank One Time Code Scare
2) Fake Order
3) PH Premium Fake SMS Scare
4) Prositite Order Fake SMS Scare
5) Console Online Account Suspend SMS Scare
6) Relationship Advice SMS Troll
7) Reveal Prank
""")
smschoice=input("Enter a number: ")
if(smschoice == "1"):
bankscare()
if(smschoice == "2"):
orderscare()
if(smschoice == "3"):
phscare()
if(smschoice == "4"):
prossms()
if(smschoice == "5"):
consuspend()
if(smschoice == "6"):
relatadvice()
if(smschoice == "7"):
reveal()
def credits():
print("This program was made by Coder and Geral.")
print("If you would like to contact us, you can reach us at our discord. https://discord.gg/CdnDyXJ")
time.sleep(15)
main()
def bankscare():
print("""
Bank Scare!
1) US Send
2) UK Send
""")
choice=input("Enter a number: ")
if(choice == "1"):
codescareus()
if(choice == "2"):
codescare()
def codescare():
print("Bank One Time Code Scare!")
number=input("Enter Number (Incl. Country Code ex. +44 For UK): ")
bank=input("Enter Bank Name: ")
banknum=input("Enter Number (10 LETTER BANK NAME ex. NATWESTENGB: ")
message="Your one-time passcode is " + str(random.randint(111111,999999)) + ". Do not share this code with anyone. If you did not request a code, Please contact the bank immediately!"
print("Sending Message!")
smssenduk(number, message, banknum)
def codescareus():
print("Bank One Time Code Scare!")
number=input("Enter Number (Incl. Country Code ex. +1 For US): ")
bank=input("Enter Bank Name: ")
message="Your one-time passcode is " + str(random.randint(111111,999999)) + ". Do not share this code with anyone. If you did not request a code, Please contact the bank immediately!"
print("Sending Message!")
smssendus(number, message)
def orderscare():
print("SMS Fake Order!")
print("""
1) US Send
2) UK Send
""")
fakenum=input("Choose a country: ")
if(fakenum == "1"):
fakeordersetupus()
if(fakenum == "2"):
fakeordersetupuk()
def fakeordersetupus():
print("Fake Order US Setup!")
providor=input("Enter Shipping Provider: ")
item=input("Enter Item: ")
number=input("Enter Number (Incl. Country Code ex. +1 For US): ")
name=input("Targets Name: ")
date=input("Enter Delivery Date (6 Digit ex. 12/07/19): ")
message="Hello " + name + ", Your order of: " + item + " will be delivered on the: " + date + ". Thank you for shipping with " + providor + " Order ID: " + str(random.randint(111111,999999))
smssendus(number, message)
def fakeordersetupuk():
print("Fake Order UK Setup!")
providor=input("Enter Shipping Provider: ")
item=input("Enter Item: ")
number=input("Enter Number (Incl. Country Code ex. +44 For UK): ")
provnum=input("Enter Number (10 LETTER PROVIDER NAME ex. UPSENDELIV: ")
name=input("Targets Name: ")
date=input("Enter Delivery Date (6 Digit ex. 07/12/19): ")
message="Hello " + name + ", Your order of: " + item + " will be delivered on the: " + date + ". Thank you for shipping with " + providor + " Order ID: " + str(random.randint(111111,999999))
smssenduk(number, message, provnum)
def phscare():
print("""
SMS Fake PH Premium Order!
1) US Send
2) UK Send
""")
phchoice=input("Enter Number: ")
if(phchoice == "1"):
phpremus()
if(phchoice == "2"):
phpremuk()
def phpremus():
print("PH Prem Order Setup!")
name=input("Enter Target Name: ")
today = date.today()
number=input("Enter Number (Incl. Country Code ex. +1 For US): ")
message = "Hello " + name + ", Your PornHub Premium Subscription was Sucessfully Renewed Today! (" + str(today) + "), Head over to https://pornhub.com to check out our newest premium content!"
smssendus(number, message)
def phpremuk():
print("PH Prem Order Setup UK!")
name=input("Enter Target Name: ")
today = date.today()
number=input("Enter Number (Incl. Country Code ex. +44 For UK): ")
phnum=input("Enter Number (10 LETTER PROVIDER NAME ex. PORNHUBNUM: ")
message = "Hello " + name + ", Your PornHub Premium Subscription was Sucessfully Renewed Today! (" + str(today) + "), Head over to https://pornhub.com to check out our newest premium content!"
smssenduk(number, message, phnum)
def reveal():
print("""
Reveal the Prank!
1) US
2) UK
""")
choice=input("Choose a number: ")
if(choice == "1"):
revealus()
if(choice == "2"):
revealuk()
def revealus():
print("Reveal the prank!")
name=input("Enter Target Name: ")
yname=input("Enter Your Name: ")
number=input("Enter Number (Incl. Country Code ex. +1 For US): ")
message= "Hello, " + name + ". " + "All messages you have recieved from this number were a prank, Please contact: " + yname + " as he was the one who executed this prank!"
smssendus(number, message)
def revealuk():
print("Reveal the prank!")
name=input("Enter Target Name:")
number=input("Enter Number (Incl. Country Code ex. +44 for UK): ")
message= "Hello, " + name + ". " + "All messages you have recieved from this number were a prank, Please contact: " + yname + " as he was the one who executed this prank!"
def sender():
print("""
What way would you like to send the message?
1) 10 Digit Word
2) Preset Number
""")
choice=input("Choose a number: ")
if(choice == 1):
word=input("Enter Number to Send from (10 LETTER NAME ex. PORNHUBPREM): ")
smssenuk(number,message,word)
if(choice == 2):
#Change the UK Number Below
smssenuk(number,message,"+441452260494")
def prossms():
print("""
Prosititute Fake SMS Scare
1) US Send
2) UK Send
""")
choice=input("Choose a number: ")
if(choice == "1"):
prossmsuk()
if(choice == "2"):
prossmsus()
def prossmsus():
print("Prostitute Fake SMS Scare US Send")
name=input("Enter Target Name: ")
number=input("Enter Number (Incl. Country Code ex. +1 for US): ")
prosname=input("Enter Prostitute's Name: ")
message="Hey " + name + " its " + prosname + " I am on the way to your house now, dont have too much fun without me. I cant wait to see you. Love you"
smsendus(number, message)
def prossmsuk():
print("Prostitute Fake SMS Scare UK Send :: This Module uses a normal telephone number for functionality!")
name=input("Enter Target Name: ")
number=input("Enter Number (Incl. Country Code ex. +44 for UK): ")
prosname=input("Enter Prostitute's Name: ")
message="Hey " + name + " its " + prosname + " I am on the way to your house now, dont have too much fun without me. I cant wait to see you. Love you"
smsenduk(number, message, "+441452260494")
def consuspend():
print("""
Console Fake SMS Scare!
Choose a Console Provider
1) PS4
2) XBOX
""")
choice=input("Choose a number: ")
if(choice == "1"):
ps4send()
if(choice == "2"):
xboxsend()
def ps4send():
print("""
PS4 Account Suspension SMS Scare
1) US Send
2) UK Send
""")
choice=input("Choose a number: ")
if(choice == "1"):
ps4ussend()
if(choice == "2"):
ps4uksend()
def xboxsend():
print("""
Xbox Account Suspension SMS Scare
1) US Send
2) UK Send
""")
choice=input("Choose a number: ")
if(choice == "1"):
xboxussend()
if(choice == "2"):
xboxuksend()
def ps4ussend():
print("""
PS4 Account Suspension SMS Scare - US Setup
""")
name=input("Enter Target Name: ")
number=input("Enter Number (Incl. Country Code ex. +1 for US): ")
reason=input("Enter Ban Reason: ")
days=input("Enter Ban Length (In Days): ")
message="Hello, " + name + ": Your PS4 Account has been suspended for " + reason + " this ban will persist for " + days + " days. In the future please refrain from breaking the Sony Rules and Guidelines!"
smssendus(number, message)
def ps4uksend():
print("""
PS4 Account Suspension SMS Scare - UK Setup
""")
name=input("Enter Target Name: ")
number=input("Enter Number(Incl. County Code ex. +44 for UK): ")
sonynum="SONYPLAYSTAT"
reason=input("Enter Ban Reason: ")
days=input("Enter Ban length (In Days): ")
message="Hello, " + name + ": Your PS4 Account has been suspended for " + reason + " this ban will persist for " + days + " days. In the future please refrain from breaking the Sony Rules and Guidelines!"
smsenduk(number, message, sonynum)
def xboxussend():
print("""
Xbox Account Suspension SMS Scare - US Setup
""")
name=input("Enter Target Name: ")
number=input("Enter number(Incl. Country Code ex. +44 for UK): ")
reason=input("Enter Ban Reason: ")
days=input("Enter Ban Length (In Days): ")
message="Hello, " + name + ": Your Xbox Account has been suspended for " + reason + " this ban will persist for " + days + " days. In the future please refrain from breaking the Microsoft Rules and Guidelines!"
smssendus(number, message)
def xboxuksend():
print("""
Xbox Account Suspension SMS Scare - UK Setup
""")
name=input("Enter Target Name: ")
number=input("Enter number(Incl. Country Code ex. +44 for UK): ")
reason=input("Enter Ban Reason: ")
days=input("Enter Ban Length (In Days): ")
xboxnum="MICROSOFTGB"
message="Hello, " + name + ": Your Xbox Account has been suspended for " + reason + " this ban will persist for " + days + " days. In the future please refrain from breaking the Microsoft Rules and Guidelines!"
smssenduk(number, message, xboxnum)
def relatadvice():
print("""
Relationship Advice SMS troll
1) US Send
2) UK Send
""")
choice=input("Choose a number: ")
if(choice == "1"):
relatadviceus()
if(choice == "2"):
relatadviceuk()
def relatadviceus():
name=input("Enter Target Name: ")
number=input("Enter Number(Incl. County Code ex. +1 for US): ")
message = "Hello, " + name + " you have requested out free relationship advice."
def smssenduk(number, message, num):
client = Client("" , "")
#Client("Account SID", "API Token")
message = client.messages \
.create(
body=message,
from_=num,
to=number
)
print(message.sid)
def smssendus(number, message):
client = Client("", "")
#Client("Account SID", "API Token")
message = client.messages \
.create(
body=message,
# Change the number below
from_='+17072025135',
to=number
)
print(message.sid)
main()
| true |
efd92e25f77eed42337fb4c272eab521c1d8de3b | Python | SylvainDeker/Distributed-Systems | /draft/pytest/test_fixture.py | UTF-8 | 1,101 | 2.546875 | 3 | [] | no_license | import os
import pytest
import numpy as np
import cv2 as cv
# pytest test_fixture.py
def test_needsfiles(tmpdir):
    """Smoke-test pytest's ``tmpdir`` fixture.

    pytest hands every test invocation its own temporary directory;
    we merely echo it and pass unconditionally.
    """
    location = tmpdir
    print(location)
    assert True
def test_create_file(tmp_path):
    """Write sub/hello.txt beneath ``tmp_path`` and verify the round trip.

    ``tmp_path`` is a pathlib.Path supplied per-test by pytest; the
    session-scoped ``tmp_path_factory`` fixture can likewise hand out
    arbitrary temporary directories from any other fixture or test.
    """
    subdir = tmp_path / "sub"
    subdir.mkdir()
    target = subdir / "hello.txt"
    target.write_text("CONTENT")
    round_tripped = target.read_text()
    assert round_tripped == "CONTENT"
    entries = list(tmp_path.iterdir())
    assert len(entries) == 1
    # assert False
@pytest.fixture(scope="session")
def image_file(tmpdir_factory):
    """Session-scoped fixture: load a raster image from disk, re-save it as
    a PNG inside a fresh temporary ``data`` directory, and return the new
    py.path location.

    NOTE(review): relies on an absolute local path and on the asset being
    exactly 5400x10800x3 -- confirm the file exists on the test host.
    """
    img = cv.imread("/home/bob/Documents/Distributed-Systems/data/NE1_50M_SR_W/NE1_50M_SR_W.tif")
    assert img.shape == (5400,10800,3)
    fn = tmpdir_factory.mktemp("data").join("img.png")
    # assert str(fn) == ""
    cv.imwrite(str(fn),img)
    return fn
# contents of test_image.py
def test_histogram(image_file):
    """Re-read the PNG produced by the ``image_file`` fixture and check that
    its dimensions survived the TIFF -> PNG round trip."""
    img = cv.imread(str(image_file))
    assert img.shape == (5400,10800,3)
    # compute and test histogram
| true |
ded6caae87a5072f00699e30d68548ddfc8d808b | Python | leonardobc52/edd_1310_2021 | /ADTS/Laberinto/Backtracking.py | UTF-8 | 2,737 | 3.25 | 3 | [] | no_license | from Array2D import Array2D
from stack import Stack
class LaberintoADT:
    """Maze ADT backed by an Array2D grid.

    Cell encoding: '0' corridor, '1' wall, 'E' entrance, 'S' exit.
    ``pasillos`` is an iterable of (row, col) corridor coordinates, e.g.
    ((2,1),(2,2),(2,3),(2,4),(3,2),(4,2)); ``entrada`` and ``salida`` are
    single (row, col) tuples, e.g. (5,1) and (2,5).  The partial solution
    path is kept on a Stack of coordinates.
    """

    def __init__(self, rens, cols, pasillos, entrada, salida):
        # Start from an all-wall grid and carve out the corridors.
        self.__laberinto = Array2D(rens, cols, '1')
        for pasillo in pasillos:
            self.__laberinto.set_item(pasillo[0], pasillo[1], '0')
        self.set_entrada(entrada[0], entrada[1])
        self.set_salida(salida[0], salida[1])
        self.__camino = Stack()
        self.__previa = None

    def to_string(self):
        """Print the maze grid."""
        self.__laberinto.to_string()

    def set_entrada(self, ren, col):
        """Mark the entrance cell 'E'.

        TODO(review): validate that (ren, col) lies on the maze border.
        """
        self.__laberinto.set_item(ren, col, 'E')

    def set_salida(self, ren, col):
        """Mark the exit cell 'S'.

        TODO(review): validate that (ren, col) lies on the maze border.
        """
        self.__laberinto.set_item(ren, col, 'S')

    def es_salida(self, ren, col):
        """Return True when the cell at (ren, col) is the exit."""
        return self.__laberinto.get_item(ren, col) == 'S'

    def buscar_entrada(self):
        """Locate the entrance, push it onto the path, and report success.

        Bug fix: the original initialised a misspelled variable
        (``encontradp``), so a maze without an 'E' cell raised NameError
        instead of returning False.
        """
        encontrado = False
        for renglon in range(self.__laberinto.get_num_rows()):
            for columna in range(self.__laberinto.get_num_cols()):
                if self.__laberinto.get_item(renglon, columna) == 'E':
                    self.__camino.push((renglon, columna))
                    encontrado = True
        return encontrado

    def set_previa(self, pos_previa):
        # Remember the previously visited cell so the solver never steps
        # straight back onto it.
        self.__previa = pos_previa

    def get_previa(self):
        """Return the previously visited cell (or None before any move)."""
        return self.__previa

    def imprimir_camino(self):
        """Print the current path stack."""
        self.__camino.to_string()

    def get_pos_actual(self):
        """Return the coordinate on top of the path stack."""
        return self.__camino.peek()

    def resolver_laberinto(self):
        """Advance the solver one step, trying left then up from the
        current cell.

        NOTE(review): only the left/up moves are implemented here; right,
        down and dead-end backtracking are presumably handled elsewhere or
        still pending -- confirm before relying on this to solve a maze.
        """
        actual = self.__camino.peek()
        # Step left: stay in bounds, only onto corridors, never back onto
        # the previous cell or a cell marked 'X'.
        if actual[1] - 1 != -1 \
                and self.__laberinto.get_item(actual[0], actual[1] - 1) == '0' \
                and self.get_previa() != (actual[0], actual[1] - 1) \
                and self.__laberinto.get_item(actual[0], actual[1] - 1) != 'X':
            self.set_previa(actual)
            self.__camino.push((actual[0], actual[1] - 1))
        # Step up: same restrictions as the left move.
        if actual[0] - 1 != -1 \
                and self.__laberinto.get_item(actual[0] - 1, actual[1]) == '0' \
                and self.get_previa() != (actual[0] - 1, actual[1]) \
                and self.__laberinto.get_item(actual[0] - 1, actual[1]) != 'X':
            self.set_previa(actual)
            self.__camino.push((actual[0] - 1, actual[1]))
| true |
4b268f19cdfba36db05a7be7efb74785f5a48b40 | Python | oliewalcp/WeChat | /SourceCode/Assist/Saver.py | UTF-8 | 3,146 | 2.703125 | 3 | [] | no_license | from Assist.GlobalVariable import *
"""
保存朋友信息
param[friend_id]:朋友id号
param[sender]:发送者
param[receiver]:接收者
param[nick_name]:朋友昵称
param[friend_remark_name]:朋友备注
"""
def save_friend_info(friend_id, nick_name, remark_name):
    """Persist a friend's nickname/remark, reconciling with stored records.

    Scans every [friend-id] group in the personal-message INI file.  When an
    entry with the same nickname or the same remark already exists it is
    refreshed; if it was stored under a different id, the record is migrated
    to ``friend_id`` and the chat-record file is renamed.  When nothing
    matches, a new group is created.

    friend_id:   friend id (INI group name)
    nick_name:   friend nickname
    remark_name: friend remark/alias
    """
    mem_file = QSettings(GlobalVariable.PersonalMsgFile, QSettings.IniFormat)
    null = True  # stays True while no existing record has matched
    # Iterate over the locally stored friend ids.
    child_group = mem_file.childGroups()
    for child in child_group:
        same = False
        mem_file.beginGroup(child)
        # Matched an identical nickname: refresh the remark.
        if mem_file.value("NickName") == nick_name:
            mem_file.setValue("RemarkName", remark_name)
            same = True
            null = False
        # Matched an identical remark: refresh the nickname.
        elif mem_file.value("RemarkName") == remark_name:
            mem_file.setValue("NickName", nick_name)
            same = True
            null = False
        mem_file.endGroup()
        # Same nickname or remark but a different id: move the record over
        # to the new friend_id.
        if same and friend_id != child:
            mem_file.remove(child)
            mem_file.beginGroup(friend_id)
            mem_file.setValue("RemarkName", remark_name)
            mem_file.setValue("NickName", nick_name)
            mem_file.endGroup()
            # NOTE(review): this renames <friend_id>.ini to <child>.ini,
            # i.e. new id -> old id -- confirm the direction is intended.
            if os.path.exists(GlobalVariable.RecordDataDirectory + friend_id + ".ini"):
                os.rename(GlobalVariable.RecordDataDirectory + friend_id + ".ini",
                          GlobalVariable.RecordDataDirectory + child + ".ini")
    # No existing record matched: create a fresh group for this friend.
    if null:
        mem_file.beginGroup(friend_id)
        mem_file.setValue("RemarkName", remark_name)
        mem_file.setValue("NickName", nick_name)
        mem_file.endGroup()
    del mem_file
"""
保存聊天消息
param[friend_id]:朋友id号
param[msg_id]:消息id号
param[msg_content]:消息内容
param[now_time]:发送时间
param[type]:消息类型
param[sender]:发送者
param[is_me]:是否是本机发送(0——不是,1——是)
"""
def save_chat_msg(friend_id, msg_id, msg_content, now_time, type, sender, is_me):
    """Persist one chat message under group [msg_id] in <friend_id>.ini.

    friend_id:   friend id (selects the per-friend record file)
    msg_id:      message id (INI group name)
    msg_content: message body
    now_time:    send timestamp
    type:        message type
    sender:      sender name
    is_me:       1 when sent from this machine, 0 otherwise
    """
    record = QSettings(GlobalVariable.RecordDataDirectory + friend_id + ".ini", QSettings.IniFormat)
    record.beginGroup(msg_id)
    fields = (("Content", msg_content),
              ("Time", now_time),
              ("Type", type),
              ("Sender", sender),
              ("ISend", is_me))
    for key, value in fields:
        record.setValue(key, value)
    record.endGroup()
    del record
"""
保存消息记录
param[msg_handler]:ChatMessageHandler对象
param[now_time]:消息发送时间
"""
def save_msg(msg_handler, now_time):
    """Record one message: update the friend's info, then append the
    message to that friend's history file.

    msg_handler: ChatMessageHandler instance
    now_time:    send timestamp
    """
    contact = msg_handler.get_current_user()
    content = msg_handler.get_content()
    alias = msg_handler.get_remark_name()
    origin = msg_handler.get_sender()
    sent_by_me = msg_handler.local_is_sender()
    save_friend_info(contact, msg_handler.get_nick_name(), alias)
    save_chat_msg(contact, msg_handler.get_msg_id(), content, now_time,
                  msg_handler.get_type(), origin, sent_by_me)
| true |
48495f29ced6cc67cfa9531c7eedb2eaca2bbcb3 | Python | 827983519/recommend_algorithm | /ICF_topN_class.py | UTF-8 | 5,296 | 2.625 | 3 | [] | no_license | from sklearn.metrics import pairwise_distances
import copy
class ICF_topN:
    """Item-based collaborative filtering producing top-N recommendations.

    ``fit`` expects a pandas DataFrame of ratings; the way ``dataset`` is
    consumed (``dataset[user][movie]``) implies rows are indexed by movieId
    and columns by userId -- TODO confirm against the caller.
    """

    def R_jaccard(self,I1,I2):
        """Robust-Jaccard-style similarity between two arrays of rated-item
        ids; returns 0 when they share nothing."""
        intersect = 1.0 * len(np.intersect1d(I1,I2))
        if intersect == 0:
            return 0
        # Items exclusive to each side.
        I1_g = 1.0 * len(I1) - intersect
        I2_g = 1.0 * len(I2) - intersect
        down = 1 + 1/intersect + I1_g/(1 + I1_g) + 1/(1 + I2_g)
        return 1/down

    def calculate_Rjaccard(self,data):
        """Pairwise R-jaccard matrix over the rows of ``data``, returned as
        a dict-of-dicts; exploits symmetry to skip half the computations."""
        # Map each row index to the column labels it has non-NaN values for.
        index_table = dict(zip(data.index.values,[0 for i in range(len(data.index))]))
        for index,row in data.iterrows():
            index_table[index] = row.dropna().index.values
        # Start from an all-NaN square table (NaN > 0 is False below).
        rjaccard_table = pd.DataFrame(index=data.index,columns=data.index).to_dict()
        for i in rjaccard_table:
            for j in rjaccard_table:
                if i == j:
                    rjaccard_table[i][j]=1
                    rjaccard_table[j][i]=1
                    continue
                # Mirror an already-computed symmetric entry.
                if rjaccard_table[i][j] > 0:
                    rjaccard_table[j][i] = rjaccard_table[i][j]
                    continue
                rjaccard_table[j][i] = self.R_jaccard(index_table[j],index_table[i])
        return rjaccard_table

    def __init__(self,neighbor=10,similarity='pearson'):
        # neighbor:   number of nearest items used for prediction.
        # similarity: one of 'pearson' | 'IIF' | 'jaccard' | 'rjaccard'.
        self.neighbor = neighbor
        self.similarity = similarity

    def jaccard(self,n1,n2):
        """Plain Jaccard coefficient of two id arrays."""
        up = len(np.intersect1d(n1,n2))
        down = len(np.union1d(n1,n2))
        return up/down

    def calculate_jaccard(self,data):
        """Pairwise Jaccard matrix over the rows of ``data`` (dict-of-dicts),
        skipping half the work via symmetry."""
        index_table = dict(zip(data.index.values,[0 for i in range(len(data.index))]))
        for index,row in data.iterrows():
            index_table[index] = row.dropna().index.values
        jaccard_table = pd.DataFrame(index=data.index,columns=data.index).to_dict()
        for i in data.index.values:
            for j in data.index.values:
                if i==j:
                    jaccard_table[i][j]=1
                    continue
                if jaccard_table[i][j] >0:
                    jaccard_table[j][i] = jaccard_table[i][j]
                    continue
                jaccard_table[j][i] = self.jaccard(index_table[j],index_table[i])
        return jaccard_table

    def calculate_similarity(self,data):
        """Dispatch on self.similarity; returns an item-item similarity
        matrix as a dict-of-dicts keyed by row index."""
        if self.similarity == 'pearson':
            # 1 - correlation distance == Pearson correlation; NaN -> 0 first.
            return pd.DataFrame(1- pairwise_distances(data.fillna(0),metric='correlation'),columns=data.index,index=data.index).to_dict()
        if self.similarity == 'IIF':
            # NOTE(review): reads a precomputed matrix from IIF.csv in the
            # current working directory -- confirm the file is provisioned.
            IIF = pd.read_csv('IIF.csv',index_col=0)
            IIF = pd.DataFrame(IIF.values,index=IIF.index,columns=IIF.index)
            return IIF.to_dict()
        if self.similarity == 'jaccard':
            return self.calculate_jaccard(data)
        if self.similarity == 'rjaccard':
            return self.calculate_Rjaccard(data)

    def find_nearset_neighbor(self,movieId):
        """Return the ids of the ``self.neighbor`` most similar items,
        skipping the item itself (slot 0 after the descending sort)."""
        top_neighbor = sorted(self.similarity_set[movieId].items(), key=lambda e:e[1], reverse=True)[1:1+self.neighbor]
        similar_index = [i[0] for i in top_neighbor]
        return similar_index

    def fit(self,data):
        """Store the ratings and precompute the item similarity matrix."""
        self.origin_data = data
        self.dataset = data.fillna(0).to_dict()
        self.similarity_set = self.calculate_similarity(data)

    def recommend(self,userId,topN):
        """Return the ``topN`` item ids with the highest predicted score for
        ``userId`` (requires predict_whole to have been run)."""
        top = sorted(self.predict_set[userId].items(),key = lambda items:items[1],reverse=True)[:topN]
        top_N = [i[0] for i in top]
        return top_N

    def calculate_Fscore(self,test_data,topN):
        """Compute per-user precision@topN against ``test_data`` (a frame
        with 'userId'/'movieId' columns); recall/F-score code is retained
        but disabled."""
        self.precision = []
        self.recall = []
        self.Fscore = []
        test_data = test_data[['userId','movieId']]
        for user in self.predict_set:
            #top = sorted(self.predict_set[user].items(),key = lambda items:items[1],reverse=True)[:topN]
            #top_N = [i[0] for i in top]
            top_N = self.recommend(user,topN)
            test_set = test_data.loc[test_data['userId']==user,'movieId'].values
            if len(test_set)==0:
                continue
            inter = len(np.intersect1d(top_N,test_set))
            precision = inter/topN
            #recall = inter/len(test_set)
            #fscore = (1+0.25)*(precision*recall)/(0.25*precision+recall)
            self.precision.append(precision)
            #self.recall.append(recall)
            # self.Fscore.append(fscore)

    def predict_whole(self,user_list):
        """Predict a score for every unrated (user, movie) pair.

        user_list: frame/series convertible via .to_dict() exposing a
        'User_list' mapping of user -> rated-movie ids -- TODO confirm the
        exact shape against the caller.  Already-rated cells are zeroed so
        they never surface in recommendations.
        """
        user_list = user_list.to_dict()
        predict_set = copy.deepcopy(self.dataset)
        for movie in self.dataset[list(self.dataset.keys())[0]]:
            print(movie)
            k_similar = self.find_nearset_neighbor(movie)
            for user in self.dataset.keys():
                if predict_set[user][movie] > 0:
                    predict_set[user][movie] = 0
                    continue
                u_list = user_list['User_list'][user]
                # Only neighbours the user has actually rated contribute.
                combine = np.intersect1d(u_list,k_similar)
                p = 0
                for k_index in combine:
                    if self.dataset[user][k_index] > 0:
                        p += self.similarity_set[movie][k_index]*1
                predict_set[user][movie] = p
        self.predict_set = predict_set
        return self.predict_set
| true |
def bfs(isConnected):
    """Count connected components ("provinces") in an adjacency matrix.

    isConnected[i][j] is truthy when nodes i and j are directly connected.
    Returns the number of connected components; an empty matrix yields 0.

    Fix: the seed node is now marked visited when its traversal starts.
    The original only marked nodes discovered as neighbours, which is
    correct only when the diagonal is all ones (as in LeetCode 547); this
    version also handles matrices with a zero diagonal.
    """
    n = len(isConnected)
    visited = set()
    provinces = 0
    for start in range(n):
        if start in visited:
            continue
        # Flood-fill everything reachable from `start`.
        visited.add(start)
        stack = [start]
        while stack:
            cur = stack.pop()
            for neighbor, connected in enumerate(isConnected[cur]):
                if connected and neighbor not in visited:
                    visited.add(neighbor)
                    stack.append(neighbor)
        provinces += 1
    return provinces
| true |
##n=int(input())
##s=list(map(str,input()))
##t=list(map(str,input()))
##if n>=1 and n<=10**3 and len(s)==len(t):
##    count=0
##    match=0
##    l=n
##    for i in range(n-1):
##        print(s[i:n-1],t[0:n-1-i])
##        if s[i:n-1]==t[0:n-1-i]:
##            break
##
##    print(i)
# Read the string length and the two equal-length strings from stdin.
n = int(input())
s = input()
t = input()
# Find the rotation offset of s relative to t: the first (longest) suffix
# s[i:] contained in t determines the answer n - len(s[i:]).
for i in range(0,n):
    k = s[i:]
    # NOTE(review): this print looks like leftover debug output -- confirm
    # it should ship.
    print(k)
    if k in t:
        print(n-len(k))
        break
| true |
ee9361e8a34ddd236bedb3099a49521d32b74a31 | Python | pabloalarconm/linkml | /linkml/generators/prefixmapgen.py | UTF-8 | 3,535 | 2.53125 | 3 | [
"CC0-1.0"
] | permissive | """
Generate JSON-LD contexts
"""
import logging
import os
from typing import Union, TextIO, Set, Optional
import click
from jsonasobj2 import JsonObj, as_json
from rdflib import XSD
from linkml_runtime.linkml_model.meta import SchemaDefinition, ClassDefinition, SlotDefinition, Definition, Element
from linkml_runtime.utils.formatutils import camelcase, underscore, be
from linkml.utils.generator import Generator, shared_arguments
from linkml_runtime.linkml_model.types import SHEX
URI_RANGES = (XSD.anyURI, SHEX.nonliteral, SHEX.bnode, SHEX.iri)
class PrefixGenerator(Generator):
    """LinkML generator that emits the prefix map of a schema as JSON,
    optionally with an ``@base`` entry and per-class id mappings."""
    generatorname = os.path.basename(__file__)
    generatorversion = "0.1.1"
    valid_formats = ['json']
    # Slots are visited globally, not per class (see visit_class).
    visit_all_class_slots = False

    def __init__(self, schema: Union[str, TextIO, SchemaDefinition], **kwargs) -> None:
        """Initialise from schema text; a pre-parsed SchemaDefinition is
        rejected because ``namespaces`` would be unavailable."""
        super().__init__(schema, **kwargs)
        if self.namespaces is None:
            raise TypeError("Schema text must be supplied to context generater. Preparsed schema will not work")
        self.emit_prefixes: Set[str] = set()
        self.default_ns = None
        self.context_body = dict()
        self.slot_class_maps = dict()

    def visit_schema(self, base: Optional[str]=None, output: Optional[str]=None, **_):
        """Collect the prefixes to emit from the schema declarations."""
        # Add any explicitly declared prefixes
        for prefix in self.schema.prefixes.values():
            self.emit_prefixes.add(prefix.prefix_prefix)
        # Add any prefixes explicitly declared
        for pfx in self.schema.emit_prefixes:
            self.add_prefix(pfx)
        # Add the default prefix
        if self.schema.default_prefix:
            dflt = self.namespaces.prefix_for(self.schema.default_prefix)
            if dflt:
                self.default_ns = dflt
        if self.default_ns:
            self.emit_prefixes.add(self.default_ns)

    def end_schema(self, base: Optional[str] = None, output: Optional[str] = None, **_) -> None:
        """Assemble the collected prefixes, context body and class maps into
        a JSON object; write it to ``output`` or print it."""
        context = JsonObj()
        if base:
            # A non-URL base is made relative to the schema's source file.
            if '://' not in base:
                self.context_body['@base'] = os.path.relpath(base, os.path.dirname(self.schema.source_file))
            else:
                self.context_body['@base'] = base
        for prefix in sorted(self.emit_prefixes):
            context[prefix] = self.namespaces[prefix]
        for k, v in self.context_body.items():
            context[k] = v
        for k, v in self.slot_class_maps.items():
            context[k] = v
        if output:
            with open(output, 'w') as outf:
                outf.write(as_json(context))
        else:
            print(as_json(context))

    def visit_class(self, cls: ClassDefinition) -> bool:
        """Record an @id mapping for classes whose URI prefix differs from
        the default namespace; returns False to skip per-class slots."""
        class_def = {}
        cn = camelcase(cls.name)
        self.add_mappings(cls)
        cls_prefix = self.namespaces.prefix_for(cls.class_uri)
        if not self.default_ns or not cls_prefix or cls_prefix != self.default_ns:
            class_def['@id'] = cls.class_uri
            if cls_prefix:
                self.add_prefix(cls_prefix)
        if class_def:
            self.slot_class_maps[cn] = class_def
        # We don't bother to visit class slots - just all slots
        return False

    def visit_slot(self, aliased_slot_name: str, slot: SlotDefinition) -> None:
        """Register the slot's URI mappings (prefixes collected as a side
        effect)."""
        self.add_mappings(slot)
@shared_arguments(PrefixGenerator)
@click.command()
@click.option("--base", help="Base URI for model")
def cli(yamlfile, **args):
    """ Generate jsonld @context definition from LinkML model """
    # click supplies yamlfile plus the shared generator options; serialize
    # prints or writes depending on the --output option handled upstream.
    print(PrefixGenerator(yamlfile, **args).serialize(**args))
| true |
ff6c76d95dac5d0ea631e0b0a547c96a753fc468 | Python | Spy7Dragon/Unsupervised-Learning-and-Dimensionality-Reduction | /comparison_generation.py | UTF-8 | 24,959 | 2.671875 | 3 | [] | no_license | import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import *
import pandas as pd
import traceback
from timeit import default_timer as timer
colors = ['red', 'orange', 'green', 'blue', 'indigo', 'violet']
def compare_cluster_values(model, data, attribute, values):
    """Refit ``model`` once per candidate ``attribute`` value and save a
    colour-coded cluster scatter plot for every pair of training features.

    data: dict with 'training_features', 'training_classes',
          'test_features', 'test_classes' and 'directory' (plot output).
    The model is assumed sklearn-style: ``fit``, ``labels_``, ``n_clusters``.
    """
    training_features = data['training_features']
    training_classes = data['training_classes']
    test_features = data['test_features']
    test_classes = data['test_classes']
    model_name = type(model).__name__
    # print("Perform training for " + attribute + " on " + model_name)
    for i in range(0, len(values)):
        value = values[i]
        scatters = []
        try:
            setattr(model, attribute, value)
            model.fit(training_features)
            feature_sets = itertools.combinations(training_features.columns.tolist(), 2)
            for feature_set in feature_sets:
                feature1 = feature_set[0]
                feature2 = feature_set[1]
                # One scatter per cluster label, colour-coded.
                # NOTE(review): this inner loop reuses the name ``i`` from
                # the outer values loop -- harmless here but worth renaming.
                for i in range(model.n_clusters):
                    color = colors[i]
                    scatter = plt.scatter(training_features.loc[model.labels_ == i, feature1],
                                          training_features.loc[model.labels_ == i, feature2],
                                          s=1, c=color, label='cluster' + str(i))
                    scatters.append(scatter)
                plt.legend()
                plt.xlabel(feature1)
                plt.ylabel(feature2)
                # Axis limits ignore int64-max entries, which appear to act
                # as missing-value sentinels -- TODO confirm upstream.
                max_int = np.iinfo(np.int64).max
                minx = training_features.loc[training_features[feature1] != max_int][feature1].min(skipna=True)
                maxx = training_features.loc[training_features[feature1] != max_int][feature1].max(skipna=True)
                plt.xlim([minx, maxx])
                miny = training_features.loc[training_features[feature2] != max_int][feature2].min(skipna=True)
                maxy = training_features.loc[training_features[feature2] != max_int][feature2].max(skipna=True)
                plt.ylim([miny, maxy])
                title = create_title(data, model_name, attribute, value, feature1, feature2)
                plt.title(title)
                directory = data['directory']
                plt.savefig(directory + '/' + title + '.png')
                plt.close()
        except Exception as ex:
            print(str(ex) + ' in ' + model_name + ' using ' + attribute \
                  + ' of ' + str(value))
            traceback.print_exc()
        finally:
            # Detach the plotted artists so the next iteration starts clean.
            for scatter in scatters:
                scatter.remove()
def display_clusters(model, data):
    """Fit ``model`` on the training features and save a black/gray
    True-vs-False class scatter plot for every pair of features.

    data: dict with 'training_features', 'training_classes',
          'test_features', 'test_classes' and 'directory' (plot output).
    Classes are read from the first column of the class frames.
    """
    training_features = data['training_features']
    training_classes = data['training_classes'].iloc[:, 0]
    test_features = data['test_features']
    test_classes = data['test_classes'].iloc[:, 0]
    model_name = type(model).__name__
    # print("Perform training on " + model_name)
    try:
        model.fit(training_features)
        feature_sets = itertools.combinations(training_features.columns.tolist(), 2)
        for feature_set in feature_sets:
            scatters = []
            try:
                feature1 = feature_set[0]
                feature2 = feature_set[1]
                scatter = plt.scatter(training_features.loc[training_classes.values == True][feature1],
                                      training_features.loc[training_classes.values == True][feature2],
                                      s=1, facecolors='black', label='True')
                scatters.append(scatter)
                scatter = plt.scatter(training_features.loc[training_classes.values == False][feature1],
                                      training_features.loc[training_classes.values == False][feature2],
                                      s=1, facecolors='gray', label='False')
                scatters.append(scatter)
                plt.legend()
                plt.xlabel(feature1)
                plt.ylabel(feature2)
                # Axis limits ignore int64-max sentinel entries; fall back to
                # [0, max_int] when a column is entirely sentinel/NaN.
                max_int = np.iinfo(np.int64).max
                minx = training_features.loc[training_features[feature1] != max_int][feature1].min(skipna=True)
                if np.isnan(minx):
                    minx = 0
                maxx = training_features.loc[training_features[feature1] != max_int][feature1].max(skipna=True)
                if np.isnan(maxx):
                    maxx = max_int
                plt.xlim([minx, maxx])
                miny = training_features.loc[training_features[feature2] != max_int][feature2].min(skipna=True)
                if np.isnan(miny):
                    miny = 0
                maxy = training_features.loc[training_features[feature2] != max_int][feature2].max(skipna=True)
                if np.isnan(maxy):
                    maxy = max_int
                plt.ylim([miny, maxy])
                title = create_title(data, model_name, feature1=feature1, feature2=feature2)
                plt.title(title)
                directory = data['directory']
                plt.savefig(directory + '/' + title + '.png')
                plt.close()
            except Exception as ex:
                print(str(ex) + ' in ' + model_name)
                traceback.print_exc()
            finally:
                # Detach plotted artists before the next feature pair.
                for scatter in scatters:
                    scatter.remove()
    except Exception as ex:
        print(str(ex) + ' in ' + model_name)
        traceback.print_exc()
def compare_cluster_numbers(model, data, attribute, intervals, interval_size=20, start_index=0):
    """Sweep ``attribute`` over ``intervals`` evenly spaced values
    (start_index + i * interval_size), refit the clustering model for each,
    save per-feature-pair cluster plots, and return the best value found:
    lowest ``inertia_`` when the model exposes one, otherwise the value
    with the fastest fit time.
    """
    training_features = data['training_features']
    training_classes = data['training_classes']
    test_features = data['test_features']
    test_classes = data['test_classes']
    model_name = type(model).__name__
    best_inertia = float('inf')
    best_training_time = float('inf')
    best_value = start_index
    # print("Perform training for " + attribute + " on " + model_name)
    for i in range(intervals):
        value = i * interval_size + start_index
        try:
            setattr(model, attribute, value)
            start = timer()
            model.fit(training_features)
            end = timer()
            training_time = end - start
            feature_sets = itertools.combinations(training_features.columns.tolist(), 2)
            for feature_set in feature_sets:
                scatters = []
                try:
                    feature1 = feature_set[0]
                    feature2 = feature_set[1]
                    # Cluster count: prefer n_components, then n_clusters,
                    # defaulting to 2 when neither attribute exists.
                    length = 2
                    if hasattr(model, 'n_components'):
                        length = model.n_components
                    elif hasattr(model, 'n_clusters'):
                        length = model.n_clusters
                    # NOTE(review): reuses ``i`` from the outer sweep loop;
                    # harmless since ``value`` is already computed, but
                    # renaming would be clearer.
                    for i in range(length):
                        color = colors[i]
                        scatter = plt.scatter(training_features.loc[model.labels_ == i, feature1],
                                              training_features.loc[model.labels_ == i, feature2],
                                              s=1, c=color, label='cluster' + str(i))
                        scatters.append(scatter)
                    plt.legend()
                    plt.xlabel(feature1)
                    plt.ylabel(feature2)
                    # Axis limits ignore int64-max sentinel entries.
                    max_int = np.iinfo(np.int64).max
                    minx = training_features.loc[training_features[feature1] != max_int][feature1].min(skipna=True)
                    if np.isnan(minx):
                        minx = 0
                    maxx = training_features.loc[training_features[feature1] != max_int][feature1].max(skipna=True)
                    if np.isnan(maxx):
                        maxx = max_int
                    plt.xlim([minx, maxx])
                    miny = training_features.loc[training_features[feature2] != max_int][feature2].min(skipna=True)
                    if np.isnan(miny):
                        miny = 0
                    maxy = training_features.loc[training_features[feature2] != max_int][feature2].max(skipna=True)
                    if np.isnan(maxy):
                        maxy = max_int
                    plt.ylim([miny, maxy])
                    title = create_title(data, model_name, attribute, value, feature1, feature2)
                    plt.title(title)
                    directory = data['directory']
                    plt.savefig(directory + '/' + title + '.png')
                    for scatter in scatters:
                        scatter.remove()
                    plt.close()
                except Exception as ex:
                    print(str(ex) + ' in ' + model_name + ' using ' + attribute \
                          + ' of ' + str(value))
                    traceback.print_exc()
            # Track the best value: inertia when available, else fit time.
            if hasattr(model, 'inertia_'):
                inertia = model.inertia_
                if inertia < best_inertia:
                    best_inertia = inertia
                    best_value = value
            else:
                if training_time < best_training_time:
                    best_training_time = training_time
                    best_value = value
        except Exception as ex:
            print(str(ex) + ' in ' + model_name + ' using ' + attribute \
                  + ' of ' + str(value))
            traceback.print_exc()
    return best_value
def compare_error_numbers(model, data, attribute, intervals, interval_size=20, start_index=0):
    """Sweep an integer hyper-parameter of a supervised model and plot errors.

    For each of `intervals` candidate values (start_index, start_index +
    interval_size, ...) the model is refit, training/test error are recorded,
    and a single error-vs-value line chart (PNG) plus a CSV of fit/predict
    timings are written to the directories named in `data`.

    :param model: supervised estimator exposing fit(X, y) and predict(X).
    :param data: dict with 'training_features', 'training_classes',
        'test_features', 'test_classes', 'directory', 'table_directory'.
    :param attribute: name of the model attribute to sweep (set via setattr).
    :param intervals: number of candidate values to try.
    :param interval_size: step between consecutive candidate values.
    :param start_index: first candidate value.
    """
    training_features = data['training_features']
    training_classes = data['training_classes']
    test_features = data['test_features']
    test_classes = data['test_classes']
    model_name = type(model).__name__
    section_scores = []
    time_data = []
    # print("Perform training for " + attribute + " on " + model_name)
    for i in range(intervals):
        value = i * interval_size + start_index
        try:
            setattr(model, attribute, value)
            # Time the fit and the training-set prediction separately.
            start = timer()
            model.fit(training_features, training_classes)
            end = timer()
            training_time = end - start
            start = timer()
            predicted_training_classes = model.predict(training_features)
            end = timer()
            classification_time = end - start
            time_data.append([training_time, classification_time])
            predicted_test_classes = model.predict(test_features)
            # Error = 1 - accuracy, for both splits.
            training_score = accuracy_score(training_classes, predicted_training_classes)
            training_error = 1.0 - training_score
            test_score = accuracy_score(test_classes, predicted_test_classes)
            test_error = 1.0 - test_score
            section_scores.append([value, training_error, test_error])
        except Exception as ex:
            # A failing candidate is logged and skipped; the sweep continues.
            print(str(ex) + ' in ' + model_name + ' using ' + attribute \
                + ' of ' + str(value))
            traceback.print_exc()
    title = create_title(data, model_name, attribute)
    plot_frame = pd.DataFrame(section_scores, columns=[attribute, 'Training Error', 'Test Error'])
    graph = plot_frame.plot(x=attribute, y=['Training Error', 'Test Error'],
                            title=title)
    graph.set_xlabel(attribute)
    graph.set_ylabel('Error')
    # Fixed y-range so charts for different attributes are comparable.
    plt.ylim(0.0, 0.5)
    directory = data['directory']
    plt.savefig(directory + '/' + title + '.png')
    plt.close()
    time_table = pd.DataFrame(time_data,
                              columns=['Training Time', 'Classification Time'])
    table_directory = data['table_directory']
    time_table.to_csv(table_directory + '/' + title + '-time.csv')
def compare_mean_values(model, data, attribute, values):
    """Fit a mean-based clusterer for each candidate value of *attribute* and
    save a scatter plot of the fitted cluster means for every feature pair.

    :param model: clusterer exposing fit(X) and a fitted `means_` array
        (e.g. a Gaussian mixture).
    :param data: dict with 'training_features' (DataFrame),
        'training_classes', 'test_features', 'test_classes', 'directory'.
    :param attribute: model attribute to sweep (set via setattr).
    :param values: explicit candidate values to try.
    """
    training_features = data['training_features']
    training_classes = data['training_classes']
    test_features = data['test_features']
    test_classes = data['test_classes']
    model_name = type(model).__name__
    # print("Perform training for " + attribute + " on " + model_name)
    for i in range(0, len(values)):
        value = values[i]
        scatters = []
        try:
            setattr(model, attribute, value)
            model.fit(training_features)
            feature_list = training_features.columns.tolist()
            feature_sets = itertools.combinations(feature_list, 2)
            for feature_set in feature_sets:
                feature1 = feature_set[0]
                feature2 = feature_set[1]
                # One point per fitted cluster mean, in a distinct color.
                # NOTE(review): `model.means_[index, feature1]` indexes a numpy
                # array with the DataFrame *column label*; that only works when
                # labels are integer positions — confirm (compare
                # compare_mean_numbers, which uses positional indices).
                for index in range(len(model.means_)):
                    color = colors[index]
                    scatter = plt.scatter(model.means_[index, feature1],
                                          model.means_[index, feature2],
                                          s=3, c=color, label='cluster' + str(index))
                    scatters.append(scatter)
                plt.legend()
                plt.xlabel(feature1)
                plt.ylabel(feature2)
                # Axis limits skip entries equal to int64 max (apparently a
                # placeholder for missing data — TODO confirm).
                max_int = np.iinfo(np.int64).max
                minx = training_features.loc[training_features[feature1] != max_int][feature1].min(skipna=True)
                maxx = training_features.loc[training_features[feature1] != max_int][feature1].max(skipna=True)
                plt.xlim([minx, maxx])
                miny = training_features.loc[training_features[feature2] != max_int][feature2].min(skipna=True)
                maxy = training_features.loc[training_features[feature2] != max_int][feature2].max(skipna=True)
                plt.ylim([miny, maxy])
                title = create_title(data, model_name, attribute, value, feature1, feature2)
                plt.title(title)
                directory = data['directory']
                plt.savefig(directory + '/' + title + '.png')
                plt.close()
        except Exception as ex:
            # A failing candidate is logged and skipped; the sweep continues.
            print(str(ex) + ' in ' + model_name + ' using ' + attribute \
                + ' of ' + str(value))
            traceback.print_exc()
        finally:
            # NOTE(review): scatters are removed only after *all* feature
            # pairs, so each saved figure accumulates points from earlier
            # pairs, and remove() runs after plt.close() — confirm intent
            # (compare_mean_numbers clears them per feature pair instead).
            for scatter in scatters:
                scatter.remove()
def compare_mean_numbers(model, data, attribute, intervals, interval_size=20, start_index=0):
    """Sweep an integer hyper-parameter of a mean-based clusterer, plotting
    the fitted cluster means for every feature pair, and return the value
    with the fastest training time.

    All-zero feature columns and rows with NaNs are dropped from the
    training features first; test features are restricted to the same
    columns (they are loaded and filtered here even though only the
    training set is fitted).

    :param model: clusterer exposing fit(X) and a fitted `means_` array.
    :param data: dict with 'training_features' (DataFrame),
        'training_classes', 'test_features', 'test_classes', 'directory'.
    :param attribute: model attribute to sweep (set via setattr).
    :param intervals: number of candidate values to try.
    :param interval_size: step between consecutive candidate values.
    :param start_index: first candidate value.
    :return: the candidate value whose fit completed fastest.
    """
    training_features = data['training_features']
    training_classes = data['training_classes']
    # Drop features that are zero everywhere, then any rows with NaNs.
    training_features = training_features.loc[:, (training_features != 0).any(axis=0)]
    training_features.dropna(inplace=True)
    used_columns = training_features.columns.tolist()
    test_features = data['test_features']
    test_classes = data['test_classes']
    test_features = test_features.loc[:, used_columns]
    test_features.dropna(inplace=True)
    model_name = type(model).__name__
    best_value = start_index
    best_training_time = float('inf')
    # print("Perform training for " + attribute + " on " + model_name)
    for i in range(intervals):
        value = i * interval_size + start_index
        scatters = []
        try:
            setattr(model, attribute, value)
            start = timer()
            model.fit(training_features)
            end = timer()
            training_time = end - start
            # Feature pairs are positional indices here (unlike
            # compare_mean_values, which iterates column labels).
            feature_sets = itertools.combinations(range(len(used_columns)), 2)
            for feature_set in feature_sets:
                feature1 = feature_set[0]
                feature2 = feature_set[1]
                for index in range(len(model.means_)):
                    color = colors[index]
                    scatter = plt.scatter(model.means_[index, feature1],
                                          model.means_[index, feature2],
                                          s=5, c=color, label='cluster' + str(index))
                    scatters.append(scatter)
                plt.legend()
                plt.xlabel(feature1)
                plt.ylabel(feature2)
                # Translate positional indices to column labels for the
                # range computation and the chart title.
                feature1 = used_columns[feature1]
                feature2 = used_columns[feature2]
                # Axis limits skip entries equal to int64 max (apparently a
                # placeholder for missing data — TODO confirm).
                max_int = np.iinfo(np.int64).max
                minx = training_features.loc[training_features[feature1] != max_int][feature1].min(skipna=True)
                maxx = training_features.loc[training_features[feature1] != max_int][feature1].max(skipna=True)
                plt.xlim([minx, maxx])
                miny = training_features.loc[training_features[feature2] != max_int][feature2].min(skipna=True)
                maxy = training_features.loc[training_features[feature2] != max_int][feature2].max(skipna=True)
                plt.ylim([miny, maxy])
                title = create_title(data, model_name, attribute, value, feature1, feature2)
                plt.title(title)
                directory = data['directory']
                plt.savefig(directory + '/' + title + '.png')
                # Remove this pair's points before drawing the next pair;
                # the list is iterated over a copy and already-removed
                # artists are tolerated because the list is never cleared.
                for scatter in scatters[:]:
                    try:
                        scatter.remove()
                    except Exception:
                        pass
                plt.close()
            # "Best" here means fastest fit, not best model quality.
            if training_time < best_training_time:
                best_training_time = training_time
                best_value = value
        except Exception as ex:
            # A failing candidate is logged and skipped; the sweep continues.
            print(str(ex) + ' in ' + model_name + ' using ' + attribute \
                + ' of ' + str(value))
            traceback.print_exc()
    return best_value
def compare_explained_variance_numbers(model, data, attribute, intervals, interval_size=20, start_index=0):
    """Sweep an integer hyper-parameter of a decomposition model and keep the
    value whose fit explains the most variance.

    The model is left configured (via setattr) with the winning value, which
    is also returned. Failing candidates are logged and skipped.
    """
    features = data['training_features']
    # Lookups kept for parity with the other sweep helpers (they also
    # validate that the dict is fully populated).
    _unused = (data['training_classes'], data['test_features'], data['test_classes'])
    estimator_name = type(model).__name__
    best_total_variance = 0.0
    best_value = start_index
    for step in range(intervals):
        candidate = step * interval_size + start_index
        try:
            setattr(model, attribute, candidate)
            model.fit_transform(features)
            total_variance = np.sum(model.explained_variance_ratio_)
            if total_variance > best_total_variance and best_total_variance < 1.0:
                best_total_variance = total_variance
                best_value = candidate
        except Exception as ex:
            print(str(ex) + ' in ' + estimator_name + ' using ' + attribute \
                + ' of ' + str(candidate))
            traceback.print_exc()
    # Leave the model configured with the winner.
    setattr(model, attribute, best_value)
    return best_value
def compare_explained_variance_values(model, data, attribute, values):
    """Try each explicit candidate value for *attribute* and keep the one
    whose fit explains the most variance.

    The model is left configured (via setattr) with the winning value, which
    is also returned. Failing candidates are logged and skipped.
    """
    features = data['training_features']
    # Lookups kept for parity with the other sweep helpers (they also
    # validate that the dict is fully populated).
    _unused = (data['training_classes'], data['test_features'], data['test_classes'])
    estimator_name = type(model).__name__
    best_total_variance = 0.0
    best_value = values[0]
    for candidate in values:
        try:
            setattr(model, attribute, candidate)
            model.fit_transform(features)
            total_variance = np.sum(model.explained_variance_ratio_)
            if total_variance > best_total_variance and best_total_variance < 1.0:
                best_total_variance = total_variance
                best_value = candidate
        except Exception as ex:
            print(str(ex) + ' in ' + estimator_name + ' using ' + attribute \
                + ' of ' + str(candidate))
            traceback.print_exc()
    # Leave the model configured with the winner.
    setattr(model, attribute, best_value)
    return best_value
def compare_mixing_numbers(model, data, attribute, intervals, interval_size=20, start_index=0):
    """Sweep an integer hyper-parameter and keep the value whose fitted model
    has the largest sum of mixing coefficients (`mixing_`).

    The model is left configured (via setattr) with the winning value, which
    is also returned. Failing candidates are logged and skipped.
    """
    features = data['training_features']
    # Lookups kept for parity with the other sweep helpers (they also
    # validate that the dict is fully populated).
    _unused = (data['training_classes'], data['test_features'], data['test_classes'])
    estimator_name = type(model).__name__
    best_mixing_total = 0.0
    best_value = start_index
    for step in range(intervals):
        candidate = step * interval_size + start_index
        try:
            setattr(model, attribute, candidate)
            model.fit_transform(features)
            mixing_total = np.sum(model.mixing_)
            if mixing_total > best_mixing_total and best_mixing_total < 1.0:
                best_mixing_total = mixing_total
                best_value = candidate
        except Exception as ex:
            print(str(ex) + ' in ' + estimator_name + ' using ' + attribute \
                + ' of ' + str(candidate))
            traceback.print_exc()
    # Leave the model configured with the winner.
    setattr(model, attribute, best_value)
    return best_value
def compare_mixing_values(model, data, attribute, values):
    """Try each explicit candidate value for *attribute* and keep the one
    whose fitted model has the largest sum of mixing coefficients.

    The model is left configured (via setattr) with the winning value, which
    is also returned. Failing candidates are logged and skipped.
    """
    features = data['training_features']
    # Lookups kept for parity with the other sweep helpers (they also
    # validate that the dict is fully populated).
    _unused = (data['training_classes'], data['test_features'], data['test_classes'])
    estimator_name = type(model).__name__
    best_mixing_total = 0.0
    best_value = values[0]
    for candidate in values:
        try:
            setattr(model, attribute, candidate)
            model.fit_transform(features)
            mixing_total = np.sum(model.mixing_)
            if mixing_total > best_mixing_total and best_mixing_total < 1.0:
                best_mixing_total = mixing_total
                best_value = candidate
        except Exception as ex:
            print(str(ex) + ' in ' + estimator_name + ' using ' + attribute \
                + ' of ' + str(candidate))
            traceback.print_exc()
    # Leave the model configured with the winner.
    setattr(model, attribute, best_value)
    return best_value
def plot_reduction(model, data):
    """Fit a dimensionality-reduction (or sampling) model and save one
    scatter plot per pair of output components.

    Models with a transform() method are applied to the training features;
    otherwise the model is sampled for as many points as there are training
    rows. Plots are written as '<title>-reduction.png' in data['directory'].

    :param model: estimator exposing fit(X) and either transform(X) or
        sample(n); may expose n_components / n_clusters.
    :param data: dict with 'training_features', 'training_classes',
        'test_features', 'directory'.
    """
    training_features = data['training_features']
    training_classes = data['training_classes']
    test_features = data['test_features']
    model.fit(training_features)
    if hasattr(model, 'transform'):
        new_training_features = pd.DataFrame(model.transform(training_features))
    else:
        # No transform available: draw samples from the fitted model instead.
        new_training_features = pd.DataFrame(model.sample(len(training_features))[0])
    # Number of output dimensions; fall back to 2 when the model exposes
    # neither n_components nor n_clusters.
    length = 2
    if hasattr(model, 'n_components'):
        length = model.n_components
    elif hasattr(model, 'n_clusters'):
        length = model.n_clusters
    feature_sets = itertools.combinations(range(length), 2)
    model_name = type(model).__name__
    for feature_set in feature_sets:
        feature1 = feature_set[0]
        feature2 = feature_set[1]
        scatter = plt.scatter(new_training_features.iloc[:, feature1],
                              new_training_features.iloc[:, feature2],
                              s=1, color='black', label='points')
        plt.legend()
        plt.xlabel(feature1)
        plt.ylabel(feature2)
        # Axis limits skip entries equal to int64 max (apparently a
        # placeholder for missing data — TODO confirm); the label-based
        # lookup works because the DataFrame built above has integer
        # positional column labels.
        max_int = np.iinfo(np.int64).max
        minx = new_training_features.loc[new_training_features[feature1] != max_int][feature1].min(skipna=True)
        if np.isnan(minx):
            minx = 0
        maxx = new_training_features.loc[new_training_features[feature1] != max_int][feature1].max(skipna=True)
        if np.isnan(maxx):
            maxx = max_int
        plt.xlim([minx, maxx])
        miny = new_training_features.loc[new_training_features[feature2] != max_int][feature2].min(skipna=True)
        if np.isnan(miny):
            miny = 0
        maxy = new_training_features.loc[new_training_features[feature2] != max_int][feature2].max(skipna=True)
        if np.isnan(maxy):
            maxy = max_int
        plt.ylim([miny, maxy])
        title = create_title(data, model_name, feature1=feature1, feature2=feature2)
        plt.title(title)
        directory = data['directory']
        plt.savefig(directory + '/' + title + '-reduction.png')
        scatter.remove()
        plt.close()
def get_model_name(model):
    """Return the class name of *model* (e.g. 'KMeans')."""
    return model.__class__.__name__
def create_title(data, model_name, attribute=None, value=None, feature1=None, feature2=None):
    """Build a dash-separated chart/file title.

    Order: [dimensionality_reduction-][clusterer-]model_name[-attribute]
    [-value][-feature1-vs-feature2]; optional pieces are included only when
    present (and, for the data entries, non-None).
    """
    pieces = []
    if data.get('dimensionality_reduction') is not None:
        pieces.append(data['dimensionality_reduction'])
    if data.get('clusterer') is not None:
        pieces.append(data['clusterer'])
    pieces.append(model_name)
    if attribute is not None:
        pieces.append(attribute)
    if value is not None:
        pieces.append(str(value))
    title = '-'.join(pieces)
    if feature1 is not None and feature2 is not None:
        title += '-' + str(feature1) + '-vs-' + str(feature2)
    return title
def create_attribute_storage_name(data, model):
    """Build the dash-separated key under which a model's tuned attributes
    are stored: directory-ModelClass[-dimensionality_reduction][-clusterer].
    """
    name_parts = [data['directory'], type(model).__name__]
    for key in ('dimensionality_reduction', 'clusterer'):
        if data.get(key) is not None:
            name_parts.append(data[key])
    return '-'.join(name_parts)
def update_model(model, best):
    """Apply the chosen hyper-parameter values to *model* in place.

    :param best: mapping of attribute name -> value to set on the model.
    """
    for attribute_name, chosen_value in best.items():
        setattr(model, attribute_name, chosen_value)
| true |
143035dacc93c929a1be9db65b44a4992f8c8152 | Python | knighton/comma_speed | /common/dataset.py | UTF-8 | 4,565 | 2.984375 | 3 | [] | no_license | import numpy as np
class Split(object):
    """Metadata and access interface for one split (train or test) of a dataset.

    Subclasses implement get_batch(); this base class tracks the sample
    count and the per-input/per-output sample shapes and dtype names.
    """

    def __init__(self, samples_per_epoch, x_sample_shapes, x_dtypes,
                 y_sample_shapes, y_dtypes):
        """
        :param samples_per_epoch: number of samples in this split.
        :param x_sample_shapes: list of per-sample shapes, one per input.
        :param x_dtypes: list of dtype names, one per input.
        :param y_sample_shapes: list of per-sample shapes, one per output.
        :param y_dtypes: list of dtype names, one per output.
        """
        self.samples_per_epoch = samples_per_epoch
        self.sample_shapes = x_sample_shapes, y_sample_shapes
        self.x_sample_shapes = x_sample_shapes
        self.y_sample_shapes = y_sample_shapes
        self.dtypes = x_dtypes, y_dtypes
        self.x_dtypes = x_dtypes
        self.y_dtypes = y_dtypes

    def batches_per_epoch(self, batch_size):
        """Number of whole batches per epoch (remainder samples dropped)."""
        return self.samples_per_epoch // batch_size

    def x_batch_shapes(self, batch_size):
        """Batch-level shapes of the inputs (batch dim prepended)."""
        return [(batch_size,) + x for x in self.x_sample_shapes]

    def y_batch_shapes(self, batch_size):
        """Batch-level shapes of the outputs (batch dim prepended)."""
        return [(batch_size,) + y for y in self.y_sample_shapes]

    def batch_shapes(self, batch_size):
        """Return (x batch shapes, y batch shapes).

        Bug fix: a stray trailing comma previously wrapped the x shapes in
        an extra tuple, so this returned (([...],), [...]).
        """
        x = self.x_batch_shapes(batch_size)
        y = self.y_batch_shapes(batch_size)
        return x, y

    def get_batch(self, batch_size, index):
        """Fetch batch *index*; implemented by subclasses."""
        raise NotImplementedError

    def shuffle(self, batch_size):
        """Return the batch indices of one epoch in random order."""
        batches_per_epoch = self.batches_per_epoch(batch_size)
        x = np.arange(batches_per_epoch)
        np.random.shuffle(x)
        return x
class RamSplit(Split):
    """A Split whose samples live in RAM as numpy arrays and are batched by
    simple slicing."""

    @classmethod
    def normalize(cls, xx):
        """Wrap a single ndarray in a list; pass lists/tuples through."""
        if isinstance(xx, np.ndarray):
            return [xx]
        assert isinstance(xx, (list, tuple))
        return xx

    @classmethod
    def check(cls, xx, yy):
        """Require every input/output to be an ndarray, all sharing one
        non-zero length."""
        lengths = set()
        for array in list(xx) + list(yy):
            assert isinstance(array, np.ndarray)
            lengths.add(len(array))
        assert len(lengths) == 1
        assert lengths.pop()

    def __init__(self, xx, yy):
        xx = self.normalize(xx)
        yy = self.normalize(yy)
        self.check(xx, yy)
        # Derive the split metadata from the first sample of each array.
        Split.__init__(
            self,
            len(xx[0]),
            [x[0].shape for x in xx],
            [x[0].dtype.name for x in xx],
            [y[0].shape for y in yy],
            [y[0].dtype.name for y in yy],
        )
        self.xx = xx
        self.yy = yy

    def get_batch(self, batch_size, index):
        """Slice out batch *index* from every input and output array."""
        begin = index * batch_size
        end = begin + batch_size
        batch_xx = [x[begin:end] for x in self.xx]
        batch_yy = [y[begin:end] for y in self.yy]
        return batch_xx, batch_yy
class Dataset(object):
    """A training Split paired with an optional test Split.

    Mirrors the (shared) shape/dtype metadata of the splits so callers can
    read it directly off the dataset.
    """

    def __init__(self, train, test):
        assert isinstance(train, Split)
        if test is not None:
            assert isinstance(test, Split)
            # Both splits must agree on sample shapes and dtypes.
            assert train.sample_shapes == test.sample_shapes
            assert train.dtypes == test.dtypes
        self.train = train
        self.test = test
        # Total sample count across whichever splits are present.
        self.samples_per_epoch = train.samples_per_epoch
        if test:
            self.samples_per_epoch += test.samples_per_epoch
        self.sample_shapes = train.sample_shapes
        self.x_sample_shapes = train.x_sample_shapes
        self.y_sample_shapes = train.y_sample_shapes
        self.dtypes = train.dtypes
        self.x_dtypes = train.x_dtypes
        self.y_dtypes = train.y_dtypes

    def batches_per_epoch(self, batch_size):
        """Batch count per epoch across train (and test, when present)."""
        count = self.train.batches_per_epoch(batch_size)
        if self.test:
            count += self.test.batches_per_epoch(batch_size)
        return count

    def get_batch(self, batch_size, is_training, index):
        """Fetch batch *index* from the train or test split."""
        split = self.train if is_training else self.test
        return split.get_batch(batch_size, index)

    def shuffle(self, batch_size):
        """Return shuffled (is_training, batch_index) rows for one epoch."""
        train_count = self.train.batches_per_epoch(batch_size)
        test_count = self.test.batches_per_epoch(batch_size) if self.test else 0
        plan = np.zeros((train_count + test_count, 2), 'int64')
        # Column 0 flags training batches; column 1 is the batch index
        # within its split.
        plan[:train_count, 0] = 1
        plan[:train_count, 1] = np.arange(train_count)
        plan[train_count:, 1] = np.arange(test_count)
        np.random.shuffle(plan)
        return plan

    def each_batch(self, batch_size):
        """Yield (is_training, xx, yy) for every batch of a shuffled epoch."""
        for is_training, index in self.shuffle(batch_size):
            xx, yy = self.get_batch(batch_size, is_training, index)
            yield is_training, xx, yy
| true |
6b23ec7d907dde0d4032b866ce590d1d733f486b | Python | webclinic017/workspace_pyCharm | /divyanshu/dbHandler.py | UTF-8 | 1,675 | 2.828125 | 3 | [] | no_license | import MySQLdb
import env
from custom_logging import logger
class DBHandler:
    """Context-manager wrapper around a single MySQL connection.

    NOTE(review): the connection and cursor are *class* attributes, so every
    DBHandler instance shares (and the newest __init__ overwrites) the same
    connection — confirm only one handler is alive at a time.
    NOTE(review): queries are executed as raw strings; callers must not pass
    untrusted input (no parameterization is done here).
    """
    # Shared connection state; re-assigned by every __init__.
    db = ''
    cursor = ''

    def __init__(self, query = 'use python;'):
        """Connect using the credentials in `env` and run an initial query.

        :param query: statement executed right after connecting
            (defaults to selecting the `python` schema).
        """
        try:
            DBHandler.db = MySQLdb.connect(env.DB_IP, env.DB_USER, env.DB_PASSWORD, env.DB_DATABASE)
            logger.info('Connection created !!')
            DBHandler.cursor = DBHandler.db.cursor()
            self.query = query
            logger.debug('Running query: ' + self.query)
            DBHandler.cursor.execute(self.query)
        except Exception:
            # Bug fix: a bare `except:` also swallowed SystemExit and
            # KeyboardInterrupt; Exception lets those propagate.
            logger.exception('Connecetion Failed')

    def __enter__(self):
        """Support `with DBHandler() as handler:` usage."""
        logger.debug('############################################')
        return self

    def insert(self, query):
        """Execute a write statement and commit, rolling back on failure.

        :param query: SQL INSERT/UPDATE/DELETE statement.
        """
        self.query = query
        try:
            logger.debug('Running query: ' + self.query)
            DBHandler.cursor.execute(self.query)
            DBHandler.db.commit()
            logger.info('Query executed')
        except Exception:
            # Bug fix: narrowed from a bare `except:` (see __init__).
            logger.exception('Query Failed')
            DBHandler.db.rollback()

    def execute(self, query):
        """Execute a read statement and return all rows.

        :param query: SQL SELECT statement.
        :return: tuple of result rows, or the string 'Query Failed' on
            error (kept for backward compatibility with existing callers).
        """
        self.query = query
        try:
            logger.debug('Running query: ' + self.query)
            DBHandler.cursor.execute(self.query)
            result = DBHandler.cursor.fetchall()
            return result
        except Exception:
            # Bug fix: narrowed from a bare `except:` (see __init__).
            result = 'Query Failed'
            logger.exception('Query Failed')
            return result

    def __exit__(self, exc_type, exc_value, traceback):
        """Close the shared connection when the `with` block ends."""
        DBHandler.db.close()
        logger.info('******************************************')
        logger.info('Destroying DB object')
        logger.info('******************************************')
| true |
dc0a9209205fee58f975f99970aa72227d931c47 | Python | kouui/XenICsPy | /XenicsGUI/tabs.py | UTF-8 | 994 | 2.71875 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtWidgets import QVBoxLayout, QTabWidget
from xenicsPanel import XenicsPanel
from observePanel import ObservePanel
class Tab(QWidget):
    """Container widget holding the XenICs and Observation tab pages."""

    def __init__(self, parent):
        # NOTE(review): super(QWidget, self) skips QWidget itself in the
        # MRO; the conventional call would be super(Tab, self) — confirm
        # this is intentional.
        super(QWidget, self).__init__(parent)
        self.parent = parent
        layout = QVBoxLayout()
        # Build the tab screen and register both panels.
        tab_widget = QTabWidget()
        panels = [(XenicsPanel(self), "XenICs"),
                  (ObservePanel(self), "Observation")]
        for panel, label in panels:
            tab_widget.addTab(panel, label)
        # Mount the tab widget as this widget's only child.
        layout.addWidget(tab_widget)
        self.setLayout(layout)

    def setSelf(self, **kwargs):
        """Bulk-assign the given keyword arguments as attributes on self."""
        for attribute_name, attribute_value in kwargs.items():
            setattr(self, attribute_name, attribute_value)
if __name__=="__main__":
    # Manual smoke test: show the tab container in a standalone Qt app.
    app = QApplication(sys.argv)
    test = Tab(parent=None)
    test.show()
    sys.exit(app.exec_())
| true |
e0abdbf31c5525240a4f0001b206a5cb5df9d751 | Python | erinnlebaron3/python | /ZipFunc.py | UTF-8 | 1,308 | 3.828125 | 4 | [] | no_license | # going to allow us to do is actually merge our lists or merge multiple lists into a set of tuples.
# zip() merges parallel sequences element-by-element into tuples, which is
# handy whenever two lists have a direct positional mapping (the same shape
# many matrix-style inputs to machine-learning code take).
# The ordering of both lists matters: items are paired strictly by index,
# so misaligned lists produce wrong pairings — something to watch out for.
# Doing this manually would mean looping over one list and building each
# merged tuple yourself; zip() performs the merge automatically.
positions = ['2b', '3b', 'ss', 'dh']
players = ['Altuve', 'Bregman', 'Correa', 'Gattis']
scoreboard = zip(positions, players)
print(list(scoreboard))
# Expected output of the print above, kept for reference (`answer` itself
# is otherwise unused).
answer = [('2b','Altuve'), ('3b','Bregman'), ('ss', 'Correa'), ('dh','Gattis')]
9546f0464199595cc262bf536cbba17a80c63fad | Python | Suraj-KD/HackerRank | /python/string/Capitalize.py | UTF-8 | 277 | 2.796875 | 3 | [] | no_license | from __future__ import print_function
import sys, re
def capitalize(text):
    """Upper-case the first letter of every purely-alphabetic token,
    leaving separators and mixed tokens (e.g. '12abc') untouched."""
    pieces = []
    # Splitting on (\W+) keeps the separators, so joining restores them.
    for token in re.split('(\W+)', text):
        if token.isalpha():
            pieces.append(token[0].upper() + token[1:])
        else:
            pieces.append(token)
    return ''.join(pieces)
def main():
    """Read all of stdin, drop trailing whitespace, print it capitalized."""
    raw_text = sys.stdin.read()
    print(capitalize(raw_text.rstrip()))
# Entry point when executed as a script (HackerRank-style stdin driver).
if __name__ == '__main__':
    main()
| true |
923953fa43f3382207b8cb168cc75d42a9a0fab7 | Python | Asi7ho/BlackJackPP | /original/donnees.py | UTF-8 | 482 | 2.515625 | 3 | [] | no_license | """Ce fichier définit quelques données, sous la forme de variables"""
# Card rank values (1-13) mapped to their French display names
# (As = ace, valet = jack, dame = queen, roi = king).
nom_carte = { 1: 'As', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8',9: '9', 10: '10', 11: 'valet', 12: 'dame', 13: 'roi' }
# Suit codes mapped to their French names (spade, club, diamond, heart).
figure_carte = {'p': 'pique','t': 'trefle', 'c': 'carreau', 'h': 'coeur' }
# The draw pile, populated by the game logic elsewhere.
deck = []
# The four suit codes.
figures = [ 'p','t','c','h' ]
# Current hands for the dealer ('croupier') and the human player.
main = { 'croupier': [],'humain': [] }
# Second set of hands — presumably for a split/doubled hand;
# TODO confirm against the game logic.
main_d = { 'croupier': [],'humain': [] }
# Discard pile.
fosse_carte = []
# Score table (structure defined by the game logic).
scores = {}
# File name used to persist the scores.
nom_fichier_scores = "scores"
| true |
240f858ab332e2a1402ff09f095fb3cbb5d055a0 | Python | brahimmade/SVC-Streaming | /codecs/old/plots/perf/plot.py | UTF-8 | 1,662 | 2.75 | 3 | [] | no_license | import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Global plot style: large fonts on a wide 20x10-inch figure.
matplotlib.rcParams.update({'font.size': 50})
matplotlib.rcParams['figure.figsize'] = 20, 10
fig, ax = plt.subplots()
fig.tight_layout()
fig.subplots_adjust(left=0.15, top=0.96, right=0.96)
# ssim.txt: comma-separated rows; columns appear to be
# (bpp, iterative score, one-time score) — TODO confirm.
lines = open('ssim.txt', 'r')
bpp1 = []
itr = []
onetime = []
for line in lines:
    line = line.strip().split(',')
    print (line)
    bpp1.append(float(line[0]))
    itr.append(float(line[1]))
    onetime.append(float(line[2]))
# vp9.txt: space-separated (bpp, score) rows.
# NOTE(review): these values land in a list named `h264` and are plotted
# with label 'H.265' below, while h265.txt is plotted as 'VP9' — file
# names, variable names, and legend labels disagree; confirm which codec
# each file actually holds before trusting the legend.
lines = open('vp9.txt', 'r')
bpp2 = []
h264 = []
for line in lines:
    line = line.strip().split(' ')
    bpp2.append(float(line[0]))
    h264.append(float(line[1]))
# h265.txt: space-separated (bpp, score) rows.
lines = open('h265.txt', 'r')
bpp3 = []
h265 = []
for line in lines:
    line = line.strip().split(' ')
    bpp3.append(float(line[0]))
    h265.append(float(line[1]))
# Rate-distortion curves (MS-SSIM vs bits per pixel) for each method.
plt.plot(bpp1, onetime, marker='D', color='magenta', markersize=24, markeredgecolor='black', linewidth=6, label='Residual-Onetime')
plt.plot(bpp1, itr, marker='s', color='skyblue', markersize=20, markeredgecolor='black', linewidth=6, label='Residual-Iterative')
plt.plot(bpp2, h264, marker='^', color='orange', markersize=32, markeredgecolor='black', linewidth=12, label='H.265')
plt.plot(bpp3, h265, marker='o', color='maroon', markersize=28, markeredgecolor='black', linewidth=12, label='VP9')
#plt.ylim([0.85, 1])
plt.xlim([0.5, 1])
#ax.set_yticklabels(np.arange(7), [0.84, 0.86, 0.88, 0.90, 0.92, 0.94, 0.96])
ax.set_ylabel('MS-SSIM')
ax.set_xlabel('BPP')
ax = plt.gca()
ax.yaxis.grid(linestyle='dotted')
ax.xaxis.grid(linestyle='dotted')
plt.legend(loc='lower right')
plt.savefig('comparison.pdf')
plt.show()
fedfa8bef4f8c8ea04c007140d37102df0f72a06 | Python | Mythologos/Smorg | /Bot/Cogs/Helpers/Enumerators/timekeeper.py | UTF-8 | 7,796 | 2.65625 | 3 | [] | no_license | """
This module consists of various enumerations and groups of constants relating to time.
This mainly includes classes like TimeZone which delve into what aliases relate to each UTC timezone offset
and sets of aliases or constants.
"""
from __future__ import annotations
from aenum import Enum, NamedConstant
from typing import Union
class TimeZone(Enum, init='value aliases'):
    """
    This enumeration maps military time zone letters to their UTC offsets and
    to the civilian time zone abbreviations (aliases) sharing each offset.
    Members compare against plain numbers and other TimeZones by UTC offset.

    The name of each item is its military abbreviation; EXTRA (UTC+14) is
    the exception, as its abbreviation could not be found.
    """
    A = 1, ("BST", "CET", "MET", "MEZ", "WAT", "WETDST")
    B = 2, ("BDST", "CEST", "CETDST", "EET", "IST", "MEST", "MESZ", "METDST", "SAST")
    C = 3, ("EAT", "EEST", "EETDST", "FET", "IDT", "MSK")
    D = 4, ("AMST", "AZST", "AZT", "GEST", "GET", "MSD", "MUT", "RET", "SCT", "VOLT")
    E = 5, ("MAWT", "MUST", "MVT", "PKT", "TFT", 'YEKT', "TJT", "TMT", "UZT", "YEKT")
    F = 6, ("ALMT", "BDT", "BTT", "IOT", "KGST", "KGT", "OMMST", "OMST", "PKST", "UZST", "XJT", "YEKST")
    G = 7, ("ALMST", "CXT", "DAVT", "ICT", "KRAST", "KRAT", "NOVST", "NOVT", "WAST")
    H = 8, ("AWST", "BNT", "BORT", "CCT", "HKT", "IRKT", "IRKST", "MYT", "PHT", "SGT", "ULAT", "WADT")
    I = 9, ("AWSST", "JAYT", "JST", "KST", "PWT", "ULAST", "WDT", "YAKT", "YAKST")
    K = 10, ("AEST", "CHUT", "DDUT", "KDT", "LIGT", "MPT", "PGT", "TRUT", "VLAST", "VLAT", "YAPT")
    L = 11, ("AEDT", "AESST", "KOST", "LHDT", "MAGST", "MAGT", "PONT", "VUT")
    M = 12, ("ANAST", "ANAT", "FJT", "GILT", "MHT", "NZST", "NZT", "PETST", "PETT", "TVT", "WAKT", "WFT")
    MSTAR = 13, ("FJST", "NZDT", "PHOT", "TKT", "TOT")
    EXTRA = 14, ("LINT",)
    N = -1, ("AZOT", "EGT", "FNST")
    O = -2, ("BRST", "FNT", "PMDT", "UYST", "WGST")
    P = -3, ("ARST", "ART", "ADT", "BRA", "BRT", "CLST", "CLT", "FKST", "FKT", "GFT", "PMST", "PYST", "UYT", "WGT")
    Q = -4, ("AMT", "AST", "BOT", "EDT", "GYT", "PYT", "VET")
    R = -5, ("ACT", "CDT", "COT", "EASST", "EAST", "EST", "PET")
    S = -6, ("CST", "GALT", "MDT")
    T = -7, ("MST", "PDT")
    U = -8, ("AKDT", "PST")
    V = -9, ("AKST", "GAMT")
    W = -10, ("CKT", "HST", "TAHT")
    X = -11, ("NUT",)
    Y = -12, ()
    Z = 0, ("AZOST", "EGST", "GMT", "UCT", "UT", "UTC", "WET", "Z", "ZULU")

    @staticmethod
    def _comparand(other: Union[int, float, complex, TimeZone]):
        """Numbers compare as-is; other TimeZones compare by UTC offset."""
        if isinstance(other, (int, float, complex)):
            return other
        return other.value

    def __lt__(self, other: Union[int, float, complex, TimeZone]) -> bool:
        return self.value < TimeZone._comparand(other)

    def __gt__(self, other: Union[int, float, complex, TimeZone]) -> bool:
        return self.value > TimeZone._comparand(other)

    def __ge__(self, other: Union[int, float, complex, TimeZone]) -> bool:
        return self.value >= TimeZone._comparand(other)

    def __le__(self, other: Union[int, float, complex, TimeZone]) -> bool:
        return self.value <= TimeZone._comparand(other)

    def __eq__(self, other: Union[int, float, complex, TimeZone]) -> bool:
        return self.value == TimeZone._comparand(other)

    def __ne__(self, other: Union[int, float, complex, TimeZone]) -> bool:
        # Bug fix: this previously used '<', making __ne__ behave like __lt__.
        return self.value != TimeZone._comparand(other)

    def __str__(self) -> str:
        # Bug fix: the old implementation discarded every str.join result and
        # always returned '.'; now it lists the aliases followed by a period.
        return f'Time Zone {self.name}: ' + ', '.join(self.aliases) + '.'

    @staticmethod
    def get_lowest_zone_value() -> int:
        """
        This method gets the lowest value of a UTC time zone offset in the TimeZone enumeration.
        :return int: the lowest value of a UTC time zone offset in the TimeZone enumeration.
        """
        sorted_zones_by_enum = sorted(TimeZone.__members__.values(), key=lambda full_entry: full_entry.value)
        return sorted_zones_by_enum[0].value

    @staticmethod
    def get_highest_zone_value() -> int:
        """
        This method gets the highest value of a UTC time zone offset in the TimeZone enumeration.
        :return int: the highest value of a UTC time zone offset in the TimeZone enumeration.
        """
        sorted_zones_by_enum = sorted(TimeZone.__members__.values(), key=lambda full_entry: full_entry.value)
        return sorted_zones_by_enum[-1].value

    @classmethod
    def list_time_zones(cls) -> list:
        """
        This method composes and returns a complete list of TimeZone objects from this enumeration,
        ordered by UTC offset (the offsets are contiguous from lowest to highest).
        :return list: a complete list of TimeZone objects.
        """
        time_zone_list: list = []
        for offset in range(cls.get_lowest_zone_value(), cls.get_highest_zone_value() + 1):
            time_zone_list.append(TimeZone(offset))
        return time_zone_list
class DateConstant(NamedConstant):
    """
    This class contains various constant values for dates that may be used in multiple locations.
    """
    # Calendar days are 1-based, so every month starts on day 1.
    FIRST_DAY_OF_MONTH = 1
    # A year divisible by 4 is treated as a leap year.
    # NOTE(review): this ignores the 100/400-year Gregorian exceptions.
    LEAP_YEAR_MODULO = 4
class TimeConstant(NamedConstant):
    """
    This class contains various constant values for times that may be used in multiple locations.
    """
    # Minutes run 0-59 on both the 12- and 24-hour clocks.
    START_MINUTE = 0
    END_MINUTE = 59
    # 24-hour clock: hours run 0-23 (END_HOUR below).
    START_HOUR = 0
    # 12-hour (meridiem) clock: hours run 1-12.
    START_MERIDIEM_HOUR = 1
    END_MERIDIEM_HOUR = 12
    END_HOUR = 23
class MonthAliases(NamedConstant):
    """
    This class tallies various accepted names or indicators for months in textual input.
    """
    # Each tuple holds the accepted spellings: full name, abbreviation
    # (where distinct), and the month number as a string.
    JANUARY = ('January', 'Jan', '1')
    FEBRUARY = ('February', 'Feb', '2')
    MARCH = ('March', 'Mar', '3')
    APRIL = ('April', 'Apr', '4')
    MAY = ('May', '5')
    JUNE = ('June', 'Jun', '6')
    JULY = ('July', 'Jul', '7')
    AUGUST = ('August', 'Aug', '8')
    SEPTEMBER = ('September', 'Sept', '9')
    OCTOBER = ('October', 'Oct', '10')
    NOVEMBER = ('November', 'Nov', '11')
    DECEMBER = ('December', 'Dec', '12')
class MonthConstant(Enum, init='value number_of_days'):
    """
    This enumeration contains a constant for each month along with the
    number of days it has.
    Since February differs in a leap year, it is listed twice under two
    different names (the leap-year variant uses sentinel value 13 so both
    entries can coexist — Enum values must be unique).
    """
    JANUARY = 1, 31
    FEBRUARY = 2, 28
    MARCH = 3, 31
    APRIL = 4, 30
    MAY = 5, 31
    JUNE = 6, 30
    JULY = 7, 31
    AUGUST = 8, 31
    # Bug fix: September has 30 days, not 31.
    SEPTEMBER = 9, 30
    OCTOBER = 10, 31
    NOVEMBER = 11, 30
    DECEMBER = 12, 31
    LEAP_YEAR_FEBRUARY = 13, 29
class PeriodConstant:
    """
    This class contains various constant values for periods of the day that may be used in multiple locations.
    """
    # "without a midday," referring to how there's no period separation in a twenty-four hour clock
    SINE_MERIDIEM = 0
    # Before noon (AM).
    ANTE_MERIDIEM = 1
    # After noon (PM).
    POST_MERIDIEM = 2
| true |
fcd0283127a2b7dd39dee6fd6b1acb0b1281b2e8 | Python | ramankarki/learning-python | /computer_scientist/recursion/find_max.py | UTF-8 | 514 | 2.828125 | 3 | [] | no_license | from test_suite import *
print(find_max([[12],3,[4,5,10],7,8,9,[1,100]]) == 100)
test(find_max([2, 9, [1, 13], 8, 6]) == 13)
test(find_max([2, [[100, 7], 90], [1, 13], 8, 6]) == 100)
test(find_max([[[13, 7], 90], 2, [1, 100], 8, 6]) == 100)
test(find_max(["joe", ["sam", "ben"]]) == "sam")
test(find_max([2, 9, [1, 13], 8, 6]) == 1)
test(find_max([2, [[100, 1], 90], [10, 13], 8, 6]) == 1)
test(find_max([2, [[13, -7], 90], [1, 100], 8, 6]) == -7)
test(find_max([[[-13, 7], 90], 2, [1, 100], 8, 6]) == -13)
| true |
eb90c2afac1aedd1b190eee661cce07262c4a210 | Python | darraes/coding_questions | /v1/DynamicProgramming/dy_boxes_stack.py | UTF-8 | 2,835 | 3.484375 | 3 | [] | no_license | # Craking the Code Interview - Dynamic Programming
# http://www.careercup.com/question?id=5712696989188096
class Box:
    """Axis-aligned box; two boxes are equal (and hash alike) when all
    three dimensions match."""

    def __init__(self, width, height, depth):
        self._width = width
        self._height = height
        self._depth = depth

    def _dims(self):
        """Dimensions as a (width, height, depth) tuple."""
        return (self._width, self._height, self._depth)

    def __eq__(self, other):
        if type(other) is not type(self):
            return False
        return self._dims() == other._dims()

    def __str__(self):
        return "W {} H {} D {}".format(self._width, self._height, self._depth)

    def __hash__(self):
        return hash(self._dims())

    def is_allowed_above(self, bottom):
        """True when this box may rest on *bottom*: always allowed on an
        empty stack (None), never on an identical box, otherwise every
        dimension must fit within the box below."""
        if bottom is None:
            return True
        if self == bottom:
            return False
        return all(mine <= theirs
                   for mine, theirs in zip(self._dims(), bottom._dims()))

    def create_rotations(self):
        """All six axis rotations of this box, self first (duplicates are
        possible when dimensions repeat)."""
        w, h, d = self._dims()
        return [self,
                Box(w, d, h),
                Box(d, h, w),
                Box(d, w, h),
                Box(h, w, d),
                Box(h, d, w)]
def calculate_height(boxes):
    """Total height of a stack: the sum of every box's height."""
    return sum(box._height for box in boxes)
def max_stack(boxes, bottom, cache, in_use):
if boxes is None or cache is None: raise
if bottom is not None and cache.has_key(bottom):
return cache[bottom]
max_st = []
for i in range(len(boxes)):
if not in_use[i]:
box = boxes[i]
in_use[i] = True
# Rotates for all dimensions (They all can be the height)
rotations = box.create_rotations()
for box_rotation in rotations:
if box_rotation.is_allowed_above(bottom):
max_sub = max_stack(boxes, box_rotation, cache, in_use)
# Since we have all possible rotations, we can assume that
# the height will always be placed on the same index (considering
# an array as dimensions)
if calculate_height(max_sub) > calculate_height(max_st):
max_st = []
max_st.extend(max_sub)
in_use[i] = False
if bottom is not None:
max_st.append(bottom)
cache[bottom] = max_st
return max_st
boxes = [Box(3, 3, 3), Box(2, 2, 4), Box(4, 4, 4), Box(5, 5, 3), Box(9, 9, 9), Box(7, 7, 7), Box(8, 8, 8)]
for b in max_stack(boxes, None, dict(), [0]*len(boxes)):
print b
| true |
b75c02eaad0a4ad59a6d1328510404f4d7a4b79b | Python | millinma/lyapunov | /lyapunov-exponent.py | UTF-8 | 556 | 2.875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
def lyapunov_exponent(r, preiterations, start_value, iterations):
# run preiterations
lyap = 0
x = start_value
for _ in range(preiterations):
#print x
x = r*x*(1-x)
for _ in range(iterations):
x = r*x*(1-x)
help_v = np.log(np.absolute(r-2*r*x))
#print "1. help", help_v
help_v = help_v//np.log(2)
#print "2. help", help_v
lyap = lyap + help_v
return lyap/iterations
print lyapunov_exponent(2, 600, 0.1, 8000)
| true |
a035de293fd92738506227dd34d5177178df90ce | Python | danielchung1012/Fyp | /python3/main3.py | UTF-8 | 2,312 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
from multiprocessing import Process, Value, Array
from imutils.video import VideoStream
from pyimagesearch.basicmotiondetector import BasicMotionDetector
from imutils.object_detection import non_max_suppression
from detector_model.detector import MultiPeopleDetector
from imutils import paths
import numpy as np
import datetime
import imutils
import time
import cv2
import os
def f(n, a):
n.value = 3.1415927
for i in range(len(a)):
a[i] = -a[i]
if __name__ == '__main__':
num = Value('d', 0.0)
arr = Array('i', range(10))
# initialize the video streams and allow them to warmup
print("[INFO] starting cameras...")
webcam = VideoStream(src=0).start()
picam = VideoStream(usePiCamera=True).start()
cam_dir1 = '/home/pi/share/Github/Fyp/python3/Image'
os.chdir(cam_dir1)
time.sleep(2.0)
detector = MultiPeopleDetector()
camMotion = MultiPeopleDetector()
piMotion = MultiPeopleDetector()
total = 0
i = 0
while True:
# initialize the list of frames that have been processed
frames = []
for (stream, motion) in zip((webcam, picam), (camMotion, piMotion)):
frame = stream.read()
motion.detect(frame)
picName = "image-" + str(total) + ".jpg"
motion.setImageName(picName)
frame = motion.getDetectedImage()
#multiPeopleDetect(frame)
frames.append(frame)
total += 1
# increment the total number of frames read and grab the
# current timestamp
total += 1
timestamp = datetime.datetime.now()
ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
for (frame, name) in zip(frames, ("Webcam", "Picamera")):
picName = name + "-" + str(i) + ".jpg"
print(picName)
cv2.imwrite(picName, frame)
i += 1
# check to see if a key was pressed
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if i > 10:
break
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
webcam.stop()
picam.stop()
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print(num.value)
print(arr[:])
| true |
4f4f3ce027f4bb978c0467ec3407427a65329c5b | Python | makhmudislamov/Matrix-Quote-Generator | /sample.py | UTF-8 | 2,244 | 3.703125 | 4 | [] | no_license | import random
from datetime import datetime
from histogram import *
def histogram_dict(word_list):
"""
Input >>> array of strings
Output >>> histogram in a dictionary format
"""
hist_dict = {}
for word in word_list:
if word not in hist_dict:
hist_dict[word] = 1
else:
hist_dict[word] += 1
return hist_dict
def random_word(histogram):
"""
takes a histogram (however you've structured yours)
and returns a single word, at random. It should not yet
take into account the distributions of the words
"""
# iterate through the histogram - list
# return one random word
listed = list(histogram)
rand_ind = random.randint(0, len(listed)-1)
return listed[rand_ind]
# def rand_hist_word():
# """
# This function creates dictionary from CL words.
# Then prints random word from the dictionary
# """
# histogram = Histogram()
# rand_key = random.randint(0, len(histogram)-1)
# print(histogram)
# print([key for key in histogram.keys()][rand_key])
def stochastic_sample(histogram):
"""
Returns random word from the dictionary based on frequency.
"""
tokens = 0
cumulative_probability = 0.0
# you can use sum()
for word_frequency in histogram.values():
tokens += word_frequency # this works until here, tested with print
random_choice = random.uniform(0, 1)
for word, word_frequency in histogram.items():
cumulative_probability += word_frequency/tokens
if cumulative_probability >= random_choice:
return word
def test_iteration(histogram, iteration):
"""
Creates hisogram based on stochastic sampling and iterating given amount to prove stochastic sampmling
"""
word_list = [stochastic_sample(histogram) for x in range(iteration)]
# should use Histogram class build method
return histogram_dict(word_list)
if __name__ == '__main__':
file = "./sample_words.txt"
# print(random_word(histogram))
histogram = Histogram().build(file)
start_time = datetime.now()
print(test_iteration(histogram, 10000))
# print(stochastic_sample(histogram))
print(datetime.now()-start_time)
| true |
38a990b0822f6b29f4e72b4d16fa74ffacda27b0 | Python | BenguTercan/Memory-Game | /MemoryGame.py | UTF-8 | 2,899 | 3.171875 | 3 | [] | no_license | from tkinter import *
import time
import random
def started() :
count = 5
button.destroy()
printSmt(count)
def printSmt(count):
def numbers():
number = random.randint( 0,9 )
return number
depola_sayi = ""
for x in range ( count ) :
canvas = Canvas( width = 900 , height = 600 , bg = 'pink' )
canvas.pack ( expand = YES , fill = BOTH )
xloc = random.randint ( 50 , 800 )
yloc = random.randint ( 50 , 500 )
ekran_sayi = numbers()
depola_sayi += str ( ekran_sayi )
canvas.create_text ( xloc , yloc , text = ekran_sayi , fill = 'steelblue' , font = ( "arial" , 40 , "bold" ) )
root.update()
time.sleep( 0.8 )
canvas.pack_forget()
root.update()
time.sleep( 0.5 )
print ( depola_sayi )
def sayi_al(yenisay) :
yenisay.get()
if yenisay.get() != depola_sayi :
kutu.pack_forget()
buton2.pack_forget()
label_cong = Label ( root , anchor = CENTER , text = "GAME OVER..." , font = ( "Helvetica" , 25 , "bold" ) ,
bg = 'pink' , fg = 'steelblue' )
label_cong.pack( expand = 1 , ipadx = "20" , ipady = "15" )
root.update()
time.sleep( 1.1 )
root.quit()
if yenisay.get() == depola_sayi :
new_count = count + 1
kutu.pack_forget()
buton2.pack_forget()
label_cong2 = Label ( root , anchor = CENTER , text = "CONGRATULATIONS ! Go to next level." , bg = 'pink' , fg = 'steelblue' ,
font = ( "Helvetica" , 25 ,"bold" ) )
label_cong2.pack ( expand = 1 , ipadx = "20" , ipady ="15" )
root.update()
time.sleep( 1.8 )
label_cong2.pack_forget()
return printSmt ( new_count )
yenisay = StringVar()
kutu = Entry ( root, textvariable = yenisay , width = 40 , font = ( "Helvetica" , 20 , "bold" ) ,
fg = 'HotPink4' , bg = 'rosy brown' )
kutu.pack ( expand = "1" , ipadx = "20" , ipady ="15" )
buton2 = Button ( root, text = "TRY" , command = lambda : sayi_al ( yenisay ) ,
bg = 'rosy brown' , fg = "HotPink4" , font = ( "Helvetica" , 15 , "bold" ),
relief = RAISED , activebackground ='rosy brown' , activeforeground = "HotPink4")
buton2.pack ( expand = "1.5" , ipadx = "20" , ipady = "15" )
root = Tk()
root.title ( "WELCOME TO THE MEMORY GAME !!" )
root.configure ( background = 'pink' )
root.geometry ( "900x600" )
button = Button ( root , text = "Start GAME" , command = started , bg = 'rosy brown' , fg = "HotPink4" ,
font = ( "Helvetica" , 30 , "bold italic" ) ,
relief = RAISED , activebackground = 'rosy brown' , activeforeground = "HotPink4" )
button.pack ( expand = "1" , ipadx = "50" , ipady = "50" )
root.mainloop()
| true |
6ccfe2407d7e22361551794da549fe7219397de7 | Python | yuniaohappy/HeadFirstPython | /07/athletelist.py | UTF-8 | 833 | 3.515625 | 4 | [] | no_license | def sanitize(time_string):
if '-' in time_string:
splitter = '-'
elif ':' in time_string:
splitter = ':'
else:
return time_string
(mi, secs) = time_string.split(splitter)
return mi + '.' + secs
class AthleteList(list):
def __init__(self, a_name, a_dob=None, a_times=[]):
self.name = a_name
self.dob = a_dob
self.extend(a_times)
def top3(self):
return sorted(set(sanitize(s) for s in self))[0:3]
def get_coach_data(filename):
try:
with open(filename) as f:
data = f.readline().strip().split(',')
return(AthleteList(data.pop(0),data.pop(0),data))
except IOError as ioe:
print('File error: ' + ioe)
return None
sarah = get_coach_data('sarah.txt')
print(sarah.name + '\t' + str(sarah.top3()))
| true |
3a1e97f3d5d48d54b418392d4ba3257e69a842b2 | Python | richard912611428iop/leetcode-py | /src/Count and Say.py | UTF-8 | 400 | 3.046875 | 3 | [
"MIT"
] | permissive | class Solution:
def foo(self, data, maxlen):
L = []
for i in range(maxlen):
data = "".join(str(len(list(g)))+str(n) for n, g in (
itertools.groupby(data)))
L.append(data)
return L
# @return a string
def countAndSay(self, n):
if n == 1:
return "1"
ret = self.foo("1", n-1)
return ret[-1]
| true |
2df408a76de81a3b8ef96cb749ff7895aed4dc01 | Python | juechen-zzz/learngit | /python/D2L_AI_Pytorch/5_2 填充和步幅.py | UTF-8 | 954 | 3.015625 | 3 | [] | no_license | import torch
from torch import nn
def comp_conv2d(conv2d, X):
X = X.view((1, 1) + X.shape)
# print(X.shape) # torch.Size([1, 1, 8, 8])
Y = conv2d(X)
return Y.view(Y.shape[2:]) # 排除不关心的前两维:批量和通道
# 注意这里是两侧分别填充1行或列,所以在两侧一共填充2行或列
conv2d = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, padding=1)
X = torch.rand(8, 8)
print(comp_conv2d(conv2d, X).shape)
# 使用高为5、宽为3的卷积核。在高和宽两侧的填充数分别为2和1
conv2d = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 3), padding=(2, 1), stride=(4, 3))
print(comp_conv2d(conv2d, X).shape)
conv2d = nn.Conv2d(1, 1, kernel_size=3, padding=1, stride=2)
print(comp_conv2d(conv2d, X).shape)
Y = torch.rand(3, 3)
conv2d = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=2, padding=1, stride=(3, 2))
print(comp_conv2d(conv2d, Y).shape)
| true |
72a35f9ee728436280df2558812c6bf3ca1a9278 | Python | TouchPal/guldan | /app/api/models/user.py | UTF-8 | 2,719 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
from sqlalchemy import Column, String
from flask import g
from app.exc import GulDanException
from .base import Base
class User(Base):
__tablename__ = "user"
name = Column(String(64), nullable=False, unique=True)
secret_hash = Column(String(32), nullable=False) # used for login validation
user_hash = Column(String(32), nullable=False) # used for puller
def __init__(self, name, secret_hash, user_hash):
self.name = name
self.secret_hash = secret_hash
self.user_hash = user_hash
def __repr__(self):
return "<User {}>".format(self.name)
def to_dict(self):
return {
"id": self.id,
"name": self.name,
"user_hash": self.user_hash
}
@staticmethod
def add_with_check(user_name, secrect_hash, user_hash):
user = g.db_session.query(User).filter_by(name=user_name, is_deleted=0).limit(1).first()
if user:
raise GulDanException().with_message(u"用户({})已经存在".format(user_name)).with_code(409)
g.db_session.add(User(user_name, secrect_hash, user_hash))
@staticmethod
def get_user_by_name(user_name):
result = g.db_session.query(User).filter_by(name=user_name, is_deleted=0).first()
if not result:
return User("", "", "")
return result
@staticmethod
def get_user_by_user_hash(user_hash):
return g.db_session.query(User).filter(
User.user_hash == user_hash,
User.is_deleted == 0
).limit(1).first()
@staticmethod
def get_user_hash_by_name(user_name, limit=1):
result = g.db_session.query(User.user_hash).filter_by(name=user_name, is_deleted=0).limit(limit).first()
if not result:
return ""
return result[0]
@staticmethod
def update_user(user_id, name, secret_hash):
user = g.db_session.query(User).filter_by(id=user_id, is_deleted=0).limit(1).first()
if not user:
raise GulDanException().with_message(u"用户(id:{})没有找到".format(user_id)).with_code(404)
user.name = name
user.secret_hash = secret_hash
@staticmethod
def search_user_by_name(user_name):
return g.db_session.query(User).filter(User.name.like("%" + user_name + "%")).all()
@staticmethod
def delete_by_user_hash(user_hash):
user = g.db_session.query(User).filter(
User.is_deleted == 0,
User.user_hash == user_hash
).first()
if not user:
raise GulDanException().with_code(404).with_message(u"找不到用户该用户")
user.is_deleted = 1
return user
| true |
b95c63628c1bb6595f5a260639395e4542303331 | Python | YuliyaUt/ElGamalSecAnalyzer | /main.py | UTF-8 | 9,306 | 3.078125 | 3 | [] | no_license | import math
def get_param_by_key(params, key, message):
if (key in params) and (len(params) > (params.index(key) + 1)):
ind = params.index(key)
param = params[ind + 1]
else:
print(message)
param = input()
return param
# Euclid's algorithm
def gcd(a, b):
if a == 0 or b == 0:
return 0
if a < 0:
a = - a
if b < 0:
b = - b
# s = q * t + r
s, t = max(a, b), min(a, b)
r = s % t
while r != 0:
s, t = t, r
r = s % t
return t
# using Euclid's algorithm
def inverse(a, module):
a = a % module
if gcd(a, module) != 1:
return 0
n = 1
s, t = module, a
q, r = s // t, s % t
p_prev, p = 0, 1
while r != 0:
p_prev, p = p, p * q + p_prev
s, t = t, r
q, r = s // t, s % t
n += 1
if n % 2 == 0:
p *= -1
return p
def baby_step_giant_step(y, g, p, timeout):
# Choose m - size of prebuilt table (meet-in-the-middle)
m = int(math.sqrt(p-1)) + 1
# Prebuild table of g^(m*i)
table = []
g_m = pow(g, m, p)
gamma = g_m
for i in range(m):
table.append(gamma)
gamma = (gamma * g_m) % p
for j in range(m):
elem = pow(g, j, p) * y % p
if elem in table:
# indexing in array begins with 0, so +1
x = (table.index(elem) + 1) * m - j
return x % (p-1)
return 0
# Need primes.txt file in order for this function to work
def find_factors(n):
# factors are found by absolute value
if n < 0:
n = -n
remains = n
filename = "primes.txt"
f = open(filename)
p = int(f.readline())
# factors are stored in dict
factors = {}
while p and remains > 1:
i = 0
while remains > 1 and remains % p == 0:
i += 1
remains = remains // p
if i > 0:
factors[p] = i
p = int(f.readline())
f.close()
return factors
def pohlig_hellman_algorithm(y, g, p, timeout):
# h = y, n = p-1, a = g
h, n, a = y, p-1, g
# at first going to find log_a(h) by modulo q**k: q**k || (p-1) for all prime q | (p-1)
factors = find_factors(n)
# print(factors)
logarithms = {}
if 2 in factors.keys():
k = factors[2]
h_i, log = h, 0
b = []
for i in range(k):
if pow(h_i, n//(2**(i+1)), p) != 1:
b_i = 1
else:
b_i = 0
b.append(b_i)
h_i = h_i * pow(inverse(a, p), (2 ** i) * b_i, p)
h_i %= p
for i in range(k):
log = 2 * log + b[k-1-i]
log = log % p
logarithms[2] = log
for q in factors.keys():
if q == 2:
# already done before
continue
k = factors[q]
h_i = h
c = [] # array with all coefficients of log_a(h) mod (q**k)
table = {} # table for c_i calculation
for j in range(q):
index = pow(a, (n*j)//q, p)
table[index] = j
for i in range(k):
c_i = table[pow(h_i, n // (q ** (i+1)), p)]
c.append(c_i)
h_i = h_i * pow(inverse(a, p), (q ** i) * c_i, p)
h_i %= p
log = 0
for i in range(k):
log = q * log + c[k-1-i]
logarithms[q] = log
x = 0
# use chinese remainder theorem to find log_a(h) mod (p-1), p-1=n
for q in factors.keys():
k = factors[q]
a_q = q**k
m_q = n // a_q
x += logarithms[q] * m_q * inverse(m_q, a_q)
x %= n
return x
def pollards_rho_method(y, g, p, timeout):
# a = g, h = y, n = p-1
a, h, n = g % p, y % p, p-1
h_sequence = []
x_sequence = []
y_sequence = []
x_i, y_i, h_i = 0, 0, 1
i = 0
s_table = {}
# cycle ends when repeated values(h_i=h_t, x_i!=x_t,...) are found
# or numbers start to repeat (x_i=x_t,y_i=y_t)
while 1 == 1:
h_sequence.append(h_i)
x_sequence.append(x_i)
y_sequence.append(y_i)
i += 1
# G1
if 0 <= h_i < p // 3:
h_i = (h * h_i) % p
y_i = (y_i + 1) % n
# G2
elif p // 3 <= h_i < 2 * p // 3:
h_i = (h_i * h_i) % p
x_i = (x_i * 2) % n
y_i = (y_i * 2) % n
# G3
else:
h_i = (a * h_i) % p
x_i = (x_i + 1) % n
tmp, m = i-1, 0
while tmp and tmp % 2:
tmp //= 2
m += 1
s_table[m] = i-1
# print("s-table", i, s_table)
t = -1
for j in s_table.values():
if h_sequence[j] == h_i:
t = j
if t != -1:
# found matching value in sequence
x_dif = (x_i - x_sequence[t]) % n
y_dif = (y_i - y_sequence[t]) % n
if not (x_dif and y_dif):
return 0
if gcd(y_dif, n) == 1:
return (- x_dif * inverse(y_dif, n)) % n
d = gcd(y_dif, n)
n_0 = n // d
log_0 = (- x_dif * inverse(y_dif, n_0)) % n_0
# print(x_dif, y_dif, d, n_0, log_0)
for i in range(d):
log = log_0 + i * n_0
if pow(a, log, p) == (h % p):
return log % n
pass
def test():
y, g, p = 3, 5, 23
timeout = 120
print("x =", baby_step_giant_step(y, g, p, timeout))
y, g, p = 13, 3, 17
print("x =", baby_step_giant_step(y, g, p, timeout))
print("first polig is", pohlig_hellman_algorithm(3, 5, 23, timeout))
print("second polig is", pohlig_hellman_algorithm(13, 3, 17, timeout))
print("third polig is", pohlig_hellman_algorithm(11, 3, 17, timeout))
print("forth polig is", pohlig_hellman_algorithm(28, 2, 37, timeout))
print(pollards_rho_method(3, 5, 23, timeout))
print(pollards_rho_method(13, 3, 17, timeout))
print(pollards_rho_method(5, 2, 1019, timeout))
print("--------------------y=-1, g=5, p=67453-------------------")
print("baby-giant:", baby_step_giant_step(-1, 5, 67453, timeout))
print("pohlig-hellman", pohlig_hellman_algorithm(-1, 5, 67453, timeout))
print("rho-method", pollards_rho_method(-1, 5, 67453, timeout))
print("--------------------y=11, g=3, p=59441-------------------")
print("baby-giant:", baby_step_giant_step(11, 3, 59441, timeout))
print("pohlig-hellman", pohlig_hellman_algorithm(11, 3, 59441, timeout))
print("rho-method", pollards_rho_method(11, 3, 59441, timeout))
print("--------------------y=-1, g=3, p=715827881-------------------")
print("baby-giant:", baby_step_giant_step(-1, 3, 715827881, timeout))
print("pohlig-hellman", pohlig_hellman_algorithm(-1, 3, 715827881, timeout))
print("rho-method", pollards_rho_method(-1, 3, 715827883, timeout))
print(pow(3, 97612893, 715827883))
print(pow(3, 184379909, 715827883))
print("--------------------y=5, g=11, p=477224802150431------------------")
print("pohlig-hellman", pohlig_hellman_algorithm(5, 11, 477224802150431, timeout))
print("rho-method", pollards_rho_method(5, 11, 477224802150431, timeout))
def test_mode(params):
test()
pass
def analysis_mode(params):
timeout_param = int(get_param_by_key(params, "-t", "Enter timeout for one attack (in seconds)"))
p_param = int(get_param_by_key(params, "-p", "Enter p of public key (prime modulo)"))
y_param = int(get_param_by_key(params, "-y", "Enter y of public key(y=g^x mod p)"))
g_param = int(get_param_by_key(params, "-g", "Enter g of public key"))
x = baby_step_giant_step(y_param, g_param, p_param, timeout_param)
if pow(g_param, x, p_param) == y_param % p_param:
print("Success of baby step - giant step attack! x =", x)
else:
print("Baby step - giant step attack did not succeed! ")
x = pohlig_hellman_algorithm(y_param, g_param, p_param, timeout_param)
if pow(g_param, x, p_param) == y_param % p_param:
print("Success of Pohlig-Hellman algorithm! x =", x)
else:
print("Pohlig-Hellman algorithm did not succeed! ")
x = pollards_rho_method(y_param, g_param, p_param, timeout_param)
if pow(g_param, x, p_param) == y_param % p_param:
print("Success of Pollard's Rho algorithm! x =", x)
else:
print("Pollard's Rho algorithm did not succeed! ")
pass
def main():
print("-------------------------ElGamal Security Analyzer-------------------------")
print("Enter '/test [-n system_parameter]' for test mode")
print("Enter '/analyze [-y y] [-g g] [-p p] [-t timeout_in_seconds]' for public key security analysis")
command = input()
if not command:
return 0
params = command.split(" ")
mode = params[0]
if mode == "/test":
test_mode(params)
elif mode == "/analyze":
analysis_mode(params)
else:
print("Couldn't recognize a command")
return 0
if __name__ == "__main__":
main()
| true |
2b7e784c3fa464101c7648dde144974ceedd46fd | Python | haishenming/scrapy_tt | /selenium_tt/selenium_spider.py | UTF-8 | 191 | 2.5625 | 3 | [] | no_license |
# 通过selenium拿到cookie
from selenium import webdriver
brower = webdriver.Chrome()
brower.get("http://www.baidu.com")
Cookie = brower.get_cookies()
print(Cookie)
brower.close() | true |
c5cd1f02591140965834691fb68b2090c3c253d8 | Python | d3815/other | /check_and_move_png.py | UTF-8 | 923 | 2.84375 | 3 | [] | no_license | import os
import shutil
from PIL import Image
'''
проходим по всем файлам в папке
проверяем размеры (должны быть не больше 256 по обеим сторонам)
переносим их в новую папку
'''
path_to_png_dir = '/ipad-hd'
for d, dirs, files in os.walk(os.getcwd() + path_to_png_dir):
for one_file in files:
if one_file.endswith('.png'):
path = os.path.join(d, one_file)
work_img = Image.open(path)
(width, height) = work_img.size
if width < 256:
if height < 256:
res = '/result'
new_path = d + res
if not os.path.exists(new_path):
os.mkdir(new_path)
print(f'Файл {path} перемещен')
shutil.move(path, new_path)
| true |
994168febc671340a2b9670e750d28d7ce3210a6 | Python | niklasw/wfrp4e_rpc_flask_server | /utils.py | UTF-8 | 8,303 | 2.9375 | 3 | [] | no_license | import sys
from collections import UserDict, UserList
def Error(*args):
s = 'Error: '
for a in args:
s += f'{a} '
print(s, file=sys.stdout, flush=True)
def Info(*args):
s = '>>>>> '
for a in args:
s += f'{a} '
print(s)
def Log(*args, log_file_name='log'):
s = '>>>>> '
for a in args:
s += f'{a} '
s+='\n'
with open(log_file_name,'a') as fp:
fp.write(s)
def valid_name(name):
return ''.join(e for e in self.name if e.isalnum()).lower()
def get_form_value(form, id, default=-1):
try:
value = form.get(id)
value = default.__class__(value)
return value
except:
Log(f'FORM READ ERROR: {id}')
return default
def search_form(form, expression):
Info('SEARCHING FORM')
import re
pat = re.compile(expression)
matches = []
for key in form.keys():
if pat.match(key):
matches.append(key)
return matches
def tryCast(d,typ):
try:
return typ(d)
except:
Error('in NameMap.cast',e)
return typ()
class NamedMap(UserDict):
def __init__(self, name, **kw):
self.name = name
super().__init__(**kw)
self.valid_name = ''.join(e for e in self.name if e.isalnum()).lower()
def sum(self, typ=int):
return sum(self.cast(typ).values())
def get(self,key):
if key == 'sum': return self.sum()
return super().get(key)
def set(self,key,value):
'''Prevents setting a key that is not already there'''
if key in self.keys():
self[key] = tryCast(value,int)
else:
Error('NamedMap.set', 'wrong key', key)
def cast(self,typ):
data = [tryCast(d,typ) for d in self.values()]
return NamedMap(self.name,**dict(zip(self.keys(),data)))
def __repr__(self):
return f'{self.name}: {super().__repr__()}'
def __str__(self):
return f'{self.name}: {super().__str__()}'
class Char(NamedMap):
dummy_name = '--'
row_keys = ['initial','advance','sum']
def __init__(self, name, initial=0, advance=0):
if name not in CharList.names:
Error('Char.__init__', 'Wrong characteristic name', name)
super().__init__(name, initial=initial, advance=advance)
self.rows = list(self.keys())+['sum']
def __eq__(self,other):
return self.name == other.name
def __hash__(self):
return hash((self.name, self.get('initial')))
class CharList(UserList):
names = 'ws bs s t i ag dex int wp fel'.split()
specie_char_adds = {'human': len(names)*[20],
'dwarf': [30,20,20,30,20,10,30,20,40,10],
'halfling': [10,30,10,20,20,20,30,20,30,30]}
def __init__(self, *args):
super().__init__(*args)
if len(self) == 0:
for item in self.names:
self.append(Char(item,initial=0,advance=0))
elif len(self) != len(self.names):
Error('CharList.__init__', 'incomplete chars')
else:
if not all([isinstance(a, Char) for a in self]):
Error('CharList.__init__', 'wrong types')
def append(self, item):
if not isinstance(item, Char):
Error('CharList.append','can only append Char')
super().append(item)
def get(self, name):
if not name.lower() in self.names:
Error('CharList.get', 'wrong char name', name)
else:
for item in self:
if item.name.lower() == name.lower():
return item
return None
def initial(self):
return [c.get('initial') for c in self]
def advance(self):
return [c.get('advance') for c in self]
def sum(self):
return [c.sum() for c in self]
def as_dict(self):
d = {}
for item in self:
d[item.name] = item
return d
class Skill(NamedMap):
def __init__(self, name, char, advance=0):
self.char = char
super().__init__(name, initial=char.sum(), advance=advance)
self.basic = name in SkillList.basic_names
def refresh(self):
self.valid_name = ''.join(e for e in self.name if e.isalnum()).lower()
self.set('initial', self.char.sum())
def __eq__(self, other):
return self.valid_name == other.valid_name # and self.char == other.char
def __hash__(self):
return hash((self.valid_name, self.char))
class Talent(NamedMap):
def __init__(self, name):
self.times_taken = 0
self.description = ''
def __eq__(self, other):
return self.name == other.name and self.description == others.description
def __hash__(self):
return hash((self.name, self.description))
class SkillList(UserList):
basic_names = [ 'art', 'athletics', 'bribery', 'charm',
'charm animal', 'climb', 'cool', 'consume alcohol',
'dodge', 'drive', 'endurance', 'entertain',
'gamble', 'gossip', 'haggle', 'intimidate',
'intuition', 'leadership', 'melee basic', 'melee',
'navigation', 'outdoor survival', 'perception', 'ride',
'row', 'stealth' ]
basic_chars = [ 'dex', 'ag', 'fel', 'fel',
'wp', 's', 'wp', 't',
'ag', 'ag', 't', 'fel',
'int', 'fel', 'fel', 's',
'i', 'fel', 'ws', 'ws',
'i', 'int', 'i', 'ag',
's', 'ag']
n_basic = len(basic_names)
n_advanced = 13
@staticmethod
def is_basic(name):
return name.lower() in basic_names
def __init__(self, content = None):
if content:
super().__init__(content)
else:
super().__init__([Skill(n,Char(c)) for n,c in \
zip(self.basic_names, self.basic_chars)])
self += [Skill(str(i), Char('ws')) for i in range(self.n_advanced)]
def basic(self, first=0, last=n_basic):
return (self[i] for i in range(first,last))
def added(self):
return (self[i] for i in range(self.n_basic, len(self)))
def get(self, name):
for item in self:
if item.name.lower() == name.lower():
return item
return None
def remove_by_name(self, name):
for i, item in enumerate(self):
if item.name.lower() == name.lower() or item.valid_name == name:
return self.remove(item)
return None
def refresh(self,chars):
for item in self:
item.char = chars.get(item.char.name)
if item.char:
item.refresh()
#self.remove_duplicates()
def remove_duplicates(self):
tmp_skills = []
for skill in self:
if skill in tmp_skills or not skill.valid_name:
self.remove(skill)
else:
tmp_skills.append(skill)
class TalentList(UserList):
n_talents = 12
def __init__(self):
super().__init__([Talent('') for i in range(self.n_talents)])
if __name__ == '__main__':
cl = CharList()
cl.get('ws').update(initial=33)
cl.get('i').update(initial=48)
for item in cl:
print(item, 'sum =', item.sum(), item.get('initial'))
print(cl.sum())
print(cl.initial())
print(cl.as_dict())
print('=====================================================')
slist = SkillList()
skill = Skill('Wrestle', Char('s',initial=34,advance=5), advance=3)
slist.append(skill)
slist.append(skill)
gotSkill = slist.get('stealth')
gotSkill.set('advance', 6)
gotChar = cl.get(gotSkill.char.name)
gotChar.set('initial', 43)
gotChar.set('advance', 1)
slist.refresh(cl)
schars = []
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
for skill in slist:
print(skill.char, skill.sum())
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
for i in slist.added():
print(i)
print('=====================================================')
for i in slist.basic():
print(i.name, i.get('initial'), i.get('advance'), i.get('sum'))
gotSkill = slist.get('stealth')
print(gotSkill.name, gotSkill.char, gotSkill.get('sum'))
| true |
c00a88f2b0d3a5c984f667f406365b2d20b01075 | Python | GlennCoding/cryptography-py | /main.py | UTF-8 | 1,347 | 3.453125 | 3 | [] | no_license | import encryption
import decryption
def check_input(a, b, message):
while True:
user_input = input(message)
if user_input == a or user_input == b:
return user_input
elif user_input == 'back':
break
else:
print('Please only enter the desired characters.')
def run():
# Navigation Menu
ask_purpose = check_input('e', 'd', 'Type \'e\' to encrypt and \'d\' to decrypt: ')
if ask_purpose == 'e':
ask_method = check_input('c', 'v', 'Type \'c\' for caesar encyption and \'v\' for vigenere encyption: ')
if ask_method == 'c':
encryption.caesar_cipher()
else:
encryption.vigenere_cipher()
else:
ask_method = check_input('c', 'v', 'Type \'c\' for caesar decryption and \'v\' for vigenere decryption: ')
if ask_method == 'c':
decryption.caesar_decrypt()
else:
decryption.vigenere_decrypt()
if __name__ == '__main__':
end_program = False
while not end_program:
run()
continue_program = '-'
while continue_program != 'y' and continue_program != 'n' \
and continue_program != '':
continue_program = input('Do you want start over again?(Y/n)')
if continue_program == 'n':
end_program = True
| true |
1072f2264b7fd13e5535e79730a89a8afdd1b7e8 | Python | Scaravex/googlaround | /google places.py | UTF-8 | 5,553 | 2.71875 | 3 | [] | no_license | ##help place: https://developers.google.com/places/web-service/search
##download googleplaces here --> https://github.com/slimkrazy/python-google-places
#installed: googleplaces, gmaps
import json
import pandas as pd
import numpy as np
import csv
import codecs
from time import sleep
from googleplaces import GooglePlaces
# import of internal files
from config import API_KEY
from config import SEARCH_KEY #e.g. restaurant, supermarket, bar
from config import LOCATION_KEY # good idea is to have a list of U-bahn station, save every search and eliminate duplicated results
from config import PICKLE_FILE_PATH
def create_dictionary(place):
    """Flatten a googleplaces place object into a plain dict.

    The required fields (``place_id``, ``name``, ``formatted_address``)
    raise if missing; every optional field is filled best-effort and
    silently skipped when the place does not provide it.

    Args:
        place: a googleplaces place; ``place.get_details()`` must
            already have been called so ``place.details`` is populated.

    Returns:
        dict keyed by '_id', 'name', 'address' plus whichever optional
        fields (geo_location, opening_hours, rating, viewport, types,
        url, website) were available on the place.
    """
    info = {
        '_id': place.place_id,
        'name': place.name,
        'address': place.formatted_address,
    }

    def _maybe(key, getter):
        # Best-effort optional field: same skip-on-failure behaviour as
        # before, but `except Exception` (instead of a bare `except:`)
        # no longer swallows SystemExit / KeyboardInterrupt.
        try:
            info[key] = getter()
        except Exception:
            pass

    _maybe('geo_location', lambda: place.geo_location)
    _maybe('opening_hours', lambda: place.details['opening_hours']['weekday_text'])
    _maybe('rating', lambda: float(place.rating))
    _maybe('viewport', lambda: place.details['geometry']['viewport'])
    _maybe('types', lambda: place.details['types'])
    _maybe('url', lambda: place.url)
    _maybe('website', lambda: place.website)
    return info
def query_gmaps(api_key=API_KEY, search_key=SEARCH_KEY, location_key=LOCATION_KEY):
    """Search Google Places near ``location_key`` for ``search_key``.

    Performs one nearby search plus at most one follow-up results page
    (via the ``pagetoken`` mechanism) and flattens every returned place
    into a dict through ``saver_queries``.

    Args:
        api_key: Google Places API key.
        search_key: keyword to search for (e.g. 'restaurant').
        location_key: textual location (e.g. an U-Bahn station name).

    Returns:
        list of place dicts as produced by ``create_dictionary``.
    """
    saved_shops = []
    google_places = GooglePlaces(api_key)
    # NOTE(review): the Places API ignores `radius` when rankby='distance'
    # is supplied -- confirm which of the two parameters is intended.
    query_result = google_places.nearby_search(
        location=location_key, keyword=search_key,
        radius=20000,
        rankby="distance")
    if query_result.has_attributions:
        print(query_result.html_attributions)
    saved_shops = saver_queries(saved_shops, query_result)
    print(len(saved_shops))

    # Follow at most one additional results page.  A `pagetoken` search
    # returns the next 20 results of the previous query; all other
    # parameters are ignored when it is set.
    #
    # Bug fix: the original condition used the bitwise `&`, which binds
    # tighter than `<` -- `token & max_iter < 1` evaluated as
    # `(token & max_iter) < 1`, so the loop body ran once even when no
    # next page existed.
    temporary_search = query_result
    pages_followed = 0
    while temporary_search.has_next_page_token and pages_followed < 1:
        pages_followed += 1
        try:
            next_page = google_places.nearby_search(
                pagetoken=temporary_search.next_page_token)
            saved_shops = saver_queries(saved_shops, next_page)
            temporary_search = next_page
        except Exception:
            # Best effort: a failed page fetch keeps the results so far.
            break
    return saved_shops
def saver_queries(saved_places, query_result):
    """
    Append an info dict (see ``create_dictionary``) for every place of a
    query result to ``saved_places`` and return the extended list.

    Each place summary is expanded with a further API call through
    ``place.get_details()`` before being converted.
    """
    for place in query_result.places:
        # Returned places from a query are place summaries.
        print(place.name)
        print(place.geo_location)
        print(place.place_id)
        # The following method has to make a further API call.
        place.get_details()
        # Referencing any of the attributes below prior to get_details()
        # would raise a googleplaces.GooglePlacesAttributeError.
        print(place.details)  # a dict matching the JSON response from Google
        print(place.local_phone_number)  # print (place.international_phone_number)
        print(place.website)
        print(place.url)
        saved_places.append(create_dictionary(place))
    return saved_places
def new_query_checker(good_shops, df, api_key=API_KEY, search_key=SEARCH_KEY, location_key=LOCATION_KEY):
    """
    Run a fresh query and append only the shops not yet known.

    :param good_shops: list accumulating de-duplicated shop dicts.
    :param df: DataFrame of already-known shops (must expose an ``_id`` column).
    :return: ``good_shops`` extended with the previously unseen results.
    """
    fresh_results = query_gmaps(api_key, search_key, location_key)
    ## to be smarter, check in the database and save into database directly
    # s = pd.Series(df['address']) --> faster
    new_items = 0
    for shop in fresh_results:
        # Keep only shops whose place id is absent from the database.
        if not any(df._id == shop['_id']):
            new_items += 1
            good_shops.append(shop)
    print("There were in total these new items:")
    print(new_items)
    return good_shops
##to be very smart: create a matrix of data points in Berlin,
## convert into addresses and then only select the unique results
# from_csv('total_shops.csv', encoding='latin_1')
# Module-level script setup: read the list of search locations from disk
# (one location per CSV row) and run a first, default-location query.
with codecs.open('locations.csv', 'r', encoding='utf-8', errors='ignore') as fdata:
    reader = csv.reader(fdata)
    locations = []
    for row in reader:
        locations.append(row)
# Initial batch of shops; extended per-location by find_good_shops() below.
good_shops = query_gmaps()
# Wrap the raw rows into a single-column DataFrame.
locations = pd.DataFrame(locations, columns=['Places'])
def find_good_shops(good_shops, locations=locations):
    """
    Extend ``good_shops`` with de-duplicated results for every location.

    Queries each location in turn, keeping only unseen shops, and sleeps
    between queries to stay under the Google Maps rate limits. Stops at
    the first failure (best-effort batch job).

    :param good_shops: list of shop dicts collected so far.
    :param locations: iterable of location keys, or the module-level
        single-column ('Places') DataFrame.
    :return: the extended ``good_shops`` list.
    """
    # BUG FIX: iterating a DataFrame directly yields its column labels
    # (here just 'Places'), not the stored locations; extract the column
    # when a DataFrame is passed so each actual location is queried.
    if isinstance(locations, pd.DataFrame):
        locations = locations['Places']
    for location in locations:
        try:
            good_shops = new_query_checker(
                good_shops, df=pd.DataFrame(good_shops), location_key=location)
            print(len(good_shops))
            print("going to sleep")
            sleep(45)  # trick to avoid google maps troubles
            # (typo 'secondds' fixed in the message below)
            print('I slept 45 seconds, zzz: already saved {} good shops'.format(len(good_shops)))
        except Exception:
            # Stop on the first error (rate limiting, bad data, ...).
            break
    return good_shops
| true |
20c95a079ea8373ef795bbba0ab6670e8c79baf2 | Python | stencila/hub | /worker/jobs/extract/__init__test.py | UTF-8 | 949 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | import pytest
from . import Extract
def test_missing_source_type():
    """A source dict without a ``type`` entry is rejected."""
    with pytest.raises(AssertionError) as excinfo:
        Extract().do({})
    assert "source must have a type" in str(excinfo.value)
def test_unknown_source_type():
    """A source with an unrecognised ``type`` raises ``ValueError``."""
    with pytest.raises(ValueError) as excinfo:
        Extract().do({"type": "foo"})
    assert "Unknown source type: foo" in str(excinfo.value)
def test_bad_arg_types():
    """Non-dict ``source``, ``filters`` and ``secrets`` arguments are rejected."""
    extract = Extract()
    cases = [
        ((None,), "source must be a dictionary"),
        (({"type": "whateva"}, None), "filters must be a dictionary"),
        (({"type": "whateva"}, {}, None), "secrets must be a dictionary"),
    ]
    for args, message in cases:
        with pytest.raises(AssertionError) as excinfo:
            extract.do(*args)
        assert message in str(excinfo.value)
| true |
781435c49f64f8d620244c25092bc99ae80f9a92 | Python | vijaykrishnav/Logistic-regression-along-with-Area-under-RUC | /Logistic regression (Area under ROC curve) with missing value imputation.py | UTF-8 | 9,520 | 3 | 3 | [] | no_license |
# coding: utf-8
# In[1]:
#getting libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as mlt
get_ipython().magic(u'matplotlib inline')
# In[2]:
#getting file
crd1=pd.read_csv('D:\\V data analytics\\sample dataset\\Dataset by Imarticus\\Data for logistic regression\\R_Module_Day_7.2_Credit_Risk_Train_data.csv')
crd2=pd.read_csv('D:\\V data analytics\\sample dataset\\Dataset by Imarticus\\Data for logistic regression\\R_Module_Day_8.2_Credit_Risk_Validate_data.csv')
# In[11]:
# to do missing value imputation, we will concat both train and validation data
crd=pd.concat([crd1,crd2],axis=0)
# In[4]:
crd.isna().sum()
# In[12]:
#lets reset index just to be sure
crd.reset_index(inplace=True,drop=True)
# In[13]:
crd.head()
# In[14]:
#all good lets check for missing values
crd.isna().sum()
# In[15]:
#missing value imputation: get the list of indices of the rows where Gender is missing
genmiss=crd[crd['Gender'].isna()].index.tolist()
# In[16]:
#to find out the mode we do,
crd.Gender.value_counts()
# In[17]:
#lets fill up missing value by 'Male'
crd['Gender'].iloc[genmiss]='Male'
# In[18]:
crd.Gender.isna().sum()
# In[19]:
#great, lets do the missing value imputation for married
crd.Married.value_counts()
# In[20]:
#lets fill up nas by 'Yes'
crd['Married'].iloc[crd[crd['Married'].isna()].index.tolist()]='Yes'
# In[21]:
crd.Married.isna().sum()
# In[22]:
#lets do missing value impputation of dependents
crd.Dependents.value_counts()
# In[29]:
crd.Dependents.isna().sum()
# In[30]:
#lets fill the nas of dependents missing value in case of not married people as zero
# so we need the dataframe where married is no and dependents is na
dmiss1=crd[(crd['Married']=='No') & (crd['Dependents'].isna())].index.tolist()
print(dmiss1)
# In[31]:
#filling it up with zero
crd['Dependents'].iloc[dmiss1]='0'
# In[32]:
crd.Dependents.value_counts()
# In[33]:
crd.Dependents.isna().sum()
# In[28]:
#lets do a crosstab of gender with dependents
pd.crosstab(crd['Gender'],crd['Dependents'].isna()) # out of 16, 15 values corresponds to male
# In[38]:
#lets do the cross tab of dependents and gender
pd.crosstab(crd['Gender'],crd['Dependents'])
# In[39]:
#since most of male have a mode of 0, lets fill the remaining values by 0
crd['Dependents'][(crd['Dependents'].isna())]='0'
# In[40]:
crd.Dependents.isna().sum()
# In[41]:
#missing value impution of selfemployed
crd.Self_Employed.isna().sum()
# In[42]:
crd.Self_Employed.value_counts()
# In[43]:
#replacing nas with 'No'
crd['Self_Employed'].iloc[crd[crd['Self_Employed'].isna()].index.tolist()]='No'
# In[44]:
#mvi of loanamount loanamount_term
#lets compare the missingvalues of loanamount with loanamountterm
pd.crosstab(crd['Loan_Amount_Term'],crd['LoanAmount'].isna())
# In[45]:
#its evident that loan_amount_term pertaining to 360 has highest numbers of nas.
# we will fill the value by the mean of the loanamount corresponding to the loan_amount_term 360.
crd.groupby(crd['Loan_Amount_Term'])['LoanAmount'].mean()
# In[46]:
# lets fill the loanampunt na's in the range of 360 by 144
crd['LoanAmount'].iloc[crd[(crd['Loan_Amount_Term']==360) & (crd['LoanAmount'].isna())].index.tolist()]=144
# In[47]:
#for the rest of nas lets replace the values by 132
crd['LoanAmount'].iloc[crd[crd['LoanAmount'].isna()].index.tolist()]=132
# In[48]:
#missing value imputation of loan_amount_term
crd.Loan_Amount_Term.isnull().sum()
# In[49]:
crd.Loan_Amount_Term.value_counts()
# In[50]:
#lets fill up by mode, ie 360
crd['Loan_Amount_Term'].iloc[crd[crd['Loan_Amount_Term'].isna()].index.tolist()]=360
# In[51]:
#missing value imutation of Credit_history by Logistic regression
crd.Credit_History.isnull().sum()
# In[56]:
#lets separate the dataframe as train and test, lets take all na's in credithistory as test data
crd_testdata=crd.loc[crd['Credit_History'].isna(),:]
# In[58]:
#to get traindata, we will first get incex of all na's of credithistory
crd_credna_indx=crd[crd['Credit_History'].isna()].index.tolist()
# In[61]:
#getting traindata
crd_traindata_index=[x for x in crd.index.tolist() if x not in crd_credna_indx]
# In[62]:
crd_traindata=crd.iloc[crd_traindata_index]
# In[64]:
#Tp do a logistic regression, we will lose all unimo variables and get dummies for all catagorical variables
crd_train1=pd.get_dummies(crd_traindata.drop(['Loan_ID'],axis=1),drop_first=True)
# In[65]:
crd_train1.head()
# In[67]:
#lets do the same for testdata
crd_test1=pd.get_dummies(crd_testdata.drop(['Loan_ID'],axis=1),drop_first=True)
# In[68]:
crd_test1.head()
# In[71]:
#preparing xtrain and y train, xtest and ytest
x_train1=crd_train1.drop(['Credit_History','Loan_Status_Y'],axis=1)
y_train1=crd_train1['Credit_History']
# In[73]:
#similarly for test set
x_test1=crd_test1.drop(['Credit_History','Loan_Status_Y'],axis=1)
y_test1=crd_test1['Credit_History']
# In[74]:
from sklearn.linear_model import LogisticRegression
# In[75]:
m1=LogisticRegression()
# In[76]:
m1.fit(x_train1,y_train1)
# In[77]:
#predicting for xtest
pred1=m1.predict(x_test1)
# In[78]:
print(pred1)
# In[83]:
crd_test1['Credit_History']=pred1
# In[84]:
crd_new=pd.concat([crd_train1,crd_test1],axis=0)
# In[85]:
crd_new.isnull().sum()
# In[86]:
crd_new.shape
# In[87]:
crd.Credit_History.isna().sum()
# In[88]:
crd['Credit_History'].iloc[crd[crd['Credit_History'].isna()].index.tolist()]=pred1
# In[89]:
crd.isnull().sum()
# In[90]:
crd.head()
# In[ ]:
# lets go ahead woth logistic regression now
# In[97]:
#some basic eda's first
sns.barplot(x=crd['Loan_Status'],y=crd['LoanAmount'],hue=crd['Gender'],data=crd)
# In[98]:
sns.barplot(x=crd['Credit_History'],y=crd['Loan_Status'],hue=crd['Married'],data=crd)
# In[105]:
sns.barplot(x=crd['Loan_Status'],y=crd['ApplicantIncome'],hue=crd['Property_Area'],data=crd) #(so rural property got rejected most)
# In[104]:
sns.barplot(x=crd['Loan_Status'],y=crd['ApplicantIncome'],hue=crd['Dependents'],data=crd) #interesting fact is applicant with..
#..3+ dependents were rejected also were given higher no of loans!!
# In[ ]:
# lets split the data into train and validation,just like how it was given previously
# In[111]:
crd_train=crd.head(len(crd1))
# In[113]:
crd_train.to_csv('D:\\V data analytics\\sample dataset\\Dataset by Imarticus\\Data for logistic regression\\Loan_eligibility_estimation_traindata_withmv_imputed\\crd_train.csv')
# In[114]:
crd_val=crd.tail(len(crd2))
# In[115]:
crd_val.to_csv('D:\\V data analytics\\sample dataset\\Dataset by Imarticus\\Data for logistic regression\\Loan_eligibility_estimation_train _test_data_withmv_imputed\\crd_val.csv')
# ### Logistic regression with area under roc curve
# In[117]:
#getting dummies for all catagorical variables
crd.info()
# In[120]:
#seperating dvb
crd_new1=pd.get_dummies(crd.drop(['Loan_ID','Loan_Status'],axis=1),drop_first=True)
# In[121]:
crd_new1.head()
# In[122]:
x_train=crd_new1
# In[123]:
y_train=crd['Loan_Status']
# In[124]:
m2=LogisticRegression()
# In[125]:
m2.fit(x_train,y_train)
# In[128]:
m2.score(x_train,y_train)
# In[129]:
# PREDICTING DATA FOR VALIDATION DATA
crd_val.shape
# In[130]:
crd_val.head()
# In[132]:
#preparing data to predict
crd_val1=crd_val.drop(['Loan_ID','Loan_Status'],axis=1)
# In[133]:
#getting dummyvalues for cvs
crdval=pd.get_dummies(crd_val1,drop_first=True)
# In[134]:
crdval.head()
# In[135]:
#predicting
pred2=m2.predict(crdval)
# In[136]:
#getting crosstab
pd.crosstab(crd_val['Loan_Status'],pred2)
# In[137]:
#to get crosstab we download metrics
from sklearn import metrics
# In[138]:
metrics.confusion_matrix(crd_val['Loan_Status'],pred2) #we can interpret the results as predicted
# N Y
# actual loanstatus N 55 22
# Y 1 289
# In[ ]:
#sensitivity of the model = tp/(tp+fn) = 289/(289+1) = 99% -- highly sensitive
#specificity of the model = tn/(tn+fp) = 55/(55+22) = 55/77 = 71% specific
# In[140]:
#lets get the valuecount of loanstatus in validation data
crd_val.Loan_Status.value_counts()
# In[141]:
#accuracy score
metrics.accuracy_score(crd_val['Loan_Status'],pred2) #93% great
# In[172]:
crd_val.head()
# In[179]:
crdval.head()
# In[180]:
crdval['Loan_Status']=crd_val['Loan_Status'] #in crdval, we add loanstatus, since we have to conver it to digits by mapping
# In[181]:
crdval.head()
# In[182]:
crdval['Loan_Status_digit']=crdval.Loan_Status.map({'N':0,'Y':1})
# In[184]:
crdval1=crdval.drop(['Loan_Status'],axis=1)
# In[186]:
crdval1.info()
# In[187]:
crdval.info()
# In[196]:
probvalues_of_dv=m2.predict_proba(crdval1.drop(['Loan_Status_digit'],axis=1))
# In[197]:
Loan_status_prob0=m2.predict_proba(crdval1.drop(['Loan_Status_digit'],axis=1))[:,1]
# In[199]:
#graph
sns.distplot(Loan_status_prob0,kde=False,bins=50)
# In[200]:
#roc curve
fpr,tpr,thresholds=metrics.roc_curve(crd_val['Loan_Status_digit'],Loan_status_prob0)
# In[202]:
mlt.plot(fpr,tpr)
# In[203]:
#getting area under aoc
metrics.roc_auc_score(crd_val['Loan_Status_digit'],Loan_status_prob0)
| true |
c6e38795027a96ba120768a17fda7f2d204686e4 | Python | joshbenner/mudsling | /src/mudsling/utils/migrate.py | UTF-8 | 1,419 | 3.390625 | 3 | [] | no_license | """
Migration utilities.
"""
# Mapping registries populated by the helpers below (rename_module,
# rename_class, move_class) and consumed elsewhere in the migration
# machinery: old name/class -> new name/class.
module_name_map = {}
class_name_map = {}
class_move_map = {}
def forward_class(new_class):
    """
    Creates a dynamic class whose sole function is to forward to another class.

    This is useful when renaming or changing the location of a class.

    If we are renaming class 'foo.Bar' to 'foo.Baz', then we might put this in
    the foo.py file:

    >>> from mudsling.utils.migrate import forward_class
    >>> Bar = forward_class(Baz)

    :param new_class: The class to forward to.
    :type new_class: type or str

    :return: A dynamically-created class which will forward instantiations of
        the old class to the new class transparently.
    """
    class _class(object):
        def __new__(cls, *args, **kwargs):
            # Resolve dotted-path string targets lazily, at instantiation
            # time, so the target module need not be importable yet.
            if isinstance(new_class, str):
                from mudsling.utils.modules import class_from_path
                new_cls = class_from_path(new_class)
            else:
                new_cls = new_class
            # Return an instance of the *target* class. Since that instance
            # is not an instance of _class, Python skips _class.__init__ and
            # the target class takes over entirely.
            return new_cls.__new__(new_cls, *args, **kwargs)

        def __setstate__(self, state):
            # Unpickling hook: restore the old instance's attributes, then
            # retarget its class to the forwarded one.
            # NOTE(review): if ``new_class`` was given as a dotted-path
            # string, this assigns a str to __class__ and would raise --
            # confirm string targets are never combined with pickled state.
            self.__dict__.update(state)
            self.__class__ = new_class

    return _class
def rename_module(oldname, newname):
    """Register a module rename in the module-level ``module_name_map``."""
    module_name_map[oldname] = newname
def rename_class(oldname, newname):
    """Register a class rename in the module-level ``class_name_map``."""
    class_name_map[oldname] = newname
def move_class(oldclass, newclass):
    """Register a class relocation in the module-level ``class_move_map``."""
    class_move_map[oldclass] = newclass
| true |
9baa9330fb6849d65e53b1d9ac94aa3b4ed51f64 | Python | ricobafica/pdsnd_github | /bikeshare.py | UTF-8 | 9,576 | 4 | 4 | [] | no_license | """
This python code provide the following information by exploring
US Bikeshare Data from Chicago, New York City and Washington:
1- Popular times of travel (i.e., occurs most often in the start time)
most common month
most common day of week
most common hour of day
2- Popular stations and trip
most common start station
most common end station
most common trip from start to end (i.e., most frequent combination of start station and end station)
3- Trip duration
total travel time
average travel time
4- User info
counts of each user type
counts of each gender (only available for NYC and Chicago)
earliest, most recent, most common year of birth (only available for NYC and Chicago)
"""
import time
import pandas as pd
import calendar
CITY_DATA = { 'Chicago': 'chicago.csv',
'New York': 'new_york_city.csv',
'Washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze (title-cased key of CITY_DATA)
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    # Get user input for city; loop until an eligible city is typed.
    while True:
        city = input("Select a city from {}, {} or {}:".format(*CITY_DATA.keys())).strip().title()
        if city in CITY_DATA.keys():
            break
        else:
            print("Incorrect value. Please write an eligible city!")
    # Choose which kind of time filter to apply ('none' means no filter).
    times = ['month', 'day', 'none']
    while True:
        time_filter = input("Would you like to filter the data by month, day, or not at all? Type \"none\" no time filter:").lower()
        if time_filter in times:
            break
        else:
            print("Incorrect value. Please write an eligible filter to the time!")
    if time_filter == 'month':
        # Month filter chosen: the day filter is disabled ('All').
        months = ['January', 'February', 'March', 'April', 'May', 'June', 'All']
        while True:
            month = input("Which month? January, February, March, April, May, June, or All: ").title()
            if month in months:
                day = 'All'
                break
            else:
                print("Incorrect value. Please write an eligible filter to month!")
    elif time_filter == 'day':
        # Day filter chosen: the month filter is disabled ('All').
        days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'All']
        while True:
            day = input("Which day of week to filter? Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, All: ").title()
            if day in days:
                month = 'All'
                break
            else:
                print("Incorrect value. Please write an eligible filter to day o week!")
    else:
        # No time filter at all.
        month = 'All'
        day = 'All'
    print('-'*80)
    print("Thanks, We will calculate Bikeshare statistics filtered by:\n City: {} \n Month: {} \n Day: {}".format(city, month, day))
    print('-'*80)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze (key of CITY_DATA)
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    start_time = time.time()
    # load data file into a dataframe
    df = pd.read_csv(CITY_DATA[city])

    # convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])

    # extract month and day of week from Start Time to create new columns.
    # BUG FIX: the original used ``.dt.weekday_name``, which was removed in
    # pandas 1.0; ``.dt.day_name()`` returns the same day-name strings.
    df['month'] = df['Start Time'].dt.month
    df['day_of_week'] = df['Start Time'].dt.day_name()

    if month != 'All':
        # convert the month name to its 1-based number, then filter
        months = ['January', 'February', 'March', 'April', 'May', 'June']
        month = months.index(month) + 1
        df = df[df['month'] == month]

    if day != 'All':
        # filter by day-of-week name
        df = df[df['day_of_week'] == day.title()]

    print("The first 5 rows for the bikeshare data from {} are:\n".format(city), df.head())
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*80)
    return df
def time_stats(df):
    """Print the most common month, weekday and start hour of the trips.

    Expects ``Start Time`` (datetime), ``month`` and ``day_of_week``
    columns; adds an ``hour`` column to *df* as a side effect.
    """
    print('Calculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    # mode()[0] yields the most frequent value of a column.
    print(' Most Common Month:', calendar.month_name[df['month'].mode()[0]])
    print(' Most Popular Day of Week:', df['day_of_week'].mode()[0])

    # Derive the start hour (0-23) before looking for its mode.
    df['hour'] = df['Start Time'].dt.hour
    print(' Most Popular Start Hour:', df['hour'].mode()[0])

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*80)
def station_stats(df):
    """Print the most popular start station, end station and trip.

    Expects ``Start Station`` and ``End Station`` columns; adds a
    ``Trip Combined`` ("FROM <start> TO <end>") column to *df* as a
    side effect.
    """
    print('Calculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    print(' Most Common Start Station:', df['Start Station'].mode()[0])
    print(' Most Common End Station:', df['End Station'].mode()[0])

    # Combine both endpoints so the most frequent pair can be found.
    df['Trip Combined'] = 'FROM ' + df['Start Station'] + ' TO ' + df['End Station']
    print(' Most Common Trip:', df['Trip Combined'].mode()[0])

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*80)
def trip_duration_stats(df):
    """Print total and average trip duration in several units.

    Expects a ``Trip Duration`` column holding seconds.
    """
    print('Calculating Trip Duration...\n')
    start_time = time.time()

    durations = df['Trip Duration']

    total = durations.sum()
    print(" Total Travel Time Duration in sec:", total)
    print(" Total Travel Time Duration in years:", total / (60 * 60 * 24 * 365))

    average = durations.mean()
    print("\n Average Travel Time Duration in sec:", average)
    print(" Average Travel Time Duration in minutes:", average / 60)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*80)
def user_stats(df):
    """Displays statistics on bikeshare users.

    Prints counts per user type and, when the columns exist, counts per
    gender plus earliest / most recent / most common birth year. Gender
    and Birth Year are absent from the Washington dataset, in which case
    an explanatory message is printed instead.
    """
    print('Calculating User Stats...\n')
    start_time = time.time()

    # counts of user types
    user_types = df['User Type'].value_counts()
    print(" Counts of each user Type:\n", user_types)

    try:
        # counts of gender (raises KeyError when the column is missing)
        gender_qty = df['Gender'].value_counts()
        print("\n Counts of Each Gender:\n", gender_qty)

        # earliest, most recent, and most common year of birth.
        # BUG FIX: "most common" was computed with mean() in the original;
        # the most common value is the mode, not the average.
        earliest_birth_year = int(df['Birth Year'].min())
        last_birth_year = int(df['Birth Year'].max())
        common_birth_year = int(df['Birth Year'].mode()[0])
        print("\n Earliest Year of Birth:", earliest_birth_year)
        print(" Most Recent Year of Birth:", last_birth_year)
        print(" Most Common Year of Birth:", common_birth_year)
    except KeyError:
        print("\n Sorry about earliest, most recent, and most common year of birth information:")
        print(" Washington doesn\'t have data about either Gender and Birth from its users")

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*80)
def main():
    """Drive the interactive session: gather filters, load the data, walk
    the user through the four statistics blocks one keypress at a time,
    then offer a restart."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        go_on = False
        index = 0
        # Each 'y' advances to the next statistics block; any other key
        # skips the remaining blocks.
        while go_on == False:
            index += 1
            next_function = input("To continue press y: ").lower()
            if next_function == 'y':
                if index == 1:
                    time_stats(df)
                elif index == 2:
                    station_stats(df)
                elif index == 3:
                    trip_duration_stats(df)
                elif index == 4:
                    user_stats(df)
                    # All four blocks shown -- leave the inner loop.
                    # NOTE(review): indentation reconstructed; this
                    # assignment is assumed to belong to the index == 4
                    # branch -- confirm against the original file.
                    go_on = True
            else:
                go_on = True
        restart = input('\nWould you like to restart? Enter "yes" to continue or any other key to exit.\n')
        if restart.lower() != 'yes':
            break
if __name__ == "__main__":
main()
| true |
2811461faf0aea04d77b4dfa8d7a472de5d6dd48 | Python | tadams42/seveno_pyutil | /src/seveno_pyutil/logging_utilities/pretty_formatter.py | UTF-8 | 2,971 | 2.9375 | 3 | [
"MIT"
] | permissive | import logging
import colorlog
from pretty_traceback.formatting import exc_to_traceback_str
class PrettyFormatter(colorlog.ColoredFormatter):
    """
    Logging formatter for pretty logs:

    - can optionally colorize output
    - can optionally force output to be single line
    - reformats logged tracebacks

    To colorize output:

    - instantiate formatter with `colorize = True`
    - use color placeholders in format string, ie `%(red)s text that will be red %(reset)s`

    Example YAML logging config to use it:

    .. code-block:: yaml

        ---
        version: 1

        loggers:
          my_app:
            handlers:
              - console
            level: INFO
            propagate: false

        handlers:
          console:
            class: logging.StreamHandler
            filters:
              - request_id
            stream: ext://sys.stdout
            formatter: colored_multiline

        formatters:
          colored_multiline:
            (): seveno_pyutil.PrettyFormatter
            force_single_line: false
            colorize: true
            format: >-
              lvl=%(log_color)s%(levelname)s%(reset)s
              ts=%(thin_white)s%(asctime)s%(reset)s
              msg=%(message)s

          colorless_single_line:
            (): seveno_pyutil.PrettyFormatter
            force_single_line: true
            colorize: false
            format: >-
              lvl=%(levelname)s
              ts=%(asctime)s
              msg=%(message)s

    Some of available colors are:

    .. code-block:: python

        [
            'black', 'bold_black', 'thin_black', 'red', 'bold_red', 'thin_red', 'green',
            'bold_green', 'thin_green', 'yellow', 'bold_yellow', 'thin_yellow', 'blue',
            'bold_blue', 'thin_blue', 'purple', 'bold_purple', 'thin_purple', 'cyan',
            'bold_cyan', 'thin_cyan', 'white', 'bold_white', 'thin_white', "..."
        ]

    Others can be retrieved via:

    .. code-block:: python

        colorlog.ColoredFormatter()._escape_code_map("DEBUG").keys()

    or check docs for `colorlog`.
    """

    def __init__(
        self, *args, force_single_line: bool = True, colorize: bool = False, **kwargs
    ):
        # Remember the two behavior switches before delegating to colorlog.
        self.force_single_line = force_single_line
        self.colorize = colorize

        # Adjust any base class arguments that might have slipped in:
        # our ``colorize`` flag is authoritative, so drop conflicting
        # color switches and re-derive them below.
        kwargs.pop("force_color", None)
        kwargs.pop("no_color", None)
        if not self.colorize:
            kwargs["no_color"] = True
        else:
            kwargs["force_color"] = True

        super().__init__(*args, **kwargs)

    def formatException(self, ei) -> str:
        """Render the ``(type, value, traceback)`` triple via
        pretty-traceback, colorized when this formatter colorizes."""
        _, exc_value, traceback = ei
        return exc_to_traceback_str(exc_value, traceback, color=self.colorize)

    def format(self, record: logging.LogRecord) -> str:
        """Format *record* normally, then optionally collapse the result
        to a single line by escaping newlines as literal ``\\n``."""
        retv = super().format(record)

        if self.force_single_line:
            return retv.replace("\n", "\\n")

        return retv
| true |
219d41b853734b74eb3231dc254b4cdb089efab3 | Python | kristapsdzelzkalejs/eksamens | /rinka_laukums.py | UTF-8 | 132 | 3.015625 | 3 | [] | no_license | pi = 3.14
radius = int(input('Ievadi rinka radiusu :'))
laukums = pi * radius * radius
print("Apla laukums ir: {}".format(laukums))
| true |
4ddbee3e31608485b89027863d1489bc0080d6bc | Python | analysiscenter/batchflow | /batchflow/tests/config_basic_test.py | UTF-8 | 11,511 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | """
Each function tests a specific Config class method.
"""
import sys
import pytest
sys.path.append('../..')
from batchflow import Config
def test_dict_init():
    """
    Tests Config.__init__() with dictionaries in slashed, nested, mixed
    and Config-valued forms; Config.flatten() is used to inspect the
    resulting inner structure.
    """
    cases = [
        # slashed-structured dictionary
        ({'a': 1, 'b/c': 2, 'b/d': 3}, {'a': 1, 'b/c': 2, 'b/d': 3}),
        # nested-structured dictionary
        ({'a': {}, 'b': {'c': 2, 'd': 3}}, {'a': {}, 'b/c': 2, 'b/d': 3}),
        # mixed-structured dictionary
        ({'a': None, 'b/c': 2, 'b': {'d': 3}}, {'a': None, 'b/c': 2, 'b/d': 3}),
        # dictionary holding a Config value
        ({'a': Config({'b': 2})}, {'a/b': 2}),
    ]
    for init_dict, expected_flat in cases:
        assert Config(init_dict).flatten() == expected_flat
def test_dict_init_bad():
    """
    Tests that Config.__init__() rejects dictionaries whose keys are
    not strings.
    """
    for key, value in [(0, 1), (False, True)]:
        with pytest.raises(TypeError):
            Config({key: value})
def test_list_init():
    """
    Tests Config.__init__() with lists of key-value pairs in slashed,
    nested, mixed and Config-valued forms; Config.flatten() is used to
    inspect the resulting inner structure.
    """
    cases = [
        # slashed-structured list
        ([('a', 1), ('b/c', 2), ('b/d', 3)], {'a': 1, 'b/c': 2, 'b/d': 3}),
        # nested-structured list
        ([('a', {}), ('b', {'c': 2, 'd': 3})], {'a': {}, 'b/c': 2, 'b/d': 3}),
        # mixed-structured list
        ([('a', None), ('b/c', 2), ('b', {'d': 3})], {'a': None, 'b/c': 2, 'b/d': 3}),
        # list holding a Config value
        ([('a', Config({'b': 2}))], {'a/b': 2}),
    ]
    for init_list, expected_flat in cases:
        assert Config(init_list).flatten() == expected_flat
def test_list_init_bad():
    """
    Tests that Config.__init__() rejects lists with non-string keys or
    with items that are not key-value pairs.
    """
    # non-string keys raise TypeError
    for bad_items in ([(0, 1)], [(False, True)]):
        with pytest.raises(TypeError):
            Config(bad_items)
    # items of the wrong shape raise ValueError
    with pytest.raises(ValueError):
        Config([('a', 0, 1)])
def test_config_init():
    """
    Tests that Config.__init__() accepts another Config instance;
    Config.flatten() is used to inspect the resulting inner structure.
    """
    # Basically, there is nothing special here, but Config must be
    # initializable from its own instances.
    source = Config({'a': 0})
    assert Config(source).flatten() == {'a': 0}
def test_pop():
    """
    Tests Config.pop(): the return value is compared with the expected
    one, and Config.flatten() checks the structure that remains.
    """
    # popping a scalar value by a slashed-structured key
    config = Config({'a': 1, 'b/c': 2, 'b/d': 3})
    assert config.pop('b/c') == 2
    assert config.flatten() == {'a': 1, 'b/d': 3}

    # popping a dict value by a simple key
    config = Config({'a': 1, 'b/c': 2, 'b/d': 3})
    assert config.pop('b') == {'c': 2, 'd': 3}
    assert config.flatten() == {'a': 1}
def test_get():
    """
    Tests Config.get() and dotted attribute access: return values are
    compared with expected ones, and Config.flatten() checks that the
    instance itself is left intact.
    """
    # getting a scalar value by a slashed-structured key
    config = Config({'a': {'b': 1}})
    assert config.get('a/b') == 1
    assert config.flatten() == {'a/b': 1}

    # the same scalar value via chained dotted access
    nested = Config({'a': {'b': 1}})
    for part in 'a/b'.split('/'):
        nested = getattr(nested, part)
    assert nested == 1

    # getting a dict value by a simple key
    config = Config({'a': {'b': 1}})
    assert config.get('a') == {'b': 1}
    assert config.a.flatten() == {'b': 1}    # dotted access works as well
    assert config.flatten() == {'a/b': 1}
def test_put():
    """
    Tests Config.put(): values are placed by key and Config.flatten()
    checks the resulting inner structure.
    """
    # putting a scalar value by a simple key
    config = Config({'a': 1})
    config.put('b', 2)
    assert config.flatten() == {'a': 1, 'b': 2}

    # putting a scalar value by a slashed-structured key
    config = Config({'a/b': 1})
    config.put('a/c', 2)
    assert config.flatten() == {'a/b': 1, 'a/c': 2}

    # putting a dict value by a simple key
    config = Config({'a/b': 1})
    config.put('a', {'c': 2})
    assert config.flatten() == {'a/b': 1, 'a/c': 2}
def test_flatten():
    """
    Tests Config.flatten() on None, empty, simple and nested configs.
    """
    cases = [
        (None, {}),                           # flatten a None config
        ({}, {}),                             # flatten an empty config
        ({'a': 1}, {'a': 1}),                 # flatten a simple config
        ({'a': {'b': {}, 'c': {'d': None}}},  # flatten a nested config
         {'a/b': {}, 'a/c/d': None}),
    ]
    for init, expected_flat in cases:
        assert Config(init).flatten() == expected_flat
def test_add():
    """
    Tests Config.__add__(): two Config instances are summed (the addend
    wins on intersections) and the result's inner structure is checked
    via Config.flatten().
    """
    # simple summands with a non-empty intersection
    result = Config({'a': 1, 'b': 2}) + Config({'b': 3, 'c': 4})
    assert result.flatten() == {'a': 1, 'b': 3, 'c': 4}

    # nested summands with a non-empty intersection
    result = Config({'a/b': 1, 'a/c': 2}) + Config({'a/c/d': 3, 'e/f': 4})
    assert result.flatten() == {'a/b': 1, 'a/c/d': 3, 'e/f': 4}

    # nested summands with non-standard values such as None and empty dict
    result = Config({'a/b': 1, 'b/d': {}}) + Config({'a': {}, 'b/d': None})
    assert result.flatten() == {'a/b': 1, 'b/d': None}
def test_iadd_items():
    """
    Tests item assignment and in-place addition (__iadd__) on Config
    items; Config.flatten() checks the inner structure.
    """
    # assigning a scalar replaces the whole nested branch
    config = Config({'a/b': 1, 'a/c': 2})
    config['a'] = 0
    assert config.flatten() == {'a': 0}

    # assigning a dict also replaces the whole nested branch
    config = Config({'a/b': 1, 'a/c': 2})
    config['a'] = dict(b=0, d=3)
    assert config.flatten() == {'a/b': 0, 'a/d': 3}

    # in-place addition updates and extends the branch instead
    config = Config({'a/b': 1, 'a/c': 2})
    config['a'] += dict(b=0, d=3)
    assert config.flatten() == {'a/b': 0, 'a/c': 2, 'a/d': 3}
def test_items():
    """
    Tests Config.items() in both full (nested) and flattened modes.

    BUG FIX: the original asserted ``list(...).sort() == exp.sort()``;
    ``list.sort()`` returns None, so those assertions compared
    ``None == None`` and could never fail. Sorted lists (or direct list
    equality for single-item results) are compared instead, keeping the
    checks independent of iteration order.
    """
    # simple
    config = Config({'a': 1})
    assert sorted(config.items(flatten=False)) == [('a', 1)]
    assert sorted(config.items(flatten=True)) == [('a', 1)]

    # nested
    config = Config({'a': {'b': 1, 'c': 2}})
    assert list(config.items(flatten=False)) == [('a', {'b': 1, 'c': 2})]
    assert sorted(config.items(flatten=True)) == [('a/b', 1), ('a/c', 2)]

    # deeply nested
    config = Config({'a': {'b': 1, 'c': {'d': 2}}})
    assert list(config.items(flatten=False)) == [('a', {'b': 1, 'c': {'d': 2}})]
    assert sorted(config.items(flatten=True)) == [('a/b', 1), ('a/c/d', 2)]
def test_keys():
    """
    Tests Config.keys()

    The original compared ``list(...).sort() == exp.sort()``; both sides are
    None, so every assertion always passed.  Fixed with ``sorted(...)``.
    """
    #Simple
    config = Config({'a' : 1})
    assert sorted(config.keys(flatten=False)) == ['a']
    assert sorted(config.keys(flatten=True)) == ['a']
    #Nested
    config = Config({'a' : {'b' : 1, 'c' : 2}})
    assert sorted(config.keys(flatten=False)) == ['a']
    assert sorted(config.keys(flatten=True)) == ['a/b', 'a/c']
    #Deeply nested
    config = Config({'a' : {'b' : 1, 'c' : {'d' : 2}}})
    assert sorted(config.keys(flatten=False)) == ['a']
    assert sorted(config.keys(flatten=True)) == ['a/b', 'a/c/d']
def test_values():
    """
    Tests Config.values()

    The original compared ``list(...).sort() == exp.sort()``; both sides are
    None, so every assertion always passed.  Fixed by comparing lists (full
    views hold a single, possibly-dict, value; flat views are sortable).
    """
    #Simple
    config = Config({'a' : 1})
    assert list(config.values(flatten=False)) == [1]
    assert sorted(config.values(flatten=True)) == [1]
    #Nested
    config = Config({'a' : {'b' : 1, 'c' : 2}})
    # single element; dicts are not orderable, so compare directly
    assert list(config.values(flatten=False)) == [{'b' : 1, 'c' : 2}]
    assert sorted(config.values(flatten=True)) == [1, 2]
    #Deeply nested
    config = Config({'a' : {'b' : 1, 'c' : {'d' : 2}}})
    assert list(config.values(flatten=False)) == [{'b' : 1, 'c' : {'d' : 2}}]
    assert sorted(config.values(flatten=True)) == [1, 2]
def test_update():
    """
    Tests Config.update()
    For inner structure check Config.flatten() is used.
    """
    cases = [
        # value replacement by slash-structured key
        ({'a/b': 1, 'a/c': 2}, {'a/c': 3, 'a/d': 4},
         {'a/b': 1, 'a/c': 3, 'a/d': 4}),
        # value insertion by slash-structured key
        ({'a/b': 1}, {'a/c/d': 2},
         {'a/b': 1, 'a/c/d': 2}),
        # update with None and empty-dict values
        ({'a': {}, 'b': None}, {'a': None, 'b': {}},
         {'a': None, 'b': {}}),
    ]
    for old_data, new_data, expected in cases:
        cfg = Config(old_data)
        cfg.update(Config(new_data))
        assert cfg.flatten() == expected
| true |
d94afc4b0bdda61a009b73bb7d080b4703dcdd0c | Python | strengthen/LeetCode | /Python3/377.py | UTF-8 | 1,973 | 3.515625 | 4 | [
"MIT"
] | permissive | __________________________________________________________________________________________________
sample 32 ms submission
class Solution:
    def combinationSum4(self, nums: List[int], target: int) -> int:
        """Count ordered sequences drawn from nums (with repetition)
        whose elements sum to target (bottom-up DP)."""
        # ways[s] = number of orderings summing to s; empty sequence sums to 0.
        ways = [1] + [0] * target
        for total in range(1, target + 1):
            for num in nums:
                if num <= total:
                    ways[total] += ways[total - num]
        return ways[target]
__________________________________________________________________________________________________
sample 13304 kb submission
class Solution:
    def combinationSum4(self, nums: List[int], target: int) -> int:
        """Exact-sum knapsack DP (cf. 322/518 Coin Change): count ordered
        combinations of nums adding up to target.

        Assumes nums holds distinct positive integers and target > 0;
        returns 0 for an empty/None nums.
        """
        if not nums:
            return 0
        # counts[s]: number of ordered sequences summing to s.
        counts = [0] * (target + 1)
        counts[0] = 1
        for amount in range(1, target + 1):
            # Let each number, in turn, be the last element of the sequence.
            counts[amount] = sum(
                counts[amount - num] for num in nums if amount - num >= 0
            )
        return counts[target]
__________________________________________________________________________________________________
| true |
939bd11e2395e43faa4950124fcb846fe2d5abfc | Python | chengaddone/fblog | /blog/models.py | UTF-8 | 8,388 | 2.578125 | 3 | [] | no_license | # 项目的模型类
import hashlib
from .extensions import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
class BaseModel(object):
    """Model mixin that stamps every record with creation/update times."""
    create_time = db.Column(db.DateTime, default=datetime.now)  # record creation time
    update_time = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now)  # record last-update time
class User(BaseModel, db.Model):
    """User model: account credentials, profile data and relations."""
    __tablename__ = "blog_user"
    id = db.Column(db.Integer, primary_key=True)  # user id
    nick_name = db.Column(db.String(32), unique=True, nullable=False)  # user nickname
    password_hash = db.Column(db.String(128), nullable=False)  # hashed password
    email = db.Column(db.String(254), unique=True, nullable=False)  # user email address
    avatar_url = db.Column(db.String(128))  # avatar image path
    last_login = db.Column(db.DateTime, default=datetime.now)  # last login time
    is_admin = db.Column(db.Boolean, default=False)  # whether the user is an administrator
    signature = db.Column(db.String(512))  # personal signature
    gender = db.Column(db.Enum("MAN", "WOMAN"), default="MAN")  # user gender
    # posts published by this user
    post_list = db.relationship('Post', backref='user', lazy='dynamic')
    # comments published by this user
    comment_list = db.relationship('Comment', backref='user', lazy='dynamic')
    # When the user renames, the old nickname is kept here so it can be
    # restored if the new name fails review.
    old_name = db.Column(db.String(32), nullable=True)
    # Rename flag: 1 is the default, 0 means the user changed the nickname.
    name_state = db.Column(db.Integer, default=1)
    @property  # expose `password` as a write-only attribute
    def password(self):
        # Reading the plain password is forbidden by design.
        raise AttributeError("当前属性不允许读")  # raised when the attribute is read directly
    @password.setter  # setter hashes the plain password into password_hash
    def password(self, value):
        self.password_hash = generate_password_hash(value)
    def check_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def gravatar_hash(self):
        """MD5 hex digest of the lower-cased email, as Gravatar requires."""
        return hashlib.md5(self.email.lower().encode('utf-8')).hexdigest()
    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL for this user's email."""
        url = 'https://secure.gravatar.com/avatar'
        hash = self.gravatar_hash()  # NOTE: local name shadows the builtin `hash`
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)
    def to_dict(self):
        """Serialize the public profile fields; falls back to Gravatar avatar."""
        resp_dict = {
            "id": self.id,
            "nick_name": self.nick_name,
            "avatar_url": self.avatar_url if self.avatar_url else self.gravatar(),
            "email": self.email,
            "gender": self.gender if self.gender else "MAN",
            "signature": self.signature if self.signature else "",
        }
        return resp_dict
    def to_admin_dict(self):
        """Serialize fields shown in the admin user listing."""
        resp_dict = {
            "id": self.id,
            "nick_name": self.nick_name,
            "email": self.email,
            "register": self.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "last_login": self.last_login.strftime("%Y-%m-%d %H:%M:%S"),
            "update_time": self.update_time.strftime("%Y-%m-%d %H:%M:%S")
        }
        return resp_dict
class Post(db.Model, BaseModel):
    """Blog post model."""
    __tablename__ = "blog_post"
    id = db.Column(db.Integer, primary_key=True)  # post id
    title = db.Column(db.String(256), nullable=False)  # post title
    digest = db.Column(db.String(512), nullable=False)  # post summary
    content = db.Column(db.Text, nullable=False)  # post body
    clicks = db.Column(db.Integer, default=0)  # view count
    index_image_url = db.Column(db.String(256))  # listing image path
    category_id = db.Column(db.Integer, db.ForeignKey("blog_category.id"))
    user_id = db.Column(db.Integer, db.ForeignKey("blog_user.id"))  # author id
    # Review status: 0 approved, 1 under review, -1 rejected.
    # NOTE(review): this convention is the opposite of Comment.status
    # (there 0 means "under review") — confirm which one the views expect.
    status = db.Column(db.Integer, default=0)
    reason = db.Column(db.String(256))  # rejection reason, used when status == -1
    like_count = db.Column(db.Integer, default=0)  # number of likes
    # all comments on this post
    comments = db.relationship("Comment", lazy="dynamic", backref='post', cascade="all, delete-orphan")
    def to_review_dict(self):
        """Serialize the fields shown in the review queue."""
        resp_dict = {
            "id": self.id,
            "title": self.title,
            "create_time": self.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "status": self.status,
            "reason": self.reason if self.reason else ""
        }
        return resp_dict
    def to_basic_dict(self):
        """Serialize the fields shown in post listings (no body)."""
        resp_dict = {
            "id": self.id,
            "title": self.title,
            "digest": self.digest,
            "create_time": self.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "index_image_url": self.index_image_url,
            "clicks": self.clicks,
            "like_count": self.like_count,
            "category": self.category.to_dict(),
            "comments_count": self.comments.count(),
        }
        return resp_dict
    def to_dict(self):
        """Serialize the full post, including body and author profile."""
        resp_dict = {
            "id": self.id,
            "title": self.title,
            "digest": self.digest,
            "create_time": self.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "content": self.content,
            "comments_count": self.comments.count(),
            "clicks": self.clicks,
            "category": self.category.to_dict(),
            "index_image_url": self.index_image_url,
            "author": self.user.to_dict() if self.user else None,
            "like_count": self.like_count
        }
        return resp_dict
class Category(BaseModel, db.Model):
    """Post category model."""
    __tablename__ = "blog_category"
    id = db.Column(db.Integer, primary_key=True)  # category id
    name = db.Column(db.String(64), nullable=False)  # category name
    post_list = db.relationship('Post', backref='category', lazy='dynamic')
    def to_dict(self):
        """Serialize the category as {id, name}."""
        resp_dict = {
            "id": self.id,
            "name": self.name
        }
        return resp_dict
class Comment(BaseModel, db.Model):
    """Comment model; supports one level of reply via a self-reference."""
    __tablename__ = "blog_comment"
    id = db.Column(db.Integer, primary_key=True)  # comment id
    user_id = db.Column(db.Integer, db.ForeignKey("blog_user.id"), nullable=False)  # author id
    post_id = db.Column(db.Integer, db.ForeignKey("blog_post.id"), nullable=False)  # post id
    content = db.Column(db.Text, nullable=False)  # comment body
    parent_id = db.Column(db.Integer, db.ForeignKey("blog_comment.id"))  # parent comment id
    parent = db.relationship("Comment", remote_side=[id])  # self-referential relation
    # Review status: 0 under review, 1 approved, -1 rejected
    # (note: Post.status uses the opposite meaning for 0/1).
    status = db.Column(db.Integer, default=0)
    reason = db.Column(db.String(256))  # rejection reason, used when status == -1
    def to_dict(self):
        """Serialize the comment, embedding its author and parent (if any)."""
        resp_dict = {
            "id": self.id,
            "create_time": self.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "content": self.content,
            "parent": self.parent.to_dict() if self.parent else None,
            "user": User.query.get(self.user_id).to_dict(),
            "post_id": self.post_id,
        }
        return resp_dict
class PostLike(BaseModel, db.Model):
    """Association table recording which users liked which posts."""
    __tablename__ = "post_like"
    # NOTE(review): the underlying DB column is named "comment_id" although
    # it stores a post id — looks like a copy-paste slip; renaming it would
    # require a schema migration, so it is only flagged here.
    post_id = db.Column("comment_id", db.Integer, db.ForeignKey("blog_post.id"), primary_key=True)  # post id
    user_id = db.Column("user_id", db.Integer, db.ForeignKey("blog_user.id"), primary_key=True)  # user id
class AboutMe(BaseModel, db.Model):
    """Content of the "about me" page (single-row table)."""
    __tablename__ = "about_me"
    id = db.Column(db.Integer, primary_key=True)  # row id
    content = db.Column(db.Text, nullable=False)  # page content
    def to_dict(self):
        """Serialize as {content}."""
        resp_dict = {
            "content": self.content
        }
        return resp_dict
| true |
c523a59c5f7ac6b3220eae94d30d9706dcbe5c91 | Python | LSaldyt/l33t | /microsoft/search.py | UTF-8 | 2,348 | 3.234375 | 3 | [] | no_license | # Lucas Saldyt 2019
import sys
import time
def main(args):
    """Run the demo query against a fixed letter grid, timing the plain
    and memoized variants of `exhaustive`.

    Returns 0 (process exit status).  The original assigned a small
    4x4 grid and three queries first, then immediately overwrote both
    variables — that dead code is removed.
    """
    # A 5x5 grid of 'a's stresses the exponential search; the cached
    # variant should be dramatically faster on it.
    problem = ['aaaaa'] * 5
    queries = ['a' * 7]
    print('Problem:')
    for row in problem:
        print(row)
    for query in queries:
        begin = time.time()
        first = exhaustive(problem, query)
        mid = time.time()
        second = exhaustive(problem, query, save=True)
        final = time.time()
        print('Query: {} Occurances: {} Time: {}'.format(query, first, mid - begin))
        print('Query: {} Occurances: {} Time: {}'.format(query, second, final - mid))
    return 0
def exhaustive(array, query, save=False):
    '''
    Count how many adjacent-cell paths in `array` spell out `query`.
    When `save` is true, intermediate results are memoized.
    '''
    cache = {}
    if not array or not query:
        return 0
    rows, cols = len(array), len(array[0])
    total = 0
    for r in range(rows):
        for c in range(cols):
            if array[r][c] != query[0]:
                continue
            # Start a fresh path at every matching first letter.
            visited = {(r, c)}
            total += count(array, visited, query[1:], r, c, rows, cols, cache, save)
    return total
def valid(i, j, x, y):
    """Return True when (i, j) lies inside an x-by-y grid."""
    return 0 <= i < x and 0 <= j < y
def count(array, seen, query, i, j, x, y, cache, save):
    """Recursively count paths spelling `query` starting adjacent to (i, j).

    `seen` holds the cells already used on the current path; it is mutated
    in place and restored before returning.  When `save` is true, results
    are memoized in `cache`, keyed by (sorted path cells, remaining query,
    position) — the sort makes the key independent of visit order.
    """
    if len(query) == 0:
        # Whole query matched along this path.
        return 1
    if save and (tuple(sorted(list(seen))), query, i, j) in cache:
        return cache[(tuple(sorted(list(seen))), query, i, j)]
    # All 8 neighbours (diagonals included), clipped to the grid.
    neighbors = [(i, j) for(i, j) in [(i - 1, j - 1),
                                      (i - 1, j),
                                      (i, j - 1),
                                      (i + 1, j),
                                      (i, j + 1),
                                      (i + 1, j + 1),
                                      (i + 1, j - 1),
                                      (i - 1, j + 1)] if valid(i, j, x, y)]
    total = 0
    for ni, nj in neighbors:
        if (ni, nj) not in seen:
            if array[ni][nj] == query[0]:
                # Extend the path, recurse on the rest of the query, backtrack.
                seen.add((ni, nj))
                total += count(array, seen, query[1:], ni, nj, x, y, cache, save)
                seen.remove((ni, nj))
    if save:
        cache[(tuple(sorted(list(seen))), query, i, j)] = total
    return total
if __name__ == '__main__':
    # Script entry point: forward CLI args (minus program name) to main().
    sys.exit(main(sys.argv[1:]))
| true |
35d0af616796efaf1f0046bc2ee97ddb61d04942 | Python | ArshanKhanifar/eopi_solutions | /src/arshan/problem_5_p_6_var1_arshan.py | UTF-8 | 721 | 2.96875 | 3 | [] | no_license | from protocol.problem_5_p_6_var1 import Problem5P6Var1
class Problem5P6Var1Arshan(Problem5P6Var1):
    def find_longest_equal_sublist(self, int_list):
        """Return the highest frequency of any single value in int_list.

        The original had a second `return self.brute_force_with_lookup(...)`
        after this one — unreachable dead code, now removed; the brute-force
        variant is kept below as an alternative implementation.
        """
        return self.max_so_far_with_lookup(int_list)

    def max_so_far_with_lookup(self, int_list):
        """Single pass: track the running maximum count per value.

        Returns 0 for an empty list.
        """
        m = 0
        l = {}
        for i in int_list:
            if i not in l:
                l[i] = 0
            l[i] += 1
            m = max(m, l[i])
        return m

    def brute_force_with_lookup(self, input_list):
        """Count every value first, then take the maximum count.

        Note: raises ValueError on an empty list (max of empty sequence),
        matching the original behaviour.
        """
        l = {}
        for i in input_list:
            if i not in l:
                l[i] = 0
            l[i] += 1
        return max([l[k] for k in l])
| true |
6873e748cc476e4ff037e318d3ee33b25098e7f9 | Python | ryandancy/project-euler | /problem67.py | UTF-8 | 1,884 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Project Euler Problem 67:
By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top
to bottom is 23.
3
7 4
2 4 6
8 5 9 3
That is, 3 + 7 + 4 + 9 = 23.
Find the maximum total from top to bottom in data/p067_triangle.txt, a 15K text file containing a triangle with
one-hundred rows.
"""
# This is exactly the same as problem 18's algorithm
# Use something like Djiksta's algorithm, but searching for the highest-value path
from collections import defaultdict
# Build a graph of the triangle
# Format is id: ((to_id1, weight1), (to_id2, weight2))
# Parse the triangle file: one row per line, space-separated integers.
with open('data/p067_triangle.txt') as tri:
    triangle_list = [list(map(int, row.split(' '))) for row in tri.readlines()]
# Build the DAG: node ids are assigned row by row, left to right; each node
# (except in the last row) points to its two children with their values as
# edge weights.  Format: id -> ((to_id1, weight1), (to_id2, weight2))
graph = {}
current_id = 0
for row_num, row in enumerate(triangle_list[:-1]):
    for i, value in enumerate(row):
        # Left child of node (row, i) is the node directly below it.
        to_id = current_id + len(row)
        graph[current_id] = ((to_id, triangle_list[row_num + 1][i]), (to_id + 1, triangle_list[row_num + 1][i + 1]))
        current_id += 1
# distances[id]: best (maximum) path total from the apex to that node.
distances = defaultdict(lambda: 0)
distances[0] = triangle_list[0][0]
current = 0
row_num_to_visit = 0
row_to_visit = {0}
next_row_to_visit = set()
# Dijkstra-like sweep, but maximizing and processing one row at a time.
while True:
    if not row_to_visit:
        # Current row exhausted — advance to the next one.
        row_num_to_visit += 1
        if row_num_to_visit == len(triangle_list) - 1:
            # don't visit the last row
            break
        row_to_visit = next_row_to_visit
        next_row_to_visit = set()
    # Pick the pending node with the largest accumulated total.
    current = max(row_to_visit, key=lambda key: distances[key])
    row_to_visit.remove(current)
    neighbours = graph[current]
    current_dist = distances[current]
    for neighbour_id, neighbour_weight in neighbours:
        new_dist = current_dist + neighbour_weight
        if new_dist > distances[neighbour_id]:
            distances[neighbour_id] = current_dist + neighbour_weight
            next_row_to_visit.add(neighbour_id)
# The answer is the best total reaching any bottom-row node.
print(max(distances.values()))
| true |
f4f7e3afd8ff978199ff07a0121b788145a6fbc7 | Python | nhartland/GPPDF | /gppdf.py | UTF-8 | 5,020 | 3.171875 | 3 | [] | no_license | #! /usr/bin/env python3
"""
Gaussian Process Parton Distribution Functions (GPPDFs)
This script takes a prior (replica) PDF Set from LHAPDF and uses it to
define a Gaussian Process (GP). Rather than assuming a covariance function,
the covariance function of the prior PDF set is measured and used.
Correspondingly the GP mean is simply computed as the mean over the input
replicas. The script outputs the parameters of the GP, along with `ngen`
samples of the GP.
The output is a numpy 'npz' format file containing
- The name of the prior PDF
- The list of flavours in the GP
- The grid of x-points sampled in the GP
- The GP mean function evaluated on the x-grid
- The GP covariance function evaluated on the x-grid
- A numpy array of `nsamples` samples of the GP
The sample array has shape (`nsamples`, `nx*nf`) where `nx` is the number
of points in the sampled x-grid, and `nf` is the number of active flavours
in the GP. The x-grid points are currently hardcoded in `XGRID`, the
flavour basis is read from the prior LHAPDF set.
"""
import lhapdf
import argparse
import itertools
import numpy as np
from collections import namedtuple
# Colouring
from colorama import init
init() # Init colours
from colorama import Fore
# x-grid sampled when evaluating the GP: 200 log-spaced points in [1e-3, 1].
XGRID = np.logspace(-3,0,200)
# Record bundling everything that defines a Gaussian-process PDF.
GPPDF = namedtuple('GPPDF', ['prior', 'mean', 'covariance', 'Q0', 'flavours', 'xgrid'])
def get_active_flavours(pdfset, Q0):
    """
    Return the prior's flavour list with heavy quarks removed when their
    threshold lies above the initial scale Q0.

    LHAPDF metadata has no separate threshold entries, so each quark mass
    (MCharm/MBottom/MTop) is taken as its threshold.
    """
    flavours = [int(f) for f in pdfset.get_entry("Flavors").split(",")]
    # PDG id -> assumed threshold (== mass) for each heavy quark.
    thresholds = {
        4: float(pdfset.get_entry("MCharm")),
        5: float(pdfset.get_entry("MBottom")),
        6: float(pdfset.get_entry("MTop")),
    }
    for pid, mass in thresholds.items():
        if mass > Q0:
            flavours = [f for f in flavours if abs(f) != pid]
    return flavours
def generate_gp(prior):
    """ Generate the GP from an input `prior` PDF.

    Evaluates every replica of the prior on (flavour, x) grid points at the
    set's initial scale, then uses the replica mean and covariance as the
    GP mean and covariance function.  Returns a GPPDF record.
    """
    # Get PDFSet and replicas (split is to avoid replica 0, the central value)
    pdfset = lhapdf.getPDFSet(prior)
    replicas = pdfset.mkPDFs()[1:]
    # Initial scale and x-grid
    Q0 = float(pdfset.get_entry("QMin"))
    xs, nx = XGRID, len(XGRID)
    # Available flavours
    flavours = get_active_flavours(pdfset, Q0)
    nfl = len(flavours)
    print(f"{Fore.GREEN}Sampling {nx} x-points at initial scale: {Q0} GeV")
    # One row per (flavour, x) point, one column per replica.
    grid_points = list(itertools.product(flavours, xs))
    pdf_values = np.empty([nfl*nx, len(replicas)])
    for irep, rep in enumerate(replicas):
        for ipt, pt in enumerate(grid_points):
            pdf_values[ipt][irep] = rep.xfxQ(pt[0], pt[1], Q0)
    print("Computing stats")
    mean = np.mean(pdf_values, axis=1)
    covariance = np.cov(pdf_values)
    # Condition covariance matrix a bit
    # Should use attempt on multivariate_normal instead
    # Repeatedly add a diagonal regulator until no negative eigenvalue
    # remains, so later multivariate_normal sampling does not reject it.
    min_eig = np.min(np.real(np.linalg.eigvals(covariance)))
    while min_eig < 0:
        print(Fore.YELLOW)
        print("WARNING: Covariance matrix not positive-semidefinite")
        print(f"Minimum eigenvalue: {min_eig}")
        print("Introducing regulator...")
        covariance -= 100*min_eig * np.eye(*covariance.shape)
        min_eig = np.min(np.real(np.linalg.eigvals(covariance)))
        print(f"New minimum: {min_eig}")
    return GPPDF(prior, mean, covariance, Q0, flavours, xs)
def sample_gp(gppdf, nsamples):
    """Draw `nsamples` realisations of the GP and export them, together
    with the GP parameters, to a compressed npz file.

    Returns the (extension-less) output file name.
    """
    print(Fore.GREEN)
    print("Generating GPs")
    # One multivariate-normal draw per sample; 'raise' rejects a
    # non-positive-semidefinite covariance outright.
    draws = np.random.multivariate_normal(
        gppdf.mean, gppdf.covariance, nsamples, 'raise')
    outfile = 'GP_{}_{}'.format(gppdf.prior, len(draws))
    payload = dict(prior=gppdf.prior,
                   setname=outfile,
                   mean=gppdf.mean,
                   covariance=gppdf.covariance,
                   Q0=gppdf.Q0,
                   flavours=gppdf.flavours,
                   xgrid=gppdf.xgrid,
                   samples=draws)
    np.savez_compressed(outfile, **payload)
    return outfile
#TODO x-grid selection
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("priorpdf", help="prior LHAPDF grid used to generate GP", type=str)
    parser.add_argument("nsamples", help="number of GP samples", type=int)
    args = parser.parse_args()
    # Generate the GP from the prior pdf.
    # (The original chained assignment `gppdf = generate_gp = generate_gp(...)`
    #  accidentally rebound the function name to its own return value.)
    gppdf = generate_gp(args.priorpdf)
    # Sample the GP and write to file
    outfile = sample_gp(gppdf, args.nsamples)
    print(f'Results output to {outfile}')
| true |
4d1ce270a1414c4017c6a7a0f6760e54004053a0 | Python | heyitsdsp/SONAR | /plotter.py | UTF-8 | 394 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 12:39:29 2020
@author: dsp
"""
import turtle
import serial

# Open the Arduino's serial port at 9600 baud.  The original call passed a
# single malformed string ('/dev/ttyACM0", 9600') because of mismatched
# quotes, so the baud rate was never set.
ser = serial.Serial('/dev/ttyACM0', 9600)

window = turtle.Screen()
window.bgcolor('black')

t = turtle.Turtle()
t.color('cyan')
t.pensize(3)

# Read semicolon-terminated records while data is pending.  The original
# called serial.available()/serial.read_until() on the *module*; pySerial
# exposes these on the port instance (`in_waiting`, `read_until`), and
# read_until expects a bytes terminator.
while ser.in_waiting > 0:
    values = ser.read_until(b';')
    print(values)

turtle.bye()
ser.close()
| true |
fa7f7830eec3f0c6ea1d8aa88b558a7359011210 | Python | xzguy/LeetCode | /Problem 1001 - 1100/P1019.py | UTF-8 | 1,112 | 3.59375 | 4 | [] | no_license | # Definition for singly-linked list.
class ListNode:
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    # Brute force: for every node, scan the remainder of the list for the
    # first strictly larger value.  O(n^2) time, O(n) extra space.
    def nextLargerNodes(self, head: ListNode) -> [int]:
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        answer = [0] * len(values)
        for left in range(len(values) - 1):
            for right in range(left + 1, len(values)):
                if values[right] > values[left]:
                    answer[left] = values[right]
                    break
        return answer

    # Monotonic stack in one pass over the linked list: the stack holds
    # (result index, value) pairs still waiting for a larger element.
    # Amortized O(n) time.
    def nextLargerNodes_1(self, head: ListNode) -> [int]:
        pending = []
        res = []
        node = head
        while node:
            # Resolve every waiting entry smaller than the current value.
            while pending and pending[-1][1] < node.val:
                idx, _ = pending.pop()
                res[idx] = node.val
            pending.append((len(res), node.val))
            res.append(0)
            node = node.next
        return res
205d59a74a7f0782368c0f8fd793d80e52981638 | Python | yulyzulu/holbertonschool-machine_learning | /math/0x00-linear_algebra/100-slice_like_a_ninja.py | UTF-8 | 176 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
"""Module to execute functions"""
def np_slice(matrix, axes=None):
    """Slice `matrix` (assumed to be a numpy ndarray) along specific axes.

    `axes` maps an axis number to a tuple of slice arguments —
    (stop,), (start, stop) or (start, stop, step); axes not listed are
    taken whole.  The original `matrix[axes]` indexed the array with the
    dict itself, which numpy rejects; a tuple of `slice` objects is built
    instead.
    """
    if axes is None:
        axes = {}
    selector = tuple(
        slice(*axes.get(axis, (None, None)))
        for axis in range(matrix.ndim)
    )
    return matrix[selector]
| true |
def multiply(list):
    """Print the product of every element of `list`.

    The manual while/index loop is replaced by math.prod (empty input
    prints 1, matching the original's running product).  The parameter
    name shadows the builtin `list`; it is kept unchanged so keyword
    callers are not broken.
    """
    import math
    print(math.prod(list))
multiply([8,2,3,-1,7]) | true |
1c6a771e4ec402adbe69ef06f897c5fb9633b509 | Python | PsychoinformaticsLab/pliers | /pliers/tests/extractors/test_misc_extractors.py | UTF-8 | 3,597 | 2.6875 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | from pliers.extractors import (MetricExtractor,
VADERSentimentExtractor,
merge_results)
from pliers.stimuli import SeriesStim, TextStim
from pliers.tests.utils import get_test_data_path
import numpy as np
import scipy
from pathlib import Path
import pytest
from os import environ
def test_metric_extractor():
    """Exercises MetricExtractor over in-memory and file-backed SeriesStims:
    single/multiple functions, custom callables, name overrides, lambdas,
    index subsetting, and long/wide result formats."""
    # Custom callables passed alongside library functions.
    def dummy(array):
        return array[0]
    def dummy_list(array):
        return array[0], array[1]
    f = Path(get_test_data_path(), 'text', 'test_lexical_dictionary.txt')
    stim = SeriesStim(data=np.linspace(1., 4., 20), onset=2., duration=.5)
    stim_file = SeriesStim(filename=f, column='frequency', sep='\t',
                           index_col='text')
    ext_single = MetricExtractor(functions='numpy.mean')
    # Restrict the computation to a subset of the series index.
    ext_idx = MetricExtractor(functions='numpy.mean',
                              subset_idx=['for', 'testing', 'text'])
    ext_multiple = MetricExtractor(functions=['numpy.mean', 'numpy.min',
                                              scipy.stats.entropy, dummy,
                                              dummy_list])
    # var_names overrides the default (function-derived) column names.
    ext_names = MetricExtractor(functions=['numpy.mean', 'numpy.min',
                                           scipy.stats.entropy, dummy,
                                           dummy_list, 'tensorflow.reduce_mean'],
                                 var_names=['mean', 'min', 'entropy',
                                            'custom1', 'custom2', 'tf_mean'])
    # Functions may also be given as lambda source strings.
    ext_lambda = MetricExtractor(functions='lambda x: -np.max(x)',
                                 var_names='custom_function')
    r = ext_single.transform(stim)
    r_file = ext_single.transform(stim_file)
    r_file_idx = ext_idx.transform(stim_file)
    r_multiple = ext_multiple.transform(stim)
    r_names = ext_names.transform(stim)
    r_lambda = ext_lambda.transform(stim)
    r_df = r.to_df()
    r_file_df = r_file.to_df()
    r_file_idx_df = r_file_idx.to_df()
    r_multiple_df = r_multiple.to_df()
    r_long = r_multiple.to_df(format='long')
    r_names_df = r_names.to_df()
    r_lambda_df = r_lambda.to_df()
    # Wide format: one row per stim; long format: one row per function.
    for res in [r_df, r_file_df, r_multiple_df]:
        assert res.shape[0] == 1
    assert r_long.shape[0] == len(ext_multiple.functions)
    # Onset/duration are carried through from the stim.
    assert r_df['onset'][0] == 2
    assert r_df['duration'][0] == .5
    assert r_df['mean'][0] == 2.5
    assert np.isclose(r_file_df['mean'][0], 11.388, rtol=0.001)
    assert np.isclose(r_file_idx_df['mean'][0], 12.582, rtol=0.001)
    assert all([m in r_multiple_df.columns for m in ['mean', 'entropy']])
    assert r_multiple_df['amin'][0] == 1.
    assert r_multiple_df['dummy'][0] == 1.
    # Multi-valued results come back as a single ndarray cell.
    assert r_multiple_df['dummy_list'][0][0] == np.linspace(1., 4., 20)[0]
    assert r_multiple_df['dummy_list'][0][1] == np.linspace(1., 4., 20)[1]
    assert type(r_multiple_df['dummy_list'][0]) == np.ndarray
    assert r_names_df.columns[-3] == 'custom1'
    assert r_names_df.columns[-2] == 'custom2'
    assert r_names_df.columns[-1] == 'tf_mean'
    assert np.isclose(r_names_df['mean'][0], r_names_df['tf_mean'][0])
    assert r_lambda_df['custom_function'][0] == -4
def test_metric_er_as_stim():
    """Feed a VADER sentiment result straight into MetricExtractor and
    check that the neg/pos/neu scores sum to one."""
    stim = TextStim(text='This is a test')
    sentiment_ext = VADERSentimentExtractor()
    score_columns = ['sentiment_neg', 'sentiment_pos', 'sentiment_neu']
    metric_ext = MetricExtractor(functions='numpy.sum',
                                 subset_idx=score_columns)
    result = metric_ext.transform(sentiment_ext.transform(stim))
    df = merge_results(result, extractor_names=False)
    assert np.isclose(df['sum'][0], 1)
6c55649ed526c1c4b41563c17f2e53c7f03c8908 | Python | ZhangSteven/trustee | /test/test_TSCF_upload.py | UTF-8 | 4,287 | 2.71875 | 3 | [] | no_license | """
Test the read_holding() method from open_holding.py
"""
import unittest2
from xlrd import open_workbook
from trustee.utility import get_current_directory
from small_program.read_file import read_file
from trustee.TSCF_upload import read_line_jones, update_position, \
get_days_maturity_LYE
from trustee.geneva import read_line
from os.path import join
from datetime import datetime
class TestTSCFUpload(unittest2.TestCase):
    """Tests for the TSCF upload helpers: parsing the Jones holding
    spreadsheet, merging it into the Geneva appraisal positions, and the
    days-since-last-year-end computation.  Relies on fixture spreadsheets
    under samples/."""
    def __init__(self, *args, **kwargs):
        super(TestTSCFUpload, self).__init__(*args, **kwargs)
    def setUp(self):
        """
        Run before a test function
        """
        pass
    def tearDown(self):
        """
        Run after a test finishes
        """
        pass
    def test_read_line_jones(self):
        # Parse the Jones holding fixture and spot-check three positions.
        input_file = join(get_current_directory(), 'samples', 'Jones Holding 2017.12.20.xlsx')
        holding, row_in_error = read_file(input_file, read_line_jones)
        self.assertEqual(len(holding), 105)
        self.verify_jones_position1(holding[0])
        self.verify_jones_position2(holding[4])
        self.verify_jones_position3(holding[104])
    def test_update_position(self):
        # Merge Jones yield/cost data into the Geneva appraisal positions.
        input_file = join(get_current_directory(), 'samples', 'Jones Holding 2017.12.20.xlsx')
        jones_holding, row_in_error = read_file(input_file, read_line_jones)
        input_file = join(get_current_directory(), 'samples', '12229 local appraisal 20180103.xlsx')
        geneva_holding, row_in_error = read_file(input_file, read_line)
        self.assertEqual(len(geneva_holding), 88)
        update_position(geneva_holding, jones_holding)
        self.verify_geneva_position1(geneva_holding[1])
        self.verify_geneva_position2(geneva_holding[4])
        self.verify_geneva_position3(geneva_holding[87])
    def test_get_days_maturity_LYE(self):
        # The test works only in year 2018. Need to change the number
        # of days since last year end if testing in year 2019 or later.
        self.assertEqual(get_days_maturity_LYE(datetime(2019,2,1)), 397)
        self.assertEqual(get_days_maturity_LYE(datetime(2018,1,25)), 25)
    def verify_jones_position1(self, position):
        # first position in Jones Holding 2017.12.20.xlsx
        # FR0013101599 CNP ASSURANCES (CNPFP 6 01/22/49 FIXED)
        self.assertEqual(len(position), 3)
        self.assertEqual(position['ISIN'], 'FR0013101599')
        self.assertAlmostEqual(position['Purchase Cost'], 98.233)
        self.assertAlmostEqual(position['Yield at Cost'], 6.125)
    def verify_jones_position2(self, position):
        # 5th position in Jones Holding 2017.12.20.xlsx
        self.assertEqual(len(position), 3)
        self.assertEqual(position['ISIN'], 'HK0000171949')
        self.assertAlmostEqual(position['Purchase Cost'], 100)
        self.assertAlmostEqual(position['Yield at Cost'], 6.15)
    def verify_jones_position3(self, position):
        # last position in Jones Holding 2017.12.20.xlsx
        self.assertEqual(len(position), 3)
        self.assertEqual(position['ISIN'], 'XS1736887099')
        self.assertAlmostEqual(position['Purchase Cost'], 100)
        self.assertAlmostEqual(position['Yield at Cost'], 4.8)
    def verify_geneva_position1(self, position):
        # first CNY position in 12229 local appraisal 20180103.xlsx
        # HK0000171949 HTM
        self.assertEqual(position['Group1'], 'Chinese Renminbi Yuan')
        self.assertAlmostEqual(position['Yield at Cost'], 6.15)
        self.assertAlmostEqual(position['Purchase Cost'], 100)
    def verify_geneva_position2(self, position):
        # first HKD position in 12229 local appraisal 20180103.xlsx
        # HK0000163607 HTM
        self.assertEqual(position['Group1'], 'Hong Kong Dollar')
        self.assertAlmostEqual(position['Yield at Cost'], 6.193005)
        self.assertAlmostEqual(position['Purchase Cost'], 99.259)
    def verify_geneva_position3(self, position):
        # last position in 12229 local appraisal 20180103.xlsx
        # US912803AY96 HTM
        self.assertEqual(position['Group1'], 'United States Dollar')
        self.assertAlmostEqual(position['Yield at Cost'], 6.234)
        self.assertAlmostEqual(position['Purchase Cost'], 39.9258742)
| true |
2c1363c165c79fac5b1372501d4f7bc9f108b42d | Python | ZX1209/gl-algorithm-practise | /leetcode-gl-python/leetcode-移动零.py | UTF-8 | 1,311 | 4.125 | 4 | [] | no_license | # leetcode-移动零.py
# 给定一个数组 nums,编写一个函数将所有 0 移动到数组的末尾,同时保持非零元素的相对顺序。
# 示例:
# 输入: [0,1,0,3,12]
# 输出: [1,3,12,0,0]
# 说明:
# 必须在原数组上操作,不能拷贝额外的数组。
# 尽量减少操作次数。
"""
思路:
嗯,,原数组上操作..
减少移动次数
一般是不会用删除什么的吧..
虽说挺方便的
先用下吧..
果然高效的是直接移动啊
或者说交换下..可以
"""
class Solution(object):
    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Moves every 0 to the end of nums in place while preserving the
        relative order of the non-zero elements.  The original repeatedly
        called list.remove(0) inside a try/except (exception-driven control
        flow, O(n^2)); this is a single O(n) pass with a write pointer that
        swaps each non-zero element into place.
        """
        write = 0
        for read in range(len(nums)):
            if nums[read] != 0:
                nums[write], nums[read] = nums[read], nums[write]
                write += 1
# 执行用时为 40 ms 的范例
# class Solution(object):
# def moveZeroes(self, nums):
# """
# :type nums: List[int]
# :rtype: void Do not return anything, modify nums in-place instead.
# """
# i = j = 0
# for i in range(len(nums)):
# if nums[i] != 0:
# nums[i], nums[j] = nums[j], nums[i]
# j += 1 | true |
f448bd3da735b9a1926e796dc7d825106f484051 | Python | rkomar4815/twitter_analytics | /FollowerAnalysis/fcompare.py | UTF-8 | 1,679 | 2.671875 | 3 | [] | no_license | import pandas as pd
# This section compares followers to following users
A = pd.read_csv('3_28_19 NatSecAction Following.csv', index_col=0)
print(A)
B = pd.read_csv('3_28_19 NatSecAction Followers.csv', index_col=0)
print(B)
C = pd.merge(
left=A, right=B,
how='outer', left_index=True,
right_index=True, suffixes=['_a', '_b']
)
print(C)
not_in_a = C.drop(A.index)
not_in_b = C.drop(B.index)
not_in_a.to_csv('not_in_a.csv')
not_in_b.to_csv('not_in_b.csv')
D = pd.read_csv('not_in_b.csv')
E = pd.read_csv('not_in_a.csv')
a_notin_b = len(D.index)
print(
'Number of accounts you follow that do not follow back: '
+ str(a_notin_b)
)
a_total = len(A.index)
print(
'The total number of accounts that you follow: '
+ str(a_total)
)
nonfollowbackratio = (a_notin_b/a_total)*100
percent_nonfollowback = str(nonfollowbackratio) + '%'
print(
'Percent of accounts that you follow that do not follow back: '
+ str(percent_nonfollowback)
)
# This section identifies ranks followers by their followers
sortedE = E.sort_values(by=['followers_count_b'], ascending=False)
E_columns_to_remove = [
'id_str_a', 'name_a', 'location_a', 'description_a',
'created_at_a', 'url_a', 'verified_a', 'lang_a',
'friends_count_a', 'followers_count_a', 'statuses_count_a',
'favourites_count_a', 'last_tweeted_at_a', 'id_str_b',
'location_b', 'created_at_b', 'url_b', 'lang_b',
'_count_b', 'last_tweeted_at_b', 'name_b',
'statuses_count_b'
]
print(sortedE.columns)
sortedE2 = sortedE.drop(columns=E_columns_to_remove)
print(sortedE2.columns)
sortedE2.to_csv('sorted_natesecationfollowers_by_followcount.csv')
| true |
b96b696a609c434b12e96404940ed4ff068de0c9 | Python | quentinxxz/music-crawler | /PlayListDownloader.py | UTF-8 | 2,021 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
import urllib
import urllib2
import sys
from StringIO import *
import gzip
class PlayListDownloader(object):
    """This class is a http downloader to dowload
    music playlist as json text (Python 2: uses urllib2/StringIO)."""
    '''
    @param timeout
    @param cookie :please use the cookie when you are in login state
    '''
    def __init__(self,timeout,cookie):
        self.timeout=timeout
        # NOTE(review): user_agent is never used; the User-Agent actually
        # sent is the Chrome string inside self.headers below -- verify.
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        # Request headers mimicking a logged-in browser on music.163.com;
        # the caller-provided cookie carries the session.
        self.headers={
            'Host' : 'music.163.com',
            'Connection': 'keep-alive',
            'Origin':'http://music.163.com',
            'RA-Sid':'7D78DE40-20150702-170828-f2248c-87c071',##TODO ?? what's that
            'RA-Ver':'3.0.7',
            'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Cookie' : cookie,
            'Referer' : 'http://music.163.com/',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'
        }
    '''
    @param url : a playList url , example "http://music.163.com/api/playlist/detail?id=123905597"
    '''
    def downloadPlayList(self,url):
        # Fetch the URL with the prepared headers; gunzip the body when the
        # server marks it gzip-compressed, then re-encode the UTF-8 payload
        # into the filesystem encoding (Python 2 byte-string handling).
        req = urllib2.Request(url, headers=self.headers)
        response = urllib2.urlopen(req,timeout=self.timeout)
        if response.info().get('Content-Encoding') == 'gzip':
            buf = StringIO(response.read())
            f = gzip.GzipFile(fileobj=buf)
            data = f.read()
            playListJson = data.decode('utf-8','replace').encode(sys.getfilesystemencoding())
            f.close()
        else:
            # NOTE(review): this bare call has no effect -- likely leftover.
            sys.getfilesystemencoding()
            the_page = response.read()
            playListJson = the_page.decode('utf-8','replace').encode(sys.getfilesystemencoding())
        response.close()
        return playListJson
# Ad-hoc demo (Python 2: note the bare print statement).  Replace `cookie`
# with a real logged-in cookie before running.
if __name__ == '__main__':
    cookie = 'your own cookie'
    playListDownloader = PlayListDownloader(5,cookie)
    print playListDownloader.downloadPlayList("http://music.163.com/api/playlist/detail?id=123905597")
| true |
8b14d3bf16771dc7a6856a23c9e3f37cc613ada6 | Python | caitaozhan/LeetCode | /backtracking/216.combination-sum-iii.py | UTF-8 | 3,033 | 3.65625 | 4 | [] | no_license | #
# @lc app=leetcode id=216 lang=python3
#
# [216] Combination Sum III
#
# https://leetcode.com/problems/combination-sum-iii/description/
#
# algorithms
# Medium (55.78%)
# Likes: 1244
# Dislikes: 54
# Total Accepted: 180.6K
# Total Submissions: 313.9K
# Testcase Example: '3\n7'
#
#
# Find all possible combinations of k numbers that add up to a number n, given
# that only numbers from 1 to 9 can be used and each combination should be a
# unique set of numbers.
#
# Note:
#
#
# All numbers will be positive integers.
# The solution set must not contain duplicate combinations.
#
#
# Example 1:
#
#
# Input: k = 3, n = 7
# Output: [[1,2,4]]
#
#
# Example 2:
#
#
# Input: k = 3, n = 9
# Output: [[1,2,6], [1,3,5], [2,3,4]]
#
#
#
# @lc code=start
import itertools
from typing import List
class SolutionIter:
    '''itertools-based one-liner (time beat 95%): enumerate all ascending
    k-combinations of 1..9 and keep those summing to n.
    '''
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        candidates = itertools.combinations(range(1, 10), k)
        return [combo for combo in candidates if sum(combo) == n]
class Solution:
    '''Backtracking solution (9/12/2020).

    Explores ascending combinations of the digits 1..9 and keeps those of
    length k whose sum is n.
    '''
    def __init__(self):
        self.ans = []   # results of the most recent query
        self.k = 0
        self.n = 0
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        self.k, self.n = k, n
        # Bug fix: reset the result list so a reused instance does not
        # return results accumulated from earlier calls.
        self.ans = []
        self.backtrack(1, [])
        return self.ans
    def backtrack(self, idx, lst):
        """Extend the partial combination `lst` with digits >= idx."""
        if len(lst) == self.k:
            if sum(lst) == self.n:
                self.ans.append(lst[:])
            return
        if sum(lst) > self.n:
            return  # digits are positive, so a larger sum cannot recover
        for i in range(idx, 10):
            lst.append(i)
            self.backtrack(i + 1, lst)
            lst.pop()
class Solution:
    '''Recursion in a Backtrack way
    5/10/2022
    '''
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        results = []
        def explore(combo, remaining, first):
            # combo holds the digits chosen so far; remaining = n - sum(combo)
            if len(combo) == k:
                if remaining == 0:
                    results.append(list(combo))
                return
            if remaining < 0:
                return
            for digit in range(first, 10):
                combo.append(digit)
                explore(combo, remaining - digit, digit + 1)
                combo.pop()
        explore([], n, 1)
        return results
class Solution:
    '''Recursion in a DFS way: every digit 1..9 is either skipped or taken.
    '''
    def combinationSum3(self, k: int, n: int) -> List[List[int]]:
        found = []
        def walk(chosen, running, digit):
            '''running is the sum of the digits chosen among 1..digit-1.'''
            if running > n:
                return
            if len(chosen) == k:
                if running == n:
                    found.append(chosen[:])
                return
            if digit == 10:
                return
            # branch 1: leave `digit` out
            walk(chosen, running, digit + 1)
            # branch 2: take `digit`
            walk(chosen + [digit], running + digit, digit + 1)
        walk([], 0, 1)
        return found
# Ad-hoc smoke test against the last `Solution` defined above (the DFS one).
s = Solution()
print(s.combinationSum3(3, 9))
# @lc code=end
| true |
40a30fa300495e099171c764d10c79c6aa02311e | Python | tomoeyukishiro/code-path | /num_range.py | UTF-8 | 629 | 3.28125 | 3 | [] | no_license | class Solution:
# @param A : list of integers
# @param B : integer
# @param C : integer
# @return an integer
def numRange(self, A, B, C):
n = len(A)
sets = []
for i in range(n):
sum = 0
j = i
while sum < B and j < n:
sum += A[j]
j += 1
while sum >= B and sum <= C and j <= n:
if sum <= C:
if A[i:j]:
sets.append(A[i:j])
if j < n:
sum += A[j]
j += 1
return len(sets) | true |
675d8694002a675102e70f8df5dd1fcb7d09fa01 | Python | promila3/HackerRank | /Python/strings/stringValidators.py | UTF-8 | 941 | 3.375 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/string-validators/problem?h_r=next-challenge&h_v=zen
def checkIsAlphanum(s):
    """Print True when at least one character of s is alphanumeric, else False."""
    print(any(ch.isalnum() for ch in s))
def checkIsAlpha(s):
    """Print True when at least one character of s is alphabetic, else False."""
    print(any(ch.isalpha() for ch in s))
def checkIsDigit(s):
    """Print True when at least one character of s is a digit, else False."""
    print(any(ch.isdigit() for ch in s))
def checkIsLower(s):
    """Print True when at least one character of s is lowercase, else False."""
    print(any(ch.islower() for ch in s))
def checkIsUpper(s):
    """Print True when at least one character of s is uppercase, else False."""
    print(any(ch.isupper() for ch in s))
# HackerRank driver: read one line from stdin and run every validator on it,
# printing one True/False line per check.
if __name__ == '__main__':
    s = input()
    checkIsAlphanum(s)
    checkIsAlpha(s)
    checkIsDigit(s)
    checkIsLower(s)
    checkIsUpper(s)
| true |
82990967d5f27d5d01b4dc754ff1c60b865531bf | Python | niesmo/sign-language-classification | /src/algorithm/k-means-client.py | UTF-8 | 3,393 | 2.8125 | 3 | [] | no_license | import sqlite3 as lite
import csv
import sys, os, inspect, logging
import math, collections, numpy
from sklearn.cluster import KMeans
from sklearn import datasets
from sklearn.decomposition import PCA
# Clustering configuration.
NUMBER_OF_CLUSTERS = 7
# Minimum confidence a row must exceed to be included by loadData's query.
CONFIDENCY_PERCENT = 0.0
# Resolve the SQLite database path relative to this source file.
curr_dir = os.path.dirname(inspect.getfile(inspect.currentframe()))
db_file = os.path.abspath(os.path.join(curr_dir, '../data/db/data.db'))
# global data files
testingData = []
testingDataLabels = []
trainingData = []
trainingDataLabels = []
# these are the points that we use to find the labels for each cluster
knownPoints = {}
# defining the logger
logger = logging.getLogger("K-Means Algorithm")
def loadData(queryFilename):
    """Read ../data/db/queries/<queryFilename>.sql, append a confidence
    filter, run it against the SQLite DB, and split the rows into the
    module-level training/testing lists.  (Python 2: uses dict.has_key.)"""
    logger.debug("Openning the query file " + queryFilename)
    # load in the query file
    query = ""
    queryFile = open("../data/db/queries/"+ queryFilename +".sql")
    for l in queryFile:
        query += l
    # add the confidence in the query
    # WARNING: if you change this, you have to change the MAGIC number
    query += " WHERE confidence >" + str(CONFIDENCY_PERCENT)
    magic = 3175
    logger.debug("Openning the database connection")
    # query the data base
    connection = lite.connect(db_file)
    with connection:
        cursor = connection.cursor()
        logger.debug("Executing the query")
        cursor.execute(query)
        rows = cursor.fetchall()
        # define the training and testing data
        # training <= 2309 Testing >= 2526
        for row in rows:
            # MAGIC NUMBER -> Change when needed
            # row[0]: sample id, row[1]: label, row[2:]: feature vector
            # (per the query's column order -- verify against the .sql file).
            if row[0] <= magic:
                trainingData.append(list(row[2:]))
                trainingDataLabels.append(row[1])
                # Remember the first sample of each label as its reference point.
                if not knownPoints.has_key(row[1]):
                    knownPoints[row[1]] = numpy.array(row[2:])
            else:
                testingData.append(list(row[2:]))
                testingDataLabels.append(row[1])
    print "TRAINING", len(trainingData)
    print "TESTING", len(testingData)
    for label in knownPoints:
        print label, knownPoints[label][:2]
def preProcess():
    """Placeholder pre-processing hook; currently only logs that it ran."""
    logger.info("pre-processing the data ...")
def main():
    """Fit k-means on the training rows, map each known label to its nearest
    centroid, and print label/cluster distributions (Python 2 prints)."""
    logger.info("main function: defining the k-means estimator")
    # NOTE(review): precompute_distances and n_jobs were removed from
    # sklearn's KMeans in newer versions -- verify the pinned version.
    kmeans = KMeans(n_clusters=NUMBER_OF_CLUSTERS, init='k-means++', n_init=10, max_iter=300, tol=0.0001, precompute_distances='auto', verbose=0, random_state=None, copy_x=True, n_jobs=1)
    # PCA
    logger.info("Running k-means for " + str(len(trainingData)) + " data points")
    kmeans.fit(trainingData)
    logger.info("Finished running k-means")
    # finding what cluster are what labels
    # TODO: put this in a function
    labelToLetterMap = {}
    for p in knownPoints:
        # Nearest centroid (Euclidean) to each label's reference point.
        m_min = 10000000;
        for i, cluster in enumerate(kmeans.cluster_centers_):
            dist = numpy.linalg.norm(cluster - knownPoints[p])
            if dist < m_min:
                m_min = dist
                labelToLetterMap[p] = i
    print "\n\nLabel to letter mapping:"
    print labelToLetterMap
    print "\n\nDistribution of Training labels"; print 50 * '-'
    counter=collections.Counter(kmeans.labels_)
    print counter
    print "\n\nDistribution of Test Data"; print 50 * '-'
    counter=collections.Counter(testingDataLabels)
    print counter
    testResults = kmeans.predict(testingData)
    print "\n\nTest Results after running k-means"; print 50 * '-'
    counter=collections.Counter(testResults)
    print counter
# Script entry point: pull rows from SQLite, then cluster them (Python 2).
if __name__ == "__main__":
    # bring in all the data
    loadData('all-relative-data')
    # pre process the data (currently only logs)
    preProcess()
    # call the main function
main() | true |
7eb0e9a2949b96d30e1f1b22d8c04c1f3b9619d7 | Python | berrybretch/ficBuster | /space/validator.py | UTF-8 | 728 | 3.15625 | 3 | [] | no_license | from urllib.parse import urlparse
import re
def validate_url(url: str) -> str:
    """
    Returns url (without a trailing slash) if it is a valid SpaceBattles
    thread "reader" link, or raises ValueError.
    params:
        url:str
    returns:
        url:str
    raises:
        ValueError: empty url, non-https scheme, wrong host, or a path
        that is not a /threads/<slug>.<id>/reader link.
    """
    if not url:
        # Bug fix: this used to *return* the ValueError instead of raising it.
        raise ValueError("url is empty")
    parsed = urlparse(url)
    # The "." before the numeric thread id is now escaped; previously it
    # matched any character.
    regex = re.compile(r"/threads/[a-z0-9\-]+\.[0-9]+/reader/?")
    if parsed.scheme != "https":
        raise ValueError("Not https")
    if parsed.netloc != "forums.spacebattles.com":
        raise ValueError("Wrong net location")
    if not regex.fullmatch(parsed.path):
        raise ValueError("Need the reader link")
    if url.endswith("/"):
        url = url.rstrip("/")
    return url
| true |
8c70fe330667dbac22b7599850cbb698458c51ce | Python | Little-girl-1992/little_code | /distance/Cosine.py | UTF-8 | 351 | 3.34375 | 3 | [] | no_license | # -*-coding:utf-8-*-
"""The cosine of the angle between two vectors measures how much their
directions differ; machine learning borrows the notion to measure the
difference between sample vectors."""
from numpy import *
# Two vectors A, B
vector1=[0.2,0.3,0.5]
vector2=[0.1,0.2,0.3]
# Cosine of the included angle: dot(A, B) / (|A| * |B|).
# (This file is Python 2 -- note the bare print statement below.)
cosV12= dot(vector1,vector2)/(linalg.norm(vector1)*linalg.norm(vector2))
print cosV12
| true |
cce8af0881f42314b41c0652c74372be9333429e | Python | Lucas-Rufino/Information-Retrieval-System-for-TV-Series | /ranking/vectorial_model.py | UTF-8 | 544 | 2.9375 | 3 | [] | no_license | import numpy as np
import math
class Vectorial_Model(object):
    """Vector-space ranking helper: cosine similarity between term vectors."""
    def __init__(self):
        self.a = []  # NOTE(review): unused in this chunk; kept for compatibility
    def norm (self, document):
        """Return *document* scaled to unit Euclidean length; a zero vector
        is returned unchanged to avoid dividing by zero."""
        magnitude = math.sqrt(sum(component ** 2 for component in document))
        if magnitude == 0:
            return document
        return [component / magnitude for component in document]
    def cossine(self, document, query):
        """Cosine of the angle between *document* and *query*."""
        return np.dot(self.norm(document), self.norm(query))
| true |
ee4c2041de0632578331a050d36cc91102f77dc7 | Python | andreallorerung/peach-chatbot-alpha | /FlaskWebProject/chatbot/botinterface/rivescript_loader.py | UTF-8 | 787 | 3.265625 | 3 | [] | no_license | '''Auxiliary module to load rivescript brain information and initialize the
interpreter properly'''
import os
import rivescript
def loadBrain(interpreter, brain):
    '''Load the rivescript data at *brain* into *interpreter*, sort its
    replies, and return the interpreter.'''
    prepared = _loadDirOrFile(interpreter, brain)
    prepared.sort_replies()
    return prepared
def _loadDirOrFile(interpreter, brain):
'''To load either a directory or a file of rivescript brain data'''
new_interpreter = interpreter
if os.path.isdir(brain):
new_interpreter.load_directory(brain)
elif os.path.isfile(brain):
new_interpreter.load_file(brain)
else:
raise ValueError("no directory or file found at specified filepath "
"for chatbot brain:'{}'".format(brain))
return new_interpreter
| true |
db160e9911154f2e86df29274668349fb9e3d68a | Python | bgalgano/ClusNet | /code/old/modules/modules/GaussNet/gauss.py | UTF-8 | 3,256 | 2.625 | 3 | [] | no_license | class Profile:
    def __init__(self,std,im_size):
        # Build a square Gaussian "cluster" image of width im_size with the
        # given standard deviation, centred on the middle pixel.
        # NOTE(review): relies on `signal` (scipy.signal) and `np` (numpy)
        # being imported at module level -- not visible in this chunk; verify.
        self.mid_pixel = int(im_size/2) # 128/2
        self.x, self.y = self.mid_pixel, self.mid_pixel
        self.im_size = im_size
        self.std = std
        self.noise = False
        self.lam = 0.1133929878  # Poisson rate used by add_noise()
        gkern1d = signal.gaussian(self.im_size, std=std).reshape(self.im_size, 1)
        # Outer product of the 1-D kernel with itself gives the 2-D Gaussian.
        self.im = np.outer(gkern1d, gkern1d)
        self.im_lrud = None
        self.im_lr = None
        self.im_ud = None
    def __repr__(self):
        """
        Text rendering of the image matrix (str of the underlying array).
        """
        return str(self.im)
    def to_pandas(self):
        """
        convert metadata (as recarray) to pandas DataFrame
        """
        # NOTE(review): self.meta is never assigned in __init__, so this
        # raises AttributeError unless a caller sets .meta first -- verify.
        self.meta = pd.DataFrame(self.meta)
        return
def add_noise(self):
"""
add Poisson noise to cluster im matrix
"""
self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)
self.im += self.noise
return
def shift(self):
"""
shift cluster randomly within bounds of im
"""
"""
shift cluster randomly within bounds of im
"""
r = self.std
mid = self.mid_pixel #center pixel index of 384x384 image
delta = self.im_size - self.mid_pixel - r - 10
x = np.random.randint(low=-1*delta,high=delta,size=1)[0]
y = np.random.randint(low=-1*delta,high=delta,size=1)[0]
self.x += x
self.y += y
im_shift = np.roll(self.im,shift=y,axis=0)
self.im = np.roll(im_shift,shift=x,axis=1)
return
    def plot(self,spath='../figs/profile/'):
        """
        plot image

        Draws self.im with a colorbar; the figure is closed rather than
        shown or saved (spath is currently unused).
        """
        # NOTE(review): depends on `plt` (matplotlib.pyplot) and
        # `make_axes_locatable` being imported at module level -- not
        # visible in this chunk; verify.
        fig = plt.figure()
        ax = fig.add_subplot(111)
        im = ax.imshow(self.im,interpolation='none',cmap='viridis')
        # NOTE(review): __init__ sets self.im_size, not self.size; this line
        # likely raises AttributeError -- verify.
        ticks = np.arange(0,self.size,50)
        plt.xticks(ticks),plt.yticks(ticks)
        ax = plt.gca()
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.12)
        plt.colorbar(im, cax=cax)
        # plt.show()
        plt.close()
        return None
def flip_lr(self):
im_c = np.zeros((self.im_size,self.im_size))
im_c[self.x,self.y] = 1
im_lr = np.fliplr(self.im)
im_c_lr = np.flipud(im_c)
self.im_lr = im_lr
self.x_lr, self.y_lr = [val[0] for val in np.nonzero(im_c_lr)]
self.im = im_lr
self.x, self.y = self.x_lr, self.y_lr
return None
def flip_ud(self):
im_c = np.zeros((self.im_size,self.im_size))
im_c[self.x,self.y] = 1
im_ud = np.flipud(self.im)
im_c_ud = np.fliplr(im_c)
self.im_ud = im_ud
self.x_ud, self.y_ud = [val[0] for val in np.nonzero(im_c_ud)]
self.im = im_ud
self.x, self.y = self.x_ud, self.y_ud
return None
def flip_lrud(self):
im_c = np.zeros((self.im_size,self.im_size))
im_c[self.x,self.y] = 1
im_lrud = np.fliplr(np.flipud(self.im))
im_c_lrud = np.flipud(np.fliplr(im_c))
self.im_lrud = im_lrud
self.x_lrud, self.y_lrud = [val[0] for val in np.nonzero(im_c_lrud)]
self.im = im_lrud
self.x, self.y = self.x_lrud, self.y_lrud
return None
| true |
994a6e765a0c04ef4a6b7a1d17485f94f19b21dc | Python | Velu2498/Python-basics | /list tuple dictionaries.py | UTF-8 | 1,118 | 4 | 4 | [] | no_license | #Sample Code for List :
example = ["hii",2,3.2,"hellow","bii"]
print (example[3])    # hellow
print (example[4])    # bii
example[0]="hay"      # lists are mutable
print (example[0])    # hay
#Sample Code for Tuples :
year_born = ("Paris Hilton", 1981)
print (year_born[0])  # Paris Hilton
print (year_born[1])  # 1981
#year_born[1] = 2011  __>> any modification will throw error
#Sample Code for Dictionaries :
mydict = {"apples": 430, "bananas": 312, "oranges": 525, "pears": 217}
print ("Cost of apple is " + str(mydict["apples"]))
mydict["apples"] = 800
print ("New Cost of apple is " + str(mydict["apples"]))
# int 1 and str '1' hash differently, so they are distinct keys.
confusion = {}
confusion[1] = 1
confusion['1'] = 2
confusion[1] += 1
# NOTE: `sum` shadows the builtin of the same name.
sum = 0
for k in confusion:
    sum += confusion[k]
print (sum)           # 4  (2 + 2)
# Aliasing vs copying: names2 aliases names1; names3 is a shallow copy.
names1 = ['Amir', 'Barry', 'Chales', 'Dao']
names2 = names1
names3 = names1[:]
names2[0] = 'Alice'   # also changes names1[0]
names3[1] = 'Bob'     # only changes the copy
sum = 0
for ls in (names1, names2, names3):
    if ls[0] == 'Alice':
        sum += 1
    if ls[1] == 'Bob':
        sum += 10
print (sum)           # 12  (1 + 1 + 10)
names1 = ['Amir', 'Barry', 'Chales', 'Dao']
loc = names1.index("Chales")
print (loc)           # 2
names1 = ['Amir', 'Barry', 'Chales', 'Dao']
names2 = [name.lower() for name in names1]
print (names2[2][0]) | true |
f1dbf3a78f1696ca0baf6182e4a995f509dbd04e | Python | bobfri/battle-snake_solo_length_challenge | /server.py | UTF-8 | 11,040 | 3.03125 | 3 | [
"MIT"
] | permissive | import os
import random
import cherrypy
"""
This is a simple Battlesnake server written in Python and was based on the starter snake from battlesnake official
For instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md
"""
class Board(object):
    """Occupancy grid for one Battlesnake game, with a flood-fill
    reachability check used to avoid trapping the snake."""
    def __init__(self, maxx, maxy):
        # Fix: the grid is indexed board[x][y] everywhere, so the outer
        # dimension must be maxx (the original used maxy, which only worked
        # for square boards).  The "visited" key is kept for compatibility.
        self.board=[[{"visited":False,"snake":False} for j in range(maxy)] for i in range(maxx)]
        self.maxx=maxx
        self.maxy=maxy
    def snake(self,snake_piece):
        """Mark the square holding snake_piece (a dict with "x"/"y") occupied."""
        self.board[snake_piece["x"]][snake_piece["y"]]["snake"]=True
    def check(self,move,head,length):
        """Flood-fill (BFS) from the square reached by `move` from `head`.

        Returns {"wontTrap": bool, "visited": int, "move": move};
        wontTrap is True when more than `length` free squares are reachable.
        Fix: visited state is now tracked in a local set -- the original
        mutated a *shallow* copy of self.board, so visited flags leaked
        between successive check() calls (and the start square could be
        revisited and double-counted).
        """
        moves_ressult = {
            "up":{"x":0,"y":1},
            "down":{"x":0,"y":-1},
            "left":{"x":-1,"y":0},
            "right":{"x":1,"y":0}}
        start = {"x": moves_ressult[move]["x"] + head["x"],
                 "y": moves_ressult[move]["y"] + head["y"]}
        seen = {(start["x"], start["y"])}
        to_visite = [start]
        visited_stack = []
        while len(visited_stack) <= length:
            if len(to_visite) == 0:
                return {"wontTrap":False,"visited":len(visited_stack), "move":move}
            tempPlace = to_visite.pop(0)
            visited_stack.append(tempPlace)
            # Same neighbour order as before: up, down, right, left.
            for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
                nx, ny = tempPlace["x"] + dx, tempPlace["y"] + dy
                if 0 <= nx < self.maxx and 0 <= ny < self.maxy \
                        and (nx, ny) not in seen \
                        and not self.board[nx][ny]["snake"]:
                    seen.add((nx, ny))
                    to_visite.append({"x": nx, "y": ny})
        return {"wontTrap":True,"visited":0, "move":move}
    def printboard(self):
        """Dump the raw grid for debugging."""
        print(self.board)
class Battlesnake(object):
    """CherryPy handler implementing the Battlesnake HTTP API
    (/ , /start, /move, /end) plus the move-selection helpers.

    NOTE(review): the helper methods (crashIntoSnake, outOfBoardMove,
    clashWithHead, nearest_food) also carry @cherrypy.expose, so they are
    reachable as HTTP endpoints too -- presumably unintended; verify.
    """
    # Class-level placeholder; move() replaces it with a board sized to the
    # current game on every request.
    board= Board(0,0)
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def index(self):
        # This function is called when you register your Battlesnake on play.battlesnake.com
        # It controls your Battlesnake appearance and author permissions.
        # TIP: If you open your Battlesnake URL in browser you should see this data
        return {
            "apiversion": "1",
            "author": "bobfrit",
            "color": "#80c1ff",
            "head": "silly",
            "tail": "bolt",
        }
    @cherrypy.expose
    @cherrypy.tools.json_in()
    def start(self):
        # This function is called everytime your snake is entered into a game.
        # cherrypy.request.json contains information about the game that's about to be played.
        data = cherrypy.request.json
        print("START")
        return "ok"
    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def move(self):
        # This function is called on every turn of a game. It's how your snake decides where to move.
        # Valid moves are "up", "down", "left", or "right".
        # TODO: Use the information in cherrypy.request.json to decide your next move.
        data = cherrypy.request.json
        head = data["you"]["head"]
        self.board = Board(data["board"]["width"],data["board"]["height"])
        possible_moves = ["up", "down", "left", "right"]
        tryMoves=possible_moves.copy()
        # Moves that could collide head-on with a bigger/equal snake.
        board_sanke_death_move=self.clashWithHead(head)
        # Moves leaving the board or hitting a snake body are removed outright.
        remove_move=self.outOfBoardMove()+self.crashIntoSnake(head)
        remove_move.extend(board_sanke_death_move)
        #self.board.printboard()
        tryMoves=[temp for temp in possible_moves if temp not in remove_move]
        try :
            # If nothing safe remains, fall back to risky head-clash moves.
            if len(tryMoves)==0:
                tryMoves.extend(board_sanke_death_move)
            if len(tryMoves)==1 and len(board_sanke_death_move)==0:
                move= tryMoves[0]
            else:
                print(tryMoves)
                # Prefer moves that approach the nearest food, then discard
                # candidates whose flood-fill says they would trap us.
                tryMovesNearest = self.nearest_food(tryMoves.copy(),head,data["board"]["food"])
                move = random.choice(tryMovesNearest)
                print(tryMovesNearest)
                print(tryMoves)
                trapMove=[]
                trapMove.append(self.board.check(move,data["you"]["head"],data["you"]["length"]))
                print(trapMove)
                print(tryMoves)
                if not trapMove[-1]["wontTrap"]:
                    print(f"before: {tryMoves}")
                    tryMovesNearest.remove(move)
                    print(f"after: {tryMoves}")
                    if len(tryMovesNearest) == 1:
                        print(tryMoves)
                        print(move)
                        tryMoves.remove(move)
                        move=tryMovesNearest[0]
                        trapMove.append(self.board.check(move,data["you"]["head"],data["you"]["length"]))
                    while len(tryMoves)>0:
                        tryMoves.remove(move)
                        if not trapMove[-1]["wontTrap"]:
                            if len(tryMoves)==0:
                                # All candidates trap: take the one reaching
                                # the most squares.
                                tryMoves.extend(board_sanke_death_move)
                                trapMove.sort(key=lambda x:x["visited"])
                                move=trapMove[-1]["move"]
                                print(f"sorted trap: {trapMove}")
                            else:
                                move=tryMoves[0]
                                trapMove.append(self.board.check(move,data["you"]["head"],data["you"]["length"]))
                                print(trapMove)
        except IndexError as e:
            # NOTE(review): if this fires before `move` is assigned, the
            # return below raises UnboundLocalError -- verify.
            print(e)
            print("random")
            #move = random.choice(possible_moves)
        print(f"MOVE: {move}")
        return {"move": move}
    @cherrypy.expose
    @cherrypy.tools.json_in()
    def end(self):
        # This function is called when a game your snake was in ends.
        # It's purely for informational purposes, you don't have to make any decisions here.
        data = cherrypy.request.json
        print("END")
        return "ok"
    #return a list of moves that would kill the snake by crashing into and other snake
    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def crashIntoSnake(self,head):
        moves_ressult = {
            "up":{"x":0,"y":1},
            "down":{"x":0,"y":-1},
            "left":{"x":-1,"y":0},
            "right":{"x":1,"y":0}}
        move_return=[]
        data = cherrypy.request.json
        #head = data["you"]["head"]
        snake_block=[]
        #for peice in data["you"]["body"]:
        #    snake_block.append(peice)
        # Also marks every snake segment on self.board as a side effect.
        for snake in data["board"]["snakes"]:
            for peice in snake["body"]:
                snake_block.append(peice)
                self.board.snake(peice)
            snake_block.pop()#TODO check if the other snake will eat
        for pos_move in moves_ressult:
            temp={"x":(moves_ressult.get(pos_move)["x"]+head["x"]),"y":(moves_ressult.get(pos_move)["y"]+head["y"])}
            if temp in snake_block:
                move_return.append(pos_move)
        return move_return
    #return list of moves that will be out of the board
    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def outOfBoardMove(self):
        moves_ressult = {
            "up":{"x":0,"y":1},
            "down":{"x":0,"y":-1},
            "left":{"x":-1,"y":0},
            "right":{"x":1,"y":0}}
        move_return=[]
        data = cherrypy.request.json
        max_y=data["board"]["height"]
        max_x=data["board"]["width"]
        head = data["you"]["head"]
        for pos_move in moves_ressult:
            temp={"x":(moves_ressult.get(pos_move)["x"]+head["x"]),"y":(moves_ressult.get(pos_move)["y"]+head["y"])}
            if temp["x"]>=max_x or temp["y"]>=max_y or temp["x"]<0 or temp["y"]<0:
                move_return.append(pos_move)
        return move_return
    #reutn move that could crash with an other head that would kill
    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def clashWithHead(self,head):
        moves_ressult = {
            "up":{"x":0,"y":1},
            "down":{"x":0,"y":-1},
            "left":{"x":-1,"y":0},
            "right":{"x":1,"y":0}}
        move_return=[]
        data = cherrypy.request.json
        #head = data["you"]["head"]
        length = data["you"]["length"]
        snake_head=[]
        snake_prediction=[]
        # Only snakes at least as long as us are lethal in a head-on clash.
        for snake in data["board"]["snakes"]:
            if length <= snake["length"] and snake["head"] != head:
                snake_head.append(snake["head"])
        # Every square a dangerous head could reach next turn.
        for snakehead in snake_head:
            for pos_move in moves_ressult:
                snake_prediction.append({"x":(moves_ressult.get(pos_move)["x"]+snakehead["x"]),"y":(moves_ressult.get(pos_move)["y"]+snakehead["y"])})
        for pos_move in moves_ressult:
            temp={"x":(moves_ressult.get(pos_move)["x"]+head["x"]),"y":(moves_ressult.get(pos_move)["y"]+head["y"])}
            if temp in snake_prediction:
                move_return.append(pos_move)
        #print(move_return)
        return move_return
    #return the move that would bring you closes to a food node
    @cherrypy.expose
    @cherrypy.tools.json_in()
    @cherrypy.tools.json_out()
    def nearest_food(self, move, head, food):
        moves_ressult = {
            "up":{"x":0,"y":1},
            "down":{"x":0,"y":-1},
            "left":{"x":-1,"y":0},
            "right":{"x":1,"y":0}}
        move_return =[]
        if len(food) >0:
            # Sort food by Manhattan distance from the head (mutates `food`).
            food.sort(key=lambda x:abs(x["x"]-head["x"])+abs(x["y"]-head["y"]))
            nearestFood=food[0]
            #closest
            #print(nearestFood)
            # Keep the candidate moves that strictly reduce the distance.
            for pos_move in move:
                tempDistance=abs(nearestFood["x"]-head["x"]-moves_ressult.get(pos_move)["x"])+abs(nearestFood["y"]-head["y"]-moves_ressult.get(pos_move)["y"])
                tempDistanceOpissite=abs(nearestFood["x"]-head["x"]+moves_ressult.get(pos_move)["x"])+abs(nearestFood["y"]-head["y"]+moves_ressult.get(pos_move)["y"])
                if tempDistanceOpissite>tempDistance:
                    move_return.append(pos_move)
            if len(move_return)>0:
                #print(move_return)
                return move_return
            else:
                return move
        return move
# Launch the CherryPy server bound to all interfaces on $PORT (8080 default).
if __name__ == "__main__":
    server = Battlesnake()
    cherrypy.config.update({"server.socket_host": "0.0.0.0"})
    cherrypy.config.update(
        {"server.socket_port": int(os.environ.get("PORT", "8080")),}
    )
    print("Starting Battlesnake Server...")
    cherrypy.quickstart(server)
4b5a3e60ceb1c41913f213147dff8d00335c80d1 | Python | Xylons/Jorge_Francisco_MachineL | /P2/KmeansDownsampling.py | UTF-8 | 2,407 | 3.109375 | 3 | [] | no_license | import numpy as np
import pandas as pd
def Kmeans(X_pca, k, isPCA=True, normalize=True, bidimensional=False):
    """
    Kmeans clustering method
    X_pca = dataset with PCA
    k = number of clusters
    isPCA = if the dataset comes from PCA
    normalize = if normalizing is needed
    bidimensional = if the dataset has only 2 variables

    Side effects: writes the centroids to "centroids.csv", prints the
    silhouette score and per-centroid counts, and shows a 2-D or 3-D
    scatter plot of the clustering.
    """
    from mpl_toolkits.mplot3d import Axes3D
    from sklearn.cluster import KMeans
    from sklearn import preprocessing
    import matplotlib.pyplot as plt
    import numpy as np
    import collections
    # Fixed clustering hyper-parameters.
    iterations = 10
    max_iter = 300
    tol = 1e-04
    random_state = 0
    init = "random"
    if(not isPCA):
        X_pca = X_pca.to_numpy()
    if(normalize):
        scaler = preprocessing.StandardScaler()
        X_pca = scaler.fit_transform(X_pca)
    # NOTE(review): k and init are passed positionally (n_clusters, init);
    # newer scikit-learn made most KMeans args keyword-only -- verify version.
    km = KMeans(k, init, n_init = iterations ,max_iter= max_iter, tol = tol,random_state = random_state)
    labels = km.fit_predict(X_pca)
    # `map` shadows the builtin of the same name (kept as-is).
    map = collections.Counter(labels)
    pd.DataFrame(km.cluster_centers_).to_csv("centroids.csv")
    from sklearn import metrics
    print ("Silhouette Score:\n"+str(metrics.silhouette_score(X_pca, labels)))
    print("\nCentroids with number of ocurrences:")
    for x in range(0,np.size(km.cluster_centers_,0)):
#        print(str(km.cluster_centers_[x])+'\t\tlabel: '+str(x)+' number of ocurrences: '+str(map[x]))
        print('{:<40s} {:<30s}'.format(str(km.cluster_centers_[x]), 'label: '+str(x)+' number of ocurrences: '+str(map[x])))
    if (bidimensional):
        plt.xlabel('Roll')
        plt.ylabel('Pitch')
        x = X_pca[:,0]
        y = X_pca[:,1]
        plt.scatter(x,y, c = labels)
        # plotting centroids
        plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], c='red',s=50)
        plt.show()
    else:
        fig = plt.figure()
        ax = Axes3D(fig)
        x = X_pca[:,0]
        y = X_pca[:,1]
        z = X_pca[:,2]
#        plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], km.cluster_centers_[:,2], c='red',s=50)#
        ax.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], km.cluster_centers_[:,2], c='red',s=50)
        ax.scatter(x,y,z, c = labels)
        plt.show()
# Load the dataset and drop the first four columns plus the last one
# (presumably ids/metadata -- verify against the CSV), then cluster.
df = pd.read_csv('task3_dataset_noattacks.csv')
df = df.drop(df.columns[[0, 1, 2, 3, df.columns.size-1]], axis=1)
Kmeans(df, 10, False, False, True) | true |
3a72b9f13532eb95299ca64e8719f124dc846799 | Python | NurbukeTeker/EmotionRecogFlaskApp | /user.py | UTF-8 | 285 | 2.8125 | 3 | [
"MIT"
] | permissive | class User(object):
    def __init__(self,usermail):
        # usermail: identifier for this user (an e-mail address).
        self.usermail = usermail
        # isIn: presence/login flag; starts out False.
        self.isIn = False
    def update(self):
        # Mark the user as present / logged in.
        self.isIn = True
def userisIn(self):
if self.isIn:
return True
else:
return False | true |
d033a9b09bd84ab8c853b331ee0585b9d6ef2cde | Python | homeworkprod/chatrelater | /tests/test_serialization.py | UTF-8 | 1,510 | 2.96875 | 3 | [
"MIT"
] | permissive | """
:Copyright: 2007-2021 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
from pathlib import Path
from tempfile import mkstemp
import pytest
from chatrelater.serialization import serialize_data_to_file, load_data
def test_load_save_data(tmp_path):
    """Test saving data and then loading it again.
    Also, KeyErrors are expected if required keys are missing.

    (tmp_path is pytest's per-test temporary directory fixture.)
    """
    # Assure that missing keys raise a ``KeyError``.
    required_keys = frozenset(['nicknames', 'relations', 'directed'])
    for key in required_keys:
        # Drop one required key at a time.
        keys = set(required_keys)
        keys.remove(key)
        assert_required_keys(tmp_path, dict.fromkeys(keys), True)
    assert_required_keys(tmp_path, {'nicknames': None}, True)
    assert_required_keys(tmp_path, dict.fromkeys(required_keys), False)
    # Assure the exact data saved will be loaded afterwards.
    data = {
        'nicknames': 123,
        'relations': 456,
        'directed': True,
    }
    filename = generate_tmp_file(tmp_path)
    serialize_data_to_file(data, filename=filename)
    assert load_data(filename) == (123, 456, True)
def generate_tmp_file(tmp_path: Path) -> Path:
    """Create an empty temporary file inside *tmp_path* and return its path."""
    import os  # local import: keep this helper self-contained

    fd, tmp_filename = mkstemp(dir=tmp_path)
    # Bug fix: mkstemp returns an *open* file descriptor; the original
    # discarded it, leaking one descriptor per call.
    os.close(fd)
    return Path(tmp_filename)
def assert_required_keys(tmp_path, data, should_raise=False):
    """Serialize *data* to a temp file and check that loading it raises
    KeyError exactly when *should_raise* is set."""
    filename = generate_tmp_file(tmp_path)
    serialize_data_to_file(data, filename=filename)
    if should_raise:
        with pytest.raises(KeyError):
            load_data(filename)
    else:
        load_data(filename)
| true |
6ee08ea5415c568a3ad38ad7fcf98191252b0e97 | Python | CyCTW/NCTU-EmbeddedSystem-108-Project | /client/module/IMU.py | UTF-8 | 788 | 2.96875 | 3 | [] | no_license | import smbus
class IMU(object):
    """Thin wrapper around an I2C IMU device on SMBus bus 1."""
    def __init__(self):
        # NOTE(review): self.ADDRESS is read by the helpers below but never
        # assigned here -- presumably set by a subclass; verify before use.
        self.bus = smbus.SMBus(1)
    def write_byte(self, adr, value):
        """Write one byte to device register *adr*."""
        self.bus.write_byte_data(self.ADDRESS, adr, value)
    def read_byte(self, adr):
        """Read one byte from device register *adr*."""
        return self.bus.read_byte_data(self.ADDRESS, adr)
    def read_word(self, adr, rf = 1):
        """Read a 16-bit word at *adr* (rf=1: little endian, rf=0: big endian)."""
        first = self.read_byte(adr)
        second = self.read_byte(adr + 1)
        if rf == 1:
            low, high = first, second
        else:
            low, high = second, first
        return (high << 8) + low
    def read_word_2c(self, adr, rf = 1):
        """Read a 16-bit word at *adr* and interpret it as two's complement."""
        raw = self.read_word(adr, rf)
        return raw - (1 << 16) if raw & 0x8000 else raw
| true |
68fd23dbee7098b259c93593106257518fe15a0d | Python | amarsubedi/pynepal | /pynepal.py | UTF-8 | 2,636 | 3.203125 | 3 | [] | no_license | import json
# Parse all provinces (the file handle is immediately rebound to the parsed
# JSON data, so the name json_provinces ends up holding a list of dicts).
with open('pynepal/db/provinces.json', 'r') as json_provinces:
    json_provinces = json.load(json_provinces)
# Parse all districts.
with open('pynepal/db/districts.json', 'r') as json_districts:
    json_districts = json.load(json_districts)
class AbstractObj(object):
    """
    Base class for Province, District, etc.: stores arbitrary keyword
    arguments as instance attributes.
    """
    def __init__(self, **kwargs):
        # Copy every keyword onto the instance without clobbering anything
        # already defined on the class (e.g. the Province.districts property).
        for attr, val in kwargs.items():
            if not hasattr(self, attr):
                setattr(self, attr, val)
    def __repr__(self):
        if hasattr(self, "name"):
            return "{}('{}')".format(self.__class__.__name__,getattr(self, "name"))
        # Bug fix: this branch previously returned None, which makes repr()
        # raise TypeError for objects without a "name" attribute.
        return super(AbstractObj, self).__repr__()
class RuralMuncipality(AbstractObj):
    """
    Represents a rural municipality.  (Class name keeps the original
    "Muncipality" spelling for backward compatibility.)
    """
    pass
class Municipality(AbstractObj):
    """
    Represents a municipality.
    """
    pass
class SubMetropolitanCity(AbstractObj):
    """
    Represents a sub-metropolitan city.
    """
    pass
class MetropolitanCity(AbstractObj):
    """
    Represents a metropolitan city.  (The docstring previously said
    "submetropolitian" -- a copy/paste slip.)
    """
    pass
class District(AbstractObj):
    """
    Represents a district; built from the districts JSON records.
    """
    pass
# Materialise one District object per record in the districts database.
districts = [District(**record) for record in json_districts]
class Province(AbstractObj):
    """
    State class to hold information about states (provinces) of Nepal
    """
    @property
    def districts(self):
        """
        Return all the districts of this province, matched against the
        module-level ``districts`` list by ``province_no``.
        """
        province_districts = [dist for dist in districts if dist.province_no == self.province_no]
        return province_districts
class _Provinces(list):
"""
Return list of provinces
"""
province_names = ("province_one", "province_two", "province_three", \
"province_four", "province_five", "province_six", "province_seven")
indexes = {"one":1, "two":2, "three":3, "four":4, "five":5, "six":6}
def __init__(self):
super(_Provinces, self).__init__()
for json_province in json_provinces:
self.append(Province(**json_province))
# Sort province based on province no.
self.sort(key=lambda state: state.province_no)
def __getattr__(self, attrname):
"""
nepal_provinces = Provinces()
nepal_provinces.province_one
"""
if attrname not in self.province_names:
raise AttributeError("{} has no attribute {}".format(self.__class__.__name__, attrname))
_, index = attrname.split("_")
return self[self.indexes.get(index)-1]
# Module-level singleton: the fully populated, sorted list of provinces.
provinces = _Provinces()
| true |
8bdc0702abf6c3489bf8df0e759dc180e1ab4b7a | Python | phumacinha/Grafos-2020-1 | /REO1/ex1.py | UTF-8 | 1,472 | 3.359375 | 3 | [] | no_license | # N é a quantidade de casos que serão testados
# N is the number of test cases to process.
N = int(input())
# One adjacency list per test-case graph.
grafos = []
for n in range(N):
    num_vertices_arestas = input().split()
    # V is the number of vertices, E the number of (directed) edges.
    V, E = int(num_vertices_arestas[0]), int(num_vertices_arestas[1])
    # Adjacency list of G.
    lista_de_adjacencia = [[] for _ in range(V)]
    # Read the E edges, one "src dst" pair per line.
    for _ in range(E):
        aresta = input().split()
        lista_de_adjacencia[int(aresta[0])].append(int(aresta[1]))
    grafos.append(lista_de_adjacencia)
caso = 1
# Run a recursive depth-first traversal of each graph, printing the visit
# tree with one extra space of indentation per recursion level.
for grafo in grafos:
    print('Caso {}:'.format(caso), end='')
    caso = caso + 1
    n = len(grafo)
    # Vertex colours: 'B' = white/unvisited, 'C' = grey/open, 'P' = black/done.
    cor = ['B']*n
    # Current indentation (hierarchy) level; lives at module scope so the
    # nested function below can mutate it via ``global``.
    hierarquia = 1
    def pathR_visit(s):
        # Depth-first visit starting at vertex s, printing edges as it goes.
        global hierarquia
        # Mark the source vertex as open.
        cor[s] = 'C'
        vizinhos = grafo[s]
        for v in vizinhos:
            print('{}{}-{}'.format(' '*hierarquia, s, v), end='')
            if(cor[v] == 'B'):
                print(' pathR(G,{})'.format(v))
                cor[v] = 'C'
                hierarquia = hierarquia + 1
                pathR_visit(v)
            else:
                print('')
        # Pop one indentation level (never below 1) and close the vertex.
        hierarquia = hierarquia if hierarquia == 1 else hierarquia - 1
        cor[s] = 'P'
    for u in range(n):
        if(cor[u] == 'B' and len(grafo[u]) > 0):
            print('')
            pathR_visit(u)
    print('')
7c46c5314d59be83ab60b0c96f22f9a114896c9b | Python | geckotian96/qbb2019-answers | /day4-afternoon/day4-homework-3.2.py | UTF-8 | 1,826 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
"""
usage:./py <t_name> <samples_csv> <FPKMS>
create a timecourse of the given transcrips of females and males
Usage: ./day4-homework.3.2.py <t_name> <samples.csv> <replicates.csv> <c_tab dir>
"""
#./day4-homework-3.2.py FBtr0331261 ~/qbb2019/data/samples.csv ~/qbb2019/data/replicates.csv ~/qbb2019-answers/results/stringtie/
import sys
import pandas as pd
import matplotlib.pyplot as plt
import os
t_name = sys.argv[1]
samples=pd.read_csv(sys.argv[2])
replicates=pd.read_csv(sys.argv[3])
ctab_dir=sys.argv[4]
#fpkms = df.drop(columns="gene_name")
def sex_ident(sex, sampletypes):
    """Collect the FPKM values of transcript ``t_name`` for every sample of
    the given sex listed in the ``sampletypes`` sheet.
    """
    selected = sampletypes.loc[sampletypes.loc[:, "sex"] == sex, "sample"]
    fpkms = []
    for sample_id in selected:
        # Each sample has its own StringTie folder containing a t_data.ctab table.
        table = pd.read_csv(os.path.join(ctab_dir, sample_id, "t_data.ctab"),
                            sep="\t", index_col="t_name")
        fpkms.append(table.loc[t_name, "FPKM"])
    return fpkms
f_vals1= sex_ident("female", samples)
f_vals2= sex_ident("female", replicates)
m_vals1= sex_ident("male", samples)
m_vals2= sex_ident("male", replicates)
fig, ax= plt.subplots()
ax.plot(range(4,8),m_vals2, ".",color="orange", label="m. replicates")
ax.plot(range(4,8),f_vals2, ".",color="green", label="f. replicates")
ax.plot(f_vals1, color="red", label="female")
ax.plot(m_vals1, color="blue", label="male")
ax.legend(loc='center right', bbox_to_anchor=(1.430, .5))
ax.set_xticklabels(["0", "10", "11", "12", "13", "14A", "14B", "14C", "14D"])
ax.set_ylabel("mRNA Abundance (FPKM)")
ax.set_xlabel("Development Stage")
ax.set_title("FBtr0331261 Abundance")
plt.tight_layout()
plt.subplots_adjust(top=0.9)
fig.savefig("q3.timecourse.png")
plt.close(fig)
| true |
87f41fd077d132548299a325c335503437eec687 | Python | reazion/learning_python-1 | /loto.py | UTF-8 | 437 | 3.3125 | 3 | [] | no_license | import random
# Ask which lottery variant to play; e.g. "6 45" means draw 6 unique
# numbers from 1..45. (The prompt is in Russian: "Choose the game type".)
a,c = input("Выберите тип игры. '6 45', '5 36', '4 20': ").split()
def loter():
    # Draw int(a) distinct numbers from the range 1..int(c) and print them.
    b=[]
    d=[]
    e=0
    for i in range(1,int(c)+1):
        b.append(i)
    # Rejection-sample until we have int(a) distinct values.
    while len(d)<int(a):
        e=random.choice(b)
        if e not in d:
            d.append(e)
    print(d)
if a=="4" and c=="20":
    # The "4 20" game consists of two independent 4-of-20 draws.
    loter()
    loter()
elif a=="5" and c=="36":
    # The "5 36" game: a 5-of-36 draw plus an extra 1-of-4 draw.
    loter()
    a=1
    c=4
    loter()
else:
    loter()
# Keep the console window open until the user presses Enter.
input()
| true |
a752a47053de7918f89edddc9f2990dc16b98609 | Python | CrispyCabot/AIProject1 | /EightPuzzleGame_InformedSearch.py | UTF-8 | 11,699 | 3.625 | 4 | [] | no_license | import numpy as np
from EightPuzzleGame_State import State
'''
This class implement the Best-First-Search (BFS) algorithm along with the Heuristic search strategies
In this algorithm, an Open list is used to store the unexplored states and
a Closed list is used to store the visited state. Open list is a priority queue (First-In-First-Out).
The priority is insured through sorting the Open list each time after new states are generated
and added into the list. The heuristics are used to decide which node should be visited next.
In this informed search, reducing the state space search complexity is the main criterion.
We define heuristic evaluations to reduce the states that need to be checked every iteration.
Evaluation function is used to express the quality of informedness of a heuristic algorithm.
'''
class InformedSearchSolver:
    """Best-first (A*-style) solver for the 8-puzzle.

    Maintains an open list of states still to explore (kept sorted by the
    evaluation ``weight`` = g + h) and a closed list of visited states.
    """
    def __init__(self, current, goal):
        # BUG FIX: these used to be mutable *class* attributes, so two
        # solver instances silently shared one open/closed list (and
        # __init__ appended into the shared class-level openlist). They
        # are now proper per-instance state.
        self.current = current
        self.goal = goal
        self.openlist = [current]
        self.closed = []
        self.depth = 0
    def sortFun(self, e):
        # Sort key for the open list: the state's evaluation f(n).
        return e.weight
def check_inclusive(self, s):
"""
* The check_inclusive function is designed to check if the expanded state is in open list or closed list
* This is done to prevent looping. (You can use a similar code from uninformedsearch program)
* @param s
* @return
"""
in_open = False
in_closed = False
ret = [-1, -1]
# TODO your code start here
for i in self.openlist:
if i.equals(s):
in_open = True
break
for i in self.closed:
if i.equals(s):
in_closed = True
break
return {"open": in_open, "closed": in_closed}
# TODO your code end here
    def state_walk(self):
        """Expand the current state by moving the blank tile.

        There are four possible actions for the blank tile (represented
        by 0): move up, down, left or right. Each legal move produces a
        child state; children are merged into the open/closed lists and
        the best open state becomes the new ``self.current``.
        """
        # The current state has now been visited: move it open -> closed.
        self.closed.append(self.current)
        self.openlist.remove(self.current)
        # Locate the blank tile (value 0) in the current board.
        walk_state = self.current.tile_seq
        row = 0
        col = 0
        for i in range(len(walk_state)):
            for j in range(len(walk_state[i])):
                if walk_state[i, j] == 0:
                    row = i
                    col = j
                    break
        self.depth += 1
        children = []
        ''' The following program is used to do the state space actions
        The 4 conditions for moving the tiles all use similar logic, they only differ in the location of the
        tile that needs to be swapped. That being the case, I will only comment the first subroutine'''
        # TODO your code start here
        ### ↑(move up) action ###
        # (row - 1) is checked to prevent out of bounds errors, the tile is swapped with the one above it
        if (row - 1) >= 0:
            """
            *get the 2d array of current
            *define a temp 2d array and loop over current.tile_seq
            *pass the value from current.tile_seq to temp array
            *↑ is correspond to (row, col) and (row-1, col)
            *exchange these two tiles of temp
            *define a new temp state via temp array
            *call check_inclusive(temp state)
            *do the next steps according to flag
            *if flag = 1 //not in open and closed
            *begin
            *assign the child a heuristic value via heuristic_test(temp state);
            *add the child to open
            *end;
            *if flag = 2 //in the open list
            *if the child was reached by a shorter path
            *then give the state on open the shorter path
            *if flag = 3 //in the closed list
            *if the child was reached by a shorter path then
            *begin
            *remove the state from closed;
            *add the child to open
            *end;
            """
            temp = self.current.tile_seq.copy()
            # Swap the blank space with the tile above it
            temp[row][col] = temp[row-1][col]
            temp[row-1][col] = 0
            tempState = State(temp, self.depth)
            children.append(tempState)
        ### ↓(move down) action ###
        # row + 1 is checked to make sure it will stay in bounds
        if (row + 1 < len(walk_state)):
            temp = self.current.tile_seq.copy()
            # Swap the blank space with the tile below it.
            temp[row][col] = temp[row+1][col]
            temp[row+1][col] = 0
            tempState = State(temp, self.depth)
            children.append(tempState)
        ### ←(move left) action ###
        if (col - 1 >= 0):
            temp = self.current.tile_seq.copy()
            # Swap the blank space with the tile to its left.
            temp[row][col] = temp[row][col-1]
            temp[row][col-1] = 0
            tempState = State(temp, self.depth)
            children.append(tempState)
        ### →(move right) action ###
        if (col + 1 < len(walk_state)):
            temp = self.current.tile_seq.copy()
            # Swap the blank space with the tile to its right.
            temp[row][col] = temp[row][col+1]
            temp[row][col+1] = 0
            tempState = State(temp, self.depth)
            children.append(tempState)
        # Merge every child into the search frontier: brand-new children are
        # opened; duplicates keep whichever copy was reached by the shorter
        # path, possibly reopening a closed state.
        for child in children:
            flags = self.check_inclusive(child)
            child.weight = child.depth + self.heuristic_test(child)
            if not (flags['open'] or flags['closed']):
                self.openlist.append(child)
            elif flags['open']:
                existingState = None
                existingStateIndex = -1
                for i in self.openlist:
                    if i.equals(child):
                        existingState = i
                        existingStateIndex = self.openlist.index(i)
                        break
                if child.depth < existingState.depth:
                    self.openlist[existingStateIndex] = child
            elif flags['closed']:
                existingState = None
                existingStateIndex = -1
                for i in self.closed:
                    if i.equals(child):
                        existingState = i
                        existingStateIndex = self.closed.index(i)
                        break
                if child.depth < existingState.depth:
                    self.closed.remove(existingState)
                    self.openlist.append(child)
        # Re-sort the open list by weight via a stable insertion sort.
        newList = []
        for val in self.openlist:
            index = 0
            while index < len(newList) and newList[index].weight < val.weight:
                index += 1
            newList.insert(index, val)
        self.openlist = newList
        # The cheapest open state becomes the next state to expand.
        self.current = self.openlist[0]
        # TODO your code end here
    def heuristic_test(self, current):
        """Compute h(n) for ``current`` against the goal state.

        The heuristic is the sum of three components:
        (1) number of tiles out of place,
        (2) sum of Manhattan distances of tiles from their goal positions,
        (3) 2 x the number of direct tile reversals.
        The caller combines it as f(n) = g(n) + h(n), with g(n) being the
        path depth from the start state.
        """
        curr_seq = current.tile_seq
        goal_seq = self.goal.tile_seq
        # (1) Tiles out of place
        h1 = 0
        # TODO your code start here
        """
        *loop over the curr_seq
        *check the every entry in curr_seq with goal_seq
        """
        dimens = len(curr_seq)
        for row in range(0, dimens):
            for col in range(0, dimens):
                if curr_seq[row][col] != goal_seq[row][col]:
                    h1 += 1
        # TODO your code end here
        # (2) Sum of distances out of place
        h2 = 0
        # TODO your code start here
        """
        *loop over the goal_seq and curr_seq in nested way
        *locate the entry which has the same value in
        *curr_seq and goal_seq then calculate the offset
        *through the absolute value of two differences
        *of curr_row-goal_row and curr_col-goal_col
        *absoulte value can be calculated by abs(...)
        """
        for currRow in range(0, dimens):
            for currCol in range(0, dimens):
                val = curr_seq[currRow][currCol]
                for goalRow in range(0, dimens):
                    for goalCol in range(0, dimens):
                        if goal_seq[goalRow][goalCol] == val:
                            h2 += abs(currRow-goalRow) + abs(currCol-goalCol)
                            # NOTE(review): this break only exits the inner
                            # column loop; with unique tile values the extra
                            # outer iterations find no further match.
                            break
        # TODO your code end here
        # (3) 2 x the number of direct tile reversals
        h3 = 0
        # TODO your code start here
        """
        *loop over the curr_seq
        *use a Γ(gamma)shap slider to walk throught curr_seq and goal_seq
        *rule out the entry with value 0
        *set the boundry restriction
        *don't forget to time 2 at last
        *for example
        *goal_seq 1 2 3 curr_seq 2 1 3 the Γ shape starts
        *         4 5 6          4 5 6
        *         7 8 0          7 8 0
        *with 1 2 in goal_seq and 2 1 in curr_seq thus the
        *     4            4
        *reversal is 1 2 and 2 1
        """
        # A "reversal" is a pair of adjacent tiles that each sit in the
        # other's goal position; each pair is detected from both ends.
        for row in range(0, dimens):
            for col in range(0, dimens):
                if not (goal_seq[row][col] == 0 or curr_seq[row][col] == 0):
                    # check right
                    if col + 1 < dimens and curr_seq[row][col+1] == goal_seq[row][col] and curr_seq[row][col] == goal_seq[row][col+1]:
                        h3 += 1
                    # check left
                    if col - 1 >= 0 and curr_seq[row][col-1] == goal_seq[row][col] and curr_seq[row][col] == goal_seq[row][col-1]:
                        h3 += 1
                    # check down
                    if row + 1 < dimens and curr_seq[row+1][col] == goal_seq[row][col] and curr_seq[row][col] == goal_seq[row+1][col]:
                        h3 += 1
                    # check up
                    if row - 1 >= 0 and curr_seq[row-1][col] == goal_seq[row][col] and curr_seq[row][col] == goal_seq[row-1][col]:
                        h3 += 1
        h3 *= 2
        # update the heuristic value for current state
        return h1 + h2 + h3
        # TODO your code end here
    # You can change the following code to print all the states on the search path
    def run(self):
        """Drive the search until the goal is reached, printing each
        visited state and a final summary (iterations and path length)."""
        # output the goal state
        target = self.goal.tile_seq
        print("\nReached goal state: ")
        target_str = np.array2string(target, precision=2, separator=' ')
        # Strip the surrounding brackets from numpy's string rendering.
        print(target_str[1:-1])
        print("\n The visited states are: ")
        path = 0
        # Expand states until the current state equals the goal.
        while not self.current.equals(self.goal):
            self.state_walk()
            path += 1
            print('Visited State number', path)
            pathstate_str = np.array2string(
                self.current.tile_seq, precision=2, separator=' ')
            print(pathstate_str[1:-1])
        print("\nIt took", path, "iterations to reach to the goal state")
        print("The length of the path is:", self.current.depth)
| true |
71d03859563a368f1cca13ddc33dbc98e19cee82 | Python | emillynge/cryptogram-solver | /sub_solver.py | UTF-8 | 12,009 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3.4
"""A cryptogram substitution-cipher solver."""
import argparse
import re
import functools
from copy import copy
from collections import (defaultdict, namedtuple, Counter)
from operator import itemgetter
# Package version string, shown in main()'s startup banner.
__version__ = '0.0.1'
class NoSolutionException(Exception):
    """Raised when the cipher cannot be solved within the current limits."""
    pass
@functools.lru_cache()
def hash_word(word):
    """Hash a word into its letter-pattern equivalent.

    Each distinct character is replaced by the (string form of the) order
    in which it first appears: MXM becomes 010, ASDF becomes 0123,
    AFAFA becomes 01010, etc.
    """
    first_seen = {}
    digits = []
    for ch in word:
        if ch not in first_seen:
            # The next pattern digit is simply how many distinct
            # characters we have met so far.
            first_seen[ch] = str(len(first_seen))
        digits.append(first_seen[ch])
    return ''.join(digits)
class Corpus(object):
    """Manages a corpus of words sorted by frequency descending.

    Words are indexed by their letter-pattern hash (see ``hash_word``)
    so candidate lookup only scans words with the same repetition shape.
    """
    def __init__(self, corpus_filename):
        self._hash_dict = defaultdict(list)
        with open(corpus_filename) as fp:
            for word in fp:
                _word = word.strip()
                self._hash_dict[hash_word(_word)].append(_word)
    def find_candidates(self, input_word):
        """Finds words in the corpus that could match the given word in
        ciphertext.
        For example, MXM would match wow but not cat, and cIF would match cat
        but not bat. Uppercase letters indicate ciphertext letters and lowercase
        letters indicate plaintext letters.
        Args:
            input_word: The word to search for. Can be mixed uppercase/lowercase.
        """
        input_word_hash = hash_word(input_word)
        hash_matches = self._hash_dict[input_word_hash]
        candidates = list()
        for word in hash_matches:
            for candidate_char, input_char in zip(word, input_word):
                # Already-decided (lowercase) characters and apostrophes
                # must match the candidate exactly; uppercase cipher
                # letters are still free.
                if input_char.islower() or input_char == "'" or candidate_char == "'":
                    if input_char != candidate_char:
                        break # invalidate
            else: # run this block if no break occurred i.e word is not invalidated
                candidates.append(word)
        return candidates
class SubSolver(object):
    """Solves substitution ciphers."""
    def __init__(self, ciphertext, corpus_filename, verbose=False):
        """Initializes the solver.
        Args:
            ciphertext: The ciphertext to solve.
            corpus_filename: The filename of the corpus to use.
            verbose: Print out intermediate steps.
        """
        self._corpus = Corpus(corpus_filename)
        self._translations = list()
        # Work entirely in uppercase: uppercase = still-encrypted letters.
        self.ciphertext = ciphertext.upper()
        self.verbose = verbose
    def best_cipher(self, remaining_words, trans):
        """Pick the most promising cipher word to attack next.

        Each remaining word is scored by how much of the untranslated
        (uppercase) text it covers and how few corpus candidates it has;
        the highest-scoring word is returned as a namedtuple
        ``(cipher_val, cipher_word, candidates, n_candidates, covered)``.
        """
        # NOTE(review): these two weights are not referenced below --
        # presumably leftovers from an earlier scoring formula.
        candidates_weight = 10.0
        coverage_weight = 1.0
        translated_words = [word.translate(trans) for word in remaining_words]
        candidate_lists = [self._corpus.find_candidates(word) for word in translated_words]
        max_candidate_len = max(len(candidates) for candidates in candidate_lists)
        # Frequency of each still-encrypted (uppercase) character.
        char_count = Counter(char for char in ''.join(translated_words) if char.isupper())
        total_char_count = sum(char_count.values())
        Result = namedtuple('Result', 'cipher_val cipher_word candidates n_candidates covered')
        best = Result(-1, 'dummy', [], 0, 0)
        for (candidates, cipher_word, translated_word) in zip(candidate_lists, remaining_words, translated_words):
            # How many untranslated character occurrences this word touches.
            covered = sum(char_count[char] for char in set(translated_word) if char.isupper())
            coverage = covered / total_char_count
            n_candidates = len(candidates)
            # Fewer candidates -> closer to 1; more candidates -> closer to 0.
            candidate_len = ((max_candidate_len - n_candidates) / max_candidate_len)
            cipher_value = coverage * candidate_len
            if cipher_value > best.cipher_val:
                best = Result(cipher_value, cipher_word, candidates, n_candidates, covered)
        return best
def solve(self):
"""Solves the cipher passed to the solver.
This function invokes the recursive solver multiple times, starting
with a very strict threshold on unknown words (which could be proper
nouns or words not in the dictionary). It then expands this out to a
final threshold, after which it considers the cipher unsolvable.
"""
words = re.sub(r'[^\w ]+', '', self.ciphertext).split()
words.sort(key=lambda word: len(self._corpus._hash_dict[hash_word(word)]), reverse=True)
Translation = namedtuple('Translation', 'trans solution')
err = NoSolutionException('Solve loop not started?')
for max_unknown_word_count in range(0, max(3, len(words) / 10)):
try:
for solution in self._recursive_solve(words, {}, 0, max_unknown_word_count):
trans = self._make_trans_from_dict(solution)
print('Solution found: {0}'.format(self.ciphertext.translate(trans)))
self._translations.append(Translation(trans, solution))
break
except NoSolutionException as err:
if self.verbose:
print(err)
except KeyboardInterrupt:
break
else: # loop not breaked => no solution found. reraise latest error
raise err
def _recursive_solve(self, remaining_words, current_translation,
unknown_word_count, max_unknown_word_count):
"""Recursively solves the puzzle.
The algorithm chooses the first word from the list of remaining words,
then finds all words that could possibly match it using the current
translation table and the corpus. For each candidate, it builds a new
dict that assumes that that candidate is the correct word, then
continues the recursive search. It also tries ignoring the current word
in case it's a pronoun.
Args:
remainingWords: The list of remaining words to translate, in
descending length order.
currentTranslation: The current translation table for this recursive
state.
unknownWordCount: The current number of words it had to skip.
maxUnknownWordCount: The maximum number before it gives up.
Returns:
A dict that translates the ciphertext, or None if it could not find
one.
"""
trans = self._make_trans_from_dict(current_translation)
if self.verbose:
print(self.ciphertext.translate(trans))
if not remaining_words: # remaining words is empty. we're done!
yield current_translation
raise StopIteration()
best = self.best_cipher(remaining_words, trans)
if best.n_candidates == 0:
raise NoSolutionException()
cipher_word = best.cipher_word
candidates = best.candidates
remaining_words.remove(cipher_word)
best_translations = list()
for candidate in candidates:
new_trans = dict(current_translation)
translated_plaintext_chars = set(current_translation.values())
for cipher_char, plaintext_char in zip(cipher_word, candidate):
# This translation is bad if it tries to translate a ciphertext
# character we haven't seen to a plaintext character we already
# have a translation for.
if cipher_char not in current_translation and plaintext_char in translated_plaintext_chars:
break
new_trans[cipher_char] = plaintext_char
else: # code is reached if no break occurred => good translation
_trans = self._make_trans_from_dict(new_trans)
best = self.best_cipher(remaining_words, _trans)
if best.n_candidates != 0 or len(remaining_words) == 0:
best_translations.append((new_trans, best.n_candidates, best.covered))
if False:#best_translations:
max_n_candidates = max(item[1] for item in best_translations) + 1
max_covered = max(item[2] for item in best_translations) + 1
best_translations.sort(key=lambda item: (max_n_candidates - item[1])/max_n_candidates + item[2]/max_covered, reverse=True)
for trans, _, _ in best_translations:
try:
for sol in self._recursive_solve(remaining_words,
trans, unknown_word_count,
max_unknown_word_count):
yield sol
except NoSolutionException:
pass
# If code is reached none of the candidates could produce valid result for the current cipher word
# Try not using the candidates and skipping this word, because it
# might not be in the corpus if it's a proper noun.
if unknown_word_count >= max_unknown_word_count: # We cannot skip anymore words than we already have
remaining_words.append(cipher_word) # Re-append cipher_word
raise NoSolutionException(
'Reached limit of {0} skipped words. \n best translation:'.format(unknown_word_count,
current_translation))
try:
for sol in self._recursive_solve(remaining_words,
current_translation,
unknown_word_count + 1,
max_unknown_word_count):
yield sol
except NoSolutionException:
remaining_words.append(cipher_word) # Re-append cipher_word
raise
@staticmethod
def _make_trans_from_dict(translations):
"""Takes a translation dictionary and returns a string fit for use with
string.translate()."""
from_str = translations.keys()
to_str = translations.values()
return str.maketrans(''.join(from_str), ''.join(to_str))
    def print_report(self):
        """Prints the result of the solve process.

        If several solutions were found the user is asked interactively
        (via input()) which one to display in full.
        """
        if not self._translations:
            print('Failed to translate ciphertext.')
            return
        # Shortest substitution table (fewest mapped words) first.
        self._translations.sort(key=lambda item: len(item.solution), reverse=False)
        print('Plaintext:')
        for i, (trans, solution) in enumerate(self._translations):
            plaintext = self.ciphertext.translate(trans)
            print(str(i) + ':\t' + plaintext)
        if len(self._translations) > 1:
            i = int(input('which solution so you want?: '))
        print('Ciphertext:')
        print(self.ciphertext, '\n')
        # With a single translation, i is still 0 from the loop above.
        trans, solution = self._translations[i]
        plaintext = self.ciphertext.translate(trans)
        print('Plaintext:')
        print(plaintext, '\n')
        print('Substitutions:')
        items = [key + ' -> ' + word for key, word in solution.items()]
        items.sort()
        i = 0
        # Print the substitutions five per row.
        for item in items:
            print(item + ' ', )
            if i % 5 == 4:
                print('')
            i += 1
def main():
    """Main entry point: parse args, run the solver, print the report."""
    print('SubSolver v' + __version__ + '\n')
    parser = argparse.ArgumentParser(
        description='Solves substitution ciphers.')
    parser.add_argument('input_text',
                        help='A file containing the ciphertext.')
    parser.add_argument('-c', metavar='corpus', required=False,
                        default='corpus.txt',
                        help='Filename of the word corpus.')
    parser.add_argument('-v', action='store_true',
                        help='Verbose mode.')
    args = parser.parse_args()
    try:
        ciphertext = open(args.input_text).read().strip()
    except IOError as err:
        # Fall back to treating the argument itself as the ciphertext.
        print('No file {0} found. using it as ciphertext'.format(args.input_text))
        ciphertext = args.input_text
    solver = SubSolver(ciphertext, args.c, args.v)
    solver.solve()
    solver.print_report()
if __name__ == "__main__":
    main()
| true |
03b1931d0f007e8ac353b56f80bbc495eddadffa | Python | wwnis7/256 | /min soo/57problems/34.py | UTF-8 | 326 | 3.984375 | 4 | [] | no_license | A = ['John Smith', 'Jackie jackson', 'Chris Jones', 'Amanda Cullen', 'Jeremy Goodwin']
# Two passes over the roster: remove one name on the first pass, then
# just show the resulting list on the second.
for j in range(2):
    print(f'There are {len(A)} employees: ')
    for i in A:
        print(i)
    if j == 1: break
    name = input('Enter an employee name to remove: ')
    try:
        A.remove(name)
    # BUG FIX: the original bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt; list.remove() only raises ValueError for a
    # missing element, so catch exactly that.
    except ValueError:
        print(f'{name} is not an employee.')
e786e690d89cae4ed0a0bc4c6d1b7ca80d95ae73 | Python | tamer-abdulghani/iot-raspberry-aws-face-analysis | /modules/camera.py | UTF-8 | 539 | 2.625 | 3 | [] | no_license | import time
def capture_picture_from_raspberry(file_path):
    """Count down three seconds, then take one photo with the Pi camera.

    The image is flipped on both axes and written to ``file_path``.
    """
    from picamera import PiCamera
    count = 3
    # Simple on-screen countdown (3, 2, 1) before the shot.
    for i in range(count):
        print(count - i)
        time.sleep(1)
    with PiCamera() as camera:
        camera.vflip = True
        camera.hflip = True
        camera.capture(file_path)
def capture_picture_from_windows(file_path):
    """Grab one frame from the default webcam and save it to ``file_path``."""
    import cv2
    camera_port = 0
    camera = cv2.VideoCapture(camera_port)
    # Short pause before grabbing a frame (presumably to let the sensor
    # warm up -- TODO confirm).
    time.sleep(0.1)
    return_value, image = camera.read()
    # NOTE(review): ``return_value`` is never checked and the device is not
    # released via camera.release(); ``del`` relies on GC -- worth confirming.
    cv2.imwrite(file_path, image)
    del camera
| true |
d6d29034039ed5ad62021d2c05f2ed7f8051f630 | Python | jfinnson/django-test | /happy_team/templatetags/utils.py | UTF-8 | 416 | 2.671875 | 3 | [] | no_license | from django import template
# Filter registry Django uses to pick up the template filters defined below.
register = template.Library()
@register.filter
def get_key_value(some_dict, key):
    """Template filter: look up ``key`` in ``some_dict``.

    :param some_dict: Dictionary object
    :param key: key value to lookup in some_dict
    :return: value in dict at key, or the empty string when absent
    """
    if key in some_dict:
        return some_dict[key]
    return ''
| true |
df4076e66d832cab8a54cb612d6986cde977ae24 | Python | ReshadSadik/python_crawler_imdb | /imdb1.py | UTF-8 | 647 | 2.78125 | 3 | [] | no_license | import scrapy
class imdb(scrapy.Spider):
    """Scrapy spider that scrapes the IMDB Top-250 chart."""
    name = 'imdb_movies'
    start_urls = ['https://www.imdb.com/chart/top/?ref_=nv_mv_250']
    def parse(self, response):
        # Each chart entry is one <tr> row of the ranking table.
        SET_SELECTOR = 'tr'
        for movies in response.css(SET_SELECTOR):
            # CSS selectors for the title, release year and rating cells.
            NAME_SELECTOR = '.titleColumn a ::text '
            year_selector = 'span ::text'
            rating_selector = 'strong ::text'
            yield {
                'name': movies.css(NAME_SELECTOR) .extract_first(),
                'release': movies.css(year_selector).extract_first(),
                'rating' : movies.css(rating_selector) .extract_first() ,
            }
| true |
d0247b91889decb176ec1b7ca5805805c0056694 | Python | MajikalExplosions/digitea-source-code | /server.py | UTF-8 | 2,944 | 2.578125 | 3 | [] | no_license | #app.logger.info(u'Inserting message: {}'.format(message))#See https://github.com/heroku-examples/python-websockets-chat/blob/master/chat.py
import os
import logging
import gevent
import random
from flask import Flask, render_template, request
from flask_sockets import Sockets
from datetime import datetime
# Application setup: Flask app with debug mode driven by the DEBUG env var.
app = Flask(__name__)
app.debug = 'DEBUG' in os.environ
# WebSocket support via flask_sockets.
sockets = Sockets(app)
# In-memory chat log, seeded with a startup banner (the /send-message
# handler below caps it at 250 entries).
chatHistory = ['DigiTea Server started {}'.format(datetime.now())]
@app.route("/chat")
def getPage():
return render_template('chat-page.html')
@app.route("/get-history", methods=['POST'])
def getHistory():
global chatHistory
historyInAString = ''
for message in chatHistory:
historyInAString += message
historyInAString += '\n'
historyInAString = historyInAString[:-1]
return (historyInAString, 200)
@app.route("/send-message", methods=['POST'])
def setMessage():
global chatHistory
chatHistory.append(request.values.get('message', 'missing_no'))
if (len(chatHistory) > 250):
chatHistory = chatHistory[-250:]
return getHistory()
# NOTE(review): the obscure route path appears to be the only access
# control on this console page -- confirm whether that is intended.
@app.route("/cephaloholic")#Addicted to the head
def getConsole():
    """Serve the console page template."""
    return render_template('console.html')
'''
website = 'custom.html'
args = 'Blank_Screen'
lastRefresh = 'Blank_Screen'
current = 'Blank_Screen'
colors = ['FADA63', 'E68A86', 'DE5284', '9C5292', '74559B', '8C99C9', '83CCD9', '7DCFCE', 'B1D6A0']
#receiving message from slack
@app.route('/slack-message', methods=['POST'])
def receive():##RETURNS TO SLACK NOT THE ACTUAL WEB PAGE
global website
global args
global current
data = request.values.get('text', 'custom Error Parsing Data')
#type = data.split(' ', 1)[0]
#args = data.split(' ', 1)[1]
app.logger.warning(u'Data: {}'.format(data))
website = 'custom.html'
current = data
args = data
app.logger.warning(u'Selected Custom Template: {}'.format(data))
return ('Setting live display screen to custom message...', 200)
@app.route('/page')
def getWebsite():#Returns the actual website(this is where the comp at the front should go)
#added these two lines
global website
global args
#added above
app.logger.warning(u'Returning website {} with args {}'.format(website, args))
return render_template(website, msg2=args, clr=colors[random.randint(0, len(colors) - 1)], animations=animations[random.randint(0, len(animations) - 1)])
@app.route ('/refresh')
def refresh():
global lastRefresh
global current
app.logger.warning(u'Last refresh {}, current {}'.format(lastRefresh, current))
if lastRefresh != current:
lastRefresh = current
app.logger.warning(u'Returning refresh.')
return ('refresh', 200)
app.logger.warning(u'Returning keep.')
return ('keep', 200)#Usually would be keep but idk what happened debugging here
'''
| true |
1789d3badeae2698c4efccff2270c677d7fb5823 | Python | ONSdigital/address-index-data | /DataScience/Analytics/prototype/saoSuffixAddresses.py | UTF-8 | 2,311 | 2.8125 | 3 | [
"LicenseRef-scancode-proprietary-license",
"MIT"
] | permissive | #!/usr/bin/env python
"""
ONS Address Index - Secondary Address Object End Suffix Test Data
=================================================================
A simple script to attach UPRNs to a dataset with SAO end suffixes.
As the dataset is synthetic it contains AddressBase UPRNs enabling automatic
performance computations.
This is a prototype code aimed for experimentation and testing. There are not unit tests.
The code has been written for speed rather than accuracy, it therefore uses fairly aggressive
blocking. As the final solution will likely use ElasticSearch, the aim of this prototype is
not the highest accuracy but to quickly test different ideas, which can inform the final
ElasticSearch solution.
Running
-------
After all requirements are satisfied, the script can be invoked using CPython interpreter::
python saoSuffixAddresses.py
Requirements
------------
:requires: numpy (tested with 1.12.0)
:requires: pandas (tested with 0.19.2)
:requires: addressLinking (and all the requirements within it)
Author
------
:author: Sami Niemi (sami.niemi@valtech.co.uk)
Version
-------
:version: 0.1
:date: 1-Mar-2017
"""
import numpy as np
import pandas as pd
from Analytics.linking import addressLinking
class SAOsuffixLinker(addressLinking.AddressLinker):
    """
    Address Linker for the SAO Suffix dataset. Inherits the AddressLinker and overwrites the load_data method.
    """
    def load_data(self):
        """
        Read in the SAO Suffix address test data. Overwrites the method in the AddressLinker.
        """
        self.toLinkAddressData = pd.read_excel(self.settings['inputPath'] + self.settings['inputFilename'])
        # Assign a sequential row id and keep the original UPRN aside
        # (renamed to UPRN_old) so linking results can be scored later.
        self.toLinkAddressData['ID'] = np.arange(len(self.toLinkAddressData['UPRN'].index))
        self.toLinkAddressData.rename(columns={'UPRN': 'UPRN_old'}, inplace=True)
def run_sao_suffix_linker(**kwargs):
    """
    A simple wrapper that allows running the SAO Suffix Address linker.

    Keyword arguments override the default settings below.

    :return: None
    """
    settings = dict(inputFilename='SAO_END_SUFFIX.xlsx',
                    inputPath='/Users/saminiemi/Projects/ONS/AddressIndex/data/',
                    outname='SAOsuffix')
    settings.update(kwargs)
    linker = SAOsuffixLinker(**settings)
    linker.run_all()
    # Release the linker explicitly once the run is complete.
    del linker
if __name__ == "__main__":
    run_sao_suffix_linker()
| true |
18f3e2a7704fd7b56c1515e446739afabb0eeb47 | Python | AndreaBorghesi/knowInject_transComputing | /src/extract_dep_graphs_from_json.py | UTF-8 | 11,229 | 2.78125 | 3 | [] | no_license | '''
Parse json file containing info on the structure of the benchmark and extract
the dependency graph
Andrea Borghesi
University of Bologna
2019-05-31
'''
#!/usr/bin/python3.6
import os
import numpy
import sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import json
import networkx as nx
# Root directory of the benchmark data files.
data_dir = './data'
def parse(graph, l, level=0):
    """Dispatch one benchmark json node ``l`` to the matching parser.

    Returns a triple ``(op, nodes, top_node)``: the op code that was
    handled, the graph nodes touched, and the node representing the
    expression's result.
    """
    if l == None:
        # Missing operand: treat it like an empty primitive.
        return 'P', [], None
    op = l[0]
    if op == 'A':
        # Assignment: [A, level, cond_path, v_idx, (lhs, rhs)]
        level = l[1]
        cond_path = l[2]
        v_idx = l[3]
        exp = l[4]
        lhs = exp[0]
        rhs = exp[1]
        nodes, top_node = parse_assignment(graph, v_idx, lhs, rhs, level)
    elif op == 'R':
        # Conditional expression: [R, level, cond_path, op_type, v_idx, (lhs, rhs)]
        level = l[1]
        cond_path = l[2]
        op_type = l[3]
        v_idx = l[4]
        exp = l[5]
        lhs = exp[0]
        rhs = exp[1]
        nodes, top_node = parse_conditional_exp(graph, op_type, v_idx, lhs, rhs,
                level)
    elif op == 'P':
        # Primitive: [P, prim_type]
        prim_type = l[1]
        nodes, top_node = parse_primitive(graph, prim_type, level)
    elif op == 'V':
        # Variable reference: [V, v_idx]
        v_idx = l[1]
        nodes, top_node = parse_var(graph, v_idx, level)
    elif op == 'E':
        # Expression: [E, op_type, v_idx, lhs(, rhs)] -- rhs is optional.
        op_type = l[1]
        v_idx = l[2]
        lhs = l[3]
        if len(l) == 5:
            rhs = l[4]
        else:
            rhs = None
        nodes, top_node = parse_exp(graph, op_type, v_idx, lhs, rhs, level)
    elif op == 'T':
        # Temporary: [T, v_idx, content]
        v_idx = l[1]
        content = l[2]
        nodes, top_node = parse_temp(graph, v_idx, content, level)
    elif op == 'F':
        # Function call: [F, params]
        params = l[1]
        nodes, top_node = parse_func(graph, params, level)
    elif op == 'C':
        # Constant: [C, const_type, value]
        const_type = l[1]
        value = l[2]
        nodes, top_node = parse_const(graph, const_type, value, level)
    else:
        if len(l) == 1: # it's a variable wrapped in an extra list level
            _, nodes, top_node = parse(graph, l[0])
            op = 'VV'
        else:
            # Unexpected op code: return an empty result.
            nodes = []
            top_node = None
    return op, nodes, top_node
def parse_assignment(graph, v_idx, lhs, rhs, level):
    """Parse an assignment into variable ``v_idx``.

    Recursively parses both sides, then adds a dependency edge from each
    side's top node to the result node (weighted by nesting ``level``).
    Returns ``(nodes, res_node)``.
    """
    res_node = 'v{}'.format(v_idx)
    res_node_t = 't{}'.format(v_idx)
    lop, lnodes, ltop_node = parse(graph, lhs, level)
    rop, rnodes, rtop_node = parse(graph, rhs, level)
    # Union of the nodes touched on either side (duplicates removed).
    nodes = lnodes
    nodes.extend(rnodes)
    nodes = list(set(nodes))
    # If a temporary node for this index already exists, prefer it as the
    # assignment target over the plain variable node.
    if res_node_t in nodes:
        res_node = res_node_t
    # Link each side's top node to the result, avoiding self-edges.
    if ltop_node != res_node and ltop_node != res_node_t:
        if ltop_node != None:
            graph.add_edge(ltop_node, res_node, weight=level)
    if rtop_node != res_node and rtop_node != res_node_t:
        if rtop_node != None:
            graph.add_edge(rtop_node, res_node, weight=level)
    return nodes, res_node
def parse_conditional_exp(graph, op_type, v_idx, lhs, rhs, level):
    """Handle an 'R' (conditional) node: branch conditions are not tracked
    in the dependency graph, so only the result variable name is reported."""
    return [], 'v{}'.format(v_idx)
def parse_primitive(graph, prim_type, level):
    """Handle a 'P' (primitive) node: primitives contribute no graph node."""
    return [], None
def parse_var(graph, v_idx, level):
    """Handle a 'V' node: make sure variable node v<idx> exists and report it
    as both the touched node and the sub-expression result."""
    name = 'v{}'.format(v_idx)
    if name not in graph.nodes():
        graph.add_node(name)
    return [name], name
def parse_exp(graph, op_type, v_idx, lhs, rhs, level):
    """Parse an 'E' node: expression producing v<idx> from lhs (op) rhs.

    Recursively parses both operands, ensures a result node exists
    (preferring the temporary t<idx> when present) and links each operand's
    top node to it with an edge weighted by the nesting level.
    """
    #print('>>>>>> PARSE EXP <<<<<<<')
    nodes = []
    lop, lnodes, ltop_node = parse(graph, lhs, level)
    rop, rnodes, rtop_node = parse(graph, rhs, level)
    nodes.extend(lnodes)
    nodes.extend(rnodes)
    nodes = list(set(nodes))
    res_node = 'v{}'.format(v_idx)
    res_node_t = 't{}'.format(v_idx)
    #print('Graph nodes {}'.format(graph.nodes))
    #print('internal nodes {}'.format(nodes))
    if res_node not in nodes and res_node_t not in nodes:
        nodes.append(res_node)
        #print('parse exp adding node {}'.format(res_node))
        graph.add_node(res_node)
    #print('lop {}'.format(lop))
    #if lop == 'V' or lop == 'T':
    #    print('\tlnodes {}'.format(lnodes))
    #    print('\tres node {}'.format(res_node))
    #    for n in lnodes:
    #        graph.add_edge(n, res_node)
    #print('rop {}'.format(rop))
    #if rop == 'V':
    #    for n in rnodes:
    #        graph.add_edge(n, res_node)
    #print(ltop_node)
    #print(rtop_node)
    #print(res_node_t)
    # Prefer the temporary node as the expression result when one exists.
    if res_node_t in nodes:
        res_node = res_node_t
    # Link operands to the result, avoiding self-loops.
    if ltop_node != res_node and ltop_node != res_node_t:
        if ltop_node != None:
            graph.add_edge(ltop_node, res_node, weight=level)
            #if(ltop_node == 't21'):
            #    print('--> Adding node from {} L'.format(ltop_node))
    if rtop_node != res_node and rtop_node != res_node_t:
        if rtop_node != None:
            graph.add_edge(rtop_node, res_node, weight=level)
            #if(rtop_node == 't21'):
            #    print('--> Adding node from {} R'.format(rtop_node))
    return nodes, res_node
def parse_temp(graph, v_idx, content, level):
    """Parse a 'T' node: temporary t<idx> wrapping `content`.

    Variable / expression / function / wrapped-variable results inside the
    content become direct predecessors of t<idx> (edge weight = level) and
    are then removed from the reported node list so callers link to the
    temporary instead.
    """
    #print('>>>>>> PARSE TEMP <<<<<<<')
    node = 't{}'.format(v_idx)
    if node not in graph.nodes():
        #print('parse temp adding node {}'.format(node))
        graph.add_node(node)
    nodes = [node]
    #print('nodes {}'.format(nodes))
    cop, cnodes, top_node = parse(graph, content, level)
    nodes.extend(cnodes)
    #print('nodes {}'.format(nodes))
    if cop == 'V' or cop == 'VV' or cop == 'F' or cop == 'E':
        for n in cnodes:
            graph.add_edge(n, node, weight=level)
            nodes.remove(n)
            #if(n == 't21'):
            #    print('--> Adding node from {} to {}'.format(n, node))
    #print('cop {}'.format(cop))
    #print('cnodes {}'.format(cnodes))
    nodes = list(set(nodes))
    return nodes, node
def parse_func(graph, params, level):
    """Parse an 'F' (function call) node: the call's dependencies are
    exactly those of its parameter expression."""
    pop, pnodes, top_node = parse(graph, params, level)
    return pnodes, top_node
def parse_const(graph, const_type, value, level):
    """Handle a 'C' (constant) node: constants add no dependency node."""
    return [], None
def parse_graph(benchmark):
    """Load data/<benchmark>/program_vardeps.json and build the benchmark's
    variable dependency graph as a networkx DiGraph."""
    json_file = data_dir + '/' + benchmark + '/program_vardeps.json'
    with open(json_file) as jfile:
        data = json.load(jfile)
    G = nx.DiGraph()
    for l in data:
        parse(G, l)
    return G
def plot_graph(G, benchmark):
    """Draw the dependency graph G and show it.

    A few known benchmarks render better with networkx's default (spring)
    layout; everything else uses the Kamada-Kawai layout.
    """
    fig = plt.figure()
    # The dwt/BlackScholes/Jacobi branches were three byte-identical copies;
    # collapsed into a single membership test.
    draw_kwargs = dict(with_labels=True, node_size=500, alpha=.5,
            font_weight='bold')
    if benchmark in ('dwt', 'BlackScholes', 'Jacobi'):
        nx.draw(G, **draw_kwargs)
    else:
        nx.draw_kamada_kawai(G, **draw_kwargs)
    plt.show()
def plot_graph_weightedEdges(G, benchmark):
    """Draw G with a spring layout, colouring edges along a Blues colormap
    and fading early edges (`benchmark` is currently unused)."""
    fig = plt.figure()
    pos = nx.layout.spring_layout(G)
    M = G.number_of_edges()
    # One colormap value per edge, offset so the first edges are not white.
    edge_colors = range(2, M + 2)
    labels = {}
    for node in G.nodes():
        labels[node] = node
    node_sizes = [400] * G.number_of_nodes()
    # Later edges are drawn progressively more opaque.
    edge_alphas = [(5 + i) / (M + 4) for i in range(M)]
    nodes = nx.draw_networkx_nodes(G, pos, node_size=node_sizes,
            node_color='green', alpha=.5)
    nx.draw_networkx_labels(G, pos, labels, font_weight='bold')
    edges = nx.draw_networkx_edges(G, pos, node_size=node_sizes,
            arrowstyle='->', arrowsize=10, edge_color=edge_colors,
            edge_cmap=plt.cm.Blues, width=2)
    for i in range(M):
        edges[i].set_alpha(edge_alphas[i])
    pc = mpl.collections.PatchCollection(edges, cmap=plt.cm.Blues)
    pc.set_array(edge_colors)
    plt.colorbar(pc)
    ax = plt.gca()
    ax.set_axis_off()
    plt.show()
'''
Extract binary variable relations from the dependency graphs of the type
less-or-equal
The function returns a list composed by tuples in the form (var1, var2),
where var1 and var2 are the variables involved
- e.g. V1 = V2+V3 ~= V1 = T4(V2) + T4(V3) --> T4 <= V1 (if T4 is not used in
other expressions, it would make no sense to allocate to it more bits
than the expression result)
IN: only_temp set to true specifies that only relations involving temporary
variables must be returned
OUT: the relations list
'''
def get_binary_leq_rels(graph, only_temp):
    """Collect (predecessor, successor) pairs for every edge of `graph`.

    With only_temp=True, keep only relations in which at least one endpoint
    is a temporary ('t...') variable.
    """
    rels = []
    for src in graph.nodes:
        for dst in graph.successors(src):
            if not only_temp or 't' in src or 't' in dst:
                rels.append((src, dst))
    return rels
'''
Extract temporary cast due to expression relations from the dependency graphs
- these relations state that the precision of a temporary variable introduced to
handle two (or more) operands with different precision, must be equal to the
minimum precision of the two operands
- e.g. V1 = V2+V3 ~= V1 = T4(V2) + T4(V3) --> T4 = min(V2, V3)
The function returns a list composed by tuples in the form
(var1, [var2, var3, .., varN]), denoting relations such as:
var1 = min(var2, var3, .., varN)
'''
def get_cast_exps_rels(graph):
    """Return (temp, predecessor_list) pairs for every temporary node fed by
    more than one operand: the temporary's precision must equal the minimum
    precision of its predecessors."""
    return [(node, list(graph.predecessors(node)))
            for node in graph.nodes
            if 't' in node and len(list(graph.predecessors(node))) > 1]
'''
Parse the information related to the input benchmark and obtain the dependency
graph. Then, extract and return variables relationships
'''
def get_relations(benchmark, binRel_onlyTemp):
    """Build the benchmark's dependency graph and return the pair
    (binary_leq_relations, cast_expression_relations)."""
    G = parse_graph(benchmark)
    bin_rels = get_binary_leq_rels(G, binRel_onlyTemp)
    cast_expr_rels = get_cast_exps_rels(G)
    return bin_rels, cast_expr_rels
def getAdditionalFeatures(benchmark):
    """Return the graph edges rewritten as ('var_<succ>', 'var_<pred>')
    string pairs, keeping only the digits of each node name."""
    add_feat=[]
    G = parse_graph(benchmark)
    for var in G.edges():
        # Edge direction is reversed on purpose: successor first.
        add_feat.append(("var_{}".format(int(''.join(filter(str.isdigit, var[1])))), "var_{}".format(int(''.join(filter(str.isdigit, var[0]))))))
    return add_feat
def getAdjacencyMatrix(benchmark):
    """Return the adjacency matrix (list of 0/1 rows) of the benchmark's
    dependency graph, rows/columns ordered by numeric node index."""
    G = parse_graph(benchmark)
    # Node name remapping ('v0' -> '0', 't2' -> '2', ...).
    # Bug fix: node[1] kept only the first character of the index, so
    # multi-digit nodes (e.g. 'v10') collapsed onto single-digit ones
    # ('v1'); node[1:] keeps the whole index.
    mapping = {node: node[1:] for node in G.nodes()}
    H = nx.relabel_nodes(G, mapping)
    adjacent_matrix = []
    # Sort numerically: string sort would put '10' before '2' while the
    # column index below is numeric.
    for node in sorted(H.nodes(), key=int):
        row = [0] * len(H)
        for edge in H.edges(node):
            row[int(edge[1])] = 1
        adjacent_matrix.append(row)
    return adjacent_matrix
def main(argv):
    """CLI entry point: argv[0] names the benchmark; extracts the variable
    relations and displays the dependency graph."""
    benchmark = argv[0]
    G = parse_graph(benchmark)
    bin_rels = get_binary_leq_rels(G, False)
    bin_rels_onlyT = get_binary_leq_rels(G, True)
    cast_expr_rels = get_cast_exps_rels(G)
    #print(bin_rels)
    #print(bin_rels_onlyT)
    #print(cast_expr_rels)
    plot_graph(G, benchmark)
    #plot_graph_weightedEdges(G, benchmark)
if __name__ == '__main__':
main(sys.argv[1:])
| true |
c8637ab92e487af64f14346647a25122f88deeab | Python | Lornatang/ml-alg | /Perceptro.py | UTF-8 | 1,656 | 3.265625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
# Reproducible synthetic dataset: two Gaussian blobs with 0/1 labels,
# reshaped to a column vector to match the (n_features, 1) weight matrix.
np.random.seed(123)
X, y = make_blobs(n_samples=1000, centers=2)
y = y[:, np.newaxis]
X_train, X_test, y_train, y_test = train_test_split(X, y)
class Perceptron:
    """Single-layer perceptron classifier trained with the batch
    perceptron learning rule."""

    def __init__(self):
        pass

    def train(self, X, y, learning_rate=5e-2, n_iters=100):
        """Fit on X of shape (n_samples, n_features) and 0/1 labels y of
        shape (n_samples, 1).

        Returns the learned (weights, bias); both are also stored on self.
        """
        n_samples, n_features = X.shape
        # Step 0: init all parameters
        self.weights = np.zeros((n_features, 1))
        self.bias = 0
        for _ in range(n_iters):
            # Steps 1-2: linear activation, then hard threshold.
            y_predict = self.step_function(np.dot(X, self.weights) + self.bias)
            # Steps 3-4: batch updates proportional to the prediction error.
            self.weights += learning_rate * np.dot(X.T, (y - y_predict))
            self.bias += learning_rate * np.sum(y - y_predict)
        return self.weights, self.bias

    def step_function(self, x):
        """Elementwise Heaviside step: 1 where x >= 0 else 0, returned as an
        (n, 1) column vector."""
        # Vectorized replacement for the former Python-level list
        # comprehension; reshape preserves the old column-vector output.
        return np.where(x >= 0, 1, 0).reshape(-1, 1)

    def predict(self, X):
        """Return hard 0/1 predictions for X as an (n, 1) column vector."""
        return self.step_function(np.dot(X, self.weights) + self.bias)
# Train on the blob data and report accuracy; labels are 0/1, so the mean
# absolute prediction error equals the misclassification rate.
p = Perceptron()
w_trained, b_trained = p.train(
    X_train, y_train, learning_rate=0.05, n_iters=500)
y_p_train = p.predict(X_train)
y_p_test = p.predict(X_test)
print(
    f"training accuracy: {100 - np.mean(np.abs(y_p_train - y_train)) * 100}%")
print(f"test accuracy: {100 - np.mean(np.abs(y_p_test - y_test)) * 100}%")
| true |
c399fba5260cc9f5c447a3093a57d812f9687431 | Python | code-lgtm/Problem-Solving | /python/usaco/chapter1/barn.py | UTF-8 | 1,261 | 3.34375 | 3 | [
"MIT"
] | permissive | """
ID: kumar.g1
LANG: PYTHON2
TASK: barn1
"""
import heapq
fin = open ('barn1.in', 'r')
fout = open ('barn1.out', 'w')
# First line: M boards, S total stalls, C occupied stalls; then one occupied
# stall index per line.
M, S, C = map(int, fin.readline().rstrip("\n").split(" "))
occupied = []
for i in range(C):
    occupied.append(int(fin.readline().rstrip("\n")))
def min_stalls(m, s, c):
    """Return the minimum number of stalls that must be covered when at most
    m boards must cover every occupied stall listed in c (s stalls total).

    Greedy idea: cover the whole occupied span with one board, then leave
    the m-1 widest gaps uncovered.  Implemented incrementally with a
    min-heap of the gaps currently kept open.
    """
    # With at least as many boards as occupied stalls, cover each one alone.
    if len(c) <= m:
        return len(c)
    c = sorted(c)
    nstalls = 1
    gaps = []  # min-heap of the gap widths currently left uncovered
    last = c[0]
    boards = 1
    for i in range(1, len(c)):
        # Consecutive occupied stalls share the current board.
        if c[i] - last == 1:
            last = c[i]
            nstalls += 1
            continue
        # While spare boards remain, start a new board and leave the gap
        # open.  (Bug fix: this previously compared against the *global* M
        # instead of the m parameter, working only by coincidence because
        # the sole call site passes M.)
        if boards < m:
            heapq.heappush(gaps, c[i] - last - 1)
            boards += 1
            nstalls += 1
        else:
            # Out of boards: keep only the m-1 widest gaps open.  If the new
            # gap is at least as wide as the narrowest open one, swap them
            # and pay for covering the narrower gap; otherwise cover the new
            # gap entirely.
            if len(gaps) > 0 and c[i] - last - 1 >= gaps[0]:
                nstalls += heapq.heappushpop(gaps, c[i] - last - 1) + 1
            else:
                nstalls += c[i] - last
        last = c[i]
    return nstalls
fout.write(str(min_stalls(M, S, occupied)) + "\n")
fout.close()
| true |
cfd2c1a58d7f9ac852070ff38a0f16225bc922aa | Python | thinkmpink/police-fatalities-sample | /victim_names/combine_names.py | UTF-8 | 3,404 | 3.140625 | 3 | [
"MIT"
] | permissive | import argparse, datetime as dt, getpass
from calendar import monthrange
"""
FE input: "M/D/YYYY H:M:S Victim Name"
WK input: "YYYY-MM-DD victim name"
combined_names output: (YYYY, M, D, "victim name")
"""
def combine_names(fpath):
    """Merge the Fatal Encounters and Wiki victim files under
    `fpath`/victim_names/ into one set of (year, month, day, name) tuples
    with lowercase names.

    FE rows look like "M/D/YYYY H:M:S Victim Name"; WK rows look like
    "YYYY-MM-DD victim name", where the date may be the literal string
    'None' (mapped to the sentinel date 1900-01-01).
    """
    names = set()
    fe_file = "{}victim_names/fe_names.tsv".format(fpath)
    wk_file = "{}victim_names/wk_names.tsv".format(fpath)
    with open(fe_file, "r") as fe:
        for raw in fe:
            tokens = raw.split()
            month, day, year = tokens[0].split("/")
            victim = " ".join(tokens[2:]).lower()
            names.add((int(year), int(month), int(day), victim))
    with open(wk_file, "r") as wk:
        for raw in wk:
            tokens = raw.split()
            parts = tokens[0].split("-")
            victim = " ".join(tokens[1:]).lower()
            if parts[0] == 'None':
                names.add((1900, 1, 1, victim))
            else:
                names.add((int(parts[0]), int(parts[1]), int(parts[2]), victim))
    return names
"""
Write names from combine_names() to 3 files:
- victim_names/<dataset>_names.tsv (used in `grep_names.sh`)
- victim_names/<dataset>_name_date.tsv (used for ...)
- victim_names/<dataset>_names_strict_month.tsv (used in `pr_eval.py`)
:param fpath: /path/to/post_extract_tests/
:param names: a set of gold standard names
"""
def write_names_by_month(fpath, names, year, month):
    """Split the gold-standard (year, month, day, name) tuples into three
    TSVs under fpath/victim_names/: <M>_<Y>_names.tsv (lower + Title case,
    incidents before the month-end cutoff), <M>_<Y>_name_date.tsv
    (name + ISO date, same filter) and <M>_<Y>_names_strict_month.tsv."""
    writefn = "{0}victim_names/{1}_{2}_names.tsv".format(fpath, month, year)
    writefnd = "{0}victim_names/{1}_{2}_name_date.tsv".format(fpath, month,
        year)
    writefnds = "{0}victim_names/{1}_{2}_names_strict_month.tsv".format(fpath,
        month, year)
    f_name = open(writefn, "w")
    f_name_d = open(writefnd, "w")
    f_name_s = open(writefnds, "w")
    # First day of the month plus the month's length = first day of the
    # *following* month, i.e. an exclusive upper bound for "before month end".
    arg_date = dt.datetime(year, month, 1)
    arg_date += dt.timedelta(monthrange(year, month)[1])
    for date_name in names:
        this_date = dt.datetime(date_name[0], date_name[1], date_name[2])
        name_lower = date_name[3]
        # NOTE(review): arg_date is in month+1, so this "strict month" filter
        # actually matches incidents in the month *after* the argument month
        # -- confirm this is intended by pr_eval.py.
        if this_date.month == arg_date.month and this_date.year == arg_date.year:
            f_name_s.write(name_lower + "\n")
        if this_date < arg_date:
            # Write each name twice: lowercase and Title Case (grep targets).
            name_upper = " ".join([name.capitalize() for name in name_lower.split()])
            f_name.write(name_lower + "\n")
            f_name.write(name_upper + "\n")
            f_name_d.write("{}\t{:04d}-{:02d}-{:02d}\n".format(name_lower,
                date_name[0], date_name[1], date_name[2]))
    f_name.close()
    f_name_d.close()
    f_name_s.close()
def main():
    """Parse -y/--year and -m/--month, merge the gold name sets and write
    the per-month TSVs under the current user's post_extract_tests dir."""
    fpath = '/home/{}/newsevents/post_extract_tests/'.format(getpass.getuser())
    parser = argparse.ArgumentParser(description="""Take formatted gold victim
    names from Wiki and Fatal Encounters data where incident month < MONTH+1,
    write names to victim_names/<dataset>_names.tsv and name-date pairs to
    victim_names/<dataset>_name_date.tsv""")
    parser.add_argument('-y', '--year', type=int, help="Year of the dataset",
        required=True)
    parser.add_argument('-m', '--month', type=int, help="Month of the dataset",
        required=True)
    args = parser.parse_args()
    names = combine_names(fpath)
    write_names_by_month(fpath, names, args.year, args.month)
if __name__ == "__main__":
main()
| true |
37414714d6e4baa6e609f5b09d06d71dd378ef1b | Python | cognizac/Scrapers | /CaseWestern_scraper.py | UTF-8 | 2,067 | 2.53125 | 3 | [
"MIT"
] | permissive | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: cognizac
#
# Created: 14/02/2014
# Copyright: (c) cognizac 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import mechanize
from bs4 import BeautifulSoup
def main():
br = mechanize.Browser()
br.open('http://weatherhead.case.edu/faculty/directory?sortBy=p')
columns = BeautifulSoup(br.response().read()).findAll(attrs={'class':'span6'})
contacts = []
for column in columns:
people = column.findAll('strong')
info = column.findAll('br')
for index,person in enumerate(people):
thiscontact = []
thiscontact.append(person.text)
thisinfo = info[index*3].nextSibling.split(',')
thiscontact.append(thisinfo[0])
thiscontact.append(thisinfo[1].encode('ascii','ignore'))
if person.a:
#print person.a['href']
br.open('http://weatherhead.case.edu'+str(person.a['href']))
soup = BeautifulSoup(br.response().read())
email = soup.find(attrs={'class':'span10'}).a.text
phone = soup.find(attrs={'class':'span10'}).findAll('br')[2].nextSibling.replace('\t','').replace('\r','').replace('\n','')
if email:
thiscontact.append(email)
else:
thiscontact.append("""N/A""")
if email:
thiscontact.append(phone)
else:
thiscontact.append("""N/A""")
print thiscontact
contacts.append(thiscontact)
fileout = open('CaseWestern_contacts.tsv','w')
fileout.write('Name\tTitle\tArea\tEmail\tPhone\n')
for contact in contacts:
fileout.write(str(contact[0])+'\t'+str(contact[1])+'\t'+str(contact[2])+'\t'+str(contact[3])+'\t'+str(contact[4])+'\n')
fileout.close()
if __name__ == '__main__':
main()
| true |
58515ce7d8898a9e4be81167a725a4bc86764c4b | Python | yeliu6/Web-VLE-Builder | /Web_VLE/plotting.py | UTF-8 | 1,755 | 3.015625 | 3 | [] | no_license | import pygal
def plottingIsoBar(xVals, tempListBub, tempListDew, compList, temp_units):
    """Render an isobaric T-x-y VLE diagram (temperature vs. mole fraction
    of component 1) with pygal and return it as a data URI for embedding.

    xVals is accepted for interface compatibility but unused: pygal takes
    (x, y) pairs directly from tempListBub / tempListDew.
    """
    # str.format silently ignored the old fontsize/fontname keyword
    # arguments, so they have been dropped; styling belongs to pygal.
    strTitle = "Isobaric VLE Diagram for Mixture:\n{} and {}".format(
        compList[0], compList[1])
    strY = "Temperature ({})".format(temp_units)
    strX = "Mole Fraction of Component 1: {}".format(compList[0])
    chart = pygal.XY(width=800, height=800, explicit_size=True)
    chart.title = strTitle
    chart.x_title = strX
    chart.y_title = strY
    chart.legend_at_bottom = True
    chart.x_labels = (0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
    chart.add('Bubble Point Curve', tempListBub)
    chart.add('Dew Point Curve', tempListDew)
    return chart.render_data_uri()
def plottingIsoTherm(xVals, presListBub, presListDew, compList, pressure_units):
    """Render an isothermal P-x-y VLE diagram (pressure vs. mole fraction
    of component 1) with pygal and return it as a data URI for embedding.

    xVals is accepted for interface compatibility but unused: pygal takes
    (x, y) pairs directly from presListBub / presListDew.
    """
    # str.format silently ignored the old fontsize/fontname keyword
    # arguments, so they have been dropped; styling belongs to pygal.
    strTitle = "Isothermal VLE Diagram for Mixture:\n{} and {}".format(
        compList[0], compList[1])
    strY = "Pressure ({})".format(pressure_units)
    strX = "Mole Fraction of Component 1: {}".format(compList[0])
    chart = pygal.XY(width=800, height=800, explicit_size=True)
    chart.title = strTitle
    chart.x_title = strX
    chart.y_title = strY
    chart.legend_at_bottom = True
    chart.add('Bubble Point Curve', presListBub)
    chart.add('Dew Point Curve', presListDew)
    chart.x_labels = (0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
    return chart.render_data_uri()
7a35fb0ae7ce1891025be54c28e379aca965685d | Python | Harrison-Mitchell/Daily-Sydney-Temperature | /weather.py | UTF-8 | 3,559 | 3.3125 | 3 | [] | no_license | from PIL import Image, ImageDraw
# Read daily max temperatures (BOM export; presumably Sydney, per the repo
# name -- confirm against data.csv) and prepare one image strip per year.
STRIP_HEIGHT = 7 # currently (2017) makes a nice square image
IMAGE_WIDTH = 1095 # currently 365 * 3 i.e 1 day = 3px wide
temps = []
# rawTemps = []
# the constant first temperature in the file
lastTemp = "24.4"
# downloaded from http://www.bom.gov.au/climate/data/
with open("data.csv", "r") as data:
    # read each line excluding the CSV header
    for line in data.readlines()[1:]:
        # if temperature is missing, use yesterday's temperature
        if ",," in line:
            line = line.replace(",,", "," + lastTemp + ",")
        segment = line.split(",")
        # keep only year, month, day and temperature
        temps.append([*segment[2:5], float(segment[5])])
        # rawTemps.append(float(segment[5]))
        # make todays temperature now "yesterday's"
        lastTemp = segment[5]
# originally used the min and max for calculations but
# these extreme values then meant there was less of
# a color range for the rest of the image to use
# i.e the image was mostly orange (mid range of yellow-red)
minTemp = 10 #min(rawTemps)
maxTemp = 40 #max(rawTemps)
# if each line is 7 pixels tall, calculate final image height
height = (int(len(temps) / 365) - 2) * STRIP_HEIGHT
# create the final compilation image
overallIm = Image.new("RGB", (IMAGE_WIDTH, height), color="white")
# create the strip (single year) image
yearIm = Image.new("RGB", (364, 1), color="white")
def addToOverall(year):
# yucky globals, reason being half laziness and half...
# well... full laziness actually...
global yearIm
global overallIm
# the original 365x1 image is very small
yearIm = yearIm.resize((IMAGE_WIDTH, STRIP_HEIGHT))
# calculate the year
yearNum = int(year) - 1861
# and thus how low the strip should be
overallIm.paste(yearIm, (0, yearNum * STRIP_HEIGHT))
day = 0
# NOTE(review): lastYear starts as the *int* 1859, so the first non-1859 row
# always takes the new-year branch below; from then on lastYear is a string.
lastYear = 1859
for line in temps:
    # if it's not 1859 (bad data) and it's not a leap day (uneven strip lengths)
    if line[0] != "1859" and not (line[1] == "02" and line[2] == "29"):
        # if this day is still within the same year
        if line[0] == lastYear:
            # find what color the day should be between 10-40 scaled to 0-255
            point = (line[3] - minTemp) / (maxTemp - minTemp) * 255
            # again, we didn't use absolute top and bottom, we picked numbers
            # that would create a better spread of colors, so the edge temperatures
            # would cause errors corrected here
            if point > 255: point = 255
            if point < 0: point = 0
            # place the day's pixel on the x coordinate according to day (0<=x<=364)
            # the color should be between yellow (255,255,0) and red (255,0,0)
            yearIm.putpixel((day, 0), (255, 255 - int(point), 0))
            day += 1
        # if it's the beginning of a new year
        else:
            # add last year's strip to the compilation
            addToOverall(line[0])
            # reset the strip image to white and the original size
            yearIm = Image.new("RGB", (364, 1), color="white")
            # update the comparison year
            lastYear = str(int(lastYear) + 1)
            day = 0
# the current year doesn't compare to the year in the future
# e.g if it's mid 2017, a 2018 line is never seen and the loop breaks
# so we need to add the incomplete year to the final image
addToOverall(line[0])
overallIm.save("hot.png")
d5aad1e44bdab50fb9535fbe785c7424f2f0422e | Python | stavan93/GoT-Death-Predictions | /src/python/Replace_Redirect_Names.py | UTF-8 | 2,410 | 2.75 | 3 | [] | no_license | import cbor
import trec_car.read_data as dat
import spacy
import csv
import os
import time
from collections import Counter
from spacy import displacy
# Path to the character-deaths CSV, two directories above the cwd.
csvfile = str(os.path.dirname(os.path.dirname(os.getcwd()))) + '/data/character-deaths.csv'
names = []
namelist = []
# Load every page of the GoT cbor dump once, up front.
f = open(str(os.path.dirname(os.path.dirname(os.getcwd()))) + '/data/got.cbor', 'rb')
pages = list(dat.iter_pages(f))
f.close()
#Replaces the names in the name list with the redirect names for characters who have a redirect link to another page in the cbor
def Rename_Redirect( namelist ):
    """Write data/names.txt, replacing each character name by its redirect
    target whenever the matching cbor page is a REDIRECT page.

    Assumes at most one page in `pages` matches a given name -- TODO
    confirm; a second match would produce a duplicate output line.
    """
    file = open(str(os.path.dirname(os.path.dirname(os.getcwd()))) + '/data/names.txt','w')
    file.write("Name\n")
    for n in namelist:
        flag = True
        # Special case that the generic parsing below would miss.
        if n == "Victarion Greyjoy":
            file.write("House Greyjoy\n")
            continue
        for page in pages:
            if page.page_name == n:
                if "REDIRECT " in page.get_text():
                    # "REDIRECT <target>": keep every token after the keyword.
                    nn = str(page.get_text()).split(" ")
                    #print(str(nn[1:]))
                    file.write(' '.join(map(str, nn[1:])) + "\n")
                    flag = False
                elif "REDIRECT" in page.get_text():
                    # No space after REDIRECT: drop the first 9 characters.
                    nn = str(page.get_text())[9:]
                    #print(str(nn[1:]))
                    file.write(nn + "\n")
                    flag = False
                else:
                    # Page exists but is not a redirect: keep the name.
                    file.write(n + "\n")
                    flag = False
        if flag:
            # No page matched this name at all: keep it unchanged.
            file.write(n + "\n")
    file.close()
#Reads the character-deaths.csv file gets the character names
def Read_Csv():
    """Populate the module-level `names` list with the first column of
    character-deaths.csv, skipping the header row."""
    with open(csvfile, 'r') as file:
        reader = csv.reader(file)
        fields = next(reader)
        for nam in reader:
            names.append(nam[0])
if __name__ == "__main__":
    Read_Csv()
    # Normalise each CSV name to at most three tokens, dropping any
    # parenthesised disambiguator and everything after it.
    for char_name in names:
        fname = str(char_name).split(" ")
        if len(fname) == 1:
            namelist.append(fname[0])
        elif len(fname) == 2:
            if "(" in fname[1]:
                namelist.append(fname[0])
            else:
                namelist.append(fname[0] + " " + fname[1])
        else:
            if "(" in fname[1]:
                namelist.append(fname[0])
            elif "(" in fname[2]:
                namelist.append(fname[0] + " " + fname[1])
            else:
                namelist.append(fname[0] + " " + fname[1] + " " + fname[2])
    Rename_Redirect(namelist)
9b3fa5d1ef7bd35543db69b509517b5b8f2195ec | Python | davidohana/python-scolp | /scolp.py | UTF-8 | 7,959 | 3.078125 | 3 | [
"MIT"
] | permissive | import datetime
import numbers
from enum import Enum, auto
from typing import List, Dict
class TitleMode(Enum):
    """How column titles are rendered: inline next to each value, as a
    periodically repeated header row, or not at all."""
    INLINE = auto()
    HEADER = auto()
    NONE = auto()
class Alignment(Enum):
    """Cell padding alignment; AUTO right-aligns numbers and left-aligns
    everything else (see Scolp._pad)."""
    LEFT = auto()
    RIGHT = auto()
    CENTER = auto()
    AUTO = auto()
class Column:
    """Formatting settings for a single output column.

    Any attribute left as None falls back to Config.default_column
    (see Scolp._get_config_param).
    """
    # noinspection PyTypeChecker
    def __init__(self):
        self.title = ""
        self.format: str = None  # str.format template for values
        self.width: int = None  # minimum width; grows to fit long values
        self.title_to_value_separator: str = None  # used in INLINE title mode
        self.pad_fill_char: str = None
        self.pad_align: str = None  # actually holds an Alignment member
        self.column_separator: str = None
        self.type_to_format: Dict[type, str] = None  # per-type format fallback
class Config:
    """User-facing configuration for a Scolp printer: column definitions,
    output throttling, header behaviour and the output sink."""

    def __init__(self):
        self.columns: List[Column] = []
        # Throttling: a row is printed only when both conditions hold.
        self.output_each_n_rows = 1
        self.output_each_n_seconds = 0
        self.title_mode = TitleMode.HEADER
        self.header_repeat_row_count = 10
        self.header_repeat_row_count_first = 1
        self.header_line_char = "-"
        # Per-column settings left as None fall back to this template.
        self.default_column = Column()
        self.default_column.width = 8
        self.default_column.format = None
        self.default_column.title_to_value_separator = "="
        self.default_column.pad_fill_char = " "
        self.default_column.column_separator = "|"
        self.default_column.pad_align = Alignment.AUTO
        self.default_column.type_to_format = {
            int: "{:,}",
            float: "{:,.3f}",
        }
        self.print_func = self._print_impl

    def add_column(self,
                   title: str, fmt=None, width=None,
                   title_to_value_separator=None, pad_fill_char=None, pad_align=None,
                   column_separator=None, type_to_format=None):
        """Append one column definition; returns self for chaining."""
        col = Column()
        col.title = title
        col.format = fmt
        col.width = width
        col.title_to_value_separator = title_to_value_separator
        col.pad_fill_char = pad_fill_char
        col.pad_align = pad_align
        col.type_to_format = type_to_format
        col.column_separator = column_separator
        self.columns.append(col)
        return self

    def add_columns(self, *titles: str):
        """Append several default-styled columns at once.

        Consistency fix: now returns self (like add_column) so both methods
        chain the same way; previously this returned None.
        """
        for title in titles:
            self.add_column(title)
        return self

    @staticmethod
    def _print_impl(s: str):
        # Default sink: unbuffered stdout, no trailing newline.
        print(s, end="", flush=True)
class Scolp:
    """Streaming column printer.

    Values are fed one cell at a time through print(); Scolp tracks the
    current column, formats/pads each value per its Column settings,
    throttles output by row count / elapsed time and periodically re-prints
    headers, all according to the attached Config.
    """

    def __init__(self, config=None):
        """Create a printer; `config` defaults to a fresh Config().

        Bug fix: the signature previously used the mutable default argument
        `config=Config()`, so every default-constructed Scolp shared (and
        mutated) one Config instance.
        """
        self.config = Config() if config is None else config
        self.row_index = 0
        self.last_row_print_time_seconds = 0
        self.init_time = datetime.datetime.now()
        self._cur_col_index = 0
        self._cur_printed_row_index = -1
        self._enable_print_current_row = False
        self._force_print_row_index = 0

    def _print(self, s: str):
        # All output funnels through the configurable sink.
        self.config.print_func(s)

    def _println(self):
        self._print("\n")

    def _pad(self, s: str, col: Column, orig_value):
        """Pad `s` to the column width (which grows to fit long values)."""
        width = self._get_config_param(col, "width")
        # Widen the column permanently when this value does not fit.
        col.width = max(width, len(s))
        if len(s) == col.width:
            return s
        pad_fill_char = self._get_config_param(col, "pad_fill_char")
        align = self._get_config_param(col, "pad_align")
        if align == Alignment.AUTO and orig_value is not None:
            # AUTO: numbers right-aligned, everything else left-aligned.
            if isinstance(orig_value, numbers.Number):
                align = Alignment.RIGHT
            else:
                align = Alignment.LEFT
        if align == Alignment.RIGHT:
            padded = str.rjust(s, width, pad_fill_char)
        elif align == Alignment.CENTER:
            padded = str.center(s, width, pad_fill_char)
        else:
            padded = str.ljust(s, width, pad_fill_char)
        return padded

    def get_default_format_str(self, col: Column, value):
        """Return the per-type format template matching `value`, or None."""
        type_to_format = self._get_config_param(col, "type_to_format")
        for typ, fmt in type_to_format.items():
            if isinstance(value, typ):
                return fmt
        return None

    def _format(self, col: Column, value):
        """Format one value: explicit format > per-type default > str()."""
        fmt = self._get_config_param(col, "format")
        if fmt is None:
            fmt = self.get_default_format_str(col, value)
        if fmt is None:
            fmt_val = str(value)
        else:
            try:
                fmt_val = str.format(fmt, value)
            except (ValueError, TypeError):
                # Bad template/value combination: degrade gracefully.
                fmt_val = str(value) + " (FMT_ERR)"
        fmt_val = self._pad(fmt_val, col, value)
        return fmt_val

    def _get_config_param(self, col: Column, param_name: str):
        """Read a column setting, falling back to Config.default_column."""
        col_param = col.__dict__[param_name]
        if col_param is not None:
            return col_param
        return self.config.default_column.__dict__[param_name]

    def print_col_headers(self):
        """Print the title row followed by an underline row."""
        self._println()
        for col in self.config.columns:
            title = self._pad(col.title, col, None)
            self._print(title)
            if col is not self.config.columns[-1]:
                column_separator = self._get_config_param(col, "column_separator")
                self._print(column_separator)
        self._println()
        for col in self.config.columns:
            horz_line = self.config.header_line_char * col.width
            self._print(horz_line)
            if col is not self.config.columns[-1]:
                column_separator = self._get_config_param(col, "column_separator")
                self._print(column_separator)
        self._println()

    def _print_column(self, var_value):
        """Emit one cell of the current row (with headers/titles as
        configured); called only when the row is enabled for printing."""
        col = self.config.columns[self._cur_col_index]
        if self._cur_col_index == 0:
            self._cur_printed_row_index += 1
            self.last_row_print_time_seconds = datetime.datetime.now().timestamp()
            if self.config.title_mode == TitleMode.HEADER and \
                    (self._cur_printed_row_index == self.config.header_repeat_row_count_first or
                     self._cur_printed_row_index % self.config.header_repeat_row_count == 0):
                self.print_col_headers()
        if self.config.title_mode == TitleMode.INLINE and col.title and not col.title.isspace():
            self._print(col.title)
            title_to_value_separator = self._get_config_param(col, "title_to_value_separator")
            self._print(title_to_value_separator)
        fmt_val = self._format(col, var_value)
        self._print(fmt_val)
        if self._cur_col_index == len(self.config.columns) - 1:
            self._println()
        else:
            column_separator = self._get_config_param(col, "column_separator")
            self._print(column_separator)

    def _update_print_enable_status(self):
        """At the start of a row, decide whether the row gets printed:
        either it was forced, or both throttling conditions are met."""
        if self._cur_col_index != 0:
            return
        self._enable_print_current_row = \
            self.row_index == self._force_print_row_index or \
            self.row_index % self.config.output_each_n_rows == 0 and \
            datetime.datetime.now().timestamp() - self.last_row_print_time_seconds >= self.config.output_each_n_seconds

    def print(self, *var_values):
        """Feed one or more cell values; rows wrap automatically after the
        last configured column."""
        if len(self.config.columns) == 0:
            # Allow use without any explicit column definition.
            self.config.add_column("(no title)")
        for var_value in var_values:
            self._update_print_enable_status()
            if self._enable_print_current_row:
                self._print_column(var_value)
            if self._cur_col_index == len(self.config.columns) - 1:
                self.row_index += 1
                self._cur_col_index = 0
            else:
                self._cur_col_index += 1

    def endline(self, msg=""):
        """Finish the current row early, optionally appending `msg`."""
        self._update_print_enable_status()
        if self._enable_print_current_row:
            self._print(msg)
            self._println()
        self.row_index += 1
        self._cur_col_index = 0

    def elapsed_since_init(self, round_seconds=True):
        """Wall-clock time since construction, optionally rounded to whole
        seconds."""
        elapsed = datetime.datetime.now() - self.init_time
        if round_seconds:
            rounded_seconds = round(elapsed.total_seconds())
            elapsed = datetime.timedelta(seconds=rounded_seconds)
        return elapsed

    def force_print_next_row(self):
        """Guarantee the next row bypasses the throttling checks."""
        self._force_print_row_index = self.row_index
| true |
bbbef810ef55693668ba211a9d467918fe4b8aae | Python | MarvynBailly/MarvynBailly.github.io | /games/hangman/hang_man.py | UTF-8 | 4,303 | 3.921875 | 4 | [] | no_license | """
Hangman Game
Auther: Marvyn Bailly
Version: 0.02
Play Hangman
"""
from os import system, name
import random
def loadWords():
    """Return every whitespace-separated word found in words.txt."""
    with open('words.txt', 'r') as f:
        return [word for line in f for word in line.split()]
def chooseWord(words):
    """
    words (list): list of words (strings)

    Returns a word from words at random
    """
    return words[random.randrange(len(words))]
def load_hangman(number):
    """Print the 10-line gallows drawing whose label line contains `number`.

    NOTE(review): matching is by substring, so looking up "0" would also
    match a label containing "10" -- relies on hangman.txt labels being
    unambiguous; confirm against the art file.
    """
    with open("hangman.txt", "r") as f:
        searchlines = f.readlines()
    for i, line in enumerate(searchlines):
        if number in line:
            # Print the 10 lines following the label, without extra newlines.
            for l in searchlines[i+1:i+11]:
                print(l,end = '')
def clear():
    """Clear the terminal ('cls' on Windows, 'clear' elsewhere)."""
    system('cls' if name == 'nt' else 'clear')
def welcome():
    """Greet the player at program start."""
    print("Welcome to hangman!")
def rules():
    """Explain the rules of hangman to the player."""
    print("Guess a random word created by a freind or the computer\n"
          "You have 6 guesses before you lose\n"
          "Letters that are correctly guessed will be shown in their place")
def user_opponent():
    """Ask whether to play against a human or the computer and return the
    raw choice string ('H' for human, 'C' for computer)."""
    print("Would you like to play against a friend or the computer?")
    return input("Please enter H for human and C for computer:" )
def human_create_puzzle():
    """
    Friend creates puzzle: read the secret word, then clear the screen so
    the guesser cannot see it.
    """
    puzzle = input("Friend, please enter the puzzle: " )
    clear()
    return puzzle
def user_progress(puzzle):
    """Return the blanked-out progress string: one '_ ' per puzzle letter."""
    return "".join("_ " for _ in puzzle)
def pc_create_puzzle():
    """
    Creates a puzzle for the user to guess: a random lowercase word from
    the words.txt bank; clears the screen for symmetry with the human flow.
    """
    words = loadWords()
    puzzle = chooseWord (words).lower()
    clear()
    return puzzle
def get_user_guess():
    """
    User guesses a letter: prompts until the input is exactly one
    alphabetic character (implements the previously-noted
    "add a letter check later" TODO).
    input: nothing
    output: the validated single-character guess
    """
    while True:
        guess = input("Please enter the letter you want to guess: " )
        if len(guess) == 1 and guess.isalpha():
            return guess
        print("Invalid guess - please enter a single letter.")
def evaluate_user_guess(puzzle, guess, progress):
    """Reveal every position of `guess` in `puzzle` and return the updated
    space-separated progress string."""
    slots = list(progress.replace(" ", ""))
    for position, letter in enumerate(puzzle):
        if letter == guess:
            slots[position] = guess
    return " ".join(slots)
def create_puzzle(opponent):
    """Dispatch puzzle creation: the computer picks a random word for a
    'C'/'c'/'computer' opponent, otherwise a friend types one in."""
    if opponent in ["C", "c","computer"]:
        puzzle = pc_create_puzzle()
    else:
        puzzle = human_create_puzzle()
    return puzzle
def check_guess(guess,puzzle):
    """Return True when `guess` occurs in `puzzle` (substring membership)."""
    # Idiom: `in` already yields a bool; the old if/else was redundant.
    return guess in puzzle
def print_results(number,puzzle,result):
    """Show the final gallows stage plus a win/lose message revealing the
    word; `number` is the missed-guess count as a string."""
    load_hangman(number)
    if result == "win":
        print("\n"+"You guessed the word! The word was:", puzzle)
        print("You Win!")
    else:
        print("\n"+"You didn't guess the word. The word was:",puzzle)
        print("You Lose!")
def one_round(opponent):
    """
    Play one round
    Ends after 6 missed guesses (lose) or once the word is revealed (win)
    input: opponent choice ('C'/'c'/'computer' for the pc, anything else
    for a human-created puzzle)
    output: none (results are printed)
    """
    correct_answer = False
    missed_rounds = 0
    letters_guessed = []  # wrong guesses only; shown to the player
    puzzle = create_puzzle(opponent)
    progress = user_progress(puzzle)
    while missed_rounds != 6:
        if correct_answer == False:
            load_hangman(str(missed_rounds))
            print("\n",progress)
            print("Letters guessed:", " ".join(letters_guessed))
            guess = get_user_guess()
            if check_guess(guess,puzzle) == True:
                progress = evaluate_user_guess(puzzle, guess,progress)
            else:
                letters_guessed.append(guess)
                print(guess, "is not in the word")
                missed_rounds += 1
            # The round is won once no blanks remain.
            correct_answer = (progress.replace(" ","") == puzzle)
            input("press enter to continue",)
            clear()
        else:
            print_results(str(missed_rounds),puzzle,result="win")
            return
    print_results(str(missed_rounds),puzzle,result="lose")
    return
def game_loop():
    """
    Top-level game loop: show the welcome screen and the rules, then keep
    playing rounds until the user declines to continue.
    """
    welcome()
    rules()
    # Fixed user-facing typo: "What to play?" -> "Want to play?"
    wants_to_play = input("Want to play? (y/n) ")
    while wants_to_play in ['y', 'Y', 'Yes', 'yes', "YES", "YES!"]:
        opponent = user_opponent()
        one_round(opponent)
        wants_to_play = input("Want to play again? (y/n) ")


game_loop()
2ba3f3172d806bdec196c3c89aa1c5bf6b324f67 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_117/765.py | UTF-8 | 1,585 | 2.671875 | 3 | [] | no_license | __author__ = 'Mariyan Stoyanov'
# Google Code Jam solution (Python 2): reads lawns from B-large.in and
# writes a per-case YES/NO verdict to B-large.out.  A lawn is achievable
# iff every cell equals the maximum of its row or of its column.
of = open('B-large.out', 'w')
f = open('B-large.in', 'r')
counter = 0                    # number of input lines consumed so far
number_of_test_cases = 0
row = 0
test_cases = []
lawns_list = []                # one lawn (sequence of rows) per test case
readNM = False                 # True when the NEXT line is an "N M" header
count_n = 0                    # rows read so far for the current lawn
my_lawn = []                   # rows accumulated for the current lawn
for line in f:
    if counter==0:
        # First line of the file: the total number of test cases.
        number_of_test_cases = int(line)
        readNM = True
    else:
        if readNM:
            # Flush the previous lawn (if any) before starting a new one.
            if len(my_lawn)>0:
                lawns_list.append(tuple(my_lawn))
                my_lawn = []
            N,M = map(int,line.rstrip('\n').split())
            readNM = False
            count_n = 0
        else:
            count_n += 1
            # After N rows the next line is the next case's header.
            if count_n==N:
                readNM = True
            my_lawn.append(map(int,line.rstrip('\n').split()))
    counter += 1
# The last lawn is not followed by another header, so flush it here.
if len(my_lawn)>0:
    lawns_list.append(my_lawn)
if(len(lawns_list)!=number_of_test_cases):
    raise ValueError('number of test cases read from file does not match the indicated number of test cases that should be there')
f.close()
for case_num in range(len(lawns_list)):
    lawn = lawns_list[case_num]
    possible = True
    # Check every cell: it must be the max of its row or of its column.
    for i in range(len(lawn)):
        for j in range(len(lawn[i])):
            col = [lawn[i1][j] for i1 in range(len(lawn))]
            maxj = max(lawn[i])
            maxi = max(col)
            if maxi>lawn[i][j] and maxj>lawn[i][j]:
                possible = False
                # NOTE(review): this break only leaves the inner loop; the
                # remaining rows are still scanned (harmless, just slower).
                break
    if possible:
        print 'Case #%d: YES'%(case_num+1)
        of.write('Case #%d: YES'%(case_num+1))
    else:
        print 'Case #%d: NO'%(case_num+1)
        of.write('Case #%d: NO'%(case_num+1))
    of.write('\n')
of.close()
| true |
99f97360085c053aef6213bbcbc5f87bfb0c40fd | Python | HanGuo97/OpenaiBaselines | /TRPO/utils/funciton_utils.py | UTF-8 | 5,348 | 3.1875 | 3 | [] | no_license | import copy
import collections
import tensorflow as tf
def switch(condition, then_expression, else_expression):
    """Select one of two same-shaped tensors based on a scalar condition.

    # Arguments
        condition: scalar tensor (int or bool).
        then_expression: TensorFlow operation used when condition is true.
        else_expression: TensorFlow operation used otherwise.
    """
    # tf.cond can lose the static shape, so capture it up front and
    # re-apply it to the selected tensor afterwards.
    static_shape = copy.copy(then_expression.get_shape())
    selected = tf.cond(tf.cast(condition, 'bool'),
                       lambda: then_expression,
                       lambda: else_expression)
    selected.set_shape(static_shape)
    return selected
def is_placeholder(x):
    """Return True when x is a raw tf.Tensor whose op has no inputs
    (i.e. a placeholder)."""
    if type(x) is not tf.Tensor:
        return False
    return len(x.op.inputs) == 0
class TfInput(object):
    """Generalized Tensorflow placeholder. The main differences are:
    - possibly uses multiple placeholders internally and returns multiple values
    - can apply light postprocessing to the value feed to placeholder.

    Subclasses must override get() and make_feed_dict().
    """

    def __init__(self, name="(unnamed)"):
        self.name = name

    def get(self):
        """Return the tf variable(s) representing the possibly postprocessed value
        of placeholder(s).
        """
        # Fixed: `raise NotImplemented()` raised a TypeError (NotImplemented
        # is not callable); NotImplementedError is the correct exception.
        raise NotImplementedError()

    def make_feed_dict(self, data):
        """Given data input it to the placeholder(s)."""
        # Fixed: the original signature was missing `self`, so calling
        # inpt.make_feed_dict(value) on an instance raised a TypeError.
        raise NotImplementedError()
def function(inputs, outputs, updates=None, givens=None):
    """Theano-style function factory over TensorFlow placeholders.

    Builds f(inputs) -> outputs: calling f with values for the input
    placeholders (positionally or by placeholder name as kwargs) runs the
    output expressions and returns their values.  The shape of `outputs`
    is preserved: a list of tensors yields a list of values, a dict yields
    a dict of the same type, and a single tensor yields a single value.

    Example:
        x = tf.placeholder(tf.int32, (), name="x")
        y = tf.placeholder(tf.int32, (), name="y")
        z = 3 * x + 2 * y
        lin = function([x, y], z, givens={y: 0})
        with single_threaded_session():
            initialize()
            assert lin(2) == 6
            assert lin(x=3) == 9
            assert lin(2, 2) == 10
            assert lin(x=2, y=3) == 12

    Parameters
    ----------
    inputs: [tf.placeholder or TfInput]
        list of input arguments
    outputs: [tf.Variable] or tf.Variable
        list of outputs or a single output to be returned from function.
    """
    if isinstance(outputs, list):
        return _Function(inputs, outputs, updates, givens=givens)
    if isinstance(outputs, (dict, collections.OrderedDict)):
        mapping_fn = _Function(inputs, outputs.values(), updates, givens=givens)

        def call_mapping(*args, **kwargs):
            # Re-wrap the results in the same mapping type, keyed as given.
            return type(outputs)(zip(outputs.keys(), mapping_fn(*args, **kwargs)))
        return call_mapping
    single_fn = _Function(inputs, [outputs], updates, givens=givens)
    return lambda *args, **kwargs: single_fn(*args, **kwargs)[0]
class _Function(object):
    """Callable bundling input placeholders, output tensors, and update ops.

    Calling the instance feeds the supplied positional/keyword values into
    the placeholders, runs the outputs together with the grouped update
    ops in the current session, and returns the output values.
    """

    def __init__(self, inputs, outputs, updates, givens, check_nan=False):
        # Every input must be either a TfInput wrapper or a raw placeholder.
        for inpt in inputs:
            if not issubclass(type(inpt), TfInput):
                assert len(
                    inpt.op.inputs) == 0, "inputs should all be placeholders of baselines.common.TfInput"
        self.inputs = inputs
        updates = updates or []
        # All update ops run as one grouped op, appended after the outputs.
        self.update_group = tf.group(*updates)
        self.outputs_update = list(outputs) + [self.update_group]
        self.givens = {} if givens is None else givens
        self.check_nan = check_nan

    def _feed_input(self, feed_dict, inpt, value):
        """Insert `value` into feed_dict under the placeholder(s) behind `inpt`."""
        if issubclass(type(inpt), TfInput):
            feed_dict.update(inpt.make_feed_dict(value))
        elif is_placeholder(inpt):
            feed_dict[inpt] = value

    def __call__(self, *args, **kwargs):
        assert len(args) <= len(self.inputs), "Too many arguments provided"
        feed_dict = {}
        # Update the args
        for inpt, value in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        # Update the kwargs
        kwargs_passed_inpt_names = set()
        for inpt in self.inputs[len(args):]:
            # Strip the ":0" output suffix and any scope prefix to get the
            # bare placeholder name used for kwargs matching.
            inpt_name = inpt.name.split(':')[0]
            inpt_name = inpt_name.split('/')[-1]
            assert inpt_name not in kwargs_passed_inpt_names, \
                "this function has two arguments with the same name \"{}\", so kwargs cannot be used.".format(
                inpt_name)
            if inpt_name in kwargs:
                kwargs_passed_inpt_names.add(inpt_name)
                self._feed_input(feed_dict, inpt, kwargs.pop(inpt_name))
            else:
                assert inpt in self.givens, "Missing argument " + inpt_name
        assert len(kwargs) == 0, "Function got extra arguments " + \
            str(list(kwargs.keys()))
        # Update feed dict with givens.
        for inpt in self.givens:
            feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
        # NOTE(review): get_session() and np are not imported in this file's
        # visible header -- presumably provided by a sibling module; verify.
        # [:-1] drops the update_group result appended in __init__.
        results = get_session().run(self.outputs_update,
                                    feed_dict=feed_dict)[:-1]
        if self.check_nan:
            if any(np.isnan(r).any() for r in results):
                raise RuntimeError("Nan detected")
        return results
| true |
58e1e8448aacd2ba0805372ecf8d451724982f77 | Python | alexanu/Python_Trading_Snippets | /Technical_Indicators/relative_volatility_index.py | UTF-8 | 2,444 | 2.765625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
# --- Relative Volatility Index (RVI) demo -------------------------------
# Downloads ~2 years of daily prices for one ticker, computes a 14-period
# RVI, then renders two figures: a close-price line chart and a
# candlestick-with-volume chart, each with the RVI and its 40/60
# oversold/overbought bands in a lower panel.
# input: ticker symbol and date window (the last two years up to today)
symbol = 'AAPL'
start = dt.date.today() - dt.timedelta(days = 365*2)
end = dt.date.today()
# Read daily OHLCV data from Yahoo Finance (requires network access)
df = yf.download(symbol,start,end)
n = 14 # Number of periods in the rolling window
# Day-over-day change of the adjusted close
change = df['Adj Close'].diff(1)
# Split the change into gains (up moves) and losses (down moves, positive)
df['Gain'] = change.mask(change<0,0)
df['Loss'] = abs(change.mask(change>0,0))
# RVI uses the rolling STANDARD DEVIATION of gains/losses (unlike RSI,
# which uses rolling averages) -- hence "volatility" index.
df['AVG_Gain'] = df.Gain.rolling(n).std()
df['AVG_Loss'] = df.Loss.rolling(n).std()
df['RS'] = df['AVG_Gain']/df['AVG_Loss']
df['RVI'] = 100 - (100/(1+df['RS']))
from mplfinance.original_flavor import candlestick_ohlc
# ---- Figure 1: closing price (top) + RVI with 40/60 bands (bottom) ----
fig = plt.figure(figsize=(14,7))
ax1 = plt.subplot(2, 1, 1)
ax1.plot(df['Adj Close'])
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax2 = plt.subplot(2, 1, 2)
ax2.plot(df['RVI'], label='Relative Volatility Index')
ax2.text(s='Overbought', x=df.RVI.index[30], y=60, fontsize=14)
ax2.text(s='Oversold', x=df.RVI.index[30], y=40, fontsize=14)
ax2.axhline(y=60, color='red')
ax2.axhline(y=40, color='red')
ax2.grid()
# NOTE(review): this panel plots RVI, but the y label says 'Volume'.
ax2.set_ylabel('Volume')
ax2.set_xlabel('Date')
plt.show()
# ---- Figure 2: candlestick + volume overlay (top) + RVI (bottom) ----
from matplotlib import dates as mdates
dfc = df.copy()
# Color volume bars green on up days (close above open), red otherwise.
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
# candlestick_ohlc expects dates as matplotlib float day numbers.
dfc['Date'] = mdates.date2num(dfc['Date'].tolist())
from mplfinance.original_flavor import candlestick_ohlc
fig = plt.figure(figsize=(14,7))
ax1 = plt.subplot(2, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
# Twin axis: volume bars share the x axis but use their own y scale.
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
# Scale so the volume bars stay in the lower third of the price panel.
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax2 = plt.subplot(2, 1, 2)
ax2.plot(df['RVI'], label='Relative Volatility Index')
ax2.text(s='Overbought', x=df.RVI.index[30], y=60, fontsize=14)
ax2.text(s='Oversold', x=df.RVI.index[30], y=40, fontsize=14)
ax2.axhline(y=60, color='red')
ax2.axhline(y=40, color='red')
ax2.grid()
# NOTE(review): mislabeled as in figure 1 -- this panel shows RVI.
ax2.set_ylabel('Volume')
ax2.set_xlabel('Date')
ax2.legend(loc='best')
plt.show()
b9550b09c37574eaf9f7d145a94f7f7a26c094cf | Python | jiongyzh/data_visualisation | /scatter_squares.py | UTF-8 | 458 | 3.296875 | 3 | [] | no_license | import matplotlib.pyplot as plt
# Scatter plot of n vs n**2 for n = 1..10, with styled axes and labels.
input_values = list(range(1, 11))
squares = [value ** 2 for value in input_values]

# Color each point by its x value using the Blues colormap.
plt.scatter(input_values, squares, s=40, edgecolors='none', c=input_values,
            cmap=plt.cm.Blues)

plt.title('Square Numbers', fontsize=24)
plt.xlabel('Value', fontsize=14)
plt.ylabel('Square of Value', fontsize=14)
plt.tick_params(axis='both', which='major', labelsize=14)

# Frame the full series with a little margin on every side.
plt.axis([0, 11, 0, 110])
plt.show()
# To save the figure instead of showing it interactively:
# plt.savefig('aaa.png', bbox_inches='tight')
f83e32211ef98d053b2099aac318fe8a452e5728 | Python | justolearn/python-batch-may | /covered/datatypes/set_dt.py | UTF-8 | 2,129 | 4 | 4 | [] | no_license | # s = {}
# print(type(s))
# s1 = set()
# print(type(s1))
# set1 = {"apple", "banana", "cherry"}
# set2 = {1, 5, 7, 9, 3}
# set3 = {True, False, False}
# set4 = {1, 'Apple', False, 'False', 1, 'apple'}
# # #
# print(set1)
# print(set2)
# print(set3)
# print(set4)
# print(type(set4))
#
#
# t = ('t1', 1, 'Apple', 'Apple')
# print(t)
# s = set(t)
# print(s)
"""converting string into set"""
# s = "Hello world"
# print(s)
# print(set(s))
"""accessing set"""
# set4 = {1, 'Apple', False, 'False', 1, 'apple'}
# for s in set4:
# print(s)
# a set itself is mutable, but its elements must be immutable (hashable) -- so a list cannot be a set element
# s = {1, 2, "hello"}
# print(s)
"""set method"""
"""add method"""
# s1 = {"hello", 1, "apple"}
# print(s1)
# s1.add('Banana')
# print(s1)
"""update method"""
# s1 = {"hello", 1, "apple"}
# s2 = {"banana", "apple", 2, 1}
# s3 = [1,2,3]
# # s4 = ("hello", 1, 3)
# s5 = "hello"
# s1.update(s5)
# print(s1)
"""Removing elements from a set"""
# s1 = {"banana", "apple", 2, 1}
# s1.discard(1)
# print(s1)
# s1.discard(1)
# print(s1) # discarding an element that is not present in the set does not raise an error
# s1 = {"banana", "apple", 2, 1}
# s1.remove(2)
# print(s1)
# s1.remove(2)
# print(s1) # removing an element that is not present in the set raises a KeyError
"""set operators
Union"""
# a = {1, 2, 3, 4, 5}
# b = {4, 5, 6, 7, 8}
# print(a | b)
#
"""or """
# print(a.union(b))
# print(b.union(a))
"""Intersection"""
# a = {1, 2, 3, 4, 5}
# b = {4, 5, 6, 7, 8}
#
# print(a & b)
# print(a.intersection(b))
"""difference"""
# a = {1, 2, 3, 4, 5}
# b = {4, 5, 6, 7, 8}
# print(a - b)
# print(b - a)
# print(a.difference(b))
# print(b.difference(a))
"""symmetric difference of two sets"""
# a = {1, 2, 3, 4, 5}
# b = {4, 5, 6, 7, 8}
# print(a ^ b)
# print(a.symmetric_difference(b))
"""membership in set"""
# s1 = set("apple")
# print(s1)
# print('a' in s1)
#
# s2 = {'1', 'apple', 2}
# print('apple' in s2)
# print('1' in s2)
# print(3 in s2)
"""sum method"""
# s1 = {2, 1}
# print(len(s1))
# print(sum(s1))
"""frozenset"""
# a = frozenset([1, 2, 3, 4])
# b = frozenset([3, 4, 5, 6])
# print(a)
# print(a | b)
# print(a.add(9))
| true |
89210be7137ba22768cb57013d2429cc0fe35b7d | Python | JoeyZhang111/Autoapi | /Exam/common/read_data.py | UTF-8 | 1,045 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
------------------------
@author: Joey
@file: read_data.py
@time: 2021/7/19 10:20
------------------------
"""
import xlrd,os
from common.base_dir import data_path
from conf.get_conf import ini
class GetData:
    """Load API test-case rows from one sheet of an Excel workbook (xlrd)."""

    def __init__(self, path, sheetname):
        """Open the workbook at `path` and select the sheet `sheetname`."""
        self.wb = xlrd.open_workbook(path)
        self.sheet = self.wb.sheet_by_name(sheetname)

    def get_data(self):
        """Return rows 1..n-1 as (id, name, url, method, body, res) tuples.

        Row 0 is skipped (assumed to be the header row); columns 2 and 6
        of the sheet are intentionally ignored.
        """
        columns = (0, 1, 3, 4, 5, 7)
        return [
            tuple(self.sheet.cell(row, col).value for col in columns)
            for row in range(1, self.sheet.nrows)
        ]

    def close(self):
        """Release the underlying workbook."""
        self.wb.close()
"""
path = os.path.join(data_path, ini.get("excel", "path"))
data = GetData(path, ini.get("excel", "sheetname1"))
test_data = data.get_data()
print(test_data)
"""
| true |
def sortArray(A, index):
    """Three-way (Dutch national flag) partition of A around pivot A[index], in place.

    After the call A holds: all elements < pivot, then all elements
    == pivot, then all elements > pivot. Returns None.

    Fixes two defects in the original:
      * after swapping in an element from the "larger" region, `equal` was
        advanced past it without classifying it, which could strand a
        smaller-than-pivot value in the middle region (e.g. A=[9, 5, 2, 0]
        with index=1 previously ended as [2, 5, 0, 9] -- not partitioned);
      * a leftover debug print of A on every such swap has been removed.
    """
    pivot = A[index]
    smaller, equal, larger = 0, 0, len(A)
    while equal < larger:
        if A[equal] < pivot:
            A[equal], A[smaller] = A[smaller], A[equal]
            smaller, equal = smaller + 1, equal + 1
        elif A[equal] == pivot:
            equal += 1
        else:
            # Move the current element into the "larger" region; do NOT
            # advance `equal` -- the swapped-in element is unclassified.
            larger -= 1
            A[equal], A[larger] = A[larger], A[equal]
def sort():
    """Demo driver: partition a sample list around its last element and print it."""
    values = [8, 7, 6, 5]
    sortArray(values, 3)
    print(values)
    # Note: printing a reversed() iterator shows its repr, not its contents.
    print(reversed(range(1, len(values))))


sort()
258a9e55d2f62dba62ae50f558d4e4d0a68b8f30 | Python | chenxu0602/LeetCode | /2183.count-array-pairs-divisible-by-k.py | UTF-8 | 896 | 3.09375 | 3 | [] | no_license | #
# @lc app=leetcode id=2183 lang=python3
#
# [2183] Count Array Pairs Divisible by K
#
# @lc code=start
from collections import Counter
import math
class Solution:
    def countPairs(self, nums: List[int], k: int) -> int:
        """Count index pairs (i, j) with i < j and nums[i] * nums[j] % k == 0.

        Key fact: n1 * n2 is divisible by k exactly when
        gcd(n1, k) * gcd(n2, k) is divisible by k, so only the multiset of
        gcd values matters -- pair work is O(d^2) over the distinct gcds
        instead of O(len(nums)^2) over the array.
        """
        gcd_counts = Counter(math.gcd(value, k) for value in nums)
        distinct = list(gcd_counts)
        total = 0
        for i, a in enumerate(distinct):
            for b in distinct[i:]:
                if a * b % k != 0:
                    continue
                if a == b:
                    # Unordered pairs drawn from a single gcd bucket.
                    total += gcd_counts[a] * (gcd_counts[a] - 1) // 2
                else:
                    total += gcd_counts[a] * gcd_counts[b]
        return total
# @lc code=end
| true |