blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d76acfa55f36b6f778d11572e2ec66aa329c583e | Python | imood00/python_Progate01 | /python_study_1/page11/script.py | UTF-8 | 379 | 4 | 4 | [] | no_license | x = 10
# Print a message when x (assigned just above) is greater than 30.
if x>30:
    print("xは30より大きいです")
money = 500
apple_price = 200
# When money covers apple_price, report that the apple can be bought.
if money>=apple_price:
    print("りんごを買うことができます")
| true |
2a286ac7d27adddbd7ecc79e7de234d67dc77423 | Python | luisibarra06/python | /lambda1.py | UTF-8 | 73 | 2.578125 | 3 | [] | no_license | # lambda1.py lai
# Sum three numbers via a lambda expression.
x = lambda a, b, c: a + b + c
fx = x(5, 6, 2)
# Bug fix: the original printed the lambda object itself ("<function ...>")
# and left the computed result fx unused; print the result instead.
print(fx)
| true |
6f3dedb9984752a1774d1dea2139e429e2c37406 | Python | ccuulinay/recommender_systems | /kaggle_events_ref/DataRewriter.py | UTF-8 | 5,763 | 2.671875 | 3 | [] | no_license | from __future__ import division
import pickle
import numpy as np
import scipy.io as sio
class DataRewriter:
    """Joins precomputed CF scores, friendship and popularity features into
    flat CSV files ("data_train.csv" / "data_test.csv") for a downstream
    classifier.

    All heavy artifacts (index pickles, sparse matrices) are produced by
    earlier pipeline steps and loaded eagerly in __init__.
    """

    def __init__(self):
        # Load the intermediate artifacts from the previous pipeline stages.
        self.userIndex = pickle.load(open("PE_userIndex.pkl", 'rb'))
        self.eventIndex = pickle.load(open("PE_eventIndex.pkl", 'rb'))
        self.userEventScores = sio.mmread("PE_userEventScores").todense()
        self.userSimMatrix = sio.mmread("US_userSimMatrix").todense()
        self.eventPropSim = sio.mmread("EV_eventPropSim").todense()
        self.eventContSim = sio.mmread("EV_eventContSim").todense()
        self.numFriends = sio.mmread("UF_numFriends")
        self.userFriends = sio.mmread("UF_userFriends").todense()
        self.eventPopularity = sio.mmread("EA_eventPopularity").todense()

    def userReco(self, userId, eventId):
        """User-based collaborative-filtering score for (user, event).

        Pseudo code:
            for item i
              for every other user v that has a preference for i
                compute similarity s between u and v
                incorporate v's preference for i weighted by s into a running average
            return top items ranked by weighted average
        """
        i = self.userIndex[userId]
        j = self.eventIndex[eventId]
        vs = self.userEventScores[:, j]
        sims = self.userSimMatrix[i, :]
        prod = sims * vs
        try:
            # Subtract the user's own score so it doesn't recommend itself.
            return prod[0, 0] - self.userEventScores[i, j]
        except IndexError:
            return 0

    def eventReco(self, userId, eventId):
        """Item-based CF scores for (user, event).

        Returns (property-similarity score, content-similarity score).
        Pseudo code:
            for item i
              for every item j that u has a preference for
                compute similarity s between i and j
                add u's preference for j weighted by s to a running average
            return top items, ranked by weighted average
        """
        i = self.userIndex[userId]
        j = self.eventIndex[eventId]
        js = self.userEventScores[i, :]
        psim = self.eventPropSim[:, j]
        csim = self.eventContSim[:, j]
        pprod = js * psim
        cprod = js * csim
        pscore = 0
        cscore = 0
        try:
            pscore = pprod[0, 0] - self.userEventScores[i, j]
        except IndexError:
            pass
        try:
            cscore = cprod[0, 0] - self.userEventScores[i, j]
        except IndexError:
            pass
        return pscore, cscore

    def userPop(self, userId):
        """Social reach of a user, approximated by the number of friends.

        The assumption is that users with many friends are more likely to
        attend social events.
        """
        # Bug fix: dict.has_key() was removed in Python 3 -- use "in".
        if userId in self.userIndex:
            i = self.userIndex[userId]
            try:
                return self.numFriends[0, i]
            except IndexError:
                return 0
        else:
            return 0

    def friendInfluence(self, userId):
        """Average activity level of the user's friends.

        userFriends is a (len(users), len(users)) matrix where entry (i, j)
        measures how active user i's j-th friend is; an event-happy circle
        presumably influences the user to attend as well.
        """
        nusers = np.shape(self.userFriends)[1]
        i = self.userIndex[userId]
        return (self.userFriends[i, :].sum(axis=0) / nusers)[0, 0]

    def eventPop(self, eventId):
        """Popularity of a single event.

        NOTE(review): rewriteData called this method but the original class
        never defined it (AttributeError at runtime). Reading column 0 of
        eventPopularity is the presumable intent -- confirm the column
        semantics against the step that writes EA_eventPopularity.
        """
        j = self.eventIndex[eventId]
        try:
            return self.eventPopularity[j, 0]
        except IndexError:
            return 0

    def rewriteData(self, start=1, train=True, header=True):
        """Combine the CF scores and popularity/influence features into a
        new CSV ("data_train.csv" or "data_test.csv") for the classifier.

        start: 1-based line number to start at (use 2 to skip a header row).
        """
        fn = "train.csv" if train else "test.csv"
        # Bug fix: the loop below does str operations on each line, so the
        # files must be opened in text mode, not 'rb'/'wb'.
        fin = open(fn, 'r')
        fout = open("data_" + fn, 'w')
        # write output header
        if header:
            ocolnames = ["invited", "user_reco", "evt_p_reco",
                         "evt_c_reco", "user_pop", "frnd_infl", "evt_pop"]
            if train:
                ocolnames.append("interested")
                ocolnames.append("not_interested")
            fout.write(",".join(ocolnames) + "\n")
        ln = 0
        for line in fin:
            ln += 1
            if ln < start:
                continue
            cols = line.strip().split(",")
            userId = cols[0]
            eventId = cols[1]
            invited = cols[2]
            if ln % 500 == 0:
                # Progress heartbeat every 500 lines.
                print("%s:%d (userId, eventId)=(%s, %s)" % (fn, ln, userId, eventId))
            user_reco = self.userReco(userId, eventId)
            evt_p_reco, evt_c_reco = self.eventReco(userId, eventId)
            user_pop = self.userPop(userId)
            frnd_infl = self.friendInfluence(userId)
            evt_pop = self.eventPop(eventId)
            ocols = [invited, user_reco, evt_p_reco,
                     evt_c_reco, user_pop, frnd_infl, evt_pop]
            if train:
                ocols.append(cols[4])  # interested
                ocols.append(cols[5])  # not_interested
            fout.write(",".join(map(lambda x: str(x), ocols)) + "\n")
        fin.close()
        fout.close()

    def rewriteTrainingSet(self):
        # Bug fix: rewriteData(True) passed True positionally as `start`.
        self.rewriteData(train=True)

    def rewriteTestSet(self):
        # Bug fix: rewriteData(False) passed False positionally as `start`.
        self.rewriteData(train=False)
# When running with cython, the actual class will be converted to a .so
# file, and the following code (along with the commented out import below)
# will need to be put into another .py and this should be run.
# import CRegressionData as rd
# Script entry: build the rewriter (loads all pickles/matrices), then emit
# the combined training and prediction CSVs, skipping each input header row.
dr = DataRewriter()
print("生成训练数据...\n")
dr.rewriteData(train=True, start=2, header=True)
print("生成预测数据...\n")
dr.rewriteData(train=False, start=2, header=True)
| true |
a144958ef03392c0bff7ad48e726e515a8745864 | Python | TimothyHorscroft/competitive-programming | /atcoder/agc004/b.py | UTF-8 | 571 | 2.890625 | 3 | [] | no_license | n, x = map(int, input().split())
a = list(map(int, input().split()))
# minrange[l][r] = min of a[l..r]; an n x n array filled with zeroes.
# (n and x were read from stdin on the preceding line.)
minrange = [[0 for j in range(n)] for i in range(n)]
for r in range(n):
    for l in range(r):
        minrange[l][r] = min(minrange[l][r-1], a[r])
    minrange[r][r] = a[r]
res = int(1e18)  # 10^18 is basically infinity
# Try every rotation count k: cost is k*x plus, for each position, the
# minimum over the window of k+1 candidates (wrapping around the array).
for k in range(n):
    cur = k*x
    for i in range(n):
        if i-k >= 0:
            cur += minrange[i-k][i]
        else:
            # Window wraps: min of prefix [0..i] and suffix [i-k+n..n-1].
            cur += min(minrange[0][i], minrange[i-k+n][n-1])
    res = min(res, cur)
print(res)
| true |
c5b8fb5da84c6e61966a176852ecd8ba75cc65cc | Python | Jishasudheer/phytoncourse | /Exception_handling/vaccine.py | UTF-8 | 125 | 3.171875 | 3 | [] | no_license | age=int(input("Enter age"))
# Reject under-18s; `age` is read from stdin on the preceding line.
if age<18 :
    raise Exception("Not eligible for vaccine")
else :
    print("vaccine available")
19cfb4ac4b88b55a8c55f8f71e0a4c6ebf8d4785 | Python | gmarson/Federal-University-of-Uberlandia | /Vigenere Cipher/Project/Vigenere.py | UTF-8 | 1,095 | 3.328125 | 3 | [
"Unlicense"
] | permissive | class Vigenere:
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
key = ""
    def __init__(self, key):
        """Store the cipher key (upper-cased lazily when first used)."""
        self.key = key
    def encryptMessage(self, message) -> str:
        """Return *message* encrypted with the Vigenère cipher."""
        return self.translateMessage(message, 'encrypt')
    def decryptMessage(self, message) -> str:
        """Return *message* decrypted with the Vigenère cipher."""
        return self.translateMessage(message, 'decrypt')
def translateMessage(self, message, mode) -> str:
translated = []
keyIndex = 0
self.key = self.key.upper()
for symbol in message:
num = self.LETTERS.find(symbol.upper()) # return the position symbol is on letter
if num != -1:
if mode == 'encrypt':
num += self.LETTERS.find(self.key[keyIndex]) #add if encrypting
elif mode == 'decrypt':
num -= self.LETTERS.find(self.key[keyIndex]) #subtract if decrypting
num %= len(self.LETTERS) ## handle the potential wrap-around
if symbol.isupper():
translated.append(self.LETTERS[num])
elif symbol.islower():
translated.append(self.LETTERS[num].lower())
keyIndex+=1
if keyIndex == len(self.key):
keyIndex =0
else:
translated.append(symbol)
return "".join(translated)
| true |
954743e28c55d042b417bdf62a7cf98c001f6e29 | Python | ervitis/challenges | /leetcode/minimum_index_sum_two_lists/main.py | UTF-8 | 965 | 3.9375 | 4 | [] | no_license | """
Suppose Andy and Doris want to choose a restaurant for dinner, and they both have a list of favorite restaurants represented by strings.
You need to help them find out their common interest with the least list index sum. If there is a choice tie between answers, output all of them with no order requirement. You could assume there always exists an answer.
"""
from typing import List
def find_restaurant(list1: List[str], list2: List[str]) -> List[str]:
    """Return the common restaurant(s) with the least index sum.

    Ties are all returned, in the order they appear in list1. The inputs
    are assumed to contain no duplicates and to share at least one name.
    """
    common = set(list1) & set(list2)
    index_sum = {}
    for pos, restaurant in enumerate(list1):
        if restaurant in common:
            index_sum[restaurant] = pos + list2.index(restaurant)
    best = min(index_sum.values())
    return [r for r, total in index_sum.items() if total == best]
# Ad-hoc smoke test with the problem statement's example (expects ["Shogun"]).
if __name__ == '__main__':
    print(find_restaurant(list1=["Shogun", "Tapioca Express", "Burger King", "KFC"],
                          list2=["Piatti", "The Grill at Torrey Pines", "Hungry Hunter Steakhouse", "Shogun"]))
| true |
a0b3140a599e21b9509385a5497618eb774d9394 | Python | theastrocat/redditoracle | /src/main_top.py | UTF-8 | 869 | 2.875 | 3 | [] | no_license | """
Module for scaping reddit top (front page) posts and adding them to mongo database.
Still needs a method for excluding posts that are already in the database.
"""
import time
from bs4 import BeautifulSoup
from pymongo import MongoClient
import datetime
import random
from reddit_scraping import Reddit_Scrape
# Connect to the local MongoDB instance and select the collection that
# stores scraped reddit front-page posts.
client = MongoClient('mongodb://localhost:27017/')
db = client.reddit_top_db
reddit_new_db = db.reddit_top
working = True
check_delay = 7200  # seconds between scrape rounds (2 hours)
html = 'http://www.reddit.com'
# Scrape loop: fetch the front page, store every post with the scrape
# timestamp, then sleep ~2h plus a small random jitter.
# NOTE(review): runs forever and never de-duplicates posts already stored
# (the module docstring above acknowledges this).
while working == True:
    reddit_posts = Reddit_Scrape(html)
    scrape_time = datetime.datetime.now()
    reddit_dict = reddit_posts.main_loop()
    for post, content in reddit_dict.items():
        reddit_new_db.insert_one({
            'post': post,
            'info': content,
            'time': scrape_time
        })
    time.sleep(check_delay + int(random.random()*100))
| true |
92e8113a295597f224815bdd4e49d11428e4f0fb | Python | Allien01/PY4E | /02-data-structure/dictionaries/04.py | UTF-8 | 450 | 3.3125 | 3 | [] | no_license | fname = input("Enter the name of the file: ")
fhandle = open(fname)  # open the file for reading (fname prompted just above)
count = dict()
for line in fhandle:
    line = line.rstrip()
    if line.startswith("From "):
        word = line.split()
        key = word[1]  # the sender address on each "From " line
        count[key] = count.get(key, 0) + 1  # histogram of sender addresses
key = max(count, key = count.get)  # sender with the most messages
print(key, count[key])
| true |
72d57f7cf679636bf7c4baa7906771cd13e13289 | Python | joeldiazz/m03-Aplicacions_Ofimatiques | /Extres/ejercicio-mayor_menor.py | UTF-8 | 900 | 3.859375 | 4 | [] | no_license | #Python 3.6#
"""COMPARADOR DE TRES NÚMEROS"""
#Coding: Utf-8
numero1= int(input("1.Pon un numero: "))
numero2= int(input("2.Pon un numero: "))
numero3= int(input("3.Pon un numero: "))
if(numero1 == numero2 and numero3 == numero2):
print("Los 3 numeros (",numero1,",",numero2,"y",numero3,") que has escrito son iguales")
elif(numero1 == numero2 and (numero2 >= numero3 or numero2 <= numero3)):
print("Dos de los números son iguales (",numero1,"y",numero2,")")
elif(numero3 == numero2 and (numero2 >= numero1 or numero2 <= numero1)):
print("Dos de los números son iguales (",numero3,"y",numero2,")")
elif(numero1 == numero3 and (numero2 >= numero3 or numero2 <= numero3)):
print("Dos de los números son iguales (",numero1,"y",numero3,")")
elif((numero2 >= numero3 or numero2 <= numero3)and(numero1 >= numero3 or numero1 <= numero3)):
print("Ninguno de los numeros se repite.")
| true |
fffbf3b9832d44eccb4d781a081c5a66836c8cbf | Python | yusurov/python | /exe_objet2.py | UTF-8 | 1,968 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python3
class Population:
    """Holds the humans, dragons and sheep of the simulation and advances it
    one year at a time.

    NOTE(review): the original body was heavily garbled (typos, missing
    colons, misspelled method names, no-op `del` on loop variables); this
    is a faithful reconstruction of the apparent intent -- confirm the
    banquet rule against the exercise statement.
    """

    def __init__(self):
        self.humains = []
        self.dragons = []
        self.moutons = []

    def reproduire_humains(self):
        # Fixes: missing self parameter, nb_beb/nb_bebe typo, Humains/Humain typo.
        nb_bebe = len(self.humains) // 2
        for _ in range(nb_bebe):
            # TODO(review): every baby gets the same name, as in the original.
            self.humains.append(Humain(nom="toto"))

    def reproduire_moutons(self):
        # Two lambs per pair of sheep; the original computed the count but
        # appended only once (missing loop) -- presumably a bug.
        nb_agneau = (len(self.moutons) // 2) * 2
        for _ in range(nb_agneau):
            self.moutons.append(Mouton(nom="tata"))

    def snap_violent(self):
        """Wipe out the entire population."""
        self.humains = []
        self.dragons = []
        self.moutons = []

    def passer_une_anne(self):
        """Advance the simulation by one year; return False once all humans die."""
        import math    # local imports: the original module never imported these
        import random
        self.reproduire_humains()
        self.reproduire_moutons()
        # Age everyone and drop those past their maximum age. (The original
        # "del animal" only unbound the loop variable, never the list entry.)
        self.humains = [h for h in self.humains if h.vieillir()]
        self.dragons = [d for d in self.dragons if d.vieillir()]
        self.moutons = [m for m in self.moutons if m.vieillir()]
        # Each dragon tries to eat one randomly chosen human or sheep.
        for dragon in self.dragons:
            proies = self.humains + self.moutons
            if not proies:
                break
            sacrifice = random.choice(proies)
            if dragon.peut_manger(sacrifice):
                if sacrifice in self.humains:
                    self.humains.remove(sacrifice)
                else:
                    self.moutons.remove(sacrifice)
        # One banquet per four humans (rounded up); fail when none remain.
        if len(self.humains) > 0:
            nb_banquet = math.ceil(len(self.humains) / 4)
            for _ in range(nb_banquet):
                if not self.humains:
                    print("Oh ils osnt tous morts les humains")
                    return False
        return True

    # Backward-compatible aliases for the misspelled names used in the
    # original source, in case external code calls them.
    reproduir_humains = reproduire_humains
    reproduir_moutons = reproduire_moutons
class Animal:
    """Generic base class for every animal in the simulation."""

    def __init__(self, nom):
        self.nom = nom       # the animal's name
        self.age = 0         # current age in years
        self.age_max = 42    # default lifespan; subclasses override it

    def vieillir(self):
        """Age by one year; return False once the animal is past age_max."""
        if self.age > self.age_max:
            return False
        self.age += 1
        return True

    def peut_manger(self, animal):
        """Return True if *animal* is edible (i.e. another Animal).

        Bug fixes vs. the original: missing colon after the isinstance
        test and the "returne Flse"/"returne True" typos.
        """
        if not isinstance(animal, Animal):
            print("Ca se ne mange pas ca")
            return False
        return True
class Humain(Animal):
    """A human: lives up to 50 years and only eats sheep."""

    def __init__(self, nom):
        Animal.__init__(self, nom)
        self.age_max = 50

    def peut_manger(self, animal):
        """Return True only for sheep (fixes missing colon / returne typos)."""
        if not isinstance(animal, Mouton):
            print("Je mange que le mouton")
            return False
        return True
class Dragon(Animal):
    """A dragon: very long-lived (256 years)."""

    def __init__(self, nom):
        # Bug fix: "Animal__init__(self, nom)" was a NameError; call the
        # base-class initializer properly.
        Animal.__init__(self, nom)
        self.age_max = 256

    @staticmethod
    def gener_nom():
        """Return a random dragon name."""
        import random  # local import: the original module never imported random
        return random.choice(["haha", "hoho", "huhu"])
class Mouton(Animal):
    """A sheep: short-lived (10 years) prey animal."""

    def __init__(self, nom):
        # Bug fix: "Animal__init__(self, nom)" was a NameError.
        Animal.__init__(self, nom)
        self.age_max = 10
| true |
556c9a796272f09aa263449d17cf628716a569ba | Python | crj1998/Beautyleg-Downloader | /genpassword.py | UTF-8 | 2,656 | 2.90625 | 3 | [] | no_license | import random
from binascii import hexlify
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget,QPushButton,QApplication,QLabel,QLineEdit,QGridLayout
def genpw(text):
    """Derive the activation code for *text* (a toy RSA-style transform).

    The input is reversed, hex-encoded, raised to the public exponent
    modulo a small fixed modulus, and returned as a 15-character
    zero-padded lowercase hex string.
    """
    pubKey = '010001'
    modulus = '00e0b509f6259df8'
    data = text[::-1].encode()
    # Performance fix: three-argument pow() does modular exponentiation,
    # keeping every intermediate bounded by the modulus. The original
    # "base ** exp % mod" first materialised a multi-megabit integer.
    rsa = pow(int(hexlify(data), 16), int(pubKey, 16), int(modulus, 16))
    return format(rsa, 'x').zfill(15)
class genpwWindow(QWidget):
    """Small fixed-size PyQt5 window that generates serial/activation pairs."""

    def __init__(self):
        super().__init__()
        self.createWidgets()
        self.createGridLayout()
        self.move(300, 300)
        self.setFixedSize(450, 100)
        self.setWindowTitle('激活码生成器')

    def setRandomIndex(self):
        """Fill line1 with a random 5-char serial and line2 with its code."""
        result = ''
        words = 'abcdefghijklmnopqrstuvwxyz'
        # 1-4 random letters, padded with random digits up to length 5,
        # then shuffled so letters and digits are interleaved.
        for i in range(random.randint(1, 4)):
            result += random.choice(words)
        while len(result) < 5:
            result += str(random.randint(0, 9))
        result = list(result)
        random.shuffle(result)
        result = ''.join(result)
        self.line1.setText(result)
        self.line2.setText(result + genpw(result))

    def genPW(self):
        """Compute the activation code for the serial typed into line1."""
        get = self.line1.text()
        self.line2.setText(get + genpw(get))

    def createWidgets(self):
        """Build the labels, buttons and the two line edits, and wire signals."""
        self.lb1 = QLabel('序列码:')
        self.lb2 = QLabel('激活码:')
        self.rand = QPushButton(icon=QIcon('icon/rand.ico'))
        self.rand.setToolTip("随机")
        self.gen = QPushButton('产生')
        self.rand.minimumSizeHint()
        self.rand.clicked.connect(self.setRandomIndex)
        self.gen.clicked.connect(self.genPW)
        self.line1 = QLineEdit()
        self.line2 = QLineEdit()
        # The serial field is masked like a password input.
        self.line1.setEchoMode(QLineEdit.Password)

    def createGridLayout(self):
        # Create the grid layout object and set the spacing to 10.
        grid = QGridLayout()
        grid.setSpacing(10)
        # Place the widgets on the grid.
        grid.addWidget(self.lb1, 1, 0)
        grid.addWidget(self.line1, 1, 1)
        grid.addWidget(self.lb2, 2, 0)
        grid.addWidget(self.line2, 2, 1)
        grid.addWidget(self.rand, 1, 2)
        grid.addWidget(self.gen, 2, 2, 1, 3)
        # Activate the grid layout.
        self.setLayout(grid)
def genpwText(number):
    """Print *number* serial+activation-code lines built from random seeds.

    Each seed is 1-4 random lowercase letters padded with random digits to
    length five, then shuffled.
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    for _ in range(number):
        chars = []
        for _ in range(random.randint(1, 4)):
            chars.append(random.choice(letters))
        while len(chars) < 5:
            chars.append(str(random.randint(0, 9)))
        random.shuffle(chars)
        seed = ''.join(chars)
        print(seed + genpw(seed))
if __name__ == '__main__':
    import sys
    # GUI mode is disabled; batch-print 50 codes instead.
    #app=QApplication(sys.argv)
    #interface=genpwWindow()
    #interface.show()
    #sys.exit(app.exec_())
    genpwText(50)
a6ef8e7b053c70812277e1dd82608c0fbb050c1a | Python | kantasan/zikken4 | /提出用情報工学実験IV/gizi_kyoutyo.py | UTF-8 | 4,930 | 3.0625 | 3 | [] | no_license | from matplotlib import pyplot
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
"""
listsは必修科目、list2は選択科目。
それぞれ0番目からAさん,Bさん...みたいな形式を取ること!
文字を数値に置き換える.例えばos 0 コンシス 1とか
"""
#授業の数
n = 34
graphname = 'group-0 Elective'
name = 'c0-Elective.jpg'
list_in = []
kamoku_list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20']
print("0 キャリア実践\n" +
"1 大学英語\n" +
"2 情報社会と情報倫理\n" +
"3 モデリングと設計\n" +
"4 プロジェクト・デザインII\n" +
"5 情報工学実験III,IV\n" +
"6 ソフトウェア演習I,II\n" +
"7 プログラミングI,II\n" +
"8 情報工学実験I,II\n" +
"9 アルゴリズムとデータ構造\n" +
"10 情報ネットワークI\n" +
"11 オペレーティングシステム\n" +
"12 データベースシステム\n" +
"13 コンピュータシステム\n" +
"14 計算機アーキテクチャ\n" +
"15 線型代数学\n" +
"16 情報数学I,II\n" +
"17 数学基礎演習I,II\n" +
"18 微分積分I,II\n" +
"19 物理I,II\n" +
"20 確率及び統計\n")
while len(list_in) != 5:
x = input("好きだった科目の番号を入力してください:")
if (x in list_in):
print("まだ入力していない科目を選んでください。")
elif (not x in kamoku_list):
print("0~23までの数字のみ入力してください。")
else:
list_in.append(int(x))
#print(list_in)
lists = []
list2 = []
file_name = "DS_hisshu.csv"
file_name2 = "DS_senntaku.csv"
csv_file = open(file_name, "r", encoding="ms932", errors="", newline="" )
#リスト形式
f = csv.reader(csv_file, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
#辞書形式
f = csv.DictReader(csv_file, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
f = csv.reader(csv_file, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
header = next(f)
for row in f:
list_test = []
for s in range(0,5):
list_test.append(int(row[s]))
lists.append(list_test)
"""
try:
file = open(file_name)
lines = file.readlines()
count = 0
test = []
for line in lines:
test.append(line.strip())
count = count + 1
if count == 5:
lists.append(test)
count = 0
test = []
#print(lists)
except Exception as e:
print(e)
finally:
file.close()
list2 = []
try:
file = open(file_name2)
lines2 = file.readlines()
count2 = 0
test2 = []
for line in lines2:
test2.append(int(line.strip()))
count2 = count2 + 1
if count2 == 5:
list2.append(test2)
count2 = 0
test2 = []
#print(list2)
except Exception as e:
print(e)
finally:
file.close()
"""
csv_file = open(file_name2, "r", encoding="ms932", errors="", newline="" )
#リスト形式
f = csv.reader(csv_file, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
#辞書形式
f = csv.DictReader(csv_file, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
f = csv.reader(csv_file, delimiter=",", doublequote=True, lineterminator="\r\n", quotechar='"', skipinitialspace=True)
for row in f:
list_test2 = []
for s in range(0,5):
list_test2.append(int(row[s]))
list2.append(list_test2)
#必修->ファイル読み込み
#lists =[[1,2,3,4,5],[4,5,6,7,8]]
sets = list(map(lambda x: set(x),lists))
print(sets)
#選択これも
#list2 = [[11,10,9,12,13],[11,15,16,23,24]]
set2 = list(map(lambda x: set(x),list2))
print(set2)
match_index = []
#入力
#list_in = [1,2,3,4,5]
set_in = set(list_in)
print(set_in)
"""
listsの中から1つ以上一致する人を探してindexをmatch_indexに保存
list2[match_index[i]]とかで必修を選んだ人の選択科目がわかる。
マッチしたのが1以上とかなり雑なため、マッチした数に重みを与える必要がある。
"""
for i in range(len(sets)):
print(len(sets[i] & set_in))
if len(sets[i] & set_in) >= 1:
match_index.append(i)
print(match_index)
#print(list2[match_index[0]])
recomend = [0]*n
print(recomend)
"""
マッチした人の選択科目をカウントして多いものから順にとって出力。
recomendのindexは数値化した授業に対応している。
重みをつけるならrecomendのところ
"""
for k in match_index:
for j in range(5):
recomend[list2[k][j]] += 1 * len(sets[k]&set_in)/5
print(recomend)
print(recomend.index(max(recomend)))
plt.title(graphname)
y = np.array(recomend)
x = np.array(range(0,n))
plt.bar(x,y)
plt.savefig(name)
plt.show()
| true |
deed7d0030544767369dad74827106a0f444d073 | Python | dsong127/MachineLearning | /NeuralNetwork/main.py | UTF-8 | 6,963 | 2.90625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sn
from timeit import default_timer as timer
img_size = 784   # 28x28 input pixels per MNIST digit
h_size = 100     # hidden-layer units
m = 15000        # training examples actually used (half the MNIST train set)
ts_m = 10000     # test examples
def main():
    """Load MNIST from CSV, train a 784-100-10 network for 50 epochs, plot."""
    start = timer()
    print("--------Parsing data----------------------")
    tr_features, tr_labels, ts_features, ts_labels_cm = parse_data()
    # Keep the raw test labels (ts_labels_cm) for the confusion matrix;
    # one-hot encode both label sets for training/evaluation.
    ts_labels = one_hot_encode(ts_labels_cm)
    tr_labels = one_hot_encode(tr_labels)
    print("Training input shape: {}".format(tr_features.shape))
    print("Training labels shape: {}".format(tr_labels.shape))
    end = timer()
    print("Parse data complete. Time taken: {} seconds".format(end-start))
    print("------------------------------------------")
    network_10 = Network(img_size, h_size, 10)
    # learning rate 0.1, momentum 0.9, 50 epochs
    network_10.train(0.1, 0.9, tr_features, tr_labels, ts_features, ts_labels, ts_labels_cm, 50)
class Network(object):
    """One-hidden-layer MLP trained with plain backprop plus momentum.

    Weight matrices include a bias row (hence the +1 sizes); the bias input
    is expected as a leading 1 in each input vector.
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.in_hidden_weights = self.init_weights(input_size+1, hidden_size)
        self.hidden_out_weights = self.init_weights(hidden_size+1, output_size)
        # Store prev weights for delta w calculations (momentum term).
        self.prev_w_ih = np.zeros(self.in_hidden_weights.shape)
        self.prev_w_ho = np.zeros(self.hidden_out_weights.shape)

    def init_weights(self, r, c):
        """Uniform random (r, c) weights in [-0.05, 0.05], rounded to 2 dp."""
        w = np.random.uniform(low=-0.05, high=0.05, size=(r, c))
        w = np.around(w, decimals=2)
        return w

    def compute_target_values(self, label):
        """Map a one-hot label to soft targets: 0.9 for the hot unit, 0.1 else."""
        T = []
        for value in label:
            t = 0.9 if value==1 else 0.1
            T.append(t)
        return np.array(T)

    # Feed in numpy array activation values from output layer,
    # then return index of output node with maximum value (prediction).
    def get_prediction_index(self, O):
        max = np.argmax(O)  # NOTE(review): shadows the builtin max
        return max

    def feed_forward(self, x):
        """Run one forward pass; return (hidden activations H, outputs O)."""
        # Input to hidden layer
        Zh = np.dot(x, self.in_hidden_weights)
        H = sigmoid(Zh)
        H = np.insert(H, 0, 1)  # Prepend 1 for bias
        H = H.reshape((1,h_size+1))  # normalise to a flat 1D vector
        H = np.ravel(H)
        # Hidden to output layer
        Zo = np.dot(H, self.hidden_out_weights)
        O = sigmoid(Zo)
        O = O.reshape((1,10))
        return H, O

    def back_propagation(self, O, H, label):
        """Return output and hidden error terms for one example."""
        # Get target values
        T = self.compute_target_values(label)
        # Compute output error terms: o(1-o)(t-o)
        Eo = O * (1 - O) * (T - O)
        assert(Eo.shape == ((1,10)))
        # Compute hidden error terms (bias row excluded via [1:]).
        dot = np.dot(self.hidden_out_weights[1:], Eo.T)
        sig_prime = (H[1:] * (1 - H[1:]))
        sig_prime = sig_prime.reshape((h_size,1))
        Eh = sig_prime.T * dot.T
        return Eo, Eh

    def update_weights(self, Eo, Eh, H, X, learning_rate, momentum):
        """Apply momentum-SGD updates; remember each delta for the next step."""
        # Hidden -> output weights.
        delta_w = (learning_rate * Eo.T * H).T + (momentum * self.prev_w_ho)
        self.hidden_out_weights += delta_w
        self.prev_w_ho = delta_w
        # Input -> hidden weights.
        delta_w = (learning_rate * Eh.T * X).T + (momentum * self.prev_w_ih)
        self.in_hidden_weights += delta_w
        self.prev_w_ih = delta_w

    def train(self, learning_rate, momentum, tr_inputs, tr_labels, ts_inputs, ts_labels, ts_labels_cm, nb_epoch):
        """Train for nb_epoch epochs (epoch 0 just measures the initial
        accuracy: weight updates start at epoch 1), then plot a confusion
        matrix and the accuracy curves.
        """
        tr_acc_data = []
        ts_acc_data = []
        prediction_data = []
        for epoch in range(nb_epoch+1):
            tr_incorrect = 0
            ts_incorrect = 0
            start = timer()
            # Loop through each training example.
            for input, label in zip(tr_inputs, tr_labels):
                H, O = self.feed_forward(input)
                prediction = self.get_prediction_index(O)
                if prediction != one_hot_to_number(label):
                    tr_incorrect += 1
                if epoch>0:
                    Eo, Eh = self.back_propagation(O, H, label)
                    input = input.reshape((1, 785))
                    self.update_weights(Eo, Eh, H, input, learning_rate, momentum)
            # Accuracy on the test set (no weight updates here).
            for input, label in zip(ts_inputs, ts_labels):
                H, O = self.feed_forward(input)
                prediction = self.get_prediction_index(O)
                # Collect predictions for the confusion matrix (last epoch only).
                if epoch == nb_epoch:
                    prediction_data.append(prediction)
                if prediction != one_hot_to_number(label):
                    ts_incorrect += 1
            end = timer()
            # Time elapsed for this epoch.
            print("Epoch {} \t time elapsed: {}".format(epoch, end-start))
            # Accuracies use the module-level set sizes m and ts_m.
            tr_accuracy = ((m - tr_incorrect) / m) * 100
            ts_accuracy = ((ts_m - ts_incorrect) / ts_m) * 100
            tr_acc_data.append(tr_accuracy)
            ts_acc_data.append(ts_accuracy)
            print("Training set accuracy: {} %".format(tr_accuracy))
            print("Testing set accuracy: {} %".format(ts_accuracy))
            print("------------------------------------")
        # Confusion matrix over the final epoch's test predictions.
        cm = confusion_matrix(ts_labels_cm, np.array(prediction_data))
        df_cm = pd.DataFrame(cm, index=[i for i in "0123456789"],
                             columns=[i for i in "0123456789"])
        plt.figure(figsize=(10, 10))
        sn.heatmap(df_cm, annot=True, fmt = '.1f')
        # Accuracy-vs-epoch curves for train and test.
        plt.figure(figsize=(10,10))
        epoch_data = range(nb_epoch+1)
        plt.title("Accuracy for learning rate: {}".format(learning_rate))
        plt.plot(epoch_data, tr_acc_data, label = "Training")
        plt.plot(epoch_data, ts_acc_data, label="Testing")
        plt.xlabel("Epoch")
        plt.ylabel("Accuracy %")
        plt.legend()
        plt.show()
def one_hot_encode(labels):
    """Return an (n, 10) float array with a 1 in each label's column."""
    encoded = np.zeros((len(labels), 10))
    for row, digit in enumerate(labels):
        encoded[row, digit] = 1
    return encoded
def one_hot_to_number(label):
    """Return the index of the hot (maximum) entry — the encoded digit."""
    return np.asarray(label).argmax()
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^-z)."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def parse_data():
    """Load the MNIST CSVs, scale pixels to [0,1] and set the bias column.

    Returns (train_features, train_labels, test_features, test_labels) as
    numpy arrays; only the first 15000 training rows are kept. Column 0 of
    each row held the label and is reused as the constant bias input 1.0.
    """
    train_data = pd.read_csv('data/mnist_train.csv', header=None, sep=',', engine='c', na_filter= False, low_memory=False)
    test_data = pd.read_csv('data/mnist_test.csv', header=None, sep=',', engine='c', na_filter=False, low_memory=False)
    # Extract labels BEFORE scaling, so they keep their 0-9 values.
    tr_labels = train_data.iloc[:, 0]
    tr_labels = tr_labels[:15000]
    print(tr_labels.value_counts())  # Check dataset is balanced
    train_data /= 255
    train_data.iloc[:,0] = 1.0  # label column becomes the bias input
    #tr_features = train_data
    tr_features = train_data[:15000]  # Only use half of training set
    ts_labels = test_data.iloc[:, 0]
    test_data /= 255
    test_data.iloc[:,0] = 1.0
    ts_features = test_data
    return np.array(tr_features), np.array(tr_labels), np.array(ts_features), np.array(ts_labels)
if __name__ == '__main__':
main() | true |
4b91a7a18a22f8f8e5514f44eca9507d1c2a9625 | Python | furutuki/LeetCodeSolution | /0257. Binary Tree Paths/python_dfs.py | UTF-8 | 721 | 3.5 | 4 | [
"MIT"
] | permissive | # Definition for a binary tree node.
from typing import List
class TreeNode:
    """Minimal binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        self.val = x      # node payload
        self.left = None  # left child (TreeNode or None)
        self.right = None # right child (TreeNode or None)
class Solution:
    """Collects every root-to-leaf path of a binary tree as "a->b->c" strings."""

    def __init__(self):
        self.ans = []

    def dfs(self, node, res: str):
        """Depth-first walk; *res* is the path string built so far."""
        if node is None:
            return
        res = res + str(node.val)
        if node.left is None and node.right is None:
            # Leaf reached: the accumulated path is complete.
            if res:
                self.ans.append(res)
            return
        res = res + "->"
        self.dfs(node.left, res)
        self.dfs(node.right, res)

    def binaryTreePaths(self, root) -> List[str]:
        self.dfs(root, "")
        return self.ans
| true |
f5d7c6fbed7ccf3bd948569ab062d9822c22736d | Python | songkicheon/MSE_Python | /ex190.py | UTF-8 | 239 | 3.640625 | 4 | [] | no_license | apart = [ [101, 102], [201, 202], [301, 302] ]
for i in apart: #apart에서 원소 하나씩 i 에 저장한다 예) i=[101, 102]
for j in i: #i에서 원소 하나씩 j에 저장하고 j와'호'를 출력한다
print(j,'호') | true |
cb15bc96622b13e3a4ff919305288e5218722653 | Python | jkagnes/BookStore | /FlaskBookstore/FlaskBookstore/FlaskBookstore/models/book.py | UTF-8 | 530 | 2.59375 | 3 | [] | no_license | class Book(object):
    def __init__(self, id, title, author, publisher, publishedDate, description, category, smallThumbnail, thumbnail, price, pageCount):
        """Plain data holder for one bookstore catalogue entry."""
        self.id = id
        self.title = title
        self.author = author
        self.publisher = publisher
        self.publishedDate = publishedDate
        self.description = description
        self.category = category
        self.smallThumbnail = smallThumbnail  # small cover-image URL
        self.thumbnail = thumbnail            # full-size cover-image URL
        self.price = price
        self.pageCount = pageCount
| true |
0799fe9d7f895e51fede0e30e8cc8596188e54f2 | Python | nonusDev/Algorithm | /SWEA/D1/1936.1대1가위바위보.py | UTF-8 | 163 | 2.90625 | 3 | [] | no_license | import sys
# Redirect stdin so input() reads from the sample-input file.
sys.stdin = open("1936.1대1가위바위보.txt", 'r')
x, y = map(int, input().split())
# Rock-paper-scissors: x beats y when the cyclic difference x-y is 1 or -2.
if x-y == 1 or x-y == -2:
    print('A')
else:
    print('B')
723915a0d5953d052917ee908cced4a968d30c10 | Python | AlexDarkstalker/PythonCourseraWeek2 | /countOfMaxElems.py | UTF-8 | 229 | 3.453125 | 3 | [] | no_license | num = int(input())
maxNum = num   # running maximum; the first num was read just above
countMax = 0
if num:
    countMax = 1
# Read numbers until a 0 is entered; count occurrences of the maximum.
# NOTE(review): the terminating 0 itself is compared before the loop
# exits -- confirm all inputs are positive.
while num:
    num = int(input())
    if num > maxNum:
        maxNum = num
        countMax = 1
    elif num == maxNum:
        countMax += 1
print(countMax)
| true |
51b51ec2279dab64293fd1cc206bb76be2ac8c1e | Python | jitensinha98/Python-Practice-Programs | /ex21.py | UTF-8 | 405 | 3.34375 | 3 | [] | no_license | def add(a,b):
c=a+b
return c
def sub(a, b):
    """Return a minus b."""
    return a - b
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def divide(a, b):
    """Return a divided by b."""
    quotient = a / b
    return quotient
# Exercise driver (Python 2 print-statement syntax preserved from the original).
age=add(12,2)
height=sub(14,2)
weight=multiply(2,2)
iq=divide(2,2)
print "Age=%d"%age
print "Height=%d"%height
print "Weight=%d"%weight
print "iq=%d"%iq
# Nested call chain: divide -> multiply -> sub -> add, innermost first.
p=add(age, sub(height, multiply(weight, divide(iq, 2))))
print "Abnormal=%d"%p
| true |
d0c43b0608c8b9326a48497d37f11fa434aef89d | Python | wcdawn/WilliamDawn-thesis | /ch02_neutronDiffusion/python/sketch_triangle.py | UTF-8 | 510 | 2.859375 | 3 | [
"LPPL-1.3c"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
LW = 2    # line width
FN = 'Times New Roman'
FS = 12   # font size (pt)
plt.rc('lines', lw=LW)
plt.rc('mathtext', fontset='stix')  # not explicitly Times New Roman but a good clone
plt.rc('font', family=FN, size=FS)
# Triangle vertices; the first point is repeated to close the outline.
tri = np.array([
    [0.7, 0.5],
    [0.2, 0.5],
    [0.0, -0.1],
    [0.7, 0.5]])
plt.figure()
plt.plot(tri[:,0], tri[:,1], '-ko')
plt.axis('equal')
plt.axis('off')
plt.tight_layout()
plt.savefig('../figs/sketch_triangle.pdf', bbox_inches='tight', pad_inches=0)
plt.close()
| true |
61ecd309562f4b9e088af3d445f8bd522a2ac83b | Python | jorgemauricio/proyectoGranizo | /algoritmos_procesamiento/generar_mapa_datos_nasa_2014.py | UTF-8 | 6,717 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
#######################################
# Script que permite la interpolación de los
# datos de precipitación de la NASA
# Author: Jorge Mauricio
# Email: jorge.ernesto.mauricio@gmail.com
# Date: 2018-02-01
# Version: 1.0
#######################################
"""
#!/usr/bin/env python3 # -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
# librerias
import pandas as pd
import os
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from scipy.interpolate import griddata as gd
from time import gmtime, strftime
# Programa principal
def main():
# limpiar la terminal
os.system('clear')
# Estructura final de base de datos
dataBaseStructureCaniones = "Canon,Estado,Nombre,Long,Lat,Year,Month,Day,Hour,RainIMR\n"
# ruta para guardar nombreArchivoParaPandas
# Obtener todos los archivos en data
#listaDeFechas = ['2018-01-01']
# listaDeFechas = [x for x in os.listdir('/media/jorge/U/WRF_Granizo') if x.endswith('')]
# obtener coordenadas cañones dataAntigranizo
dataAntigranizo = pd.read_csv("data/Coordenadas_caniones.csv")
#%% generar info
#%% -106.49 > Long > -97.5
#%% 17.43 > Lat > 25.23
# ruta temporal folders
rutaTemporalDeArchivos = "/media/jorge/backup1/gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGHHL.04"
# generar lista de archvos para procesamiento
#listaDeArchivos = [x for x in os.listdir(rutaTemporalDeArchivos) if x.endswith('')]
listaDeArchivos = ['2014']
# ciclo de procesamiento
for folderAnio in listaDeArchivos:
# ruta temporal de archivo
nombreTemporalDelFolderAnio = "/media/jorge/backup1/gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGHHL.04/{}".format(folderAnio)
# lista de archivos diarios
listaDeArchivosDeDias = [x for x in os.listdir(nombreTemporalDelFolderAnio) if x.endswith('')]
for folderDia in listaDeArchivosDeDias:
# ruta temporal de archivo de dias
nombreTemporalDelFolderDia = "/media/jorge/backup1/gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGHHL.04/{}/{}".format(folderAnio,folderDia)
# lista de archivos en folder diarios
listaDeArchivosEnFolderDia = [x for x in os.listdir(nombreTemporalDelFolderDia) if x.endswith('.HDF5')]
# for
for nombreDelArchivo in listaDeArchivosEnFolderDia:
# nombre temporal del archivo a procesar
nombreTemporalArchivo = "/media/jorge/backup1/gpm1.gesdisc.eosdis.nasa.gov/data/GPM_L3/GPM_3IMERGHHL.04/{}/{}/{}".format(folderAnio,folderDia, nombreDelArchivo)
#lectura del hdf5
f = h5py.File(nombreTemporalArchivo, 'r')
# variable temporal para procesar el hdf5
grid = f['Grid']
# arrays de numpy
lon = np.array(grid['lon'])
lat = np.array(grid['lat'])
precipitation = np.array(grid['precipitationCal'])
# crear la variable que guardara el texto
dataText = "Long,Lat,Prec\n"
for i in range(lon.shape[0]):
for j in range(lat.shape[0]):
tempText = "{},{},{}\n".format(lon[i], lat[j], precipitation[i,j])
dataText += tempText
# generar variables extras
nombreEnArray = nombreDelArchivo.split('.')
# fecha y minutos
tempfecha = nombreEnArray[4]
minutos = nombreEnArray[5]
fecha, temp1, temp2 = tempfecha.split('-')
# guardar a CSV
nombreArchivoParaPandas = guardarCSV(dataText, fecha, minutos)
# close hdf5
f.close()
# leer archivo en pandas
data = pd.read_csv(nombreArchivoParaPandas)
# determinar la hora de lectura
nombreTemporalHora = minutos
#print("***** nombre temporal hora", nombreTemporalHora)
# limites longitud > -106.49 y < -97.5
data = data.loc[data['Long'] > -106.49]
data = data.loc[data['Long'] < -97.5]
# limites latitud > 17.43 y < 25.23
data = data.loc[data['Lat'] > 17.43]
data = data.loc[data['Lat'] < 25.23]
# obtener valores de x, y
lons = np.array(data['Long'])
lats = np.array(data['Lat'])
#%% iniciar la gráfica
plt.clf()
# agregar locación de Coordenadas_caniones
xC = np.array(dataAntigranizo['Long'])
yC = np.array(dataAntigranizo['Lat'])
#plt.scatter(xC, yC,3, marker='o', color='r', zorder=25)
# fig = plt.figure(figsize=(48,24))
m = Basemap(projection='mill',llcrnrlat=17.43,urcrnrlat=25.23,llcrnrlon=-106.49,urcrnrlon=-97.5,resolution='h')
# generar lats, lons
x, y = m(lons, lats)
# numero de columnas y filas
numCols = len(x)
numRows = len(y)
# generar xi, yi
xi = np.linspace(x.min(), x.max(), numCols)
yi = np.linspace(y.min(), y.max(), numRows)
# generar el meshgrid
xi, yi = np.meshgrid(xi, yi)
# generar zi
z = np.array(data['Prec'])
zi = gd((x,y), z, (xi,yi), method='cubic')
#clevs
clevs = [1,5,10,20,30,50,70,100,150,300,500]
#clevs = [0,5,10,15,20,25,30,45,60,75]
#%% contour plot
cs = m.contourf(xi,yi,zi, clevs, zorder=5, alpha=0.5, cmap='rainbow')
# draw map details
#m.drawcoastlines()
#m.drawstates(linewidth=0.7)
#m.drawcountries()
#%% read municipios shape file
#m.readshapefile('shapes/MunicipiosAgs', 'Municipios')
m.readshapefile('shapes/Estados', 'Estados')
m.scatter(xC, yC, latlon=True,s=1, marker='o', color='r', zorder=25)
#%% colorbar
cbar = m.colorbar(cs, location='bottom', pad="5%")
cbar.set_label('mm')
tituloTemporalParaElMapa = "Precipitación para la hora: {}".format(nombreTemporalHora)
plt.title(tituloTemporalParaElMapa)
# Mac /Users/jorgemauricio/Documents/Research/proyectoGranizo/Maps/{}_{}.png
# Linux /home/jorge/Documents/Research/proyectoGranizo/Maps/{}_{}.png
nombreTemporalParaElMapa = "/home/jorge/Documents/Research/proyectoGranizo/data/mapsNASA/{}_{}.png".format(tempfecha,minutos)
plt.annotate('@2018 INIFAP', xy=(-102,22), xycoords='figure fraction', xytext=(0.45,0.45), color='g', zorder=50)
plt.savefig(nombreTemporalParaElMapa, dpi=300)
print('****** Genereate: {}'.format(nombreTemporalParaElMapa))
print(nombreArchivoParaPandas)
eliminarCSVTemporal(nombreArchivoParaPandas)
#%% Guardar a CSV
fileName = 'data/dataFromCanionesTestNASA_2014.csv'
textFile = open(fileName, "w")
textFile.write(dataBaseStructureCaniones)
textFile.close()
def guardarCSV(variableTexto, fecha, minutos):
    """
    Save a text variable to a CSV file under the temp/ directory.

    param: variableTexto: text content to write to the file
    param: fecha: date string used to build the file name
    param: minutos: minutes string used to build the file name
    return: path of the file that was written
    """
    fileName = 'temp/{}_{}.csv'.format(fecha, minutos)
    # context manager guarantees the handle is closed even if the write fails
    # (the original open/write/close left the file open on an exception)
    with open(fileName, "w") as textFile:
        textFile.write(variableTexto)
    return fileName
def eliminarCSVTemporal(nombreDelArchivo):
    """Delete the temporary CSV file created during processing."""
    os.unlink(nombreDelArchivo)
if __name__ == '__main__':
    # run the GPM/IMERG processing pipeline only when executed as a script
    main()
| true |
748ef33cab3540f7504e2d113a867b123bd9d9d4 | Python | stardust-r/LTW-I | /AI_navigation/UKF/astroPlot.py | UTF-8 | 6,554 | 2.953125 | 3 | [] | no_license | # astroPlot
#
# File containing different functions used for visualisation in astrosim
#
# Syntax: import astroPlot
#
# Inputs:
#
# Outputs:
#
# Other files required: none
# Subfunctions: none
#
# See also:
# Author: Pelayo Penarroya
# email: pelayo.penarroya@deimos-space.com
# Creation March 24, 2020
# Last revision: March 24, 2020
#
# Mods:
#
# Sources:
#
# ------------- BEGIN CODE --------------
# Imports
import matplotlib.pyplot as plt
from astroTransf import Inertial2Hill
def MakeComparisonPlot(title, size=8, fontsize=12, xlabel="ElapsedDays", ylabel=("X (km)", "Y (km)", "Z (km)")):
    """Create a square figure with three stacked subplots for position or velocity comparisons.

    Inputs:
        - title: str with the title for the figure.
        - size: size (inches) for the square figure.
        - fontsize: size for the title font.
        - xlabel: text for the x label (bottom subplot only).
        - ylabel: three texts for the per-subplot y labels.
    Outputs:
        - axes: list with the three subplot axes handles.
        - fig: handle to the figure.

    Author: Pelayo Penarroya (pelayo.penarroya@deimos-space.com)
    Note: the ylabel default used to be a mutable list; it is now a tuple so
    the shared default cannot be mutated between calls.
    """
    # Make the figure
    fig = plt.figure(figsize=(size, size))
    # Make the sub plots; the title attaches while the top axes are current
    axis1 = fig.add_subplot(311)
    plt.title(title, y=1.025, fontsize=fontsize)
    axis2 = fig.add_subplot(312)
    axis3 = fig.add_subplot(313)
    # Set axis labels; only the bottom subplot carries the x label
    axis1.set_ylabel(ylabel[0])
    axis2.set_ylabel(ylabel[1])
    axis3.set_ylabel(ylabel[2])
    axis3.set_xlabel(xlabel)
    axes = [axis1, axis2, axis3]
    plt.tight_layout()
    return axes, fig
def MakeResidualsPlot(title, size=8, fontsize=12, resSize=None, xlabel="ElapsedDays", ylabel=("X (km)", "Y (km)", "Z (km)")):
    """Create a figure with `resSize` stacked subplots for the residuals of an OD process.

    Inputs:
        - title: str with the title for the figure.
        - size: size (inches) for the square figure.
        - fontsize: size for the title font.
        - resSize: number of observation types in the residuals array (required).
        - xlabel, ylabel: texts for the labels.
    Outputs:
        - axes: list with the subplot axes handles.
        - fig: handle to the figure.
    Raises:
        - ValueError: when resSize is not given.

    Author: Pelayo Penarroya (pelayo.penarroya@deimos-space.com)
    """
    # Make the figure
    fig = plt.figure(figsize=(size, size))
    # check that the dimension of the residuals is given
    if resSize is None:
        # BUG FIX: this used to raise `InputError`, an undefined name, which
        # surfaced as a NameError instead of a meaningful exception
        raise ValueError("Residual dimension must be given.")
    figDesign = resSize * 100 + 10
    axes = []
    # Make one subplot per observation type and add the title once
    for ii in range(resSize):
        axes.append(fig.add_subplot(figDesign + ii + 1))
        axes[-1].set_ylabel(ylabel[ii])
        if ii == 0:
            plt.title(title, y=1.025, fontsize=fontsize)
        # NOTE: the x label is applied to every subplot, not only the bottom one
        axes[-1].set_xlabel(xlabel)
    plt.tight_layout()
    return axes, fig
def AddComparisonToPlot(axes, epochs, diff):
    # AddPosComparisonToPlot(axes, epochs, diff)
    #
    # Function to insert a position or velocity comparison in a figure with 3 subplots
    #
    # Syntax: AddComparisonToPlot(axes, epochs, diff)
    #
    # Inputs:
    #   - axes: handle to the figure axis
    #   - epochs: 1xN array with the epochs
    #   - diff: 3xN array with the difference in position or velocity
    #
    # Outputs:
    #   - axes: handle to the figure axes
    #
    # Other files required: none
    # Subfunctions: none
    #
    # See also:
    # Author: Pelayo Penarroya
    # email: pelayo.penarroya@deimos-space.com
    # Creation April 16, 2020
    # Last revision: April 16, 2020
    #
    # Mods:
    #   - April 21, 2020: input now is diff (no pos1 and pos2)
    #   - April 23, 2020: now velocities can also be plotted
    #
    # Sources:
    #
    # ------------- BEGIN CODE --------------
    # check position has three components
    if diff.shape[0] != 3:
        raise SystemError("Array must have 3 components.")
    if len(epochs.shape) != 1:
        raise SystemError("Time array must have 1 dimension.")
    # check time and diff arrays are consistent
    # NOTE(review): this indexes diff.shape[1], so a 1-D diff raises IndexError
    # here and never reaches the single-diff branch below -- confirm intent
    if diff.shape[1] != epochs.shape[0]:
        raise SystemError(
            "Object has different sizes for differential and time arrays")
    # check if we are plotting a single diff
    # NOTE(review): in this branch `axes` is used as a single axis handle
    # (axes.scatter), unlike the per-subplot indexing in the else branch
    if len(diff.shape) == 1:
        axes.scatter([diff[0]], [diff[1]],
                     [diff[2]], s=40)
    # or a series of diffs
    else:
        for ii in range(3):
            axes[ii].plot(epochs, diff[ii])
    return axes
def AddResidualsToPlot(axes, epochs, residuals, resSize):
    """Scatter OD residuals onto an existing residuals figure.

    Inputs:
        - axes: list of subplot axes (one per observation type)
        - epochs: 1xN array with the epochs
        - residuals: resSize x N array with the residuals
        - resSize: number of observation types in the residuals array
    Outputs:
        - axes: the same list of axes, for chaining

    Author: Pelayo Penarroya (pelayo.penarroya@deimos-space.com)
    """
    # sanity checks on the input shapes
    if residuals.shape[0] != resSize:
        raise SystemError("Array must have %d components." % (resSize))
    if len(epochs.shape) != 1:
        raise SystemError("Time array must have 1 dimension.")
    # the time grid and the residuals series must have the same length
    if residuals.shape[1] != epochs.shape[0]:
        raise SystemError(
            "Object has different sizes for residuals and time arrays")
    # one scatter series per observation type
    for idx in range(resSize):
        axes[idx].scatter(epochs, residuals[idx])
    return axes
| true |
86d17d9b19d1727c36ecb0fcdf0ba824c9206f8c | Python | HanHyunsoo/Python_Programming | /University_Study/lab6_7.py | UTF-8 | 804 | 3.6875 | 4 | [] | no_license | """
Chapter: day6
Topic: regular expressions
Problem: practicing regular-expression metacharacters
Author: Han Hyunsoo
Date: 2018.11.15
"""
import re # import the regular expression (regex) module
# define the assorted strings to test against
s = "teeeest"
s2 = "tetst"
s3 = "tst"
r = re.compile('e.s') # matches when exactly one arbitrary character sits between e and s
print(r.search(s))
print(r.search(s2))
print(r.search(s3))
r = re.compile('e?s') # matches s preceded by zero or one e
print(r.search(s))
print(r.search(s2))
print(r.search(s3))
r = re.compile('e*s') # matches s preceded by zero or more e's
print(r.search(s))
print(r.search(s2))
print(r.search(s3))
r = re.compile('e+s') # matches s preceded by one or more e's
print(r.search(s))
print(r.search(s2))
print(r.search(s3)) | true |
773a863eeae165b2c7beccef19ff6eafd53b480b | Python | ZongLin1105/OpenCVtest | /test12.py | UTF-8 | 776 | 2.90625 | 3 | [] | no_license | import cv2
import numpy as np
cap=cv2.VideoCapture(0) # open capture device 0 (webcam); a video file path would also work
while(1):
    # grab one frame; ret reports whether the read succeeded
    ret,frame=cap.read()
    # convert from BGR to HSV
    hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    # HSV threshold range for blue
    lower_blue=np.array([110,50,50])
    upper_blue=np.array([130,255,255])
    # build a binary mask from the thresholds
    mask=cv2.inRange(hsv,lower_blue,upper_blue)
    # bitwise-AND the frame with itself, keeping only the masked pixels
    res=cv2.bitwise_and(frame,frame,mask=mask)
    # show the images; noise in the mask usually means stray light in the scene
    cv2.imshow('frame',frame) # original frame
    cv2.imshow('mask',mask) # mask image
    cv2.imshow('res',res) # masked result
    k=cv2.waitKey(5)&0xFF
    if k==27: # Esc key exits the loop
        break
# close all windows
cv2.destroyAllWindows()
| true |
0659b3f845aa80c5d731921051507d031d0b6f16 | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4160/codes/1723_2498.py | UTF-8 | 336 | 3.4375 | 3 | [] | no_license | ha = int(input("Habitantes a: "))
# Count the years until population A (growing pa% per year) catches population B.
hb = int(input("Habitantes b: "))
pa = float(input("Percentual de crescimento populacional de a: "))
pb = float(input("Percentual de crescimento populacional de b: "))
pera = pa/100  # A's growth rate as a fraction
perb = pb/100  # B's growth rate as a fraction
t = 0  # NOTE(review): unused -- confirm it can be removed
ano = 0  # year counter
# NOTE(review): if A's rate does not exceed B's, this loop never terminates
while (ha < hb):
	ha = ha + (ha * pera)
	hb = hb + (hb * perb)
	ano = ano + 1
print(ano)
| true |
7a2992f243707e4083f144e4ead4fec0b6cf3c07 | Python | cagridz/centrosome | /tests/test_zernike.py | UTF-8 | 8,426 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | from __future__ import absolute_import
from __future__ import division
import numpy as np
import scipy.ndimage as scind
import unittest
import centrosome.zernike as z
from centrosome.cpmorphology import fill_labeled_holes, draw_line
from six.moves import range
class TestZernike(unittest.TestCase):
    """Regression tests checking Zernike moments are invariant to rotation and scale."""
    def make_zernike_indexes(self):
        """Make an Nx2 array of all the zernike indexes for n<10"""
        # only (n, m) pairs with n - m even are valid Zernike indexes
        zernike_n_m = []
        for n in range(10):
            for m in range(n+1):
                if (m+n) & 1 == 0:
                    zernike_n_m.append((n,m))
        return np.array(zernike_n_m)
    def score_rotations(self,labels,n):
        """Score the result of n rotations of the label matrix then test for equality"""
        self.assertEqual(labels.shape[0],labels.shape[1],"Must use square matrix for test")
        self.assertEqual(labels.shape[0] & 1,1,"Must be odd width/height")
        zi = self.make_zernike_indexes()
        # stack the n rotated copies vertically, each as its own label (i+1)
        test_labels = np.zeros((labels.shape[0]*n,labels.shape[0]))
        test_x = np.zeros((labels.shape[0]*n,labels.shape[0]))
        test_y = np.zeros((labels.shape[0]*n,labels.shape[0]))
        diameter = labels.shape[0]
        radius = labels.shape[0]//2
        y,x=np.mgrid[-radius:radius+1,-radius:radius+1].astype(float)/radius
        anti_mask = x**2+y**2 > 1
        x[anti_mask] = 0
        y[anti_mask] = 0
        min_pixels = 100000
        max_pixels = 0
        for i in range(0,n):
            angle = 360*i // n # believe it or not, in degrees!
            off_x = labels.shape[0]*i
            off_y = 0
            rotated_labels = scind.rotate(labels,angle,order=0,reshape=False)
            # rotation changes the pixel count slightly; track the spread to
            # size the comparison tolerance below
            pixels = np.sum(rotated_labels)
            min_pixels = min(min_pixels,pixels)
            max_pixels = max(max_pixels,pixels)
            x_mask = x.copy()
            y_mask = y.copy()
            x_mask[rotated_labels==0]=0
            y_mask[rotated_labels==0]=0
            test_labels[off_x:off_x+diameter,
                        off_y:off_y+diameter] = rotated_labels * (i+1)
            test_x[off_x:off_x+diameter,
                   off_y:off_y+diameter] = x_mask
            test_y[off_x:off_x+diameter,
                   off_y:off_y+diameter] = y_mask
        zf = z.construct_zernike_polynomials(test_x,test_y,zi)
        scores = z.score_zernike(zf,np.ones((n,))*radius,test_labels)
        score_0=scores[0]
        epsilon = 2.0*(max(1,max_pixels-min_pixels))/max_pixels
        for score in scores[1:,:]:
            self.assertTrue(np.all(np.abs(score-score_0)<epsilon))
    def score_scales(self,labels,n):
        """Score the result of n 3x scalings of the label matrix then test for equality"""
        self.assertEqual(labels.shape[0],labels.shape[1],"Must use square matrix for test")
        self.assertEqual(labels.shape[0] & 1,1,"Must be odd width/height")
        # stack the n+1 scaled copies (1x, 3x, 9x, ...) vertically
        width = labels.shape[0] * 3**n
        height = width * (n+1)
        zi = self.make_zernike_indexes()
        test_labels = np.zeros((height,width))
        test_x = np.zeros((height,width))
        test_y = np.zeros((height,width))
        radii = []
        for i in range(n+1):
            scaled_labels = scind.zoom(labels,3**i,order=0)
            diameter = scaled_labels.shape[0]
            radius = scaled_labels.shape[0]//2
            radii.append(radius)
            y,x=np.mgrid[-radius:radius+1,-radius:radius+1].astype(float)/radius
            anti_mask = x**2+y**2 > 1
            x[anti_mask] = 0
            y[anti_mask] = 0
            off_x = width*i
            off_y = 0
            x[scaled_labels==0]=0
            y[scaled_labels==0]=0
            test_labels[off_x:off_x+diameter,
                        off_y:off_y+diameter] = scaled_labels * (i+1)
            test_x[off_x:off_x+diameter,
                   off_y:off_y+diameter] = x
            test_y[off_x:off_x+diameter,
                   off_y:off_y+diameter] = y
        zf = z.construct_zernike_polynomials(test_x,test_y,zi)
        scores = z.score_zernike(zf,np.array(radii),test_labels)
        score_0=scores[0]
        epsilon = .02
        for score in scores[1:,:]:
            self.assertTrue(np.all(np.abs(score-score_0)<epsilon))
    def test_00_00_zeros(self):
        """Test construct_zernike_polynomials on an empty image"""
        zi = self.make_zernike_indexes()
        zf = z.construct_zernike_polynomials(np.zeros((100,100)),
                                             np.zeros((100,100)),
                                             zi)
        # All zernikes with m!=0 should be zero
        m_ne_0 = np.array([i for i in range(zi.shape[0]) if zi[i,1]])
        m_eq_0 = np.array([i for i in range(zi.shape[0]) if zi[i,1]==0])
        self.assertTrue(np.all(zf[:,:,m_ne_0]==0))
        self.assertTrue(np.all(zf[:,:,m_eq_0]!=0))
        scores = z.score_zernike(zf, np.array([]), np.zeros((100,100),int))
        self.assertEqual(np.product(scores.shape), 0)
    def test_01_01_one_object(self):
        """Test Zernike on one single circle"""
        zi = self.make_zernike_indexes()
        y,x = np.mgrid[-50:51,-50:51].astype(float)/50
        labels = x**2+y**2 <=1
        x[labels==0]=0
        y[labels==0]=0
        zf = z.construct_zernike_polynomials(x, y, zi)
        scores = z.score_zernike(zf,[50], labels)
        # Zernike 0,0 should be 1 and others should be zero within
        # an approximation of 1/radius
        epsilon = 1.0/50.0
        self.assertTrue(abs(scores[0,0]-1) < epsilon )
        self.assertTrue(np.all(scores[0,1:] < epsilon))
    def test_02_01_half_circle_rotate(self):
        y,x = np.mgrid[-10:11,-10:11].astype(float)/10
        labels= x**2+y**2 <=1
        labels[y>0]=False
        labels = labels.astype(int)
        self.score_rotations(labels, 12)
    def test_02_02_triangle_rotate(self):
        labels = np.zeros((31,31),int)
        draw_line(labels, (15,0), (5,25))
        draw_line(labels, (5,25),(25,25))
        draw_line(labels, (25,25),(15,0))
        labels = fill_labeled_holes(labels)
        labels = labels>0
        self.score_rotations(labels, 12)
    def test_02_03_random_objects_rotate(self):
        np.random.seed(0)
        y,x = np.mgrid[-50:50,-50:50].astype(float)/50
        # NOTE(review): min/max shadow the builtins within this method
        min = int(50/np.sqrt(2))+1
        max = 100-min
        for points in range(4,12):
            labels = np.zeros((101,101),int)
            coords = np.random.uniform(low=min,high=max,size=(points,2)).astype(int)
            angles = np.array([np.arctan2(y[yi,xi],x[yi,xi]) for xi,yi in coords])
            # NOTE(review): `order` is computed but never used -- confirm the
            # vertices were meant to be visited in angular order
            order = np.argsort(angles)
            for i in range(points-1):
                draw_line(labels,coords[i],coords[i+1])
            # closes the polygon using the last value of loop variable i
            draw_line(labels,coords[i],coords[0])
            fill_labeled_holes(labels)
            self.score_rotations(labels,12)
    def test_03_01_half_circle_scale(self):
        y,x = np.mgrid[-10:11,-10:11].astype(float)/10
        labels= x**2+y**2 <=1
        labels[y>=0]=False
        self.score_scales(labels, 2)
    def test_03_02_triangle_scale(self):
        labels = np.zeros((31,31),int)
        draw_line(labels, (15,0), (5,25))
        draw_line(labels, (5,25),(25,25))
        draw_line(labels, (25,25),(15,0))
        labels = fill_labeled_holes(labels)
        labels = labels>0
        self.score_scales(labels, 2)
    def test_03_03_random_objects_scale(self):
        np.random.seed(0)
        y,x = np.mgrid[-20:20,-20:20].astype(float)/20
        # NOTE(review): min/max shadow the builtins within this method
        min = int(20/np.sqrt(2))+1
        max = 40-min
        for points in range(4,12):
            labels = np.zeros((41,41),int)
            coords = np.random.uniform(low=min,high=max,size=(points,2)).astype(int)
            angles = np.array([np.arctan2(y[yi,xi],x[yi,xi]) for xi,yi in coords])
            order = np.argsort(angles)
            for i in range(points-1):
                draw_line(labels,coords[i],coords[i+1])
            # closes the polygon using the last value of loop variable i
            draw_line(labels,coords[i],coords[0])
            fill_labeled_holes(labels)
            self.score_scales(labels,2)
class TestGetZerikeNumbers(unittest.TestCase):
    """Checks the list of (n, m) Zernike indexes produced by get_zernike_indexes."""

    def test_01_01_test_3(self):
        # indexes for n < 4, compared after sorting rows by (n, m)
        actual = np.array(z.get_zernike_indexes(4))
        actual = actual[np.lexsort((actual[:, 1], actual[:, 0]))]
        wanted = np.array(((0, 0), (1, 1), (2, 0), (2, 2), (3, 1), (3, 3)), int)
        self.assertTrue(np.all(wanted == actual))
| true |
abc38e143229409c14d24e9054228eec6e33387d | Python | hjjiang/Vending-Machine | /Money.py | UTF-8 | 1,488 | 3.796875 | 4 | [] | no_license | class Money(object):
    def __init__(self, value, amount):
        """Store the unit value and unit count, caching their product as TotalAmount."""
        self.value = value
        self.amount = amount
        self.TotalAmount = value * amount
    def getTotalAmount(self):
        """Return the cached combined value of all units (value * amount)."""
        return self.TotalAmount
    def getAmount(self):
        """Return the number of units currently held."""
        return self.amount
def addAmount(self, amount):
self.amount += amount
self.TotalAmount = self.getValue() * self.amount
def setAmount(self, amount):
self.amount = amount
self.TotalAmount = self.getValue() * self.amount
    def getValue(self):
        """Return the monetary value of a single unit."""
        return self.value
class Penny(Money):
    """One-cent coin (unit value 0.01)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(Penny, self)
        super().__init__(.01, amount)
class Nickel(Money):
    """Five-cent coin (unit value 0.05)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(Nickel, self)
        super().__init__(.05, amount)
class Dime(Money):
    """Ten-cent coin (unit value 0.10)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(Dime, self)
        super().__init__(.10, amount)
class Quarter(Money):
    """Twenty-five-cent coin (unit value 0.25)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(Quarter, self)
        super().__init__(.25, amount)
class OneDollar(Money):
    """One-dollar bill (unit value 1.00)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(OneDollar, self)
        super().__init__(1.00, amount)
class FiveDollars(Money):
    """Five-dollar bill (unit value 5.00)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(FiveDollars, self)
        super().__init__(5.00, amount)
class TenDollars(Money):
    """Ten-dollar bill (unit value 10.00)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(TenDollars, self)
        super().__init__(10.00, amount)
class TwentyDollars(Money):
    """Twenty-dollar bill (unit value 20.00)."""

    def __init__(self, amount):
        # Python 3 zero-argument super() is equivalent to super(TwentyDollars, self)
        super().__init__(20.00, amount)
| true |
4ed40bf8166429dc6d02a691819c4983b878f057 | Python | DEVESHTARASIA/json-resume-to-latex | /json_to_tex/json_to_tex/__main__.py | UTF-8 | 5,823 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import json_to_tex as jtt
import json
import os
import sys
import re
from pathlib import Path
import argparse
# Module-level CLI definition; main() calls parser.parse_args().
# NOTE(review): --output-filestem has no default, so args.output_filestem is
# None when the option is omitted.
parser = argparse.ArgumentParser()
parser.add_argument(
    'filepaths',
    type=Path,
    nargs='+',
    help='Filepaths to text template and JSON files. JSON files will be merged in the order specified from left to right.')
parser.add_argument(
    '--output-dirpath',
    type=Path,
    default=Path('.'),
    help='Output directory for generated files. Default is current directory.')
parser.add_argument(
    '--output-filestem',
    type=str,
    help='Specifies a stem that will be used to named output files. If not specified, tex_template_filepath stem is used.')
parser.add_argument(
    '--include-tags',
    nargs='*',
    help='Only JSON entries matching the specified tags or entries with no tags specified will be processed.')
parser.add_argument(
    '--exclude-tags',
    nargs='*',
    help='Only JSON entries NOT matching the specified tags or entries with no tags specified will be processed.')
parser.add_argument(
    '--no-prune-property-names',
    nargs='*',
    default=[],
    help='A list of property names that will not be pruned from the JSON data whether or not it is used in the tex template.')
parser.add_argument(
    '--json-merge-property-name',
    type=str,
    help='The property values associated with property name are compared to determine the equality of two JSON objects when their equality cannot be inferred by the hierarchical structure.')
parser.add_argument(
    '--json-sort-property-name',
    type=str,
    help='The property values associated with property name are compared to determine the ordering of two JSON objects when their order cannot be inferred by the hierarchical structure.')
def filter_filepaths_by_file_suffix(filepaths, file_suffix):
    """Return the paths from *filepaths* whose suffix equals *file_suffix* (e.g. '.tex')."""
    matching = []
    for candidate in filepaths:
        if candidate.suffix == file_suffix:
            matching.append(candidate)
    return matching
def main():
    """CLI entry point: merge the JSON inputs, apply tag filters, prune, and render the tex template."""
    args = parser.parse_args()

    tex_template_filepath = filter_filepaths_by_file_suffix(args.filepaths, '.tex')
    json_filepaths = filter_filepaths_by_file_suffix(args.filepaths, '.json')

    if len(tex_template_filepath) > 1:
        # BUG FIX: ' '.join over Path objects raised TypeError; stringify first
        sys.exit('Only one tex template may be specified. You specified: {}'.format(' '.join(str(p) for p in tex_template_filepath)))
    else:
        # NOTE(review): an empty list still raises IndexError here -- confirm
        # whether a dedicated error message is wanted for that case
        tex_template_filepath = tex_template_filepath[0]

    # BUG FIX: --output-filestem defaults to None and len(None) raised a
    # TypeError; a truthiness check covers both None and the empty string
    if not args.output_filestem:
        args.output_filestem = tex_template_filepath.stem

    template = jtt.generate_template(tex_template_filepath)

    # optional comparator deciding when two JSON objects are "the same" entry
    merge_comp = None
    if args.json_merge_property_name:
        merge_comp = lambda v_cur, v_new: isinstance(v_cur, dict) and (args.json_merge_property_name in v_cur) and isinstance(v_new, dict) and (args.json_merge_property_name in v_new) and (v_cur[args.json_merge_property_name] == v_new[args.json_merge_property_name])

    # optional sort key; objects missing the property sort last
    merge_sort_key = None
    if args.json_sort_property_name:
        merge_sort_key = lambda obj : (args.json_sort_property_name not in obj, obj.get(args.json_sort_property_name, None))

    # merge every JSON input, left to right
    merged_json = {}
    for json_filepath in json_filepaths:
        jtt.merge_obj(merged_json, jtt.load_json(json_filepath), merge_comp=merge_comp, sort_key=merge_sort_key)

    def filter_func(target_tags, include):
        """Build a predicate for jtt.filter_obj from include/exclude tag patterns."""
        def re_tag_match(patterns, tags):
            # True when any regex pattern matches any tag
            if not isinstance(patterns, (list, set, dict)):
                patterns = {patterns}
            if not isinstance(tags, (list, set, dict)):
                tags = {tags}
            for pattern in patterns:
                for tag in tags:
                    if re.search(pattern, tag):
                        return True
            return False

        def f(value):
            # untagged values are always kept
            if not isinstance(value, dict):
                return True
            if 'tags' not in value:
                return True
            tags = value['tags']

            # "property-only" tags carry a list of property names they govern
            property_only_tags = set()
            if isinstance(tags, dict):
                for tag, properties in tags.items():
                    if isinstance(properties, list) and len(properties):
                        property_only_tags.add(tag)

            for property_only_tag in property_only_tags:
                for property in tags[property_only_tag]:
                    if re_tag_match(target_tags, property_only_tag) and (property in value):
                        if include:
                            # Not sure that there is a symmetrical case for include tags
                            pass
                        else:
                            del value[property]

            if len(tags) == len(property_only_tags):
                return True

            if re_tag_match(target_tags, tags) and not re_tag_match(target_tags, property_only_tags):
                return include
            return not include
        return f

    # (a stray debug `print(args.include_tags)` was removed here)
    if args.include_tags:
        jtt.filter_obj(merged_json, filter_func(args.include_tags, True))
    if args.exclude_tags:
        jtt.filter_obj(merged_json, filter_func(args.exclude_tags, False))

    jtt.prune_obj(merged_json, template, no_prune_property_names=args.no_prune_property_names)

    # write the pruned JSON and the rendered tex next to each other
    args.output_dirpath.mkdir(parents=True, exist_ok=True)
    with open(args.output_dirpath.joinpath(''.join((args.output_filestem, '.json'))), 'w+') as file:
        json.dump(merged_json, file)
    with open(args.output_dirpath.joinpath(''.join((args.output_filestem, '.tex'))), 'w+') as file:
        tex = jtt.json_to_tex(merged_json, template)
        tex = jtt.remove_empty_environments(tex)
        file.write(tex)
if __name__ == '__main__':
main() | true |
0cc7217a781fdb06d4b2636e2bef864703c9f8d0 | Python | a-shchupakov/Sky_viewer | /sky.py | UTF-8 | 2,570 | 2.9375 | 3 | [] | no_license | import os
import argparse
import sys
from tkinter import *

from modules import sky_gui
def check_version():
    """Exit with a message when the interpreter is older than Python 3.3.

    BUG FIX: this function uses `sys`, which previously only reached this
    module by leaking out of `from tkinter import *`; an explicit
    `import sys` now sits at the top of the file.
    """
    if sys.version_info < (3, 3):
        print('Use python >= 3.3', file=sys.stderr)
        sys.exit()
def raise_error():
    """Print a usage hint and terminate the process."""
    print('Usage error.\r\nTry using ./sky.py --help')
    raise SystemExit
def create_parser():
    """Build the argparse parser describing every command line option of the viewer."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--height', type=int, default=600,
                            help='Base height of a window. Default value is 600')
    arg_parser.add_argument('--width', type=int, default=900,
                            help='Base width of a window. \nDefault value is 900')
    arg_parser.add_argument('--fov', type=int, default=65,
                            help='Field of view in percents (from 1 to 100). Default value is 65')
    arg_parser.add_argument('-b', '--bright', type=str, default='more 0',
                            help='Choose brightness filter. There are two options - "less" or "more". Then choose apparent'
                                 'magnitude value. For example "more 5" entails displaying stars which magnitude value more'
                                 'than 5. The brighter an object appears, the lower its magnitude value '
                                 '(i.e. inverse relation).')
    arg_parser.add_argument('-m', '--music', type=str, default='Thunderbird.mp3',
                            help='Choose music file which will be played. Default file is "Thunderbird.mp3".'
                                 'You can disable it in app by pressing RMB')
    return arg_parser
def check_fov(fov):
    """Exit via raise_error() unless fov lies in the inclusive range 1..100."""
    if fov < 1 or fov > 100:
        raise_error()
def check_bright(bright):
    """Validate a brightness filter such as "more 5"; exit via raise_error() when malformed.

    An empty or None value is accepted unchanged (no filter applied).
    """
    if not bright:
        return
    parts = bright.split()
    # the first token must select the filter direction
    if parts[0] != 'more' and parts[0] != 'less':
        raise_error()
    magnitude = None
    try:
        magnitude = float(parts[1])
    except (ValueError, IndexError):
        raise_error()
    else:
        # the apparent magnitude must lie in [0, 100)
        if magnitude < 0 or magnitude >= 100:
            raise_error()
def main():
    """Validate the command line input and launch the configuration window."""
    check_version()
    args = create_parser().parse_args()
    check_fov(args.fov)
    check_bright(args.bright)
    window = sky_gui.ConfigurationWindow(canvas_width=args.width,
                                         canvas_height=args.height,
                                         fov=args.fov,
                                         bright=args.bright,
                                         music_path=args.music)
    window.mainloop()
if __name__ == '__main__':
    # start the sky viewer only when executed as a script
    main()
| true |
f115ac96a2b419f6441658c7948e229c1e0f06dc | Python | NRdeARK/Arduino | /testvisual/Untitled-1.py | UTF-8 | 128 | 3.453125 | 3 | [] | no_license | t=input("")
# Echo each character of the typed string that is a letter (case-insensitive).
text = str(t)
# BUG FIX: `text.upper` (missing parentheses) bound the method instead of
# calling it, and `for i in text` iterated characters, so TEXT[i] / text[i]
# raised TypeError; iterate over indexes instead.
TEXT = text.upper()
for i in range(len(text)):
    # BUG FIX: the alphabet literal had 'N' and 'M' transposed ("...KLNMO...")
    if TEXT[i] in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
        print(text[i])
| true |
c0915d0a00b134ee8ef6dcfee9eeb796a690f96a | Python | tonghuikang/live-pitch-tracking | /poster/step_1.py | UTF-8 | 4,828 | 2.734375 | 3 | [] | no_license | '''
This only works for piano-like sounds; what happens if a note is replaced by
another at exactly the same frequency? Piano works because its notes dampen.
'''
import numpy as np
import matplotlib.pyplot as plt
import sounddevice as sd
import soundfile as sf
import time
import os
start_time = time.time()  # wall-clock reference for the timing printouts below
# read file
fileDir = os.path.dirname(os.path.realpath('__file__'))
short_file_name = '../sounds/ttls u3.wav'
file_name = os.path.join(fileDir, short_file_name)
file_name = os.path.abspath(os.path.realpath(file_name))
ref, sample_rate = sf.read(file_name) # not mp3 please
# NOTE(review): the slice below hard-codes 44100 instead of sample_rate --
# confirm the wav really is 44.1 kHz
t_start = 0.0
t_end = 5.8
signal = ref[int(t_start * 44100): int(t_end * 44100), 0]
signal_length = len(signal)
# add noise so that silent parts will not give ambiguous values
# signal = np.add(signal, 0.001*np.random.randn(len(signal)))
#sd.play(signal, sample_rate)
#print "--- %s seconds --- the sound is played" % (time.time() - start_time)
# taking absolute
#signal_square = np.multiply(signal, signal)
signal_square = np.absolute(signal)
signal_square = 0.05 * np.array(signal_square)
# the size of the window should never be related to the frequency, which is unknown
window_size = 4096
window_type = 'rect'
# rect, trig, or sin2
# sliding-window signal "energy" using the selected window function
if window_type == 'rect':
    energy = [np.sum(signal_square[x:x + window_size]) for x in range(signal_length - window_size)] # rectangular window
elif window_type == 'sin2':
    window_function = [(np.sin(np.pi * x / window_size)) ** 2 for x in range(window_size)]
    energy = [np.sum(np.multiply(signal_square[x:x + window_size], window_function)) for x in range(signal_length - window_size)]
    #energy = 1 / (float(window_size)) ** (3.0 / 4.0) * np.array(energy) # maybe not necessary
elif window_type == 'trig':
    window_function = [1.0 - np.absolute(2*x / window_size - 1.0) for x in range(window_size)]
    energy = [np.sum(np.multiply(signal_square[x:x + window_size], window_function)) for x in range(signal_length - window_size)]
    #energy = 1 / (float(window_size)) ** (3.0 / 4.0) * np.array(energy) # maybe not necessary
# NOTE(review): this normalisation is applied a second time just before
# plotting (see below) -- confirm that is intentional
energy = 1.0 / (float(window_size)) ** (1.0 / 4.0) * np.array(energy)
energy_noise = 0.1
if energy_noise != 0:
    energy = np.add(energy, energy_noise*np.random.randn(len(energy)))
energy_time = time.time() - start_time
print("--- %s seconds --- energy calculations are done" % energy_time)
# derivative = [np.arctan(44100*(energy[x+1] - energy[x])) for x in range(len(energy) - 1)]
# Pearson correlation coefficient r between energy and time over a sliding
# window, sampled every `interval` samples -- presumably high |r| marks a
# sustained rise/decay of energy (note onset/decay); TODO confirm
interval = 400
r_list_length = ((signal_length - 2 * window_size) // interval)
r_list = [0] * r_list_length
r_list_x_series = [0] * r_list_length
for series_number in range(r_list_length):
    x = series_number * interval
    x_mean = x + window_size / 2.0
    y_mean = np.sum(energy[x:x + window_size]) / window_size
    r_num = np.sum([(energy[t + x] - y_mean) * (t + x - x_mean) for t in range(window_size)])
    r_dim = np.sqrt(np.sum([(t + x - x_mean) ** 2 for t in range(window_size)]) * np.sum(
        [(energy[t + x] - y_mean) ** 2 for t in range(window_size)]))
    # x - x_mean can be simplified I guess
    if np.absolute(r_dim) < 0.001:
        print("zero")
    r = r_num / r_dim
    r_list_x_series[series_number] = t_start + (x + window_size*2.0)/sample_rate
    r_list[series_number] = r
r_sq_time = time.time() - (energy_time + start_time)
print("--- %s seconds --- plotting" % r_sq_time)
# build a timestamp safe for use in file names
time_string = time.strftime('%x %X')
time_string = time_string.replace(':', '')
time_string = time_string.replace(r'/', '')
annotation = 'datetime generated: {} \n' \
             '{} - from {:.4f}s to {:.4f}s \n '\
             'window type: {} - energy_noise: {} - window size: {} - r_sq interval: {} \n '\
             'energy_calculations: {:06.2f} - r_sq calculations: {:06.2f}' \
    .format(time_string, short_file_name, t_start, t_end, window_type, energy_noise, window_size, interval, energy_time, r_sq_time)
# plot raw signal (blue), windowed energy (green) and the r series (red)
fig = plt.figure(figsize=(20,8))
ax = fig.add_subplot(111)
ax.set_xlim(left=t_start, right=t_end)
plt.tight_layout()
ax.text(0.99, 0.98, annotation,
        verticalalignment='top', horizontalalignment='right',
        transform=ax.transAxes,
        color='green', fontsize=8)
time_x_series = np.arange(t_start, t_end+0.01, 1.0/sample_rate)
time_x_series = time_x_series[:signal_length]
energy_x_series = np.arange(t_start + float(window_size)/sample_rate, t_end+0.01, 1.0/sample_rate)
energy_x_series = energy_x_series[:signal_length - window_size]
ax.plot(time_x_series, signal, lw=0.08, color="blue")
energy = 1.0 / (float(window_size)) ** (1.0 / 4.0) * np.array(energy) # much time spent finding out you need to 'float'
ax.plot(energy_x_series, energy, lw=0.04, color="green")
ax.plot(r_list_x_series, r_list, lw=2.0, color="red")
# plt.plot(derivative, lw=0.2)
# sd.play(signal, sample_rate)
plt.savefig("plots/{}.svg".format(time_string), bbox_inches='tight')
plt.show()
| true |
95dc905c95377d6d1b4114fc259669ec66b0f029 | Python | braingram/comando | /pycomando/protocols/base.py | UTF-8 | 1,174 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
#import sys
import weakref
from .. import errors
from ..comando import to_bytes, stob
#if sys.version_info >= (3, 0):
# stob = lambda s: s.encode('latin1') if isinstance(s, str) else s
# btos = lambda b: b.decode('latin1') if isinstance(b, bytes) else b
#else:
# stob = str
# btos = str
class Protocol(object):
    """The most basic protocol that doesn't do anything"""

    def __init__(self, comm=None, index=0):
        # index identifies this protocol in multiplexed comando messages
        self.index = index
        self.comm = None
        if comm is not None:
            self.assign_comm(comm)

    def assign_comm(self, comm):
        """Remember the comm through a weak reference so it can be collected."""
        self.comm = weakref.ref(comm)

    def send_message(self, bs):
        """Send *bs* over the comm, prefixed with this protocol's index byte."""
        if self.comm is None:
            raise errors.ProtocolError(
                "Protocol[%s] cannot send, no comm defined" % (self))
        target = self.comm()
        if target is None:
            raise errors.ProtocolError(
                "Protocol[%s] cannot send, comm has expired" % (self))
        target.send_message(to_bytes(self.index) + stob(bs))

    def receive_message(self, bs):
        """Subclasses must override this to handle an incoming payload."""
        raise NotImplementedError(
            "Base Protocol does not know how to receive messages")
| true |
01b8011c5093a8fc05e5362e65e54bafbf4c8844 | Python | ides13/claimsim | /claimsim20200705.py | UTF-8 | 4,721 | 2.984375 | 3 | [] | no_license | #===============================================================================
# 爬Google美國的美專說明書
#===============================================================================
from bs4 import BeautifulSoup
import requests
def download_patent_html(patentno):
    """Download the Google Patents page for *patentno*.

    Side effects: writes '<patentno>.html' in the working directory and
    appends the fetched URL to urldownload.txt.
    FIX: the original left both file handles unclosed
    (``open(...).write(...)``); context managers close them deterministically.
    """
    url = 'https://patents.google.com/patent/{}'.format(patentno)
    response = requests.get(url)  # (url, allow_redirects=True)
    with open(patentno + ".html", 'wb') as html_file:
        html_file.write(response.content)
    with open("urldownload.txt", "a") as fp:
        fp.write('\n{}'.format(url))
    return
class Patent:
    """One US patent, backed by a locally cached Google Patents HTML page.

    On construction the page '<patent_num>.html' is parsed; if it is missing
    it is downloaded first via download_patent_html() and parsed afterwards.
    """

    def __init__(self, no_patent=""):
        self.fetched_details = False
        self.claim01 = None
        self.abstract = None
        self.data = None
        self.patent_num = no_patent
        try:
            self.fetch_details()
        except FileNotFoundError:
            # Cache miss: fetch the page once, then parse it.
            print("No such file or directory:" + self.patent_num + ".html")
            download_patent_html(self.patent_num)
            print("downloaded")
            self.fetch_details()
        return

    def fetch_details(self):
        """Parse abstract and claim 1 out of the cached HTML (best effort)."""
        self.fetched_details = True
        # FIX: close the file handle (the original used open(...).read()).
        with open(self.patent_num + ".html", 'rb') as html_file:
            self.data = html_file.read()
        soup = BeautifulSoup(self.data, 'html.parser')
        try:
            self.patent_date = 1
        except Exception:
            pass
        try:
            # Abstract lives in the DC.description meta tag.
            abstractsoup = soup.find('meta', attrs={'name': 'DC.description'})
            self.abstract = abstractsoup['content']
        except Exception:
            # Missing tag -> abstract stays None (best-effort parsing).
            pass
        try:
            # Claim 1 is the <div num="00001"> element.
            claim01soup = soup.find('div', num='00001')
            self.claim01 = claim01soup.text
        except Exception:
            pass
        return

    def as_dict(self) -> dict:
        """
        Return patent info as a dict
        :return: dict
        """
        if self.fetched_details:
            return {
                'abstract': self.abstract,
                'claim01': self.claim01,
            }
        # FIX: the original printed "error" then returned an undefined local,
        # raising UnboundLocalError; return an empty dict instead.
        print("error")
        return {}

    def __repr__(self):
        return str(self.as_dict())
#===============================================================================
# Compute the similarity between two sentences
#===============================================================================
import numpy as np
from scipy import spatial
import gensim
#import pyemd
# Load the word2vec model; the pretrained GoogleNews vectors are used here.
# NOTE(review): this runs at import time and is slow/memory heavy.
vecfile = "D:\OpenData\GoogleNews-vectors-negative300.bin"
model = gensim.models.KeyedVectors.load_word2vec_format(vecfile, binary=True)
# Vocabulary of the model, used to filter unknown words.
index2word_set = set(model.wv.index2word)
# Method 1: with word2vec, average the vectors of all words in each
# sentence/document and use the cosine similarity between those averages
# as the sentence similarity.
def avg_feature_vector(sentence, model, num_features, index2word_set):
    """Average the embedding vectors of the in-vocabulary words of *sentence*.

    Words not in *index2word_set* are ignored; if no word is known the zero
    vector of length *num_features* is returned.
    """
    acc = np.zeros((num_features,), dtype='float32')
    known = [w for w in sentence.split() if w in index2word_set]
    for w in known:
        acc = acc + model[w]
    if known:
        acc = acc / len(known)
    return acc
def calsim(sentance1, sentance2):
    """Cosine similarity of the average word vectors of two sentences.

    Uses the module-level *model* / *index2word_set*. Returns 0 on failure
    (e.g. all-unknown words give zero vectors, making cosine undefined).
    FIX: narrowed the bare ``except:`` so KeyboardInterrupt/SystemExit
    still propagate.
    """
    try:
        s1_afv = avg_feature_vector(sentance1, model=model, num_features=300, index2word_set=index2word_set)
        s2_afv = avg_feature_vector(sentance2, model=model, num_features=300, index2word_set=index2word_set)
        sim = 1 - spatial.distance.cosine(s1_afv, s2_afv)
    except Exception:
        sim = 0
    return sim
#===============================================================================
# Main program.
#===============================================================================
if __name__ == '__main__':
    #pass
    # sentance1 is the description of one technique; the simplest form is the
    # wording of a single claim of an invention.
    sentance1 = "A system"
    # patentlist holds the US patent numbers to compare against,
    # e.g. ['US7654301B2', 'US7654300B2', 'US7654329B2']
    patentlist = ['US7654301B2', 'US7654300B2', 'US7654329B2']
    for i in patentlist:
        p = Patent(i)
        sentance2 = p.claim01
        sim = calsim(sentance1, sentance2)
        # Print and append each similarity score (output text kept verbatim).
        print('與%s間的相似度 = %s' % (i, sim))
        fp = open("claim_similarity.txt", "a")
        fp.write('\n與%s間的相似度 = %s' % (i, sim))
        fp.close()
    #end for
| true |
a19bf849071cd1bc13454ea295c41a22403944f5 | Python | VakinduPhilliam/Python_Data_Science | /Python_Data_Science_Pattern_En.py | UTF-8 | 1,712 | 3.328125 | 3 | [] | no_license | # Python Data Science and Analytics.
# Data Science is a field in computer science that is dedicated to analyzing patterns in raw data using
# techniques like Artificial Intelligence (AI), Machine Learning (ML), mathematical functions, and
# statistical algorithms.
# Pattern is a web mining module for the Python programming language.
# It has tools for data mining (Google, Twitter and Wikipedia API, a web crawler, a HTML DOM parser), natural
# language processing (part-of-speech taggers, n-gram search, sentiment analysis, WordNet), machine learning
# (vector space model, clustering, SVM), network analysis and <canvas> visualization.
# Pattern is a web mining module for the Python programming language.
# It has tools for data mining (Google, Twitter and Wikipedia API, a web crawler, a HTML DOM parser), natural
# language processing (part-of-speech taggers, n-gram search, sentiment analysis, WordNet), machine learning
# (vector space model, clustering, SVM), network analysis and <canvas> visualization.
# pattern.en
# The pattern.en module is a natural language processing (NLP) toolkit for English. Because language is ambiguous
# (e.g., I can <-> a can) it uses statistical approaches + regular expressions. This means that it is fast, quite
# accurate and occasionally incorrect. It has a part-of-speech tagger that identifies word types (e.g., noun, verb,
# adjective), word inflection (conjugation, singularization) and a WordNet API.
from pattern.en import parse
s = 'The mobile web is more important than mobile apps.'
s = parse(s, relations=True, lemmata=True)
print s
# Displays
# 'The/DT/B-NP/O/NP-SBJ-1/the mobile/JJ/I-NP/O/NP-SBJ-1/mobile' ... | true |
7594a6f18fbce84bd00a95c2842e18e3c187a129 | Python | Bit4z/python | /python/print row collunm.py | UTF-8 | 178 | 3.25 | 3 | [] | no_license | m=int(input("enter row"))
n=int(input("enter collunm"))
k=1
for i in range(m):
for j in range(n):
print("*",end="")
k=k+1
print(end="\n")
| true |
1a4c42958087358ed8a72bc611dcc89c3aa8de69 | Python | rayhanaziai/Practice_problems | /str_reverse.py | UTF-8 | 708 | 3.984375 | 4 | [] | no_license | def reverse_list(l, first_letter, last_letter):
i = first_letter
j = last_letter
while i < j:
l[i], l[j] = l[j], l[i]
i += 1
j -= 1
return l
def reverse_s(s):
    """Return *s* with its words in reverse order ('hi there' -> 'there hi').

    Reverses the whole character list, then un-reverses each word in place.
    FIX: the original used Python-2-only ``xrange`` (NameError on Python 3);
    ``range`` behaves identically here. The segment-reverse helper is kept
    private so the function is self-contained.
    """
    def _flip(chars, lo, hi):
        # In-place reverse of chars[lo..hi], inclusive.
        while lo < hi:
            chars[lo], chars[hi] = chars[hi], chars[lo]
            lo += 1
            hi -= 1

    char_lst = list(s)
    _flip(char_lst, 0, len(char_lst) - 1)
    word_start = 0
    for pos in range(len(char_lst)):
        if char_lst[pos] == " ":
            _flip(char_lst, word_start, pos - 1)
            word_start = pos + 1
        elif pos == len(char_lst) - 1:
            # Last character: un-reverse the final word.
            _flip(char_lst, word_start, pos)
    return ''.join(char_lst)

# run your function through some test cases here
# remember: debugging is half the battle!
print(reverse_s('hi my name is ray'))
36ba29adbe56a5947cb526ef37cd2354141fc3ce | Python | betancjj/UC_APOP | /FiveHoleProbe/DataProcessing/Python/FiveHoleProbe_CalibrationAndProcessing.py | UTF-8 | 9,839 | 2.59375 | 3 | [] | no_license | #from scipy.interpolate import spline
import os
import numpy as np
import matplotlib.pyplot as plt
def lin_interp(indeps, deps, spec_indep):
    """Linearly interpolate the curve (indeps[i], deps[i]) at *spec_indep*.

    Assumes *indeps* is ascending. Returns None when *spec_indep* is not
    strictly inside any interval (exact knot values included).
    """
    for idx, lo in enumerate(indeps):
        if spec_indep > lo and spec_indep < indeps[idx + 1]:
            hi = indeps[idx + 1]
            slope = (deps[idx + 1] - deps[idx]) / (hi - lo)
            return deps[idx] + (spec_indep - lo) * slope
class CalibPoint:
    """One five-hole-probe calibration sample: a (yaw, pitch) setting plus
    its four pressure coefficients. Pure value object."""

    def __init__(self, yaw, pitch, cp_yaw, cp_pitch, cp_static, cp_total):
        names = ('yaw', 'pitch', 'cp_yaw', 'cp_pitch', 'cp_static', 'cp_total')
        values = (yaw, pitch, cp_yaw, cp_pitch, cp_static, cp_total)
        for name, value in zip(names, values):
            setattr(self, name, value)
class CalibData:
    """Calibration table loaded from a CSV file.

    Builds two nested lookups over the same CalibPoint objects:
      yaw_lines[yaw][pitch]   -> CalibPoint
      pitch_lines[pitch][yaw] -> CalibPoint
    CSV columns (header row skipped): pitch, yaw, cp_yaw, cp_pitch,
    cp_static, cp_total. Keys are inserted in ascending sorted order so
    downstream bracket searches can rely on key order.
    """

    def __init__(self, calib_filename):
        with open(calib_filename) as calib_file:
            raw_lines = calib_file.readlines()
        rows = [line.split(',') for line in raw_lines[1:]]  # drop header
        points = [
            CalibPoint(float(r[1]), float(r[0]), float(r[2]),
                       float(r[3]), float(r[4]), float(r[5]))
            for r in rows
        ]
        yaws_u = sorted({p.yaw for p in points})
        pitches_u = sorted({p.pitch for p in points})

        self.yaw_lines = {}
        for yaw in yaws_u:
            per_pitch = {}
            for pitch in pitches_u:
                for point in points:
                    if point.yaw == yaw and point.pitch == pitch:
                        per_pitch[pitch] = point
            self.yaw_lines[yaw] = per_pitch

        self.pitch_lines = {}
        for pitch in pitches_u:
            per_yaw = {}
            for yaw in yaws_u:
                for point in points:
                    if point.pitch == pitch and point.yaw == yaw:
                        per_yaw[yaw] = point
            self.pitch_lines[pitch] = per_yaw
class TestPoint:
    # One measured five-hole-probe point: converts the five sensor voltages
    # into pressures, then (via the calibration data) into flow angles,
    # static/total pressure and velocity.

    def __init__(self, x, z, V1, V2, V3, V4, V5, P_ref, rho, calib_data):
        self.calib_data = calib_data
        self.x = x
        self.z = z
        self.V1 = V1
        self.V2 = V2
        self.V3 = V3
        self.V4 = V4
        self.V5 = V5
        # Absolute port pressures: sensor transfer function plus reference.
        self.P1 = self.get_pressure_30psi_sensor(V1) + P_ref
        self.P2 = self.get_pressure_30psi_sensor(V2) + P_ref
        self.P3 = self.get_pressure_30psi_sensor(V3) + P_ref
        self.P4 = self.get_pressure_30psi_sensor(V4) + P_ref
        self.P5 = self.get_pressure_30psi_sensor(V5) + P_ref
        self.Pavg = (self.P2 + self.P3 + self.P4 + self.P5) / 4.0
        # NOTE(review): `Pref` (no underscore) is NOT the `P_ref` parameter —
        # it resolves to the module-level global; likely a typo. Verify.
        self.Pref = Pref
        self.rho = rho
        # Non-dimensional yaw/pitch pressure coefficients.
        self.cp_yaw = (self.P2 - self.P3) / (self.P1 - self.Pavg)
        self.cp_pitch = (self.P4 - self.P5) / (self.P1 - self.Pavg)
        try:
            angles = self.get_angles()
            self.yaw = angles[0]
            self.pitch = angles[1]
            self.cp_static = self.get_cp_static()
            self.cp_total = self.get_cp_total()
            self.Ptotal = self.get_Ptotal()
            self.Pstatic = self.get_Pstatic()
            self.vel = self.get_velocity()
        except:
            # NOTE(review): bare except — any failure (including outside the
            # calibration envelope) zeroes the whole point.
            print("BAD POINT")
            self.yaw = 0.0
            self.pitch = 0.0
            self.cp_static = 0.0
            self.cp_total = 0.0
            self.vel = 0.0
            self.Pstatic = 0.0
            self.Ptotal = 0.0

    def get_pressure_30psi_sensor(self, voltage):
        # 30 psi full scale over a 0-5 V output.
        return voltage*(30.0/5.0)

    def get_angles(self):
        # Double interpolation in the calibration maps: first along each
        # constant-yaw line at this cp_pitch, then across yaws at this cp_yaw
        # (and symmetrically for pitch). Returns [yaw, pitch].
        yaw_lines = self.calib_data.yaw_lines
        pitch_lines = self.calib_data.pitch_lines
        new_lines = []
        for yaw in yaw_lines.keys():
            curr_cp_pitches = [yaw_lines[yaw][pitch].cp_pitch for pitch in yaw_lines[yaw].keys()]
            curr_cp_yaws = [yaw_lines[yaw][pitch].cp_yaw for pitch in yaw_lines[yaw].keys()]
            new_lines.append([self.cp_pitch, lin_interp(curr_cp_pitches, curr_cp_yaws, self.cp_pitch)])
        curr_cp_yaws = [line[1] for line in new_lines]
        yaw = lin_interp(curr_cp_yaws, list(yaw_lines.keys()), self.cp_yaw)
        new_lines = []
        for pitch in pitch_lines.keys():
            curr_cp_pitches = [pitch_lines[pitch][yaw].cp_pitch for yaw in pitch_lines[pitch].keys()]
            curr_cp_yaws = [pitch_lines[pitch][yaw].cp_yaw for yaw in pitch_lines[pitch].keys()]
            new_lines.append([self.cp_yaw, lin_interp(curr_cp_yaws, curr_cp_pitches, self.cp_yaw)])
        curr_cp_pitches = [line[1] for line in new_lines]
        pitch = lin_interp(curr_cp_pitches, list(pitch_lines.keys()), self.cp_pitch)
        return [yaw, pitch]

    def get_cp_static(self):
        # Bracket self.yaw between two calibration yaws, interpolate
        # cp_static over pitch on each line, and average the two.
        yaw_lines = self.calib_data.yaw_lines
        for ind, yaw in enumerate(list(yaw_lines.keys())):
            if yaw < self.yaw and list(yaw_lines.keys())[ind + 1] > self.yaw:
                pitches_low = list(yaw_lines[yaw].keys())
                cp_statics_low = [yaw_lines[yaw][pitch].cp_static for pitch in yaw_lines[yaw].keys()]
                lower_int = lin_interp(pitches_low, cp_statics_low, self.pitch)
                pitches_high = list(yaw_lines[list(yaw_lines.keys())[ind + 1]].keys())
                cp_statics_high = [yaw_lines[list(yaw_lines.keys())[ind + 1]][pitch].cp_static for pitch in
                list(yaw_lines[list(yaw_lines.keys())[ind + 1]].keys())]
                higher_int = lin_interp(pitches_high, cp_statics_high, self.pitch)
                cp_static = (lower_int + higher_int) / 2.0
                return cp_static

    def get_cp_total(self):
        # Same bracketing scheme as get_cp_static, but for cp_total.
        yaw_lines = self.calib_data.yaw_lines
        for ind, yaw in enumerate(list(yaw_lines.keys())):
            if yaw < self.yaw and list(yaw_lines.keys())[ind + 1] > self.yaw:
                pitches_low = list(yaw_lines[yaw].keys())
                cp_totals_low = [yaw_lines[yaw][pitch].cp_total for pitch in yaw_lines[yaw].keys()]
                lower_int = lin_interp(pitches_low, cp_totals_low, self.pitch)
                pitches_high = list(yaw_lines[list(yaw_lines.keys())[ind + 1]].keys())
                cp_totals_high = [yaw_lines[list(yaw_lines.keys())[ind + 1]][pitch].cp_total for pitch in
                list(yaw_lines[list(yaw_lines.keys())[ind + 1]].keys())]
                higher_int = lin_interp(pitches_high, cp_totals_high, self.pitch)
                cp_total = (lower_int + higher_int) / 2.0
                return cp_total

    def get_Ptotal(self):
        return self.Pref + (self.P1 - self.Pref) - self.cp_total * ((self.P1 - self.Pref) - (self.Pavg - self.Pref))

    def get_Pstatic(self):
        return self.Pref + (self.Pavg - self.Pref) - self.cp_static * ((self.P1 - self.Pref) - (self.Pavg - self.Pref))

    def get_velocity(self):
        # NOTE(review): ** binds tighter than *, so this computes
        # (2/rho) * sqrt(Ptotal - Pstatic), not sqrt(2*(Ptotal-Pstatic)/rho).
        # Verify the intended formula.
        return (2 / self.rho) * (self.Ptotal - self.Pstatic) ** 0.5
class TestData:
    # A full traverse of probe measurements read from a semicolon/comma
    # formatted results file; write() exports a CSV and shows a 3D quiver.

    def __init__(self, results_filename, calib_data, Pref, density):
        # Each line looks like "(x,z);(V1,V2,V3,V4,V5)"; parentheses are
        # stripped and the two halves split apart.
        with open(results_filename) as results_file:
            results_lines = results_file.readlines()
        results_lines = [line.strip().replace('(', '').replace(')', '').split(';') for line in results_lines]
        self.results_filename = results_filename
        self.test_points = [
        TestPoint(float(line[0].split(',')[0]), float(line[0].split(',')[1]), float(line[1].split(',')[0]), \
        float(line[1].split(',')[1]), float(line[1].split(',')[2]), float(line[1].split(',')[3]), \
        float(line[1].split(',')[4]), Pref, density, calib_data) for line in results_lines]

    def write(self):
        # Export '<input>_Results.csv' (units converted: ft/s -> mm/s via
        # *304.8, psi -> Pa via *6894.76) and draw a 3D quiver of the field.
        out_filename = os.path.splitext(self.results_filename)[0] + "_Results.csv"
        with open(out_filename, 'w') as results_out:
            results_out.write("X(mm),Z(mm),Vx(mm/s),Vy(mm/s),Vz(mm/s),P(Pa),P0(Pa),T(K)\n")
            X = []
            Y = []
            Z = []
            u = []
            v = []
            w = []
            for point in self.test_points:
                # Spherical-to-Cartesian using the interpolated yaw/pitch.
                Vx = np.sin(point.yaw * (np.pi / 180.0)) * np.cos(point.pitch * (np.pi / 180.0)) * point.vel
                Vy = np.cos(point.yaw * (np.pi / 180.0)) * np.cos(point.pitch * (np.pi / 180.0)) * point.vel
                Vz = np.sin(point.pitch * (np.pi / 180.0)) * point.vel
                # Complex components (negative radicand upstream) are zeroed.
                if np.iscomplex(Vx):
                    Vx = 0.0
                if np.iscomplex(Vy):
                    Vy = 0.0
                if np.iscomplex(Vz):
                    Vz = 0.0
                X.append(point.x)
                Y.append(0.0)
                Z.append(point.z)
                u.append(Vz)
                v.append(Vy)
                w.append(-Vx)
                results_out.write(
                "{},{},{},{},{},{},{},{}\n".format(point.x, point.z, Vx * 304.8, Vy * 304.8, Vz * 304.8,
                point.Pstatic * 6894.76, point.Ptotal * 6894.76, 293.0))
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.quiver(X, Y, Z, u, v, w, arrow_length_ratio=0.1)
        ax.set_xlim3d(0, 300)
        ax.set_zlim3d(0, 300)
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_zlabel("z")
        ax.set_ylim3d(0, 1000)
        plt.show()
if __name__ =="__main__":
    # Paths and flow conditions are hard-coded for this particular run.
    calibration_filename = r"C:\Users\jjbet\Desktop\CalibrationCurves\Condensed_FCalibData.csv"
    results_filename = r"E:\Results\CenterFlap_0Deg.csv"
    sample_calibration = CalibData(calibration_filename)
    Pref = 14.5  # psia
    density = 0.002297145  # slugs/ft^3
    sample_test = TestData(results_filename, sample_calibration, Pref, density)
    sample_test.write()
63a2bc86260b5b7c8323c2b986a28d7dddf35011 | Python | JasonLeeFdu/SRCNN | /VERSIONs/v1/RecordMaker.py | UTF-8 | 4,043 | 2.578125 | 3 | [] | no_license | import os
import cv2 as cv
import numpy as np
import math
import tensorflow as tf
from PIL import Image as image
def prepareTrainingData(recordName):
    # PIL RGB More efficiently
    # img.size[0]-- width img.size[1]-- height
    # A tf.record holds a sequence of examples; each example is a dict
    # of named features.
    # OpenCV array layout: batch, rows, cols, channels.
    DIR = '/home/winston/PycharmProjects/SRCNN_TF_REBUILD/Data/singlImg/'
    PATCH_SIZE = 32
    SCALE = 2
    writer = tf.python_io.TFRecordWriter(recordName)
    fileList = os.listdir(DIR)
    totalNum = len(fileList)
    counter = 0
    for img_name in fileList:
        # Read, normalize and cast; any other preprocessing of the training
        # data happens here before writing to the tfrecord.
        imgGT = cv.imread(DIR+img_name)
        width = imgGT.shape[1]
        height = imgGT.shape[0]
        nw = math.floor(width/PATCH_SIZE)
        nh = math.floor(height/PATCH_SIZE)
        # Tile the ground-truth image into PATCH_SIZE x PATCH_SIZE crops;
        # subX is the low-res version (down- then up-scaled by SCALE).
        for x in range(nw):
            for y in range(nh):
                subGT = imgGT[y*PATCH_SIZE:(y+1)*PATCH_SIZE,x*PATCH_SIZE:(x+1)*PATCH_SIZE,:]
                subX = cv.resize(subGT,(int(PATCH_SIZE/SCALE),int(PATCH_SIZE/SCALE)))
                subX = cv.resize(subX, (int(PATCH_SIZE), int(PATCH_SIZE)))
                subX = subX.astype(np.float32)
                subGT = subGT.astype(np.float32)
                subGT = subGT / 255
                subX = subX / 255
                subGT_raw = subGT.tobytes()
                subX_raw = subX.tobytes()
                sample = tf.train.Example(features=tf.train.Features(feature={
                "label": tf.train.Feature(bytes_list=tf.train.BytesList(value=[subGT_raw])),
                'input': tf.train.Feature(bytes_list=tf.train.BytesList(value=[subX_raw]))
                }))
                writer.write(sample.SerializeToString())  # serialize to string
        counter = counter + 1
        if counter%10==0:
            # Progress message (runtime text kept verbatim).
            print('当前进度:',round(counter*100/totalNum))
    writer.close()
    print("写入完毕")
def readAndDecode(fileName):
    """Classic TensorFlow queue-based reader for the training tfrecord."""
    fileQueue = tf.train.string_input_producer([fileName])  # filename queue producing tensors
    recordReader = tf.TFRecordReader()  # record reader
    # Read one serialized example from the queue; it still needs parsing.
    _,serializedExample = recordReader.read(fileQueue)
    # Parse the serialized example into a feature dict {input:..., label:...}.
    features = tf.parse_single_example(
    serializedExample,
    features={  # parsing spec
    'label':tf.FixedLenFeature([],tf.string),
    'input':tf.FixedLenFeature([],tf.string)
    }
    )
    # Decode the raw byte strings back to float32 and restore 32x32x3 shape.
    inputImg = tf.decode_raw(features['input'], tf.float32)
    inputImg = tf.reshape(inputImg,[32,32,3])
    labelImg = tf.decode_raw(features['label'],tf.float32)
    labelImg = tf.reshape(labelImg,[32,32,3])
    return inputImg,labelImg
'''
实验用函数:
def readImgOpencv():
#opencv->img numpy ndarray || BGR
DIR = '/home/winston/PycharmProjects/SRCNN_TF_REBUILD/Data/singlImg/'
fileName = 'ILSVRC2013_val_00004178.JPEG'
img = cv.imread(DIR+fileName)
cv.imshow('hahhah',img)
cv.waitKey(0)
def reamImgPIL():
# PIL RGB More efficiently
DIR = '/home/winston/PycharmProjects/SRCNN_TF_REBUILD/Data/singlImg/'
fileName = 'ILSVRC2013_val_00004178.JPEG'
img = image.open(DIR+fileName)
z = np.array(img)
zz = z
def printNames():
DIR = '/home/winston/PycharmProjects/SRCNN_TF_REBUILD/Data/singlImg/'
for img_name in os.listdir(DIR):
print(img_name)
def main():
reamImgPIL()
if __name__ == '__main__':
main()
''' | true |
7ceb90d046bf117d268124cab8ed147a31e6f211 | Python | beginnerHB1/Invoice_extraction | /unicareer.py | UTF-8 | 8,794 | 2.859375 | 3 | [] | no_license | import pdftotext
import re
def read_text(lst):
    """Concatenate the billable body of each PDF page in *lst*.

    Page 0 is kept up to the finance-charge boilerplate; every later page is
    trimmed to the span between its 'AMT' column header and that boilerplate.
    Raises ValueError (from str.index) if a marker is missing.
    """
    footer = "A Finance charge will be imposed by"
    parts = [lst[0][:lst[0].index(footer)]]
    for page in lst[1:]:
        body_start = page.index("AMT") + 3
        body_end = page.index(footer)
        parts.append(page[body_start:body_end])
    return "\n".join(parts)
def remove_header_footer(lst):
    """Blank out known UniCarriers page-header lines in *lst*, then drop
    every all-whitespace line. Returns the cleaned list.

    FIX: the original removed blank lines with ``lst.remove`` while
    iterating ``lst``, which skips the element after each removal, so
    consecutive blank lines survived. Filtering into a new list removes
    them all.
    """
    for i in range(len(lst)):
        try:
            # Replace each known header fragment with a space so positional
            # indexing in later steps is not disturbed mid-pass.
            if "INVOICE" == lst[i].strip():
                lst[i] = re.sub("INVOICE", " ", lst[i])
            elif "UniCarriers Americas Corporation" in lst[i]:
                lst[i] = re.sub("UniCarriers Americas Corporation", " ", lst[i])
            elif "240 N. Prospect Street − Marengo, IL 60152−3298" in lst[i]:
                lst[i] = re.sub("240 N. Prospect Street − Marengo, IL 60152−3298", " ", lst[i])
            elif "Remit To: P.O.Box 70700 − Chicago, IL 60673−0700" in lst[i]:
                lst[i] = re.sub("Remit To: P.O.Box 70700 − Chicago, IL 60673−0700", " ", lst[i])
            elif "Billing Inquiries (815) 568−0061" in lst[i]:
                lst[i] = lst[i].split("568−0061")[-1]
        except Exception:
            # Best-effort: skip lines that fail to process (was a bare except).
            continue
    return [line for line in lst if line.strip()]
#to find Invoice and UAC number
def find_invoice_uac_no(lst):
'''
lst : splited list of all extracted text wit "\n"
'''
for i in lst:
if "Invoice Number" in i:
invoice_index = lst.index(i) +1
elif "UCA Order No." in i:
uac_index = lst.index(i) + 1
return invoice_index, uac_index
#to find Invoice Date, ...
def find_invoice_date_table(lst):
for i in lst:
if "Invoice Date" in i.strip():
return lst.index(i) + 1
def find_address_indexes(lst):
    """Span of the address region: (index of the 'Sold To' line, index of
    the 'Invoice Date' line). Raises UnboundLocalError if either marker is
    absent (original contract)."""
    pos = 0
    while pos < len(lst):
        line = lst[pos]
        if "Sold To" in line:
            start_index = lst.index(line)
        elif "Invoice Date" in line:
            end_index = lst.index(line)
        pos += 1
    return start_index, end_index
def add_1_2(lst):
    '''
    lst : splited list of all extracted text wit "\n"
    info:
        Split each address line into the Sold-To part (address_1) and the
        Ship-To part (address_2), using runs of blank tokens as the divider.
    '''
    start, end = find_address_indexes(lst)
    address_lst = lst[start:end]
    address_1 = ''
    address_2 = ''
    for i in range(len(address_lst)):
        # NOTE(review): `lst` is rebound to the token list of the current
        # line here, shadowing the parameter for the rest of the loop body.
        lst = address_lst[i].split(" ")
        index_lst = []
        '''
        To find start and end of address 1 from lst indexes
        '''
        # First token followed (within two slots) by another non-empty token
        # marks the start of address 1.
        for i in range(len(lst)):
            try:
                if lst[i] != "" and (lst[i+1] != "" or lst[i+2] != ""):
                    # print(lst.index(lst[i]))
                    index_lst.append(lst.index(lst[i]))
                    start_index_add_1 = lst.index(lst[i])
                    break
            except:
                # Ran off the end of the token list; stop probing.
                break
        # Two consecutive empty tokens end address 1; if none are found the
        # whole remainder belongs to address 1.
        for i in range(1, len(lst[start_index_add_1:])):
            try:
                if lst[i] == "" and lst[i-1] == "":
                    end_index_add_1 = start_index_add_1 + lst[start_index_add_1:].index(lst[i])
                    add = lst[start_index_add_1:end_index_add_1]
                    break
            except ValueError:
                add = lst[start_index_add_1:]
                break
        address_1 += " " + " ".join(add)
        lst = lst[end_index_add_1:]
        address_2 += " " + " ".join(lst).strip()
    return address_1.strip(), address_2.strip()
def line_details(lst):
    '''
    Parse the invoice line-item table between the 'Model or Part #' header
    and the 'TOTAL NET SALE USD' footer. Each qualifying row (>= 5 columns,
    not a 'Comments' row) becomes a dict whose keys are suffixed with the
    running row number.
    '''
    for line in lst:
        if "TOTAL NET SALE USD" in line:
            end_index = lst.index(line)
        elif "Model or Part #" in line:
            start_index = lst.index(line) + 1
    rows = []
    row_no = 0
    for raw in lst[start_index:end_index]:
        cols = raw.split()
        if len(cols) < 5 or cols[0] == "Comments":
            continue
        rows.append({
            "Model or Part #_{}".format(row_no): cols[0],
            "Description_{}".format(row_no): " ".join(cols[1:-3]),
            "Quantity_{}".format(row_no): cols[-3],
            "Unit_Price_{}".format(row_no): cols[-2],
            "Extended_AMT_{}".format(row_no): cols[-1],
        })
        row_no += 1
    return rows
#total amount
def invoice_amount_details(lst):
for i in lst:
if "PAYMENT DUE BY" in i:
end_index = lst.index(i)
elif "TOTAL NET SALE USD" in i:
start_index = lst.index(i)
details_lst = lst[start_index:end_index+1]
amount_details_dict = {}
if len(details_lst) == 4:
for i in range(3):
key = " ".join(details_lst[i].split()[:-1])
val = details_lst[i].split()[-1]
amount_details_dict[key] = val
lst = details_lst[3].split()
key = " ".join(lst[:3])
val = lst[3]
amount_details_dict[key] = val
key = " ".join(lst[4:-1])
val = lst[-1]
amount_details_dict[key] = val
return amount_details_dict
def create_json(lst_det):
    """Wrap a (name, length, mandatory, sample) 4-sequence as the report dict.

    Note: the 'Mandotory' spelling is part of the output schema consumed
    downstream — do not 'fix' it.
    """
    name, length, mandatory, sample = lst_det[0], lst_det[1], lst_det[2], lst_det[3]
    return {
        "Field Name": name,
        "length": length,
        "Mandotory": mandatory,
        "Sample Value": sample,
    }
def extract_detail(PDF):
    # Full pipeline: open the PDF, join the page bodies, and assemble the
    # structured report dict from header, addresses, line items and totals.
    json_dct = {"Header":[],"Sold To":[],"Ship To":[], "Line Details (Repeated Segment)":[], "Invoice Amount Details":[]}
    bit = False
    with open(PDF, "rb") as f:
        pdf = pdftotext.PDF(f)
    final_lst = []
    # Single-page documents are used as-is; multi-page ones are merged.
    if len(pdf) == 1:
        data = pdf[0]
    else:
        data = read_text(pdf)
    lst = data.split("\n")
    # Only process documents that look like UniCarriers invoices.
    for i in lst:
        if "UniCarriers Americas Corporation" in i:
            bit = True
    if bit:
        # x = lst
        x = remove_header_footer(lst)
        invoice_index, uac_index = find_invoice_uac_no(x)
        # try:
        json_dct["Header"].append(create_json(["Invoice Number", len(x[invoice_index].strip()), "yes", x[invoice_index].strip()]))
        json_dct["Header"].append(create_json(["UCA Order No.", len(x[uac_index].strip().split()[-1]), "yes", x[uac_index].strip().split()[-1]]))
        # The invoice-date row packs date / customer order / payment terms;
        # two rows further down sit ship date / ship via / shipment terms.
        ind = find_invoice_date_table(x)
        json_dct["Header"].append(create_json(["Invoice Date", "mmddyyyy", "yes", x[ind].strip().split()[0]]))
        json_dct["Header"].append(create_json(["Customer Order Number", len(x[ind].strip().split()[1]), "yes", x[ind].strip().split()[1]]))
        json_dct["Header"].append(create_json(["Payment Terms", len(" ".join(x[ind].strip().split()[2:])), "yes", " ".join(x[ind].strip().split()[2:])]))
        json_dct["Header"].append(create_json(["Ship Date", "mmddyyyy", "yes", x[ind+2].strip().split()[0]]))
        json_dct["Header"].append(create_json(["Ship Via", len(x[ind+2].strip().split()[1]), "yes", x[ind+2].strip().split()[1]]))
        json_dct["Header"].append(create_json(["Shipment Terms", len(x[ind+2].strip().split()[2]), "yes", x[ind+2].strip().split()[2]]))
        # except:
        # json_dct["Header"] = []
        try:
            # Address extraction is best-effort; on failure both address
            # sections are emptied.
            address_1, address_2 = add_1_2(x)
            address_1 = "".join(address_1.split("Sold To:")).strip()
            address_2 = "".join(address_2.split("Ship To:")).strip()
            json_dct["Sold To"].append(create_json(["Sold to","", "yes", address_1]))
            json_dct["Ship To"].append(create_json(["Ship to","", "yes", address_2]))
        except:
            json_dct["Sold To"] = []
            json_dct["Ship To"] = []
        # try:
        line_details_under_tabe = line_details(x)
        for i in line_details_under_tabe:
            for j in list(i.keys()):
                json_dct["Line Details (Repeated Segment)"].append(create_json([j, len(i[j]), "yes", i[j]]))
        dct = invoice_amount_details(x)
        for i in list(dct.keys()):
            # The due date is reported as a date format, not a length.
            if i == "PAYMENT DUE BY":
                json_dct["Invoice Amount Details"].append(create_json([i, "mmddyyyy", "yes", dct[i]]))
            else:
                json_dct["Invoice Amount Details"].append(create_json([i, len(dct[i]), "yes", dct[i]]))
        # except:
        # json_dct["Invoice Amount Details"] = []
        return json_dct
    else:
        return json_dct
0e81b2c2b5683bd55a87e31e16e634025c3cfa1f | Python | wingluck/stock-analysis | /stock_analysis.py | UTF-8 | 2,184 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import pandas as pd
import datetime
class StockData(object):
    """Lightweight handle on one stock's data file; the stock id is the
    file name without its extension."""

    def __init__(self, fname) -> None:
        self._inited = False  # lazy-load flag; not yet consulted in this file
        self.fname = fname
        self.stock_id = fname.partition('.')[0]
def amplitude(datadir='stock-data', interval=30, end_date=None):
    """
    Calculate the amplitude for all stock in data dir. Return a sorted pandas.DataFrame.
    :param datadir: folder name to read stock data from
    :param interval: amplitude in this interval
    :param end_date: default to None, means that it will calculate amplitude from (now - interval) to now
    :return: A sorted pandas.DataFrame
    """
    if not os.path.isdir(datadir) or not os.path.exists(datadir):
        print('error: directory not exist. %s' % datadir)
        return
    if end_date is None:
        end_date = pd.Timestamp(datetime.datetime.now())

    def _ripple(fname, start, end):
        # Per-file amplitude: max-high / min-low over the window; the sign
        # encodes direction (negative when the high precedes the low).
        # Column names ('日期' = date, '最高价' = high, '最低价' = low)
        # are the CSV's own schema — kept verbatim.
        data = pd.read_csv(os.path.join(datadir, fname), index_col='日期', parse_dates=True)
        # data in file is sorted in **Descend**
        data = data.loc[end:start]

        def _ripple_radio(d):
            return d['最高价'].max() / d['最低价'].min()

        if data['最低价'].idxmin() < data['最高价'].idxmax():
            ripple_radio = _ripple_radio(data)
        else:
            ripple_radio = - _ripple_radio(data)
        return ripple_radio

    files = os.listdir(datadir)

    def _stock_id(fname):
        # Stock id is the file name without extension.
        return fname.split('.')[0]

    end_date = pd.Timestamp(end_date)
    start_date = end_date - pd.Timedelta(days=interval)
    ripples_list = [(_stock_id(f), _ripple(f, start_date, end_date)) for f in files if f.endswith('.csv')]
    ripples = pd.DataFrame(ripples_list, columns=['id', 'amp'])
    all_ripples = ripples.sort_values('amp', ascending=False)
    print('head 5 recent amplitude in period of %d for all stocks in %s till %s:' % (interval, datadir, end_date))
    print(all_ripples.head(5))
    print('tail 5 recent ripples in period of %d for all stocks in %s till %s:' % (interval, datadir, end_date))
    print(all_ripples.tail(5))
    return all_ripples
if __name__ == '__main__':
    # Run the amplitude report with default directory/interval.
    amplitude()
| true |
3f0ff6724ead56a407c4a1e58b230c8dea1aaf19 | Python | mehdirazarajani/MinutesOfMeeting | /meeting-transcript-data-text-parser/venv/ProblasticRanking.py | UTF-8 | 9,115 | 3.109375 | 3 | [] | no_license | import json
import string
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import csv
import spacy
import operator
from jellyfish import jaro_distance
# clusters = [name of clusters]
# all_words_in_collection = set()
# collections = {word:{list:{cluster#:word_count},total_word_count:int,cluster_count:int}}
def remove_punctuation(text):
    """Strip every ASCII punctuation character from *text* in one pass."""
    # Map each punctuation character to None and translate once.
    return text.translate(str.maketrans('', '', string.punctuation))
def remove_stopwords(text):
    """Lower-case *text* and drop English stopwords (NLTK list)."""
    sw = stopwords.words('english')
    kept = []
    for token in text.split():
        token = token.lower()
        if token not in sw:
            kept.append(token)
    return " ".join(kept)
def stemming(text):
    """Porter-stem each whitespace-separated token of *text*."""
    stemmer = PorterStemmer()
    return " ".join(stemmer.stem(token) for token in text.split())
def lemmatization(text, sp):
    """Lemmatize *text* with the given spaCy pipeline *sp* and re-join with
    single spaces."""
    return " ".join(token.lemma_ for token in sp(text))
def __calculate_pir_for_word_for_cluster(word_details, total_cluster_count, cluster_name, sentence_len):
    """Per-word probabilistic-IR weights (pt, ut) for one cluster.

    rt = occurrences of the word in this cluster, r = total occurrences,
    nt = number of clusters containing it, n = total cluster count.
    FIX: narrowed the bare ``except:`` — only a missing cluster key should
    fall back to rt = 0.
    """
    try:
        rt = word_details['list'][cluster_name]
    except KeyError:
        rt = 0
    n = total_cluster_count
    r = word_details['total_word_count']
    nt = word_details['cluster_count']
    # sentence_len is accepted for interface compatibility; earlier variants
    # (left commented in the original) used it in place of r.
    pt = (rt + 0.5) / (r + 1.0)
    ut = (nt + 0.5) / (n + 1.0)
    return pt, ut
def __calculate_pir_for_sentence_for_cluster(sentence, cluster_collection, total_cluster_count, all_words,
                                             cluster_name):
    """Retrieval-status value of *sentence* for one cluster: the product of
    per-word odds ratios pt(1-ut)/(ut(1-pt)); 0.0 when no word is known.

    FIX: the original duplicated the whole body for the single-word case;
    ``sentence.split(' ')`` already yields ``[sentence]`` when there is no
    space, so one loop covers both branches identically.
    """
    rsv = 1.0
    a_word_found = False
    for word in sentence.split(' '):
        if word in all_words:
            a_word_found = True
            pt, ut = __calculate_pir_for_word_for_cluster(
                cluster_collection[word], total_cluster_count,
                cluster_name, len(sentence))
            rsv *= ((pt * (1 - ut)) / (ut * (1 - pt)))
    if not a_word_found:
        rsv = 0.0
    return rsv
def calculate_pir_for_sentence(sentence, cluster_collection, total_cluster_count, all_words, clusters):
    """RSV score of *sentence* against every cluster, keyed by cluster name;
    the cluster's position (as a string) indexes the collection counts."""
    return {
        cluster: __calculate_pir_for_sentence_for_cluster(
            sentence, cluster_collection, total_cluster_count, all_words, str(pos))
        for pos, cluster in enumerate(clusters)
    }
def calculate_pir(text_corpus, cluster_collection, total_cluster_count, all_words, clusters):
    """Score every corpus sentence against every cluster.

    Result keys are the normalised (lower-cased, punctuation- and
    stopword-free) sentences, so normalised duplicates collapse to one entry.
    """
    scores = {}
    for entry in text_corpus:
        normalised = remove_stopwords(remove_punctuation(entry['sentence'].lower()))
        scores[normalised] = calculate_pir_for_sentence(
            normalised, cluster_collection, total_cluster_count, all_words, clusters)
    return scores
def fill_the_collection(cluster_corpus, text_corpus):
    # Build the word statistics used by the PIR scoring: for every word in
    # the normalised meeting text, count its occurrences per cluster.
    all_words_in_collection = set()
    # collections = {word:{list:{cluster#:word_count},total_word_count:int,cluster_count:int}}
    collections = dict()
    # NOTE(review): loading the spaCy model here makes every call expensive.
    sp = spacy.load('en_core_web_sm')
    for sentence in text_corpus:
        sentence = sentence['sentence']
        # Normalise: lower-case, strip punctuation/stopwords, lemmatize.
        sentence = remove_stopwords(remove_punctuation(sentence.lower()))
        sentence = lemmatization(sentence, sp)
        # sentence = stemming(sentence)
        if ' ' in sentence:
            for word in sentence.split(' '):
                if word != '':
                    all_words_in_collection.add(word)
                    collections = __update_collections(collections, cluster_corpus, word)
        else:
            # Single-word sentence: treat the whole sentence as one word.
            word = sentence
            if word != '':
                all_words_in_collection.add(word)
                collections = __update_collections(collections, cluster_corpus, word)
    return all_words_in_collection, collections
def __update_collections(collections, cluster_corpus, word):
    """Record per-cluster occurrence counts of *word* into *collections*.

    Matching is substring-based (``str.count``), mirroring the original
    ``word in cluster`` test. Words appearing in no cluster are not added.
    Returns the (mutated) collections dict.
    """
    per_cluster = {}
    total = 0
    hits = 0
    for pos, cluster in enumerate(cluster_corpus):
        occurrences = cluster.count(word)
        if occurrences:
            per_cluster[str(pos)] = occurrences
            total += occurrences
            hits += 1
    if total > 0:
        collections[word] = {
            'list': per_cluster,
            'total_word_count': total,
            'cluster_count': hits,
        }
    return collections
def write_csv_pir(pir, clusters, raw_sentences, filename):
    # Dump the per-sentence cluster scores to CSV: header row of cluster
    # names, then one row per scored sentence (original wording restored).
    file = open(filename, 'w')
    writer = csv.writer(file)
    resultant = ['']
    index = 0
    for cluster in clusters:
        resultant.append(cluster)
    writer.writerow(resultant)
    for text, scores in pir.items():
        # Advance through raw_sentences until the normalised form matches
        # the scored key.
        # NOTE(review): assumes pir preserves raw_sentences order; raises
        # IndexError if a key never matches — verify with the caller.
        raw_sentence = raw_sentences[index]
        while not remove_stopwords(remove_punctuation(raw_sentence.lower())) == text:
            index += 1
            raw_sentence = raw_sentences[index]
        resultant = [raw_sentence]
        for score in scores.values():
            resultant.append(str(score))
        writer.writerow(resultant)
    file.close()
def fill_cluster_corpus(cluster_text):
    """Collect the preprocessed agenda clusters and their vocabulary.

    :param cluster_text: mapping whose values are lists of dicts with a
        ``'text'`` entry (the agenda item text).
    :return: ``(all_words_in_cluster, clusters)`` — the set of words appearing
        in any preprocessed cluster, and the list of distinct preprocessed
        cluster strings in first-seen order.
    """
    distinct_clusters = list()
    vocabulary = set()
    sp = spacy.load('en_core_web_sm')
    for values in cluster_text.values():
        for value in values:
            normalized = remove_stopwords(remove_punctuation(value['text'].lower()))
            normalized = lemmatization(normalized, sp)
            # normalized = stemming(normalized)
            vocabulary.update(normalized.split(' '))
            if normalized not in distinct_clusters:
                distinct_clusters.append(normalized)
    return vocabulary, distinct_clusters
def get_all_sentences(sentences):
    """Return the raw sentence strings from a list of sentence dicts.

    :param sentences: iterable of dicts each holding text under 'sentence'.
    :return: list of the 'sentence' values, in order.
    """
    # list comprehension replaces the manual append loop
    return [text['sentence'] for text in sentences]
def find_the_max_ranked_cluster(title, clusters, raw_sentences, pir):
    """Assign every scored sentence to the cluster with its highest PIR score.

    :param title: key under which the result dict is returned.
    :param clusters: cluster strings; each becomes a bucket in the result.
    :param raw_sentences: list of dicts with the raw text under 'sentence'.
    :param pir: mapping of preprocessed sentence -> {cluster: score}.
    :return: {title: {cluster: [sentence dicts assigned to that cluster]}}
    """
    resultants = dict()
    index = 0
    for cluster in clusters:
        resultants[cluster] = []
    # Load the model once; the original reloaded it for every pir entry,
    # which dominated the runtime.
    sp = spacy.load('en_core_web_sm')
    for text, scores in pir.items():
        raw_sentence = raw_sentences[index]['sentence']
        while not lemmatization(remove_stopwords(remove_punctuation(raw_sentence.lower())), sp) == lemmatization(text, sp):
            index += 1
            raw_sentence = raw_sentences[index]['sentence']
            print('.')
        max_cluster = max(scores.items(), key=operator.itemgetter(1))[0]
        # NOTE(review): index is advanced *before* the append, so the sentence
        # stored is the one *after* the matched sentence — looks like an
        # off-by-one; confirm against expected output before changing.
        index += 1
        print(index)
        resultants[max_cluster].append(raw_sentences[index])
    return {title: resultants}
if __name__ == '__main__':
    # 1) Load the agenda and build the cluster corpus / cluster vocabulary.
    with open('data_agenda1.txt') as agenda_file:
        cluster_text = json.load(agenda_file)
    all_words_in_cluster, cluster_corpus = fill_cluster_corpus(cluster_text['structured_agenda_texts'])
    print(all_words_in_cluster)
    print(cluster_corpus)
    # 2) Load the meeting transcript and count word occurrences per cluster.
    with open('data_meeting_text1.txt') as text_file:
        meeting_text = json.load(text_file)
    all_words_in_collection, collections = fill_the_collection(cluster_corpus, meeting_text[
        'structured_meeting_texts_without_introduction'])
    all_sentences = get_all_sentences(meeting_text['structured_meeting_texts_without_introduction'])
    print(all_words_in_collection)
    print(len(all_words_in_collection))
    # Persist the intermediate statistics for inspection.
    with open('collections.txt', 'w') as outfile1:
        json.dump(collections, outfile1)
    # 3) Score every transcript sentence against every cluster (PIR).
    all_pir = calculate_pir(meeting_text['structured_meeting_texts_without_introduction'], collections,
                            len(cluster_corpus), all_words_in_cluster, cluster_corpus)
    with open('pir.txt', 'w') as outfile1:
        json.dump(all_pir, outfile1)
    # write_csv_pir(all_pir, cluster_corpus, all_sentences, 'result pir.csv')
    # 4) Assign each sentence to its best-scoring cluster and persist.
    clustered_sentences = find_the_max_ranked_cluster('clustered_sentences', cluster_corpus, meeting_text[
        'structured_meeting_texts_without_introduction'], all_pir)
    with open('clustered_sentences.txt', 'w') as outfile1:
        json.dump(clustered_sentences, outfile1)
9354efb4d1cdd3b0f9dc3218b4fc93c2ba645dde | Python | Urvashi-91/Urvashi_Git_Repo | /Interview/Stripe/triangle.py | UTF-8 | 1,181 | 3.984375 | 4 | [] | no_license | // https://www.codewars.com/kata/56606694ec01347ce800001b/solutions/javascript
// Implement a method that accepts 3 integer values a, b, c. The method should return true if a triangle can be built with the sides of given length and false in any other case.
// (In this case, all triangles must have surface greater than 0 to be accepted).
// The sum of the lengths of any two sides of a triangle is greater than the length of the third side. Similarly, the difference between the lengths of any two sides of a triangle is less than the length of the third side.
// Return true when sides a, b, c can form a triangle with positive area,
// i.e. every pair of sides sums to strictly more than the remaining side.
// A null side short-circuits to 0 (falsy), preserving the original contract.
const isTriangle = (a, b, c) => {
  if (a === null || b === null || c === null) {
    return 0;
  }
  // One triangle-inequality expression replaces the deeply nested if/else
  // pyramid; the leftover debug console.log calls are removed.
  return (a + b) > c && (b + c) > a && (a + c) > b;
};
// Quick manual checks of the kata examples.
console.log(isTriangle(1,2,2)); /* => true*/
console.log(isTriangle(7,2,2)); /* => false */
// Original Codewars test harness, kept for reference:
// Test.describe("PublicTest", function() {
//     Test.assertEquals(isTriangle(1,2,2), true);
//     Test.assertEquals(isTriangle(7,2,2), false);
// });
55affb214e0ecbf6c75510f302126be6cac2eb77 | Python | DLenthu/Face_applications | /deep_face/face_similarity.py | UTF-8 | 730 | 2.703125 | 3 | [] | no_license | from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt
import logging
import os
import math
# Force CPU execution ("-1" hides all GPUs) and silence TensorFlow logging
# so only fatal messages reach the console.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger('tensorflow').setLevel(logging.FATAL)
# Paths of the sample face images compared below.
img1_path = "test1.jpg"
img2_path = "test2.jpg"
img3_path = "test3.jpeg"
img4_path = "test4.jpeg"
# Load the images with OpenCV (returns None if a path is missing).
img1 = cv2.imread(img1_path)
img2 = cv2.imread(img2_path)
img3 = cv2.imread(img3_path)
img4 = cv2.imread(img4_path)
def verify(image1,image2):
    """Print a similarity score for the two face images.

    DeepFace.verify returns a dict whose 'distance' entry is mapped to a
    score via acos(distance) / (pi/2).
    NOTE(review): math.acos is only defined for values in [-1, 1], so this
    presumably expects a cosine distance in that range -- confirm the
    distance metric DeepFace is configured with.
    """
    result = DeepFace.verify(image1,image2)
    similarity = math.acos(result["distance"])/(math.pi/2)
    print("Both people have a similarity score of :",similarity)
verify(img1,img3)
61da1d7f7e22c7199373f590895085ce42cf6442 | Python | mrcszk/Python | /Kolokwium/liczby_zaprzyjaźnione.py | UTF-8 | 688 | 4 | 4 | [] | no_license | #program wypisujący pary liczb zaprzyjaźnionych mniejszych od n
def szukanie_dzielnikow(a):
    """Return the proper divisors of *a* (every d with 1 <= d < a, d | a)."""
    # comprehension replaces the manual append loop
    return [i for i in range(1, a) if a % i == 0]
def sumowanie_dzielników(a):
    """Return the sum of the proper divisors of *a* (the aliquot sum)."""
    # sum() replaces the manual index-based accumulation loop
    return sum(szukanie_dzielnikow(a))
# Read the upper bound, then report amicable pairs and perfect numbers below it.
n = int(input("Podaj liczbe n:"))
print("liczby zaprzyjaźnione:")
# k and suma_k are amicable when sigma(k) == suma_k, sigma(suma_k) == k and
# k != suma_k; each pair is printed once per orientation of the scan.
for k in range(1,n):
    suma_k = sumowanie_dzielników(k)
    suma_j = sumowanie_dzielników(suma_k)
    if k == suma_j:
        if not k == suma_k:
            print(k, suma_k)
print("liczby doskonałe:")
# perfect numbers equal the sum of their own proper divisors
for k in range(1,n):
    if sumowanie_dzielników(k) == k:
        print(k)
84d804575079f8787b1a93b3c531fcaa35993667 | Python | mdnahidmolla/URI-Online-Judge-Solutions-in-Python | /URI-Online-Judge-Solutions-in-Python/1037 - Interval.py | UTF-8 | 356 | 3.65625 | 4 | [] | no_license | n = float(input())
# Classify n (read above) into [0,25], (25,50], (50,75], (75,100], or report
# it as out of range. The original used 25.00001-style lower bounds, so gap
# values such as 25.000005 were wrongly reported as "Fora de intervalo";
# strict chained comparisons close those gaps.
if 0 <= n <= 25:
    print("Intervalo [0,25]")
elif 25 < n <= 50:
    print("Intervalo (25,50]")
elif 50 < n <= 75:
    print("Intervalo (50,75]")
elif 75 < n <= 100:
    print("Intervalo (75,100]")
else:
    print("Fora de intervalo")
11f55aef051019c4e15313365ab16a66ffeacd56 | Python | AkshithBellare/year3sem5 | /daa300/lab/6lab/fractional_knapsack.py | UTF-8 | 1,641 | 3.953125 | 4 | [] | no_license | class Item:
    def __init__(self, value, weight):
        # v = item value, w = item weight; x is the chosen fraction (0..1),
        # filled in later by greedy_fractional_knapsack.
        self.v = value
        self.w = weight
        self.x = 0
    def __str__(self):
        # human-readable summary used by the demo in main()
        return f"weight={self.w} value={self.v}"
def greedy_fractional_knapsack(items, capacity):
    """Greedy fractional knapsack over items assumed pre-sorted by density.

    Takes each item whole while it fits; the first item that does not fit is
    taken fractionally and the scan stops. Mutates ``item.x`` on every item
    (0, 1, or a fraction) and returns the fractions in item order.
    """
    for item in items:
        item.x = 0
    used = 0
    for item in items:
        if used + item.w <= capacity:
            item.x = 1
            used = used + item.w
        else:
            # only a fraction of this item still fits; knapsack is now full
            item.x = (capacity - used) / item.w
            used = capacity
            break
    return [item.x for item in items]
def recursive_knapsack(items, index, num, capacity):
    """0/1 knapsack: best total value using items[index:num] within capacity.

    :param items: sequence of objects with numeric ``v`` (value) and ``w`` (weight).
    :param index: first item index to consider.
    :param num: one past the last item index to consider.
    :param capacity: remaining weight budget.
    :return: the maximum achievable value.
    """
    # (index, capacity) -> best value; memoization turns the original
    # exponential take/skip recursion into O(num * distinct capacities)
    # without changing any result.
    memo = {}

    def _best(i, cap):
        if i >= num:
            return 0
        key = (i, cap)
        if key not in memo:
            if cap < items[i].w:
                memo[key] = _best(i + 1, cap)
            else:
                memo[key] = max(_best(i + 1, cap),
                                items[i].v + _best(i + 1, cap - items[i].w))
        return memo[key]

    return _best(index, capacity)
def main():
    """Demo: solve a small fractional knapsack greedily, then the 0/1 variant."""
    values = [280, 100, 120, 120]
    weights = [40, 10, 20, 24]
    capacity = 60
    items = []
    for i in range(len(values)):
        item = Item(value = values[i], weight = weights[i])
        items.append(item)
    # the greedy algorithm requires items in decreasing value-density order
    items.sort(reverse=True, key=lambda item: item.v/item.w)
    for item in items:
        print(item)
    x = greedy_fractional_knapsack(items=items, capacity=capacity)
    print(x)
    # total profit of the greedy fractional solution
    max_profit = 0
    for item in items:
        max_profit += item.v * item.x
    print(max_profit)
    # 0/1 knapsack over the same (sorted) items for comparison
    print(recursive_knapsack(items=items, index=0, num=4, capacity=capacity))
# Run the demo only when executed directly.
if __name__ == "__main__":
    main()
d47903a7b8639baaf846930bcfc6c0d68730ebb3 | Python | smallblackMIN/PytestPractice | /tesecase/test_Calc_02.py | UTF-8 | 4,512 | 3.453125 | 3 | [] | no_license | from func.Calc import Calc
import pytest
import yaml
class Test_Calc_02():
    # NOTE(review): pytest only collects methods named test_* by default, so
    # these calc_* methods will not run unless collection is customized
    # (e.g. python_functions in pytest.ini) -- confirm the project config.
    # NOTE(review): yaml.safe_load(open(...)) at decoration time leaves the
    # file handles open and requires the data files in the working directory.
    def setup(self):
        # fresh Calc instance for every test method (nose-style setup hook)
        self.calc = Calc()
    @pytest.mark.parametrize(["a","b","c"], yaml.safe_load(open("add_normal_data.yaml")))
    def calc_add_normal(self,a,b,c):
        '''
        Equivalence-class cases for add() with normal numeric values.
        :param a: first addend
        :param b: second addend
        :param c: expected result
        Numbers are partitioned into positive/negative integers and
        positive/negative floats, which are combined pairwise for addition.
        '''
        data = (a,b)
        assert round(self.calc.add(*data),1) == c
        # assert round(self.calc.add1(data)) == c
    @pytest.mark.parametrize(["a", "b", "c"], yaml.safe_load(open("add_error_data.yaml")))
    def calc_add_error(self,a,b,c):
        '''
        Cases for add() with invalid inputs.
        :param a: first addend
        :param b: second addend
        :param c: expected result
        Covers inputs where one of the addends is not a numeric type.
        '''
        with pytest.raises(TypeError): # capture the expected exception
            assert self.calc.add(a, b) == c
    @pytest.mark.parametrize(["a","b","c"],yaml.safe_load(open("./div_normal_data.yaml")))
    def calc_div_normal(self,a,b,c):
        '''
        Equivalence-class cases for div() with normal values.
        :param a: numerator
        :param b: denominator
        :param c: expected result
        The numerator is partitioned into 0/positive/negative, the
        denominator into positive/negative.
        '''
        assert round(self.calc.div(a, b), 1) == c
    @pytest.mark.parametrize(["a","b","c"],yaml.safe_load(open("./div_error_data.yaml")))
    def calc_div_error_01(self,a,b,c):
        '''
        Equivalence-class cases for div() with invalid inputs.
        :param a: numerator
        :param b: denominator
        :param c: expected exception type
        Covers inputs where the numerator or denominator is not numeric.
        '''
        with pytest.raises(TypeError) as exc: # capture the expected exception
            round(self.calc.div(a, b), 1)
        assert exc.type == c
    def calc_div_error_02(self):
        '''
        Case for an illegal numeric input to div(): a denominator of 0.
        :return:
        '''
        with pytest.raises(ZeroDivisionError) as exc: # capture the expected exception
            self.calc.div(4,0)
        assert exc.type == ZeroDivisionError
    @pytest.mark.parametrize(["a","b","c"],yaml.safe_load(open("./sub_normal_data.yaml")))
    def calc_sub_normal(self,a,b,c):
        '''
        Cases for sub() with normal values.
        :param a: minuend
        :param b: subtrahend
        :param c: expected result
        Operands fall into these equivalence classes: positive/negative
        integers and positive/negative floats, combined as:
        1. positive int - positive int
        2. positive int - negative int
        3. negative int - negative int
        4. positive float - positive float
        5. negative float - negative float
        '''
        assert round(self.calc.sub(a, b), 1) == c
    @pytest.mark.parametrize(["a","b","c"],yaml.safe_load(open("./sub_error_data.yaml")))
    def calc_sub_error(self,a,b,c):
        '''
        Cases for sub() with invalid (non-numeric) inputs.
        :param a: minuend
        :param b: subtrahend
        :param c: expected result
        '''
        with pytest.raises(TypeError): # capture the expected exception
            assert self.calc.sub(a, b) == c
    @pytest.mark.parametrize(["a","b","c"],yaml.safe_load(open("./mul_normal_data.yaml")))
    def calc_mul_normal(self,a,b,c):
        '''
        Cases for mul() with normal values.
        :param a: first factor
        :param b: second factor
        :param c: expected result
        Operands fall into these equivalence classes: positive/negative
        integers, positive/negative floats, and 0, combined as:
        1. positive int * positive int
        2. positive int * negative int
        3. negative int * negative int
        4. positive float * positive float
        5. negative float * negative float
        6. positive float * negative float
        7. exactly one factor is 0
        8. both factors are 0
        '''
        assert round(self.calc.mul(a, b), 2) == c
    @pytest.mark.parametrize(["a","b","c"],yaml.safe_load(open("./mul_error_data.yaml")))
    def calc_mul_error(self,a,b,c):
        '''
        Cases for mul() with invalid (non-numeric) inputs.
        :param a: first factor
        :param b: second factor
        :param c: expected result
        '''
        with pytest.raises(TypeError): # capture the expected exception
            assert self.calc.mul(a, b) == c
if __name__ == '__main__':
    # pytest.main takes a single argv list (main(args=None, plugins=None));
    # the original passed three positional lists, which raises TypeError.
    pytest.main(["-m", "add", "test_Calc_02.py"])
8385b0c2059bcb4a8f4fe2011810ed408837ddc6 | Python | nightfuryyy/deep-text-recognition-benchmark | /modules/gcn.py | UTF-8 | 4,131 | 2.6875 | 3 | [] | permissive |
import math
import torch
import torch.nn as nn
class GraphConvolution(nn.modules.module.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907

    Builds a per-batch adjacency matrix from cosine similarity of a linear
    projection of the input, gated by a fixed distance-based mask, then
    propagates features through it and a Linear/BatchNorm/LeakyReLU/Dropout
    output stack.
    """
    def __init__(self, batch_size, len_sequence, in_features, out_features, bias=False, scale_factor = 0., dropout = 0.0, isnormalize = False):
        super(GraphConvolution, self).__init__()
        self.batch_size = batch_size
        self.in_features = in_features
        self.out_features = out_features
        # projection used only to compute pairwise similarities in forward()
        self.LinearInput = nn.Linear(in_features, in_features)
        self.CosineSimilarity = nn.CosineSimilarity(dim=-2, eps=1e-8)
        self.len_sequence = len_sequence
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.isnormalize = isnormalize
        # fixed (1, L, L) gate: sigmoid of -(|i-j| - scale_factor), thresholded
        self.distance_matrix = self.get_distance_matrix( len_sequence, scale_factor).to(self.device)
        # NOTE(review): eye_matrix is never read by forward()/normalize_pygcn
        # (normalize_pygcn builds its own identity) -- confirm it is needed.
        self.eye_matrix = torch.eye(len_sequence)
        # self.weight = torch.nn.parameter.Parameter(torch.FloatTensor(in_features, out_features)).to(device)
        self.OutputLayers = nn.Sequential(
            nn.Linear(in_features, out_features, bias = bias),
            nn.BatchNorm1d(len_sequence),
            torch.nn.LeakyReLU(inplace=True),
            nn.Dropout(p=dropout)
        )
        # re-initialize only the Linear layers of the output stack
        def reset_parameters(m):
            if type(m) == nn.Linear:
                nn.init.kaiming_uniform_(m.weight, a=math.sqrt(5))
                # m.bias.data.fill_(0.0)
        self.OutputLayers.apply(reset_parameters)
        # if bias:
        #     self.bias = torch.nn.parameter.Parameter(torch.FloatTensor(out_features)).to(device)
        # else:
        #     self.register_parameter('bias', None)
    def get_distance_matrix(self, len_sequence, scale_factor):
        # |i-j| distance pushed through a sigmoid; entries below 0.25 are
        # zeroed so only nearby sequence positions stay connected.
        tmp = torch.arange(float(len_sequence)).repeat(len_sequence, 1)
        tmp = 1 / (1 + torch.exp(torch.abs(tmp-torch.transpose(tmp, 0, 1))-scale_factor))
        tmp[tmp < 0.25] = 0
        return tmp.unsqueeze(0)
    # def normalize_pygcn(adjacency_maxtrix):
    #     """ normalize adjacency matrix with normalization-trick. This variant
    #     is proposed in https://github.com/tkipf/pygcn .
    #     Refer https://github.com/tkipf/pygcn/issues/11 for the author's comment.
    #     Arguments:
    #         a (scipy.sparse.coo_matrix): Unnormalied adjacency matrix
    #     Returns:
    #         scipy.sparse.coo_matrix: Normalized adjacency matrix
    #     """
    #     # no need to add identity matrix because self connection has already been added
    #     # a += sp.eye(a.shape[0])
    #     rowsum = np.array(adjacency_maxtrix.sum(1))
    #     rowsum_inv = np.power(rowsum, -1).flatten()
    #     rowsum_inv[np.isinf(rowsum_inv)] = 0.
    #     # ~D in the GCN paper
    #     d_tilde = sp.diags(rowsum_inv)
    #     return d_tilde.dot(a)
    def normalize_pygcn(self, adjacency_maxtrix, net):
        # row-normalize (A + I) as in the pygcn normalization trick, then
        # propagate: D~^-1 (A + I) net
        adjacency_maxtrix = adjacency_maxtrix + torch.eye(self.len_sequence).to(self.device)
        rowsum = torch.sum(adjacency_maxtrix,2)
        rowsum_inv = torch.pow(rowsum, -1)
        rowsum_inv[torch.isinf(rowsum_inv)] = 0.
        d_tilde = torch.diag_embed(rowsum_inv, 0)
        return torch.einsum('bij,bjk,bkl->bil',d_tilde,adjacency_maxtrix,net)
    def cosine_pairwise(self,x):
        # pairwise cosine similarity across sequence positions; the permutes
        # move the batch dim out and back so CosineSimilarity(dim=-2) compares
        # feature vectors. Assumes x is (batch, seq, feat) -- TODO confirm.
        x = x.permute((1, 2, 0))
        cos_sim_pairwise = self.CosineSimilarity(x, x.unsqueeze(1))
        cos_sim_pairwise = cos_sim_pairwise.permute((2, 0, 1))
        return cos_sim_pairwise
    def forward(self, input):
        net = input
        # similarity of projected features, gated by the fixed distance mask
        c = self.LinearInput(net)
        similarity_maxtrix = self.cosine_pairwise(c)
        adjacency_maxtrix = similarity_maxtrix * self.distance_matrix
        if self.isnormalize :
            net = self.normalize_pygcn(adjacency_maxtrix, net)
        else :
            net = torch.einsum('ijk,ikl->ijl',adjacency_maxtrix, net)
        net = self.OutputLayers(net)
        return net
    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
| true |
15f92fcf1a5ced90ef592a881582eef2f580b4d7 | Python | rajendrapallala/hackerrank-python-practice | /strings/alphabet_rangoli.py | UTF-8 | 1,439 | 3.40625 | 3 | [] | no_license | def print_rangoli(size):
    # Print an alphabet rangoli of the given size: rows of hyphen-joined
    # letters mirrored around the middle row, each centered to the full width.
    import string
    alpha = string.ascii_lowercase
    l =[]
    # degenerate sizes: nothing for 0, a single 'a' for 1
    if size == 0:
        return
    if size == 1:
        print(alpha[0])
        return
    # build the bottom half: row i is letters alpha[i:size] mirrored
    # (e.g. i=0, size=3 -> "c-b-a-b-c"), centered to width size + 3*(size-1)
    for i in range(size):
        strg = alpha[i:size]
        l.append('-'.join(strg[::-1]+strg[1:]).center(size+3*(size-1),'-'))
    # top half is the bottom half reversed, minus the widest (first) row
    print('\n'.join(l[:0:-1]),'\n'.join(l),sep='\n')
def print_rangoli_noteligent(size):
    """Print the same rangoli as print_rangoli, built character by character.

    Constructs each row by wrapping the previous row's core with the next
    letter on both sides, first growing (top half down to the middle row),
    then shrinking (bottom half).
    """
    alphab = 'abcdefghijklmnopqrstuvwxyz'
    cnt = 0
    # total printed width of every row
    line_len = size + (size-1) * 3
    # top half including the middle row: row i wraps letters
    # alphab[size-j-1] for j = cnt..0 around the previous core
    for i in range(size):
        cntr=''
        if cnt == 0:
            cntr = alphab[size-1] + '-'
        else:
            for j in range(cnt,-1,-1):
                if j == cnt:
                    cntr = alphab[size-j-1] + '-'
                else:
                    cntr = alphab[size-j-1] + '-' + cntr + alphab[size-j-1] + '-'
        cnt = cnt + 1
        print(cntr.strip('-').center(line_len,'-'))
    # bottom half: same construction with the row width shrinking again
    cnt = size
    for i in range(size-2,-1,-1):
        cntr=''
        if cnt == 1:
            cntr = alphab[size-cnt] + '-'
        else:
            for j in range(cnt-1):
                if j == 0:
                    cntr = alphab[size-cnt+1] + '-'
                else:
                    cntr = alphab[size-cnt+j+1] + '-' + cntr + alphab[size-cnt+j+1] + '-'
        cnt = cnt - 1
        print(cntr.strip('-').center(line_len,'-'))
if __name__ == '__main__':
    # read the rangoli size and draw it
    n = int(input())
    print_rangoli(n)
| true |
59506802f17561e3061ceb6204731980c05e0a5f | Python | midas-research/calling-out-bluff | /Model2-EASE/src/nltk/nltk/stem/snowball.py | UTF-8 | 150,072 | 3.5625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-NC-ND-3.0",
"AGPL-3.0-only",
"MIT"
] | permissive | # -*- coding: utf-8 -*-
#
# Natural Language Toolkit: Snowball Stemmer
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Peter Michael Stahl <pemistahl@gmail.com>
# Peter Ljunglof <peter.ljunglof@heatherleaf.se> (revisions)
# Algorithms: Dr Martin Porter <martin@tartarus.org>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
u"""
Snowball stemmers and appendant demo function
This module provides a port of the Snowball stemmers
developed by Martin Porter.
There is also a demo function demonstrating the different
algorithms. It can be invoked directly on the command line.
For more information take a look into the class SnowballStemmer.
"""
from nltk.corpus import stopwords
from nltk.stem import porter
from api import StemmerI
class SnowballStemmer(StemmerI):
    u"""
    Snowball Stemmer
    At the moment, this port is able to stem words from fourteen
    languages: Danish, Dutch, English, Finnish, French, German,
    Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
    Spanish and Swedish.
    Furthermore, there is also the original English Porter algorithm:
    Porter, M. \"An algorithm for suffix stripping.\"
    Program 14.3 (1980): 130-137.
    The algorithms have been developed by Martin Porter.
    These stemmers are called Snowball, because he invented
    a programming language with this name for creating
    new stemming algorithms. There is more information available
    at http://snowball.tartarus.org/
    The stemmer is invoked as shown below:
    >>> from nltk.stem import SnowballStemmer
    >>> SnowballStemmer.languages # See which languages are supported
    ('danish', 'dutch', 'english', 'finnish', 'french', 'german', 'hungarian',
    'italian', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian',
    'spanish', 'swedish')
    >>> stemmer = SnowballStemmer("german") # Choose a language
    >>> stemmer.stem(u"Autobahnen") # Stem a word
    u'autobahn'
    Invoking the stemmers that way is useful if you do not know the
    language to be stemmed at runtime. Alternatively, if you already know
    the language, then you can invoke the language specific stemmer directly:
    >>> from nltk.stem.snowball import GermanStemmer
    >>> stemmer = GermanStemmer()
    >>> stemmer.stem(u"Autobahnen")
    u'autobahn'
    Create a language specific instance of the Snowball stemmer.
    :param language: The language whose subclass is instantiated.
    :type language: str or unicode
    :param ignore_stopwords: If set to True, stopwords are
    not stemmed and returned unchanged.
    Set to False by default.
    :type ignore_stopwords: bool
    :raise ValueError: If there is no stemmer for the specified
    language, a ValueError is raised.
    """
    languages = ("danish", "dutch", "english", "finnish", "french", "german",
                 "hungarian", "italian", "norwegian", "porter", "portuguese",
                 "romanian", "russian", "spanish", "swedish")
    def __init__(self, language, ignore_stopwords=False):
        if language not in self.languages:
            raise ValueError(u"The language '%s' is not supported." % language)
        # Resolve e.g. "german" -> the GermanStemmer class defined in this
        # module and delegate stem()/stopwords to that instance.
        stemmerclass = globals()[language.capitalize() + "Stemmer"]
        self.stemmer = stemmerclass(ignore_stopwords)
        self.stem = self.stemmer.stem
        self.stopwords = self.stemmer.stopwords
class _LanguageSpecificStemmer(StemmerI):
    u"""
    This helper subclass offers the possibility
    to invoke a specific stemmer directly.
    This is useful if you already know the language to be stemmed at runtime.
    Create an instance of the Snowball stemmer.
    :param ignore_stopwords: If set to True, stopwords are
    not stemmed and returned unchanged.
    Set to False by default.
    :type ignore_stopwords: bool
    """
    def __init__(self, ignore_stopwords=False):
        # The language is the name of the class, minus the final "Stemmer".
        language = type(self).__name__.lower()
        if language.endswith("stemmer"):
            language = language[:-7]
        self.stopwords = set()
        if ignore_stopwords:
            try:
                # NOTE: .decode("utf-8") implies the stopword corpus yields
                # byte strings here (Python 2 era code).
                for word in stopwords.words(language):
                    self.stopwords.add(word.decode("utf-8"))
            except IOError:
                # no stopword corpus exists for this language
                raise ValueError("%r has no list of stopwords. Please set"
                                 " 'ignore_stopwords' to 'False'." % self)
    def __repr__(self):
        u"""
        Print out the string representation of the respective class.
        """
        return "<%s>" % type(self).__name__
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):
    """
    A word stemmer based on the original Porter stemming algorithm.
    Porter, M. \"An algorithm for suffix stripping.\"
    Program 14.3 (1980): 130-137.
    A few minor modifications have been made to Porter's basic
    algorithm. See the source code of the module
    nltk.stem.porter for more information.
    """
    def __init__(self, ignore_stopwords=False):
        # Initialize both bases explicitly: the language-specific base
        # resolves stopwords, the porter base sets up the original algorithm.
        _LanguageSpecificStemmer.__init__(self, ignore_stopwords)
        porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):
    u"""
    This subclass encapsulates a method for defining the string region R1.
    It is used by the Danish, Norwegian, and Swedish stemmer.
    """
    def _r1_scandinavian(self, word, vowels):
        u"""
        Return the region R1 that is used by the Scandinavian stemmers.
        R1 is the region after the first non-vowel following a vowel,
        or is the null region at the end of the word if there is no
        such non-vowel. But then R1 is adjusted so that the region
        before it contains at least three letters.
        :param word: The word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
        used to determine the region R1.
        :type vowels: unicode
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: This helper method is invoked by the respective stem method of
        the subclasses DanishStemmer, NorwegianStemmer, and
        SwedishStemmer. It is not to be invoked directly!
        """
        r1 = u""
        for i in xrange(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                # if fewer than three letters would precede R1, push its
                # start out to just after the third letter
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) >= 3:
                    r1 = word[i+1:]
                else:
                    return word
                break
        return r1
class _StandardStemmer(_LanguageSpecificStemmer):
    u"""
    This subclass encapsulates two methods for defining the standard versions
    of the string regions R1, R2, and RV.
    """
    def _r1r2_standard(self, word, vowels):
        u"""
        Return the standard interpretations of the string regions R1 and R2.
        R1 is the region after the first non-vowel following a vowel,
        or is the null region at the end of the word if there is no
        such non-vowel.
        R2 is the region after the first non-vowel following a vowel
        in R1, or is the null region at the end of the word if there
        is no such non-vowel.
        :param word: The word whose regions R1 and R2 are determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
        used to determine the regions R1 and R2.
        :type vowels: unicode
        :return: (r1,r2), the regions R1 and R2 for the respective word.
        :rtype: tuple
        :note: This helper method is invoked by the respective stem method of
        the subclasses DutchStemmer, FinnishStemmer,
        FrenchStemmer, GermanStemmer, ItalianStemmer,
        PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
        It is not to be invoked directly!
        :note: A detailed description of how to define R1 and R2
        can be found at http://snowball.tartarus.org/texts/r1r2.html
        """
        r1 = u""
        r2 = u""
        # R1: everything after the first vowel->non-vowel transition
        for i in xrange(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                r1 = word[i+1:]
                break
        # R2: the same rule applied again, inside R1
        for i in xrange(1, len(r1)):
            if r1[i] not in vowels and r1[i-1] in vowels:
                r2 = r1[i+1:]
                break
        return (r1, r2)
    def _rv_standard(self, word, vowels):
        u"""
        Return the standard interpretation of the string region RV.
        If the second letter is a consonant, RV is the region after the
        next following vowel. If the first two letters are vowels, RV is
        the region after the next following consonant. Otherwise, RV is
        the region after the third letter.
        :param word: The word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
        used to determine the region RV.
        :type vowels: unicode
        :return: the region RV for the respective word.
        :rtype: unicode
        :note: This helper method is invoked by the respective stem method of
        the subclasses ItalianStemmer, PortugueseStemmer,
        RomanianStemmer, and SpanishStemmer. It is not to be
        invoked directly!
        """
        rv = u""
        if len(word) >= 2:
            # second letter is a consonant: RV starts after the next vowel
            if word[1] not in vowels:
                for i in xrange(2, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break
            # first two letters are vowels: RV starts after the next consonant
            elif word[:2] in vowels:
                for i in xrange(2, len(word)):
                    if word[i] not in vowels:
                        rv = word[i+1:]
                        break
            else:
                rv = word[3:]
        return rv
class DanishStemmer(_ScandinavianStemmer):
    u"""
    The Danish Snowball stemmer.
    :cvar __vowels: The Danish vowels.
    :type __vowels: unicode
    :cvar __consonants: The Danish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Danish double consonants.
    :type __double_consonants: tuple
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Danish
    stemming algorithm can be found under
    http://snowball.tartarus.org/algorithms/danish/stemmer.html
    """
    # The language's vowels and other important characters are defined.
    __vowels = u"aeiouy\xE6\xE5\xF8"
    __consonants = u"bcdfghjklmnpqrstvwxz"
    __double_consonants = (u"bb", u"cc", u"dd", u"ff", u"gg", u"hh", u"jj",
                           u"kk", u"ll", u"mm", u"nn", u"pp", u"qq", u"rr",
                           u"ss", u"tt", u"vv", u"ww", u"xx", u"zz")
    __s_ending = u"abcdfghjklmnoprtvyz\xE5"
    # The different suffixes, divided into the algorithm's steps
    # and organized by length, are listed in tuples.
    __step1_suffixes = (u"erendes", u"erende", u"hedens", u"ethed",
                        u"erede", u"heden", u"heder", u"endes",
                        u"ernes", u"erens", u"erets", u"ered",
                        u"ende", u"erne", u"eren", u"erer", u"heds",
                        u"enes", u"eres", u"eret", u"hed", u"ene", u"ere",
                        u"ens", u"ers", u"ets", u"en", u"er", u"es", u"et",
                        u"e", u"s")
    __step2_suffixes = (u"gd", u"dt", u"gt", u"kt")
    __step3_suffixes = (u"elig", u"l\xF8st", u"lig", u"els", u"ig")
    def stem(self, word):
        u"""
        Stem a Danish word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        # Every word is put into lower case for normalization.
        word = word.lower()
        if word in self.stopwords:
            return word
        # After this, the required regions are generated
        # by the respective helper method.
        r1 = self._r1_scandinavian(word, self.__vowels)
        # Then the actual stemming process starts.
        # Every new step is explicitly indicated
        # according to the descriptions on the Snowball website.
        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"s":
                    # a final -s is only dropped after a valid s-ending letter
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break
        # STEP 3
        if r1.endswith(u"igst"):
            word = word[:-2]
            r1 = r1[:-2]
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == u"l\xF8st":
                    word = word[:-1]
                    r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    # re-apply step 2 once after a step-3 removal
                    if r1.endswith(self.__step2_suffixes):
                        word = word[:-1]
                        r1 = r1[:-1]
                break
        # STEP 4: Undouble
        for double_cons in self.__double_consonants:
            if word.endswith(double_cons) and len(word) > 3:
                word = word[:-1]
                break
        return word
class DutchStemmer(_StandardStemmer):
    u"""
    The Dutch Snowball stemmer.
    :cvar __vowels: The Dutch vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
    :type __step3b_suffixes: tuple
    :note: A detailed description of the Dutch
    stemming algorithm can be found under
    http://snowball.tartarus.org/algorithms/dutch/stemmer.html
    """
    __vowels = u"aeiouy\xE8"
    __step1_suffixes = (u"heden", u"ene", u"en", u"se", u"s")
    __step3b_suffixes = (u"baar", u"lijk", u"bar", u"end", u"ing", u"ig")
    def stem(self, word):
        u"""
        Stem a Dutch word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        step2_success = False
        # Vowel accents are removed.
        word = (word.replace(u"\xE4", u"a").replace(u"\xE1", u"a")
                .replace(u"\xEB", u"e").replace(u"\xE9", u"e")
                .replace(u"\xED", u"i").replace(u"\xEF", u"i")
                .replace(u"\xF6", u"o").replace(u"\xF3", u"o")
                .replace(u"\xFC", u"u").replace(u"\xFA", u"u"))
        # An initial 'y', a 'y' after a vowel,
        # and an 'i' between self.__vowels is put into upper case.
        # As from now these are treated as consonants.
        if word.startswith(u"y"):
            word = u"".join((u"Y", word[1:]))
        for i in xrange(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == u"y":
                word = u"".join((word[:i], u"Y", word[i+1:]))
        for i in xrange(1, len(word)-1):
            if (word[i-1] in self.__vowels and word[i] == u"i" and
                word[i+1] in self.__vowels):
                word = u"".join((word[:i], u"I", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in xrange(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break
        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"heden":
                    # -heden is mapped to -heid rather than removed
                    word = u"".join((word[:-5], u"heid"))
                    r1 = u"".join((r1[:-5], u"heid"))
                    if r2.endswith(u"heden"):
                        r2 = u"".join((r2[:-5], u"heid"))
                elif (suffix in (u"ene", u"en") and
                      not word.endswith(u"heden") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-3:-len(suffix)] != u"gem"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    # undouble a trailing kk/dd/tt after the removal
                    if word.endswith((u"kk", u"dd", u"tt")):
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                elif (suffix in (u"se", u"s") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-1] != u"j"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 2
        if r1.endswith(u"e") and word[-2] not in self.__vowels:
            step2_success = True
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
            if word.endswith((u"kk", u"dd", u"tt")):
                word = word[:-1]
                r1 = r1[:-1]
                r2 = r2[:-1]
        # STEP 3a
        if r2.endswith(u"heid") and word[-5] != u"c":
            word = word[:-4]
            r1 = r1[:-4]
            r2 = r2[:-4]
            if (r1.endswith(u"en") and word[-3] not in self.__vowels and
                word[-5:-2] != u"gem"):
                word = word[:-2]
                r1 = r1[:-2]
                r2 = r2[:-2]
                if word.endswith((u"kk", u"dd", u"tt")):
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
        # STEP 3b: Derivational suffixes
        for suffix in self.__step3b_suffixes:
            if r2.endswith(suffix):
                if suffix in (u"end", u"ing"):
                    word = word[:-3]
                    r2 = r2[:-3]
                    if r2.endswith(u"ig") and word[-3] != u"e":
                        word = word[:-2]
                    else:
                        if word.endswith((u"kk", u"dd", u"tt")):
                            word = word[:-1]
                elif suffix == u"ig" and word[-3] != u"e":
                    word = word[:-2]
                elif suffix == u"lijk":
                    word = word[:-4]
                    r1 = r1[:-4]
                    # re-apply the step-2 rule after removing -lijk
                    if r1.endswith(u"e") and word[-2] not in self.__vowels:
                        word = word[:-1]
                        if word.endswith((u"kk", u"dd", u"tt")):
                            word = word[:-1]
                elif suffix == u"baar":
                    word = word[:-4]
                elif suffix == u"bar" and step2_success:
                    word = word[:-3]
                break
        # STEP 4: Undouble vowel
        if len(word) >= 4:
            if word[-1] not in self.__vowels and word[-1] != u"I":
                if word[-3:-1] in (u"aa", u"ee", u"oo", u"uu"):
                    if word[-4] not in self.__vowels:
                        word = u"".join((word[:-3], word[-3], word[-1]))
        # All occurrences of 'I' and 'Y' are put back into lower case.
        word = word.replace(u"I", u"i").replace(u"Y", u"y")
        return word
class EnglishStemmer(_StandardStemmer):
    u"""
    The English Snowball stemmer.
    :cvar __vowels: The English vowels.
    :type __vowels: unicode
    :cvar __double_consonants: The English double consonants.
    :type __double_consonants: tuple
    :cvar __li_ending: Letters that may directly appear before a word final 'li'.
    :type __li_ending: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
    :type __step1a_suffixes: tuple
    :cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
    :type __step1b_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
    :type __step5_suffixes: tuple
    :cvar __special_words: A dictionary containing words
                           which have to be stemmed specially.
    :type __special_words: dict
    :note: A detailed description of the English
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/english/stemmer.html
    """
    __vowels = u"aeiouy"
    __double_consonants = (u"bb", u"dd", u"ff", u"gg", u"mm", u"nn",
                           u"pp", u"rr", u"tt")
    __li_ending = u"cdeghkmnrt"
    __step0_suffixes = (u"'s'", u"'s", u"'")
    __step1a_suffixes = (u"sses", u"ied", u"ies", u"us", u"ss", u"s")
    __step1b_suffixes = (u"eedly", u"ingly", u"edly", u"eed", u"ing", u"ed")
    __step2_suffixes = (u'ization', u'ational', u'fulness', u'ousness',
                        u'iveness', u'tional', u'biliti', u'lessli',
                        u'entli', u'ation', u'alism', u'aliti', u'ousli',
                        u'iviti', u'fulli', u'enci', u'anci', u'abli',
                        u'izer', u'ator', u'alli', u'bli', u'ogi', u'li')
    __step3_suffixes = (u'ational', u'tional', u'alize', u'icate', u'iciti',
                        u'ative', u'ical', u'ness', u'ful')
    __step4_suffixes = (u'ement', u'ance', u'ence', u'able', u'ible', u'ment',
                        u'ant', u'ent', u'ism', u'ate', u'iti', u'ous',
                        u'ive', u'ize', u'ion', u'al', u'er', u'ic')
    __step5_suffixes = (u"e", u"l")
    # Irregular forms and invariant words are mapped directly, bypassing
    # the algorithm (per the "exceptional forms" list of the Porter2 spec).
    __special_words = {u"skis" : u"ski",
                       u"skies" : u"sky",
                       u"dying" : u"die",
                       u"lying" : u"lie",
                       u"tying" : u"tie",
                       u"idly" : u"idl",
                       u"gently" : u"gentl",
                       u"ugly" : u"ugli",
                       u"early" : u"earli",
                       u"only" : u"onli",
                       u"singly" : u"singl",
                       u"sky" : u"sky",
                       u"news" : u"news",
                       u"howe" : u"howe",
                       u"atlas" : u"atlas",
                       u"cosmos" : u"cosmos",
                       u"bias" : u"bias",
                       u"andes" : u"andes",
                       u"inning" : u"inning",
                       u"innings" : u"inning",
                       u"outing" : u"outing",
                       u"outings" : u"outing",
                       u"canning" : u"canning",
                       u"cannings" : u"canning",
                       u"herring" : u"herring",
                       u"herrings" : u"herring",
                       u"earring" : u"earring",
                       u"earrings" : u"earring",
                       u"proceed" : u"proceed",
                       u"proceeds" : u"proceed",
                       u"proceeded" : u"proceed",
                       u"proceeding" : u"proceed",
                       u"exceed" : u"exceed",
                       u"exceeds" : u"exceed",
                       u"exceeded" : u"exceed",
                       u"exceeding" : u"exceed",
                       u"succeed" : u"succeed",
                       u"succeeds" : u"succeed",
                       u"succeeded" : u"succeed",
                       u"succeeding" : u"succeed"}
    def stem(self, word):
        u"""
        Stem an English word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        # Stopwords and very short words are returned unchanged;
        # listed exceptional forms are mapped via table lookup.
        if word in self.stopwords or len(word) <= 2:
            return word
        elif word in self.__special_words:
            return self.__special_words[word]
        # Map the different apostrophe characters to a single consistent one
        word = (word.replace(u"\u2019", u"\x27")
                    .replace(u"\u2018", u"\x27")
                    .replace(u"\u201B", u"\x27"))
        if word.startswith(u"\x27"):
            word = word[1:]
        # 'y' at the start of the word or after a vowel is consonantal:
        # mark it as 'Y' so it is not counted as a vowel below.
        if word.startswith(u"y"):
            word = "".join((u"Y", word[1:]))
        for i in xrange(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == u"y":
                word = "".join((word[:i], u"Y", word[i+1:]))
        step1a_vowel_found = False
        step1b_vowel_found = False
        r1 = u""
        r2 = u""
        # Words with these prefixes get a fixed R1 region (algorithm exception);
        # otherwise R1/R2 are computed the standard Snowball way.
        if word.startswith((u"gener", u"commun", u"arsen")):
            if word.startswith((u"gener", u"arsen")):
                r1 = word[5:]
            else:
                r1 = word[6:]
            for i in xrange(1, len(r1)):
                if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
                    r2 = r1[i+1:]
                    break
        else:
            r1, r2 = self._r1r2_standard(word, self.__vowels)
        # STEP 0: Strip possessive endings ('s', 's, ').
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                break
        # STEP 1a: Plural and related 's' endings.
        for suffix in self.__step1a_suffixes:
            if word.endswith(suffix):
                if suffix == u"sses":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                elif suffix in (u"ied", u"ies"):
                    if len(word[:-len(suffix)]) > 1:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    else:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                elif suffix == u"s":
                    # Delete final 's' only if a vowel precedes the last
                    # two characters.
                    for letter in word[:-2]:
                        if letter in self.__vowels:
                            step1a_vowel_found = True
                            break
                    if step1a_vowel_found:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                break
        # STEP 1b: '-ed'/'-ing' style endings.
        for suffix in self.__step1b_suffixes:
            if word.endswith(suffix):
                if suffix in (u"eed", u"eedly"):
                    if r1.endswith(suffix):
                        word = u"".join((word[:-len(suffix)], u"ee"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ee"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ee"))
                        else:
                            r2 = u""
                else:
                    for letter in word[:-len(suffix)]:
                        if letter in self.__vowels:
                            step1b_vowel_found = True
                            break
                    if step1b_vowel_found:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        # After deletion, restore an 'e', undouble a
                        # consonant, or append 'e' to short words.
                        if word.endswith((u"at", u"bl", u"iz")):
                            word = u"".join((word, u"e"))
                            r1 = u"".join((r1, u"e"))
                            if len(word) > 5 or len(r1) >=3:
                                r2 = u"".join((r2, u"e"))
                        elif word.endswith(self.__double_consonants):
                            word = word[:-1]
                            r1 = r1[:-1]
                            r2 = r2[:-1]
                        elif ((r1 == u"" and len(word) >= 3 and
                               word[-1] not in self.__vowels and
                               word[-1] not in u"wxY" and
                               word[-2] in self.__vowels and
                               word[-3] not in self.__vowels)
                              or
                              (r1 == u"" and len(word) == 2 and
                               word[0] in self.__vowels and
                               word[1] not in self.__vowels)):
                            word = u"".join((word, u"e"))
                            if len(r1) > 0:
                                r1 = u"".join((r1, u"e"))
                            if len(r2) > 0:
                                r2 = u"".join((r2, u"e"))
                break
        # STEP 1c: Replace a final 'y'/'Y' preceded by a non-vowel with 'i'.
        if word[-1] in u"yY" and word[-2] not in self.__vowels and len(word) > 2:
            word = u"".join((word[:-1], u"i"))
            if len(r1) >= 1:
                r1 = u"".join((r1[:-1], u"i"))
            else:
                r1 = u""
            if len(r2) >= 1:
                r2 = u"".join((r2[:-1], u"i"))
            else:
                r2 = u""
        # STEP 2: Map derivational suffixes (found in R1) to shorter forms.
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == u"tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix in (u"enci", u"anci", u"abli"):
                        word = u"".join((word[:-1], u"e"))
                        if len(r1) >= 1:
                            r1 = u"".join((r1[:-1], u"e"))
                        else:
                            r1 = u""
                        if len(r2) >= 1:
                            r2 = u"".join((r2[:-1], u"e"))
                        else:
                            r2 = u""
                    elif suffix == u"entli":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix in (u"izer", u"ization"):
                        word = u"".join((word[:-len(suffix)], u"ize"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ize"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ize"))
                        else:
                            r2 = u""
                    elif suffix in (u"ational", u"ation", u"ator"):
                        word = u"".join((word[:-len(suffix)], u"ate"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ate"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ate"))
                        else:
                            r2 = u"e"
                    elif suffix in (u"alism", u"aliti", u"alli"):
                        word = u"".join((word[:-len(suffix)], u"al"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"al"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"al"))
                        else:
                            r2 = u""
                    elif suffix == u"fulness":
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                    elif suffix in (u"ousli", u"ousness"):
                        word = u"".join((word[:-len(suffix)], u"ous"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ous"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ous"))
                        else:
                            r2 = u""
                    elif suffix in (u"iveness", u"iviti"):
                        word = u"".join((word[:-len(suffix)], u"ive"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ive"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ive"))
                        else:
                            r2 = u"e"
                    elif suffix in (u"biliti", u"bli"):
                        word = u"".join((word[:-len(suffix)], u"ble"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ble"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ble"))
                        else:
                            r2 = u""
                    elif suffix == u"ogi" and word[-4] == u"l":
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                    elif suffix in (u"fulli", u"lessli"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix == u"li" and word[-3] in self.__li_ending:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                break
        # STEP 3: Further derivational suffixes in R1 ('ative' requires R2).
        for suffix in self.__step3_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == u"tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    elif suffix == u"ational":
                        word = u"".join((word[:-len(suffix)], u"ate"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ate"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ate"))
                        else:
                            r2 = u""
                    elif suffix == u"alize":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                    elif suffix in (u"icate", u"iciti", u"ical"):
                        word = u"".join((word[:-len(suffix)], u"ic"))
                        if len(r1) >= len(suffix):
                            r1 = u"".join((r1[:-len(suffix)], u"ic"))
                        else:
                            r1 = u""
                        if len(r2) >= len(suffix):
                            r2 = u"".join((r2[:-len(suffix)], u"ic"))
                        else:
                            r2 = u""
                    elif suffix in (u"ful", u"ness"):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                    elif suffix == u"ative" and r2.endswith(suffix):
                        word = word[:-5]
                        r1 = r1[:-5]
                        r2 = r2[:-5]
                break
        # STEP 4: Residual suffixes, deleted only when found in R2.
        for suffix in self.__step4_suffixes:
            if word.endswith(suffix):
                if r2.endswith(suffix):
                    if suffix == u"ion":
                        # 'ion' is deleted only when preceded by 's' or 't'.
                        if word[-4] in u"st":
                            word = word[:-3]
                            r1 = r1[:-3]
                            r2 = r2[:-3]
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break
        # STEP 5: Remove a final 'e' or 'l' under region conditions.
        if r2.endswith(u"l") and word[-2] == u"l":
            word = word[:-1]
        elif r2.endswith(u"e"):
            word = word[:-1]
        elif r1.endswith(u"e"):
            if len(word) >= 4 and (word[-2] in self.__vowels or
                                   word[-2] in u"wxY" or
                                   word[-3] not in self.__vowels or
                                   word[-4] in self.__vowels):
                word = word[:-1]
        # Turn the 'Y' consonant markers back into lower case.
        word = word.replace(u"Y", u"y")
        return word
class FinnishStemmer(_StandardStemmer):
    u"""
    The Finnish Snowball stemmer.
    :cvar __vowels: The Finnish vowels.
    :type __vowels: unicode
    :cvar __restricted_vowels: A subset of the Finnish vowels.
    :type __restricted_vowels: unicode
    :cvar __long_vowels: The Finnish vowels in their long forms.
    :type __long_vowels: tuple
    :cvar __consonants: The Finnish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Finnish double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Finnish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/finnish/stemmer.html
    """
    __vowels = u"aeiouy\xE4\xF6"
    __restricted_vowels = u"aeiou\xE4\xF6"
    __long_vowels = (u"aa", u"ee", u"ii", u"oo", u"uu", u"\xE4\xE4",
                     u"\xF6\xF6")
    __consonants = u"bcdfghjklmnpqrstvwxz"
    __double_consonants = (u"bb", u"cc", u"dd", u"ff", u"gg", u"hh", u"jj",
                           u"kk", u"ll", u"mm", u"nn", u"pp", u"qq", u"rr",
                           u"ss", u"tt", u"vv", u"ww", u"xx", u"zz")
    __step1_suffixes = (u'kaan', u'k\xE4\xE4n', u'sti', u'kin', u'han',
                        u'h\xE4n', u'ko', u'k\xF6', u'pa', u'p\xE4')
    __step2_suffixes = (u'nsa', u'ns\xE4', u'mme', u'nne', u'si', u'ni',
                        u'an', u'\xE4n', u'en')
    __step3_suffixes = (u'siin', u'tten', u'seen', u'han', u'hen', u'hin',
                        u'hon', u'h\xE4n', u'h\xF6n', u'den', u'tta',
                        u'tt\xE4', u'ssa', u'ss\xE4', u'sta',
                        u'st\xE4', u'lla', u'll\xE4', u'lta',
                        u'lt\xE4', u'lle', u'ksi', u'ine', u'ta',
                        u't\xE4', u'na', u'n\xE4', u'a', u'\xE4',
                        u'n')
    __step4_suffixes = (u'impi', u'impa', u'imp\xE4', u'immi', u'imma',
                        u'imm\xE4', u'mpi', u'mpa', u'mp\xE4', u'mmi',
                        u'mma', u'mm\xE4', u'eja', u'ej\xE4')
    def stem(self, word):
        u"""
        Stem a Finnish word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # Records whether a case ending was removed in step 3;
        # step 5 behaves differently depending on this flag.
        step3_success = False
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        # STEP 1: Particles etc.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"sti":
                    if suffix in r2:
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    # The other particles are removed only when preceded
                    # by 'n', 't' or a vowel.
                    if word[-len(suffix)-1] in u"ntaeiouy\xE4\xF6":
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break
        # STEP 2: Possessives
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == u"si":
                    if word[-3] != u"k":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                elif suffix == u"ni":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                    if word.endswith(u"kse"):
                        word = u"".join((word[:-3], u"ksi"))
                    if r1.endswith(u"kse"):
                        r1 = u"".join((r1[:-3], u"ksi"))
                    if r2.endswith(u"kse"):
                        r2 = u"".join((r2[:-3], u"ksi"))
                elif suffix == u"an":
                    if (word[-4:-2] in (u"ta", u"na") or
                        word[-5:-2] in (u"ssa", u"sta", u"lla", u"lta")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                elif suffix == u"\xE4n":
                    if (word[-4:-2] in (u"t\xE4", u"n\xE4") or
                        word[-5:-2] in (u"ss\xE4", u"st\xE4",
                                        u"ll\xE4", u"lt\xE4")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                elif suffix == u"en":
                    if word[-5:-2] in (u"lle", u"ine"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-3]
                    r1 = r1[:-3]
                    r2 = r2[:-3]
                break
        # STEP 3: Cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in (u"han", u"hen", u"hin", u"hon", u"h\xE4n",
                              u"h\xF6n"):
                    # Illative 'hVn' is removed only when the preceding
                    # vowel matches the suffix vowel.
                    if ((suffix == u"han" and word[-4] == u"a") or
                        (suffix == u"hen" and word[-4] == u"e") or
                        (suffix == u"hin" and word[-4] == u"i") or
                        (suffix == u"hon" and word[-4] == u"o") or
                        (suffix == u"h\xE4n" and word[-4] == u"\xE4") or
                        (suffix == u"h\xF6n" and word[-4] == u"\xF6")):
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True
                elif suffix in (u"siin", u"den", u"tten"):
                    if (word[-len(suffix)-1] == u"i" and
                        word[-len(suffix)-2] in self.__restricted_vowels):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        step3_success = True
                    else:
                        continue
                elif suffix == u"seen":
                    if word[-6:-4] in self.__long_vowels:
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                        step3_success = True
                    else:
                        continue
                elif suffix in (u"a", u"\xE4"):
                    if word[-2] in self.__vowels and word[-3] in self.__consonants:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                        step3_success = True
                elif suffix in (u"tta", u"tt\xE4"):
                    if word[-4] == u"e":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True
                elif suffix == u"n":
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
                    step3_success = True
                    # After removing 'n', also drop one vowel of a
                    # preceding 'ie' or long vowel.
                    if word[-2:] == u"ie" or word[-2:] in self.__long_vowels:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    step3_success = True
                break
        # STEP 4: Other endings
        for suffix in self.__step4_suffixes:
            if r2.endswith(suffix):
                if suffix in (u"mpi", u"mpa", u"mp\xE4", u"mmi", u"mma",
                              u"mm\xE4"):
                    if word[-5:-3] != u"po":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 5: Plurals
        if step3_success and len(r1) >= 1 and r1[-1] in u"ij":
            word = word[:-1]
            r1 = r1[:-1]
        elif (not step3_success and len(r1) >= 2 and
              r1[-1] == u"t" and r1[-2] in self.__vowels):
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
            if r2.endswith(u"imma"):
                word = word[:-4]
                r1 = r1[:-4]
            elif r2.endswith(u"mma") and r2[-5:-3] != u"po":
                word = word[:-3]
                r1 = r1[:-3]
        # STEP 6: Tidying up
        if r1[-2:] in self.__long_vowels:
            word = word[:-1]
            r1 = r1[:-1]
        if (len(r1) >= 2 and r1[-2] in self.__consonants and
            r1[-1] in u"a\xE4ei"):
            word = word[:-1]
            r1 = r1[:-1]
        if r1.endswith((u"oj", u"uj")):
            word = word[:-1]
            r1 = r1[:-1]
        if r1.endswith(u"jo"):
            word = word[:-1]
            r1 = r1[:-1]
        # If the word ends with a double consonant
        # followed by zero or more vowels, the last consonant is removed.
        for i in xrange(1, len(word)):
            if word[-i] in self.__vowels:
                continue
            else:
                if i == 1:
                    if word[-i-1:] in self.__double_consonants:
                        word = word[:-1]
                else:
                    if word[-i-1:-i+1] in self.__double_consonants:
                        word = u"".join((word[:-i], word[-i+1:]))
                break
        return word
class FrenchStemmer(_StandardStemmer):
    u"""
    The French Snowball stemmer.
    :cvar __vowels: The French vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the French
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/french/stemmer.html
    """
    __vowels = u"aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
    __step1_suffixes = (u'issements', u'issement', u'atrices', u'atrice',
                        u'ateurs', u'ations', u'logies', u'usions',
                        u'utions', u'ements', u'amment', u'emment',
                        u'ances', u'iqUes', u'ismes', u'ables', u'istes',
                        u'ateur', u'ation', u'logie', u'usion', u'ution',
                        u'ences', u'ement', u'euses', u'ments', u'ance',
                        u'iqUe', u'isme', u'able', u'iste', u'ence',
                        u'it\xE9s', u'ives', u'eaux', u'euse', u'ment',
                        u'eux', u'it\xE9', u'ive', u'ifs', u'aux', u'if')
    __step2a_suffixes = (u'issaIent', u'issantes', u'iraIent', u'issante',
                         u'issants', u'issions', u'irions', u'issais',
                         u'issait', u'issant', u'issent', u'issiez', u'issons',
                         u'irais', u'irait', u'irent', u'iriez', u'irons',
                         u'iront', u'isses', u'issez', u'\xEEmes',
                         u'\xEEtes', u'irai', u'iras', u'irez', u'isse',
                         u'ies', u'ira', u'\xEEt', u'ie', u'ir', u'is',
                         u'it', u'i')
    __step2b_suffixes = (u'eraIent', u'assions', u'erions', u'assent',
                         u'assiez', u'\xE8rent', u'erais', u'erait',
                         u'eriez', u'erons', u'eront', u'aIent', u'antes',
                         u'asses', u'ions', u'erai', u'eras', u'erez',
                         u'\xE2mes', u'\xE2tes', u'ante', u'ants',
                         u'asse', u'\xE9es', u'era', u'iez', u'ais',
                         u'ait', u'ant', u'\xE9e', u'\xE9s', u'er',
                         u'ez', u'\xE2t', u'ai', u'as', u'\xE9', u'a')
    __step4_suffixes = (u'i\xE8re', u'I\xE8re', u'ion', u'ier', u'Ier',
                        u'e', u'\xEB')
    def stem(self, word):
        u"""
        Stem a French word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # Flags steering which later steps apply.
        step1_success = False
        rv_ending_found = False
        step2a_success = False
        step2b_success = False
        # Every occurrence of 'u' after 'q' is put into upper case.
        # (Upper-case letters mark characters that must not be treated
        # as vowels; they are lowered again at the end.)
        for i in xrange(1, len(word)):
            if word[i-1] == u"q" and word[i] == u"u":
                word = u"".join((word[:i], u"U", word[i+1:]))
        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        # Every occurrence of 'y' preceded or
        # followed by a vowel is also put into upper case.
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))
                elif word[i] == u"i":
                    word = u"".join((word[:i], u"I", word[i+1:]))
            if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
                if word[i] == u"y":
                    word = u"".join((word[:i], u"Y", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self.__rv_french(word, self.__vowels)
        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"eaux":
                    word = word[:-1]
                    step1_success = True
                elif suffix in (u"euse", u"euses"):
                    if suffix in r2:
                        word = word[:-len(suffix)]
                        step1_success = True
                    elif suffix in r1:
                        word = u"".join((word[:-len(suffix)], u"eux"))
                        step1_success = True
                elif suffix in (u"ement", u"ements") and suffix in rv:
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-2:] == u"iv" and u"iv" in r2:
                        word = word[:-2]
                        if word[-2:] == u"at" and u"at" in r2:
                            word = word[:-2]
                    elif word[-3:] == u"eus":
                        if u"eus" in r2:
                            word = word[:-3]
                        elif u"eus" in r1:
                            word = u"".join((word[:-1], u"x"))
                    elif word[-3:] in (u"abl", u"iqU"):
                        if u"abl" in r2 or u"iqU" in r2:
                            word = word[:-3]
                    elif word[-3:] in (u"i\xE8r", u"I\xE8r"):
                        if u"i\xE8r" in rv or u"I\xE8r" in rv:
                            word = u"".join((word[:-3], u"i"))
                elif suffix == u"amment" and suffix in rv:
                    word = u"".join((word[:-6], u"ant"))
                    rv = u"".join((rv[:-6], u"ant"))
                    # Treated as a verb ending: step 2 may still apply.
                    rv_ending_found = True
                elif suffix == u"emment" and suffix in rv:
                    word = u"".join((word[:-6], u"ent"))
                    rv_ending_found = True
                elif (suffix in (u"ment", u"ments") and suffix in rv and
                      not rv.startswith(suffix) and
                      rv[rv.rindex(suffix)-1] in self.__vowels):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    rv_ending_found = True
                elif suffix == u"aux" and suffix in r1:
                    word = u"".join((word[:-2], u"l"))
                    step1_success = True
                elif (suffix in (u"issement", u"issements") and suffix in r1
                      and word[-len(suffix)-1] not in self.__vowels):
                    word = word[:-len(suffix)]
                    step1_success = True
                elif suffix in (u"ance", u"iqUe", u"isme", u"able", u"iste",
                              u"eux", u"ances", u"iqUes", u"ismes",
                              u"ables", u"istes") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True
                elif suffix in (u"atrice", u"ateur", u"ation", u"atrices",
                                u"ateurs", u"ations") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-2:] == u"ic":
                        if u"ic" in r2:
                            word = word[:-2]
                        else:
                            word = u"".join((word[:-2], u"iqU"))
                elif suffix in (u"logie", u"logies") and suffix in r2:
                    word = u"".join((word[:-len(suffix)], u"log"))
                    step1_success = True
                elif (suffix in (u"usion", u"ution", u"usions", u"utions") and
                      suffix in r2):
                    word = u"".join((word[:-len(suffix)], u"u"))
                    step1_success = True
                elif suffix in (u"ence", u"ences") and suffix in r2:
                    word = u"".join((word[:-len(suffix)], u"ent"))
                    step1_success = True
                elif suffix in (u"it\xE9", u"it\xE9s") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-4:] == u"abil":
                        if u"abil" in r2:
                            word = word[:-4]
                        else:
                            word = u"".join((word[:-2], u"l"))
                    elif word[-2:] == u"ic":
                        if u"ic" in r2:
                            word = word[:-2]
                        else:
                            word = u"".join((word[:-2], u"iqU"))
                    elif word[-2:] == u"iv":
                        if u"iv" in r2:
                            word = word[:-2]
                elif (suffix in (u"if", u"ive", u"ifs", u"ives") and
                      suffix in r2):
                    word = word[:-len(suffix)]
                    step1_success = True
                    if word[-2:] == u"at" and u"at" in r2:
                        word = word[:-2]
                        if word[-2:] == u"ic":
                            if u"ic" in r2:
                                word = word[:-2]
                            else:
                                word = u"".join((word[:-2], u"iqU"))
                break
        # STEP 2a: Verb suffixes beginning 'i'
        if not step1_success or rv_ending_found:
            for suffix in self.__step2a_suffixes:
                if word.endswith(suffix):
                    if (suffix in rv and len(rv) > len(suffix) and
                        rv[rv.rindex(suffix)-1] not in self.__vowels):
                        word = word[:-len(suffix)]
                        step2a_success = True
                    break
            # STEP 2b: Other verb suffixes
            if not step2a_success:
                for suffix in self.__step2b_suffixes:
                    if rv.endswith(suffix):
                        if suffix == u"ions" and u"ions" in r2:
                            word = word[:-4]
                            step2b_success = True
                        elif suffix in (u'eraIent', u'erions', u'\xE8rent',
                                        u'erais', u'erait', u'eriez',
                                        u'erons', u'eront', u'erai', u'eras',
                                        u'erez', u'\xE9es', u'era', u'iez',
                                        u'\xE9e', u'\xE9s', u'er', u'ez',
                                        u'\xE9'):
                            word = word[:-len(suffix)]
                            step2b_success = True
                        elif suffix in (u'assions', u'assent', u'assiez',
                                        u'aIent', u'antes', u'asses',
                                        u'\xE2mes', u'\xE2tes', u'ante',
                                        u'ants', u'asse', u'ais', u'ait',
                                        u'ant', u'\xE2t', u'ai', u'as',
                                        u'a'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            step2b_success = True
                            if rv.endswith(u"e"):
                                word = word[:-1]
                        break
        # STEP 3
        if step1_success or step2a_success or step2b_success:
            if word[-1] == u"Y":
                word = u"".join((word[:-1], u"i"))
            elif word[-1] == u"\xE7":
                word = u"".join((word[:-1], u"c"))
        # STEP 4: Residual suffixes
        else:
            if (len(word) >= 2 and word[-1] == u"s" and
                word[-2] not in u"aiou\xE8s"):
                word = word[:-1]
            for suffix in self.__step4_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if (suffix == u"ion" and suffix in r2 and
                            rv[-4] in u"st"):
                            word = word[:-3]
                        elif suffix in (u"ier", u"i\xE8re", u"Ier",
                                        u"I\xE8re"):
                            word = u"".join((word[:-len(suffix)], u"i"))
                        elif suffix == u"e":
                            word = word[:-1]
                        elif suffix == u"\xEB" and word[-3:-1] == u"gu":
                            word = word[:-1]
                        break
        # STEP 5: Undouble
        if word.endswith((u"enn", u"onn", u"ett", u"ell", u"eill")):
            word = word[:-1]
        # STEP 6: Un-accent
        # Replace 'é'/'è' with 'e' when followed only by non-vowels.
        for i in xrange(1, len(word)):
            if word[-i] not in self.__vowels:
                i += 1
            else:
                if i != 1 and word[-i] in (u"\xE9", u"\xE8"):
                    word = u"".join((word[:-i], u"e", word[-i+1:]))
                break
        # Lower the upper-case marker characters again.
        word = (word.replace(u"I", u"i")
                    .replace(u"U", u"u")
                    .replace(u"Y", u"y"))
        return word
    def __rv_french(self, word, vowels):
        u"""
        Return the region RV that is used by the French stemmer.
        If the word begins with two vowels, RV is the region after
        the third letter. Otherwise, it is the region after the first
        vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, u'par',
        u'col' or u'tap' at the beginning of a word is also taken to
        define RV as the region to their right.)
        :param word: The French word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The French vowels that are used to determine
                       the region RV.
        :type vowels: unicode
        :return: the region RV for the respective French word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of
               the subclass FrenchStemmer. It is not to be invoked directly!
        """
        rv = u""
        if len(word) >= 2:
            if (word.startswith((u"par", u"col", u"tap")) or
                (word[0] in vowels and word[1] in vowels)):
                rv = word[3:]
            else:
                for i in xrange(1, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break
        return rv
class GermanStemmer(_StandardStemmer):
    u"""
    The German Snowball stemmer.
    :cvar __vowels: The German vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __st_ending: Letter that may directly appear before a word final 'st'.
    :type __st_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the German
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/german/stemmer.html
    """
    __vowels = u"aeiouy\xE4\xF6\xFC"
    __s_ending = u"bdfghklmnrt"
    __st_ending = u"bdfghklmnt"
    __step1_suffixes = (u"ern", u"em", u"er", u"en", u"es", u"e", u"s")
    __step2_suffixes = (u"est", u"en", u"er", u"st")
    __step3_suffixes = (u"isch", u"lich", u"heit", u"keit",
                        u"end", u"ung", u"ig", u"ik")
    def stem(self, word):
        u"""
        Stem a German word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # Replace 'ß' by 'ss' before processing.
        word = word.replace(u"\xDF", u"ss")
        # Every occurrence of 'u' and 'y'
        # between vowels is put into upper case.
        # (Upper case marks them as consonantal; lowered again at the end.)
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))
                elif word[i] == u"y":
                    word = u"".join((word[:i], u"Y", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in xrange(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break
        # STEP 1: Remove declensional endings.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if (suffix in (u"en", u"es", u"e") and
                    word[-len(suffix)-4:-len(suffix)] == u"niss"):
                    # 'niss' before the ending: drop the ending plus one 's'.
                    word = word[:-len(suffix)-1]
                    r1 = r1[:-len(suffix)-1]
                    r2 = r2[:-len(suffix)-1]
                elif suffix == u"s":
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 2: Remove verbal/comparative endings.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == u"st":
                    if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break
        # STEP 3: Derivational suffixes
        for suffix in self.__step3_suffixes:
            if r2.endswith(suffix):
                if suffix in (u"end", u"ung"):
                    if (u"ig" in r2[-len(suffix)-2:-len(suffix)] and
                        u"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                elif (suffix in (u"ig", u"ik", u"isch") and
                      u"e" not in r2[-len(suffix)-1:-len(suffix)]):
                    word = word[:-len(suffix)]
                elif suffix in (u"lich", u"heit"):
                    if (u"er" in r1[-len(suffix)-2:-len(suffix)] or
                        u"en" in r1[-len(suffix)-2:-len(suffix)]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                elif suffix == u"keit":
                    if u"lich" in r2[-len(suffix)-4:-len(suffix)]:
                        word = word[:-len(suffix)-4]
                    elif u"ig" in r2[-len(suffix)-2:-len(suffix)]:
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                break
        # Umlaut accents are removed and
        # 'u' and 'y' are put back into lower case.
        word = (word.replace(u"\xE4", u"a").replace(u"\xF6", u"o")
                    .replace(u"\xFC", u"u").replace(u"U", u"u")
                    .replace(u"Y", u"y"))
        return word
class HungarianStemmer(_LanguageSpecificStemmer):
u"""
The Hungarian Snowball stemmer.
:cvar __vowels: The Hungarian vowels.
:type __vowels: unicode
:cvar __digraphs: The Hungarian digraphs.
:type __digraphs: tuple
:cvar __double_consonants: The Hungarian double consonants.
:type __double_consonants: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
:type __step4_suffixes: tuple
:cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
:type __step5_suffixes: tuple
:cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
:type __step6_suffixes: tuple
:cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
:type __step7_suffixes: tuple
:cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
:type __step8_suffixes: tuple
:cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
:type __step9_suffixes: tuple
:note: A detailed description of the Hungarian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/hungarian/stemmer.html
"""
__vowels = u"aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
__digraphs = (u"cs", u"dz", u"dzs", u"gy", u"ly", u"ny", u"ty", u"zs")
__double_consonants = (u"bb", u"cc", u"ccs", u"dd", u"ff", u"gg",
u"ggy", u"jj", u"kk", u"ll", u"lly", u"mm",
u"nn", u"nny", u"pp", u"rr", u"ss", u"ssz",
u"tt", u"tty", u"vv", u"zz", u"zzs")
__step1_suffixes = (u"al", u"el")
__step2_suffixes = (u'k\xE9ppen', u'onk\xE9nt', u'enk\xE9nt',
u'ank\xE9nt', u'k\xE9pp', u'k\xE9nt', u'ban',
u'ben', u'nak', u'nek', u'val', u'vel', u't\xF3l',
u't\xF5l', u'r\xF3l', u'r\xF5l', u'b\xF3l',
u'b\xF5l', u'hoz', u'hez', u'h\xF6z',
u'n\xE1l', u'n\xE9l', u'\xE9rt', u'kor',
u'ba', u'be', u'ra', u're', u'ig', u'at', u'et',
u'ot', u'\xF6t', u'ul', u'\xFCl', u'v\xE1',
u'v\xE9', u'en', u'on', u'an', u'\xF6n',
u'n', u't')
__step3_suffixes = (u"\xE1nk\xE9nt", u"\xE1n", u"\xE9n")
__step4_suffixes = (u'astul', u'est\xFCl', u'\xE1stul',
u'\xE9st\xFCl', u'stul', u'st\xFCl')
__step5_suffixes = (u"\xE1", u"\xE9")
__step6_suffixes = (u'ok\xE9', u'\xF6k\xE9', u'ak\xE9',
u'ek\xE9', u'\xE1k\xE9', u'\xE1\xE9i',
u'\xE9k\xE9', u'\xE9\xE9i', u'k\xE9',
u'\xE9i', u'\xE9\xE9', u'\xE9')
__step7_suffixes = (u'\xE1juk', u'\xE9j\xFCk', u'\xFCnk',
u'unk', u'juk', u'j\xFCk', u'\xE1nk',
u'\xE9nk', u'nk', u'uk', u'\xFCk', u'em',
u'om', u'am', u'od', u'ed', u'ad', u'\xF6d',
u'ja', u'je', u'\xE1m', u'\xE1d', u'\xE9m',
u'\xE9d', u'm', u'd', u'a', u'e', u'o',
u'\xE1', u'\xE9')
__step8_suffixes = (u'jaitok', u'jeitek', u'jaink', u'jeink', u'aitok',
u'eitek', u'\xE1itok', u'\xE9itek', u'jaim',
u'jeim', u'jaid', u'jeid', u'eink', u'aink',
u'itek', u'jeik', u'jaik', u'\xE1ink',
u'\xE9ink', u'aim', u'eim', u'aid', u'eid',
u'jai', u'jei', u'ink', u'aik', u'eik',
u'\xE1im', u'\xE1id', u'\xE1ik', u'\xE9im',
u'\xE9id', u'\xE9ik', u'im', u'id', u'ai',
u'ei', u'ik', u'\xE1i', u'\xE9i', u'i')
__step9_suffixes = (u"\xE1k", u"\xE9k", u"\xF6k", u"ok",
u"ek", u"ak", u"k")
    def stem(self, word):
        u"""
        Stem an Hungarian word and return the stemmed form.

        Case endings, owner suffixes and plural markers are stripped in
        nine successive steps.  Each step only fires when the suffix is
        present in the region R1 computed by __r1_hungarian; where a
        removed suffix contained an accented a/e vowel, the preceding
        stem vowel is restored to plain ``a``/``e``.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are passed through unchanged.
        if word in self.stopwords:
            return word

        r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)

        # STEP 1: Remove instrumental case
        if r1.endswith(self.__step1_suffixes):
            for double_cons in self.__double_consonants:
                # The matched suffix ("al"/"el") is two characters long, so
                # the doubled cluster (if any) sits immediately before
                # position -2 and is collapsed to a single consonant.
                # NOTE(review): the fixed -4/-3 offsets assume a 2-character
                # cluster; for the 3-character clusters ("ccs", "ssz", ...)
                # this looks off by one -- verify against the Snowball spec.
                if word[-2-len(double_cons):-2] == double_cons:
                    word = u"".join((word[:-4], word[-3]))

                    if r1[-2-len(double_cons):-2] == double_cons:
                        r1 = u"".join((r1[:-4], r1[-3]))
                    break

        # STEP 2: Remove frequent cases
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]

                    # Restore a plain vowel if the stem now ends in an
                    # accented one.
                    if r1.endswith(u"\xE1"):
                        word = u"".join((word[:-1], u"a"))
                        r1 = u"".join((r1[:-1], u"a"))

                    elif r1.endswith(u"\xE9"):
                        word = u"".join((word[:-1], u"e"))
                        r1 = u"".join((r1[:-1], u"e"))
                break

        # STEP 3: Remove special cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == u"\xE9n":
                    word = u"".join((word[:-2], u"e"))
                    r1 = u"".join((r1[:-2], u"e"))
                else:
                    word = u"".join((word[:-len(suffix)], u"a"))
                    r1 = u"".join((r1[:-len(suffix)], u"a"))
                break

        # STEP 4: Remove other cases
        for suffix in self.__step4_suffixes:
            if r1.endswith(suffix):
                if suffix == u"\xE1stul":
                    word = u"".join((word[:-5], u"a"))
                    r1 = u"".join((r1[:-5], u"a"))

                elif suffix == u"\xE9st\xFCl":
                    word = u"".join((word[:-5], u"e"))
                    r1 = u"".join((r1[:-5], u"e"))

                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 5: Remove factive case
        for suffix in self.__step5_suffixes:
            if r1.endswith(suffix):
                for double_cons in self.__double_consonants:
                    # Same undoubling as step 1, but the matched suffix is
                    # only one character long (offset -1 instead of -2).
                    if word[-1-len(double_cons):-1] == double_cons:
                        word = u"".join((word[:-3], word[-2]))

                        if r1[-1-len(double_cons):-1] == double_cons:
                            r1 = u"".join((r1[:-3], r1[-2]))
                        break

        # STEP 6: Remove owned
        for suffix in self.__step6_suffixes:
            if r1.endswith(suffix):
                if suffix in (u"\xE1k\xE9", u"\xE1\xE9i"):
                    word = u"".join((word[:-3], u"a"))
                    r1 = u"".join((r1[:-3], u"a"))

                elif suffix in (u"\xE9k\xE9", u"\xE9\xE9i",
                                u"\xE9\xE9"):
                    word = u"".join((word[:-len(suffix)], u"e"))
                    r1 = u"".join((r1[:-len(suffix)], u"e"))

                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break

        # STEP 7: Remove singular owner suffixes
        for suffix in self.__step7_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in (u"\xE1nk", u"\xE1juk", u"\xE1m",
                                  u"\xE1d", u"\xE1"):
                        word = u"".join((word[:-len(suffix)], u"a"))
                        r1 = u"".join((r1[:-len(suffix)], u"a"))

                    elif suffix in (u"\xE9nk", u"\xE9j\xFCk",
                                    u"\xE9m", u"\xE9d", u"\xE9"):
                        word = u"".join((word[:-len(suffix)], u"e"))
                        r1 = u"".join((r1[:-len(suffix)], u"e"))
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break

        # STEP 8: Remove plural owner suffixes
        for suffix in self.__step8_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in (u"\xE1im", u"\xE1id", u"\xE1i",
                                  u"\xE1ink", u"\xE1itok", u"\xE1ik"):
                        word = u"".join((word[:-len(suffix)], u"a"))
                        r1 = u"".join((r1[:-len(suffix)], u"a"))

                    elif suffix in (u"\xE9im", u"\xE9id", u"\xE9i",
                                    u"\xE9ink", u"\xE9itek", u"\xE9ik"):
                        word = u"".join((word[:-len(suffix)], u"e"))
                        r1 = u"".join((r1[:-len(suffix)], u"e"))
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break

        # STEP 9: Remove plural suffixes
        for suffix in self.__step9_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == u"\xE1k":
                        word = u"".join((word[:-2], u"a"))
                    elif suffix == u"\xE9k":
                        word = u"".join((word[:-2], u"e"))
                    else:
                        word = word[:-len(suffix)]
                break

        return word
def __r1_hungarian(self, word, vowels, digraphs):
u"""
Return the region R1 that is used by the Hungarian stemmer.
If the word begins with a vowel, R1 is defined as the region
after the first consonant or digraph (= two letters stand for
one phoneme) in the word. If the word begins with a consonant,
it is defined as the region after the first vowel in the word.
If the word does not contain both a vowel and consonant, R1
is the null region at the end of the word.
:param word: The Hungarian word whose region R1 is determined.
:type word: str or unicode
:param vowels: The Hungarian vowels that are used to determine
the region R1.
:type vowels: unicode
:param digraphs: The digraphs that are used to determine the
region R1.
:type digraphs: tuple
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the stem method of the subclass
HungarianStemmer. It is not to be invoked directly!
"""
r1 = u""
if word[0] in vowels:
for digraph in digraphs:
if digraph in word[1:]:
r1 = word[word.index(digraph[-1])+1:]
return r1
for i in xrange(1, len(word)):
if word[i] not in vowels:
r1 = word[i+1:]
break
else:
for i in xrange(1, len(word)):
if word[i] in vowels:
r1 = word[i+1:]
break
return r1
class ItalianStemmer(_StandardStemmer):
    u"""
    The Italian Snowball stemmer.

    :cvar __vowels: The Italian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :note: A detailed description of the Italian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/italian/stemmer.html
    """

    __vowels = u"aeiou\xE0\xE8\xEC\xF2\xF9"
    # Attached pronouns (step 0); longest first so the linear scan
    # matches the longest applicable suffix.
    __step0_suffixes = (u'gliela', u'gliele', u'glieli', u'glielo',
                        u'gliene', u'sene', u'mela', u'mele', u'meli',
                        u'melo', u'mene', u'tela', u'tele', u'teli',
                        u'telo', u'tene', u'cela', u'cele', u'celi',
                        u'celo', u'cene', u'vela', u'vele', u'veli',
                        u'velo', u'vene', u'gli', u'ci', u'la', u'le',
                        u'li', u'lo', u'mi', u'ne', u'si', u'ti', u'vi')
    # Standard derivational suffixes (step 1).
    __step1_suffixes = (u'atrice', u'atrici', u'azione', u'azioni',
                        u'uzione', u'uzioni', u'usione', u'usioni',
                        u'amento', u'amenti', u'imento', u'imenti',
                        u'amente', u'abile', u'abili', u'ibile', u'ibili',
                        u'mente', u'atore', u'atori', u'logia', u'logie',
                        u'anza', u'anze', u'iche', u'ichi', u'ismo',
                        u'ismi', u'ista', u'iste', u'isti', u'ist\xE0',
                        u'ist\xE8', u'ist\xEC', u'ante', u'anti',
                        u'enza', u'enze', u'ico', u'ici', u'ica', u'ice',
                        u'oso', u'osi', u'osa', u'ose', u'it\xE0',
                        u'ivo', u'ivi', u'iva', u'ive')
    # Verb suffixes (step 2).
    __step2_suffixes = (u'erebbero', u'irebbero', u'assero', u'assimo',
                        u'eranno', u'erebbe', u'eremmo', u'ereste',
                        u'eresti', u'essero', u'iranno', u'irebbe',
                        u'iremmo', u'ireste', u'iresti', u'iscano',
                        u'iscono', u'issero', u'arono', u'avamo', u'avano',
                        u'avate', u'eremo', u'erete', u'erono', u'evamo',
                        u'evano', u'evate', u'iremo', u'irete', u'irono',
                        u'ivamo', u'ivano', u'ivate', u'ammo', u'ando',
                        u'asse', u'assi', u'emmo', u'enda', u'ende',
                        u'endi', u'endo', u'erai', u'erei', u'Yamo',
                        u'iamo', u'immo', u'irai', u'irei', u'isca',
                        u'isce', u'isci', u'isco', u'ano', u'are', u'ata',
                        u'ate', u'ati', u'ato', u'ava', u'avi', u'avo',
                        u'er\xE0', u'ere', u'er\xF2', u'ete', u'eva',
                        u'evi', u'evo', u'ir\xE0', u'ire', u'ir\xF2',
                        u'ita', u'ite', u'iti', u'ito', u'iva', u'ivi',
                        u'ivo', u'ono', u'uta', u'ute', u'uti', u'uto',
                        u'ar', u'ir')

    def stem(self, word):
        u"""
        Stem an Italian word and return the stemmed form.

        The word is normalised (acute accents become grave, 'u'/'i' in
        diphthong position are temporarily upper-cased), then attached
        pronouns (step 0), standard suffixes (step 1) and verb suffixes
        (step 2) are removed relative to the regions R1, R2 and RV,
        followed by the residual steps 3a/3b.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False

        # All acute accents are replaced by grave accents.
        word = (word.replace(u"\xE1", u"\xE0")
                    .replace(u"\xE9", u"\xE8")
                    .replace(u"\xED", u"\xEC")
                    .replace(u"\xF3", u"\xF2")
                    .replace(u"\xFA", u"\xF9"))

        # Every occurrence of 'u' after 'q'
        # is put into upper case.
        for i in xrange(1, len(word)):
            if word[i-1] == u"q" and word[i] == u"u":
                word = u"".join((word[:i], u"U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))

                elif word [i] == u"i":
                    word = u"".join((word[:i], u"I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if rv.endswith(suffix):
                # The pronoun is only removed after a gerund
                # ("ando"/"endo") or an infinitive stem ("ar"/"er"/"ir",
                # in which case a final "e" is restored).
                if rv[-len(suffix)-4:-len(suffix)] in (u"ando", u"endo"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]

                elif (rv[-len(suffix)-2:-len(suffix)] in
                      (u"ar", u"er", u"ir")):
                    word = u"".join((word[:-len(suffix)], u"e"))
                    r1 = u"".join((r1[:-len(suffix)], u"e"))
                    r2 = u"".join((r2[:-len(suffix)], u"e"))
                    rv = u"".join((rv[:-len(suffix)], u"e"))
                break

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]

                    if r2.endswith(u"iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]

                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif r2.endswith((u"os", u"ic")):
                        word = word[:-2]
                        rv = rv[:-2]

                    elif r2 .endswith(u"abil"):
                        word = word[:-4]
                        rv = rv[:-4]

                elif (suffix in (u"amento", u"amenti",
                                 u"imento", u"imenti") and
                      rv.endswith(suffix)):
                    step1_success = True
                    word = word[:-6]
                    rv = rv[:-6]

                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in (u"azione", u"azioni", u"atore", u"atori"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith(u"ic"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif suffix in (u"logia", u"logie"):
                        word = word[:-2]
                        # NOTE(review): this looks like it should be
                        # `rv = rv[:-2]` (as in the sibling branches);
                        # assigning from `word` shrinks RV too far --
                        # verify against the Snowball reference.
                        rv = word[:-2]

                    elif suffix in (u"uzione", u"uzioni",
                                    u"usione", u"usioni"):
                        word = word[:-5]
                        rv = rv[:-5]

                    elif suffix in (u"enza", u"enze"):
                        word = u"".join((word[:-2], u"te"))
                        rv = u"".join((rv[:-2], u"te"))

                    elif suffix == u"it\xE0":
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]

                        if r2.endswith((u"ic", u"iv")):
                            word = word[:-2]
                            rv = rv[:-2]

                        elif r2.endswith(u"abil"):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in (u"ivo", u"ivi", u"iva", u"ive"):
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]

                        if r2.endswith(u"at"):
                            word = word[:-2]
                            r2 = r2[:-2]
                            rv = rv[:-2]

                            if r2.endswith(u"ic"):
                                word = word[:-2]
                                rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                break

        # STEP 2: Verb suffixes
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3a
        if rv.endswith((u"a", u"e", u"i", u"o", u"\xE0", u"\xE8",
                        u"\xEC", u"\xF2")):
            word = word[:-1]
            rv = rv[:-1]

            if rv.endswith(u"i"):
                word = word[:-1]
                rv = rv[:-1]

        # STEP 3b
        if rv.endswith((u"ch", u"gh")):
            word = word[:-1]

        # Undo the temporary upper-casing from the normalisation pass.
        word = word.replace(u"I", u"i").replace(u"U", u"u")

        return word
class NorwegianStemmer(_ScandinavianStemmer):
    u"""
    The Norwegian Snowball stemmer.

    :cvar __vowels: The Norwegian vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Norwegian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/norwegian/stemmer.html
    """

    __vowels = u"aeiouy\xE6\xE5\xF8"
    __s_ending = u"bcdfghjlmnoprtvyz"
    # Longest suffixes come first so the linear scan below always picks
    # the longest matching ending.
    __step1_suffixes = (u"hetenes", u"hetene", u"hetens", u"heter",
                        u"heten", u"endes", u"ande", u"ende", u"edes",
                        u"enes", u"erte", u"ede", u"ane", u"ene", u"ens",
                        u"ers", u"ets", u"het", u"ast", u"ert", u"en",
                        u"ar", u"er", u"as", u"es", u"et", u"a", u"e", u"s")
    __step2_suffixes = (u"dt", u"vt")
    __step3_suffixes = (u"hetslov", u"eleg", u"elig", u"elov", u"slov",
                        u"leg", u"eig", u"lig", u"els", u"lov", u"ig")

    def stem(self, word):
        u"""
        Stem a Norwegian word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word

        region1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: strip the longest noun/verb ending found in R1.
        for ending in self.__step1_suffixes:
            if not region1.endswith(ending):
                continue
            if ending in (u"erte", u"ert"):
                # "erte"/"ert" is mapped onto "er" rather than deleted.
                word = word[:-len(ending)] + u"er"
                region1 = region1[:-len(ending)] + u"er"
            elif ending == u"s":
                # A final 's' is only dropped after a valid s-ending
                # letter, or after a 'k' that follows a non-vowel.
                dropable = (word[-2] in self.__s_ending or
                            (word[-2] == u"k" and
                             word[-3] not in self.__vowels))
                if dropable:
                    word = word[:-1]
                    region1 = region1[:-1]
            else:
                word = word[:-len(ending)]
                region1 = region1[:-len(ending)]
            break

        # STEP 2: undouble a verbal "dt"/"vt" ending by removing the 't'.
        if region1.endswith(self.__step2_suffixes):
            word = word[:-1]
            region1 = region1[:-1]

        # STEP 3: delete the first derivational ending found in R1.
        found = next((s for s in self.__step3_suffixes
                      if region1.endswith(s)), None)
        if found is not None:
            word = word[:-len(found)]

        return word
class PortugueseStemmer(_StandardStemmer):
    u"""
    The Portuguese Snowball stemmer.

    :cvar __vowels: The Portuguese vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Portuguese
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/portuguese/stemmer.html
    """

    __vowels = u"aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"
    # Standard derivational suffixes (step 1); longest first.
    # NOTE(review): "uciones"/"uci\xF3n" below look like Spanish forms
    # (the Snowball Portuguese spec lists "u\xE7a~o"/"u\xE7o~es") --
    # verify against the reference algorithm.
    __step1_suffixes = (u'amentos', u'imentos', u'uciones', u'amento',
                        u'imento', u'adoras', u'adores', u'a\xE7o~es',
                        u'log\xEDas', u'\xEAncias', u'amente',
                        u'idades', u'ismos', u'istas', u'adora',
                        u'a\xE7a~o', u'antes', u'\xE2ncia',
                        u'log\xEDa', u'uci\xF3n', u'\xEAncia',
                        u'mente', u'idade', u'ezas', u'icos', u'icas',
                        u'ismo', u'\xE1vel', u'\xEDvel', u'ista',
                        u'osos', u'osas', u'ador', u'ante', u'ivas',
                        u'ivos', u'iras', u'eza', u'ico', u'ica',
                        u'oso', u'osa', u'iva', u'ivo', u'ira')
    # Verb suffixes (step 2).
    __step2_suffixes = (u'ar\xEDamos', u'er\xEDamos', u'ir\xEDamos',
                        u'\xE1ssemos', u'\xEAssemos', u'\xEDssemos',
                        u'ar\xEDeis', u'er\xEDeis', u'ir\xEDeis',
                        u'\xE1sseis', u'\xE9sseis', u'\xEDsseis',
                        u'\xE1ramos', u'\xE9ramos', u'\xEDramos',
                        u'\xE1vamos', u'aremos', u'eremos', u'iremos',
                        u'ariam', u'eriam', u'iriam', u'assem', u'essem',
                        u'issem', u'ara~o', u'era~o', u'ira~o', u'arias',
                        u'erias', u'irias', u'ardes', u'erdes', u'irdes',
                        u'asses', u'esses', u'isses', u'astes', u'estes',
                        u'istes', u'\xE1reis', u'areis', u'\xE9reis',
                        u'ereis', u'\xEDreis', u'ireis', u'\xE1veis',
                        u'\xEDamos', u'armos', u'ermos', u'irmos',
                        u'aria', u'eria', u'iria', u'asse', u'esse',
                        u'isse', u'aste', u'este', u'iste', u'arei',
                        u'erei', u'irei', u'aram', u'eram', u'iram',
                        u'avam', u'arem', u'erem', u'irem',
                        u'ando', u'endo', u'indo', u'adas', u'idas',
                        u'ar\xE1s', u'aras', u'er\xE1s', u'eras',
                        u'ir\xE1s', u'avas', u'ares', u'eres', u'ires',
                        u'\xEDeis', u'ados', u'idos', u'\xE1mos',
                        u'amos', u'emos', u'imos', u'iras', u'ada', u'ida',
                        u'ar\xE1', u'ara', u'er\xE1', u'era',
                        u'ir\xE1', u'ava', u'iam', u'ado', u'ido',
                        u'ias', u'ais', u'eis', u'ira', u'ia', u'ei', u'am',
                        u'em', u'ar', u'er', u'ir', u'as',
                        u'es', u'is', u'eu', u'iu', u'ou')
    # Residual suffixes (step 4).
    __step4_suffixes = (u"os", u"a", u"i", u"o", u"\xE1",
                        u"\xED", u"\xF3")

    def stem(self, word):
        u"""
        Stem a Portuguese word and return the stemmed form.

        Nasalised vowels are temporarily rewritten ("\xE3" -> "a~",
        "\xF5" -> "o~"), suffixes are then removed relative to the
        regions R1, R2 and RV in steps 1-5, and the nasal vowels are
        finally restored.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # Temporarily spell out the nasalised vowels.
        word = (word.replace(u"\xE3", u"a~")
                    .replace(u"\xF5", u"o~"))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"amente" and r1.endswith(suffix):
                    step1_success = True

                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]

                    if r2.endswith(u"iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]

                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]

                    elif r2.endswith((u"os", u"ic", u"ad")):
                        word = word[:-2]
                        rv = rv[:-2]

                elif (suffix in (u"ira", u"iras") and rv.endswith(suffix) and
                      word[-len(suffix)-1:-len(suffix)] == u"e"):
                    # "eira"/"eiras" (the "e" is checked just before the
                    # matched suffix) is mapped onto "ir".
                    step1_success = True

                    word = u"".join((word[:-len(suffix)], u"ir"))
                    rv = u"".join((rv[:-len(suffix)], u"ir"))

                elif r2.endswith(suffix):
                    step1_success = True

                    if suffix in (u"log\xEDa", u"log\xEDas"):
                        word = word[:-2]
                        rv = rv[:-2]

                    elif suffix in (u"uci\xF3n", u"uciones"):
                        word = u"".join((word[:-len(suffix)], u"u"))
                        rv = u"".join((rv[:-len(suffix)], u"u"))

                    elif suffix in (u"\xEAncia", u"\xEAncias"):
                        word = u"".join((word[:-len(suffix)], u"ente"))
                        rv = u"".join((rv[:-len(suffix)], u"ente"))

                    elif suffix == u"mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]

                        if r2.endswith((u"ante", u"avel", u"\xEDvel")):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in (u"idade", u"idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith((u"ic", u"iv")):
                            word = word[:-2]
                            rv = rv[:-2]

                        elif r2.endswith(u"abil"):
                            word = word[:-4]
                            rv = rv[:-4]

                    elif suffix in (u"iva", u"ivo", u"ivas", u"ivos"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]

                break

        # STEP 2: Verb suffixes
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    step2_success = True

                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 3
        if step1_success or step2_success:
            if rv.endswith(u"i") and word[-2] == u"c":
                word = word[:-1]
                rv = rv[:-1]

        ### STEP 4: Residual suffix
        if not step1_success and not step2_success:
            for suffix in self.__step4_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break

        # STEP 5
        if rv.endswith((u"e", u"\xE9", u"\xEA")):
            word = word[:-1]
            rv = rv[:-1]

            if ((word.endswith(u"gu") and rv.endswith(u"u")) or
                (word.endswith(u"ci") and rv.endswith(u"i"))):
                word = word[:-1]

        elif word.endswith(u"\xE7"):
            word = u"".join((word[:-1], u"c"))

        # Restore the nasalised vowels.
        word = word.replace(u"a~", u"\xE3").replace(u"o~", u"\xF5")

        return word
class RomanianStemmer(_StandardStemmer):
    u"""
    The Romanian Snowball stemmer.

    :cvar __vowels: The Romanian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Romanian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/romanian/stemmer.html
    """

    __vowels = u"aeiou\u0103\xE2\xEE"
    # Plurals and article endings (step 0); longest first.
    __step0_suffixes = (u'iilor', u'ului', u'elor', u'iile', u'ilor',
                        u'atei', u'a\u0163ie', u'a\u0163ia', u'aua',
                        u'ele', u'iua', u'iei', u'ile', u'ul', u'ea',
                        u'ii')
    # Combining derivational suffixes (step 1); reduced repeatedly.
    __step1_suffixes = (u'abilitate', u'abilitati', u'abilit\u0103\u0163i',
                        u'ibilitate', u'abilit\u0103i', u'ivitate',
                        u'ivitati', u'ivit\u0103\u0163i', u'icitate',
                        u'icitati', u'icit\u0103\u0163i', u'icatori',
                        u'ivit\u0103i', u'icit\u0103i', u'icator',
                        u'a\u0163iune', u'atoare', u'\u0103toare',
                        u'i\u0163iune', u'itoare', u'iciva', u'icive',
                        u'icivi', u'iciv\u0103', u'icala', u'icale',
                        u'icali', u'ical\u0103', u'ativa', u'ative',
                        u'ativi', u'ativ\u0103', u'atori', u'\u0103tori',
                        u'itiva', u'itive', u'itivi', u'itiv\u0103',
                        u'itori', u'iciv', u'ical', u'ativ', u'ator',
                        u'\u0103tor', u'itiv', u'itor')
    # Standard suffixes (step 2).
    __step2_suffixes = (u'abila', u'abile', u'abili', u'abil\u0103',
                        u'ibila', u'ibile', u'ibili', u'ibil\u0103',
                        u'atori', u'itate', u'itati', u'it\u0103\u0163i',
                        u'abil', u'ibil', u'oasa', u'oas\u0103', u'oase',
                        u'anta', u'ante', u'anti', u'ant\u0103', u'ator',
                        u'it\u0103i', u'iune', u'iuni', u'isme', u'ista',
                        u'iste', u'isti', u'ist\u0103', u'i\u015Fti',
                        u'ata', u'at\u0103', u'ati', u'ate', u'uta',
                        u'ut\u0103', u'uti', u'ute', u'ita', u'it\u0103',
                        u'iti', u'ite', u'ica', u'ice', u'ici', u'ic\u0103',
                        u'osi', u'o\u015Fi', u'ant', u'iva', u'ive', u'ivi',
                        u'iv\u0103', u'ism', u'ist', u'at', u'ut', u'it',
                        u'ic', u'os', u'iv')
    # Verb suffixes (step 3).
    __step3_suffixes = (u'seser\u0103\u0163i', u'aser\u0103\u0163i',
                        u'iser\u0103\u0163i', u'\xE2ser\u0103\u0163i',
                        u'user\u0103\u0163i', u'seser\u0103m',
                        u'aser\u0103m', u'iser\u0103m', u'\xE2ser\u0103m',
                        u'user\u0103m', u'ser\u0103\u0163i', u'sese\u015Fi',
                        u'seser\u0103', u'easc\u0103', u'ar\u0103\u0163i',
                        u'ur\u0103\u0163i', u'ir\u0103\u0163i',
                        u'\xE2r\u0103\u0163i', u'ase\u015Fi',
                        u'aser\u0103', u'ise\u015Fi', u'iser\u0103',
                        u'\xe2se\u015Fi', u'\xE2ser\u0103',
                        u'use\u015Fi', u'user\u0103', u'ser\u0103m',
                        u'sesem', u'indu', u'\xE2ndu', u'eaz\u0103',
                        u'e\u015Fti', u'e\u015Fte', u'\u0103\u015Fti',
                        u'\u0103\u015Fte', u'ea\u0163i', u'ia\u0163i',
                        u'ar\u0103m', u'ur\u0103m', u'ir\u0103m',
                        u'\xE2r\u0103m', u'asem', u'isem',
                        u'\xE2sem', u'usem', u'se\u015Fi', u'ser\u0103',
                        u'sese', u'are', u'ere', u'ire', u'\xE2re',
                        u'ind', u'\xE2nd', u'eze', u'ezi', u'esc',
                        u'\u0103sc', u'eam', u'eai', u'eau', u'iam',
                        u'iai', u'iau', u'a\u015Fi', u'ar\u0103',
                        u'u\u015Fi', u'ur\u0103', u'i\u015Fi', u'ir\u0103',
                        u'\xE2\u015Fi', u'\xe2r\u0103', u'ase',
                        u'ise', u'\xE2se', u'use', u'a\u0163i',
                        u'e\u0163i', u'i\u0163i', u'\xe2\u0163i', u'sei',
                        u'ez', u'am', u'ai', u'au', u'ea', u'ia', u'ui',
                        u'\xE2i', u'\u0103m', u'em', u'im', u'\xE2m',
                        u'se')

    def stem(self, word):
        u"""
        Stem a Romanian word and return the stemmed form.

        After temporarily upper-casing 'u'/'i' between vowels, plurals
        and articles (step 0), combining suffixes (step 1, applied
        repeatedly), standard suffixes (step 2), verb suffixes (step 3)
        and a final vowel (step 4) are removed relative to the regions
        R1, R2 and RV.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        if word in self.stopwords:
            return word

        step1_success = False
        step2_success = False

        # 'u' and 'i' between vowels are temporarily upper-cased so they
        # are treated as consonants by the region computations.
        for i in xrange(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == u"u":
                    word = u"".join((word[:i], u"U", word[i+1:]))

                elif word[i] == u"i":
                    word = u"".join((word[:i], u"I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)

        # STEP 0: Removal of plurals and other simplifications
        # Note: `suffix in r1` / `suffix in rv` test substring
        # containment, not suffix position.
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if suffix in r1:
                    if suffix in (u"ul", u"ului"):
                        word = word[:-len(suffix)]

                        if suffix in rv:
                            rv = rv[:-len(suffix)]
                        else:
                            rv = u""

                    elif (suffix == u"aua" or suffix == u"atei" or
                          (suffix == u"ile" and word[-5:-3] != u"ab")):
                        word = word[:-2]

                    elif suffix in (u"ea", u"ele", u"elor"):
                        word = u"".join((word[:-len(suffix)], u"e"))

                        if suffix in rv:
                            rv = u"".join((rv[:-len(suffix)], u"e"))
                        else:
                            rv = u""

                    elif suffix in (u"ii", u"iua", u"iei",
                                    u"iile", u"iilor", u"ilor"):
                        word = u"".join((word[:-len(suffix)], u"i"))

                        if suffix in rv:
                            rv = u"".join((rv[:-len(suffix)], u"i"))
                        else:
                            rv = u""

                    elif suffix in (u"a\u0163ie", u"a\u0163ia"):
                        word = word[:-1]
                break

        # STEP 1: Reduction of combining suffixes
        # Repeated until no replacement fires, since one reduction can
        # expose another combining suffix.
        while True:

            replacement_done = False

            for suffix in self.__step1_suffixes:
                if word.endswith(suffix):
                    if suffix in r1:
                        step1_success = True
                        replacement_done = True

                        if suffix in (u"abilitate", u"abilitati",
                                      u"abilit\u0103i",
                                      u"abilit\u0103\u0163i"):
                            word = u"".join((word[:-len(suffix)], u"abil"))

                        elif suffix == u"ibilitate":
                            word = word[:-5]

                        elif suffix in (u"ivitate", u"ivitati",
                                        u"ivit\u0103i",
                                        u"ivit\u0103\u0163i"):
                            word = u"".join((word[:-len(suffix)], u"iv"))

                        elif suffix in (u"icitate", u"icitati", u"icit\u0103i",
                                        u"icit\u0103\u0163i", u"icator",
                                        u"icatori", u"iciv", u"iciva",
                                        u"icive", u"icivi", u"iciv\u0103",
                                        u"ical", u"icala", u"icale", u"icali",
                                        u"ical\u0103"):
                            word = u"".join((word[:-len(suffix)], u"ic"))

                        elif suffix in (u"ativ", u"ativa", u"ative", u"ativi",
                                        u"ativ\u0103", u"a\u0163iune",
                                        u"atoare", u"ator", u"atori",
                                        u"\u0103toare",
                                        u"\u0103tor", u"\u0103tori"):
                            word = u"".join((word[:-len(suffix)], u"at"))

                            if suffix in r2:
                                r2 = u"".join((r2[:-len(suffix)], u"at"))

                        elif suffix in (u"itiv", u"itiva", u"itive", u"itivi",
                                        u"itiv\u0103", u"i\u0163iune",
                                        u"itoare", u"itor", u"itori"):
                            word = u"".join((word[:-len(suffix)], u"it"))

                            if suffix in r2:
                                r2 = u"".join((r2[:-len(suffix)], u"it"))
                    else:
                        step1_success = False
                    break

            if not replacement_done:
                break

        # STEP 2: Removal of standard suffixes
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if suffix in r2:
                    step2_success = True

                    if suffix in (u"iune", u"iuni"):
                        if word[-5] == u"\u0163":
                            word = u"".join((word[:-5], u"t"))

                    elif suffix in (u"ism", u"isme", u"ist", u"ista", u"iste",
                                    u"isti", u"ist\u0103", u"i\u015Fti"):
                        word = u"".join((word[:-len(suffix)], u"ist"))

                    else:
                        word = word[:-len(suffix)]
                break

        # STEP 3: Removal of verb suffixes
        if not step1_success and not step2_success:
            for suffix in self.__step3_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if suffix in (u'seser\u0103\u0163i', u'seser\u0103m',
                                      u'ser\u0103\u0163i', u'sese\u015Fi',
                                      u'seser\u0103', u'ser\u0103m', u'sesem',
                                      u'se\u015Fi', u'ser\u0103', u'sese',
                                      u'a\u0163i', u'e\u0163i', u'i\u0163i',
                                      u'\xE2\u0163i', u'sei', u'\u0103m',
                                      u'em', u'im', u'\xE2m', u'se'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                        else:
                            # Other verb suffixes are only removed when
                            # preceded by a consonant or 'u' inside RV.
                            if (not rv.startswith(suffix) and
                                rv[rv.index(suffix)-1] not in
                                u"aeio\u0103\xE2\xEE"):
                                word = word[:-len(suffix)]
                        break

        # STEP 4: Removal of final vowel
        for suffix in (u"ie", u"a", u"e", u"i", u"\u0103"):
            if word.endswith(suffix):
                if suffix in rv:
                    word = word[:-len(suffix)]
                break

        # Undo the temporary upper-casing from the preprocessing pass.
        word = word.replace(u"I", u"i").replace(u"U", u"u")

        return word
class RussianStemmer(_LanguageSpecificStemmer):
u"""
The Russian Snowball stemmer.
:cvar __perfective_gerund_suffixes: Suffixes to be deleted.
:type __perfective_gerund_suffixes: tuple
:cvar __adjectival_suffixes: Suffixes to be deleted.
:type __adjectival_suffixes: tuple
:cvar __reflexive_suffixes: Suffixes to be deleted.
:type __reflexive_suffixes: tuple
:cvar __verb_suffixes: Suffixes to be deleted.
:type __verb_suffixes: tuple
:cvar __noun_suffixes: Suffixes to be deleted.
:type __noun_suffixes: tuple
:cvar __superlative_suffixes: Suffixes to be deleted.
:type __superlative_suffixes: tuple
:cvar __derivational_suffixes: Suffixes to be deleted.
:type __derivational_suffixes: tuple
:note: A detailed description of the Russian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/russian/stemmer.html
"""
__perfective_gerund_suffixes = (u"ivshis'", u"yvshis'", u"vshis'",
u"ivshi", u"yvshi", u"vshi", u"iv",
u"yv", u"v")
__adjectival_suffixes = (u'ui^ushchi^ui^u', u'ui^ushchi^ai^a',
u'ui^ushchimi', u'ui^ushchymi', u'ui^ushchego',
u'ui^ushchogo', u'ui^ushchemu', u'ui^ushchomu',
u'ui^ushchikh', u'ui^ushchykh',
u'ui^ushchui^u', u'ui^ushchaia',
u'ui^ushchoi^u', u'ui^ushchei^u',
u'i^ushchi^ui^u', u'i^ushchi^ai^a',
u'ui^ushchee', u'ui^ushchie',
u'ui^ushchye', u'ui^ushchoe', u'ui^ushchei`',
u'ui^ushchii`', u'ui^ushchyi`',
u'ui^ushchoi`', u'ui^ushchem', u'ui^ushchim',
u'ui^ushchym', u'ui^ushchom', u'i^ushchimi',
u'i^ushchymi', u'i^ushchego', u'i^ushchogo',
u'i^ushchemu', u'i^ushchomu', u'i^ushchikh',
u'i^ushchykh', u'i^ushchui^u', u'i^ushchai^a',
u'i^ushchoi^u', u'i^ushchei^u', u'i^ushchee',
u'i^ushchie', u'i^ushchye', u'i^ushchoe',
u'i^ushchei`', u'i^ushchii`',
u'i^ushchyi`', u'i^ushchoi`', u'i^ushchem',
u'i^ushchim', u'i^ushchym', u'i^ushchom',
u'shchi^ui^u', u'shchi^ai^a', u'ivshi^ui^u',
u'ivshi^ai^a', u'yvshi^ui^u', u'yvshi^ai^a',
u'shchimi', u'shchymi', u'shchego', u'shchogo',
u'shchemu', u'shchomu', u'shchikh', u'shchykh',
u'shchui^u', u'shchai^a', u'shchoi^u',
u'shchei^u', u'ivshimi', u'ivshymi',
u'ivshego', u'ivshogo', u'ivshemu', u'ivshomu',
u'ivshikh', u'ivshykh', u'ivshui^u',
u'ivshai^a', u'ivshoi^u', u'ivshei^u',
u'yvshimi', u'yvshymi', u'yvshego', u'yvshogo',
u'yvshemu', u'yvshomu', u'yvshikh', u'yvshykh',
u'yvshui^u', u'yvshai^a', u'yvshoi^u',
u'yvshei^u', u'vshi^ui^u', u'vshi^ai^a',
u'shchee', u'shchie', u'shchye', u'shchoe',
u'shchei`', u'shchii`', u'shchyi`', u'shchoi`',
u'shchem', u'shchim', u'shchym', u'shchom',
u'ivshee', u'ivshie', u'ivshye', u'ivshoe',
u'ivshei`', u'ivshii`', u'ivshyi`',
u'ivshoi`', u'ivshem', u'ivshim', u'ivshym',
u'ivshom', u'yvshee', u'yvshie', u'yvshye',
u'yvshoe', u'yvshei`', u'yvshii`',
u'yvshyi`', u'yvshoi`', u'yvshem',
u'yvshim', u'yvshym', u'yvshom', u'vshimi',
u'vshymi', u'vshego', u'vshogo', u'vshemu',
u'vshomu', u'vshikh', u'vshykh', u'vshui^u',
u'vshai^a', u'vshoi^u', u'vshei^u',
u'emi^ui^u', u'emi^ai^a', u'nni^ui^u',
u'nni^ai^a', u'vshee',
u'vshie', u'vshye', u'vshoe', u'vshei`',
u'vshii`', u'vshyi`', u'vshoi`',
u'vshem', u'vshim', u'vshym', u'vshom',
u'emimi', u'emymi', u'emego', u'emogo',
u'ememu', u'emomu', u'emikh', u'emykh',
u'emui^u', u'emai^a', u'emoi^u', u'emei^u',
u'nnimi', u'nnymi', u'nnego', u'nnogo',
u'nnemu', u'nnomu', u'nnikh', u'nnykh',
u'nnui^u', u'nnai^a', u'nnoi^u', u'nnei^u',
u'emee', u'emie', u'emye', u'emoe',
u'emei`', u'emii`', u'emyi`',
u'emoi`', u'emem', u'emim', u'emym',
u'emom', u'nnee', u'nnie', u'nnye', u'nnoe',
u'nnei`', u'nnii`', u'nnyi`',
u'nnoi`', u'nnem', u'nnim', u'nnym',
u'nnom', u'i^ui^u', u'i^ai^a', u'imi', u'ymi',
u'ego', u'ogo', u'emu', u'omu', u'ikh',
u'ykh', u'ui^u', u'ai^a', u'oi^u', u'ei^u',
u'ee', u'ie', u'ye', u'oe', u'ei`',
u'ii`', u'yi`', u'oi`', u'em',
u'im', u'ym', u'om')
__reflexive_suffixes = (u"si^a", u"s'")
__verb_suffixes = (u"esh'", u'ei`te', u'ui`te', u'ui^ut',
u"ish'", u'ete', u'i`te', u'i^ut', u'nno',
u'ila', u'yla', u'ena', u'ite', u'ili', u'yli',
u'ilo', u'ylo', u'eno', u'i^at', u'uet', u'eny',
u"it'", u"yt'", u'ui^u', u'la', u'na', u'li',
u'em', u'lo', u'no', u'et', u'ny', u"t'",
u'ei`', u'ui`', u'il', u'yl', u'im',
u'ym', u'en', u'it', u'yt', u'i^u', u'i`',
u'l', u'n')
__noun_suffixes = (u'ii^ami', u'ii^akh', u'i^ami', u'ii^am', u'i^akh',
u'ami', u'iei`', u'i^am', u'iem', u'akh',
u'ii^u', u"'i^u", u'ii^a', u"'i^a", u'ev', u'ov',
u'ie', u"'e", u'ei', u'ii', u'ei`',
u'oi`', u'ii`', u'em', u'am', u'om',
u'i^u', u'i^a', u'a', u'e', u'i', u'i`',
u'o', u'u', u'y', u"'")
__superlative_suffixes = (u"ei`she", u"ei`sh")
__derivational_suffixes = (u"ost'", u"ost")
    def stem(self, word):
        u"""
        Stem a Russian word and return the stemmed form.

        Cyrillic input is transliterated to a Roman working alphabet first,
        the four Snowball removal steps are applied to the regions RV/R2,
        and the result is transliterated back.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        # Stopwords pass through untouched.
        if word in self.stopwords:
            return word
        # Any code point >= 256 is taken to mean Cyrillic input that must be
        # transliterated before (and back after) stemming.
        # NOTE(review): `ord(c) not in xrange(256)` is an O(n) membership test
        # per character on Python 2 -- `ord(c) > 255` would be equivalent.
        chr_exceeded = False
        for i in xrange(len(word)):
            if ord(word[i]) not in xrange(256):
                chr_exceeded = True
                break
        if chr_exceeded:
            word = self.__cyrillic_to_roman(word)
        # Flags recording which step fired; later steps consult them.
        step1_success = False
        adjectival_removed = False
        verb_removed = False
        undouble_success = False
        superlative_removed = False
        rv, r2 = self.__regions_russian(word)
        # Step 1: try to remove a perfective gerund ending from RV.
        for suffix in self.__perfective_gerund_suffixes:
            if rv.endswith(suffix):
                # These three endings only count when preceded by "a"/"i^a".
                if suffix in (u"v", u"vshi", u"vshis'"):
                    if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                        rv[-len(suffix)-1:-len(suffix)] == "a"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        step1_success = True
                        break
                else:
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    step1_success = True
                    break
        if not step1_success:
            # No gerund: strip a reflexive ending first, then try
            # adjectival, verb and noun endings in that priority order.
            for suffix in self.__reflexive_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
            for suffix in self.__adjectival_suffixes:
                if rv.endswith(suffix):
                    # Participle-derived endings require a preceding "a"/"i^a".
                    if suffix in (u'i^ushchi^ui^u', u'i^ushchi^ai^a',
                                  u'i^ushchui^u', u'i^ushchai^a', u'i^ushchoi^u',
                                  u'i^ushchei^u', u'i^ushchimi', u'i^ushchymi',
                                  u'i^ushchego', u'i^ushchogo', u'i^ushchemu',
                                  u'i^ushchomu', u'i^ushchikh', u'i^ushchykh',
                                  u'shchi^ui^u', u'shchi^ai^a', u'i^ushchee',
                                  u'i^ushchie', u'i^ushchye', u'i^ushchoe',
                                  u'i^ushchei`', u'i^ushchii`', u'i^ushchyi`',
                                  u'i^ushchoi`', u'i^ushchem', u'i^ushchim',
                                  u'i^ushchym', u'i^ushchom', u'vshi^ui^u',
                                  u'vshi^ai^a', u'shchui^u', u'shchai^a',
                                  u'shchoi^u', u'shchei^u', u'emi^ui^u',
                                  u'emi^ai^a', u'nni^ui^u', u'nni^ai^a',
                                  u'shchimi', u'shchymi', u'shchego', u'shchogo',
                                  u'shchemu', u'shchomu', u'shchikh', u'shchykh',
                                  u'vshui^u', u'vshai^a', u'vshoi^u', u'vshei^u',
                                  u'shchee', u'shchie', u'shchye', u'shchoe',
                                  u'shchei`', u'shchii`', u'shchyi`', u'shchoi`',
                                  u'shchem', u'shchim', u'shchym', u'shchom',
                                  u'vshimi', u'vshymi', u'vshego', u'vshogo',
                                  u'vshemu', u'vshomu', u'vshikh', u'vshykh',
                                  u'emui^u', u'emai^a', u'emoi^u', u'emei^u',
                                  u'nnui^u', u'nnai^a', u'nnoi^u', u'nnei^u',
                                  u'vshee', u'vshie', u'vshye', u'vshoe',
                                  u'vshei`', u'vshii`', u'vshyi`', u'vshoi`',
                                  u'vshem', u'vshim', u'vshym', u'vshom',
                                  u'emimi', u'emymi', u'emego', u'emogo',
                                  u'ememu', u'emomu', u'emikh', u'emykh',
                                  u'nnimi', u'nnymi', u'nnego', u'nnogo',
                                  u'nnemu', u'nnomu', u'nnikh', u'nnykh',
                                  u'emee', u'emie', u'emye', u'emoe', u'emei`',
                                  u'emii`', u'emyi`', u'emoi`', u'emem', u'emim',
                                  u'emym', u'emom', u'nnee', u'nnie', u'nnye',
                                  u'nnoe', u'nnei`', u'nnii`', u'nnyi`', u'nnoi`',
                                  u'nnem', u'nnim', u'nnym', u'nnom'):
                        if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                            rv[-len(suffix)-1:-len(suffix)] == "a"):
                            word = word[:-len(suffix)]
                            r2 = r2[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            adjectival_removed = True
                            break
                    else:
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        adjectival_removed = True
                        break
            if not adjectival_removed:
                for suffix in self.__verb_suffixes:
                    if rv.endswith(suffix):
                        # These verb endings also require a preceding "a"/"i^a".
                        if suffix in (u"la", u"na", u"ete", u"i`te", u"li",
                                      u"i`", u"l", u"em", u"n", u"lo", u"no",
                                      u"et", u"i^ut", u"ny", u"t'", u"esh'",
                                      u"nno"):
                            if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                                rv[-len(suffix)-1:-len(suffix)] == "a"):
                                word = word[:-len(suffix)]
                                r2 = r2[:-len(suffix)]
                                rv = rv[:-len(suffix)]
                                verb_removed = True
                                break
                        else:
                            word = word[:-len(suffix)]
                            r2 = r2[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            verb_removed = True
                            break
            if not adjectival_removed and not verb_removed:
                for suffix in self.__noun_suffixes:
                    if rv.endswith(suffix):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        break
        # Step 2: drop a trailing "i" in RV.
        for suffix in ():
            pass
        if rv.endswith("i"):
            word = word[:-1]
            r2 = r2[:-1]
        # Step 3: drop a derivational suffix, but only if it lies in R2.
        for suffix in self.__derivational_suffixes:
            if r2.endswith(suffix):
                word = word[:-len(suffix)]
                break
        # Step 4: undouble "nn", else strip a superlative ending (then
        # undouble), else drop a trailing soft sign.
        if word.endswith("nn"):
            word = word[:-1]
            undouble_success = True
        if not undouble_success:
            for suffix in self.__superlative_suffixes:
                if word.endswith(suffix):
                    word = word[:-len(suffix)]
                    superlative_removed = True
                    break
            if word.endswith("nn"):
                word = word[:-1]
        if not undouble_success and not superlative_removed:
            if word.endswith("'"):
                word = word[:-1]
        if chr_exceeded:
            word = self.__roman_to_cyrillic(word)
        return word
    def __regions_russian(self, word):
        u"""
        Return the regions RV and R2 which are used by the Russian stemmer.
        In any word, RV is the region after the first vowel,
        or the end of the word if it contains no vowel.
        R2 is the region after the first non-vowel following
        a vowel in R1, or the end of the word if there is no such non-vowel.
        R1 is the region after the first non-vowel following a vowel,
        or the end of the word if there is no such non-vowel.
        :param word: The Russian word whose regions RV and R2 are determined.
        :type word: str or unicode
        :return: the regions RV and R2 for the respective Russian word.
        :rtype: tuple
        :note: This helper method is invoked by the stem method of the subclass
               RussianStemmer. It is not to be invoked directly!
        """
        r1 = u""
        r2 = u""
        rv = u""
        vowels = (u"A", u"U", u"E", u"a", u"e", u"i", u"o", u"u", u"y")
        # Collapse the multi-character transliterations "i^a", "i^u", "e`"
        # into single placeholder letters so region boundaries are computed
        # per vowel, not per transliteration character.
        word = (word.replace(u"i^a", u"A")
                    .replace(u"i^u", u"U")
                    .replace(u"e`", u"E"))
        # R1: after the first non-vowel that follows a vowel.
        for i in xrange(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                r1 = word[i+1:]
                break
        # R2: same rule applied inside R1.
        for i in xrange(1, len(r1)):
            if r1[i] not in vowels and r1[i-1] in vowels:
                r2 = r1[i+1:]
                break
        # RV: after the first vowel of the word.
        for i in xrange(len(word)):
            if word[i] in vowels:
                rv = word[i+1:]
                break
        # Expand the placeholders back to their transliterated forms.
        r2 = (r2.replace(u"A", u"i^a")
                .replace(u"U", u"i^u")
                .replace(u"E", u"e`"))
        rv = (rv.replace(u"A", u"i^a")
                .replace(u"U", u"i^u")
                .replace(u"E", u"e`"))
        return (rv, r2)
    def __cyrillic_to_roman(self, word):
        u"""
        Transliterate a Russian word into the Roman alphabet.
        A Russian word whose letters consist of the Cyrillic
        alphabet are transliterated into the Roman alphabet
        in order to ease the forthcoming stemming process.
        :param word: The word that is transliterated.
        :type word: unicode
        :return: the transliterated word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               RussianStemmer. It is not to be invoked directly!
        """
        # Each replace handles one Cyrillic letter (upper and lower case map
        # to the same lowercase Roman form).
        word = (word.replace(u"\u0410", u"a").replace(u"\u0430", u"a")
                    .replace(u"\u0411", u"b").replace(u"\u0431", u"b")
                    .replace(u"\u0412", u"v").replace(u"\u0432", u"v")
                    .replace(u"\u0413", u"g").replace(u"\u0433", u"g")
                    .replace(u"\u0414", u"d").replace(u"\u0434", u"d")
                    .replace(u"\u0415", u"e").replace(u"\u0435", u"e")
                    .replace(u"\u0401", u"e").replace(u"\u0451", u"e")
                    .replace(u"\u0416", u"zh").replace(u"\u0436", u"zh")
                    .replace(u"\u0417", u"z").replace(u"\u0437", u"z")
                    .replace(u"\u0418", u"i").replace(u"\u0438", u"i")
                    .replace(u"\u0419", u"i`").replace(u"\u0439", u"i`")
                    .replace(u"\u041A", u"k").replace(u"\u043A", u"k")
                    .replace(u"\u041B", u"l").replace(u"\u043B", u"l")
                    .replace(u"\u041C", u"m").replace(u"\u043C", u"m")
                    .replace(u"\u041D", u"n").replace(u"\u043D", u"n")
                    .replace(u"\u041E", u"o").replace(u"\u043E", u"o")
                    .replace(u"\u041F", u"p").replace(u"\u043F", u"p")
                    .replace(u"\u0420", u"r").replace(u"\u0440", u"r")
                    .replace(u"\u0421", u"s").replace(u"\u0441", u"s")
                    .replace(u"\u0422", u"t").replace(u"\u0442", u"t")
                    .replace(u"\u0423", u"u").replace(u"\u0443", u"u")
                    .replace(u"\u0424", u"f").replace(u"\u0444", u"f")
                    .replace(u"\u0425", u"kh").replace(u"\u0445", u"kh")
                    .replace(u"\u0426", u"t^s").replace(u"\u0446", u"t^s")
                    .replace(u"\u0427", u"ch").replace(u"\u0447", u"ch")
                    .replace(u"\u0428", u"sh").replace(u"\u0448", u"sh")
                    .replace(u"\u0429", u"shch").replace(u"\u0449", u"shch")
                    .replace(u"\u042A", u"''").replace(u"\u044A", u"''")
                    .replace(u"\u042B", u"y").replace(u"\u044B", u"y")
                    .replace(u"\u042C", u"'").replace(u"\u044C", u"'")
                    .replace(u"\u042D", u"e`").replace(u"\u044D", u"e`")
                    .replace(u"\u042E", u"i^u").replace(u"\u044E", u"i^u")
                    .replace(u"\u042F", u"i^a").replace(u"\u044F", u"i^a"))
        return word
    def __roman_to_cyrillic(self, word):
        u"""
        Transliterate a Russian word back into the Cyrillic alphabet.
        A Russian word formerly transliterated into the Roman alphabet
        in order to ease the stemming process, is transliterated back
        into the Cyrillic alphabet, its original form.
        :param word: The word that is transliterated.
        :type word: str or unicode
        :return: word, the transliterated word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               RussianStemmer. It is not to be invoked directly!
        """
        # The replacement order matters: multi-character sequences ("i^u",
        # "shch", "kh", ...) must be rewritten before their single-character
        # substrings, otherwise they would be mangled.
        word = (word.replace(u"i^u", u"\u044E").replace(u"i^a", u"\u044F")
                    .replace(u"shch", u"\u0449").replace(u"kh", u"\u0445")
                    .replace(u"t^s", u"\u0446").replace(u"ch", u"\u0447")
                    .replace(u"e`", u"\u044D").replace(u"i`", u"\u0439")
                    .replace(u"sh", u"\u0448").replace(u"k", u"\u043A")
                    .replace(u"e", u"\u0435").replace(u"zh", u"\u0436")
                    .replace(u"a", u"\u0430").replace(u"b", u"\u0431")
                    .replace(u"v", u"\u0432").replace(u"g", u"\u0433")
                    .replace(u"d", u"\u0434").replace(u"e", u"\u0435")
                    .replace(u"z", u"\u0437").replace(u"i", u"\u0438")
                    .replace(u"l", u"\u043B").replace(u"m", u"\u043C")
                    .replace(u"n", u"\u043D").replace(u"o", u"\u043E")
                    .replace(u"p", u"\u043F").replace(u"r", u"\u0440")
                    .replace(u"s", u"\u0441").replace(u"t", u"\u0442")
                    .replace(u"u", u"\u0443").replace(u"f", u"\u0444")
                    .replace(u"''", u"\u044A").replace(u"y", u"\u044B")
                    .replace(u"'", u"\u044C"))
        return word
class SpanishStemmer(_StandardStemmer):
    u"""
    The Spanish Snowball stemmer.
    :cvar __vowels: The Spanish vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Spanish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/spanish/stemmer.html
    """
    # Vowels include the accented forms (\xE1=á, \xE9=é, \xED=í, \xF3=ó,
    # \xFA=ú, \xFC=ü).
    __vowels = u"aeiou\xE1\xE9\xED\xF3\xFA\xFC"
    # Attached-pronoun endings (step 0).
    __step0_suffixes = (u"selas", u"selos", u"sela", u"selo", u"las",
                        u"les", u"los", u"nos", u"me", u"se", u"la", u"le",
                        u"lo")
    # Standard derivational suffixes (step 1), longest first.
    __step1_suffixes = (u'amientos', u'imientos', u'amiento', u'imiento',
                        u'aciones', u'uciones', u'adoras', u'adores',
                        u'ancias', u'log\xEDas', u'encias', u'amente',
                        u'idades', u'anzas', u'ismos', u'ables', u'ibles',
                        u'istas', u'adora', u'aci\xF3n', u'antes',
                        u'ancia', u'log\xEDa', u'uci\xf3n', u'encia',
                        u'mente', u'anza', u'icos', u'icas', u'ismo',
                        u'able', u'ible', u'ista', u'osos', u'osas',
                        u'ador', u'ante', u'idad', u'ivas', u'ivos',
                        u'ico',
                        u'ica', u'oso', u'osa', u'iva', u'ivo')
    # Verb suffixes beginning with 'y' (step 2a).
    __step2a_suffixes = (u'yeron', u'yendo', u'yamos', u'yais', u'yan',
                         u'yen', u'yas', u'yes', u'ya', u'ye', u'yo',
                         u'y\xF3')
    # Remaining verb suffixes (step 2b).
    __step2b_suffixes = (u'ar\xEDamos', u'er\xEDamos', u'ir\xEDamos',
                         u'i\xE9ramos', u'i\xE9semos', u'ar\xEDais',
                         u'aremos', u'er\xEDais', u'eremos',
                         u'ir\xEDais', u'iremos', u'ierais', u'ieseis',
                         u'asteis', u'isteis', u'\xE1bamos',
                         u'\xE1ramos', u'\xE1semos', u'ar\xEDan',
                         u'ar\xEDas', u'ar\xE9is', u'er\xEDan',
                         u'er\xEDas', u'er\xE9is', u'ir\xEDan',
                         u'ir\xEDas', u'ir\xE9is',
                         u'ieran', u'iesen', u'ieron', u'iendo', u'ieras',
                         u'ieses', u'abais', u'arais', u'aseis',
                         u'\xE9amos', u'ar\xE1n', u'ar\xE1s',
                         u'ar\xEDa', u'er\xE1n', u'er\xE1s',
                         u'er\xEDa', u'ir\xE1n', u'ir\xE1s',
                         u'ir\xEDa', u'iera', u'iese', u'aste', u'iste',
                         u'aban', u'aran', u'asen', u'aron', u'ando',
                         u'abas', u'adas', u'idas', u'aras', u'ases',
                         u'\xEDais', u'ados', u'idos', u'amos', u'imos',
                         u'emos', u'ar\xE1', u'ar\xE9', u'er\xE1',
                         u'er\xE9', u'ir\xE1', u'ir\xE9', u'aba',
                         u'ada', u'ida', u'ara', u'ase', u'\xEDan',
                         u'ado', u'ido', u'\xEDas', u'\xE1is',
                         u'\xE9is', u'\xEDa', u'ad', u'ed', u'id',
                         u'an', u'i\xF3', u'ar', u'er', u'ir', u'as',
                         u'\xEDs', u'en', u'es')
    # Residual suffixes (step 3).
    __step3_suffixes = (u"os", u"a", u"e", u"o", u"\xE1",
                        u"\xE9", u"\xED", u"\xF3")
    def stem(self, word):
        u"""
        Stem a Spanish word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        step1_success = False
        # R1/R2/RV regions as defined by the standard Snowball scheme.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)
        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if rv.endswith(suffix):
                    # After a gerund/infinitive with an accent the pronoun is
                    # dropped and the accent removed.
                    if rv[:-len(suffix)].endswith((u"i\xE9ndo",
                                                   u"\xE1ndo",
                                                   u"\xE1r", u"\xE9r",
                                                   u"\xEDr")):
                        word = (word[:-len(suffix)].replace(u"\xE1", u"a")
                                                   .replace(u"\xE9", u"e")
                                                   .replace(u"\xED", u"i"))
                        r1 = (r1[:-len(suffix)].replace(u"\xE1", u"a")
                                               .replace(u"\xE9", u"e")
                                               .replace(u"\xED", u"i"))
                        r2 = (r2[:-len(suffix)].replace(u"\xE1", u"a")
                                               .replace(u"\xE9", u"e")
                                               .replace(u"\xED", u"i"))
                        rv = (rv[:-len(suffix)].replace(u"\xE1", u"a")
                                               .replace(u"\xE9", u"e")
                                               .replace(u"\xED", u"i"))
                    elif rv[:-len(suffix)].endswith((u"ando", u"iendo",
                                                     u"ar", u"er", u"ir")):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                    elif (rv[:-len(suffix)].endswith(u"yendo") and
                          word[:-len(suffix)].endswith(u"uyendo")):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break
        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == u"amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    # Strip secondary "iv"(+"at") / "os" / "ic" / "ad" stems.
                    if r2.endswith(u"iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith((u"os", u"ic", u"ad")):
                        word = word[:-2]
                        rv = rv[:-2]
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in (u"adora", u"ador", u"aci\xF3n", u"adoras",
                                  u"adores", u"aciones", u"ante", u"antes",
                                  u"ancia", u"ancias"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith(u"ic"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif suffix in (u"log\xEDa", u"log\xEDas"):
                        word = word.replace(suffix, u"log")
                        rv = rv.replace(suffix, u"log")
                    elif suffix in (u"uci\xF3n", u"uciones"):
                        word = word.replace(suffix, u"u")
                        rv = rv.replace(suffix, u"u")
                    elif suffix in (u"encia", u"encias"):
                        word = word.replace(suffix, u"ente")
                        rv = rv.replace(suffix, u"ente")
                    elif suffix == u"mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]
                        if r2.endswith((u"ante", u"able", u"ible")):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in (u"idad", u"idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        for pre_suff in (u"abil", u"ic", u"iv"):
                            if r2.endswith(pre_suff):
                                word = word[:-len(pre_suff)]
                                rv = rv[:-len(pre_suff)]
                    elif suffix in (u"ivo", u"iva", u"ivos", u"ivas"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith(u"at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break
        # STEP 2a: Verb suffixes beginning 'y'
        if not step1_success:
            for suffix in self.__step2a_suffixes:
                # Only removed when preceded by 'u' (e.g. "-uyeron").
                if (rv.endswith(suffix) and
                    word[-len(suffix)-1:-len(suffix)] == u"u"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
        # STEP 2b: Other verb suffixes
            for suffix in self.__step2b_suffixes:
                if rv.endswith(suffix):
                    if suffix in (u"en", u"es", u"\xE9is", u"emos"):
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        # Drop the 'u' of a trailing "gu".
                        if word.endswith(u"gu"):
                            word = word[:-1]
                        if rv.endswith(u"gu"):
                            rv = rv[:-1]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                    break
        # STEP 3: Residual suffix
        for suffix in self.__step3_suffixes:
            if rv.endswith(suffix):
                if suffix in (u"e", u"\xE9"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if word[-2:] == u"gu" and rv[-1] == u"u":
                        word = word[:-1]
                else:
                    word = word[:-len(suffix)]
                break
        # Finally remove all remaining acute accents.
        word = (word.replace(u"\xE1", u"a").replace(u"\xE9", u"e")
                    .replace(u"\xED", u"i").replace(u"\xF3", u"o")
                    .replace(u"\xFA", u"u"))
        return word
class SwedishStemmer(_ScandinavianStemmer):
    u"""
    The Swedish Snowball stemmer.
    :cvar __vowels: The Swedish vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Swedish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/swedish/stemmer.html
    """
    # \xE4=ä, \xE5=å, \xF6=ö
    __vowels = u"aeiouy\xE4\xE5\xF6"
    __s_ending = u"bcdfghjklmnoprtvy"
    __step1_suffixes = (u"heterna", u"hetens", u"heter", u"heten",
                        u"anden", u"arnas", u"ernas", u"ornas", u"andes",
                        u"andet", u"arens", u"arna", u"erna", u"orna",
                        u"ande", u"arne", u"aste", u"aren", u"ades",
                        u"erns", u"ade", u"are", u"ern", u"ens", u"het",
                        u"ast", u"ad", u"en", u"ar", u"er", u"or", u"as",
                        u"es", u"at", u"a", u"e", u"s")
    __step2_suffixes = (u"dd", u"gd", u"nn", u"dt", u"gt", u"kt", u"tt")
    __step3_suffixes = (u"fullt", u"l\xF6st", u"els", u"lig", u"ig")
    def stem(self, word):
        u"""
        Stem a Swedish word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        # R1 region computed by the shared Scandinavian helper.
        r1 = self._r1_scandinavian(word, self.__vowels)
        # STEP 1: drop the longest matching suffix found in R1; a final "s"
        # only goes when preceded by a valid s-ending consonant.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == u"s":
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 2: undouble one of the listed consonant pairs (remove only
        # the final letter).
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word = word[:-1]
                r1 = r1[:-1]
                break
        # STEP 3: residual suffixes -- remove entirely, or just the final
        # "t" of "fullt"/"löst".
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in (u"els", u"lig", u"ig"):
                    word = word[:-len(suffix)]
                elif suffix in (u"fullt", u"l\xF6st"):
                    word = word[:-1]
                break
        return word
def demo():
    u"""
    This function provides a demonstration of the Snowball stemmers.
    After invoking this function and specifying a language,
    it stems an excerpt of the Universal Declaration of Human Rights
    (which is a part of the NLTK corpus collection) and then prints
    out the original and the stemmed text.

    Runs interactively (Python 2: uses raw_input and print statements)
    until the user enters 'exit'.
    """
    import re
    from nltk.corpus import udhr
    # Map stemmer-language names to the UDHR corpus file for that language.
    udhr_corpus = {"danish": "Danish_Dansk-Latin1",
                   "dutch": "Dutch_Nederlands-Latin1",
                   "english": "English-Latin1",
                   "finnish": "Finnish_Suomi-Latin1",
                   "french": "French_Francais-Latin1",
                   "german": "German_Deutsch-Latin1",
                   "hungarian": "Hungarian_Magyar-UTF8",
                   "italian": "Italian_Italiano-Latin1",
                   "norwegian": "Norwegian-Latin1",
                   "porter": "English-Latin1",
                   "portuguese": "Portuguese_Portugues-Latin1",
                   "romanian": "Romanian_Romana-Latin2",
                   "russian": "Russian-UTF8",
                   "spanish": "Spanish-Latin1",
                   "swedish": "Swedish_Svenska-Latin1",
                  }
    print u"\n"
    print u"******************************"
    print u"Demo for the Snowball stemmers"
    print u"******************************"
    while True:
        language = raw_input(u"Please enter the name of the language " +
                             u"to be demonstrated\n" +
                             u"/".join(SnowballStemmer.languages) +
                             u"\n" +
                             u"(enter 'exit' in order to leave): ")
        if language == u"exit":
            break
        if language not in SnowballStemmer.languages:
            print (u"\nOops, there is no stemmer for this language. " +
                   u"Please try again.\n")
            continue
        stemmer = SnowballStemmer(language)
        # Stem the first 300 words of the excerpt and wrap both texts to
        # roughly 70 columns for side-by-side display.
        excerpt = udhr.words(udhr_corpus[language]) [:300]
        stemmed = u" ".join([stemmer.stem(word) for word in excerpt])
        stemmed = re.sub(r"(.{,70})\s", r'\1\n', stemmed+u' ').rstrip()
        excerpt = u" ".join(excerpt)
        excerpt = re.sub(r"(.{,70})\s", r'\1\n', excerpt+u' ').rstrip()
        print u"\n"
        print u'-' * 70
        print u'ORIGINAL'.center(70)
        print excerpt
        print u"\n\n"
        print u'STEMMED RESULTS'.center(70)
        print stemmed
        print u'-' * 70
        print u"\n"
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| true |
3a473f649011be04111abf61011f18f8df9ad106 | Python | aclements/thesis | /thesis/data/processors/moore.py | UTF-8 | 5,286 | 2.546875 | 3 | [] | no_license | import collections
import datetime
import json
import re
import csv
import os
import itertools
class Proc(collections.namedtuple(
    'Proc', 'name date clock_mhz cores total_cores tdp_watts product_id')):
    """One processor record: model name, launch date, clock (MHz), cores per
    package, total cores across packages, TDP (W) and a (vendor, id) key.

    Uses the Python 2 built-in cmp() for three-way comparisons.
    """
    def dominates(self, other):
        """Return True if self strictly dominates other."""
        # Earlier criterion set, kept for reference:
        # cmps = [cmp(getattr(self, field), getattr(other, field))
        #         for field in ('clock_mhz', 'cores', 'tdp_watts')]
        cmps = [cmp(getattr(self, field), getattr(other, field))
                for field in ('clock_mhz', 'total_cores')]
        # All criteria at least as good, and at least one strictly better.
        return all(c >= 0 for c in cmps) and any(c > 0 for c in cmps)
    def weak_dominates(self, other):
        """Return True if self is better-or-equal on any criterion basis:
        faster clock, more total cores, or an earlier release date -- or
        exactly equal on all three."""
        cmps = [cmp(getattr(self, field), getattr(other, field))
                for field in ('clock_mhz', 'total_cores', 'date')]
        # Smaller dates dominate larger dates
        cmps[2] = -cmps[2]
        return any(c > 0 for c in cmps) or all(c == 0 for c in cmps)
def parse_odata_date(s):
    """Convert an OData date literal '/Date(<milliseconds>)/' into the
    corresponding local datetime.date."""
    millis = re.match(r'/Date\(([0-9]+)\)/', s).group(1)
    return datetime.date.fromtimestamp(int(millis) / 1000)
def read_ark(fp):
    """Yield a Proc for each usable record in an Intel ARK JSON dump.

    Records are skipped when they are Xeon Phi parts, have no launch date,
    or have no TDP figure.
    """
    d = json.load(fp)['d']
    for rec in d:
        if 'Phi' in rec['ProductName']:
            # These reach to higher core counts, but I'm not sure I
            # would consider the "general purpose".
            continue
        if rec['LaunchDate'] is None:
            # XXX Lots of these have BornOnDate
            continue
        if rec['MaxTDP'] is None:
            continue
        date = parse_odata_date(rec['LaunchDate'])
        # total_cores scales per-package cores by the max socket count
        # (MaxCPUs may be missing/None, treated as 1).
        yield Proc(name=rec['ProductName'], date=date,
                   clock_mhz=rec['ClockSpeedMhz'],
                   cores=rec['CoreCount'],
                   total_cores=rec['CoreCount'] * (rec['MaxCPUs'] or 1),
                   tdp_watts=rec['MaxTDP'],
                   product_id=('Intel', rec['ProductId']))
def read_cpudb(path):
    """Yield a Proc for each usable AMD/Intel record in a CPU DB dump.

    :param path: directory containing the CPU DB CSV files
                 (microarchitecture.csv, manufacturer.csv, processor.csv).
    """
    # x86 microarchitecture ids -- currently only used by the commented-out
    # filter below; records are filtered by manufacturer instead.
    x86s = {row['microarchitecture_id']
            for row in csv.DictReader(
                open(os.path.join(path, 'microarchitecture.csv')))
            if row['isa'] in ('x86-32', 'x86-64')}
    manu = {row['manufacturer_id']
            for row in csv.DictReader(
                open(os.path.join(path, 'manufacturer.csv')))
            if row['name'] in ('AMD', 'Intel')}
    for rec in csv.DictReader(open(os.path.join(path, 'processor.csv'))):
        # if rec['microarchitecture_id'] not in x86s:
        #     continue
        if rec['manufacturer_id'] not in manu:
            continue
        if rec['date'].startswith('1982-'):
            # Meh. Points before 1985 are just to make the smoothing
            # pretty (we don't actually show them), and the 80286
            # messes with our pretty smoothing.
            continue
        date = rec['date']
        if not date:
            continue
        date = datetime.date(*map(int, date.split('-')))
        if not rec['tdp']:
            continue
        # Recover the ARK product id from the source URL, if present, so
        # dedup() can match this record against the ARK data set.
        m = re.match(r'http://ark.intel.com/Product\.aspx\?id=([0-9]+)',
                     rec['source'])
        if m:
            product_id = ('Intel', int(m.group(1)))
        else:
            product_id = None
        yield Proc(name=rec['model'], date=date,
                   clock_mhz=float(rec['clock']),
                   cores=int(rec['hw_ncores']),
                   total_cores=int(rec['hw_ncores']),
                   tdp_watts=float(rec['tdp']),
                   product_id=product_id)
def dedup(ark, cpudb):
    """Yield every ARK record, then the CPU DB records not already covered.

    ARK entries take precedence: a CPU DB entry is only yielded when it has
    no product_id or its product_id never appeared in the ARK stream.
    """
    seen_ids = set()
    for record in ark:
        seen_ids.add(record.product_id)
        yield record
    for record in cpudb:
        if record.product_id is not None and record.product_id in seen_ids:
            continue
        yield record
def dedominate_month(procs):
    """From each month, remove processors strictly dominated by another.
    This usually weeds out multiple speeds of the same basic model.
    The result is somewhat messy.  Not recommended.
    """
    # Group records by calendar month (date normalized to the 1st).
    groups = {}
    for proc in procs:
        key = proc.date.replace(day=1)
        groups.setdefault(key, []).append(proc)
    # Within each month, keep only the undominated records.
    # (itervalues is Python 2 only.)
    for procs in groups.itervalues():
        for proc in procs:
            if not any(other.dominates(proc) for other in procs):
                yield proc
def dedominate_past(procs):
    """Remove processors strictly dominated by an earlier processor.
    This focuses on "top of the line" processors.
    """
    groups = {}
    for proc in procs:
        groups.setdefault(proc.date, []).append(proc)
    kept = []
    # Walk release dates in order; a record survives only if nothing kept
    # so far -- nor a same-day sibling -- strictly dominates it.
    # (iteritems is Python 2 only.)
    for date, procs in sorted(groups.iteritems()):
        for proc in procs:
            # Is this processor is dominated by an earlier processor
            # or one released at the same time?
            if not any(other.dominates(proc) for other in kept + procs):
                kept.append(proc)
                yield proc
def dedominate_any(procs):
    """Return, sorted by release date, the processors that weakly dominate
    every processor in the input (each record is compared against all of
    them, including itself)."""
    pool = list(procs)
    survivors = [candidate for candidate in pool
                 if all(candidate.weak_dominates(other) for other in pool)]
    return sorted(survivors, key=lambda p: p.date)
# Emit one line per surviving processor: fractional year of release, clock
# (MHz), TDP (W), cores per package, total cores, and the model name.
# (Python 2 print statement; name is UTF-8 encoded for raw output.)
for proc in dedominate_any(dedup(
        read_ark(open('ark/processors.json')),
        read_cpudb('cpudb'))):
    # Fractional year: year plus day-of-year / 365.
    df = proc.date.year + (proc.date.replace(year=1).toordinal() / 365.0)
    print df, proc.clock_mhz, proc.tdp_watts, proc.cores, proc.total_cores, proc.name.encode('utf-8')
| true |
bb401e48c400835bb31114e2a6db0c5b1a58f22d | Python | ShallyZhang/Shally | /microblock.py | UTF-8 | 1,226 | 2.8125 | 3 | [] | no_license | import datetime # 导入时间库
import hashlib # 导入哈希函数库
from Transaction import Transaction # 导入交易类
class microblock: # 交易块的类,也称作microblock 的类
def __init__(self,previoushash):
self.transactionlist = [] # 交易数据列表
self.timestamp = datetime.datetime.now() # 当前交易块时间
self.hash = None # 交易块hash
self.previoushash = previoushash # 上一个块的hash
def addTransaction(self,data): # 添加新的交易到交易数据列表
self.transactionlist = self.transactionlist + data
def set_microhash(self): # 设置microblock 的自身ID
combination = str(self.timestamp) + str(self.previoushash)
for trans in self.transactionlist:
combination = combination + str(trans)
self.hash = hashlib.sha256( combination.encode("utf-8")).hexdigest()
def __repr__(self):
return "\nIndex: " + str(self.index) + "\nPreviousHash: " + str(self.previoushash) + "\nTransactionlist: " + str(len(self.transactionlist)) \
+ "\nTimeStamp: " + str(self.timestamp) + "\nHash: " + str(self.hash)+ "\n"
| true |
7b4e9ede9cbe7bab84e574d1250cdd71f76c01cc | Python | Melted-Cheese96/WebinteractionBots | /very_basic_web_scraper..py | UTF-8 | 219 | 2.53125 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
import re
r = requests.get('https://www.jimsmowing.net')
content = r.text
soup = BeautifulSoup(content, 'html.parser')
#print(soup.find_all('p')[4].get_text()) | true |
249acf53c0392f37d40ceb10e9e82d4574fc8473 | Python | ripl/camera-scene-classifier | /src/scene_classifier | UTF-8 | 3,053 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
# @Author: Andrea F. Daniele <afdaniele>
# @Date: Thursday, April 27th 2018
# @Email: afdaniele@ttic.edu
# @Last modified by: afdaniele
# @Last modified time: Thursday, April 27th 2018
import sys, os
import numpy as np
import math
import rospy
import json
from darknet_ros_msgs.msg import BoundingBoxes
from camera_scene_classifier.msg import SceneClassification
class SceneClassifier():
    """ROS node that infers a scene category from object detections.

    Subscribes to a darknet_ros BoundingBoxes topic, maps each detected
    object label to a scene category using a JSON config file, and publishes
    the most frequent category as a SceneClassification message.
    """

    def __init__(self, config_file, detections_topic):
        self.paused = True
        self.objects_to_scene_label_map = {}
        self.verbose = True
        # initialize ROS node
        rospy.init_node('camera_scene_classifier')
        # load scene-label -> object-labels map from the JSON config
        self.objects_to_scene_label_map = json.load(open(config_file))
        # sorted() gives a stable label ordering and works on both
        # Python 2 and 3 (dict.keys().sort() is Python 2 only)
        self.scene_labels = sorted(self.objects_to_scene_label_map)
        # subscribe to stream of detections
        rospy.Subscriber(detections_topic, BoundingBoxes, self.detections_callback, queue_size=1)
        # advertise new ROS topic
        self.scene_class_publisher = rospy.Publisher('~/scene_classification', SceneClassification, queue_size=5)

    def start(self):
        """Unpause and block, consuming detection messages until shutdown."""
        self.paused = False
        # consume messages
        rospy.spin()

    def _most_likely_scene_given_object(self, object_label):
        """Return the scene label whose object list contains object_label,
        or None when the label maps to no known scene."""
        for scene_lbl, scene_objects in self.objects_to_scene_label_map.items():
            if object_label in scene_objects:
                return scene_lbl
        return None

    def detections_callback(self, detections_msg):
        """Vote each bounding box into a scene category and publish the
        category with the most votes."""
        count_per_category = {
            label: 0 for label in self.objects_to_scene_label_map
        }
        # count object labels per scene category
        for bounding_box in detections_msg.bounding_boxes:
            scene_lbl = self._most_likely_scene_given_object(bounding_box.Class)
            if scene_lbl is None:
                continue
            count_per_category[scene_lbl] += 1
        # pick the most voted scene category
        max_count = 0
        scene_name = ""
        for scene_lbl, obj_count in count_per_category.items():
            if obj_count > max_count:
                max_count = obj_count
                scene_name = scene_lbl
        if max_count == 0:
            # BUG FIX: with no known object detected, scene_name stayed ""
            # and self.scene_labels.index("") raised ValueError; skip
            # publishing in that case instead.
            return
        # get scene ID
        scene_id = self.scene_labels.index(scene_name)
        # publish scene class
        # BUG FIX: `class = scene_id` was a SyntaxError because `class` is a
        # Python keyword; the field is passed through dict unpacking instead.
        # NOTE(review): assumes the generated message really exposes a field
        # named 'class' -- confirm against the SceneClassification.msg file.
        scene_class_msg = SceneClassification(
            header=detections_msg.header,
            **{'class': scene_id, 'classes': self.scene_labels}
        )
        if self.verbose:
            print("Detected '%s'" % scene_name)
        self.scene_class_publisher.publish(scene_class_msg)
if __name__ == '__main__':
    # get parameters from the ROS parameter server
    config_file = rospy.get_param("~config_file")
    detection_topic = rospy.get_param("~detection_topic")
    # make sure that the configuration file exists
    if not os.path.isfile(config_file):
        rospy.logfatal('The configuration file "%s" does not exist.', config_file)
        # BUG FIX: logfatal only logs -- it does not terminate the process,
        # so execution previously fell through into SceneClassifier and
        # crashed later on json.load(); exit explicitly instead.
        sys.exit(1)
    # create and start the scene classifier
    # BUG FIX: the original passed the undefined name `detections_topic`
    # (the parameter above is bound to `detection_topic`), which raised
    # NameError at startup.
    classifier = SceneClassifier(config_file, detection_topic)
    classifier.start()
| true |
c0c31cbf6027550e7fed4758fb49c21d436e3d36 | Python | kaizsv/GoMoKu | /player.py | UTF-8 | 2,292 | 3.0625 | 3 | [] | no_license | import re
import numpy as np
class Player(object):
    """A human Go-Moku player that reads moves from the console.

    Moves are entered as a column number followed by a row letter
    (e.g. "3b"); "-1" resigns.  Python 2 code (raw_input, print statement).
    """
    def __init__(self, player, learing, n):
        # NOTE(review): `learing` looks like a typo for `learning`, but the
        # parameter name is part of the public signature -- confirm callers
        # before renaming.
        self.player = player                                 # 1 (Black) or 2 (White)
        self.color = 'Black' if player == 1 else 'White'
        self.is_learning = learing
        self.board_size = n                                  # board is n x n
    def __str__(self):
        return self.__class__.__name__ + ' is ' + self.color
    def convert_state(self, state):
        """Convert a flat board state (0/1/2 per cell) into a two-plane
        binary encoding: plane 0 marks this player's stones, plane 1 the
        opponent's; the result is flattened to length 2*n*n."""
        # TODO: this might be wrong
        #return np.where(state==0, 0, np.where(state==1, 1, -1))
        opp_player = 2 if self.player == 1 else 1
        d_phase = { 0:2, self.player:0, opp_player:1 }
        #d_phase = { 0:2, 1:0, 2:1 }
        # NOTE(review): np.int is deprecated/removed in recent NumPy; plain
        # int (or np.int_) would be the modern spelling.
        c_state = np.zeros((2, self.board_size ** 2), dtype=np.int)
        for idx, s in enumerate(state):
            if s != 0:
                c_state[d_phase[s]][idx] = 1
        return c_state.reshape(2 * self.board_size ** 2)
    def move(self, action_prob = None, legal_moves = None):
        """Prompt until a syntactically valid move is entered and return it
        as a flat board index; returns -1 on resignation.

        action_prob and legal_moves are unused here -- presumably kept for
        signature compatibility with non-human players (verify).
        """
        row = [chr(i) for i in range(ord('a'), ord('a') + self.board_size)]
        col = [str(i) for i in range(1, 1 + self.board_size)]
        while True:
            move = raw_input('Your move > ')
            if move == '-1':
                return -1
            x = move[:-1] # except last char
            y = move[-1] # last char
            if x in col and y in row:
                x = int(x) - 1
                y = ord(y) - ord('a')
                return x * self.board_size + y
            print 'Illegal move'
    def fair_board_move(self, board):
        """Prompt for a move that respects the board's fair-board limit;
        returns the flat index, or a negative value on resignation."""
        # black can only move outside the limit line
        # of the board at the first move.
        limit = board.board_limit
        size = board.size
        while True:
            # There is no learning mode in player
            # game mode
            action = self.move()
            if action < 0:
                return action
            if self.check_fair_board(action, size, limit):
                return action
            print 'fair board rule\nYou need to play outside the limit line ' + str(limit) + '\n'
    def check_fair_board(self, action, size, limit):
        """Return True when the flat index `action` lies in the outer border
        of width `limit` (i.e. outside the central restricted square)."""
        if action < size * limit or \
           action > size ** 2 - 1 - size * limit or \
           action % size < limit or \
           action % size > size - 1 - limit:
            return True
        else:
            return False
3390aa3a961f55033a15ad80520071decbfd77e1 | Python | lpatruno/airline-time-analysis | /PythonScripts/avg_delay_by_time_outgoing/heatmap.py | UTF-8 | 644 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
"""
Generate the heat map using the previously computed data for avg depart delay by time
@author Luigi Patruno
@date 29 Apr 2015
"""
file_path = '../../data/avg_delay_by_time_outgoing/part-00000'
data = []
f = open(file_path)
for line in f:
key, val = line.strip().split('\t')
(month, day, time) = (x for x in key.split('_'))
if len(time) == 3:
hour, minute = time[0], time[1:]
else:
hour, minute = time[:2], time[2:]
data.append( [int(month), int(day), int(hour), int(minute), float(val) ] )
data = sorted(data, key = lambda x: (x[0], x[1], x[2], x[3]) )
num_rows = len(data)
print data
f.close()
| true |
ac0ea7882e3bb6d15b6181dd1efb16854c8ae9d9 | Python | atlarge-research/opendc-autoscaling-prototype | /autoscalers/plan_autoscaler.py | UTF-8 | 5,372 | 2.65625 | 3 | [] | no_license | from collections import deque
from autoscalers.Autoscaler import Autoscaler
from core import SimCore, Constants
from core.Task import Task
class PlanAutoscaler(Autoscaler):
    def __init__(self, simulator, logger):
        """Create a plan-based autoscaler named 'Plan' for the given
        simulator and logger (both handled by the Autoscaler base class)."""
        super(PlanAutoscaler, self).__init__(simulator, 'Plan', logger)
        # will contain one plan per processor
        # (self.resource_manager is presumably set up by the Autoscaler
        # base class -- not visible here)
        self.plans = deque(maxlen=self.resource_manager.get_maximum_capacity())
        # simulated finish time, keyed by task id
        self.finish_times = {}
def get_level_of_parallelism(self):
return sum(1 for processor_plan in self.plans if processor_plan)
def get_min_processor_plan(self, eligible_plans):
if not eligible_plans:
return None
min_possible_plan = None
min_finish_time = None
for processor_plan in eligible_plans:
if not processor_plan:
return processor_plan
plan_finish_time = processor_plan[-1]
if not min_possible_plan or plan_finish_time < min_finish_time:
min_possible_plan = processor_plan
min_finish_time = plan_finish_time
return min_possible_plan
def get_eligible_plans(self, max_parent_finish_time):
eligible_plans = []
for plan in self.plans:
if not plan and not max_parent_finish_time:
eligible_plans.append(plan)
elif plan:
# plan contains finish times of it's tasks
plan_finish_time = plan[-1]
if plan_finish_time >= max_parent_finish_time:
eligible_plans.append(plan)
return eligible_plans
def get_max_parent_finish_time(self, task):
"""Gets the critical parent of a task."""
parent_tasks = task.dependencies
if not parent_tasks:
return 0
critical_parent = 0
for parent_id in parent_tasks:
parent_finish_time = self.finish_times.get(parent_id, 0)
if parent_finish_time > critical_parent:
critical_parent = parent_finish_time
return critical_parent
def place_tasks(self, tasks):
for task in tasks:
critical_parent_finish_time = self.get_max_parent_finish_time(task)
eligible_plans = self.get_eligible_plans(critical_parent_finish_time)
# gets a reference to the processor plan with the least amount of work
min_possible_plan = self.get_min_processor_plan(eligible_plans)
if min_possible_plan == None:
continue
min_start_time = min_possible_plan[-1] if min_possible_plan else 0
if min_start_time >= self.N_TICKS_PER_EVALUATE:
self.logger.log('Time threshold reached, plan surpasses next autoscaling interval', 'debug')
return True
task_runtime = (task.ts_end - self.sim.ts_now) if task.status == Task.STATUS_RUNNING else task.runtime
task_finish_time = min_start_time + task_runtime
min_possible_plan.append(task_finish_time)
self.finish_times[task.id] = task_finish_time
return False
def get_entry_tasks(self):
"""Tasks with dependencies that have been met, including running tasks."""
running_tasks = []
for site in self.resource_manager.sites:
running_tasks += site.running_tasks.values()
return running_tasks + list(self.sim.central_queue.tasks_to_schedule())
def get_child_tasks(self, tasks):
child_tasks = []
for task in tasks:
child_tasks.extend(task.children)
return child_tasks
def predict(self):
self.plans.clear()
for _ in xrange(self.plans.maxlen):
# one plan per processor
per_processor_plan = deque()
self.plans.append(per_processor_plan)
# (re)initialize simulated finish times
self.finish_times.clear()
tasks = self.get_entry_tasks()
while tasks:
time_threshold_reached = self.place_tasks(tasks)
if time_threshold_reached:
break
tasks = self.get_child_tasks(tasks)
return self.get_level_of_parallelism()
def evaluate(self, params):
super(PlanAutoscaler, self).evaluate(params)
prediction = self.predict()
mutation = 0
current_capacity = self.resource_manager.get_current_capacity()
target = prediction - current_capacity
if target > 0:
self.autoscale_op = 1
mutation = self.resource_manager.start_up_best_effort(target)
self.logger.log('Upscaled by {0}, target was {1}'.format(mutation, target))
elif target < 0:
self.autoscale_op = -1
target = abs(target)
mutation = self.resource_manager.release_resources_best_effort(target)
self.logger.log('Downscaled by {0}, target was {1}'.format(mutation, target))
self.log(current_capacity, mutation, target)
self.refresh_stats(prediction, current_capacity + mutation * self.autoscale_op)
self.sim.events.enqueue(
SimCore.Event(
self.sim.ts_now + self.N_TICKS_PER_EVALUATE,
self.id,
self.id,
{'type': Constants.AUTO_SCALE_EVALUATE}
)
)
| true |
2267289dfe1db171ed8295c49eec7deb46808043 | Python | devilhtc/leetcode-solutions | /0x0024_36.Valid_Sudoku/solution.py | UTF-8 | 993 | 2.984375 | 3 | [] | no_license | class Solution:
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
def validate_vals(vals):
return all(
v == 1
for _, v in collections.Counter(
[int(v) for v in vals if v != "."]
).items()
)
def validate_row(i):
return validate_vals(board[i])
def validate_col(i):
return validate_vals([board[j][i] for j in range(9)])
def validate_box(i, j):
return validate_vals(
[
board[m][n]
for m in range(i * 3, i * 3 + 3)
for n in range(j * 3, j * 3 + 3)
]
)
return (
all(validate_row(i) for i in range(9))
and all(validate_col(i) for i in range(9))
and all(validate_box(i, j) for i in range(3) for j in range(3))
)
| true |
2ff3fe7475c1799365623032e1cdc2296b5e846b | Python | 5l1v3r1/RumourSpread | /OriginalModel/graph.py | UTF-8 | 1,708 | 3.28125 | 3 | [] | no_license | import numpy as np
class Graph:
    """Simple undirected graph stored as an adjacency list."""

    def __init__(self, n):
        # n: number of vertices, labelled 0 .. n-1
        self.n = n
        self.adj_list = [[] for i in range(n)]

    def add_edge(self, u, v):
        """Add an undirected edge between vertices u and v."""
        self.adj_list[u].append(v)
        self.adj_list[v].append(u)

    def add_node(self):
        """Append a new isolated vertex and return its index."""
        self.n += 1
        self.adj_list.append([])
        return self.n - 1

    def compute_degrees(self):
        """Return a list mapping each vertex to its degree."""
        return [len(self.adj_list[i]) for i in range(self.n)]

    def compute_degree_distribution(self):
        """Return dist where dist[d] counts the vertices of degree d."""
        deg_list = self.compute_degrees()
        deg_dist = [0] * self.n
        for deg in deg_list:
            deg_dist[deg] += 1
        return deg_dist

    def compute_diameter(self):
        """All-pairs shortest path (Floyd-Warshall, vectorized) diameter.

        Returns the string 'infinity' when the graph is disconnected.

        Fix: the class originally defined `compute_diameter` TWICE; the
        first, pure-Python O(n^3)-loop version was silently shadowed by
        this NumPy version, so the dead duplicate has been removed.
        """
        dp = np.full((self.n, self.n), np.inf)
        for node in range(self.n):
            for nbr in self.adj_list[node]:
                dp[node, nbr] = 1
        # relax through every intermediate vertex k at once
        for k in range(self.n):
            dp = np.minimum(
                dp, np.expand_dims(dp[:, k], axis=1)
                + np.expand_dims(dp[k, :], axis=0))
        diameter = np.max(dp)
        return 'infinity' if diameter == np.inf else diameter
| true |
40e65b777920adf80c5d6656e5a9cdda2890c959 | Python | department-of-general-services/code_jam | /advent_of_code_2020/james/day_2/day_two.py | UTF-8 | 1,947 | 3.625 | 4 | [] | no_license | import functools
import time
from pathlib import Path
from itertools import combinations
import pandas as pd
import re
def split_input(row):
    """Parse row["raw_text"] of the form "min-max char: password" into the
    columns min_reps, max_reps, target_char and password (mutates and
    returns the row)."""
    pattern = r"^(\d+)-(\d+)\s(\w):\s(\w+)"
    lo, hi, char, pwd = re.match(pattern=pattern, string=row["raw_text"]).groups()
    row["min_reps"] = int(lo)
    row["max_reps"] = int(hi)
    row["target_char"] = char
    row["password"] = pwd
    return row
def is_password_legit(row):
    """Part-I policy: target_char must occur between min_reps and max_reps
    times (inclusive) in the password (mutates and returns the row)."""
    # tally occurrences of the character in question
    occurrences = sum(ch == row["target_char"] for ch in row["password"])
    row["count"] = occurrences
    # valid when the tally falls inside the inclusive [min_reps, max_reps] band
    row["is_valid"] = occurrences in range(row["min_reps"], row["max_reps"] + 1)
    return row
def is_password_legit_part_II(row):
    """Part-II policy: the target character must occupy exactly one of the
    two 1-based positions named by the rule (mutates and returns the row)."""
    # the two rule numbers now denote positions, so rename to avoid confusion
    row = row.rename({"min_reps": "pos_1", "max_reps": "pos_2"})
    # convert the puzzle's 1-based positions to 0-based indexing
    row["pos_1"] -= 1
    row["pos_2"] -= 1
    # record whether the target character sits at each position
    row["is_in_pos_1"] = row["password"][row["pos_1"]] == row["target_char"]
    row["is_in_pos_2"] = row["password"][row["pos_2"]] == row["target_char"]
    # valid iff exactly one of the two positions matches (exclusive or)
    row["is_valid"] = row["is_in_pos_1"] != row["is_in_pos_2"]
    return row
if __name__ == "__main__":
    # load the raw puzzle input, one "min-max char: password" policy per line
    input_path = Path.cwd() / "day_2" / "input_day_2.txt"
    # fix: the original used open(...).read(), which leaks the file handle;
    # Path.read_text() opens, reads and closes in one call
    passwords = input_path.read_text().splitlines()
    df = pd.DataFrame(passwords, columns=["raw_text"])
    # parse each raw line into min_reps / max_reps / target_char / password
    df_split = df.apply(split_input, axis=1)
    ### Part I
    res_df = df_split.apply(is_password_legit, axis=1)
    print("Part I")
    print(f"Of {len(res_df)} passwords, {res_df['is_valid'].sum()} are legit.")
    ### Part II
    res_df_2 = df_split.apply(is_password_legit_part_II, axis=1)
    print("Part II")
    print(f"Of {len(res_df_2)} passwords, {res_df_2['is_valid'].sum()} are legit.")
d32415e83f4447be4139a778226ca0f0b28ff00f | Python | dongho108/CodingTestByPython | /boostcamp/ex/dfs_bfs/1_solved.py | UTF-8 | 399 | 2.875 | 3 | [] | no_license | answer = 0
def dfs(n, sum, numbers, target):
    """Count (into the global `answer`) sign assignments of numbers[n:]
    that bring the running total `sum` to `target`.

    NOTE: the parameter name `sum` shadows the builtin; it is kept for
    backward compatibility with existing (keyword) callers.
    """
    global answer
    # every number has been consumed: record a hit when the total matches
    if n == len(numbers):
        if sum == target:
            answer += 1
        return
    # branch on adding or subtracting the next number
    dfs(n+1, sum+numbers[n], numbers, target)
    dfs(n+1, sum-numbers[n], numbers, target)


def solution(numbers, target):
    """Return how many ways +/- signs on `numbers` make them sum to target."""
    global answer
    # BUG FIX: reset the module-level counter so repeated calls do not
    # accumulate results from earlier invocations.
    answer = 0
    # robustness: an empty list has only the empty assignment, summing to 0
    if not numbers:
        return 1 if target == 0 else 0
    dfs(1, numbers[0], numbers, target)
    dfs(1, -numbers[0], numbers, target)
    return answer
ae67114d4b45a5bb8d07bddc9422542c16ac01df | Python | madacol/segwit-p2sh | /test-pw.py | UTF-8 | 1,889 | 2.828125 | 3 | [
"WTFPL",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python3
import sys
from lib.keystore import from_bip39_seed
from lib.storage import WalletStorage
from lib.wallet import Standard_Wallet
# Change this to be YOUR seed phrase:
SEED_WORDS = 'final round trust era topic march brain envelope spoon minimum bunker start'
# Change this to be the addresses the wallet might had.
POSSIBLE_ADDRESSES = [
'3QZWeXoFxk3Sxr2rZ7iFGLBqGuYny4PGPE',
'34xSpck4yJ3kjMWzaynKVFmzwY7u3KjoDC',
'3PtdPR38hG3PbX5bqGD5gKXmXCY9fLtFi3',
'3KurtNhsTjMjNCrp8PDEBZ7bpHnbh8W1sN',
]
# If you think any of your possible addresses must be among the first 3 addresses generated, then change this to 3
NUM_OF_ADDRESSES_TO_GENERATE = 5 # less is faster
def _create_standard_wallet(ks):
    """Build a Standard_Wallet around keystore `ks`, limited to
    NUM_OF_ADDRESSES_TO_GENERATE receiving addresses."""
    storage = WalletStorage('if_this_exists_mocking_failed_648151893')
    storage.put('keystore', ks.dump())
    storage.put('gap_limit', NUM_OF_ADDRESSES_TO_GENERATE)
    wallet = Standard_Wallet(storage)
    wallet.synchronize()
    return wallet
def test_bip39_seed_bip49_p2sh_segwit(password):
    """Derive a BIP49 wallet from SEED_WORDS plus `password` and report
    whether any generated receiving address is in POSSIBLE_ADDRESSES.

    Returns (True, matching_address) on a hit, (False, None) otherwise.
    """
    # The BIP32/43 path below could be made a parameter:
    keystore = from_bip39_seed(SEED_WORDS, password, "m/49'/0'/0'")
    wallet = _create_standard_wallet(keystore)
    match = next(
        ((True, derived)
         for candidate in POSSIBLE_ADDRESSES
         for derived in wallet.get_receiving_addresses()
         if candidate == derived),
        (False, None))
    return match
def check_pass(password, failures):
    """Try one candidate password: on a hit, print the details and exit
    the process with status 1; on a miss, log it and return False."""
    found, address = test_bip39_seed_bip49_p2sh_segwit(password)
    if not found:
        print(failures + '. NOT: ' + password)
        return False
    print(failures + '. FOUND!\npassword: "' + password + '"\naddress: "' + address + '"')
    sys.exit(1)
# Read passwords from STDIN and check them against known address above
failures = 1
# check_pass() terminates the whole process on a hit, so this loop only
# ever counts (and numbers) the misses
for password in sys.stdin.read().split('\n'):
    if not check_pass(password, str(failures)):
        failures += 1
| true |
3c138392271675bd8caf3c8a93ce20e8bf2c0e1a | Python | Nikolov-A/SoftUni | /PythonBasics/E_Easter_eggs_battle.py | UTF-8 | 625 | 4 | 4 | [] | no_license | eggs_player_1 = int(input())
eggs_player_2 = int(input())
# `winner` holds the round's raw command: "one", "two" or the sentinel
# "End of battle"
winner = None
while winner != "End of battle":
    winner = input()
    # the named player won the round, so the OTHER player loses one egg
    if winner == "one":
        eggs_player_2 -= 1
    elif winner == "two":
        eggs_player_1 -= 1
    # knockout: stop as soon as either player runs out of eggs
    if eggs_player_1 == 0:
        print(f"Player one is out of eggs. Player two has {eggs_player_2} eggs left.")
        break
    elif eggs_player_2 == 0:
        print(f"Player two is out of eggs. Player one has {eggs_player_1} eggs left.")
        break
# only reached without a knockout (a break leaves `winner` as "one"/"two")
if winner == "End of battle":
    print(f"""Player one has {eggs_player_1} eggs left.
Player two has {eggs_player_2} eggs left.""")
9456097f653c47004e5f28ae43f19b9138fea4e4 | Python | ArmanHZ/CS401-GitHub_Analyzer | /DevelopersNetwork andClustring/Developers Network.py | UTF-8 | 3,524 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
# Developers simple network graph
import networkx as nx
import matplotlib.pyplot as plt
class Link:
def __init__(self, _source, _target, _value):
self.source = _source
self.target = _target
self.value = _value
links = []
with open("PartnerDevelopersCount.txt", encoding="utf-8") as fp:
for line in fp:
line_elements = line.split(" ")
link = Link(line_elements[0], line_elements[1], line_elements[2])
links.append(link)
my_graph = nx.Graph()
for i in links:
my_graph.add_edge(i.source, i.target, weight=int(i.value))
plt.figure(figsize = (5, 5))
plt.savefig("DevelopersNetwork.png")
nx.draw_circular(my_graph, with_labels=True, font_weight='bold', node_color="blue")
# In[4]:
import networkx as nx
G = nx.read_weighted_edgelist('PartnerDevelopersCount.txt', delimiter =" ")
population = {
'Vadim' : 628,
'Aarni' : 15,
'Umar' : 2,
'Alexander' : 11,
'Konstantin' : 2,
'Justin' : 1,
'Robert' : 48,
'Eli' : 1,
'Maxim' : 12,
'egor' : 3,
'Máximo' : 2,
'Marcelo' : 1,
'Andrew' : 15,
'hnarasaki' : 1,
'Jim' : 1,
'Adam' : 1,
'Santiago' : 1,
'moncho' : 2,
'Jeff' : 1
}
# In[5]:
for i in list(G.nodes()):
G.nodes[i]['population'] = population[i]
nx.draw_networkx(G, with_label = True)
# In[6]:
# fixing the size of the figure
plt.figure(figsize =(10, 7))
node_color = [G.degree(v) for v in G]
# node colour is a list of degrees of nodes
node_size = [10 * nx.get_node_attributes(G, 'population')[v] for v in G]
# size of node is a list of population of cities
edge_width = [0.4 * G[u][v]['weight'] for u, v in G.edges()]
# width of edge is a list of weight of edges
nx.draw_networkx(G, node_size = node_size,
node_color = node_color, alpha = 0.7,
with_labels = True, width = edge_width,
edge_color ='.5', cmap = plt.cm.Blues)
plt.axis('off')
plt.tight_layout();
plt.savefig("developersNetworkwithWeighted.png")
# In[10]:
print("Random Layout:")
node_color = [G.degree(v) for v in G]
node_size = [10 * nx.get_node_attributes(G, 'population')[v] for v in G]
edge_width = [0.4 * G[u][v]['weight'] for u, v in G.edges()]
plt.figure(figsize =(10, 9))
pos = nx.random_layout(G)
# demonstrating random layout
nx.draw_networkx(G, pos, node_size = node_size,
node_color = node_color, alpha = 0.7,
with_labels = True, width = edge_width,
edge_color ='.4', cmap = plt.cm.Blues)
plt.figure(figsize =(10, 9))
pos = nx.circular_layout(G)
print("Circular Layout:")
# demonstrating circular layout
nx.draw_networkx(G, pos, node_size = node_size,
node_color = node_color, alpha = 0.7,
with_labels = True, width = edge_width,
edge_color ='.4', cmap = plt.cm.Blues)
plt.savefig("DevelopersWeightedCircular.png")
# In[14]:
#colored
import networkx as nx
G_fb = nx.read_edgelist('partnerDevelopers.txt', create_using = nx.Graph(), nodetype=str)
pos = nx.spring_layout(G_fb)
betCent = nx.betweenness_centrality(G_fb, normalized=True, endpoints=True)
node_color = [100 * G_fb.degree(v) for v in G_fb]
node_size = [v * 10000 for v in betCent.values()]
plt.figure(figsize=(20,20))
nx.draw_networkx(G_fb, pos=pos, with_labels=True,
node_color=node_color,
node_size=node_size )
plt.savefig("DevelopersColored.png")
# In[ ]:
| true |
2e7e246281e7100f3cb3143bd5461512bc422965 | Python | gersonUrban/find_best_words_with_chi2 | /data_prep.py | UTF-8 | 1,007 | 3.359375 | 3 | [] | no_license | from nltk.corpus import stopwords
def basic_preprocess_text(text_series, language='english'):
    '''
    Basic text preparation for a sentiment-analysis dataset.

    text_series: pandas Series with the texts to be treated
    language: stopwords language handed to nltk (default 'english')
    return: pandas Series with the treated text

    FIX: regex=True is now passed explicitly to every pattern-based
    str.replace call. pandas >= 2.0 defaults to literal (regex=False)
    replacement, which would silently break all of these patterns.
    '''
    # Lowercase first so stopword matching is case-insensitive
    text_series = text_series.str.lower()
    # One alternation pattern that matches any stopword as a whole word
    pat = r'\b(?:{})\b'.format('|'.join(stopwords.words(language)))
    # Remove the stopwords
    text_series = text_series.str.replace(pat, '', regex=True)
    # Collapse the runs of whitespace left behind into single spaces
    text_series = text_series.str.replace(r'\s+', ' ', regex=True)
    # Normalize to NFKD in case words carry special characters
    text_series = text_series.str.normalize('NFKD')
    # Strip punctuation (anything that is not a word char or whitespace)
    text_series = text_series.str.replace(r'[^\w\s]', '', regex=True)
    # Remove numeric substrings (a space followed by digits)
    text_series = text_series.str.replace(r' \d+', '', regex=True)
    return text_series
| true |
2230dcca831f061291c7f96e93c0ac47a6fc5b09 | Python | Abiyash/guvi | /code kata/prime.py | UTF-8 | 137 | 3.25 | 3 | [] | no_license | a=int(input())
# BUG FIX: the original guarded with `a > 0`, which let a == 1 fall through
# to the for/else branch and be reported as prime. Primality requires a > 1.
if a > 1:
    for x in range(2, a):
        if a % x == 0:
            print("no")
            break
    else:
        # no divisor found in [2, a): a is prime
        print("yes")
else:
    # 0, 1 and negative numbers are not prime
    print("no")
| true |
7d1f106223c7d1ab1e4620d9d36b29a15adbbb9c | Python | cyberLaVoy/algorithms-notebook | /python/bfs.py | UTF-8 | 571 | 3.53125 | 4 | [] | no_license | from queue import Queue
# graph: a list of lists (adjacency list) [ [ w0, w1, ...], ...]
# start: starting vertex as index to adjacency list
# Output: the step-wise distance to all vertices from start vertex
def bfs(graph, start):
    """Breadth-first search over an adjacency list.

    graph: list of lists; graph[u] holds the neighbour indices of vertex u
    start: index of the starting vertex
    Returns a list with the hop distance from `start` to every vertex;
    unreachable vertices keep the value None.
    """
    # collections.deque is the idiomatic FIFO for single-threaded BFS;
    # queue.Queue adds needless thread-synchronisation overhead here.
    from collections import deque

    distance = [None] * len(graph)
    distance[start] = 0
    frontier = deque([start])  # FIFO queue of discovered vertices
    while frontier:
        u = frontier.popleft()
        for v in graph[u]:
            if distance[v] is None:
                distance[v] = distance[u] + 1
                frontier.append(v)
    return distance
| true |
a46d190b3af807185b10e1a961267fe6922332de | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2211/60624/278179.py | UTF-8 | 609 | 2.921875 | 3 | [] | no_license | def func10():
    # first line: n (number of name records) and k (number of queries)
    temp = list(map(int, input().split(" ")))
    n = temp[0]
    k = temp[1]
    # each name read is prepended to the running concatenation of the
    # previous cumulative name, building n cumulative strings
    names = [input().split(" ")[0]]
    for i in range(n-1):
        names.append(input().split(" ")[0]+names[i])
    # read the k query prefixes
    interesting_names = []
    while k > 0:
        k -= 1
        interesting_names.append(input())
    # for every query, count how many cumulative names start with it
    ans = []
    for interest in interesting_names:
        tmp = 0
        for name in names:
            if len(name) >= len(interest):
                if name[:len(interest)] == interest:
                    tmp += 1
        ans.append(tmp)
    # emit one count per query, in input order
    for res in ans:
        print(res)
    return
func10()
ac1e7292ea135c0f00f69eb7a5b34dc87ed8de6f | Python | rrwt/daily-coding-challenge | /gfg/heaps/connect_ropes.py | UTF-8 | 670 | 3.828125 | 4 | [
"MIT"
] | permissive | """
There are n ropes of different lengths, we need to connect these
ropes into one rope. The cost to connect two ropes is equal to sum of
their lengths. We need to connect the ropes with minimum cost.
"""
import heapq
from typing import List
def connect_cost(ropes: List[int]) -> int:
    """Return the minimum total cost of connecting all ropes into one.

    Joining two ropes costs the sum of their lengths, so the optimal
    (Huffman-style) strategy is to repeatedly merge the two shortest
    ropes and push the merged rope back into the pool.

    BUG FIX: the original never re-inserted the merged rope into the
    heap; it always extended one ever-growing rope with the next
    smallest piece, which overcharges inputs such as [1, 1, 1, 1]
    (it returned 9 where the optimum is 8).

    The input list is heapified in place (same as the original).
    """
    length = len(ropes)
    if length == 0:
        return 0
    if length == 1:
        # preserved from the original: a single rope "costs" its own length
        return ropes[0]
    heapq.heapify(ropes)
    total = 0
    while len(ropes) > 1:
        first = heapq.heappop(ropes)
        second = heapq.heappop(ropes)
        merged = first + second
        total += merged
        # the merged rope competes with the remaining ropes
        heapq.heappush(ropes, merged)
    return total
if __name__ == "__main__":
    # smoke test: optimal merges for [4, 3, 2, 6] cost 5 + 9 + 15 = 29
    assert connect_cost([4, 3, 2, 6]) == 29
| true |
fcaa4403f299b93115cc2c70aa24e3b904905308 | Python | Kurolox/AdventOfCode17 | /Python/7/part2.py | UTF-8 | 1,096 | 3.453125 | 3 | [] | no_license | disk_dict = {}
weight_dict = {}
problematic_nodes = []
def find_weight(program):
    """Return the total weight of `program` plus everything stacked on it.

    Reads the module-level tables `disk_dict` (children per program) and
    `weight_dict` (own weight per program). Side effect: appends
    `program` to the global `problematic_nodes` list whenever its
    sub-towers are unbalanced (their total weights differ).
    """
    # Leaf program: nothing stacked above, so its weight is just its own
    if not disk_dict[program]:
        return weight_dict[program]
    # Recursively weigh every sub-tower held up by this program
    # (fix: the original looped with enumerate() but never used the index)
    weights = [find_weight(child) for child in disk_dict[program]]
    if len(set(weights)) != 1:
        problematic_nodes.append(program)
    return sum(weights) + weight_dict[program]
# Parse the puzzle input: each line is "name (weight)" optionally followed
# by "-> child, child, ..."
with open("input", "r") as the_input:
    for line in the_input:
        try:
            disk_dict[line.split()[0]] = [program.strip() for program in line.split("->")[1].split(",")]
        except IndexError:
            # no "->" part on this line: the program holds no others
            disk_dict[line.split()[0]] = []
        weight_dict[line.split()[0]] = int(line.split()[1].lstrip("(").rstrip(")"))
# Weigh every tower so unbalanced nodes get collected in problematic_nodes
for program, weight in disk_dict.items():
    find_weight(program)
# Now we have a list with nodes causing issues. The one with a weight issue will be the one with the lightest weight, since it will be near the top."
| true |
ad40731ed07afe9791d0f45db99932ce73dc9a59 | Python | andrebargas/xor-neural-net | /xor_neural_network.py | UTF-8 | 2,082 | 3.546875 | 4 | [
"MIT"
] | permissive | from numpy import exp, array, dot, random
class NeuralNetwork():
    """A tiny 3-2-1 feed-forward network trained with plain backpropagation.

    Layer 1 maps the 3 input units to 2 hidden units and layer 2 maps the
    hidden units to a single output; both layers use a sigmoid activation.
    """

    def __init__(self):
        # seed the RNG so the initial weights (and training) are reproducible
        random.seed(1)
        # weights drawn uniformly from [-1, 1)
        self.synaptic_weights_1layer = 2 * random.random((3, 2)) - 1
        self.synaptic_weights_2layer = 2 * random.random((2, 1)) - 1

    def sigmoid(self, x):
        """Logistic activation, squashing x into (0, 1)."""
        result = 1 / (1 + exp(-x))
        return result

    def sigmoid_derivative(self, x):
        """Derivative of the sigmoid, expressed in terms of its OUTPUT x."""
        result = x * (1 - x)
        return result

    def think(self, inputs):
        """Forward pass through the first (hidden) layer."""
        result = self.sigmoid(dot(inputs, self.synaptic_weights_1layer))
        return result

    def think_2layer(self, inputs):
        """Forward pass through the second (output) layer."""
        result = self.sigmoid(dot(inputs, self.synaptic_weights_2layer))
        return result

    def think_all(self, inputs):
        """Full forward pass: input -> hidden -> output."""
        result = self.think_2layer(self.think(inputs))
        return result

    def train(self, training_set_inputs, training_set_outputs,
              number_of_training_interations):
        """Run backpropagation for the given number of iterations.

        FIX: the loop originally used the Python-2-only `xrange`;
        `range` behaves identically when iterated and also works on
        Python 3.
        """
        for interation in range(number_of_training_interations):
            # forward pass
            outputs_1layer = self.think(training_set_inputs)
            outputs_2layer = self.think_2layer(outputs_1layer)
            # backpropagate the output error through both layers
            error_2layer = training_set_outputs - outputs_2layer
            delta_2layer = error_2layer * self.sigmoid_derivative(outputs_2layer)
            error_1layer = dot(delta_2layer, self.synaptic_weights_2layer.T)
            delta_1layer = error_1layer * self.sigmoid_derivative(outputs_1layer)
            # accumulate the weight updates and apply them in place
            adjustment_2layer = dot(outputs_1layer.T, delta_2layer)
            adjustment_1layer = dot(training_set_inputs.T, delta_1layer)
            self.synaptic_weights_1layer += adjustment_1layer
            self.synaptic_weights_2layer += adjustment_2layer
if __name__ == "__main__":
    neural_network = NeuralNetwork()
    # XOR training set on the first two input bits (the third bit is unused)
    training_inputs = array([[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0]])
    training_outputs = array([[0], [1], [1], [0]])
    neural_network.train(training_inputs, training_outputs, 5)
    # Python-2 print statements (consistent with the rest of this file)
    print "Testando com entradas 00 ; 01 ; 10 ; 11 :"
    print neural_network.think_all(training_inputs)
| true |
04a2eb6f6a074ccc997bb897d50b5f0dd679a818 | Python | mkao006/dl_udacity | /deep_learning_nano_degree/4_recurrent_neural_networks/seq2seq/seq2seq.py | UTF-8 | 14,751 | 3.34375 | 3 | [] | no_license | # Steps for training a seq2seq model.
#
# Data processing:
# - Create dictionary to convert words into indexes.
# - Append special start and end tokens.
# - Pad sequence to maximum length. (7 in this example)
# - Pad target sequence with start token
#
# Model:
# - Create embedding layer for input sequence.
# - Create LSTM to generate hidden state
# - Create embedding layer for target sequence.
# - Create LSTM to decode both the target sequence and the hidden state
#   from encoder.
# - Create fully connected layer for decoder LSTM output.
# - Create trainer for the decoder LSTM
# - Create inference decoder; this actually generates the prediction.
# - Train the model
import tensorflow as tf
import numpy as np
import helper
source_path = 'data/letters_source.txt'
target_path = 'data/letters_target.txt'
source_sentences = helper.load_data(source_path).split()
target_sentences = helper.load_data(target_path).split()
# params
start_token = '<s>'
end_token = '<\s>'
unknown_token = '<unk>'
pad_token = '<pad>'
epochs = 1
batch_size = 128
rnn_size = 50
num_layers = 2
encoding_embedding_size = 13
decoding_embedding_size = 13
learning_rate = 0.001
class SeqToSeq:
    """Character-level sequence-to-sequence model (TF1 contrib API).

    Builds vocabulary/index tables from the source and target sequences,
    pads everything to the longest sequence, and defines an LSTM
    encoder/decoder graph with a training decoder and a greedy inference
    decoder sharing weights.
    """

    def __init__(self,
                 source,
                 target,
                 start_token='<s>',
                 end_token='<\s>',
                 unknown_token='<unk>',
                 pad_token='<pad>',
                 epochs=60,
                 batch_size=128,
                 rnn_size=50,
                 num_layers=2,
                 encoding_embedding_size=13,
                 decoding_embedding_size=13,
                 learning_rate=0.001):
        ''' Defines the processing and model hyperparameters.
        '''
        self.start_token = start_token
        self.end_token = end_token
        self.unknown_token = unknown_token
        self.pad_token = pad_token
        self.epochs = epochs
        self.batch_size = batch_size
        self.rnn_size = rnn_size
        self.num_layers = num_layers
        self.encoding_embedding_size = encoding_embedding_size
        self.decoding_embedding_size = decoding_embedding_size
        self.learning_rate = learning_rate
        self.special_tokens = [self.start_token,
                               self.end_token,
                               self.unknown_token,
                               self.pad_token]
        # every sequence is padded to the longest one seen in either set
        self.max_seq_len = max([len(item) for item in source + target])
        self.source_ind, self.target_ind = (
            self._convert_sequence_to_ind(source, target))
        # hold out the first batch_size samples for validation
        self.train_source = self.source_ind[self.batch_size:]
        self.train_target = self.target_ind[self.batch_size:]
        self.valid_source = self.source_ind[:self.batch_size]
        self.valid_target = self.target_ind[:self.batch_size]

    def _batch_generator(self, source, target):
        """Yield aligned (source, target) batches of exactly batch_size
        samples, dropping the trailing partial batch. Also records the
        batch count in self.n_batches."""
        self.n_batches = len(source) // self.batch_size
        truncated_sample_size = self.n_batches * self.batch_size
        truncated_source = source[:truncated_sample_size]
        truncated_target = target[:truncated_sample_size]
        for start in range(0, truncated_sample_size, self.batch_size):
            end = start + self.batch_size
            yield truncated_source[start:end], truncated_target[start:end]

    def _convert_sequence_to_ind(self, source, target, padding=True):
        '''Function to convert the source and target to indexes using the
        dictionary constructed. Sequences are also padded.
        '''
        complete_sequence = source + target
        set_words = set([character
                         for item in complete_sequence
                         for character in item])
        complete_set = self.special_tokens + list(set_words)
        self.int_to_vocab = {word_ind: word
                             for word_ind, word in enumerate(complete_set)}
        self.vocab_to_int = {word: word_ind
                             for word_ind, word in self.int_to_vocab.items()}
        self.vocab_size = len(self.vocab_to_int)

        unknown_ind = self.vocab_to_int[self.unknown_token]
        source_sequence_ind = [
            [self.vocab_to_int.get(letter, unknown_ind) for letter in item]
            for item in source]
        target_sequence_ind = [
            [self.vocab_to_int.get(letter, unknown_ind) for letter in item]
            for item in target]

        if padding:
            # NOTE(review): sequences are padded with the <unk> index even
            # though a dedicated pad_token exists -- presumably intentional,
            # but worth confirming.
            padding_ind = [self.vocab_to_int[self.unknown_token]]
            source_sequence_ind = [seq +
                                   padding_ind * (self.max_seq_len - len(seq))
                                   for seq in source_sequence_ind]
            target_sequence_ind = [seq +
                                   padding_ind * (self.max_seq_len - len(seq))
                                   for seq in target_sequence_ind]

        return source_sequence_ind, target_sequence_ind

    def initialise_graph(self):
        """Build the whole TF1 graph: placeholders, encoder, training and
        inference decoders, loss and the gradient-clipped Adam train op.
        Must be called after the vocabulary tables exist (i.e. after
        __init__ has run _convert_sequence_to_ind)."""
        # The graph should be defined here.
        self.graph = tf.Graph()
        with tf.Session(graph=self.graph):

            # Define placeholders
            self.source = tf.placeholder(tf.int32,
                                         shape=[self.batch_size,
                                                self.max_seq_len],
                                         name='source')
            self.target = tf.placeholder(tf.int32,
                                         shape=[self.batch_size,
                                                self.max_seq_len],
                                         name='target')

            # Define encoding embedding
            #
            # NOTE (Michael): Do we need the vocab size? TO me it make
            #                 sense that it is required. If this is
            #                 the case, then we may need to move the
            #                 creation of graph after the data has
            #                 been processed.
            self.encoder_embed = (
                tf.contrib.layers.embed_sequence(
                    ids=self.source,
                    vocab_size=self.vocab_size,
                    embed_dim=self.encoding_embedding_size))

            # Define encoding LSTM
            #
            # NOTE (Michael): Can we implement dropout here?
            encoder_cell = tf.contrib.rnn.BasicLSTMCell(
                num_units=self.rnn_size)
            encoder = tf.contrib.rnn.MultiRNNCell(
                cells=[encoder_cell] * self.num_layers)

            # NOTE (Michael): We don't need the output of the RNN,
            #                 since only the state is passed to the
            #                 decoder.
            _, self.encoder_state = tf.nn.dynamic_rnn(cell=encoder,
                                                      inputs=self.encoder_embed,
                                                      dtype=tf.float32)

            # Define decoder input
            #
            # NOTE (Michael): This implies that we need to move the
            #                 construction of the grph after the data
            #                 processing since we don't have the
            #                 'vocab_to_int' dictionary yet!!
            start_ind = self.vocab_to_int[self.start_token]
            # NOTE(review): `batch_size` below is the MODULE-LEVEL global,
            # not self.batch_size -- it only works while the two agree;
            # should presumably be self.batch_size.
            self.decoder_input = tf.concat(
                [tf.fill([batch_size, 1], start_ind),
                 tf.strided_slice(input_=self.target,
                                  begin=[0, 0],
                                  end=[self.batch_size, -1],
                                  strides=[1, 1])],
                axis=1
            )

            # Define decoder embedding
            self.decoder_embed_weights = (
                tf.Variable(tf.random_uniform([self.vocab_size,
                                               self.decoding_embedding_size]),
                            name='decoder_embed_weights'))
            self.decoder_embed = tf.nn.embedding_lookup(
                params=self.decoder_embed_weights,
                ids=self.decoder_input)

            # Define decoder LSTM
            decoder_cell = tf.contrib.rnn.BasicLSTMCell(
                num_units=self.rnn_size)
            decoder = tf.contrib.rnn.MultiRNNCell(
                cells=[decoder_cell] * self.num_layers)

            # Decode the output of LSTM and generate prediction
            with tf.variable_scope('decoding') as decoding_scope:
                # Output Layer
                output_fn = (
                    lambda x: tf.contrib.layers.fully_connected(
                        inputs=x,
                        num_outputs=self.vocab_size,
                        activation_fn=None,
                        scope=decoding_scope))

                # Training Decoder
                train_decoder_fn = (
                    tf.contrib.seq2seq.simple_decoder_fn_train(
                        encoder_state=self.encoder_state))
                train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
                    cell=decoder,
                    decoder_fn=train_decoder_fn,
                    inputs=self.decoder_embed,
                    sequence_length=self.max_seq_len,
                    scope=decoding_scope)
                train_logits = output_fn(train_pred)

            # reuse=True so the inference decoder shares the trained weights
            with tf.variable_scope('decoding', reuse=True) as decoding_scope:
                # Inference Decoder
                infer_decoder_fn = (
                    tf.contrib.seq2seq.simple_decoder_fn_inference(
                        output_fn=output_fn,
                        encoder_state=self.encoder_state,
                        embeddings=self.decoder_embed_weights,
                        start_of_sequence_id=self.vocab_to_int[self.start_token],
                        end_of_sequence_id=self.vocab_to_int[self.end_token],
                        maximum_length=self.max_seq_len - 1,
                        num_decoder_symbols=self.vocab_size))
                self.inference_logits, _, _ = (
                    tf.contrib.seq2seq.dynamic_rnn_decoder(cell=decoder,
                                                           decoder_fn=infer_decoder_fn,
                                                           scope=decoding_scope))

            # Define the loss function and optimiser
            self.loss = (
                tf.contrib.seq2seq.sequence_loss(logits=train_logits,
                                                 targets=self.target,
                                                 weights=tf.ones([self.batch_size,
                                                                  self.max_seq_len])))
            self.optimiser = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate)

            # Gradient Clipping
            gradients = self.optimiser.compute_gradients(self.loss)
            capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var)
                                for grad, var in gradients if grad is not None]
            self.train_ops = self.optimiser.apply_gradients(capped_gradients)

    def train(self):
        ''' Method to train the seq2seq RNN.
        '''
        with tf.Session(graph=self.graph) as sess:
            sess.run(tf.global_variables_initializer())

            for epoch in range(self.epochs):
                for batch, (source_batch, target_batch) in enumerate(
                        self._batch_generator(self.train_source,
                                              self.train_target)):
                    # one optimisation step on the current batch
                    _, loss = sess.run([self.train_ops, self.loss],
                                       feed_dict={
                                           self.source: source_batch,
                                           self.target: target_batch})

                    # greedy inference on train and validation data,
                    # purely for the progress report below
                    batch_train_logits = sess.run(
                        self.inference_logits,
                        feed_dict={self.source: source_batch})
                    batch_valid_logits = sess.run(
                        self.inference_logits,
                        feed_dict={self.source: self.valid_source})

                    train_acc = np.mean(
                        np.equal(target_batch,
                                 np.argmax(batch_train_logits, axis=2)))
                    valid_acc = np.mean(
                        np.equal(self.valid_target,
                                 np.argmax(batch_valid_logits, axis=2)))
                    print('''Epoch {:>3} Batch {:>4}/{} Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'''
                          .format(epoch,
                                  batch,
                                  self.n_batches,
                                  train_acc,
                                  valid_acc,
                                  loss))

    def respond(self, input_sentence):
        ''' Method to give a response based on new input.
        '''
        # encode the raw characters, falling back to <unk> for novelties
        unknown_ind = self.vocab_to_int[self.unknown_token]
        input_sentence_ind = [self.vocab_to_int.get(char, unknown_ind)
                              for char in input_sentence]
        padding_ind = [self.vocab_to_int[self.unknown_token]]
        input_sentence_ind = input_sentence_ind + \
            padding_ind * (self.max_seq_len - len(input_sentence_ind))
        # the graph expects a full batch, so place the sentence in row 0
        # of an otherwise zeroed batch
        batch_shell = np.zeros((self.batch_size, self.max_seq_len))
        batch_shell[0] = input_sentence_ind
        with tf.Session(graph=self.graph) as sess:
            chatbot_logits = sess.run(
                self.inference_logits, {self.source: batch_shell})[0]

        print('Input')
        print(' Word Ids: {}'.format(
            [i for i in input_sentence_ind]))
        print(' Input Words: {}'.format(
            [self.int_to_vocab[i] for i in input_sentence_ind]))
        print('\nPrediction')
        print(' Word Ids: {}'.format(
            [i for i in np.argmax(chatbot_logits, 1)]))
        print(' Chatbot Answer Words: {}'.format(
            [self.int_to_vocab[i] for i in np.argmax(chatbot_logits, 1)]))
# Wire the hyperparameters defined above into the model, build the
# TensorFlow graph, train it, and query it once.
model = SeqToSeq(source=source_sentences,
                 target=target_sentences,
                 start_token=start_token,
                 end_token=end_token,
                 unknown_token=unknown_token,
                 pad_token=pad_token,
                 epochs=epochs,
                 batch_size=batch_size,
                 rnn_size=rnn_size,
                 num_layers=num_layers,
                 encoding_embedding_size=encoding_embedding_size,
                 decoding_embedding_size=decoding_embedding_size,
                 learning_rate=learning_rate)
model.initialise_graph()
model.train()
model.respond('hello')
| true |
dec404ac01d62b54c6eb62d1a30d66e8391539e6 | Python | ArtjomKotkov/Tobe | /tobe/bot/games/types.py | UTF-8 | 2,098 | 3.015625 | 3 | [] | no_license | from ..types import BaseType
from ..base.types import PhotoSize, MessageEntity, Animation, User
class Game(BaseType):
    """A game playable on the platform.

    Games are created and edited via BotFather; their short names act as
    unique identifiers.

    Parameters
    ----------
    title : String
        Title of the game.
    description : String
        Description of the game.
    photo : Array of PhotoSize
        Photo displayed in the game message in chats.
    text : String, optional
        Brief description of the game or high scores included in the game
        message (0-4096 characters). Can be edited automatically to show
        current high scores when the bot calls setGameScore, or manually
        via editMessageText.
    text_entities : Array of MessageEntity, optional
        Special entities that appear in `text`, such as usernames, URLs,
        bot commands, etc.
    animation : Animation, optional
        Animation displayed in the game message in chats; uploaded via
        BotFather.
    """

    def __init__(self, title, description, photo,
                 text=None, text_entities=None, animation=None):
        super().__init__()
        # required fields
        self.title = title
        self.description = description
        self.photo = PhotoSize.parse(photo, iterable=True)
        # optional fields
        self.text = text
        self.text_entities = MessageEntity.parse(text_entities, iterable=True)
        self.animation = Animation.parse(animation, iterable=True)
class GameHighScore(BaseType):
    """A single row of a game's high-score table.

    Parameters
    ----------
    position : Integer
        Position in high score table for the game
    user : User
        User
    score : Integer
        Score
    """

    def __init__(self, position, user, score):
        super().__init__()
        # Plain integers are stored as-is; only `user` needs to be
        # deserialised into its wrapper type.
        self.position = position
        self.user = User.parse(user)
        self.score = score
| true |
10ae1fdde42992888ec43a47ed5909d651b50022 | Python | MarvinLiangWW/learning_python_cookbook | /第一章:数据结构与算法/learning.py | UTF-8 | 3,893 | 3.484375 | 3 | [] | no_license | # 需要掌握一些基础的包 以及内置的常用函数,加快处理
# 学习这个用来记笔记的话还是用jupyter notebook比较好一点
from collections import Counter
# most_common
from collections import deque
import heapq
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
print(heapq.nlargest(3, nums)) # Prints [42, 37, 23]
print(heapq.nsmallest(3, nums)) # Prints [-4, 1, 2]
class PriorityQueue:
    """Max-priority queue built on top of heapq's min-heap.

    Priorities are negated on insertion so the min-heap pops the
    highest-priority item first; a monotonically increasing counter
    breaks ties in FIFO order and prevents items from ever being
    compared directly.
    """

    def __init__(self):
        self._queue = []
        self._index = 0

    def push(self, item, priority):
        """Insert `item` with the given `priority` (higher pops first)."""
        entry = (-priority, self._index, item)
        heapq.heappush(self._queue, entry)
        self._index += 1

    def pop(self):
        """Remove and return the highest-priority (oldest-on-tie) item."""
        _, _, item = heapq.heappop(self._queue)
        return item
#我常用的写法
d = {}
for key, value in pairs:
if key not in d:
d[key] = []
d[key].append(value)
# 可能会更优雅一点
d = defaultdict(list)
for key, value in pairs:
d[key].append(value)
# 没有理解的这个*号
a = slice(5, 50, 2)
s = 'HelloWorld'
a.indices(len(s)) # (5, 10, 2)
for i in range(*a.indices(len(s))):
print(s[i])
# Demo: sorting a list of dicts by one of their keys with itemgetter.
rows = [
    {'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},
    {'fname': 'David', 'lname': 'Beazley', 'uid': 1002},
    {'fname': 'John', 'lname': 'Cleese', 'uid': 1001},
    {'fname': 'Big', 'lname': 'Jones', 'uid': 1004}
]
from operator import itemgetter  # extracts dict values to use as sort keys
rows_by_fname = sorted(rows, key=itemgetter('fname'))
rows_by_uid = sorted(rows, key=itemgetter('uid'))
print(rows_by_fname)
print(rows_by_uid)
# Bug fix: the module is spelled `operator`, not `operater` -- the
# original line raised ImportError.  `attrgetter` is the object-attribute
# counterpart of `itemgetter`, used below to sort class instances.
from operator import attrgetter
class User:
    """Minimal record type used to demonstrate attribute-based sorting."""

    def __init__(self, user_id):
        self.user_id = user_id

    def __repr__(self):
        # Classic cookbook-style representation: User(<id>).
        return f'User({self.user_id})'
def sort_not_compare():
    """Demo: sort objects that do not define comparison operators.

    Relies on the module-level ``User`` class and on ``attrgetter`` from
    the ``operator`` module being imported at module scope (note: the
    import earlier in this file is misspelled, so verify it before
    running).
    """
    users = [User(23), User(3), User(99)]
    print(users)
    # A lambda works as the sort key ...
    print(sorted(users, key=lambda u: u.user_id))
    # ... but attrgetter is the idiomatic (and faster) equivalent.
    # The result is discarded here; the line only demonstrates the call.
    sorted(users, key=attrgetter('user_id'))
# 使用 groupby in itertools
rows = [
{'address': '5412 N CLARK', 'date': '07/01/2012'},
{'address': '5148 N CLARK', 'date': '07/04/2012'},
{'address': '5800 E 58TH', 'date': '07/02/2012'},
{'address': '2122 N CLARK', 'date': '07/03/2012'},
{'address': '5645 N RAVENSWOOD', 'date': '07/02/2012'},
{'address': '1060 W ADDISON', 'date': '07/02/2012'},
{'address': '4801 N BROADWAY', 'date': '07/01/2012'},
{'address': '1039 W GRANVILLE', 'date': '07/04/2012'},
]
from operator import itemgetter
from itertools import groupby
# Sort by the desired field first
rows.sort(key=itemgetter('date'))
# Iterate in groups
for date, items in groupby(rows, key=itemgetter('date')):
print(date)
for i in items:
print(' ', i)
# 如何用compress函数
from itertools import compress
addresses = [
'5412 N CLARK',
'5148 N CLARK',
'5800 E 58TH',
'2122 N CLARK',
'5645 N RAVENSWOOD',
'1060 W ADDISON',
'4801 N BROADWAY',
'1039 W GRANVILLE',
]
counts = [ 0, 3, 10, 4, 1, 7, 6, 1]
more5 = [n > 5 for n in counts]
list(compress(addresses, more5))
# 类似于类的调用
from collections import namedtuple
Subscriber = namedtuple('Subscriber', ['addr', 'joined'])
sub = Subscriber('jonesy@example.com', '2012-10-19')
def compute_cost(records):
    """Sum shares * price over `records` via the namedtuple pattern.

    NOTE(review): relies on a module-level ``Stock`` namedtuple (e.g.
    ``Stock = namedtuple('Stock', ['name', 'shares', 'price'])``) that
    is not defined anywhere in this file -- confirm before running.
    """
    total = 0.0
    for rec in records:
        # Unpack each raw record (an iterable of fields) into a Stock
        # so fields can be accessed by name instead of index.
        s = Stock(*rec)
        total += s.shares * s.price
    return total
# 可以省略一个临时列表
s = sum([x * x for x in nums])
s = sum((x * x for x in nums)) # 显式的传递一个生成器表达式对象
s = sum(x * x for x in nums) # 更加优雅的实现方式,省略了括号
# 现在有多个字典或者映射,你想将它们从逻辑上合并为一个单一的映射后执行某些操作, 比如查找值或者检查某些键是否存在
a = {'x': 1, 'z': 3 }
b = {'y': 2, 'z': 4 }
from collections import ChainMap
c = ChainMap(a,b)
print(c['x']) # Outputs 1 (from a)
print(c['y']) # Outputs 2 (from b)
print(c['z']) # Outputs 3 (from a)
| true |
3abd81148d21cbd5dff537453432310c3f5c383a | Python | Jimmy-INL/google-research | /tf3d/utils/voxel_utils_test.py | UTF-8 | 18,644 | 2.515625 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf3d.utils.voxel_utils."""
import numpy as np
from six.moves import range
import tensorflow as tf
from tf3d.utils import voxel_utils
class VoxelUtilsTest(tf.test.TestCase):
def get_sample_points(self):
return tf.constant([[10.0, 12.0, 2.0],
[2.0, 10.0, 9.0],
[1.0, 11.0, 11.0],
[0.0, 1.0, 11.0],
[0.0, 0.0, 10.0],
[-1.0, 1.0, 11.0],
[11.0, 11.0, 1.0],
[11.0, 12.0, -1.0],
[0.0, 0.0, 11.0],
[0.01, 0.0, 11.0]], dtype=tf.float32)
def test_crop_and_pad(self):
voxels = tf.ones([100, 75, 50, 3], dtype=tf.float32)
cropped_voxels_1 = voxel_utils.crop_and_pad_voxels(
voxels=voxels,
start_coordinates=[43, 40, 0, 0],
end_coordinates=[58, 61, voxels.shape[2], voxels.shape[3]])
cropped_voxels_2 = voxel_utils.crop_and_pad_voxels(
voxels=voxels,
start_coordinates=[-5, -5, 0, 0],
end_coordinates=[16, 16, voxels.shape[2], voxels.shape[3]])
cropped_voxels_3 = voxel_utils.crop_and_pad_voxels(
voxels=voxels,
start_coordinates=[84, 59, 0, 0],
end_coordinates=[115, 90, voxels.shape[2], voxels.shape[3]])
np_cropped_region_1 = cropped_voxels_1.numpy()
np_cropped_region_2 = cropped_voxels_2.numpy()
np_cropped_region_3 = cropped_voxels_3.numpy()
self.assertAllEqual(np_cropped_region_1.shape, (15, 21, 50, 3))
# Check that every value is a one
self.assertEqual(np_cropped_region_1.mean(), 1)
self.assertEqual(np_cropped_region_1.std(), 0)
self.assertAllEqual(np_cropped_region_2.shape, (21, 21, 50, 3))
# Check that the padded region is all zeros
self.assertEqual(np_cropped_region_2[:5, :5, :, :].sum(), 0)
# Check that for cropped regione very value is 1
self.assertEqual(np_cropped_region_2[5:, 5:, :, :].mean(), 1)
self.assertEqual(np_cropped_region_2[5:, 5:, :, :].std(), 0)
self.assertAllEqual(np_cropped_region_3.shape, (31, 31, 50, 3))
# Cropped region
self.assertEqual(np_cropped_region_3[:16, :16, :, :].mean(), 1)
# Padding region
self.assertEqual(np_cropped_region_3[:16, :16, :, :].std(), 0)
self.assertEqual(np_cropped_region_3[16:, 16:, :, :].sum(), 0)
def test_pointcloud_to_voxel_grid_shapes(self):
start_locations = [(-5, -5, -5),
(0, 0, 0),
(2.5, 2.5, 2.5)]
end_locations = [(0, 0, 0),
(10, 10, 10),
(3, 3, 3)]
grid_cell_sizes = [(0.5, 0.5, 0.5),
(0.1, 0.1, 0.1),
(0.5, 0.5, 0.5)]
feature_dims = [3, 5, 10]
expected_output_shapes = [(10, 10, 10, 3),
(100, 100, 100, 5),
(1, 1, 1, 10)]
# For each test case we want to check if the output shape matches
for test_case in range(3):
points = tf.constant([[0.1, 0.1, 0.1]], tf.float32)
features = tf.constant([list(range(feature_dims[test_case]))], tf.float32)
voxel_grid, segment_ids, _ = voxel_utils.pointcloud_to_voxel_grid(
points=points,
features=features,
grid_cell_size=grid_cell_sizes[test_case],
start_location=start_locations[test_case],
end_location=end_locations[test_case])
self.assertEqual(voxel_grid.shape,
tuple(expected_output_shapes[test_case]))
self.assertEqual(segment_ids.shape, (1,))
def test_pointcloud_to_voxel_grid(self):
points = self.get_sample_points()
grid_cell_size = (20, 20, 20)
start_location = (-20, -20, -20)
end_location = (20, 20, 20)
features = tf.constant([[10.0, 12.0, 2.0, 1.0],
[2.0, 10.0, 9.0, 0.0],
[1.0, 11.0, 11.0, 1.0],
[0.01, 1.01, 11.0, 0.0],
[0.01, 0.01, 10.0, 1.0],
[-1.0, 1.0, 11.0, 0.0],
[11.0, 11.0, 1.0, 1.0],
[11.0, 12.0, -1.0, 0.0],
[0.01, 0.01, 11.0, 1.0],
[0.01, 0.01, 11.0, 0.0]], dtype=tf.float32)
voxel_features, _, _ = voxel_utils.pointcloud_to_voxel_grid(
points=points,
features=features,
grid_cell_size=grid_cell_size,
start_location=start_location,
end_location=end_location)
np_voxel_features = voxel_features.numpy()
# [-20:0, -20:0, -20:0]
self.assertAllClose(np_voxel_features[0, 0, 0, :], [0.0, 0.0, 0.0, 0.0])
# [-20:0, -20:0, 0:20]
self.assertAllClose(np_voxel_features[0, 0, 1, :], [0.0, 0.0, 0.0, 0.0])
# [-20:0, 0:20, -20:0]
self.assertAllClose(np_voxel_features[0, 1, 0, :], [0.0, 0.0, 0.0, 0.0])
# [-20:0, 20:0, 0:20]
self.assertAllClose(np_voxel_features[0, 1, 1, :], [-1.0, 1.0, 11.0, 0.0])
# [0:20, -20:0, -20:0]
self.assertAllClose(np_voxel_features[1, 0, 0, :], [0.0, 0.0, 0.0, 0.0])
# [0:20, -20:0, 0:20]
self.assertAllClose(np_voxel_features[1, 0, 1, :], [0.0, 0.0, 0.0, 0.0])
# [0:20, 0:20, -20:0]
self.assertAllClose(np_voxel_features[1, 1, 0, :], [11.0, 12.0, -1.0, 0.0])
# [0:20, 20:0, 0:20]
self.assertAllClose(np_voxel_features[1, 1, 1, :],
[24.04 / 8.0, 45.04 / 8.0, 66.0 / 8.0, 5.0 / 8.0])
def test_pointcloud_to_voxel_grid_placement(self):
points = tf.constant([[0.5, 0.5, 0.5],
[0.25, 0.25, 0.25],
[1.6, 1.6, 1.6],
[1.75, 1.75, 1.75],
[1.9, 1.9, 1.9],
[2.1, 2.1, 2.1],
[2.3, 2.35, 2.37]], dtype=tf.float32)
features = tf.constant([[100, 110, 120],
[120, 130, 140],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[1000, 500, 250],
[200, 300, 150]], dtype=tf.float32)
grid_cell_size = (1, 1, 1)
start_location = (0, 0, 0)
end_location = (10, 10, 10)
voxel_features, segment_ids, _ = voxel_utils.pointcloud_to_voxel_grid(
points=points,
features=features,
grid_cell_size=grid_cell_size,
start_location=start_location,
end_location=end_location)
per_point_values = voxel_utils.voxels_to_points(voxel_features, segment_ids)
np_voxel_features = voxel_features.numpy()
np_segment_ids = segment_ids.numpy()
np_per_point_values = per_point_values.numpy()
# Check voxel grid values
self.assertAllClose(np_voxel_features[0, 0, 0, :], [110, 120, 130])
self.assertAllClose(np_voxel_features[1, 1, 1, :], [2, 3, 4])
self.assertAllClose(np_voxel_features[2, 2, 2, :], [600, 400, 200])
# Check values after mapping back to points
self.assertAllClose(np_per_point_values[0, :], (110.0, 120.0, 130.0))
self.assertAllClose(np_per_point_values[1, :], (110.0, 120.0, 130.0))
self.assertAllClose(np_per_point_values[2, :], (2.0, 3.0, 4.0))
self.assertAllClose(np_per_point_values[3, :], (2.0, 3.0, 4.0))
self.assertAllClose(np_per_point_values[4, :], (2.0, 3.0, 4.0))
self.assertAllClose(np_per_point_values[5, :], (600.0, 400.0, 200.0))
self.assertAllClose(np_per_point_values[6, :], (600.0, 400.0, 200.0))
# Check segment ids match what they should
# Locations: [0, 0, 0] == 0, [1, 1, 1] == 111, [2, 2, 2] == 222
self.assertAllEqual([0, 0, 111, 111, 111, 222, 222], np_segment_ids)
def test_points_offset_in_voxels(self):
points = tf.constant([[[0.5, 0.5, 0.5],
[0.25, 0.25, 0.25],
[1.6, 1.6, 1.6],
[1.75, 1.75, 1.75],
[1.9, 1.9, 1.9],
[2.1, 2.1, 2.1],
[2.3, 2.35, 2.37]]], dtype=tf.float32)
point_offsets = voxel_utils.points_offset_in_voxels(
points, grid_cell_size=(0.1, 0.1, 0.1))
expected_points = np.array(
[[[0.0, 0.0, 0.0],
[-0.5, -0.5, -0.5],
[0.0, 0.0, 0.0],
[-0.5, -0.5, -0.5],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.5, -0.3]]], dtype=np.float32)
self.assertAllClose(point_offsets.numpy(), expected_points, atol=1e-3)
def test_pointcloud_to_sparse_voxel_grid_unbatched(self):
points = tf.constant([[0.5, 0.5, 0.5],
[0.25, 0.25, 0.25],
[1.6, 1.6, 1.6],
[1.75, 1.75, 1.75],
[1.9, 1.9, 1.9],
[2.1, 2.1, 2.1],
[2.3, 2.35, 2.37]], dtype=tf.float32)
features = tf.constant([[100, 110, 120],
[120, 130, 140],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[1000, 500, 250],
[200, 300, 150]], dtype=tf.float32)
grid_cell_size = (0.5, 0.5, 0.5)
(voxel_features_max, voxel_indices_max, segment_ids_max,
voxel_start_location_max
) = voxel_utils.pointcloud_to_sparse_voxel_grid_unbatched(
points=points,
features=features,
grid_cell_size=grid_cell_size,
segment_func=tf.math.unsorted_segment_max)
(voxel_features_mean, voxel_indices_mean, segment_ids_mean,
voxel_start_location_mean
) = voxel_utils.pointcloud_to_sparse_voxel_grid_unbatched(
points=points,
features=features,
grid_cell_size=grid_cell_size,
segment_func=tf.math.unsorted_segment_mean)
self.assertAllClose(voxel_features_max.numpy(),
np.array([[120., 130., 140.],
[1., 2., 3.],
[1000., 500., 250.],
[200., 300., 150.]]))
self.assertAllClose(voxel_features_mean.numpy(),
np.array([[110., 120., 130.],
[1., 2., 3.],
[335., 169., 259.0 / 3.0],
[200., 300., 150.]]))
self.assertAllEqual(voxel_indices_max.numpy(), np.array([[0, 0, 0],
[2, 2, 2],
[3, 3, 3],
[4, 4, 4]]))
self.assertAllEqual(segment_ids_max.numpy(),
np.array([0, 0, 1, 2, 2, 2, 3]))
self.assertAllEqual(voxel_indices_mean.numpy(), voxel_indices_max.numpy())
self.assertAllEqual(segment_ids_mean.numpy(), segment_ids_max.numpy())
self.assertAllClose(voxel_start_location_mean.numpy(),
np.array([0.25, 0.25, 0.25]))
self.assertAllClose(voxel_start_location_max.numpy(),
np.array([0.25, 0.25, 0.25]))
def test_pointcloud_to_sparse_voxel_grid(self):
points = tf.constant([[[0.5, 0.5, 0.5],
[0.25, 0.25, 0.25],
[1.6, 1.6, 1.6],
[1.75, 1.75, 1.75],
[1.9, 1.9, 1.9],
[2.1, 2.1, 2.1],
[2.3, 2.35, 2.37],
[0.0, 0.0, 0.0]]], dtype=tf.float32)
features = tf.constant([[[100, 110, 120],
[120, 130, 140],
[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[1000, 500, 250],
[200, 300, 150],
[0, 0, 0]]], dtype=tf.float32)
num_valid_points = tf.constant([7], dtype=tf.int32)
grid_cell_size = (0.5, 0.5, 0.5)
(voxel_features, voxel_indices, num_valid_voxels, segment_ids,
voxel_start_locations) = voxel_utils.pointcloud_to_sparse_voxel_grid(
points=points,
features=features,
num_valid_points=num_valid_points,
grid_cell_size=grid_cell_size,
voxels_pad_or_clip_size=5,
segment_func=tf.math.unsorted_segment_max)
self.assertAllClose(voxel_features.numpy(), np.array([[[120., 130., 140.],
[1., 2., 3.],
[1000., 500., 250.],
[200., 300., 150.],
[0.0, 0.0, 0.0]]]))
self.assertAllEqual(voxel_indices.numpy(), np.array([[[0, 0, 0],
[2, 2, 2],
[3, 3, 3],
[4, 4, 4],
[0, 0, 0]]]))
self.assertAllEqual(segment_ids.numpy(),
np.array([[0, 0, 1, 2, 2, 2, 3, 0]]))
self.assertAllEqual(num_valid_voxels.numpy(), np.array([4]))
self.assertAllClose(voxel_start_locations.numpy(),
np.array([[0.25, 0.25, 0.25]]))
def test_sparse_voxel_grid_to_pointcloud(self):
voxel_features_0 = tf.constant([[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]], dtype=tf.float32)
voxel_features_1 = tf.constant([[0.0, 0.0, 0.5],
[0.0, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.5, 0.5, 0.5]], dtype=tf.float32)
voxel_features = tf.stack([voxel_features_0, voxel_features_1], axis=0)
segment_ids = tf.constant([[0, 0, 1, 1, 2, 2, 0, 0, 0, 0],
[1, 3, 1, 2, 0, 4, 4, 0, 0, 0]], dtype=tf.int32)
num_valid_voxels = tf.constant([3, 5], dtype=tf.int32)
num_valid_points = tf.constant([7, 9], dtype=tf.int32)
point_features = voxel_utils.sparse_voxel_grid_to_pointcloud(
voxel_features=voxel_features,
segment_ids=segment_ids,
num_valid_voxels=num_valid_voxels,
num_valid_points=num_valid_points)
np_point_features = point_features.numpy()
self.assertAllEqual(np_point_features.shape, [2, 10, 3])
self.assertAllClose(np_point_features[0], np.array([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]))
self.assertAllClose(np_point_features[1], np.array([[0.0, 0.5, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.5, 0.0],
[0.5, 0.0, 0.0],
[0.0, 0.0, 0.5],
[0.5, 0.5, 0.5],
[0.5, 0.5, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.5],
[0.0, 0.0, 0.0]]))
def test_per_voxel_point_sample_segment_func(self):
data = tf.constant(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 1.0, 0.0],
[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
dtype=tf.float32)
segment_ids = tf.constant([0, 3, 1, 0, 3, 0, 0], dtype=tf.int32)
num_segments = 4
num_samples_per_voxel = 2
voxel_features = voxel_utils.per_voxel_point_sample_segment_func(
data=data,
segment_ids=segment_ids,
num_segments=num_segments,
num_samples_per_voxel=num_samples_per_voxel)
expected_voxel_features = tf.constant([[[1.0, 1.0, 1.0], [0.0, 1.0, 1.0]],
[[0.0, 0.0, 1.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]])
self.assertAllEqual(voxel_features.shape, np.array([4, 2, 3]))
self.assertAllClose(voxel_features.numpy(), expected_voxel_features.numpy())
def test_compute_pointcloud_weights_based_on_voxel_density(self):
points = tf.constant([[-1.0, -1.0, -1.0],
[-1.1, -1.1, -1.1],
[5.0, 5.0, 5.0],
[5.1, 5.1, 5.1],
[5.2, 5.2, 5.2],
[10.0, 10.0, 10.0],
[15.0, 15.0, 15.0]], dtype=tf.float32)
point_weights = (
voxel_utils.compute_pointcloud_weights_based_on_voxel_density(
points=points, grid_cell_size=(4.0, 4.0, 4.0)))
self.assertAllClose(
point_weights.numpy(),
np.array([[0.875], [0.875], [0.5833334], [0.5833334], [0.5833334],
[1.75], [1.75]],
dtype=np.float32))
if __name__ == '__main__':
tf.test.main()
| true |
107443104641161132dd28b5a7adf4c52b1eec0c | Python | SnehaMishra28/Python-DeepLearning_Fall2018 | /Mod1_Lab1/Source/mod1_lab1/Part4.py | UTF-8 | 4,092 | 3.484375 | 3 | [] | no_license | # Hospital Class with name and address public data attribute
class Hospital:
    """Base record holding a hospital's public details (name, address)."""

    def __init__(self, n, a):
        # Short parameter names (n=name, a=address) kept for
        # compatibility with existing callers.
        self.haddress = a
        self.hname = n
class Procedure:
def __init__(self, pcode, pname, pfee):
self.procedure_name = pname
self.procedure_code = pcode
self.procedure_fee = pfee
# Patient class with name, address, gender and dental procedure details extended from class procedure and hospital
class Patient(Hospital, Procedure): # Multiple inheritance
total_patient = 0 # class attribute for counting number of in hospital
def __init__(self, pid, pname, page, phname, paddress, pcode, pcname, pfee):
super(Patient, self).__init__(phname, paddress) # Super class Hospital call for Patient Class
Procedure.__init__(self, pcode, pcname, pfee) # Call for __int__ Procedure
self.__patient_id = pid # Defining patient ID as private
self.patient_name = pname
self.patient_age = page
self.__class__.total_patient += 1 # Incrementing Patient Class by 1
def patient_display(self):
print('Patient Name:', self.patient_name, 'Denatl Procedure Undergone:',
self.procedure_name, 'Fee paid of $', self.procedure_fee)
def getpatient_id(self): # Function to return Private Patient ID
return self.__patient_id
# Hospital Staff Class with Staff ID and Staff Type
class Staff(Hospital):
def __init__(self, scode, stype, hname, haddress):
super(Staff, self).__init__(hname, haddress)
self.staff_code = scode
self.staff_type = stype
# Doctor Class
class Doctor(Staff): # Multilevel Inheritance logic implemented here
total_doctor = 0 # Class attribute for counting number of doctors
def __init__(self, did, name, qual, city, spec, scode, stype, hname, haddress):
super(Doctor, self).__init__(scode, stype, hname, haddress) # Call to base class Staff using supre method
self.__doc_id = did # Defining Doctor ID as Private data member
self.doc_name = name
self.doc_qual = qual
self.doc_city = city
self.doc_specaility = spec
self.__class__.total_doctor += 1 # Incrementing Doctor Count by 1
def doctor_display(self):
print('Doctor Name :', self.doc_name, 'Qualification:', self.doc_qual,
'Specaility:', self.doc_specaility, 'Hospital', self.hname)
def getdoctor_id(self): # Function to return private Doctor ID
return self.__doc_id
# Nurse Class
class Nurse(Staff):
total_nurse = 0 # Class attribute for counting number of Nurses
def __init__(self, nid, name, age, qual, city, scode, stype, hname, haddress):
super(Nurse, self).__init__(scode, stype, hname, haddress) # Call to base class using super method
self.__nurse_id = nid
self.nurse_name = name
self.nurse_qual = qual
self.nurse_city = city
self.nurse_age = age
self.__class__.total_nurse += 1 # incrmenting nurse Count by one
def display_nurse(self):
print('Nurse Name: ', self.nurse_name, 'Nurse Qualification :', self.nurse_qual,
'Hospital:', self.hname)
def getnurse_id(self):
return self.__nurse_id
# Driver Program
if __name__ == "__main__":
# Creating patient Class Object
p1 = Patient(1, 'Raju Nekadi', 30, 'ABC', '6100 fsoter St', 'D5992',
'Tooth Cleansing', 200)
p1.patient_display() # Patient Display method call
print('Patient ID:', p1.getpatient_id())
# Creating Doctor Class object
d1 = Doctor(1, 'Sneha mIshra', 'Dental M.D', 'Kansas City', 'Dentist', 100, 'Doctors', 'ABC', '6100 fsoter St')
d1.doctor_display() # Doctor Display Method Call
print('Doctor ID:', d1.getdoctor_id())
# Creating nurse Class Object
n1 = Nurse(1, 'Swati Singh', '28', 'Health Science', 'Kansas City', 200, 'Nurse', 'ABC', '6100 Foster St')
n1.display_nurse() # Nurse Display Method Call
print('Nurse ID:', n1.getnurse_id()) | true |
bb4d0af6c8cf44f223333a00f82553c5ddc61e4f | Python | RagavendranMRN/Machine-Learning-Scratch | /Linear Regression/Basic Linear Regression.py | UTF-8 | 385 | 3.125 | 3 | [] | no_license | import pandas as pd
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
df = pd.read_csv('mydata.csv')
plt.xlabel('area')
plt.ylabel('prices')
plt.title("HOUSE PRICE PREDICTION")
plt.scatter(df.area,df.prices,color='red',marker='+')
area = df[['area']]
price = df.prices
price
reg = linear_model.LinearRegression()
reg.fit(area,price)
reg.predict(1499)
| true |
6731d94cc5eee423fc2d1f7ac455645719d09788 | Python | rmorgan10/PythonProgrammingGroupStudy | /People/Juan/Week 4/Currency.py | UTF-8 | 2,333 | 3.359375 | 3 | [
"MIT"
] | permissive | from typing import List
import csv
import os
from decimal import Decimal
class Money:
    """
    Class that holds currency of a particular type

    Amounts are stored as ``Decimal`` values quantized to the currency's
    minor unit (e.g. 2 decimal places for USD), looked up in the
    currencies CSV file located in the current working directory.
    """
    # Resolved once at class-creation time, relative to the CWD at import.
    CURRENCIES_FILENAME = os.path.join(os.getcwd(),"currency_codes.csv")

    def __init__(self, amount: Decimal, currency_code: str = "USD"):
        # Validation upper-cases the code, but the code is *stored* as
        # given.  NOTE(review): a lower-case code passes this check yet
        # later fails in `quantizer`, because get_minor_unit matches the
        # exact string -- confirm intended behaviour.
        if currency_code.upper() not in self._currency_codes:
            raise ValueError(f"{currency_code} is not a valid currency code")
        self.__currency_code = currency_code
        # NOTE(review): this also rejects zero, and `assert` is stripped
        # under `python -O`; an explicit `raise ValueError` would be safer.
        assert amount > 0, "Cannot create negative money."
        # Round/pad the amount to the currency's minor unit.
        self.__amount = amount.quantize(self.quantizer)

    @property
    def _currency_codes(self) -> List[str]:
        # NOTE(review): re-reads the CSV file on every access.
        return currency_codes(Money.CURRENCIES_FILENAME)

    @property
    def amount(self) -> Decimal:
        """The quantized monetary amount."""
        return self.__amount

    @property
    def quantizer(self) -> Decimal:
        # Exponent template such as Decimal("1.00") for a 2-decimal
        # currency; re-reads the CSV file on every access.
        return Decimal("1."+"0"*get_minor_unit(self.__currency_code, Money.CURRENCIES_FILENAME))

    @property
    def currency(self) -> str:
        """The currency code exactly as passed to the constructor."""
        return self.__currency_code
def currency_codes(currencies_filename: str = "currency_codes.csv") -> List[str]:
    """
    Returns all valid currency codes as specified in the currencies .csv file

    Args:
        currencies_filename : name of the file containing information on global currencies
    Returns:
        list of all valid currency codes
    """
    with open(currencies_filename, newline="") as f:
        # The alphabetic currency code sits in the third column of each row.
        return [row[2] for row in csv.reader(f)]


def get_minor_unit(currency_code: str, currencies_filename: str = "currency_codes.csv") -> int:
    """
    Returns the minor unit (how many decimal places are needed to store info about this currency)
    from the currencies file

    Args:
        currency_code : valid country currency
        currencies_filename : name of the file containing information on global currencies
    Returns:
        minor unit
    """
    codes = currency_codes(currencies_filename)
    if currency_code not in codes:
        raise ValueError(f"{currency_code} is not a valid currency code!")
    with open(currencies_filename, newline="") as f:
        # Map each code onto its full CSV row (last occurrence wins on
        # duplicates, matching dict construction semantics).
        rows_by_code = dict(zip(codes, csv.reader(f)))
    raw_unit = rows_by_code[currency_code][4]
    try:
        return int(raw_unit)
    except ValueError:
        # Not all "currencies" are plain currencies; such rows carry a
        # non-numeric minor unit.  Fall back to 10 decimal places, since
        # "arbitrary precision" beyond that point is fake anyway.
        return 10
| true |
2e867bb74e02a0f8320aace9ff1bd0c10f8a1802 | Python | bp274/HackerRank | /Algorithms/Graphs/Breadth First Search - Shortest Reach.py | UTF-8 | 1,044 | 3.234375 | 3 | [] | no_license | #!/bin/python3
def bfs(n, m, graph, s):
distance = [-1 for _ in range(n)]
distance[s] = 0
frontier = [s]
while frontier:
next = []
for u in frontier:
for v in graph[u]:
if distance[v] == -1:
distance[v] = 6 + distance[u]
next.append(v)
elif distance[v] > 6 + distance[u]:
distance[v] = 6 + distance[u]
frontier = next
return distance
if __name__ == '__main__':
    # Read q independent queries from stdin.  Each query supplies the
    # node/edge counts, an edge list (1-indexed), and a start node, then
    # prints the distances to every other node in index order.
    q = int(input().strip())
    for q_itr in range(q):
        n, m = map(int, input().strip().split())
        graph = [[] for _ in range(n)]
        for _ in range(m):
            u, v = map(int, input().strip().split())
            # Convert to 0-indexed and store both directions (undirected).
            graph[u - 1].append(v - 1)
            graph[v - 1].append(u - 1)
        s = int(input().strip())
        result = bfs(n, m, graph, s - 1)
        # Print distances for every node except the source itself.
        for i in range(n):
            if i != s - 1:
                print(result[i], end = ' ')
        print()
| true |
f212e8bea367d8455149b428bf3c67506da672d6 | Python | jtlongino/lott-python | /exercises/chapter_5/section_5_5_2_problem_5.py | UTF-8 | 84 | 3 | 3 | [] | no_license | """ Exercise 5 from Section 5.5.2 """
print("Force on sail is", 15**2 * 0.004 * 61)
| true |
5d59d7090768f74771f2af62bc39a0e8db2a1900 | Python | DrDavxr/Water-Rocket-Simulator | /Simulator_H2O_rocket.py | UTF-8 | 5,665 | 2.953125 | 3 | [
"MIT"
] | permissive | """
Trajectory simulator of the H2O rocket for the Course on Rocket Motors.
"""
# Import the libraries.
import numpy as np
from Integration import Simulation
from scipy.optimize import minimize_scalar
import matplotlib.pyplot as plt
# %% SOLVE FOR THE TRAJECTORY OF THE ROCKET.
def main(x, *args):
    """Objective function for the water-volume optimisation.

    Simulates the rocket for a water volume `x` (m^3) and returns the
    *negative* of the final altitude, so that `minimize_scalar` (a
    minimiser) effectively maximises the apogee.  `args` carries the
    fixed launch parameters in the order built at the call site below.
    """
    # Unpack the fixed launch/state parameters.
    init_h, init_v, init_FP, V, init_P_air, step, alpha, delta, g, D, d, m_wo_H2O, P_amb, T_init = args
    # The air volume is whatever part of the bottle is not water.
    init_V_air = V - x
    state_vector = [init_h, init_v, init_FP, init_V_air, init_P_air]
    # Total lift-off mass: water (x m^3 at 1000 kg/m^3) plus the dry rocket.
    m_tot = x * 1000 + m_wo_H2O
    Trajectory = Simulation(x, state_vector, step, alpha, delta, g, D, d,
                            m_tot, P_amb, init_P_air, T_init)
    # Negated final altitude: minimising this maximises the apogee.
    return -Trajectory[0][-1]
# %% INTRODUCE THE INITIAL VALUES OF THE STATE PARAMETERS.
init_v = 0.01 # Initial velocity [m/s].
init_FP = np.radians(90)
init_z = 665 # Initial Altitude (Leganés) w.r.t SL [m]
R_Earth = 6371000 # Earth Radius [m]
T_0 = 288.15 # Reference Temperature for ISA [K]
P_0 = 101325 # Reference pressure for ISA [Pa]
rho_0 = 1.225 # Reference density for an ISA day [kg/m^3]
# Transform the altitude (z) into geopotential altitude (h).
init_h_ISA = (init_z)/(1+init_z/R_Earth)
# Compute the ISA temperature and pressure at Leganés.
Delta = 1 - 2.25569*1e-5*init_h_ISA # Non-dimensional temperature ratio T/T_0
T_amb = T_0 * Delta # Temperature at Leganés for an ISA day [K]
P_atm = P_0 * Delta**5.2561 # Pressure at Leganés for an ISA day [Pa]
rho_amb = rho_0 * Delta**4.2561 # Air density at Leganés for ISA day [kg/m^3]
# Define tank maximum pressure.
P_max = 2.83e5 # [Pa]
T_init = 30 # [ºC]
# Define Flight initial parameters.
alpha = np.radians(0)
delta = np.radians(0)
# Define Geometry Characteristics.
D = 10.2e-2 # Bottle diameter [m]
d = 8e-3 # Nozzle throat diameter [m]
# Define payload and structural mass.
m_pl = 12e-3 # Payload mass [kg]
m_str = 2*46.7e-3 # Structural mass [kg]
m_wo_H2O = m_pl + m_str # Initial mass of the rocket without water [kg].
# Redefine the initial altitude w.r.t the ground.
init_h_g = 0 # [m]
# Define the maximum volume of the bottle.
V = 2e-3
# Define the gravity.
g = 9.80655 # [m/s^2]
# Define the step of integration.
step = 0.01
# %% COMPUTE THE TRAJECTORY OF THE ROCKET.
args = (init_h_g, init_v, init_FP, V, P_max, step, alpha, delta, g,
D, d, m_wo_H2O, P_atm, T_init)
# Obtain the optimized value of the initial volume.
solution = minimize_scalar(main, args=args, method='bounded',
bounds=(0.5e-3, V))
state_vector = [init_h_g, init_v, init_FP, V - solution.x, P_max]
Trajectory = Simulation(solution.x, state_vector, step, alpha, delta, g, D, d,
solution.x * 1000 + m_wo_H2O, P_atm, P_max, T_init)
print(f'Maximum Altitude: {Trajectory[0][-1]} m.\nV_H2O = {solution.x*1e3} L')
print(f'Time elapsed during water propulsive phase: {Trajectory[-1][0]} s.')
print(f'Time elapsed during air propulsive phase: {Trajectory[-1][1]-Trajectory[-1][0]} s.')
print(f'Time elapsed during free flight: {Trajectory[-1][-1]-Trajectory[-1][1]} s.')
t_vec = np.linspace(0, Trajectory[-1][-1], len(Trajectory[0]))
mpl = plt.figure()
plt.plot(t_vec, Trajectory[0])
plt.title('Altitude')
mpl = plt.figure()
plt.plot(t_vec, Trajectory[1])
plt.title('Speed')
mpl = plt.figure()
plt.plot(t_vec, Trajectory[2])
plt.title('Flight Path Angle')
mpl = plt.figure()
plt.plot(t_vec, Trajectory[4])
plt.title('Pressure')
# %% Contour of height as a function of structural and water mass
m_str = np.linspace(0.01, 0.21, 30) # Structural mass[kg]
m_water = np.linspace(0.05, 1.1, 30) # Water mass [kg]
X, Y = np.meshgrid(m_water, m_str)
Z = np.empty((np.shape(X)[0], np.shape(X)[0]))
Vel = np.empty((np.shape(X)[0], np.shape(X)[0]))
for i in range(np.shape(X)[0]):
for j in range(np.shape(X)[0]):
x = X[i][j]
y = Y[i][j]
state_vector = [init_h_g, init_v, init_FP, V - x/1e3, P_max]
Trajectory = Simulation(x/1000, state_vector, step, alpha, delta, g, D,
d, x+y, P_atm, P_max, T_init)
Z[i][j] = max(Trajectory[0])
Vel[i][j] = max(Trajectory[1])
fig, ax = plt.subplots()
CS = ax.contourf(X, Y, Z, 7, cmap='jet')
CB = fig.colorbar(CS)
plt.xlabel('Water mass [kg]')
plt.ylabel('Structural mass [kg]')
plt.title('Height [m]')
plt.show()
fig, ax = plt.subplots()
CS = ax.contourf(X, Y, Vel, 7, cmap='jet')
CB = fig.colorbar(CS)
plt.xlabel('Water mass [kg]')
plt.ylabel('Structural mass [kg]')
plt.title('Max speed [m/s]')
plt.show()
# %% SIMULATION PLOTS FOR THE REPORT.
"""
It is required to plot the evolution of the altitude and velocity of the
rocket; the evolution of the air pressure inside the rocket and the evolution
of water mass for a dry mass of 80g (0.08kg).
"""
# From contour, the ideal water volume is 0.33L approximately.
state_vector = [init_h_g, init_v, init_FP, V - 0.35/1e3, P_max]
report = Simulation(0.35/1000, state_vector, step, alpha, delta, g, D, d,
0.085+0.35, P_atm, P_max, T_init)
t_vec = np.linspace(0, report[-2][-2], len(report[0]))
mpl = plt.figure()
plt.plot(t_vec, report[0])
plt.title('Altitude evolution')
plt.xlabel('Time [s]')
plt.ylabel('Altitude [m]')
mpl = plt.figure()
plt.plot(t_vec, report[1])
plt.title('Speed evolution')
plt.xlabel('Time [s]')
plt.ylabel('Speed [m/s]')
mpl = plt.figure()
plt.plot(t_vec, report[4])
plt.title('Pressure evolution')
plt.xlabel('Time [s]')
plt.ylabel('Pressure [Pa]')
mpl = plt.figure()
plt.plot(t_vec, report[-1])
plt.title('Water mass evolution')
plt.xlabel('Time [s]')
plt.ylabel('Water mass [kg]') | true |
bc0463aaae21fa46807494b1f6759ad426d8ff27 | Python | deepak3698/FlaskAPI-For-AudioFileType | /main.py | UTF-8 | 9,093 | 2.640625 | 3 | [] | no_license | from flask import Flask, request, jsonify
import json
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
# Init app
app = Flask(__name__)
# Reading Data from Json
with open('config.json', 'r') as data:
params = json.load(data)["params"]
# Database
app.config['SQLALCHEMY_DATABASE_URI'] = params['local_uri']
# Init db
db = SQLAlchemy(app)
# Init ma
ma = Marshmallow(app)
# Song Class/Model
class Song(db.Model):
    """ORM model for a song: ID, Name, Duration and upload timestamp."""
    ID = db.Column(db.Integer, primary_key=True)
    Name = db.Column(db.String(100), nullable=False)
    Duration = db.Column(db.Integer, nullable=False)
    # Bug fix / consistency: named `Uploaded_time` like the Podcast and
    # Audiobook models -- SongSchema serialises an 'Uploaded_time' field,
    # which did not exist under the old `UploadedTime` name.
    Uploaded_time = db.Column(db.DateTime, nullable=False)

    def __init__(self, ID, Name, Duration, UploadedTime):
        # Constructor parameter name kept as `UploadedTime` so existing
        # keyword-argument callers continue to work.
        self.ID = ID
        self.Name = Name
        self.Duration = Duration
        self.Uploaded_time = UploadedTime
# Podcast Class/Model
class Podcast(db.Model):
    """ORM model for a podcast episode.

    Columns: ID (int, PK), Name, Duration (units not stated in SOURCE --
    TODO confirm seconds), Uploaded_time, Host, and an optional
    Participants string (nullable).
    """
    ID = db.Column(db.Integer, primary_key=True)
    Name = db.Column(db.String(100), nullable=False)
    Duration = db.Column(db.Integer, nullable=False)
    Uploaded_time = db.Column(db.DateTime, nullable=False)
    Host = db.Column(db.String(100), nullable=False)
    Participants = db.Column(db.String(500), nullable=True)

    def __init__(self, ID, Name, Duration, Uploaded_time, Host, Participants):
        self.ID = ID
        self.Name = Name
        self.Duration = Duration
        self.Uploaded_time = Uploaded_time
        self.Host = Host
        self.Participants = Participants
# Audiobook Class/Model
class Audiobook(db.Model):
    """ORM model for an audiobook.

    Columns: ID (int, PK), Title, Author, Narrator, Duration (units not
    stated in SOURCE -- TODO confirm), Uploaded_time.
    """
    ID = db.Column(db.Integer, primary_key=True)
    Title = db.Column(db.String(100), nullable=False)
    Author = db.Column(db.String(100), nullable=False)
    Narrator = db.Column(db.String(100), nullable=False)
    Duration = db.Column(db.Integer, nullable=False)
    Uploaded_time = db.Column(db.DateTime, nullable=False)

    def __init__(self, ID, Title, Author, Narrator, Duration, Uploaded_time):
        self.ID = ID
        self.Title = Title
        self.Author = Author
        self.Narrator = Narrator
        self.Duration = Duration
        self.Uploaded_time = Uploaded_time
# Marshmallow schemas: ``fields`` lists which model attributes are
# (de)serialized.  NOTE(review): each name here must match the model's
# attribute name exactly -- verify 'Uploaded_time' against the Song model.
class SongSchema(ma.Schema):
    class Meta:
        fields = ('ID', 'Name', 'Duration', 'Uploaded_time')
class PodcastSchema(ma.Schema):
    class Meta:
        fields = ('ID', 'Name', 'Duration', 'Uploaded_time','Host', 'Participants')
class AudiobookSchema(ma.Schema):
    class Meta:
        fields = ('ID', 'Title', 'Author', 'Narrator','Duration','Uploaded_time')
# Singleton schema instances: the plain one serializes a single record,
# the ``many=True`` one serializes a list of records.
song_schema = SongSchema()
songs_schema = SongSchema(many=True)
podcast_schema = PodcastSchema()
podcasts_schema = PodcastSchema(many=True)
audiobook_schema = AudiobookSchema()
audiobooks_schema = AudiobookSchema(many=True)
# Create a audioFileType
@app.route('/AddAudioFile', methods=['POST'])
def addAudioFile():
    """Create one audio record.

    Expects JSON ``{"audioFileType": "song"|"podcast"|"audiobook",
    "audioFileMetadata": {...}}``.  Returns 200 on success, 400 for an
    unknown type or missing metadata keys, 500 for unexpected failures.

    Fixes: a missing JSON key (KeyError) used to surface as a 500 instead
    of a client error, and a leftover debug ``print`` in the podcast branch
    was removed.  The add/commit/return tail is now shared.
    """
    try:
        audioFileType = request.json['audioFileType']
        audioFileMetadata = request.json['audioFileMetadata']
        if audioFileType == "song":
            record = Song(audioFileMetadata["ID"],
                          audioFileMetadata["Name"],
                          audioFileMetadata["Duration"],
                          audioFileMetadata["Uploaded_time"])
        elif audioFileType == "podcast":
            record = Podcast(audioFileMetadata["ID"],
                             audioFileMetadata["Name"],
                             audioFileMetadata["Duration"],
                             audioFileMetadata["Uploaded_time"],
                             audioFileMetadata["Host"],
                             audioFileMetadata["Participants"])
        elif audioFileType == "audiobook":
            record = Audiobook(audioFileMetadata["ID"],
                               audioFileMetadata["Title"],
                               audioFileMetadata["Author"],
                               audioFileMetadata["Narrator"],
                               audioFileMetadata["Duration"],
                               audioFileMetadata["Uploaded_time"])
        else:
            return jsonify({"The request is invalid":" 400 bad request"}), 400
        db.session.add(record)
        db.session.commit()
        return jsonify({"Action is successful": "200 OK"}), 200
    except KeyError:
        # Malformed request payload is a client error, not a server error.
        return jsonify({"The request is invalid":" 400 bad request"}), 400
    except Exception:
        return jsonify({"Any error": "500 internal server error"}), 500
@app.route('/<string:audioFileType>/<int:audioFileID>', methods=['DELETE'])
def deleteAudioFile(audioFileType, audioFileID):
    """Delete one record of the given type by primary key.

    Returns 200 on success, 400 for an unknown type or a non-existent ID,
    500 for unexpected failures.

    Fix: ``query.get`` returns ``None`` for a missing ID; the original then
    called ``db.session.delete(None)``, which raised and surfaced as a 500.
    The three copy-pasted branches are collapsed into a type->model lookup.
    """
    try:
        models = {"song": Song, "podcast": Podcast, "audiobook": Audiobook}
        model = models.get(audioFileType)
        if model is None:
            return jsonify({"The request is invalid":" 400 bad request"}), 400
        record = model.query.get(audioFileID)
        if record is None:
            # Unknown ID: report a client error instead of crashing.
            return jsonify({"The request is invalid":" 400 bad request"}), 400
        db.session.delete(record)
        db.session.commit()
        return jsonify({"Action is successful": "200 OK"}), 200
    except Exception:
        return jsonify({"Any error": "500 internal server error"}), 500
@app.route('/<string:audioFileType>', methods=['GET'])
@app.route('/<string:audioFileType>/<int:audioFileID>', methods=['GET'])
def getAudioFile(audioFileType, audioFileID=0):
    """Return all records of a type (no ID given) or a single record.

    Returns 400 for an unknown type and 500 for unexpected failures.

    Fix: the podcast and audiobook branches serialized with the *song*
    schemas (``song_schema``/``songs_schema``), silently dropping the
    podcast- and audiobook-specific fields.  Each type now uses its own
    single/many schema pair via a lookup table.
    """
    try:
        registry = {
            "song": (Song, song_schema, songs_schema),
            "podcast": (Podcast, podcast_schema, podcasts_schema),
            "audiobook": (Audiobook, audiobook_schema, audiobooks_schema),
        }
        if audioFileType not in registry:
            return jsonify({"The request is invalid":" 400 bad request"}), 400
        model, single_schema, many_schema = registry[audioFileType]
        if audioFileID == 0:
            # 0 is the sentinel meaning "no ID in the URL": list everything.
            return jsonify(many_schema.dump(model.query.all()))
        return single_schema.jsonify(model.query.get(audioFileID))
    except Exception:
        return jsonify({"Any error": "500 internal server error"}), 500
@app.route('/<string:audioFileType>/<int:audioFileID>', methods=['PUT'])
def updateAudio(audioFileType, audioFileID):
    """Overwrite the fields of one existing record from the request JSON.

    Returns 200 on success, 400 for an unknown audioFileType, and 500 for
    any other failure -- including a missing record (``query.get`` returns
    ``None`` and the attribute assignments raise) or missing JSON keys.
    """
    try:
        audioFileMetadata = request.json['audioFileMetadata']
        if audioFileType == "song":
            song = Song.query.get(audioFileID)
            # NOTE(review): rewriting the primary key -- confirm intended.
            song.ID = audioFileMetadata["ID"]
            song.Name = audioFileMetadata["Name"]
            song.Duration = audioFileMetadata["Duration"]
            # NOTE(review): verify 'Uploaded_time' matches the Song model's
            # timestamp attribute name; a mismatch would set a plain,
            # non-column attribute and silently skip the DB update.
            song.Uploaded_time = audioFileMetadata["Uploaded_time"]
            db.session.commit()
            return jsonify({"Action is successful": "200 OK"}), 200
        elif audioFileType == "podcast":
            podcast = Podcast.query.get(audioFileID)
            # NOTE(review): unlike the other branches, ID is not updated here.
            podcast.Name = audioFileMetadata["Name"]
            podcast.Duration = audioFileMetadata["Duration"]
            podcast.Uploaded_time = audioFileMetadata["Uploaded_time"]
            podcast.Host = audioFileMetadata["Host"]
            podcast.Participants = audioFileMetadata["Participants"]
            db.session.commit()
            return jsonify({"Action is successful": "200 OK"}), 200
        elif audioFileType == "audiobook":
            audiobook = Audiobook.query.get(audioFileID)
            audiobook.ID = audioFileMetadata["ID"]
            audiobook.Title = audioFileMetadata["Title"]
            audiobook.Author = audioFileMetadata["Author"]
            audiobook.Narrator = audioFileMetadata["Narrator"]
            audiobook.Duration = audioFileMetadata["Duration"]
            audiobook.Uploaded_time = audioFileMetadata["Uploaded_time"]
            db.session.commit()
            return jsonify({"Action is successful": "200 OK"}), 200
        else:
            return jsonify({"The request is invalid":" 400 bad request"}), 400
    except:
        return jsonify({"Any error": "500 internal server error"}), 500
# Run Server
if __name__ == '__main__':
app.run(debug=True) | true |
a1b5ce391f1b8feb2af28f5d0aba813924270939 | Python | FreshDee/DataMiningETHZ | /Assignment1/Problem2.py | UTF-8 | 1,393 | 2.9375 | 3 | [] | no_license | import re
import sys
import collections
def mapCount(id, lines):
    """Map step of the word count: emit ``[word, 1]`` pairs for *lines*.

    Normalization per line: newlines/tabs folded to spaces, punctuation and
    digit runs removed, tokens lower-cased.  *id* is the conventional
    mapper key argument and is unused.

    Fix: splitting on non-word runs produces empty strings at the line
    edges; the old ``word not in (" ", "\\n")`` check let those through, so
    empty tokens were emitted and counted.  Only non-empty tokens are
    emitted now.
    """
    emit_array = []
    for line in lines:
        line = line.replace("\n", " ").replace("\t", " ")
        line = re.sub(r'[^\w\s]', '', line)
        line = re.sub(r'[0-9]+', '', line)
        for word in re.split(r'\W+', line):
            if word:
                emit_array.append([word.lower(), 1])
    return emit_array
def reduceCount(list_word_count_pairs):
    """Reduce step: sum the counts per word, then filter the result.

    A word survives when its total count lies in [0, 100000] and it is one
    of the first 30 surviving words (in alphabetical order) that start with
    its initial letter.  Empty-string tokens are discarded.
    """
    lower_bound, upper_bound = 0, 100000
    totals = {}
    for pair in list_word_count_pairs:
        token = pair[0]
        totals[token] = totals.get(token, 0) + int(pair[1])
    per_letter = {}
    final = {}
    for token in sorted(totals):
        total = totals[token]
        if not (lower_bound <= total <= upper_bound):
            continue
        key = str(token)
        if key == "":
            continue
        initial = key[0]
        per_letter[initial] = per_letter.get(initial, 0) + 1
        # Cap of 30 words per initial letter; alphabetical order decides
        # which ones make the cut.
        if per_letter[initial] <= 30:
            final[token] = total
    return final
# Driver: MapReduce-style word count over stdin (map -> reduce -> print).
input_text = sys.stdin
word_count_pairs = reduceCount(mapCount(1, input_text))
# One "word<TAB>count" line per surviving word.
for word in word_count_pairs.keys():
    print('%s\t%s' % (word, word_count_pairs[word]))
| true |
7d5077c3a7e0d8f116d27c76d46859301fbf99ef | Python | pbrowneCS/srsBznz | /srsBznz.py | UTF-8 | 2,268 | 2.75 | 3 | [] | no_license | import random
class Unit(object):
def __init_(self):
self.name = name
self.energy = 5
self.health = 100
self.strength = 5
self.intelligence = 5
self.dexterity = 5
self.defense = self.level * 1.5 + self.strength * 2
self.evade = self.level * 1.5 + self.dexterity * 2
self.will = self.level * 1.5 + self.intelligence * 2
self.level = 1
#THIS IS THE MOVEMENT/MAP STUFF?
def move(self, choice):
#THIS IS THE BATTLE ACTIONS/CALCUATIONS
def attack(self):
self.hitChance = hitChance
self.dmgDealt = dmgDealt
#self.scanOnChoice should be part of the LOOP
# self.findTargets is part of scanOnChoice, in the LOOP
def dmg(self):
#target unit's health -= dmgDealt
def userInput(self):
#convert user input into "choice"
#prompt and take userInput
class Warrior(Unit):
def __init_(self):
super(Warrior, self).__init__()
self.strength = 15
self.dexterity = 10
self.OptionSet = {
Slash:{max:5,min:0,type:"physical",range:1},
RockToss:{max:1,min:0,type:"physical",range:5}
}
class Archer(Unit):
def __init_(self):
super(Archer, self).__init__()
self.dexterity = 15
self.strength = 7
self.magic = 7
self.OptionSet = {
DaggerStab:{max:3,min:0,type:"physical",range:1},
ShootArrow:{max:5,min:0,type:"physical",range:10}
}
class Mage(Unit):
def __init_(self):
super(Mage, self).__init__()
self.intelligence = 20
self.OptionSet = {
BurningHands:{max:6,min:0,type:"arcane",range:1},
LightningBolt:{max:4,min:0,type:"arcane",range:5}
}
class Area(object):
def __init_(self):
self.tiles = [[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,0,0,0,0,0,0,0,0,0,0,0,0,0,2],
[2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]]
class Options(object):
def __init__(self):
#arrows move
#2. attack
#A. melee targets available
#B. ranged targets available
#C. magic targets available
| true |
8201e308967edc9e652ed1d0063306ca5cc70e5e | Python | dmaynard24/leetcode | /python/questions_001_100/question_015/three_sum.py | UTF-8 | 1,546 | 3.59375 | 4 | [] | no_license | # 3Sum
# Problem 15
# Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
# Note:
# The solution set must not contain duplicate triplets.
# Example:
# Given array nums = [-1, 0, 1, 2, -1, -4],
# A solution set is:
# [
# [-1, 0, 1],
# [-1, -1, 2]
# ]
from typing import List
class Solution:
    def three_sum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets [a, b, c] from *nums* with a+b+c == 0.

        Sorts *nums* in place (as the original did) and sweeps with the
        classic two-pointer technique: O(n^2) time and O(1) extra space
        beyond the output.  This replaces the previous value->indices cache
        plus adjacency-based de-duplication, which used O(n) extra memory
        and scanned an index list per pair.
        """
        n = len(nums)
        if n < 3:
            return []
        nums.sort()
        triplets = []
        for i in range(n - 2):
            # Skip duplicate first elements so each triplet is emitted once.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            # Smallest remaining value positive => no zero-sum triplet left.
            if nums[i] > 0:
                break
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    triplets.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Skip duplicates of the second/third elements.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return triplets
| true |
9f6a61f388a792ff037a8595440f70d438d263e4 | Python | Nefed-dev/Euler-project | /euler_010.py | UTF-8 | 540 | 3.9375 | 4 | [] | no_license | # Сумма простых чисел меньше 10 равна 2 + 3 + 5 + 7 = 17.
# Найдите сумму всех простых чисел меньше двух миллионов.
# Решето Эратосфена
def get_primes(n):
    """Return every prime <= n, via the Sieve of Eratosthenes."""
    limit = n + 1
    is_prime = [True] * limit
    # Striking out multiples only needs candidates up to sqrt(n).
    for candidate in range(2, int(n ** 0.5 + 1)):
        if is_prime[candidate]:
            for multiple in range(candidate * candidate, limit, candidate):
                is_prime[multiple] = False
    # Indices 0 and 1 are never visited, so start collecting from 2.
    return [value for value in range(2, limit) if is_prime[value]]
# Project Euler 10 driver: sieving up to two million runs at import time.
primes = get_primes(2000000)
print(sum(primes))
# Answer:142913828922 | true |
5bdee83e82999542c01ac399860b446291816646 | Python | ifredom/py-desktop-app | /tweepy/demo1.1.py | UTF-8 | 916 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# coding:utf-8
import tweepy
import json
# SECURITY(review): real-looking Twitter API credentials are hard-coded
# below and committed to source control.  They should be revoked and
# loaded from the environment or a config file instead.
consumer_key = 'dkYGHcJMl4enNsNIMJYE3vx0M'
consumer_secret = 'F0zCq4ietgc0zAIvDeugLGOeou8AMpyTXk7O8WirvdZe9aI1G5'
access_token = '796625332501671936-nu7pw8sL71pVTztbXjooyZnT5Q8xrfL'
access_token_secret = '1GAx8IQPtaDiIZ9BMB4SgpphIGjZdcWbrEjnDD5YaEmtf'
# Goal: fetch Trump's latest tweets.
# Register the key/secret pair with the OAuth handler.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# API object: the content handle used for all requests below.
api = tweepy.API(auth)
# Write helper: creates the file if it does not exist.
def wfile(data):
    """Serialize *data* to test.json (created or overwritten), pretty-printed."""
    with open("test.json", "w") as handle:
        handle.write(json.dumps(data, indent=2))
# 读文件
def rfile():
    """Load and return the JSON document stored in test.json.

    Fix: the original parsed the file into a local variable and returned
    nothing, making the function useless; it now returns the parsed
    object (previous callers that ignored the return value are unaffected).
    """
    with open("test.json", "r") as f:
        return json.load(f)
# Collect the timeline of another user's page (Donald Trump) and dump the
# tweet texts to test.json.  NOTE(review): user_timeline returns only the
# default first page of tweets -- confirm the desired count/pagination.
other_public_tweets = api.user_timeline('realDonaldTrump')
dicts = []
for tweet in other_public_tweets:
    temp = {}
    temp['text'] = tweet.text
    dicts.append(temp)
wfile(dicts)
| true |
8d9d40f748d8351017fe52bef8296c45a8bbea76 | Python | botaoap/python_db_proway_2021 | /aula2/class/classes.py | UTF-8 | 943 | 4.3125 | 4 | [] | no_license | """
classmethod - staticmethod - decorators
"""
class MinhaClasse:
    """Demo class contrasting instance methods with a classmethod factory."""

    def __init__(self, nome, idade) -> None:
        self.nome = nome
        self.idade = idade

    def __repr__(self) -> str:
        return f"{self.nome}, {self.idade}"

    def metodo_de_instancia(self):
        """Instance method: has access to the instance through ``self``."""
        print(f"Eu sou uma classe {self}")
        print(f"Meu nome é {self.nome}")

    # A classmethod receives the class itself (``cls``) instead of an
    # instance, and is commonly used as an alternate, validating
    # constructor -- it can tweak what reaches ``__init__``.
    @classmethod
    def metodo_de_classe(cls, nome, idade):
        """Validating factory.

        Raises:
            ValueError: if ``idade`` is below 18.  (Was a bare
            ``Exception``; ``ValueError`` is the idiomatic type for a bad
            argument and is still caught by existing ``except Exception``
            handlers, so the change is backward compatible.)
        """
        if idade < 18:
            raise ValueError("Nao pode menor de idade")
        return cls(nome, idade)
# Demo: plain construction vs. the validating classmethod factory.
minha_classe = MinhaClasse(nome="Jorge", idade=25)
# print(minha_classe.nome)
# print(minha_classe.idade)
print(minha_classe)  # uses __repr__
outra_classe = MinhaClasse.metodo_de_classe("Junior", 19)
print(outra_classe.idade) | true |
b13f87760e333ab606977ec77e251d0d422e8c32 | Python | hhuongnt/Sorting-Deck | /test/bla.py | UTF-8 | 39 | 2.703125 | 3 | [] | no_license | for i in range(1,-2,-1):
print (i)
| true |
2b7fc7c4ecc786f3c7a7d9f72f3c12e1472d0789 | Python | rafiyajaved/ML_project_1 | /boosting.py | UTF-8 | 4,133 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 15 16:30:02 2017
@author: Rafiya
"""
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report,confusion_matrix, accuracy_score
from sklearn import tree
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import validation_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import learning_curve
a=pd.read_csv('EDdata.csv', encoding="ISO-8859-1")
b=pd.read_csv('HPSAdata.csv', encoding="ISO-8859-1")
Xa = a.values[:, 2:26]
ya = a.values[:,28]
Xb = b.values[:, 2:26]
yb = b.values[:,27]
Xa_train, Xa_test, ya_train, ya_test = train_test_split( Xa, ya, test_size = 0.3, random_state = 100)
Xb_train, Xb_test, yb_train, yb_test = train_test_split( Xb, yb, test_size = 0.3, random_state = 100)
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
[-1,-1e-3,-(1e-3)*10**-0.5, -1e-2, -(1e-2)*10**-0.5,-1e-1,-(1e-1)*10**-0.5, 0, (1e-1)*10**-0.5,1e-1,(1e-2)*10**-0.5,1e-2,(1e-3)*10**-0.5,1e-3]
params= [1,2,5,10,20,30,45,60,80,100,150]
boosterA = AdaBoostClassifier(algorithm='SAMME',learning_rate=1)
boosterB = AdaBoostClassifier(algorithm='SAMME',learning_rate=1)
train_scores, test_scores = validation_curve(boosterA, Xa_train, ya_train.astype(int), "n_estimators",params, cv=3)
train_scoresB, test_scoresB = validation_curve(boosterB, Xb_train, yb_train.astype(int), "n_estimators",params, cv=3)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
train_scores_meanB = np.mean(train_scoresB, axis=1)
train_scores_stdB = np.std(train_scoresB, axis=1)
test_scores_meanB = np.mean(test_scoresB, axis=1)
test_scores_stdB = np.std(test_scoresB, axis=1)
print(params)
plt.figure(0)
plt.title("Data 1: Validation curve vs. Number of estimators")
plt.xlabel("n_estimators")
plt.ylabel("Score")
lw=2
plt.plot(params, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(params, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.plot(params, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(params, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.savefig('validation_Boosting_A.png')
plt.figure(1)
plt.title("Data 2: Validation curve vs. Number of estimators")
plt.xlabel("n_estimators")
plt.ylabel("Score")
plt.plot(params, train_scores_meanB, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(params, train_scores_meanB - train_scores_stdB,
train_scores_meanB + train_scores_stdB, alpha=0.2,
color="darkorange", lw=lw)
plt.plot(params, test_scores_meanB, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(params, test_scores_meanB - test_scores_stdB,
test_scores_meanB + test_scores_stdB, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.savefig('validationcurves_Boosting_B.png')
clf=AdaBoostClassifier(algorithm='SAMME',learning_rate=1,n_estimators=140)
clf.fit(Xa_train, ya_train.astype(int))
predictions = clf.predict(Xa_test)
print(accuracy_score(ya_test.astype(int),predictions))
print(classification_report(ya_test.astype(int),predictions))
print(confusion_matrix(ya_test.astype(int),predictions))
clf.fit(Xb_train, yb_train.astype(int))
predictions = clf.predict(Xb_test)
print(accuracy_score(yb_test.astype(int),predictions))
print(classification_report(yb_test.astype(int),predictions))
print(confusion_matrix(yb_test.astype(int),predictions)) | true |
5cac9717cb78fa03ae3cc9e01a8776069e0d99b0 | Python | gh102003/CipherChallenge2020 | /transposition.py | UTF-8 | 740 | 3.25 | 3 | [] | no_license | ciphertext = input("Enter ciphertext: ")
key = input("enter decryption key: ")
def clean_key(key):
    """Normalize a transposition key into a list of column numbers.

    Each character is taken as a literal digit when possible, otherwise as
    a letter mapped to its 1-based alphabet position (case-insensitive):
    "B1c" -> [2, 1, 3].

    Fix: only ``ValueError`` (the failure mode of ``int``) is caught; the
    old bare ``except:`` also swallowed KeyboardInterrupt/SystemExit and
    could mask unrelated bugs.
    """
    out = []
    for c_in in key:
        try:
            c_out = int(c_in)
        except ValueError:
            c_out = ord(c_in.upper()) - 64
        out.append(c_out)
    return out
# --- columnar-transposition decryption driver (runs at import) ---
column_orders = clean_key(key)
# Integer division: trailing ciphertext beyond a whole number of columns
# is silently dropped -- TODO confirm intended.
column_length = len(ciphertext) // len(column_orders)
columns = {}
for i, column_order in enumerate(column_orders):
    column = ciphertext[i * column_length: (i + 1) * column_length]
    columns[column_order] = column
# Reorder the columns by their key number, keeping only the text.
ordered_columns = map(lambda x: x[1], sorted(columns.items(), key=lambda x: x[0]))
plaintext = ""
## read off rows
# NOTE(review): the 3-way unpacking below only works for keys with exactly
# three columns -- confirm that assumption.
for a, b, c in zip(*list(ordered_columns)):
    plaintext += a + b + c
print(plaintext)
| true |
ab063c3ee1cd44b09e7c6b11b62c0222df623c80 | Python | varunhari17/-calculadora-del-sistema | /syscalculator.py | UTF-8 | 6,602 | 2.8125 | 3 | [] | no_license | import tkinter
from tkinter import *
from tkinter import messagebox
val =" "
A = 0
operator = ""
def _append_digit(digit):
    """Append one digit character to the expression and refresh the display.

    Shared implementation for the ten digit buttons below; the original
    repeated this body verbatim in every handler.
    """
    global val
    val = val + digit
    data.set(val)

# One zero-argument handler per digit button (tkinter `command=` targets);
# the public names are unchanged.
def btn_1_isclicked():
    _append_digit("1")

def btn_2_isclicked():
    _append_digit("2")

def btn_3_isclicked():
    _append_digit("3")

def btn_4_isclicked():
    _append_digit("4")

def btn_5_isclicked():
    _append_digit("5")

def btn_6_isclicked():
    _append_digit("6")

def btn_7_isclicked():
    _append_digit("7")

def btn_8_isclicked():
    _append_digit("8")

def btn_9_isclicked():
    _append_digit("9")

def btn_0_isclicked():
    _append_digit("0")
def _begin_operation(symbol):
    """Capture the first operand and the chosen operator, then update the
    display.

    Shared implementation for the four operator buttons below; the
    original repeated this body verbatim in every handler.  ``int(val)``
    raises if the display is empty/blank, exactly as before.
    """
    global A, operator, val
    A = int(val)
    operator = symbol
    val = val + symbol
    data.set(val)

# Public handler names unchanged (tkinter `command=` targets).
def btn_plus_isclicked():
    _begin_operation("+")

def btn_minus_isclicked():
    _begin_operation("-")

def btn_mul_isclicked():
    _begin_operation("*")

def btn_div_isclicked():
    _begin_operation("/")
def c_pressed():
    """Clear the calculator state and reset the display.

    Matches the original reset values exactly: the display/expression goes
    back to a single space and the operator to a space as well.
    """
    global A, operator, val
    val = " "
    A = 0
    operator = " "
    data.set(val)
def result():
    """Evaluate the pending ``A <operator> x`` expression shown in *val*.

    The second operand is parsed from the display string; on success the
    result is pushed to the display and becomes the new *val* so chained
    calculations keep working.  Division is truncated with ``int()`` and
    division by zero pops an error dialog, as before.

    Fixes over the original:
      * the second operand is taken after the LAST occurrence of the
        operator, so a negative first operand (e.g. "-3-2") parses
        correctly (the old ``split(op)[1]`` returned the wrong token);
      * the four copy-pasted branches share one parse/display path;
      * after a division by zero, ``A`` is reset to the integer 0 instead
        of the string " ", which used to poison later arithmetic;
      * pressing "=" with no pending operator is a no-op instead of an
        uncaught exception.
    """
    global A, operator, val
    expression = val
    if operator not in ("+", "-", "*", "/"):
        return
    pos = expression.rfind(operator)
    if pos == -1:
        # Operator no longer present (e.g. "=" pressed twice): do nothing.
        return
    x = int(expression[pos + 1:])
    if operator == "+":
        C = A + x
    elif operator == "-":
        C = A - x
    elif operator == "*":
        C = A * x
    else:
        if x == 0:
            messagebox.showerror("Error","Division By 0 Not Supported")
            A = 0
            val = " "
            data.set(val)
            return
        C = int(A / x)
    data.set(C)
    val = str(C)
# ------------- GUI construction (module level, runs at import) -------------
root = tkinter.Tk()
root.geometry("250x400+300+300")  # fixed 250x400 window placed at (300, 300)
root.resizable(0,0)               # window size is locked
root.title("CalCulator")
# StringVar backing the display label; every handler pushes `val` into it.
data = StringVar()
# Display: bottom-right anchored label bound to `data` via textvariable.
lbl = Label(root, text = "Label", anchor = SE, font = ("verdana", 20), textvariable = data, background = "#ffffff", fg = "#000000",)
lbl.pack(expand = True, fill = "both",)
# Four stacked button rows.
btnrow1 = Frame(root, bg = "#000000")
btnrow1.pack(expand = True, fill = "both",)
btnrow2 = Frame(root)
btnrow2.pack(expand = True, fill = "both",)
btnrow3 = Frame(root)
btnrow3.pack(expand = True, fill = "both",)
btnrow4 = Frame(root)
btnrow4.pack(expand = True, fill = "both",)
# Row 1: 1 2 3 +
btn1 = Button(btnrow1, text = "1", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_1_isclicked,)
btn1.pack(side = LEFT, expand = True, fill = "both",)
btn2 = Button(btnrow1, text = "2", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_2_isclicked,)
btn2.pack(side = LEFT, expand = True, fill = "both",)
btn3 = Button(btnrow1, text = "3", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_3_isclicked,)
btn3.pack(side = LEFT, expand = True, fill = "both",)
btnplus = Button(btnrow1, text = "+", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_plus_isclicked,)
btnplus.pack(side = LEFT, expand = True, fill = "both",)
# Row 2: 4 5 6 -
btn4 = Button(btnrow2, text = "4", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_4_isclicked,)
btn4.pack(side = LEFT, expand = True, fill = "both",)
btn5 = Button(btnrow2, text = "5", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_5_isclicked,)
btn5.pack(side = LEFT, expand = True, fill = "both",)
btn6 = Button(btnrow2, text = "6", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_6_isclicked,)
btn6.pack(side = LEFT, expand = True, fill = "both",)
btnminus = Button(btnrow2, text = " - ", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_minus_isclicked,)
btnminus.pack(side = LEFT, expand = True, fill = "both",)
# Row 3: 7 8 9 *
btn7 = Button(btnrow3, text = "7", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_7_isclicked,)
btn7.pack(side = LEFT, expand = True, fill = "both",)
btn8 = Button(btnrow3, text = "8", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_8_isclicked)
btn8.pack(side = LEFT, expand = True, fill = "both",)
btn9 = Button(btnrow3, text = "9", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_9_isclicked,)
btn9.pack(side = LEFT, expand = True, fill = "both",)
btnmul = Button(btnrow3, text = "*", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_mul_isclicked,)
btnmul.pack(side = LEFT, expand = True, fill = "both",)
# Row 4: C 0 = /
btnc = Button(btnrow4, text = "C", font = ("verdana", 22), relief = GROOVE, border = 0, command = c_pressed,)
btnc.pack(side = LEFT, expand = True, fill = "both",)
btn0 = Button(btnrow4, text = "0", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_0_isclicked,)
btn0.pack(side = LEFT, expand = True, fill = "both",)
btnequ = Button(btnrow4, text = "=", font = ("verdana", 22), relief = GROOVE, border = 0, command = result,)
btnequ.pack(side = LEFT, expand = True, fill = "both",)
btndiv = Button(btnrow4, text = "/", font = ("verdana", 22), relief = GROOVE, border = 0, command = btn_div_isclicked,)
btndiv.pack(side = LEFT, expand = True, fill = "both",)
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
| true |
bd698800dc9655c7f8c0db11ee57accf60786d0d | Python | shixing/CDS | /py/corpus/word2phrase.py | UTF-8 | 4,368 | 2.859375 | 3 | [] | no_license | # 1 POS tags
# 2 Scan A + N, find top 50K phrases
# replace top 50K phrases
import sys,os
import nltk
import cPickle
from utils.config import get_config
import configparser
import logging
def pos_tagging(file_in, file_out):
    """POS-tag *file_in* line by line into *file_out*.

    Each input line is sentence-tokenized with nltk; the space-joined POS
    tags of all its sentences are written as one output line, keeping the
    two files line-aligned.  Progress is logged every 1000 lines.
    """
    source = open(file_in)
    sink = open(file_out, 'w')
    processed = 0
    for line in source:
        for sentence in nltk.sent_tokenize(line):
            tagged = nltk.pos_tag(sentence.split())
            sink.write(' '.join(tag for _, tag in tagged) + ' ')
        sink.write('\n')
        processed += 1
        if processed % 1000 == 0:
            logging.info('Tagging #{}'.format(processed))
    source.close()
    sink.close()
def top_AN(text_file, pos_file, dict_file):
    """Collect adjective+noun bigram counts and save them sorted by count.

    *text_file* and *pos_file* must be line-aligned (one POS tag per
    token); an AssertionError is raised otherwise, after printing debug
    context.  Output format: one ``adjective_noun count`` line per phrase,
    most frequent first (ties broken by the phrase tuple, descending).

    Fix: the original used Python 2 ``print`` statements and ``xrange``,
    which are a SyntaxError / NameError on Python 3 -- and this module
    already imports the Python-3-only ``configparser``.
    """
    adjective_tags = {'JJ', 'JJS', 'JJR'}
    noun_tags = {'NN', 'NNS'}
    phrase_dict = {}
    fpos = open(pos_file)
    k = 0
    for line in open(text_file):
        pos = fpos.readline().strip().split()
        text = line.strip().split()
        if len(text) != len(pos):
            # Debug context before the assertion below fires.
            print(line)
            print(pos)
            print(len(text), len(pos))
            print(k)
        assert len(text) == len(pos)
        for i in range(len(pos) - 1):
            if pos[i] in adjective_tags and pos[i + 1] in noun_tags:
                phrase = (text[i], text[i + 1])
                phrase_dict[phrase] = phrase_dict.get(phrase, 0) + 1
        k += 1
        if k % 10000 == 0:
            logging.info('Collecting phrases #{}'.format(k))
    fpos.close()
    logging.info('Sorting {} phrases'.format(len(phrase_dict)))
    phrases = sorted(((count, key) for key, count in phrase_dict.items()), reverse=True)
    logging.info('Saving {} phrases'.format(len(phrase_dict)))
    fout = open(dict_file, 'w')
    for count, (first, second) in phrases:
        fout.write(first + '_' + second + ' ' + str(count) + '\n')
    fout.close()
def load_dict(dict_file, topn):
    """Read at most *topn* leading 'phrase count' lines from *dict_file*
    into a {phrase: count} dict (the file is sorted most-frequent-first)."""
    phrases = {}
    loaded = 0
    for entry in open(dict_file):
        fields = entry.strip().split()
        phrases[fields[0]] = int(fields[1])
        loaded += 1
        if loaded >= topn:
            break
    return phrases


def replace_phrase(file_word, file_phrase, file_dict, topn):
    """Rewrite *file_word* into *file_phrase*, merging known adjacent word
    pairs into single ``word1_word2`` tokens.

    The phrase vocabulary is the top *topn* entries of *file_dict*.  A
    left-to-right greedy scan merges non-overlapping pairs; whenever a line
    had at least one merge, the original line is written right after the
    rewritten one, so both variants are kept for downstream training.
    Progress is logged every 10000 lines.
    """
    reader = open(file_word)
    writer = open(file_phrase, 'w')
    phrase_dict = load_dict(file_dict, topn)
    processed = 0
    for line in reader:
        words = line.strip().split()
        merged = []
        replaced = False
        idx = 0
        total = len(words)
        while idx < total:
            if idx == total - 1:
                merged.append(words[idx])
                break
            candidate = words[idx] + '_' + words[idx + 1]
            if candidate in phrase_dict:
                merged.append(candidate)
                replaced = True
                idx += 2
            else:
                merged.append(words[idx])
                idx += 1
        writer.write(' '.join(merged) + '\n')
        if replaced:
            writer.write(line)
        processed += 1
        if processed % 10000 == 0:
            logging.info('Replacing #{}'.format(processed))
    writer.close()
def test_pos_tagging():
    """Manual end-to-end smoke test of the phrase pipeline.

    NOTE(review): paths are hard-coded to a developer machine, the
    POS-tagging step is commented out (assumes 100.pos.combine already
    exists), and nothing is asserted -- this is a run-by-hand script, not
    an automated test.
    """
    ftext = '/Users/xingshi/Workspace/misc/CDS/data/100.text.combine'
    fpos = '/Users/xingshi/Workspace/misc/CDS/data/100.pos.combine'
    fdict = '/Users/xingshi/Workspace/misc/CDS/data/100.phrase.dict'
    fphrase = '/Users/xingshi/Workspace/misc/CDS/data/100.phrase'
    #pos_tagging(ftext,fpos)
    top_AN(ftext,fpos,fdict)
    replace_phrase(ftext,fphrase,fdict,50000)
def main():
    """Pipeline driver: read the config path from argv[1], then collect
    and apply adjective-noun phrases over the short-abstracts corpus."""
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    config_fn = sys.argv[1]
    config = get_config(config_fn)
    ftext = config.get('path','short_abstracts_text')
    fpos = config.get('path','short_abstracts_pos')
    # Derived artifact paths live next to the text corpus.
    fdict = ftext + '.phrase.dict'
    fphrase = ftext + '.phrase'
    # POS-tagging step disabled: assumes fpos was produced in an earlier run.
    #logging.info('POS tagging...')
    #pos_tagging(ftext,fpos)
    logging.info('collecting phrases...')
    top_AN(ftext,fpos,fdict)
    logging.info('replacing phrases...')
    replace_phrase(ftext,fphrase,fdict,50000)
if __name__ == '__main__':
    # test_pos_tagging()
    main()
| true |
500c0b02f97523c037576a15c041405bf2908ec4 | Python | Y-Suzaki/python-alchemy | /python-alchemy/src/sqlalchemy_writer_test.py | UTF-8 | 2,018 | 2.609375 | 3 | [] | no_license | import unittest
from sqlalchemy_writer import SqlAlchemyWriter
from model.skill import Skill
from model.engineer import Engineer
from model.engineer_skill import EngineerSkill
class SqlAlchemyWriterTest(unittest.TestCase):
    """Integration tests for SqlAlchemyWriter against a live database.

    NOTE(review): these are not isolated unit tests -- they mutate shared
    DB state, rely on delete-before-insert cleanup inside each test
    instead of setUp/tearDown fixtures, and contain no assertions (they
    only fail if a call raises).
    """
    def test_skill_all(self):
        # Link rows reference these via foreign keys, so delete them first.
        SqlAlchemyWriter.remove_engineer_skill('00001', '00001')
        SqlAlchemyWriter.remove_engineer_skill('00001', '00002')
        SqlAlchemyWriter.remove_skill(id='00001')
        SqlAlchemyWriter.remove_skill(id='00002')
        SqlAlchemyWriter.remove_skill(id='00003')
        # Exercise add -> update -> link for skills.
        SqlAlchemyWriter.add_skill(Skill(id='00001', name='python3'))
        SqlAlchemyWriter.add_skill(Skill(id='00002', name='java'))
        SqlAlchemyWriter.add_skill(Skill(id='00003', name='AWS'))
        SqlAlchemyWriter.update_skill(id='00001', name='python2')
        SqlAlchemyWriter.update_skill(id='00002', name='java1.8')
        SqlAlchemyWriter.add_engineer_skill(EngineerSkill(engineer_id='00001', skill_id='00001'))
        SqlAlchemyWriter.add_engineer_skill(EngineerSkill(engineer_id='00001', skill_id='00002'))
    def test_engineer_all(self):
        # Link rows reference these via foreign keys, so delete them first.
        SqlAlchemyWriter.remove_engineer_skill('00001', '00001')
        SqlAlchemyWriter.remove_engineer_skill('00001', '00002')
        SqlAlchemyWriter.remove_engineer(id='00001')
        SqlAlchemyWriter.remove_engineer(id='00002')
        # Exercise add -> update -> link for engineers.
        SqlAlchemyWriter.add_engineer(Engineer(id='00001', name='tanaka', age=37))
        SqlAlchemyWriter.add_engineer(Engineer(id='00002', name='hayashi', age=25))
        SqlAlchemyWriter.update_engineer(id='00001', name='tanaka', age=38)
        SqlAlchemyWriter.add_engineer_skill(EngineerSkill(engineer_id='00001', skill_id='00001'))
        SqlAlchemyWriter.add_engineer_skill(EngineerSkill(engineer_id='00001', skill_id='00002'))
if __name__ == "__main__":
unittest.main() | true |
e9b84b5aa87c97f3af8cb4a79a16a1ff5793af14 | Python | priyankakushi/machine-learning | /028_11_19 OOPS.py | UTF-8 | 1,852 | 4.25 | 4 | [
"CC-BY-3.0"
] | permissive | #create a class named MyClass
'''class MyClass:
#assign the values to the MyClass attributs
number = 0
name = "abc"
def Main():
#Creating an object of the MyClass. Here, "me" is the object
me = MyClass()
#Accessing the attributes of MyClass using the dot(.)operator
me.number = 1337
me.name
#str is an build- in function that creates an string
print(me.name + " " + str(me.number))
if __name__=="__main__":
Main()
class Student:
def __init__(self, name, roll_no):
self.name = name
self.roll_no = roll_no
#object of Laptop class(Inner class)
#self.lap = self.Laptop()
def show(self):
print(self.name, self.roll_no)
#self.lap.display()
class Laptop:
def __init__(self):
self.brand = "sony_Vaio"
self.cpu = "i5"
self.ram = "8 GB"
def display(self,brand,cpu,ram):
self.brand = brand
self.cpu = cpu
self.ram = ram
print(self.brand, self.cpu, self.ram)
s1 = Student("Akshay", 2)
s2 = Student("Rajat", 10)
print()
s1.show()
s2.show()
lap1 = Student.Laptop()
print()
lap1.display('HP', 'i5', '16 GB')
class Animal:
def speak(self):
print("Animal Speaking")
class Dog(Animal):
def bark(self):
print("dog barking")
d = Dog()
d.bark()
d.speak()'''
#create a class named MyClass
class MyClass:
    """Tiny demo class with two class-level attributes."""
    # Defaults shared by all instances until shadowed per-instance.
    number = 0
    name = "abc"


def Main():
    """Create a MyClass instance, override one attribute and print both.

    Fix: the original contained a bare ``me.name`` expression statement --
    a no-op attribute access left over from experimentation -- which has
    been removed.
    """
    me = MyClass()
    me.number = 1337  # instance attribute shadows the class-level default
    # str() converts the number for string concatenation.
    print(me.name + " " + str(me.number))
if __name__=="__main__":
Main()
| true |
af0eb1af682ccbf99d43db230e3a3b64942848f5 | Python | SummerNam/Python-Study | /part03_16.py | UTF-8 | 442 | 3.59375 | 4 | [] | no_license | Python 3.6.1 (v3.6.1:69c0db5, Mar 21 2017, 17:54:52) [MSC v.1900 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> # Nth fibonacci number
>>>
>>> def main():
n = int(input("Enter the value of n: "))
fst, snd = 1,1
for i in range(n-2):
fst, snd = fst+snd ,fst
print("The nth Fibonacci number is", fst)
>>> main()
Enter the value of n: 6
The nth Fibonacci number is 8
>>>
| true |
7597dbe0bceef93ec1bd121b62fcbd28c35b3945 | Python | hculpan/StarTradingCompany | /app.py | UTF-8 | 1,703 | 2.765625 | 3 | [
"MIT"
] | permissive | import pygame
import random
from StarTradingCompany import MainScene
class MainApp:
def main_loop(self, width, height, fps):
random.seed()
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode(
(width, height), pygame.SCALED)
pygame.display.set_caption("Star Trading Company")
clock = pygame.time.Clock()
no_keys_pressed = pygame.key.get_pressed()
active_scene = MainScene.MainScene(width, height)
while active_scene is not None:
# Event filtering
filtered_events = []
for event in pygame.event.get():
pressed_keys = no_keys_pressed
quit_attempt = False
if event.type == pygame.QUIT:
quit_attempt = True
elif event.type == pygame.KEYDOWN:
pressed_keys = pygame.key.get_pressed()
alt_pressed = pressed_keys[pygame.K_LALT] or \
pressed_keys[pygame.K_RALT]
if event.key == pygame.K_ESCAPE:
quit_attempt = True
elif event.key == pygame.K_F4 and alt_pressed:
quit_attempt = True
if quit_attempt and active_scene.Terminate():
pygame.quit()
filtered_events.append(event)
active_scene.ProcessInput(filtered_events, pressed_keys)
active_scene.Update()
active_scene.Render(screen)
active_scene = active_scene.next
pygame.display.flip()
clock.tick(fps)
app = MainApp()
app.main_loop(1200, 1071, 30)
| true |
5226b8bf4fb87ab540ffbb8e777b1371c9a740a3 | Python | jackey6/test-repo | /test.py | UTF-8 | 91 | 2.84375 | 3 | [] | no_license | a = 5;
def fun():
a = 10;
print(a)
def conflict():
print(a)
print(a)
fun()
conflict() | true |
6afedb3479a402cb83ffca1ddb78e6cdcd2b3069 | Python | jerrylee529/twelvewin | /analysis/test.py | UTF-8 | 2,704 | 2.921875 | 3 | [] | no_license | # coding=utf8
"""
测试文件
"""
__author__ = 'Administrator'
import numpy as np
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
#x1 = np.array([1, 2, 3, 1, 5, 6, 5, 5, 6, 7, 8, 9, 9])
#x2 = np.array([1, 3, 2, 2, 8, 6, 7, 6, 7, 1, 2, 1, 3])
#x = np.array(list(zip(x1, x2))).reshape(len(x1), 2)
import pandas as pd
df = pd.read_csv("e:/sz50.csv")
df = df.fillna(0.1)
x = df.values
print np.isnan(x).any()
print x
'''
from sklearn.cluster import KMeans
kmeans=KMeans(n_clusters=8) #n_clusters:number of cluster
kmeans.fit(x)
print kmeans.labels_
df = pd.read_csv("e:/sz50_symbol.csv", encoding='gbk')
df.set_index('code', inplace=True)
names = df['name']
i = 0
for name in names.values:
print 'name: %s, label: %d' % (name, kmeans.labels_[i])
i += 1
'''
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
# #############################################################################
# Generate sample data
'''
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
print X
'''
# #############################################################################
# Compute Affinity Propagation
'''precomputed, euclidean'''
af = AffinityPropagation(affinity='precomputed').fit(x)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print labels
df = pd.read_csv("e:/sz50_symbol.csv")
df.set_index('code', inplace=True)
names = df['name']
#print names
i = 0
d = {}
for name in names.values:
if d.has_key(str(labels[i])):
d[str(labels[i])].append(name)
else:
d[str(labels[i])] = []
d[str(labels[i])].append(name)
i += 1
for key, values in d.items():
names = u''
for value in values:
names += value
names += ','
print key, names
'''
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(x, labels, metric='sqeuclidean'))
'''
| true |