blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
de0feafe5b0a43558219baa63f21d4c1cb1ad2c9 | Python | Julinus-LI/pythonNote | /python进阶/02-python高级-2/02-装饰器/04-多个装饰器.py | UTF-8 | 486 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
#define function: wraps the returned data in <b> tags
def makeBold(fn):
    """Decorator: wrap the decorated function's return value in <b>...</b>."""
    def wrapped():
        print("-------1--------")
        inner = fn()
        return "<b>" + inner + "</b>"
    return wrapped
#define function: wraps the returned data in <i> tags
def makeItalic(fn):
    """Decorator: wrap the decorated function's return value in <i>...</i>."""
    def wrapped():
        print("-------2--------")
        inner = fn()
        return "<i>" + inner + "</i>"
    return wrapped
# Decorators apply bottom-up: test1 is wrapped by makeItalic first, then
# makeBold, so calling test1() runs bold wrapper -> italic wrapper -> test1
# and returns "<b><i>hello world-1</i></b>".
@makeBold
@makeItalic
def test1():
    print("-------3--------")
    return "hello world-1"
ret = test1()
print(ret)
| true |
0b4e3fbf79948a464f114446860a160170420a7d | Python | sofcha23/novel-blood-vessel-segmentation-for-retinal-fundus-image | /accuracy.py | UTF-8 | 5,424 | 2.53125 | 3 | [] | no_license | import cv2
import os
import numpy as np
from matplotlib import pyplot as plt
def deviation_from_mean(image):
    """Local-contrast feature map: CLAHE-equalise `image`, then subtract each
    (up to) 5x5 tile's mean from that tile.

    Assumes a single-channel 8-bit image as required by CLAHE -- TODO
    confirm at call sites.  Returns a signed int array of deviations.
    """
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    clahe_output = clahe.apply(image)
    print(clahe_output)  # debug print left in by the author
    result = clahe_output.copy()
    # int copy so the subtraction below can hold negative values
    result = result.astype('int')
    i = 0
    j = 0
    # Walk the image in non-overlapping 5x5 tiles (edge tiles may be smaller).
    while i < image.shape[0]:
        j = 0
        while j < image.shape[1]:
            sub_image = clahe_output[i:i+5,j:j+5]
            mean = np.mean(sub_image)
            sub_image = sub_image - mean
            result[i:i+5,j:j+5] = sub_image
            j = j+5
        i = i+5
    return result
def calC_accuracy(result, label):
    """Pixel-wise segmentation metrics of `result` against mask `label`.

    Pixels with label value 255 are the positive (vessel) class; everything
    else is background.  Prints TN/FP/FN/TP, sensitivity, specificity,
    accuracy and PPV (each denominator gets +1 to avoid division by zero)
    and returns the PPV as a float.

    BUG FIX: the original subtracted a hard-coded 19000 from the
    false-positive count (`fp = fp - 19000`), silently inflating
    specificity/accuracy/PPV; that fudge factor has been removed.  The
    per-pixel Python loops were also replaced with vectorized numpy
    comparisons.
    """
    result = np.asarray(result)
    label = np.asarray(label)
    print(np.unique(result))
    print(np.unique(label))
    vessel = label == 255        # positive class: ground-truth vessel pixels
    match = result == label      # prediction agrees with ground truth
    tp = int(np.count_nonzero(match & vessel))
    fn = int(np.count_nonzero(~match & vessel))
    tn = int(np.count_nonzero(match & ~vessel))
    fp = int(np.count_nonzero(~match & ~vessel))
    print("TN =",tn,"FP =",fp)
    print("FN =",fn,"TP =",tp)
    print("Sensitivity = ",float(tp/(tp+fn+1)))
    print("Specificity = ",float(tn/(tn+fp+1)))
    print("Accuracy = ",float((tn+tp)/(fn+fp+1+tn+tp)))
    print("PPV = ",float(tp/(tp+fp+1)))
    return float(tp/(tp+fp+1))
if __name__ == "__main__":
    # End-to-end vessel-extraction demo for one DRIVE fundus image.
    image = cv2.imread('22_training.tif')
    kernel = np.ones((3,3), np.uint8)
    kernel1 = np.ones((15,15), np.uint8)
    kernel2 = np.ones((7,7), np.uint8)   # NOTE(review): unused below
    kernel3 = np.ones((1,1), np.uint8)   # NOTE(review): unused below
    imgcpy = image.copy()
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    # set blue and red channels to 0
    imgcpy[:, :, 0] = 0
    imgcpy[:, :, 2] = 0
    # Keep only the (inverted) green channel, where vessels contrast best.
    img_grey = cv2.cvtColor(imgcpy, cv2.COLOR_BGR2GRAY)
    img_grey=(255-img_grey)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    clahe1 = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
    cl1 = clahe.apply(img_grey)
    cl1 = cv2.dilate(cl1,kernel,iterations = 1)
    #cv2.imshow("op3",cl1)
    # Background estimate via erosion + blur; subtracting it leaves thin
    # bright structures (candidate vessels).
    erosion = cv2.erode(cl1,kernel1,iterations = 1)
    #erosion=cv2.GaussianBlur(erosion,(5,5),0)
    erosion=cv2.blur(erosion,(5,5),0)
    x=cv2.subtract(cl1,erosion)
    x = cv2.medianBlur(x,5)
    cv2.imshow("x",x)
    ret,xt = cv2.threshold(x,18,255,cv2.THRESH_TOZERO)
    cv2.imshow("xt",xt)
    # Iterated top-hat-style refinement: repeatedly remove the morphological
    # opening (large structures) and accumulate the thin residue.
    x0 = cv2.morphologyEx(x, cv2.MORPH_OPEN, kernel1)
    #cv2.imshow("op1",x0)
    x1=cv2.subtract(x,x0)
    cv2.imshow("x2",x1)
    x10=cv2.morphologyEx(x1, cv2.MORPH_OPEN, kernel1)
    x2=cv2.subtract(x1,x10)
    cv2.imshow("x22",x2)
    plt.hist(x2.ravel(),256,[0,256]); plt.show()
    x3 =cv2.add(x1,x2)
    cv2.imshow("x23",x3)
    x20=cv2.morphologyEx(x2, cv2.MORPH_OPEN, kernel1)
    x4=cv2.subtract(x3,x20)
    x5=cv2.add(x2,x4)
    x30=cv2.morphologyEx(x3, cv2.MORPH_OPEN, kernel1)
    x6=cv2.subtract(x5,x30)
    x40=cv2.morphologyEx(x4, cv2.MORPH_OPEN, kernel1)
    x7=cv2.subtract(x6,x40)
    cv2.imshow("x7",x7)
    ret,thresh4 = cv2.threshold(x7,15,255,cv2.THRESH_TOZERO)
    '''
    mask = np.ones(x7.shape[:2], dtype="uint8") * 125
    im2, contours, hierarchy = cv2.findContours(x7.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(x7, x7, mask=mask)
    ret,fin = cv2.threshold(im,40,255,cv2.THRESH_TOZERO)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)), iterations=1)
    res=cv2.add(newfin,newfin)
    thresh4 = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
    '''
    # Classify contours by shape: small many-sided blobs ("circle") are
    # treated as noise and masked out; the rest are kept as veins.
    fundus_eroded = cv2.bitwise_not(x7)
    xmask = np.ones(x7.shape[:2], dtype="uint8") * 255
    x15, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 10:
            shape = "circle"
            print("h")
        else:
            shape = "veins"
        if(shape=="circle"):
            cv2.drawContours(xmask, [cnt], -1, 0, -1)
    finimage = cv2.bitwise_and(fundus_eroded,fundus_eroded,mask=xmask)
    blood_vessels = cv2.bitwise_not(finimage)
    b1=cv2.add(blood_vessels,x7)
    b2=cv2.add(b1,x2)
    # NOTE(review): the first threshold is immediately overwritten by the
    # binary one on the next line.
    ret,thresh41 = cv2.threshold(b2,65,255,cv2.THRESH_TOZERO)
    ret,thresh41 = cv2.threshold(b2,65,255,cv2.THRESH_BINARY)
    plt.hist(thresh41.ravel(),256,[0,256]); plt.show()
    #thresh41=255-thresh41
    cv2.imshow("blood",thresh41)
    #cv2.imwrite('template.tiff',thresh41)
    ik=cv2.imread("01_test_mask1.tiff")
    ik = cv2.cvtColor(ik, cv2.COLOR_BGR2GRAY)
    ik.fill(255)
    cv2.imshow("blood45",ik)
    m2=cv2.bitwise_xor(ik,thresh41)
    #m22 = cv2.bitwise_not(m2)
    print(thresh41.shape,ik.shape)
    cv2.imshow("blood5",m2)
    # Score both the saved template and the fresh result against the
    # manual ground truth (return values p/p2 are unused beyond printing).
    res1=cv2.imread("template.tiff")
    res1 = cv2.cvtColor(res1, cv2.COLOR_BGR2GRAY)
    res=cv2.imread("22_manual1.tiff")
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    p=calC_accuracy(res1, res)
    p2=calC_accuracy(thresh41, res)
    cv2.waitKey(0)
7664308ea89219b327df2cfd90fc9c59a3e3f725 | Python | redoctopus/Sentiment-Analysis-Book-Reviews-2014 | /ReadFromFileTest.py | UTF-8 | 3,212 | 3.609375 | 4 | [] | no_license | ## Jocelyn Huang
## Sentiment Analysis Project
## 02.06.2014
## Read from text file
import string
import sys
def removePunctuation(s):
    """Lower-case `s`, turning '-', '.', '/' into spaces and deleting all
    other punctuation characters."""
    for sep in ("-", ".", "/"):
        s = s.replace(sep, " ")
    # Map every remaining punctuation character to None (i.e. delete it).
    table = {ord(ch): None for ch in string.punctuation}
    return s.translate(table).lower()
#*******************************************************
# Score a review (passed as argv[1]) on a 0-100 sentiment scale using a
# word->score dictionary file, with simple negation ("not") and
# intensifier ("very") handling.
overall_rating = 0
negated = 0
veried = 0
length = 0
three_letter_words = ['top', 'add', 'who', 'hit', 'all', 'tad', 'why', 'fan', 'art', 'out', 'new', 'old', 'let', 'put', 'mad', 'odd',"bad", "not"] # To keep
notList = ["not", "didnt", "couldnt", "dont", "cant", "wouldnt", "wasnt"]
veryList = ["very", "really", "extremely", "too", "utter", "especially", "so"]
review = sys.argv[1] # Take input string review
#review = removePunctuation(review)
#print("After punctuation removed", review)
#print(review)
f = open('TestDictionary6.txt', "r") # The dictionary text file
entries = [entry for entry in f.read().split('\n')]
entries.pop() # Gets rid of '' entry
#print(entries)
#dictionary = {entry[0:len(entry)-3]: int(entry[len(entry)-2:]) for entry in entries}
# Each line is "<word> <3-digit score>": split on fixed widths.
dictionary = {entry[0:len(entry)-4]: int(entry[len(entry)-3:]) for entry in entries} # Form {"word": int}
#print(dictionary)
review_words = [elt for elt in review.split(' ')]
#print("review words: ", review_words)
length = len(review_words)
#=================================================================================
# Removes unneeded words (though not ones like "book", "this"-- the special cases)
#
#Where words in the review are actually checked
for elt in review_words:
    # Special cases
    if(elt in notList):
        length -= 1
        negated = 1
        #overall_rating += 50
        continue
    if(elt in veryList):
        length -= 1
        veried += 1
        #overall_rating += 50
        continue
    # Check for excluded
    if(elt not in dictionary):
        length -= 1
        ######print("---There is no ", elt)
        continue
    addition = dictionary[elt]
    # Combine pending "very"/"not" modifiers with this word's base score.
    if(veried != 0 and negated == 1):
        if(dictionary[elt] > 50): addition = 100-(addition*0.8)
        else: addition = 100-addition*1.5
        veried = 0
        negated = 0
    elif(veried != 0):
        if(dictionary[elt] >= 50): addition = addition*(1.5**veried)
        else: addition = addition*(0.5**veried)
        veried = 0
    elif(negated == 1):
        addition = 100-addition
        negated = 0
    #######print(elt, addition)
    # Adjust to fit bounds (primative)
    if(addition < 0): addition = 0
    if(addition > 100): addition = 100
    overall_rating += addition
#######print("Overall rating is: ", overall_rating, "length is ", length)
if(length == 0):
    print("No usable words, sorry.")
    overall_rating = 50
else:
    overall_rating = overall_rating/(length)
#print(review_words)
print("******************")
#print(overall_rating)
print("{0:3.1f}".format(overall_rating))
if(overall_rating > 80): print("That's extremely positive :D")
elif(overall_rating > 55): print("That's positive. :)")
elif(overall_rating < 20): print("That's extremely negative :'(")
elif(overall_rating < 45): print("That's negative. :(")
else: print("Neutral :I")
#
print("******************")
| true |
6285261686fb2d4f24a9814c8a25a52f37b163ac | Python | Juancho1007/practicas | /division paso a paso.py | UTF-8 | 1,217 | 3.828125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 20 10:17:47 2021
@author: Juan David
"""
"""
Spyder Editor
This is a temporary script file.
"""
a=(input("Introduce el dividendo: ") )
b=(input("Introduce el divisor: ") )
A=int(a)
B=int(b)
if A<B : #si el dividendo es menor que el divisor
c=A//B
r=A-B*c
print("el residuo es: ", r, " y el cociente es: ", c)
else:
lista_a = [ ]
lista_b=[ ]
while A>0:
m=A%10
A=A/10
lista_a.append(m)
lista_b.reverse()
while B>0:
n=B%10
B=B/10
lista_b.append(n)
lista_b.reverse()
digitos_a=len(lista_a)
digitos_b=int(len(lista_b))
partir_a= a[:(digitos_b) ]
print(digitos_a)
print(partir_a)
##print("6. pntenciacion")
##opcion=int(input())
##if opcion==1:
## print("la suma es: ",a+b)
##elif opcion==2:
## print("la resta es: ", a-b)
##elif opcion==3:
# print("la mulptiplicacion es: ", a*b)
#elif opcion==4:
# print("la division es:", a/b)
#elif opcion==5:
# A=int(a)
# B=int(b)
# c=A//B
# r=A-B*c
# print("el residuo es:", r)
#elif opcion==6:
# print("la poteciacion es:", a**b) | true |
417f6eaa490dcdc0cb443a66d60b548a6449d08f | Python | xuzhuo77/WorkSpace-FrameWork | /zhak_projects/agame/map/connected_domain.py | UTF-8 | 2,436 | 2.796875 | 3 | [] | no_license | class ConnectedDomainMap():
    def __init__(self, width, height):
        # Grid dimensions: `map` holds `height` rows of `width` columns.
        self.width = width
        self.height = height
        # Cell values: 0 is empty; showMap renders 1/2/3 as #, &, T and
        # anything else as X.
        self.map = [[0 for x in range(self.width)] for y in range(self.height)]
def showMap(self):
for row in self.map:
s = ''
for entry in row:
if entry == 0:
s += '..'
elif entry == 1:
s += ' #'
elif entry == 2:
s += ' &'
elif entry == 3:
s += ' T'
else:
s += ' X'
print(s)
# map=ConnectedDomainMap(20,10)
# map.showMap()
def choice(nextv, sideW, sideH, arealist):
    """Pick, uniformly at random, one of the four candidate cells in `nextv`
    that lies inside the sideH x sideW grid and does not already appear in
    `arealist`.  Raises IndexError when no candidate qualifies."""
    rows_ok = (nextv[:, 0] > -1) & (nextv[:, 0] < sideH)
    cols_ok = (nextv[:, 1] > -1) & (nextv[:, 1] < sideW)
    # A candidate is unvisited when it differs from every row of arealist.
    unvisited = np.array([(cand != arealist).any(1).all() for cand in nextv])
    valid = np.argwhere(rows_ok & cols_ok & unvisited)
    np.random.shuffle(valid)
    return nextv[valid[0]]
import numpy as np
def random_patch(area, type, size, map, area_list=None):
    """Random-walk `area` cells of value `type` onto the size x size `map`.

    The walk starts at a random cell and steps to a random in-bounds,
    not-yet-visited neighbour (via `choice`); when boxed in, it restarts
    from a random still-empty cell.  Returns the mutated map and the
    accumulated (N, 2) array of visited cells.

    Note: `type` and `map` shadow builtins; the names are kept for
    interface compatibility with existing callers.

    BUG FIX: the starting cell was always painted 1 regardless of `type`.
    The bare `except:` around the step is narrowed to IndexError (what
    `choice` raises when no neighbour qualifies).
    """
    startPoint = np.random.randint(0, size, size=(2))
    map[startPoint[0], startPoint[1]] = type
    if area_list is None:
        area_list = np.array([startPoint])
    currentPoint = startPoint
    area -= 1
    while area > 0:
        candidates = currentPoint + np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
        try:
            currentPoint = choice(candidates, size, size, area_list)
        except IndexError:
            # Walk is boxed in: restart from a random still-empty cell.
            newstart = np.argwhere(map == 0)
            np.random.shuffle(newstart)
            currentPoint = np.atleast_2d(newstart[0])
        area_list = np.vstack((area_list, currentPoint))
        map[currentPoint[0][0], currentPoint[0][1]] = type
        area -= 1
    return map, area_list
# Demo: paint four random 200-cell patches (types 1-4) onto a 30x30 grid,
# then display it through ConnectedDomainMap.
size=30
area=200
map = np.zeros((size, size))
map,area_list=random_patch(area,1,size,map)
map,area_list=random_patch(area,2,size,map,area_list)
map,area_list=random_patch(area,3,size,map,area_list)
map,area_list=random_patch(area,4,size,map,area_list)
print(map)
# NOTE(review): viewer is constructed 20x10 but immediately given the
# 30x30 map, so width/height attributes no longer match -- TODO confirm.
c=ConnectedDomainMap(20,10)
# Clear the border rows/columns before display.
map[:,[0,-1]]=0
map[[0,-1]]=0
c.map=map
c.showMap()
f9c243c6875e9c57b1e63cdf9c354a4720edb515 | Python | YEONGGEON/practice_python | /boj/15657.py | UTF-8 | 424 | 2.71875 | 3 | [] | no_license | import sys
# BOJ 15657 "N and M (8)": print all non-decreasing length-M sequences
# drawn (with repetition) from N sorted numbers read on stdin.
N, M = map(int, sys.stdin.readline().split())
llist = list(map(int, sys.stdin.readline().split()))
llist.sort()
select = []
def DFS(a, count):
    # A full selection of M numbers: emit it on one line.
    if count == M:
        for i in select:
            print(i, end=' ')
        print('')
        return
    # Recurse from index i (not i+1) so the same value may repeat,
    # while starting at `a` keeps the sequence non-decreasing.
    for i in range(a,N):
        select.append(llist[i])
        DFS(i,count + 1)
        select.pop()
DFS(0,0)
'''
boj
N and M (8) (15657)
28776KB / 92ms
'''
a62e7452c500bdc7f1edb903e46042b32a08cd87 | Python | AryakRoy/stock-sensei | /prediction.py | UTF-8 | 5,034 | 2.59375 | 3 | [] | no_license | import math
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow.keras.models import model_from_json
import os
class Predictor:
    """Per-ticker LSTM close-price forecaster.

    A model is trained on (or loaded for) the 'Close' column of a price
    DataFrame, then used to predict today's and tomorrow's closing prices.
    Models are persisted under ./models/<ticker>.json (architecture) and
    ./models/<ticker>.h5 (weights).

    Refactor: the prediction logic that was duplicated verbatim between
    create_new_model and load_existing_model now lives in _forecast.
    """

    def __init__(self, traverser):
        # traverser.traverse(ticker) -> True when a saved model exists on disk
        self.traverser = traverser

    @staticmethod
    def _model_paths(ticker):
        """Return (architecture_json_path, weights_h5_path) for `ticker`."""
        base = os.path.join(os.getcwd(), "models")
        return os.path.join(base, f"{ticker}.json"), os.path.join(base, f"{ticker}.h5")

    @staticmethod
    def _forecast(model, data, scaler):
        """Predict tomorrow's and today's close and report today's actual.

        `data` is the one-column 'Close' DataFrame; `scaler` must already be
        fitted on the full closing-price history.  Returns the tuple
        (tomorrow_pred, today_pred, today_actual), each rounded to 2 decimals.
        """
        def as_batch(window):
            # Scale a (60, 1) window and shape it (1, 60, 1) for the LSTM.
            scaled = scaler.transform(window)
            batch = np.array([scaled])
            return np.reshape(batch, (batch.shape[0], batch.shape[1], 1))

        x_tomorrow = as_batch(data[-60:].values)    # last 60 closes
        x_today = as_batch(data[-61:-1].values)     # the 60 closes before today
        tomorrow_pred = scaler.inverse_transform(model.predict(x_tomorrow))
        today_pred = scaler.inverse_transform(model.predict(x_today))
        today_actual = data.iloc[-1].values
        return (round(float(tomorrow_pred[0][0]), 2),
                round(float(today_pred[0][0]), 2),
                round(today_actual[0], 2))

    def create_new_model(self, df, ticker):
        """Train a fresh LSTM on `df`'s closes, persist it, and forecast."""
        data = df.filter(['Close'])
        dataset = data.values
        training_data_len = math.ceil(len(dataset) * 0.8)
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = scaler.fit_transform(dataset)

        # Sliding 60-day windows over the first 80% of the history.
        train_data = scaled_data[0:training_data_len, :]
        x_train, y_train = [], []
        for i in range(60, len(train_data)):
            x_train.append(train_data[i - 60:i, 0])
            y_train.append(train_data[i, 0])
        x_train, y_train = np.array(x_train), np.array(y_train)
        x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))

        model = Sequential()
        model.add(LSTM(50, return_sequences=True, input_shape=(x_train.shape[1], 1)))
        model.add(LSTM(50, return_sequences=False))
        model.add(Dense(25))
        model.add(Dense(1))
        model.compile(optimizer='adam', loss='mean_squared_error')
        model.fit(x_train, y_train, batch_size=1, epochs=1)

        # Persist architecture and weights, creating ./models on first use.
        json_path, weights_path = self._model_paths(ticker)
        model_json = model.to_json()
        try:
            with open(json_path, "w") as json_file:
                json_file.write(model_json)
        except FileNotFoundError:
            os.mkdir(os.path.join(os.getcwd(), "models"))
            with open(json_path, "w") as json_file:
                json_file.write(model_json)
        model.save_weights(weights_path)

        return self._forecast(model, data, scaler)

    def load_existing_model(self, df, ticker):
        """Load the persisted model for `ticker` and forecast on `df`."""
        json_path, weights_path = self._model_paths(ticker)
        with open(json_path, "r") as json_file:
            model_json = json_file.read()
        model = model_from_json(model_json)
        model.load_weights(weights_path)
        model.compile(optimizer='adam', loss='mean_squared_error')

        data = df.filter(['Close'])
        scaler = MinMaxScaler(feature_range=(0, 1))
        # Fit only (the transform result was discarded in the original):
        # this reproduces the scaling the model was trained with.
        scaler.fit(data.values)
        return self._forecast(model, data, scaler)

    def predict(self, df, ticker):
        """Return (tomorrow_pred, today_pred, today_actual) for `ticker`,
        training and saving a new model when none exists yet."""
        if self.traverser.traverse(ticker):
            return self.load_existing_model(df, ticker)
        return self.create_new_model(df, ticker)
db0d0e8cd65879f9fa02269b9bd32e1e8bd7e01c | Python | EdikCarlos/Exercicios_Python_intermediario | /ex_Lista_ordenada_sem_repetições.py | UTF-8 | 516 | 3.890625 | 4 | [] | no_license | lista = []
# Read 5 numbers and insert each one into `lista` so the list stays sorted
# ascending, reporting the insertion position in Portuguese.
for c in range(0,5):
    v = int(input('Digite um número: '))
    if c == 0:
        # First value: list is empty, just append.
        lista.append(v)
        print('Número adicionado à posição 1')
    elif v > lista[-1]:
        # Larger than the current maximum: append at the end.
        lista.append(v)
        print('Número adicionado ao final da lista.')
    else:
        # Linear scan for the first element >= v and insert before it.
        pos = 0
        while pos < len(lista):
            if v <= lista[pos]:
                lista.insert(pos, v)
                print(f'Número adicionado à posição {pos+1}')
                break
            pos += 1
print(lista)
| true |
95618dd6d0b151d8ad70560b7767d9b1b11e4f8b | Python | ailiasi/Drafter | /data_processing.py | UTF-8 | 4,032 | 2.703125 | 3 | [] | no_license | import numpy as np
import pandas as pd
# TODO: Use the Enum class to encode heroes, maps and modes
HEROES = {'Abathur': 0, 'Alarak': 1, 'Alexstrasza': 2, 'Ana': 3, "Anub'arak": 4,
'Artanis': 5, 'Arthas': 6, 'Auriel': 7, 'Azmodan': 8, 'Blaze': 9,
'Brightwing': 10, 'Cassia': 11, 'Chen': 12, 'Cho': 13, 'Chromie': 14,
'D.Va': 15, 'Deckard': 16, 'Dehaka': 17, 'Diablo': 18, 'E.T.C.': 19,
'Falstad': 20, 'Fenix': 21, 'Gall': 22, 'Garrosh': 23, 'Gazlowe': 24,
'Genji': 25, 'Greymane': 26, "Gul'dan": 27, 'Hanzo': 28, 'Illidan': 29,
'Imperius': 30, 'Jaina': 31, 'Johanna': 32, 'Junkrat': 33, "Kael'thas": 34,
"Kel'Thuzad": 35, 'Kerrigan': 36, 'Kharazim': 37, 'Leoric': 38, 'Li Li': 39,
'Li-Ming': 40, 'Lt. Morales': 41, 'Lunara': 42, 'Lúcio': 43, 'Maiev': 44,
"Mal'Ganis": 45, 'Malfurion': 46, 'Malthael': 47, 'Medivh': 48, 'Mephisto': 49,
'Muradin': 50, 'Murky': 51, 'Nazeebo': 52, 'Nova': 53, 'Orphea': 54,
'Probius': 55, 'Ragnaros': 56, 'Raynor': 57, 'Rehgar': 58, 'Rexxar': 59,
'Samuro': 60, 'Sgt. Hammer': 61, 'Sonya': 62, 'Stitches': 63, 'Stukov': 64,
'Sylvanas': 65, 'Tassadar': 66, 'The Butcher': 67, 'The Lost Vikings': 68, 'Thrall': 69,
'Tracer': 70, 'Tychus': 71, 'Tyrael': 72, 'Tyrande': 73, 'Uther': 74,
'Valeera': 75, 'Valla': 76, 'Varian': 77, 'Whitemane': 78, 'Xul': 79,
'Yrel': 80, 'Zagara': 81, 'Zarya': 82, 'Zeratul': 83, "Zul'jin": 84}
MAPS = {'Alterac Pass': 0,
'Battlefield of Eternity': 1,
"Blackheart's Bay": 2,
'Braxis Holdout': 3,
'Cursed Hollow': 4,
'Dragon Shire': 5,
'Garden of Terror': 6,
'Hanamura Temple': 7,
'Infernal Shrines': 8,
'Sky Temple': 9,
'Tomb of the Spider Queen': 10,
'Towers of Doom': 11,
'Volskaya Foundry': 12,
'Warhead Junction': 13}
MODES = {'HeroLeague': 0, 'QuickMatch': 1, 'TeamLeague': 2, 'UnrankedDraft':3}
# Column names hero1..hero10; the first five belong to team 0, the last
# five to team 1 (see collect_heroes_per_replay / encode_row).
HEROCOLUMNS = ["hero" + str(i) for i in range(1,11)]
TEAM0 = HEROCOLUMNS[:5]
TEAM1 = HEROCOLUMNS[5:]
def collect_heroes_per_replay(df, hero_field, grouping_fields, team_fields):
    """Pivot per-player rows into one row per replay.

    Groups `df` by `grouping_fields`, expands each group's `hero_field`
    values into hero1..hero10 columns (in group row order), and adds a
    'winner' column holding the `team_fields[0]` value of any row whose
    `team_fields[1]` flag is True.  Returns a flat DataFrame.

    Fix: removed the dead `df_new = pd.DataFrame()` assignment that was
    immediately overwritten.
    """
    # TODO: Make sure that the players are in correct order
    groups = df.groupby(grouping_fields)
    out = groups.agg({hero_field: lambda x: tuple(x)})
    # Expand the 10-hero tuple into hero1..hero10 columns.
    out[["hero" + str(i) for i in range(1, 11)]] = out[hero_field].apply(pd.Series)
    out = out.drop(hero_field, axis=1)
    # Winning team id: team value of any row whose winner flag is True.
    out["winner"] = groups[team_fields].apply(
        lambda g: g[team_fields[0]][g[team_fields[1]] == True].iloc[0])
    return out.reset_index()
def encode_row(row):
    """Encode one replay row into integer token ids.

    Team-1 tokens are offset by 130 from their team-0 counterparts; map
    tokens start at 100, mode tokens at 120, and the winner token is
    260 + winning-team id.  Returns a Series indexed hero1..hero10,
    map0, map1, mode0, mode1, winner.
    """
    tokens = []
    for hero in row[TEAM0]:
        tokens.append(HEROES[hero])
    for hero in row[TEAM1]:
        tokens.append(HEROES[hero] + 130)
    map_id = MAPS[row["game_map"]]
    mode_id = MODES[row["game_type"]]
    tokens += [map_id + 100,
               map_id + 100 + 130,
               mode_id + 120,
               mode_id + 120 + 130,
               260 + row["winner"]]
    names = TEAM0 + TEAM1 + ["map0", "map1", "mode0", "mode1", "winner"]
    return pd.Series(tokens, index=names)
def binary_encode(row):
    """One-hot encode the token ids in `row` into a length-262 0/1 vector,
    returned as a pandas Series."""
    flags = np.zeros(262)
    flags[row] = 1
    return pd.Series(flags)
def read_replays(filename, game_type, game_version):
    """Load replays of one game type whose version string starts with
    `game_version`, sorted by game date, with incomplete rows dropped."""
    raw = pd.read_csv(filename, parse_dates=["game_date"])
    wanted = ((raw["game_type"] == game_type)
              & raw["game_version"].str.startswith(game_version))
    return raw[wanted].sort_values("game_date").dropna()
if __name__ == "__main__":
df = pd.read_csv("data/teams_patch_2.42.0.71449.csv", nrows = 20)
print(df)
grouping_fields = ["id", "game_date", "game_type", "game_map", "region"]
df_new = collect_heroes_per_replay(df,
"players_hero",
grouping_fields,
["players_team", "players_winner"])
print(df_new) | true |
844b0b5e7c31dec1c3d480fc921f4b83c744614b | Python | vynhart/skripsi | /GUI/myfirstgui.py | UTF-8 | 235 | 2.859375 | 3 | [] | no_license | from tkinter import *
top = Tk()

def helloCallBack():
    """Show a hello dialog when the button is pressed."""
    # BUG FIX: `tkMessageBox` is the Python 2 module name and is undefined
    # under `from tkinter import *`; in Python 3 the dialog helpers live in
    # tkinter.messagebox.
    from tkinter import messagebox
    messagebox.showinfo("Hello Python", "Hello World")

# Code to add widgets will go here...
B = Button(top, text="Hello", command=helloCallBack)
B.pack()
top.mainloop()
| true |
86133d1a07999fac284e30deb7b8c10f9cd63405 | Python | jfulghum/practice_thy_algos | /binary_trees/prune_binary_trees.py | UTF-8 | 1,884 | 3.890625 | 4 | [] | no_license |
class TreeNode:
    """Node of a binary tree: a value plus optional left/right children."""

    def __init__(self, value=0, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
def arrayifyTree(root):
    """Return the tree's values in breadth-first (level) order, [] if empty."""
    if root is None:
        return []
    values = []
    frontier = [root]
    while frontier:
        node = frontier.pop(0)
        values.append(node.value)
        if node.left:
            frontier.append(node.left)
        if node.right:
            frontier.append(node.right)
    return values
def prune(root, x):
    """Detach every subtree whose root holds value x.

    Returns the (possibly modified) root, or None when the root itself
    holds x (matching the original's early bare `return`).  Children equal
    to x are detached before they are visited, so only the shallowest x on
    each path matters.

    Cleanup: the original's dead `curr = None` assignment before its early
    return is gone, and the root check is hoisted out of the loop (it could
    only ever fire for the root, since matching children are never pushed).
    """
    if root is None:
        return root
    if root.value == x:
        # The whole tree is pruned away.
        return None
    stack = [root]
    while stack:
        curr = stack.pop()
        # Detach matching children instead of visiting them.
        if curr.left and curr.left.value == x:
            curr.left = None
        if curr.right and curr.right.value == x:
            curr.right = None
        if curr.right:
            stack.append(curr.right)
        if curr.left:
            stack.append(curr.left)
    return root
# Demo: build the tree sketched below, then prune various values and print
# the remaining level-order traversal each time (the tree is rebuilt before
# every prune because prune mutates it in place).
tree = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5, TreeNode(9) )), TreeNode(3, TreeNode(6, TreeNode(7))))
#      1
#     / \
#    2   3
#   / \  /
#  4  5 6
#    /  /
#    9  7
print(arrayifyTree(tree))
tree = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5, TreeNode(9) )), TreeNode(3, TreeNode(6, TreeNode(7))))
print(arrayifyTree(prune(tree, 3))) # [1, 2, 4, 5, 9]
tree = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5, TreeNode(9) )), TreeNode(3, TreeNode(6, TreeNode(7))))
print(arrayifyTree(prune(tree, 6))) # [1, 2, 3, 4, 5, 9]
tree = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5, TreeNode(9) )), TreeNode(3, TreeNode(6, TreeNode(7))))
print(arrayifyTree(prune(tree, 1))) # []
# Duplicate value 3 on both branches: both subtrees are removed.
tree = TreeNode(1, TreeNode(3, TreeNode(4), TreeNode(5, TreeNode(9) )), TreeNode(3, TreeNode(6, TreeNode(7))))
#      1
#     / \
#    3   3
#   / \  /
#  4  5 6
#    /  /
#    9  7
print(arrayifyTree(prune(tree, 3))) # [1]
eabb873c51c052cf495317867fe6faa0543419dd | Python | leohentschker/Project-Euler | /Python/euler_p67/euler_p67.py | UTF-8 | 1,623 | 3.609375 | 4 | [] | no_license | class Triangle():
    def __init__(self, value):
        """Node in a number triangle; children and the memoised best path
        sum are filled in later."""
        self.value = value
        self.left_child = None
        self.right_child = None
        # -1 marks "not computed yet"; valid sums are assumed positive.
        self.path_sum = -1
def set_children(self, left_child, right_child):
self.left_child = left_child
self.right_child = right_child
def max_path_sum(self):
# if we have already calculated the max path sum
if self.path_sum > 0:
return self.path_sum
if self.left_child is None or self.right_child is None:
self.path_sum = self.value
return self.value
else:
max_sum = self.value + max(self.left_child.max_path_sum(), self.right_child.max_path_sum())
self.path_sum = max_sum
return max_sum
def initialize_triangle(input_list, number_of_rows):
    """Build Triangle nodes from a flattened triangle and wire up children.

    Row r (1-based size r) starts after rows 1..r-1; the children of node
    at flat index i in a row of size r sit at i + r and i + r + 1.

    Fix: the original wrapped set_children in a bare `try/except: pass`,
    silently swallowing any error; nodes in the last row simply have no
    children, so an explicit bounds guard is used instead.
    """
    nodes = [Triangle(value) for value in input_list]
    index = 0
    for row_size in range(1, number_of_rows + 1):
        for _ in range(row_size):
            left = index + row_size
            right = left + 1
            # Last-row nodes have no children.
            if right < len(nodes):
                nodes[index].set_children(nodes[left], nodes[right])
            index += 1
    return nodes
with open("p67.txt", "r") as input_file:
input_text = input_file.read()
# nano always includes an empty line at the end
number_of_rows = len([item for item in input_text.split("\n") if len(item) > 0])
input_str_list = input_text.replace("\n", " ").split(" ")
input_list = [int(item) for item in input_str_list if item != ""]
triangle_struct = initialize_triangle(input_list, number_of_rows)
max_sum = triangle_struct[0].max_path_sum()
print max_sum
| true |
ad78ca9d2dd3ddb1d720ca974e3c551ce63617b9 | Python | gistable/gistable | /dockerized-gists/805139/snippet.py | UTF-8 | 3,963 | 3.046875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf8 -*-
import os, simplejson, re
class OrganizerException(Exception):
    """Error raised for configuration/setup problems in Organizer."""

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message
# NOTE: Python 2 code throughout this class (dict.has_key, dict.iteritems,
# `print` statement) -- it will not run unmodified on Python 3.
class Organizer(object):
    # Sorts files in config["target_dir"] into subdirectories chosen by
    # regex rules from a JSON config file.
    def __init__(self, configFile):
        self.__preserveConfig(configFile)

    """
    gets config parameter and returns config dictionary.
    """
    def __preserveConfig(self, configFile):
        # Validate existence, JSON-parse the file, and require the
        # "dir_for_unknown_files" and "rules" keys.
        if os.path.exists(configFile) == False:
            raise OrganizerException(\
                u"check the existance of config file: %s" % configFile\
            )

        file_content = open(configFile).read()
        try:
            self.config = simplejson.loads(file_content)
        except ValueError:
            raise OrganizerException(\
                u"config file is not valid. check it"\
            )

        if not self.config.has_key("dir_for_unknown_files"):
            raise OrganizerException(\
                u"[dir_for_unknown_files] key doesn't exists in your config file."\
            )

        if not self.config.has_key("rules"):
            raise OrganizerException(\
                u"there is no 'rules' variable in json config."\
            )

    """
    returns 'listed files' in the target directory.
    """
    def getFileList(self):
        files = os.listdir(self.config.get("target_dir"))
        return files

    """
    handler function for listed files.
    """
    def handleFiles(self):
        # Move every regular file that has an extension into the directory
        # chosen by findMoveDirectory.
        files = self.getFileList()
        root_path = self.config.get("target_dir")
        for file in files:
            # get all files
            if os.path.isfile("%s/%s" % (self.config.get("target_dir"), file)):
                # play with files that has extension
                if self.hasExtension(file):
                    move_dir = self.findMoveDirectory(file)
                    os.rename("%s/%s" % (root_path, file),\
                        "%s/%s/%s" % (root_path, move_dir, file))
                    self.__printNotification("%s to %s" % (file, move_dir))

    """
    returns rules for file move actions
    """
    def getRules(self):
        return self.config.get("rules")

    """
    returns the extension of file.
    """
    def getExtension(self, fileName):
        # Lazy group captures the text after the LAST dot; returns False
        # (not None) when there is no dot at all.
        search = re.search(".*\.(.*?)$", fileName)
        if not search == None:
            return search.group(1)
        else:
            return False

    """
    file has an extension or not?
    """
    def hasExtension(self, fileName):
        if self.getExtension(fileName) == False:
            return False
        return True

    """
    returns new 'move' directory for file
    """
    def findMoveDirectory(self, fileName):
        # First rule whose regex matches wins; otherwise fall back to the
        # configured directory for unknown files.  Rule iteration order is
        # whatever the dict yields -- TODO confirm this is acceptable when
        # multiple rules can match the same name.
        rules = self.getRules()
        match = 0
        for regex_rule, target_dir in rules.iteritems():
            if(re.search(regex_rule, fileName)):
                self.controlTargetDir(target_dir)
                match = 1
                return target_dir
        # no match, return 'unknown' directory
        if match == 0:
            unknown = self.config.get("dir_for_unknown_files")
            self.controlTargetDir(unknown)
            return unknown

    def __printNotification(self, notification):
        # Python 2 print statement.
        print " [+] %s" % notification

    """
    if file's new destination is not created yet, don't stop the music.
    """
    def controlTargetDir(self, subTargetDir):
        # Create target_dir/subTargetDir on first use.
        full_dir = "%s/%s" % (self.config.get("target_dir"), subTargetDir)
        if not os.path.isdir(full_dir):
            self.__printNotification("%s is not a directory. creating." % subTargetDir)
            os.mkdir(full_dir)
if __name__ == "__main__":
# initial setup
organizer = Organizer("config.json")
organizer.handleFiles()
| true |
a390378a8f3311d282bf9226a265e4cf06d7a268 | Python | prem1806/python-practice-files | /selection_sort/selection_sort.py | UTF-8 | 271 | 3.515625 | 4 | [] | no_license | def ssort(lst):
    # Selection sort (ascending), in place: on each pass find the largest
    # element in lst[0..i] and swap it to position i at the shrinking tail.
    for i in range(len(lst)-1,0,-1):
        p=0
        # p tracks the index of the largest value seen so far in lst[0..i].
        for l in range(1,i+1):
            if lst[l]>lst[p]:
                p = l
        # Swap the pass maximum into its final slot.
        temp = lst[i]
        lst[i] = lst[p]
        lst[p] = temp
lst = [4,36,11,19,32,54,23,5,2]
ssort(lst)  # sorts lst in place
print(lst)
| true |
f85ad27cdb10fc48c38c07f132aa5faa1bbd08b8 | Python | divyams1/LeetCode | /graphs/maze_end.py | UTF-8 | 316 | 2.96875 | 3 | [] | no_license | class Solution:
    def findPath(self, start, end):
        # NOTE(review): unfinished stub. `end` is never used, `path` is
        # built but never filled or returned, and Coordinate() is called
        # without the row/col/color arguments its __init__ requires, so
        # this raises TypeError at runtime. TODO: complete or remove.
        startCol = start[0]
        startRow = start[1]
        path = []
        coordStart = Coordinate()
class Coordinate:
    """A grid cell: (row, col) position plus a visitation color marker."""

    def __init__(self, row, col, color):
        self.row = row
        self.col = col
        self.color = color
0a254fe26a23f7fa1d8d9c1acc77b3f957bffd65 | Python | tp-yan/PythonScript | /python编程从入门到实践/json_test.py | UTF-8 | 2,074 | 3.859375 | 4 | [] | no_license | #python借助json模块来存储数据到本地或从本地读取到内存
import json
numbers = [1,2,3,4,5,6,7]
# .json suffix: the data is stored in JSON format
file_name = 'numbers.json'
with open(file_name,'w') as f_obj:
    # dump: serialise the Python object out to the file as JSON
    json.dump(numbers,f_obj)
# read the saved data back from disk
with open(file_name) as f_obj:
    _numbers = json.load(f_obj)
print(_numbers)
'''
#记录用户姓名到本地
file_username = 'username.json'
try:
    with open(file_username) as f_obj:
        username = json.load(f_obj)
except FileNotFoundError:
    with open(file_username,'w') as f_obj:
        username = input('please input your name:')
        json.dump(username,f_obj)
else:
    print('Welcome Mr ',username.title(),'!')
'''
def get_stored_username(filename):
    """Return the username stored as JSON in `filename`, or None when the
    file does not exist yet."""
    try:
        with open(filename) as f_obj:
            return json.load(f_obj)
    except FileNotFoundError:
        return None
def get_new_username(saved_file):
    '''Prompt for a new username, save it as JSON to saved_file, return it.'''
    username = input('please input your name:')
    with open(saved_file,'w') as f_obj:
        json.dump(username,f_obj)
    return username
def greet_user():
    '''Greet the user, remembering their name between runs.'''
    filename = 'saved_username.json'
    username = get_stored_username(filename)
    if username:
        # Confirm the stored identity before welcoming them back.
        hint = "Are you " +username+"?(Y/N)"
        res = input(hint)
        if res.upper() == "Y":
            print('Welcome back, ',username.title(),"!")
            return
    # No stored name, or the user said it isn't them: ask and store anew.
    username = get_new_username(filename)
    print('We\'ll remember you when you come back, ',username.title()+" !")
greet_user()
#test
file_rem_num = 'favorite_nums.json'
def get_stored_num(filename):
    """Return the favourite number stored as JSON in `filename`, or None
    when the file does not exist yet."""
    try:
        with open(filename) as f_obj:
            return json.load(f_obj)
    except FileNotFoundError:
        return None
def save_new_num(filename):
    # Prompt for a favourite number (kept as a string) and save it as JSON.
    with open(filename,'w') as f_obj:
        num = input('input your favorite num:')
        json.dump(num,f_obj)
    return num
def record_num():
    # Report the remembered favourite number, or ask for and store one.
    num = get_stored_num(file_rem_num)
    if num:
        print("I know your favorite number!It's ",num)
    else:
        num = save_new_num(file_rem_num)
        print("I remember your favorite num:",num)
record_num()
| true |
12b10122a8107c233879466124ba251d43d1ca9d | Python | uwsampl/relay-bench | /experiments/char_rnn/language_data.py | UTF-8 | 1,746 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | import glob
import os
import requests
import string
import unicodedata
import zipfile
__DATA__ = None
DATA_URL = 'https://download.pytorch.org/tutorial/data.zip'
DATA_PATH = 'data'
ALL_LETTERS = string.ascii_letters + " .,;'"
N_LETTERS = len(ALL_LETTERS) + 1
N_CATEGORIES = None
ALL_CATEGORIES = []
MAX_LENGTH = 20

# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicode_to_ascii(s):
    """Strip accents (combining marks) and drop any character outside
    ALL_LETTERS."""
    kept = []
    for ch in unicodedata.normalize('NFD', s):
        if unicodedata.category(ch) != 'Mn' and ch in ALL_LETTERS:
            kept.append(ch)
    return ''.join(kept)
def get_data():
    """Return {category: [ascii names]}, downloading/extracting the PyTorch
    names dataset on first use and caching the result in module globals
    (__DATA__, ALL_CATEGORIES, N_CATEGORIES)."""
    global N_CATEGORIES
    global __DATA__
    if __DATA__ is not None:
        # Already loaded in this process.
        return __DATA__
    if not os.path.exists(DATA_PATH):
        # Download and unpack the archive into the working directory.
        resp = requests.get(DATA_URL)
        with open('data.zip', 'wb') as zip_file:
            zip_file.write(resp.content)
        zip_file = zipfile.ZipFile('data.zip')
        zip_file.extractall('.')
    languages = {}
    # One file per language under data/names/, file name = category.
    for language in glob.glob(os.path.join(DATA_PATH , 'names', "*")):
        with open(language, encoding='utf-8') as language_file:
            category = os.path.splitext(os.path.basename(language))[0]
            ALL_CATEGORIES.append(category)
            lines = language_file.read().strip().split('\n')
            names = [unicode_to_ascii(line) for line in lines]
            languages[category] = names
    # Modify global state.
    __DATA__ = languages
    N_CATEGORIES = len(__DATA__.keys())
    return __DATA__
def letter_to_topi(letter):
    """Return the integer index of *letter* within ALL_LETTERS."""
    return ALL_LETTERS.index(letter)


def topi_to_letter(topi):
    """Return the letter stored at index *topi* of ALL_LETTERS."""
    return ALL_LETTERS[topi]


def main():
    # Touch get_data() once so the module-level caches are populated.
    get_data()


if __name__ == '__main__':
    main()
| true |
b3a4d35804cd4fd7e252e10e79cb08c0c02af3b3 | Python | Chuban/xraylarch | /plugins/io/columnfile.py | UTF-8 | 11,776 | 2.953125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python
"""
Larch column file reader: read_ascii
"""
import os
import time
from dateutil.parser import parse as dateparse
import numpy as np
from larch import ValidateLarchPlugin, Group
from larch.utils import fixName
from larch.symboltable import isgroup
MODNAME = '_io'
TINY = 1.e-7
MAX_FILESIZE = 100*1024*1024 # 100 Mb limit
COMMENTCHARS = '#;%*!$'
def getfloats(txt, allow_times=True):
    """convert a line of numbers into a list of floats,
    as for reading a file with columnar numerical data.

    Arguments
    ---------
      txt           str, line of text to parse
      allow_times   bool, whether to support time stamps [True]

    Returns
    -------
      list, each entry either a float or None

    Notes
    -----
      When `allow_times` is True, common date-time strings are converted
      to Unix timestamps with
          time.mktime(dateutil.parser.parse(word).timetuple())

      BUG FIX: `allow_times` used to be accepted but ignored; time-stamp
      parsing is now attempted only when it is True.
    """
    words = [w.strip() for w in txt.replace(',', ' ').split()]
    mktime = time.mktime
    for i, w in enumerate(words):
        val = None
        try:
            val = float(w)
        except ValueError:
            if allow_times:
                try:
                    val = mktime(dateparse(w).timetuple())
                except ValueError:
                    pass
        words[i] = val
    return words
def colname(txt):
    """Normalize *txt* into a lowercase, identifier-safe column name."""
    lowered = txt.strip().lower()
    return fixName(lowered).replace('.', '_')
def iso8601_time(ts):
tzone = '-%2.2i:00' % (time.timezone/3600)
s = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(ts))
return "%s%s" % (s, tzone)
def read_ascii(filename, labels=None, simple_labels=False,
               sort=False, sort_column=0, _larch=None):
    """read a column ascii column file, returning a group containing the data
    extracted from the file.

    read_ascii(filename, labels=None, simple_labels=False, sort=False, sort_column=0)

    Arguments
    ---------
      filename (str)         name of file to read
      labels (list or None)  list of labels to use for column labels [None]
      simple_labels (bool)   whether to force simple column labels (note 1) [False]
      sort (bool)            whether to sort row data (note 2) [False]
      sort_column (int)      column to use for sorting (note 2) [0]

    Returns
    -------
      group containing data read from file

    Notes
    -----
      1. column labels. If `labels` is left the default value of `None`,
         column labels will be tried to be created from the line
         immediately preceeding the data, or using 'col1', 'col2', etc if
         column labels cannot be figured out.  The labels will be used as
         names for the 1-d arrays for each column.  If `simple_labels` is
         `True`, the names 'col1', 'col2' etc will be used regardless of
         the column labels found in the file.
      2. sorting. Data can be sorted to be in increasing order of any column,
         by giving the column index (starting from 0).
      3. header parsing. If header lines are of the forms of
           KEY : VAL
           KEY = VAL
         these will be parsed into a 'attrs' dictionary in the returned group.

      The returned group will have a number of members:
         GROUP.filename: text name of the file
         GROUP.array_labels: array labels, names of 1-D arrays
         GROUP.data: 2-dimensional data (ncolumns, nrows)
         GROUP.header: array of text lines of the header.
         GROUP.footer: array of text lines of the footer (text after the block of numerical data)
         GROUP.attrs : group of attributes parsed from header lines
    """
    if not os.path.isfile(filename):
        raise OSError("File not found: '%s'" % filename)
    if os.stat(filename).st_size > MAX_FILESIZE:
        raise OSError("File '%s' too big for read_ascii()" % filename)
    with open(filename, 'r') as fh:
        text = fh.read()
    text = text.replace('\r\n', '\n').replace('\r', '\n').split('\n')
    _labelline = None
    ncol = None
    data, footers, headers = [], [], []
    # Walk the file bottom-up with a small state machine: everything below
    # the numeric block is FOOTER, the numeric block itself is DATA, and the
    # rest is HEADER (whose line just above the data may hold column labels).
    text.reverse()
    section = 'FOOTER'
    for line in text:
        line = line.strip()
        if len(line) < 1:
            continue
        # look for section transitions (going from bottom to top)
        if section == 'FOOTER' and not None in getfloats(line):
            section = 'DATA'
        elif section == 'DATA' and None in getfloats(line):
            section = 'HEADER'
            _labelline = line
            if _labelline[0] in COMMENTCHARS:
                _labelline = _labelline[1:].strip()
        # act of current section:
        if section == 'FOOTER':
            footers.append(line)
        elif section == 'HEADER':
            headers.append(line)
        elif section == 'DATA':
            rowdat = getfloats(line)
            if ncol is None:
                ncol = len(rowdat)
            # rows with a different column count than the first data row
            # are silently skipped
            if ncol == len(rowdat):
                data.append(rowdat)
    # reverse header, footer, data, convert to arrays
    footers.reverse()
    headers.reverse()
    data.reverse()
    data = np.array(data).transpose()
    # try to parse attributes from header text
    header_attrs = {}
    for hline in headers:
        hline = hline.strip().replace('\t', ' ')
        if len(hline) < 1: continue
        if hline[0] in COMMENTCHARS:
            hline = hline[1:].strip()
        keywds = []
        if ':' in hline: # keywords in 'x: 22'
            words = hline.split(':', 1)
            keywds = words[0].split()
        elif '=' in hline: # keywords in 'x = 22'
            words = hline.split('=', 1)
            keywds = words[0].split()
        if len(keywds) == 1:
            key = colname(keywds[0])
            if key.startswith('_'):
                key = key[1:]
            if len(words) > 1:
                header_attrs[key] = words[1].strip()
    ncols, nrow = data.shape
    # set column labels from label line
    _labels = None
    _clabels = ['col%i' % (i+1) for i in range(ncols)]
    if labels is not None:
        labels = labels.replace(',', ' ').replace('\t', ' ')
        _labels = [colname(l) for l in labels.split()]
    elif simple_labels or _labelline is None:
        _labels = _clabels
    else:
        _labelline = _labelline.lower()
        for delim in ('\t', ','):
            if delim in _labelline:
                _labs = [colname(l) for l in _labelline.split(delim)]
                # accept a delimited label line only when it covers more
                # than half of the data columns
                if len(_labs) > int(1 + ncols/2.0):
                    _labels = _labs
                    break
        if _labels is None:
            _labelline = _labelline.replace(', ', ' ').replace('\t', ' ')
            _labels = [colname(l) for l in _labelline.split()]
    if _labels is None:
        _labels = _clabels
    # pad or truncate the labels to exactly one per column
    if len(_labels) < ncols:
        for i in range(len(_labels), ncols):
            _labels.append("col%i" % (i+1))
    elif len(_labels) > ncols:
        _labels = _labels[:ncols]
    # NOTE(review): this local 'attrs' dict is never attached to the group
    # (a fresh Group is created below) -- confirm whether it is dead code.
    attrs = {'filename': filename}
    attrs['column_labels'] = attrs['array_labels'] = _labels
    # NOTE(review): the guard uses 'ncol' (per-row count, None for an empty
    # file) rather than 'ncols' -- confirm this is intended.
    if sort and sort_column >= 0 and sort_column < ncol:
        data = data[:, np.argsort(data[sort_column])]
    group = Group(name='ascii_file %s' % filename,
                  filename=filename, header=headers, data=data,
                  array_labels=_labels, column_labels=_labels)
    if len(footers) > 0:
        group.footer = footers
    # attach each column as a 1-D array, renaming collisions with existing
    # group attributes by appending '_'
    for i in range(ncols):
        nam = _labels[i].lower()
        if nam in ('data', 'array_labels', 'filename',
                   'attrs', 'header', 'footer'):
            nam = "%s_" % nam
        setattr(group, nam, data[i])
    group.attrs = Group(name='header attributes from %s' % filename)
    for key, val in header_attrs.items():
        setattr(group.attrs, key, val)
    return group
@ValidateLarchPlugin
def write_ascii(filename, *args, **kws):
    """write a list of items to an ASCII column file

    write_ascii(filename, arg1, arg2, arg3, ... **args)

    arguments
    ---------
    commentchar: character for comment ('#')
    label: array label line (autogenerated)
    header: list of strings for the header; when omitted, a default
        'Output from Larch <date>' line is written

    Positional ndarrays longer than ARRAY_MINLEN become data columns;
    every other positional argument is repr()'d into the header.
    """
    ARRAY_MINLEN = 5
    _larch = kws['_larch']
    com = kws.get('commentchar', '#')
    label = kws.get('label', None)
    header = kws.get('header', None)
    # BUG FIX: the default used to be [], so the 'is None' branch below
    # never ran, and explicitly passing header=None crashed the header loop.
    if header is None:
        header = ['Output from Larch %s' % time.ctime()]

    arrays = []
    arraylen = None
    for arg in args:
        if isinstance(arg, np.ndarray) and len(arg) > ARRAY_MINLEN:
            if arraylen is None:
                arraylen = len(arg)
            else:
                arraylen = min(arraylen, len(arg))
            arrays.append(arg)
        else:
            header.append(repr(arg))

    buff = []
    for s in header:
        buff.append('%s %s' % (com, s))
    buff.append('%s---------------------------------' % com)
    if label is None:
        label = ' '.join(['col%i' % (i+1) for i in range(len(arrays))])
    # note: the label line keeps a literal '#' regardless of commentchar
    buff.append('# %s' % label)

    # Iterate over the python list of arrays directly; the former
    # np.array(arrays) conversion was unnecessary and 'arraylen or 0'
    # avoids a TypeError when no array arguments were given.
    for i in range(arraylen or 0):
        buff.append(' '.join([' % f' % val[i] for val in arrays]))

    try:
        fout = open(filename, 'w')
    except IOError:
        _larch.writer.write("cannot open file %s'" % filename)
        return
    try:
        # 'with' guarantees the handle is closed even when writing fails
        with fout:
            fout.write('\n'.join(buff))
            fout.write('\n')
    except IOError:
        _larch.writer.write("cannot write to file %s'" % filename)
        return
    _larch.writer.write("wrote to file '%s'\n" % filename)
@ValidateLarchPlugin
def write_group(filename, group, scalars=None,
                arrays=None, arrays_like=None,
                commentchar='#', _larch=None):
    """write components of a group to an ASCII column file

    write_group(filename, group, commentchar='#')

    Warning: This is pretty minimal and may work poorly
    for large groups of complex data.
    """
    members = dir(group)
    npts = 0
    if arrays is None:
        arrays = []
    if scalars is None:
        scalars = []

    # Use the reference member (if given) to fix the expected array length.
    if arrays_like is not None and arrays_like in members:
        reference = getattr(group, arrays_like)
        if isinstance(reference, np.ndarray):
            npts = len(reference)

    # Collect every ndarray member whose length matches the reference.
    for name in members:
        value = getattr(group, name)
        if isinstance(value, np.ndarray):
            if npts != 0 and npts == len(value) and name not in arrays:
                arrays.append(name)

    header = []
    for s in scalars:
        if s in members:
            header.append("%s = %s" % (s, getattr(group, s)))

    label = ' '.join(arrays)
    args = [getattr(group, name) for name in arrays if name in members]
    write_ascii(filename, *args, commentchar=commentchar,
                label=label, header=header, _larch=_larch)
@ValidateLarchPlugin
def guess_filereader(filename, _larch=None):
    """guess function name to use to read an ASCII data file based
    on the file header

    Arguments
    ---------
    filename (str) name of file to be read

    Returns
    -------
    name of function (as a string) to use to read file

    BUG FIX: the original referenced undefined names ('path' instead of
    'filename', 'lines[0]' instead of the line just read), so every call
    raised NameError.  The file handle is now also closed reliably.
    """
    # Only the first line is needed to recognize the dialect.
    with open(filename, 'r') as fh:
        line1 = fh.readline().lower()
    reader = 'read_ascii'
    if 'xdi' in line1:
        reader = 'read_xdi'
    if ('epics stepscan' in line1 or 'gse' in line1):
        reader = 'read_gsexdi'
    elif 'epics scan' in line1:
        reader = 'read_gsescan'
    return reader
def registerLarchPlugin():
    """Expose this module's public functions to the Larch interpreter."""
    exports = {
        'read_ascii': read_ascii,
        'guess_filereader': guess_filereader,
        'write_ascii': write_ascii,
        'write_group': write_group,
    }
    return (MODNAME, exports)
| true |
ee56a1330837786e88585847e615f0d84184beba | Python | sshish/NF | /NF.py | UTF-8 | 12,272 | 3.3125 | 3 | [] | no_license | #######################################
#Basic interface of normalizing flows.#
#######################################
import torch
class Basic(torch.nn.Module):
    """Abstract base class for normalizing-flow modules.

    Methods:
    forward(x, c): reversible transformation conditioned on context ``c``.
    Input ``x`` and output have shape BATCH*CHANNEL*INFO, where the INFO
    dimension carries two entries per variable: its value and its
    contribution to the log-determinant of the Jacobian (the log-det is
    the CHANNEL-wise sum of those second entries).

    inverse(y, c): inverse transformation conditioned on context ``c``;
    ``y`` and the result have shape BATCH*CHANNEL.
    """

    def __init__(self, *args):
        super(Basic, self).__init__()

    def forward(self, x, c):
        raise NotImplementedError

    def inverse(self, y, c):
        raise NotImplementedError

    def _get_name(self):
        # Prefix the default module name so flows stand out in repr().
        return 'NF:' + super(Basic, self)._get_name()
class ToGenerator(torch.nn.Module):
    """Generative model combining a normalizing flow with a latent prior.

    Args:
        net (Basic): module implementing the normalizing flow.
        prior (torch.distributions.distribution.Distribution): distribution
            over the latent variable y.
    """
    def __init__(self, net, prior):
        assert(isinstance(net, Basic))
        assert(isinstance(prior, torch.distributions.distribution.Distribution))
        super(ToGenerator, self).__init__()
        self._net = net
        self._prior = prior

    def _sample_unsqueezed(self, n, c):
        # Draw n latent samples, replicate them per context row, and map
        # through the inverse flow; result shape: BATCH*n*CHANNEL.
        y = self._prior.sample(torch.Size([n]))
        y = self._net.inverse(y.repeat(c.shape[0],1), c.repeat_interleave(n, dim=0)).view(c.shape[0],n,-1)
        return y.detach()

    def sample(self, n=None, c=None):
        """Sample n times conditioned on context c.  Output shape is
        BATCH*n*CHANNEL; omitted arguments also omit the matching output
        dimension."""
        squeeze_n = False
        squeeze_c = False
        if n is None:
            squeeze_n = True
            n = 1
        if c is None:
            squeeze_c = True
            c = torch.empty(1)
        y = self._sample_unsqueezed(n, c)
        if squeeze_n:
            y = y.squeeze(dim=1)
        if squeeze_c:
            y = y.squeeze(dim=0)
        return y

    def log_p(self, x, c=None):
        """Log model probability of x (BATCH*CHANNEL) given context c;
        output shape BATCH."""
        x = torch.stack((x, torch.zeros_like(x)), dim=2)
        y = self._net(x, c)
        log_p1 = self._prior.log_prob(y[:,:,0])
        log_p2 = y[:,:,1].sum(dim=1)   # log-det contribution of the flow
        return log_p1 + log_p2

    def crossentropy_loss(self, x, c=None):
        """Scalar Monte Carlo crossentropy loss, suitable for training."""
        return -1. * self.log_p(x, c).mean()

    def _entropy_unsqueezed(self, n, c):
        # Negative log-probability of n fresh samples per context row,
        # shape BATCH*n.
        x = self._sample_unsqueezed(n, c)
        x = x.view(n * c.shape[0],-1)
        x = -1. * self.log_p(x, c.repeat_interleave(n, dim=0)).view(c.shape[0],n)
        return x

    def entropy(self, n, c=None):
        """Monte Carlo estimate of the model entropy, shape BATCH (the
        dimension is dropped when c is None)."""
        # BUG FIX: squeeze_c used to be assigned only inside the 'c is None'
        # branch, so calling entropy() with a context raised NameError.
        squeeze_c = False
        if c is None:
            squeeze_c = True
            c = torch.empty(1)
        x = self._entropy_unsqueezed(n, c).mean(dim=1)
        if squeeze_c:
            x = x.squeeze(dim=0)
        return x.detach()

    def entropy_loss(self, n, c=None):
        """Scalar loss whose gradient is a score-function estimate of the
        entropy gradient.

        NOTE(review): the baseline b and log_p are computed from two
        independent sample batches -- confirm this is intended.
        """
        if c is None:
            c = torch.empty(1)
        b = self._entropy_unsqueezed(n, c).detach().mean(dim=1, keepdim=True)
        log_p = -1. * self._entropy_unsqueezed(n, c)
        a = log_p.detach()
        return -1. * ((a + b) * log_p).mean()
######################################################################
#Implementations of different layers that comply to the NF interface.#
######################################################################
class Stack(torch.nn.Sequential, Basic):
    """A NF built by composing several NFs.

    Args: the component NFs, listed in order from x-space (data) to
    y-space (latent variable).
    """
    def __init__(self, *args):
        for flow in args:
            assert(isinstance(flow, Basic))
        super(Stack, self).__init__(*args)

    def forward(self, x, c):
        # Apply the flows data-to-latent.
        for flow in self._modules.values():
            x = flow(x, c)
        return x

    def inverse(self, y, c):
        # Undo the flows latent-to-data, i.e. in reverse order.
        for flow in reversed(self._modules.values()):
            y = flow.inverse(y, c)
        return y


class Permutation(Basic):
    """Context-free permutation of channels.

    Args:
    num_channels (int): number of input (and output) channels.
    permutation: either 'random' (default), 'flip', or a 1D tensor of
        indices describing the forward permutation.
    """
    def __init__(self, num_channels, permutation="random"):
        assert(isinstance(num_channels, int))
        super(Permutation, self).__init__()
        self._num_channels = num_channels
        if permutation == "random":
            forward_order = torch.randperm(num_channels)
        elif permutation == "flip":
            forward_order = torch.arange(num_channels - 1, -1, -1)
        else:
            forward_order = permutation
        self.register_buffer("permutation", forward_order)
        # Precompute the inverse permutation once.
        backward_order = torch.empty_like(forward_order)
        for i in range(num_channels):
            backward_order[forward_order[i]] = i
        self.register_buffer("inverse_permutation", backward_order)

    def forward(self, x, _):
        return x[:, self.permutation]

    def inverse(self, y, _):
        return y[:, self.inverse_permutation]
class Rotation(Basic):
    """Context-free rotation of channels in the plane of channel1 and channel2.

    Args: channel1 (int), channel2 (int)
    """
    def __init__(self, channel1, channel2):
        assert(isinstance(channel1, int))
        assert(isinstance(channel2, int))
        super(Rotation, self).__init__()
        self._channel1 = channel1
        self._channel2 = channel2
        # Learnable rotation angle, initialized to 0 (identity).
        self._angle = torch.nn.Parameter(torch.Tensor([0]))

    def forward(self, x, _):
        result = x.clone()
        result[:,self._channel1,0] = x[:,self._channel1,0] * self._angle.cos() - x[:,self._channel2,0] * self._angle.sin()
        result[:,self._channel2,0] = x[:,self._channel1,0] * self._angle.sin() + x[:,self._channel2,0] * self._angle.cos()
        return result

    def inverse(self, y, _):
        # BUG FIX: 'result' used to alias y, so the second assignment read
        # an already-rotated channel1.  Cloning first keeps both rows
        # computed from the original values (and no longer mutates the
        # caller's tensor).
        result = y.clone()
        result[:,self._channel1] = y[:,self._channel1] * self._angle.cos() + y[:,self._channel2] * self._angle.sin()
        result[:,self._channel2] = - y[:,self._channel1] * self._angle.sin() + y[:,self._channel2] * self._angle.cos()
        return result


class CRotation(Basic):
    """Context-dependent rotation of channels in the plane of channel1 and
    channel2.

    Args: channel1 (int), channel2 (int), angle (torch.nn.Module)
    """
    def __init__(self, channel1, channel2, angle):
        assert(isinstance(channel1, int))
        assert(isinstance(channel2, int))
        assert(isinstance(angle, torch.nn.Module))
        super(CRotation, self).__init__()
        self._channel1 = channel1
        self._channel2 = channel2
        self._angle = angle

    def forward(self, x, c):
        result = x.clone()
        angle = self._angle(c).squeeze()
        result[:,self._channel1,0] = x[:,self._channel1,0] * angle.cos() - x[:,self._channel2,0] * angle.sin()
        result[:,self._channel2,0] = x[:,self._channel1,0] * angle.sin() + x[:,self._channel2,0] * angle.cos()
        return result

    def inverse(self, y, c):
        # BUG FIX (same as Rotation.inverse): clone y instead of aliasing it.
        result = y.clone()
        angle = self._angle(c).squeeze(1)
        result[:,self._channel1] = y[:,self._channel1] * angle.cos() + y[:,self._channel2] * angle.sin()
        result[:,self._channel2] = - y[:,self._channel1] * angle.sin() + y[:,self._channel2] * angle.cos()
        return result
class Clamp(Basic):
    """Applicable when inputs are bounded between low and high. Useful for
    ensuring that the inverse transformation is also bounded.

    Args: low, high: vectors describing minimal and maximal x-values.
    """
    def __init__(self, low, high):
        super(Clamp, self).__init__()
        self.register_buffer("low", low)
        self.register_buffer("high", high)

    def forward(self, x, _):
        # Identity in the forward direction; only the inverse clamps.
        return x

    def inverse(self, y, _):
        return y.where(y > self.low, self.low).where(y < self.high, self.high)


class Tanh(Basic):
    """Tanh layer with NF interface (context-free)."""
    def __init__(self):
        super(Tanh, self).__init__()

    def forward(self, x, _):
        _x = x[:,:,0]
        # log|d tanh(x)/dx| in a numerically stable softplus form.
        delta = - 2. * (_x - torch.empty_like(_x).fill_(2).log() + torch.nn.functional.softplus(- 2. * _x))
        return torch.stack((_x.tanh(), x[:,:,1] + delta), dim=2)

    def inverse(self, y, _):
        return 0.5 * ((1. + y) / (1. - y)).log()


class Atanh(Basic):
    """Atanh layer with NF interface (context-free). Useful for ensuring that
    the inverse transformation is bounded.

    Args: low, high: vectors describing minimal and maximal x-values.
    """
    def __init__(self, low, high):
        # BUG FIX: the original called super(Tan, self).__init__() -- 'Tan'
        # is undefined, so constructing an Atanh raised NameError.
        super(Atanh, self).__init__()
        inflection = 0.5 * (high + low)
        steepness = 2 / (high - low)
        self.register_buffer("inflection", inflection)
        self.register_buffer("steepness", steepness)

    def forward(self, x, _):
        _x = x[:,:,0]
        _x = self.steepness * (_x - self.inflection)
        return torch.stack((0.5 * ((1. + _x) / (1. - _x)).log(), x[:,:,1] - (1. - _x ** 2).log()), dim=2)

    def inverse(self, y, _):
        return y.tanh() / self.steepness + self.inflection
class CouplingLayer(Basic):
    """Context-free affine coupling layer from the RealNVP paper."""
    def __init__(self, s, t, mask):
        assert(isinstance(s, torch.nn.Module))
        assert(isinstance(t, torch.nn.Module))
        assert(isinstance(mask, torch.BoolTensor))
        super(CouplingLayer, self).__init__()
        self._s = s
        self._t = t
        self.register_buffer("mask", mask)

    def forward(self, x, _):
        values = x[:,:,0]
        # Masked channels pass through unchanged and condition the others.
        passthrough = torch.where(self.mask, values, torch.zeros_like(values))
        zeros = torch.zeros_like(passthrough)
        log_scale = torch.where(~self.mask, self._s(passthrough), zeros)
        shift = torch.where(~self.mask, self._t(passthrough), zeros)
        transformed = values.where(self.mask, values * log_scale.exp() + shift)
        return torch.stack((transformed, x[:,:,1] + log_scale), dim=2)

    def inverse(self, y, _):
        passthrough = torch.where(self.mask, y, torch.zeros_like(y))
        zeros = torch.zeros_like(passthrough)
        log_scale = torch.where(~self.mask, self._s(passthrough), zeros)
        shift = torch.where(~self.mask, self._t(passthrough), zeros)
        return y.where(self.mask, (y - shift) * (-1. * log_scale).exp())


class CCouplingLayer(Basic):
    """Context-dependent affine coupling layer."""
    def __init__(self, s, t, mask):
        assert(isinstance(s, torch.nn.Module))
        assert(isinstance(t, torch.nn.Module))
        assert(isinstance(mask, torch.BoolTensor))
        super(CCouplingLayer, self).__init__()
        self._s = s
        self._t = t
        self.register_buffer("mask", mask)

    def forward(self, x, c):
        values = x[:,:,0]
        passthrough = torch.where(self.mask, values, torch.zeros_like(values))
        zeros = torch.zeros_like(passthrough)
        log_scale = torch.where(~self.mask, self._s(passthrough, c), zeros)
        shift = torch.where(~self.mask, self._t(passthrough, c), zeros)
        transformed = values.where(self.mask, values * log_scale.exp() + shift)
        return torch.stack((transformed, x[:,:,1] + log_scale), dim=2)

    def inverse(self, y, c):
        passthrough = torch.where(self.mask, y, torch.zeros_like(y))
        zeros = torch.zeros_like(passthrough)
        log_scale = torch.where(~self.mask, self._s(passthrough, c), zeros)
        shift = torch.where(~self.mask, self._t(passthrough, c), zeros)
        return y.where(self.mask, (y - shift) * (-1. * log_scale).exp())
| true |
f404477d60d834b5006cbb295d3b2328bf8c20d8 | Python | rlllzk/pydasar | /askses file txt.py | UTF-8 | 303 | 3.125 | 3 | [] | no_license | def main():
#mengakses file
f=open("sample.txt") #mengembalikan objek file
line = f.readline() #membaca baris pertama
while line:
print(line, end='')
line=f.readline() #membaca baris berikutnya
#menutup gile
f.close
if __name__=="__main__":
main() | true |
b12631a0b2f7f101caf6a2551785c1ee7d02da01 | Python | Zoujoko/Maths_Epitech_Tek3 | /308reedpipes/algo.py | UTF-8 | 1,661 | 3.140625 | 3 | [] | no_license | ##
## EPITECH PROJECT, 2021
## B-MAT-500-REN-5-1-308reedpipes-eliott.palueau
## File description:
## algo
##
def systemResolution(ordinate, abscissa):
    """Solve the 3x3 tridiagonal system for the interior second derivatives
    of a natural cubic spline with uniform knot spacing h = 5 (the two
    boundary entries stay 0).  *abscissa* is unused but kept for a uniform
    call signature."""
    rhs = [6 * (ordinate[k + 2] - 2 * ordinate[k + 1] + ordinate[k]) / 50
           for k in range(3)]
    A, B, C = rhs
    solution = [0, 0, 0, 0, 0]
    solution[2] = (B - (A + C) / 4) * 4 / 7
    solution[1] = A / 2 - 0.25 * solution[2]
    solution[3] = C / 2 - 0.25 * solution[2]
    return solution
def f(ordinate, abscissa, system, x, j):
    """Evaluate the cubic-spline segment between knots j-1 and j at x,
    using the second derivatives in *system* (h = 5, so 30 = 6h and
    5/6 = h/6)."""
    left = x - abscissa[j - 1]
    right = x - abscissa[j]
    return (- system[j - 1] / 30 * right ** 3
            + system[j] / 30 * left ** 3
            - (ordinate[j - 1] / 5 - 5 / 6 * system[j - 1]) * right
            + (ordinate[j] / 5 - 5 / 6 * system[j]) * left)
def computer(ordinate, abscissa, n):
    """Sample the spline at n evenly spaced points on [0, 20]."""
    system = systemResolution(ordinate, abscissa)
    samples = []
    for i in range(n):
        x = 20 / (n - 1) * i
        # Map x to its knot interval; the -0.01 nudge keeps knot values in
        # the segment to their left.
        segment = int((x - 0.01) / 5) + 1
        samples.append(f(ordinate, abscissa, system, x, segment))
    return samples
def algo(r0, r5, r10, r15, r20, n):
    """Print the spline's second-derivative vector and n sampled radii."""
    abscissa = [0, 5, 10, 15, 20]
    ordinate = [r0, r5, r10, r15, r20]
    vector = systemResolution(ordinate, abscissa)
    radii = computer(ordinate, abscissa, n)
    rounded = []
    for value in vector:
        value = round(value, 1)
        # Avoid printing '-0.0'.
        rounded.append(0 if value == -0.0 else value)
    print("vector result: [%.1f, %.1f, %.1f, %.1f, %.1f]" % tuple(rounded))
    for i in range(n):
        print("abscissa: %.1f cm\tradius: %.1f cm" % ((20 / (n - 1) * i), radii[i]))
67342ef32213f52da0785888649ef533e5be1007 | Python | Carreau/python-opentree | /examples/synth_subtree.py | UTF-8 | 1,919 | 2.859375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python3
import sys
from opentree import OTCommandLineTool, process_ott_or_node_id_arg
# Build the CLI: shared --ott-id/--node-id options plus subtree-specific ones.
cli = OTCommandLineTool(usage='Gets a subtree of the synthetic tree rooted at the node requested',
                        common_args=("ott-id", "node-id"))
cli.parser.add_argument('--format', default='newick',
                        help='"newick" or "arguson" tree format')
cli.parser.add_argument('--label-format', default='name_and_id',
                        help='"name_and_id", "name", or "id" style of labeling newick nodes')
cli.parser.add_argument('--height-limit', default=None, type=int,
                        help='number of levels to return. -1 for unlimited (newick only)')
OT, args = cli.parse_cli()
# Validate the requested tree format.
tree_format = args.format.strip().lower()
if tree_format not in ["newick", "arguson"]:
    sys.exit("Did not recognize --format={}\n".format(tree_format))
# Default height limit: unlimited (-1) for newick, 3 levels for arguson.
if args.height_limit is None:
    if tree_format == "newick":
        height_limit = -1
    else:
        height_limit = 3
else:
    height_limit = args.height_limit
    if height_limit < -1:
        sys.exit('Expecting height limit to be >= -1.')
label_format = args.label_format.strip().lower()
if label_format not in ["name_and_id", "name", "id"]:
    sys.exit("Did not recognize --label-format={}\n".format(label_format))
ott_id, node_id = process_ott_or_node_id_arg(args)
# use node_id_list if there are multiple. This is an odd call in the API
if (ott_id is None) and (node_id is None):
    sys.exit('Either --node-ids or --ott-ids must be provided.\n')
# An OTT id takes precedence over a raw synth node id.
if ott_id is not None:
    output = OT.synth_subtree(ott_id=ott_id, tree_format=tree_format, label_format=label_format,
                              height_limit=height_limit)
else:
    output = OT.synth_subtree(node_id=node_id, tree_format=tree_format, label_format=label_format,
                              height_limit=height_limit)
print(output.tree.as_ascii_plot())
| true |
3269e2d7c0c4fe5f1a756ee43795871fb5cdba66 | Python | asappresearch/emergent-comms-negotiation | /sampling.py | UTF-8 | 3,162 | 3.15625 | 3 | [
"MIT"
] | permissive | import torch
import numpy as np
def sample_items(batch_size, num_values=6, seq_len=3, random_state=np.random):
    """Draw an item pool: LongTensor of shape (batch_size, seq_len) with
    entries in {0, ..., num_values - 1} (num_values=6 gives 0..5)."""
    drawn = random_state.choice(num_values, (batch_size, seq_len), replace=True)
    return torch.from_numpy(drawn)
def sample_utility(batch_size, num_values=6, seq_len=3, random_state=np.random):
    """Draw utility vectors, redrawing while the sampled tensor sums to zero.

    NOTE(review): the nonzero check is on the whole batch's sum, not per
    row, so an individual all-zero row can still slip through -- confirm
    whether a per-row check was intended.
    """
    utility = torch.zeros(seq_len).long()
    while utility.sum() == 0:
        drawn = random_state.choice(num_values, (batch_size, seq_len), replace=True)
        utility = torch.from_numpy(drawn)
    return utility
def sample_N(batch_size, random_state=np.random):
    """Sample turn limits N ~ Poisson(7), clipped into [4, 10]."""
    drawn = random_state.poisson(7, batch_size)
    return torch.from_numpy(np.clip(drawn, 4, 10))
def generate_batch(batch_size, random_state=np.random):
    """Build one game batch: an item pool, one utility vector per agent,
    and the turn limits N.  The draw order (pool, utilities, N) is fixed
    so seeded runs stay reproducible."""
    pool = sample_items(batch_size=batch_size, num_values=6, seq_len=3,
                        random_state=random_state)
    utilities = [
        sample_utility(batch_size=batch_size, num_values=6, seq_len=3,
                       random_state=random_state),
        sample_utility(batch_size=batch_size, num_values=6, seq_len=3,
                       random_state=random_state),
    ]
    N = sample_N(batch_size=batch_size, random_state=random_state)
    return {
        'pool': pool,
        'utilities': utilities,
        'N': N
    }


def generate_test_batches(batch_size, num_batches, random_state):
    """Return a list of num_batches held-out batches (pool, per-agent
    utilities, and N for each)."""
    return [generate_batch(batch_size=batch_size, random_state=random_state)
            for _ in range(num_batches)]
def hash_long_batch(int_batch, num_values):
    """Collapse each row of int_batch into one base-num_values integer
    (the last column is the least-significant digit)."""
    seq_len = int_batch.size()[1]
    place_values = torch.LongTensor(seq_len)
    weight = 1
    for pos in range(seq_len):
        place_values[seq_len - pos - 1] = weight
        weight *= num_values
    return (int_batch * place_values).sum(1)
def hash_batch(pool, utilities, N):
    """Encode each game (N, pool, both utility vectors) as one integer.

    Base 1000 per field keeps every game encoding human-readable:
    [N]pppuuuuuu with num_values=10 digits.
    """
    code = N
    code = code * 1000 + hash_long_batch(pool, num_values=10)
    code = code * 1000 + hash_long_batch(utilities[0], num_values=10)
    code = code * 1000 + hash_long_batch(utilities[1], num_values=10)
    return code


def hash_batches(test_batches):
    """Return the set of integer encodings for every game in test_batches."""
    seen = set()
    for batch in test_batches:
        seen |= set(hash_batch(**batch).tolist())
    return seen


def overlaps(test_hashes, batch):
    """True when any game in *batch* also occurs in *test_hashes*."""
    batch_hashes = set(hash_batch(**batch).tolist())
    return bool(test_hashes & batch_hashes)


def generate_training_batch(batch_size, test_hashes, random_state):
    """Draw batches until one shares no game with the held-out set."""
    candidate = generate_batch(batch_size=batch_size, random_state=random_state)
    while overlaps(test_hashes, candidate):
        candidate = generate_batch(batch_size=batch_size, random_state=random_state)
    return candidate
| true |
0097f3b722d1cc2af76ce1eca576cd64634f1431 | Python | CSP-ArkCityHS-gregbuckbee/iterative-prisoners-dilemma-2019 | /team3.py | UTF-8 | 460 | 3.34375 | 3 | [] | no_license | team_name = 'Team 3333'
strategy_name = 'Betray 2'
strategy_description = 'If my history is c, and their history is c, I will betray.'
# BUG FIX: template placeholder assignments ('The name the team gives to
# itself', etc.) used to follow here and silently overwrite the real
# team/strategy names; they have been removed.


def move(my_history, their_history, my_score, their_score):
    """Return this round's move.

    Betray ('b') only when both histories are exactly 'c', as the strategy
    description states; otherwise collude ('c').

    BUG FIX: the original wrote their_history=='c' as a bare comparison
    statement (no effect) and then returned 'b' unconditionally.
    """
    if my_history == 'c' and their_history == 'c':
        return 'b'
    return 'c'
f2cca278098e26c6a22c9e485cd4633972e84ff1 | Python | snirappi/facebookRanker | /analysis.py | UTF-8 | 905 | 3.125 | 3 | [] | no_license | import pandas as pd
import glob
def negative(x):
    """Flip the sign of x; map 0 and None to None."""
    if x is None or x == 0:
        return None
    return -x


def change(frame):
    """Return column-to-column differences, negated, with rows sorted by
    the last column in descending order."""
    diffed = frame.diff(axis=1)
    for column in diffed.columns:
        diffed[column] = diffed[column].apply(negative)
    last_column = diffed.columns[len(diffed.columns) - 1]
    return diffed.sort_values(last_column, ascending=False)
# Collect every per-run CSV snapshot from the logs directory.
all_files = glob.glob("./logs/*.csv")
pd.options.display.float_format = '{:.1f}'.format
li = []
for filename in all_files:
    # First column is used as the index -- presumably the ranked names
    # produced by retrieveSource.py; confirm against its output format.
    df = pd.read_csv(filename, index_col=0, header=0)
    li.append(df)
try:
    # Align all snapshots side by side, one column per snapshot, sorted by
    # column name so they appear in chronological order.
    frame = pd.concat(li, axis=1, join='outer', ignore_index=False, sort=False)
    frame = frame.reindex(sorted(frame.columns), axis=1)
    print('CHANGES OVER TIME')
    print(change(frame))
    frame['Rank'] = frame.mean(axis=1)
    print('\nOVERALL RANK')
    print(frame.sort_values('Rank'))
except ValueError:
    # pd.concat raises ValueError on an empty list, i.e. when no CSVs exist.
    print("No Files Found! Get Data using retrieveSource.py")
| true |
95a0a776a466695aa6992c65323dde4838ad146e | Python | nischalshrestha/automatic_wat_discovery | /Notebooks/py/ricemath25/first-titanic-solution-with-hyper-tuned-rfc/first-titanic-solution-with-hyper-tuned-rfc.py | UTF-8 | 4,828 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Titanic Solution
# In[ ]:
import numpy as np
import os
import pandas as pd
import sklearn.ensemble
import sklearn.model_selection
# ## Load Data
# In[ ]:
train_df = pd.read_csv('../input/train.csv')
train_df.info()
train_df.head()
# In[ ]:
test_df = pd.read_csv('../input/test.csv')
print(test_df.shape)
# ## Exploration
# ### Survival
# In[ ]:
train_df['Survived'].value_counts(normalize=True)
# In[ ]:
# NOTE(review): corr() on a frame with string columns relies on older
# pandas silently dropping non-numeric columns; newer pandas requires
# numeric_only=True -- confirm the pandas version used.
train_df.corr()["Survived"]
# In[ ]:
train_df.fillna(train_df.mean(axis=0), axis=0).corr()["Survived"]
# ### Class
# In[ ]:
train_df['Pclass'].value_counts()
# In[ ]:
train_df.groupby(['Pclass'])['Survived'].mean()
# ### Name
# In[ ]:
train_df['Name'].apply(lambda name: len(name)).hist()
# In[ ]:
list(train_df['Name'])[:20]
# ### Sex
# In[ ]:
train_df['Sex'].value_counts()
# In[ ]:
train_df.groupby(['Sex'])['Survived'].mean()
# ### Age
# In[ ]:
train_df['Age'].hist()
# In[ ]:
# Survival rate per age tercile (qcut makes equal-count bins).
train_df.groupby(pd.qcut(train_df['Age'], 3))['Survived'].mean()
# ### Family
# In[ ]:
train_df['SibSp'].hist()
# In[ ]:
train_df['Parch'].hist()
# ### Fare
# In[ ]:
train_df['Fare'].hist()
# In[ ]:
train_df.groupby(pd.qcut(train_df['Fare'], 3))['Survived'].mean()
# ### Cabin
# In[ ]:
# Cabin is mostly missing; later used only as a present/absent flag.
train_df['Cabin'].isnull().value_counts()
# ### Embarked
# In[ ]:
train_df['Embarked'].value_counts()
# ## Feature Engineering
# In[ ]:
def process(df):
filt_df = df[['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Cabin']].copy()
filt_df.columns = ['ID', 'class', 'name_length', 'is_female', 'age', 'sibling_count', 'parent_child_count', 'price', 'origin', 'has_cabin']
filt_df['name_length'] = filt_df['name_length'].apply(lambda name: len(name))
filt_df['is_female'] = filt_df['is_female'].apply(lambda sex: 1 if sex == 'female' else 0)
filt_df = pd.concat([filt_df[[col for col in filt_df if col!='origin']], pd.get_dummies(filt_df['origin'])], axis=1)
filt_df_columns = list(filt_df.columns)
filt_df_columns[-3:] = ['origin_' + col.lower() for col in filt_df_columns[-3:]]
filt_df.columns = filt_df_columns
filt_df['has_family'] = filt_df['sibling_count'] + filt_df['parent_child_count']
filt_df['has_family'] = filt_df['has_family'].apply(lambda count: 1 if count > 0 else 0)
filt_df.drop(columns=['sibling_count', 'parent_child_count'])
filt_df['has_cabin'] = filt_df['has_cabin'].notnull().astype('int')
filt_df = filt_df.fillna(filt_df.mean(axis=0), axis=0)
# filt_df['name_length'] = (filt_df['name_length'] - filt_df['name_length'].mean()) / filt_df['name_length'].std()
# filt_df['age'] = (filt_df['age'] - filt_df['age'].mean()) / filt_df['age'].std()
# filt_df['price'] = (filt_df['price'] - filt_df['price'].mean()) / filt_df['price'].std()
return(filt_df)
# In[ ]:
train_filt_df = process(train_df)
train_filt_df = pd.concat([train_filt_df, train_df['Survived']], axis=1)
train_filt_df_columns = list(train_filt_df.columns)
train_filt_df_columns[-1] = 'is_survived'
train_filt_df.columns = train_filt_df_columns
train_filt_df.head()
# ## Hyperparameter Tuning
# In[ ]:
model = sklearn.ensemble.RandomForestClassifier(random_state=1, n_jobs=-1)
param_grid = {'criterion': ['gini', 'entropy'], 'bootstrap': [True, False], 'max_features': [3, 6, 9, 12], 'n_estimators': [3, 10, 30, 100]}
scoring_metric='accuracy'
cross_val_splits=4
train_X_arr = np.array(train_filt_df[[col for col in train_filt_df if col not in ['ID', 'is_survived']]])
train_Y_arr = np.array(train_filt_df['is_survived'])
grid_search = sklearn.model_selection.GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring_metric, cv=cross_val_splits)
grid_search.fit(train_X_arr, train_Y_arr)
# In[ ]:
model = grid_search.best_estimator_
model.oob_score=True
model
# In[ ]:
model.fit(train_X_arr, train_Y_arr)
scores = np.sqrt(sklearn.model_selection.cross_val_score(model, train_X_arr, train_Y_arr, scoring=scoring_metric, cv=cross_val_splits))
print('Scores: {0}'.format([round(x, 2) for x in scores]))
print('Mean: {0}'.format(round(scores.mean(), 2)))
print('STD: {0}'.format(round(scores.std(), 2)))
print('OOB Score: {0}'.format(round(model.oob_score_, 2)))
# ## Predictions
# In[ ]:
test_filt_df = process(test_df)
test_X_arr = np.array(test_filt_df[[col for col in test_filt_df if col not in ['ID', 'is_survived']]])
predictions = model.predict(test_X_arr)
print(len(predictions))
predictions[:10]
# In[ ]:
results_df = pd.DataFrame(list(test_filt_df['ID']))
results_df['predictions'] = list(predictions)
results_df.columns = ['PassengerId', 'Survived']
# results_df.to_csv('results.csv', header=True, index=False)
results_df.head()
# In[ ]:
| true |
fc8aa8de0e1b8a449f9f4665a52ea386ab2962cf | Python | WatanabeYuto/multi_turtle | /multi_navigation/src/set_initpos.py | UTF-8 | 1,537 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import tf
import math
from geometry_msgs.msg import PoseWithCovarianceStamped
if __name__ == '__main__':
try:
rospy.init_node('set_initpos', anonymous=True)
robot_list = rospy.get_param('robot_list')
print("follow are the names of robot which is set initpos")
for robot_info in robot_list:
if robot_info['enable']:
print(robot_info['name'])
pub = rospy.Publisher(robot_info['name']+'/initialpose', PoseWithCovarianceStamped, queue_size=0, latch=True)
initpos = PoseWithCovarianceStamped()
initpos.header.stamp = rospy.Time.now()
initpos.header.frame_id = 'map'
initpos.pose.pose.position.x = robot_info['init_pos'][0]
initpos.pose.pose.position.y = robot_info['init_pos'][1]
initpos.pose.pose.orientation.w = 1.0
initquat = tf.transformations.quaternion_from_euler(0, 0, robot_info['init_pos'][2])
initpos.pose.pose.orientation.x = initquat[0]
initpos.pose.pose.orientation.y = initquat[1]
initpos.pose.pose.orientation.z = initquat[2]
initpos.pose.pose.orientation.w = initquat[3]
print(initpos.pose.pose.orientation)
initpos.pose.covariance[6*0+0] = 0.5 * 0.5
initpos.pose.covariance[6*1+1] = 0.5 * 0.5
initpos.pose.covariance[6*5+5] = math.pi/12.0 * math.pi/12.0
pub.publish(initpos)
print("Finished settting all initial position")
rospy.spin()
except rospy.ROSInterruptException:
pass
| true |
ced6f5f690f0bd0440679940f99a23dcd6412577 | Python | krishnodey/Python-Tutoral | /calculate_sum_avg_of_list_elements.py | UTF-8 | 264 | 3.8125 | 4 | [] | no_license | n = int(input("enter the list size: "))
l = []
for i in range(0,n):
e = int(input("enter the element: "))
l.append(e)
sum = 0
for i in range(0,n):
sum += l[i]
print("Sum = ",sum,"\n","Avg = ",sum/n)
#thanks for watching keep coding | true |
625dd68d26a0bf70698093ab8f23144a342d304f | Python | YashSolanki2007/Notes-App | /main.py | UTF-8 | 813 | 2.8125 | 3 | [] | no_license | from flask import Flask, render_template, request, url_for
# Creating the app
app = Flask(__name__)
# Global Variables
notes_list = []
temp_notes_list = []
notes_list_title = []
temp_notes_list_title = []
final_list = []
# Creating the main page
@app.route("/", methods=['GET', 'POST'])
def main():
global notes_list, temp_notes_list, notes_list_title, temp_notes_list_title, final_list
if request.method == "POST":
webpage = request.form
# print(webpage)
notes_inp = str(webpage['notesInp'])
print(notes_inp)
temp_notes_list.append(notes_inp)
[notes_list.append(x) for x in temp_notes_list if x not in notes_list]
return render_template("index.html", notes_list=notes_list)
# Running the app
if __name__ == '__main__':
app.run(debug=True)
| true |
9a996f54f928c1536bc33422fb57e8fd811da7f7 | Python | halucinor/QC_Tool_Ex | /correlation.py | UTF-8 | 351 | 2.59375 | 3 | [] | no_license | import init
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
import matplotlib.font_manager as fm
init.initilize()
data = pd.read_csv('correlation.csv')
df = pd.DataFrame(data)
corr = df.corr(method = 'pearson').sort_values('채수량', ascending=False)
print(corr["채수량"]) | true |
a81113f02125c0e2e38e82806f66f034b7616130 | Python | nod/site33 | /views/about.py | UTF-8 | 1,311 | 2.9375 | 3 | [] | no_license |
from markdown import Markdown
from .viewlib import BaseHandler
from . import route
about_text = """
### 33ad actually comes from a year
Thirty Three A.D. The common belief is that
Christ was 33 years old when he was crucified, and since we count years from
his (aproximated) birth, then 33AD would have been the year of his crucifiction
and resurrection. Sooo, that's where 33ad.org comes from.
The interesting thing is that some scholars think that the monk that did the
whole counting on the year thing was actually about 3 years off. So, that
would mean the death of Christ would have been about AD 30 instead.
However, [here's a link](http://www.xenos.org/essays/sejanus.htm) that gives
some really interesting arguments for why 33 CE seems the right year.
### So in a nut shell...
33ad.org is a loose conglomeration of websites run by a group of friends and
named after a year 2000 years ago. It provides email addresses to the guys and
their spouses, but also serves as a central place to put up information for the
mass public and for sharing certain projects that they're working on. """
@route(r'/about')
class AboutHandler(BaseHandler):
def get(self):
self.render(
'about.html',
about_text = Markdown(['codehilite']).convert(about_text),
)
| true |
9abdb9fcaf617b997a76f160a0eda89b85c757e5 | Python | notWhaleB/LamportMutex | /RPC/serialize.py | UTF-8 | 241 | 2.890625 | 3 | [] | no_license | DELIM = "\t"
END = "\n"
def serialize(cmd, *args):
return DELIM.join(map(str, [cmd] + list(args))) + END
def unserialize(data):
components = data.split(DELIM)
cmd = components[0]
args = components[1:]
return cmd, args | true |
a639904256778ce038d21d1003c7438be7f53aec | Python | jsvine/pdfplumber | /pdfplumber/convert.py | UTF-8 | 3,501 | 2.640625 | 3 | [
"MIT"
] | permissive | import base64
from typing import Any, Callable, Dict, List, Optional, Tuple
from pdfminer.psparser import PSLiteral
from .utils import decode_text
ENCODINGS_TO_TRY = [
"utf-8",
"latin-1",
"utf-16",
"utf-16le",
]
CSV_COLS_REQUIRED = [
"object_type",
]
CSV_COLS_TO_PREPEND = [
"page_number",
"x0",
"x1",
"y0",
"y1",
"doctop",
"top",
"bottom",
"width",
"height",
]
def get_attr_filter(
include_attrs: Optional[List[str]] = None, exclude_attrs: Optional[List[str]] = None
) -> Callable[[str], bool]:
if include_attrs is not None and exclude_attrs is not None:
raise ValueError(
"Cannot specify `include_attrs` and `exclude_attrs` at the same time."
)
elif include_attrs is not None:
incl = set(CSV_COLS_REQUIRED + include_attrs)
return lambda attr: attr in incl
elif exclude_attrs is not None:
nonexcludable = set(exclude_attrs).intersection(set(CSV_COLS_REQUIRED))
if len(nonexcludable):
raise ValueError(
f"Cannot exclude these required properties: {list(nonexcludable)}"
)
excl = set(exclude_attrs)
return lambda attr: attr not in excl
else:
return lambda attr: True
def to_b64(data: bytes) -> str:
return base64.b64encode(data).decode("ascii")
class Serializer:
def __init__(
self,
precision: Optional[int] = None,
include_attrs: Optional[List[str]] = None,
exclude_attrs: Optional[List[str]] = None,
):
self.precision = precision
self.attr_filter = get_attr_filter(
include_attrs=include_attrs, exclude_attrs=exclude_attrs
)
def serialize(self, obj: Any) -> Any:
if obj is None:
return None
t = type(obj)
# Basic types don't need to be converted
if t in (int, str):
return obj
# Use one of the custom converters, if possible
fn = getattr(self, f"do_{t.__name__}", None)
if fn is not None:
return fn(obj)
# Otherwise, just use the string-representation
else:
return str(obj)
def do_float(self, x: float) -> float:
return x if self.precision is None else round(x, self.precision)
def do_bool(self, x: bool) -> int:
return int(x)
def do_list(self, obj: List[Any]) -> List[Any]:
return list(self.serialize(x) for x in obj)
def do_tuple(self, obj: Tuple[Any, ...]) -> Tuple[Any, ...]:
return tuple(self.serialize(x) for x in obj)
def do_dict(self, obj: Dict[str, Any]) -> Dict[str, Any]:
if "object_type" in obj.keys():
return {k: self.serialize(v) for k, v in obj.items() if self.attr_filter(k)}
else:
return {k: self.serialize(v) for k, v in obj.items()}
def do_PDFStream(self, obj: Any) -> Dict[str, str]:
return {"rawdata": to_b64(obj.rawdata)}
def do_PSLiteral(self, obj: PSLiteral) -> str:
return decode_text(obj.name)
def do_bytes(self, obj: bytes) -> Optional[str]:
for e in ENCODINGS_TO_TRY:
try:
return obj.decode(e)
except UnicodeDecodeError: # pragma: no cover
return None
# If none of the decodings work, raise whatever error
# decoding with utf-8 causes
obj.decode(ENCODINGS_TO_TRY[0]) # pragma: no cover
return None # pragma: no cover
| true |
449afdcac28fd477cc00585efb81d75f20264e47 | Python | ruckserve/JuniorDevs | /algorithms/binary_search/test.py | UTF-8 | 590 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env python
from time import time, sleep
def factorial(n) :
if n <= 1 :
return long(1)
return n * factorial(n-1)
def iter_factorial(n) :
_factorial = long(1)
while n >= 1 :
_factorial *= n
n -= 1
return _factorial
def timer(method, args, iterations=100000) :
start = time()
for i in range(iterations) :
method(*args)
stop = time()
return stop - start
if __name__ == '__main__' :
print 'iterative: ', timer(iter_factorial, [1000], 10)
sleep(3)
print 'recursive: ', timer(factorial, [1000], 10)
| true |
ddd49a2374d26de4cb83f43161ea3006d4eb3121 | Python | pkerpedjiev/train-travel-times-europe | /scripts/parse_sections.py | UTF-8 | 6,839 | 2.828125 | 3 | [] | no_license | #!/usr/bin/python
import collections as col
import dateutil.parser as dp
import gzip
import itertools as it
import json
import os
import os.path as op
import sys
from optparse import OptionParser
from math import radians, cos, sin, asin, sqrt
cities_to_trains = {}
stretches_to_times = {}
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
Courtesy of:
http://stackoverflow.com/a/4913653/899470
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
# 6371 km is the radius of the Earth
km = 6371 * c
return km
def merge_journeys(pl1, pl2):
'''
Merge two journeys.
The journeys have to have the same name.
'''
pl1_stations = [s['station']['id'] for s in pl1]
pl2_stations = [s['station']['id'] for s in pl2]
if len(pl1_stations) > len(pl2_stations):
return pl1
else:
return pl2
departures = col.defaultdict(lambda: None)
arrivals = col.defaultdict(lambda: None)
stations = col.defaultdict(lambda: None)
for pl in it.chain(pl1, pl2):
if pl['departure'] is not None:
departures[pl['station']['id']] = pl['departure']
if pl['arrival'] is not None:
arrivals[pl['station']['id']] = pl['arrival']
stations[pl['station']['id']] = pl
pl = set(pl1_stations + pl2_stations)
def pass_list_sort(a, b):
'''
Sort a pass list based on the departure and arrival
times.
'''
da = departures[a]
db = departures[b]
if da is None:
return -1
if db is None:
return 1
return int((dp.parse(da) - dp.parse(db)).total_seconds())
pl = list(pl)
pl.sort(pass_list_sort)
newPl = []
for sid in pl:
stations[sid]['departure'] = departures[sid]
stations[sid]['arrival'] = arrivals[sid]
newPl += [stations[sid]]
return newPl
def journey_to_tsv(pl, jname):
'''
Update the details of an existing journey
'''
for f, t in zip(pl, pl[1:]):
if f['departure'] is None or t['arrival'] is None:
continue
from_lat = float(f['station']['coordinate']['x'])
from_lon = float(f['station']['coordinate']['y'])
to_lat = float(t['station']['coordinate']['x'])
to_lon = float(t['station']['coordinate']['y'])
dist = haversine(from_lon, from_lat, to_lon, to_lat)
tf = dp.parse(f['departure'])
tt = dp.parse(t['arrival'])
elapsed_time = tt - tf
try:
output_data = [jname, f['station']['name'], t['station']['name'],
int(3600 * dist / (elapsed_time.total_seconds() + 60)),
int(dist), tf, elapsed_time,
from_lat, from_lon, to_lat, to_lon]
print u"\t".join(map(unicode, output_data)).encode('utf-8')
except ZeroDivisionError as zde:
print "ZeroDivisionError:", elapsed_time.total_seconds()
def parse_sections(filename, existing_journeys=None):
if existing_journeys is None:
existing_journeys = col.defaultdict(dict)
if not op.exists(filename):
print >>sys.stderr, "Not found:", filename
return existing_journeys
with gzip.open(filename, 'r') as f:
try:
data = json.loads(f.read())
except ValueError as ve:
print >>sys.stderr, "Error decoding JSON:", filename
return existing_journeys
for connection in data['connections']:
if 'sections' not in connection:
continue
for section in connection['sections']:
if 'journey' in section:
journey = section['journey']
if journey is None:
continue
jname = u"{} [{}]".format(journey['name'], journey['operator'])
if jname == 'ICE 102':
#print >>sys.stderr, "Journey", json.dumps(journey, indent=2)
print >>sys.stderr, journey['name'], journey['number'], journey['operator']
try:
if jname not in existing_journeys:
existing_journeys[jname] = journey['passList']
else:
existing_journeys[jname] = merge_journeys(existing_journeys[jname], journey['passList'])
except Exception as ex:
print >>sys.stderr, json.dumps(journey, indent=2)
with open('err.txt', 'w') as f1:
f1.write(json.dumps(existing_journeys))
print >>sys.stderr, "Exception, filename:", filename, "ex:", ex
sys.exit(1)
return existing_journeys
def main():
usage = """
python parse_sections file_list.txt
"""
num_args= 0
parser = OptionParser(usage=usage)
#parser.add_option('-o', '--options', dest='some_option', default='yo', help="Place holder for a real option", type='str')
#parser.add_option('-u', '--useless', dest='uselesss', default=False, action='store_true', help='Another useless option')
parser.add_option('-l', '--file-list', dest='file_list', default=False, action='store_true', help='The input is actually a file containing a list of files')
parser.add_option('-p', '--prefix', dest='prefix', default='.', help='A prefix path for the file list. Must be used in conjuction with the -l option', type='str')
(options, args) = parser.parse_args()
if len(args) < num_args:
parser.print_help()
sys.exit(1)
(options, args) = parser.parse_args()
if len(args) < num_args:
parser.print_help()
sys.exit(1)
existing_journeys = {}
if options.file_list:
if args[0] == '-':
f = sys.stdin
else:
f = open(args[0], 'r')
for line in f:
fn = op.join(options.prefix, line.strip())
existing_journeys = parse_sections(fn, existing_journeys=existing_journeys)
else:
for arg in args:
existing_journeys = parse_sections(arg, existing_journeys=existing_journeys)
tsvs = []
print u"\t".join(["janme", "from", "to", "speed",
"dist", "time_from", "elapsed_time",
"from_lat", "from_lon", "to_lat", "to_lon"])
for jname in existing_journeys:
journey_to_tsv(existing_journeys[jname], jname)
if __name__ == '__main__':
main()
| true |
61c8417d84a3075e2ff6151a01c9960dd36080b8 | Python | Seliaste/Wow-guild-online-checker | /checkbot.py | UTF-8 | 2,765 | 2.703125 | 3 | [] | no_license | import discord
import requests
import json
from requests.auth import HTTPBasicAuth
from requests.exceptions import HTTPError
from time import time
import threading
threadreturnlist = []
file = open("info.json", "r")
info = json.load(file)
realm = info[0]
guild = info[1]
discordToken = info[2]
bnetToken=[info[3],info[4]]
class analysisthread(threading.Thread):
def __init__(self, member, token):
threading.Thread.__init__(self)
self.token = token
self.member = member
def run(self):
threadLock.acquire()
try:
char = requests.get(self.member['character']['key']['href']+"&locale=en_US&access_token=" +
token['access_token'], headers={"Authorization": "Bearer "+token['access_token']}).json()
chartime = char["last_login_timestamp"]
except:
threadLock.release()
else:
howlongago = abs(chartime-round(time() * 1000))/1000
threadreturnlist.append([char["name"], howlongago])
threadLock.release()
threadLock = threading.Lock()
try:
tokenRequest = requests.post('https://eu.battle.net/oauth/token', auth=HTTPBasicAuth(
bnetToken[0], bnetToken[1]), data={"grant_type": "client_credentials"})
tokenRequest.raise_for_status()
except HTTPError as http_err:
print(f'HTTP error occurred: {http_err}') # Python 3.6
except Exception as err:
print(f'Other error occurred: {err}') # Python 3.6
else:
print('Success! Token has been retrieved.')
token = tokenRequest.json()
client = discord.Client()
@client.event
async def on_ready():
print('We have logged in as {0.user} '.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if message.content.startswith('!online'):
answer = requests.get("https://eu.api.blizzard.com/data/wow/guild/"+realm+"/"+guild+"/roster?namespace=profile-eu&locale=en_US&access_token=" +
token['access_token'], headers={"Authorization": "Bearer "+token['access_token']}).json()
print(answer["members"])
returnmsg = "Current online members : "
threads = []
for member in answer["members"]:
thread = analysisthread(member, token)
thread.start()
threads.append(thread)
for t in threads:
t.join()
for i in threadreturnlist:
print(i[0], i[1])
howlongago = i[1]
if howlongago <= 36000:
returnmsg += i[0]+" has been seen: " + \
str(round(howlongago/60))+" minutes ago , "
threadreturnlist = []
await message.channel.send(returnmsg)
client.run(discordToken)
| true |
75004887a63846ec8be086a074a9440f9c0dda1e | Python | mlejva/deeplearning | /uppercase_letters/du_8_uppercase_letters.py | UTF-8 | 11,365 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import datetime
import numpy as np
import tensorflow as tf
import tensorflow.contrib.losses as tf_losses
import tensorflow.contrib.layers as tf_layers
import tensorflow.contrib.metrics as tf_metrics
class Dataset:
def __init__(self, filename, alphabet = None):
# Load the sentences
sentences = []
with open(filename, "r") as file:
for line in file:
sentences.append(line.rstrip("\r\n"))
# Compute sentence lengths
self._sentence_lens = np.zeros([len(sentences)], np.int32)
for i in range(len(sentences)):
self._sentence_lens[i] = len(sentences[i])
max_sentence_len = np.max(self._sentence_lens)
# Create alphabet_map
alphabet_map = {'<pad>': 0, '<unk>': 1}
if alphabet is not None:
for index, letter in enumerate(alphabet):
alphabet_map[letter] = index
# Remap input characters using the alphabet_map
self._sentences = np.zeros([len(sentences), max_sentence_len], np.int32)
self._labels = np.zeros([len(sentences), max_sentence_len], np.int32)
for i in range(len(sentences)):
for j in range(len(sentences[i])):
char = sentences[i][j].lower()
if char not in alphabet_map:
if alphabet is None:
alphabet_map[char] = len(alphabet_map)
else:
char = '<unk>'
self._sentences[i, j] = alphabet_map[char]
self._labels[i, j] = 0 if sentences[i][j].lower() == sentences[i][j] else 1
# Compute alphabet
self._alphabet = [""] * len(alphabet_map)
for key, value in alphabet_map.items():
self._alphabet[value] = key
self._permutation = np.random.permutation(len(self._sentences))
@property
def alphabet(self):
return self._alphabet
@property
def sentences(self):
return self._sentences
@property
def sentence_lens(self):
return self._sentence_lens
@property
def labels(self):
return self._labels
def next_batch(self, batch_size):
batch_size = min(batch_size, len(self._permutation))
batch_perm = self._permutation[:batch_size]
self._permutation = self._permutation[batch_size:]
batch_len = np.max(self._sentence_lens[batch_perm])
return self._sentences[batch_perm, 0:batch_len], self._sentence_lens[batch_perm], self._labels[batch_perm, 0:batch_len]
def epoch_finished(self):
if len(self._permutation) == 0:
self._permutation = np.random.permutation(len(self._sentences))
return True
return False
class Network:
def __init__(self, alphabet_size, rnn_cell, rnn_cell_dim, logdir, expname, threads=1, seed=42):
# Create an empty graph and a session
graph = tf.Graph()
graph.seed = seed
self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
self.summary_writer = tf.train.SummaryWriter("{}/{}-{}".format(logdir, timestamp, expname), flush_secs=10)
# Construct the graph
with self.session.graph.as_default():
if rnn_cell == "LSTM":
rnn_cell = tf.nn.rnn_cell.LSTMCell(rnn_cell_dim)
elif rnn_cell == "GRU":
rnn_cell = tf.nn.rnn_cell.GRUCell(rnn_cell_dim)
else:
raise ValueError("Unknown rnn_cell {}".format(rnn_cell))
self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False, name="global_step")
self.sentences = tf.placeholder(tf.int32, [None, None])
self.sentence_lens = tf.placeholder(tf.int32, [None])
self.labels = tf.placeholder(tf.int32, [None, None])
# encoded_letters are basically whole words as an input but encoded using one-hot vector
encoded_letters = tf.one_hot(self.sentences, alphabet_size) # every letter will have its own one-hot vector with the length of an alphabet
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(rnn_cell, rnn_cell, encoded_letters, dtype=tf.float32, sequence_length=self.sentence_lens)
self.output_total = output_fw + output_bw
mask2d = tf.sequence_mask(self.sentence_lens) # creates a 2D tensor of [true, true, ..., true, false, false, ..., false] with length of the max length of 'row' of sentence_lens
# We want to add another axis to mask2d so it can be applied to the cells' output
mask3d = tf.pack(np.repeat(mask2d, rnn_cell_dim).tolist(), axis=2)
masked_labels = tf.boolean_mask(self.labels, mask2d)
masked_output_total = tf.boolean_mask(self.output_total, mask3d)
# masked_output_total is currently of shape (?,) wee need shape (?, rnn_cell_dim)
# because tensor for fully_connected layers must have at least rank 2 (this has rank 1) + last dimensions must be known
masked_output_total = tf.reshape(masked_output_total, [-1, rnn_cell_dim]) # -1 will infer the shape of the first axis based on the rnn_cell_dim
output_layer = tf_layers.fully_connected(masked_output_total, 2)
self.predictions = tf.argmax(output_layer, 1)
loss = tf_losses.sparse_softmax_cross_entropy(output_layer, masked_labels)
self.training = tf.train.AdamOptimizer().minimize(loss, self.global_step)
self.accuracy = tf_metrics.accuracy(tf.cast(self.predictions, dtype=tf.int32), masked_labels)
'''
Ruzne vety mohou byt jinak dlouhe -> musi se vyresit (= neni pevna delka grafu)
Dynamicke nodes:
outputs, state = tf.nn.dynamic_rnn(cell, inputs, sequence_lens=None, initial_state=None, time_major=False)
tady ale musime pouzit:
outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, ...)
vraci dva outputy, jeden output forward site, druhy ouput je backward site
vysledny output je soucet tech dvou
'''
'''
TODO: Jak udelat embedding?
tf.nn.embedding_lookup(weights, indices) (indices je seznam indexu, ktere nas zajimaji)
vraci radky z matice weights podle toho, co jsme zadali do indices
tf.set_variable('alpha_emb', shape=[alphabet_size, dim_of_embedding])
ze states se musi vyrezat jen spravny kus, protoze je to zarovnane nulama
hodi se k tomu: tf.sequence_mask(lengths, maxlen=None, dtype=tf.bool, name=None)
+ tf.cast()
'''
self.dataset_name = tf.placeholder(tf.string, [])
self.summary = tf.scalar_summary(self.dataset_name+"/accuracy", self.accuracy)
# Initialize variables
self.session.run(tf.initialize_all_variables())
@property
def training_step(self):
return self.session.run(self.global_step)
def train(self, sentences, sentence_lens, labels):
accuracy, _, summary = self.session.run([self.accuracy, self.training, self.summary],
{self.sentences: sentences, self.sentence_lens: sentence_lens,
self.labels: labels, self.dataset_name: "train"})
self.summary_writer.add_summary(summary, self.training_step)
return accuracy
def evaluate(self, sentences, sentence_lens, labels, dataset_name):
accuracy, summary = self.session.run([self.accuracy, self.summary], {self.sentences: sentences, self.sentence_lens: sentence_lens,
self.labels: labels, self.dataset_name: dataset_name})
self.summary_writer.add_summary(summary, self.training_step)
return accuracy
def create_and_run_experiment(args):
print('Running new experiment....')
expname = "uppercase-letters-{}{}-bs{}-epochs{}".format(args.rnn_cell, args.rnn_cell_dim, args.batch_size, args.epochs)
print('Experiment name: %s\n' % expname)
print('Cell type: %s' % args.rnn_cell)
print('Cell dimension: %d' % args.rnn_cell_dim)
print('Batch size: %d' % args.batch_size)
print('Epochs: %d\n' % args.epochs)
# Load the data
data_train = Dataset(args.data_train)
data_dev = Dataset(args.data_dev, data_train.alphabet)
data_test = Dataset(args.data_test, data_train.alphabet)
# Construct the network
network = Network(alphabet_size=len(data_train.alphabet), rnn_cell=args.rnn_cell, rnn_cell_dim=args.rnn_cell_dim, logdir=args.logdir, expname=expname, threads=args.threads)
# Train
for epoch in range(args.epochs):
print("Training epoch {}".format(epoch))
while not data_train.epoch_finished():
sentences, sentence_lens, labels = data_train.next_batch(args.batch_size)
accuracy = network.train(sentences, sentence_lens, labels)
#print('Accuracy on train dataset: %.5f' % accuracy)
accuracy_dev = network.evaluate(data_dev.sentences, data_dev.sentence_lens, data_dev.labels, "dev")
print('Accuracy on the development dataset: %.5f' % accuracy_dev)
accuracy_test = network.evaluate(data_test.sentences, data_test.sentence_lens, data_test.labels, "test")
print('Accuracy on the test dataset: %.5f' % accuracy_test)
print('===========================')
if __name__ == "__main__":
# Fix random seed
np.random.seed(42)
cell_dimensions = [10, 20, 40, 80]
cell_types = ['LSTM', 'GRU']
epochs_num = [10, 20]
batch_size = [10, 15, 20, 25]
for cdim in cell_dimensions:
for ctype in cell_types:
for epnum in epochs_num:
for bsize in batch_size:
# Parse arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=bsize, type=int, help="Batch size.")
parser.add_argument("--data_train", default="en-ud-train.txt", type=str, help="Training data file.")
parser.add_argument("--data_dev", default="en-ud-dev.txt", type=str, help="Development data file.")
parser.add_argument("--data_test", default="en-ud-test.txt", type=str, help="Testing data file.")
parser.add_argument("--epochs", default=epnum, type=int, help="Number of epochs.")
parser.add_argument("--logdir", default="logs", type=str, help="Logdir name.")
parser.add_argument("--rnn_cell", default=ctype, type=str, help="RNN cell type.")
parser.add_argument("--rnn_cell_dim", default=cdim, type=int, help="RNN cell dimension.")
parser.add_argument("--threads", default=8, type=int, help="Maximum number of threads to use.")
args = parser.parse_args()
create_and_run_experiment(args)
| true |
978cfe62306ab9d332d33c70f1c6f03c48700930 | Python | l5d1l5/EPAquaticSurvey | /04_nutrientsChlaLake.py | UTF-8 | 10,308 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# ## Import modules and set path
# In[1]:
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display
# Widen pandas' notebook display so the wide NLA chemistry table is not truncated.
pd.options.display.max_columns = 50
pd.options.display.max_rows = 20
from scipy import stats
import matplotlib as mpl
# wd = parent of the current working directory; the dataset path below is
# resolved relative to it (wd + '/dataset/Lakes/...').
wd = os.path.dirname(os.getcwd())
# In[2]:
# Global matplotlib style for every figure in this script: large bold titles
# and axis labels, thick black axes, oversized tick marks, and generous
# subplot margins -- sized for the 24x9-inch figures produced below.
# Base font kept as a named dict (later cells may reuse it).
font = {'family': 'Arial', 'size': 35}
mpl.rcParams.update({
    # figure title
    'figure.titlesize': 45,
    'figure.titleweight': 'bold',
    # axes frame and labels
    'axes.titlesize': 40,
    'axes.titleweight': 'bold',
    'axes.titlepad': 20,
    'axes.facecolor': 'white',
    'axes.edgecolor': 'black',
    'axes.linewidth': 2,
    'axes.labelweight': 'normal',
    'axes.labelsize': 40,
    'axes.labelcolor': 'black',
    # line defaults
    'lines.linewidth': 1.5,
    'lines.color': 'red',
    # font
    'font.family': font['family'],
    'font.size': font['size'],
    # x ticks
    'xtick.major.width': 2,
    'xtick.major.size': 15,
    'xtick.labelsize': 35,
    # y ticks
    'ytick.major.width': 2,
    'ytick.major.size': 15,
    'ytick.labelsize': 35,
    # subplot spacing
    'figure.subplot.bottom': 0.15,
    'figure.subplot.top': 0.90,
    'figure.subplot.left': 0.1,
    'figure.subplot.right': 0.9,
    'figure.subplot.hspace': 0.3,
    'figure.subplot.wspace': 0.25,
})
# ## Read in the nlaWaterChem dataset
# In[3]:
# National Lakes Assessment water-chemistry table, located relative to the
# repository root computed above.
nlaWaterChem = pd.read_csv(wd+'/dataset/Lakes/nlaWaterChem.csv')
# nlaWaterChem.columns
# In[4]:
# NOTE(review): explicit column subsetting kept for reference but disabled --
# the plots below index the needed columns directly.
# nlaWaterChem = \
# nlaWaterChem[['UID', 'LAKE_ORIGIN', 'LAKE_ORIGIN12', 'LAT_DD83', 'LON_DD83', 'SIZE_CLASS', 'URBAN',
# 'DOC_RESULT', 'PH_RESULT', 'ANC_RESULT', 'COND_RESULT', 'TURB_RESULT',
# 'DO2_2M', 'PTL_RESULT', 'NTL_RESULT', 'CHLL_ugL', 'CHLX_ugL',
# 'AMMONIA_N_RESULT', 'NITRATE_N_RESULT', 'NITRITE_N_RESULT', 'NITRATE_NITRITE_N_RESULT',
# 'SODIUM_RESULT', 'POTASSIUM_RESULT', 'CALCIUM_RESULT', 'MAGNESIUM_RESULT', 'ALUMINUM_RESULT',
# 'CHLORIDE_RESULT', 'SULFATE_RESULT', 'SILICA_RESULT',]]
# In[5]:
def thousandkCon(start, end, number):
    """Build axis tick labels expressed in thousands.

    Generates ``number`` evenly spaced values between ``start`` and ``end``,
    divides them by 1000 and formats each with one decimal place plus a
    'k' suffix (e.g. 2000 -> '2.0k').

    Parameters
    ----------
    start, end : float
        Range of the raw tick values, in the same units as the plotted data.
    number : int
        Number of labels to generate.

    Returns
    -------
    list of str
        Formatted labels such as ['0.0k', '1.0k', '2.0k'].
    """
    lab_num = np.linspace(start, end, number) / 1000
    return ['{:.1f}k'.format(num) for num in lab_num]
# ## Plot Figures for Lakes
# Each section below fits an OLS line (scipy.stats.linregress) of chlorophyll
# (CHLX_ugL) against total P and total N for natural-origin lakes, under
# progressively stricter site filters, and saves one two-panel figure.
# In[6]:
nlaWaterChemRes = nlaWaterChem[nlaWaterChem.LAKE_ORIGIN12 == 'NATURAL']
# In[7]:
fig = plt.figure(figsize=(24, 9))
# subplot9: totalP versus chlx
ax = fig.add_subplot(1,2,1)
ax.scatter(nlaWaterChemRes['PTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
# Drop rows where either variable is missing before fitting the regression.
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.PTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.PTL_RESULT, nlaWaterChemClean.CHLX_ugL)
# slope, intercept, r-value, p-value, std-err of the fit
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.PTL_RESULT.min(), nlaWaterChemClean.PTL_RESULT.max()])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(1000, 600,
'$y$ = {:1.1f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('total P versus Chlx a', loc = 'center')
plt.xticks(ticks = np.linspace(0, 4000, 5))
plt.yticks(np.arange(0, 801, 200, dtype=int))
plt.xlabel(r'total P')
plt.ylabel(r'$\mathrm{chl \/ (}\mu g \/ L^{-1})$')
# subplot10: totalN versus chlx
ax = fig.add_subplot(1,2,2)
ax.scatter(nlaWaterChemRes['NTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.NTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.NTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.NTL_RESULT.min(),nlaWaterChemClean.NTL_RESULT.max()])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(20, 500,
'$y$ = {:1.0f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('total N versus Chl a', loc = 'center')
# ax.set_xlim([0, 10])
# plt.xticks(ticks = np.linspace(0, 10, 6))
# plt.yticks(np.arange(0, 801, 200, dtype=int))
plt.xlabel(r'total N')
plt.ylabel(r'$\mathrm{Chl \/ (}\mu g \/ L^{-1})$')
plt.show()
fig.savefig(wd+'/output/figure/nutrientsChla/lake.png')
# ## Keep Only Sites with Decreasing Temp
# In[8]:
# Restrict to sites listed in the decreasing-temperature site table (by UID).
siteDecTemp = pd.read_csv(wd+'/dataset/Lakes/siteNumMeas2plusDecTemp.csv')
nlaWaterChemDecTemp = nlaWaterChem[nlaWaterChem.UID.isin(siteDecTemp.UID)]
# In[9]:
nlaWaterChemRes = nlaWaterChemDecTemp[nlaWaterChemDecTemp.LAKE_ORIGIN12 == 'NATURAL']
# In[10]:
fig = plt.figure(figsize=(24, 9))
# subplot9: totalP versus chlx
ax = fig.add_subplot(1,2,1)
ax.scatter(nlaWaterChemRes['PTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.PTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.PTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.PTL_RESULT.min(), nlaWaterChemClean.PTL_RESULT.max()])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(1000, 200,
'$y$ = {:1.1f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('totalP versus Chlx a', loc = 'center')
plt.xticks(ticks = np.linspace(0, 2100, 4))
plt.yticks(np.arange(0, 201, 50, dtype=int))
plt.xlabel(r'total P')
plt.ylabel(r'$\mathrm{chl \/ (}\mu g \/ L^{-1})$')
# subplot10: totalN versus chlx
ax = fig.add_subplot(1,2,2)
ax.scatter(nlaWaterChemRes['NTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.NTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.NTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.NTL_RESULT.min(),nlaWaterChemClean.NTL_RESULT.max()])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(4, 180,
'$y$ = {:1.0f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('total N versus Chl a', loc = 'center')
# ax.set_xlim([0, 10])
# plt.xticks(ticks = np.linspace(0, 10, 6))
# plt.yticks(np.arange(0, 801, 200, dtype=int))
plt.xlabel(r'total N')
plt.ylabel(r'$\mathrm{Chl \/ (}\mu g \/ L^{-1})$')
plt.show()
fig.savefig(wd+'/output/figure/nutrientsChla/lakeDecreaseTemp.png')
# ## keep only sites where Temp > 20
# In[11]:
nlaWaterChemDecTemp20degPlus = nlaWaterChemDecTemp[nlaWaterChemDecTemp.TEMPERATURE >= 20]
# In[12]:
nlaWaterChemRes = nlaWaterChemDecTemp20degPlus[nlaWaterChemDecTemp20degPlus.LAKE_ORIGIN12 == 'NATURAL']
# In[13]:
fig = plt.figure(figsize=(24, 9))
# subplot9: totalP versus chlx
ax = fig.add_subplot(1,2,1)
ax.scatter(nlaWaterChemRes['PTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.PTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.PTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
# Regression line drawn out to a fixed x of 2100 here (not the data max).
x = np.array([nlaWaterChemClean.PTL_RESULT.min(), 2100])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(1000, 180,
'$y$ = {:1.1f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('totalP versus Chlx a', loc = 'center')
plt.xticks(ticks = np.linspace(0, 2100, 4))
plt.yticks(np.arange(0, 201, 50, dtype=int))
plt.xlabel(r'total P')
plt.ylabel(r'$\mathrm{chl \/ (}\mu g \/ L^{-1})$')
# subplot10: totalN versus chlx
ax = fig.add_subplot(1,2,2)
ax.scatter(nlaWaterChemRes['NTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.NTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.NTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.NTL_RESULT.min(),nlaWaterChemClean.NTL_RESULT.max()])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(2, 180,
'$y$ = {:1.0f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('total N versus Chl a', loc = 'center')
# ax.set_xlim([0, 10])
# plt.xticks(ticks = np.linspace(0, 10, 6))
# plt.yticks(np.arange(0, 801, 200, dtype=int))
plt.xlabel(r'total N')
plt.ylabel(r'$\mathrm{Chl \/ (}\mu g \/ L^{-1})$')
plt.show()
fig.savefig(wd+'/output/figure/nutrientsChla/lakeDecreaseTemp20degPlus.png')
# ## keep only reservoirs bigger than 5 ha
# NOTE(review): despite the heading, the code below still selects
# LAKE_ORIGIN12 == 'NATURAL' lakes, not reservoirs — confirm intent.
# In[14]:
nlaWaterChemDecTempBig = nlaWaterChemDecTemp[nlaWaterChemDecTemp.AREA_HA >= 5]
nlaWaterChemDecTempBig.shape
# In[15]:
nlaWaterChemRes = nlaWaterChemDecTempBig[nlaWaterChemDecTempBig.LAKE_ORIGIN12 == 'NATURAL']
# In[16]:
fig = plt.figure(figsize=(24, 9))
# subplot9: totalP versus chlx
ax = fig.add_subplot(1,2,1)
ax.scatter(nlaWaterChemRes['PTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.PTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.PTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.PTL_RESULT.min(), 2100])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(1000, 180,
'$y$ = {:1.1f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('totalP versus Chlx a', loc = 'center')
plt.xticks(ticks = np.linspace(0, 2100, 4))
plt.yticks(np.arange(0, 201, 50, dtype=int))
plt.xlabel(r'total P')
plt.ylabel(r'$\mathrm{chl \/ (}\mu g \/ L^{-1})$')
# subplot10: totalN versus chlx
ax = fig.add_subplot(1,2,2)
ax.scatter(nlaWaterChemRes['NTL_RESULT'], nlaWaterChemRes['CHLX_ugL'], s = 160, c = 'blue')
nlaWaterChemClean = nlaWaterChemRes[~nlaWaterChemRes.NTL_RESULT.isna() & ~nlaWaterChemRes.CHLX_ugL.isna()]
lm = stats.linregress(nlaWaterChemClean.NTL_RESULT, nlaWaterChemClean.CHLX_ugL)
a, b, r, p, e = np.asarray(lm)
x = np.array([nlaWaterChemClean.NTL_RESULT.min(),nlaWaterChemClean.NTL_RESULT.max()])
plt.plot(x, a*x+b, linewidth = 6, color = 'blue')
plt.text(2, 180,
'$y$ = {:1.0f} $x$ + {:1.0f},\n$r^2$ = {:1.2f}, $p$ = {:1.4f}'.format(a,b, r**2, p))
plt.title('total N versus Chl a', loc = 'center')
# ax.set_xlim([0, 10])
# plt.xticks(ticks = np.linspace(0, 10, 6))
# plt.yticks(np.arange(0, 801, 200, dtype=int))
plt.xlabel(r'total N')
plt.ylabel(r'$\mathrm{Chl \/ (}\mu g \/ L^{-1})$')
plt.show()
fig.savefig(wd+'/output/figure/nutrientsChla/lakeDecreaseTempBigThan5HA.png')
| true |
4d38d84185094b4eca8eb9ffd5670d4ac0ef01de | Python | Amaguk2023/Pyspark_Spotify_ETL | /Airflow/Modules/data_request.py | UTF-8 | 862 | 2.515625 | 3 | [] | no_license | import requests
import json
from datetime import datetime
import datetime
from Spotify_ETL_Modules import extraction
def spotify_data_request(ACCESS_TOKEN):
    """Fetch the user's recently played tracks (last 24 h) from the Spotify
    Web API and forward the JSON payload to ``extraction.info_extraction``.

    Parameters
    ----------
    ACCESS_TOKEN : str
        OAuth bearer token for the Spotify Web API.
    """
    # API credentials for the data request.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": "Bearer {token}".format(token=ACCESS_TOKEN)
    }
    # 'after' cursor: this time yesterday as a Unix timestamp in milliseconds.
    today = datetime.datetime.now()
    yesterday = today - datetime.timedelta(days=1)
    yesterday_unix = int(yesterday.timestamp() * 1000)
    # Request the recently-played feed from the Spotify API.
    recently_played_data_request = requests.get(
        "https://api.spotify.com/v1/me/player/recently-played?after={time}".format(time=yesterday_unix),
        headers=headers)
    # Decode the response body and hand it off for extraction.
    recently_played_json = recently_played_data_request.json()
    extraction.info_extraction(recently_played_json)
| true |
88b55d69950b3b62f78df9597dcf6ab061c29c18 | Python | ThorsteinnAdal/webcrawls_in_singapore_shippinglane | /db_to_file_helpers/test_jsonDicts_to_file.py | UTF-8 | 4,530 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | __author__ = 'thorsteinn'
import unittest
import os
import json
from jsonDicts_to_file import db_to_file, file_to_db, check_if_db_is_in_file, groom_file_db
class MyTestCase(unittest.TestCase):
    """Unit tests for the file-backed JSON-dict database helpers in
    jsonDicts_to_file (db_to_file, file_to_db, check_if_db_is_in_file,
    groom_file_db). Fixtures are written to the current working directory."""
    def setUp(self):
        '''
        make a reasonable looking dictionary
        '''
        # One JSON dict per line; 'second' is written twice and 'secondmix'
        # has the same keys in a different order, to exercise de-duplication.
        first = {'header001': {'type': 'container vessel', 'callsign': 'br"olt', 'length': 14.114, 'width': 7.11}}
        second = {'header002': {'type': 'container vessel', 'callsign': 'mori', 'length': 10.114, 'width': 4.11}}
        secondmix = {'header002': {'callsign': 'mori', 'length': 10.114, 'width': 4.11, 'type': 'container vessel'}}
        third = {'header003': {'type': 'Ro-ro cargo', 'callsign': 'dora', 'length': 40.1, 'width': 7.11}}
        with open('test_jsonDict_to_file.txt', 'w') as f:
            json.dump(first,f)
            f.write('\n')
            json.dump(second,f)
            f.write('\n')
            json.dump(secondmix,f)
            f.write('\n')
            json.dump(second,f)
            f.write('\n')
            json.dump(third,f)
            f.write('\n')
    def test_db_to_file_to_db(self):
        # Round trip: writing a db to file and reading it back is lossless.
        db = {'a': {'aa': 'abcd', 'bb': 'bcde', 'cc': 'fghi'},
              'b': {'aa': 'efgh', 'bb': 'fghi', 'cc': 'fghi'},
              'c': {'aa': 'ijkl', 'bb': 'jklm', 'cc': 'fghi'},
              'd': {'aa': 'mnop', 'bb': 'nopq', 'cc': 'fghi'}}
        db_to_file(db, 'test_db_to_file_to_db.txt')
        samedb = file_to_db('test_db_to_file_to_db.txt')
        self.assertEqual(db, samedb, msg="THE DATABASES WERE NOT EQUAL")
        os.remove('test_db_to_file_to_db.txt')
    def test_db_to_file_to_db_duplicates(self):
        # A duplicate record appended to the file must not change the
        # database reconstructed by file_to_db.
        db = {'a': {'aa': 'abcd', 'bb': 'bcde', 'cc': 'fghi'},
              'b': {'aa': 'efgh', 'bb': 'fghi', 'cc': 'fghi'},
              'c': {'aa': 'ijkl', 'bb': 'jklm', 'cc': 'fghi'},
              'd': {'aa': 'mnop', 'bb': 'nopq', 'cc': 'fghi'}}
        db_to_file(db, 'test_db_to_file_to_db_duplicates.txt')
        duplicate_db = {'b': {'aa': 'efgh', 'bb': 'fghi', 'cc': 'fghi'}}
        with open('test_db_to_file_to_db_duplicates.txt', 'a') as f:
            json.dump(duplicate_db, f)
            f.write('\n')
        samedb = file_to_db('test_db_to_file_to_db_duplicates.txt')
        self.assertEqual(db, samedb, msg="THE DATABASES WERE NOT EQUAL")
        os.remove('test_db_to_file_to_db_duplicates.txt')
    def test_check_if_db_is_in_file(self):
        # Key order inside the inner dict ('first' vs 'frist') must not
        # affect membership; an unknown header must not be found.
        first = {'header001': {'type': 'container vessel', 'callsign': 'br"olt', 'length': 14.114, 'width': 7.11}}
        frist = {'header001': {'callsign': 'br"olt', 'length': 14.114, 'width': 7.11, 'type': 'container vessel'}}
        not_in_db = {'header005': {'callsign': 'br"olt', 'length': 14.114, 'width': 7.11, 'type': 'container vessel'}}
        self.assertTrue(check_if_db_is_in_file(first, 'test_jsonDict_to_file.txt'),
                        msg="DIDN'T FIND THE FIRST DATABASE")
        self.assertTrue(check_if_db_is_in_file(frist, 'test_jsonDict_to_file.txt'),
                        msg="DIDN'T FIND THE FRIST DATABASE")
        self.assertFalse(check_if_db_is_in_file(not_in_db, 'test_jsonDict_to_file.txt'),
                         msg="FOUND A NON EXISTING DICT IN DB FILE")
    def test_add_dict_to_file(self):
        # TODO: placeholder — behaviour of add_dict_to_file is not covered yet.
        pass
    def test_groom_file_db(self):
        # Grooming a file with duplicated records yields a file that parses
        # back to the original, duplicate-free database.
        db = {'a': {'aa': 'abcd', 'bb': 'bcde', 'cc': 'fghi'},
              'b': {'aa': 'efgh', 'bb': 'fghi', 'cc': 'fghi'},
              'c': {'aa': 'ijkl', 'bb': 'jklm', 'cc': 'fghi'},
              'd': {'aa': 'mnop', 'bb': 'nopq', 'cc': 'fghi'}}
        db_to_file(db, 'test_groom_file_db_inFile.txt')
        duplicate1_db = {'b': {'aa': 'efgh', 'bb': 'fghi', 'cc': 'fghi'}}
        duplicate2_db = {'c': {'aa': 'ijkl', 'bb': 'jklm', 'cc': 'fghi'}}
        with open('test_groom_file_db_inFile.txt', 'a') as f:
            json.dump(duplicate1_db, f)
            f.write('\n')
            json.dump(duplicate1_db, f)
            f.write('\n')
            json.dump(duplicate1_db, f)
            f.write('\n')
            json.dump(duplicate2_db, f)
            f.write('\n')
        groom_file_db('test_groom_file_db_inFile.txt', 'test_groom_file_db_outFile.txt')
        db2 = file_to_db('test_groom_file_db_outFile.txt')
        self.assertEqual(db, db2, msg="THE DATABASES ARE NOT EQUAL")
        os.remove('test_groom_file_db_inFile.txt')
        os.remove('test_groom_file_db_outFile.txt')
    def tearDown(self):
        # Remove the shared fixture created by setUp.
        os.remove('test_jsonDict_to_file.txt')
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| true |
3ad86f3a18606fca80255b910730a84786cea1c9 | Python | yukokokoo/Randomized-optimization | /problem distribution.py | UTF-8 | 1,521 | 3.28125 | 3 | [] | no_license | import mlrose_hiive
import numpy as np
import matplotlib.pyplot as plt
from itertools import permutations
# Enumerate all 256 length-8 bit strings and evaluate the OneMax fitness
# of each, then plot the full fitness landscape in integer order.
fitness = np.empty(256)
for i in range(256):
    state = np.zeros(8)
    # Right-align the binary digits of i into the 8-element state vector.
    num = [int(x) for x in list('{0:0b}'.format(i))]
    for j in range(8 - len(num), 8):
        state[j] = num[j - (8 - len(num))]
    fitness[i] = mlrose_hiive.OneMax().evaluate(state)
plt.plot(fitness)
plt.title('One Max')
plt.ylabel('Fitness')
plt.show()
# Same enumeration for the Four Peaks fitness function.
fitness = np.empty(256)
for i in range(256):
    state = np.zeros(8)
    num = [int(x) for x in list('{0:0b}'.format(i))]
    for j in range(8 - len(num), 8):
        state[j] = num[j - (8 - len(num))]
    fitness[i] = mlrose_hiive.FourPeaks().evaluate(state)
plt.plot(fitness)
plt.title('Four Peaks')
plt.ylabel('Fitness')
plt.show()
def queens_max(state):
    """Count the pairs of queens that do not attack each other.

    state[i] is the row of the queen in column i. A pair attacks when the
    queens share a row or sit on a common diagonal; the score is the number
    of non-attacking pairs, so a perfect n-queens placement scores
    n * (n - 1) / 2.
    """
    safe_pairs = 0
    n = len(state)
    for col_a in range(n - 1):
        for col_b in range(col_a + 1, n):
            offset = col_b - col_a
            same_row = state[col_b] == state[col_a]
            on_diagonal = state[col_b] in (state[col_a] + offset, state[col_a] - offset)
            if not (same_row or on_diagonal):
                safe_pairs += 1
    return safe_pairs
# Evaluate queens_max over all 5! = 120 permutations of rows 0..4 and plot
# the resulting N-Queens fitness landscape.
state = list(permutations(range(0, 5)))
fitness = np.empty(120)
for i in range(len(state)):
    fitness[i] = queens_max(state[i])
plt.plot(fitness)
plt.title('N Queens')
plt.ylabel('Fitness')
plt.show()
| true |
bdc9c6a7d1067ba96b2f30ed0c33f79c5818204f | Python | Yohager/Leetcode | /python版本/5839-MinStoneSum.py | UTF-8 | 415 | 3.171875 | 3 | [] | no_license | class Solution:
def minStoneSum(self, piles: List[int], k: int) -> int:
tmp = [(-1)*i for i in piles]
import heapq
#import math
heapq.heapify(tmp)
while k > 0:
#弹出当前的最小值
cur = heapq.heappop(tmp)
cur = cur // 2
heapq.heappush(tmp,cur)
k -= 1
#print(tmp)
return (-1) * sum(tmp) | true |
e0d3ed27dd48792145a724db61e2ecb9ab9c66a6 | Python | euggrie/w3resources_python_exercises | /Basic/exercise12.py | UTF-8 | 794 | 4.1875 | 4 | [] | no_license | ###############################
# #
# Exercise 12 #
# www.w3resource.com #
###############################
# Write a Python program to print the calendar of a given month and year.
# Note : Use 'calendar' module.
###################################################################################################
import calendar
# Read the year and month from the user, then print that month as text.
year = int(input("Enter a year: "))
month = int(input("Enter a month: "))
cal2 = calendar.TextCalendar()
print(cal2.formatmonth(year,month))
# Another option to print it in HTML:
# cal = calendar.HTMLCalendar()
# print(cal.formatmonth(2015, 11, True))
5ce01a0dbcc9050e6664d0ddd419d5f8c0bcd54c | Python | pafreema/grs1915 | /COcubemasking.py | UTF-8 | 1,158 | 2.59375 | 3 | [] | no_license | from spectral_cube import SpectralCube
import astropy.io.fits as pyfits
import astropy.units as u
# Load the 12CO and 13CO cubes and convert both spectral axes to km/s
# (radio velocity convention).
twelve=SpectralCube.read('12CO_combinewithtp.fits')
twelve_1=twelve.with_spectral_unit(u.km/u.s, velocity_convention='radio')
thirteen=SpectralCube.read('13CO_combinewithtp.fits')
thirteen_1=thirteen.with_spectral_unit(u.km/u.s, velocity_convention='radio')
#interpolate 12CO cube to match the 13CO spectral axis
twelve_interp=twelve_1.spectral_interpolate(spectral_grid=thirteen_1.spectral_axis)
# Initial 13CO mask: keep voxels above 0.05 (presumably ~5x the rms noise —
# TODO confirm the rms value) and drop exact zeros (blanked data).
tmax=pyfits.open('13CO_combinewithtp.fits')
mask=tmax[0].data>0.05
badmask=tmax[0].data==0
keep=mask*(~badmask)
# Second mask: additionally require the 13CO signal to exceed 0.75x the
# interpolated 12CO emission.
mask_thirteen=thirteen_1.with_mask(keep[0,:,:,:])
mask_thirteen_1=mask_thirteen.with_mask(thirteen_1>(twelve_interp*0.75))
mask_thirteen_1.write('13CO_combinewithtp_masked.fits')
#find the 0th moment of this masked cube
thirteen_0=mask_thirteen_1.moment(order=0)
thirteen_0.write('13CO_moment0_masked.fits')
# Line ratio cube. NOTE(review): this computes 12CO/13CO, although the
# original comment said 13CO/12CO — confirm which ratio is intended.
ratio=twelve_interp/thirteen_1
ratio.write('13CO_12CO_ratio.fits')
| true |
6b6ca3858bcbc1f1d4ba398c44742396db106577 | Python | adrianmfi/deep-tictactoe | /src/board_to_img.py | UTF-8 | 2,130 | 2.96875 | 3 | [] | no_license | import os
from PIL import Image
# Output canvas size in pixels (square board image).
WIDTH = 512
HEIGHT = 512
def to_image(board_arr, img_name):
    """Render a 3x3 tic-tac-toe board to ``<img_name>.jpg``.

    Parameters
    ----------
    board_arr : 3x3 nested sequence of int
        0 = empty cell, 1 = cross, 2 = circle.
    img_name : str
        Output file name without the '.jpg' extension.

    Raises
    ------
    ValueError
        If any cell holds a value other than 0, 1 or 2.
    """
    # Create the canvas pre-filled with white instead of painting every
    # pixel individually (the old loop touched WIDTH*HEIGHT pixels).
    img = Image.new("RGB", (WIDTH, HEIGHT), (255, 255, 255))
    shapes_path = 'shapes'
    rel_dir = os.path.dirname(__file__)
    # Sprites live in the 'shapes' folder next to this module; each is
    # scaled to a quarter of the canvas.
    # NOTE(review): Image.ANTIALIAS is deprecated in recent Pillow releases
    # (use Image.LANCZOS there) — kept for compatibility with the pinned
    # version this project uses.
    circ = Image.open(os.path.join(rel_dir, shapes_path, 'circle.png')).resize(
        (WIDTH // 4, HEIGHT // 4), Image.ANTIALIAS)
    cross = Image.open(os.path.join(rel_dir, shapes_path, 'cross.png')).resize(
        (WIDTH // 4, HEIGHT // 4), Image.ANTIALIAS)
    pix = img.load()
    half = HEIGHT // 64  # half-thickness of each grid line
    # Two horizontal grid lines centred at 1/3 and 2/3 of the height.
    for frac in (1, 2):
        center = frac * HEIGHT // 3
        for x_pos in range(WIDTH):
            for y_pos in range(center - half, center + half):
                pix[x_pos, y_pos] = (0, 0, 0)
    # Two vertical grid lines centred at 1/3 and 2/3 of the width.
    for frac in (1, 2):
        center = frac * WIDTH // 3
        for y_pos in range(HEIGHT):
            for x_pos in range(center - half, center + half):
                pix[x_pos, y_pos] = (0, 0, 0)
    # Paste the sprite for each occupied cell, offset slightly into the cell.
    for i in range(3):
        for j in range(3):
            if board_arr[i][j] == 1:
                img.paste(cross, (WIDTH * j // 3 + HEIGHT //
                                  25, WIDTH // 25 + WIDTH * i // 3))
            elif board_arr[i][j] == 2:
                img.paste(circ, (WIDTH * j // 3 + HEIGHT //
                                 25, WIDTH // 25 + WIDTH * i // 3))
            elif not board_arr[i][j] == 0:
                raise ValueError(
                    'Board [{}][{}] = {}'.format(i, j, board_arr[i][j]))
    img.save(img_name + ".jpg")
if __name__ == '__main__':
    # Demo: render a sample mid-game board to test.jpg.
    to_image([[2, 0, 1], [0, 0, 1], [2, 1, 0]], "test")
| true |
981e678719d62dbf6c80572005b0731db364d1fc | Python | cms-kr/hep-tools | /haddsplit | UTF-8 | 1,165 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
import sys, os
def haddsplit(destDir, inputFiles):
    """Merge ROOT files into ~4 GB chunks named ``<destDir>_<i>.root`` with
    `hadd`, then delete the input files.

    Parameters
    ----------
    destDir : str
        Output file prefix (no '.root' extension).
    inputFiles : list of str
        Paths of the ROOT files to merge.
    """
    ## estimate total size and merge so no chunk exceeds ~4 GBytes
    totalSize = sum(os.stat(f).st_size for f in inputFiles)
    fSize = 4*1024.*1024*1024 # 4GB
    nOutput = int(totalSize/fSize)+1
    nTotalFiles = len(inputFiles)
    # Integer division: the old `/` produced a float under Python 3,
    # which broke the list slicing below; `//` works on both 2 and 3
    # (as does the parenthesised print).
    nFiles = nTotalFiles // nOutput
    for i in range(nOutput):
        print(i)
        cmd = 'hadd -f %s_%d.root ' % (destDir, i)
        # Last chunk absorbs any remainder files.
        if i == nOutput-1:
            cmd += ' '.join(inputFiles[i*nFiles:])
        else:
            cmd += ' '.join(inputFiles[i*nFiles:(i+1)*nFiles])
        os.system(cmd)
    # NOTE(review): file names are interpolated into shell commands
    # unquoted; paths with spaces or shell metacharacters will misbehave.
    for x in inputFiles:
        os.system("rm -f %s" % (x))
def usage():
    """Print the command-line usage message (Python 2 and 3 compatible;
    the old ``print \"\"\"...\"\"\"`` statement was a SyntaxError under
    Python 3)."""
    print("""usage: %s OUTPUT.root INPUT_0.root INPUT_1.root ...
OUTPUT root files will be split by 4GBytes,
OUTPUT_0.root OUTPUT_1.root ... will be created""" % (sys.argv[0]))
if __name__ == '__main__':
    # Require at least an output name and one input file.
    if len(sys.argv) < 3:
        usage()
        sys.exit(1)
    outFilePrefix = sys.argv[1].replace('.root', '')
    inFileNames = sys.argv[2:]
    # A single input needs no merging: just rename it to <prefix>_0.root.
    if len(inFileNames) == 1:
        os.system("mv %s %s_0.root" % (inFileNames[0], outFilePrefix))
    else:
        haddsplit(outFilePrefix, inFileNames)
| true |
4cf1b3e9d87d367bbbc95def8e446642e31385df | Python | mstatt/DocNovus | /text_extraction_doc.py | UTF-8 | 1,817 | 2.65625 | 3 | [] | no_license | def extract_Text_doc(x):
# -*- coding: utf-8 -*-
import os
import glob
import docx
from docx import Document
import shutil
#Imported Modules
from error_log_writer import error_msg
print("Starting Text Extraction for word doc's......")
error_msg(x,"Starting Text Extraction for word doc's *************************")
content_list = []
compdir = x + "/" +'forprocessing/'
docdir = x + "/" +"doc/"
number_of_files = str(len([item for item in os.listdir(docdir) if os.path.isfile(os.path.join(docdir, item))]))
print("Processing ("+ number_of_files + ") .docx files.....")
error_msg(x,"Processing ("+ number_of_files + ") .docx files **************************************")
for filename3 in glob.glob(docdir+"*.docx"):
#Get the filename without the extension for nameing later
base=os.path.basename(filename3)
filenameNoExt = os.path.splitext(base)[0]
doc = docx.Document(filename3)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
textstream = '\n'.join(fullText)
# content_list = list(filter(None, content_list))
with open (docdir+'doc_'+filenameNoExt+".txt","a")as fp11:
fp11.write(textstream)
fp11.close()
print("Writing extracted doc output files ***************************************")
error_msg(x,"Writing extracted doc output files ******************************")
for filename4 in glob.glob(docdir+"*.txt"):
shutil.move(filename4,compdir+os.path.basename(filename4))
print("Text extraction completed for ("+ number_of_files + ") .docx files ******************************")
error_msg(x,"Text extraction completed for ("+ number_of_files + ") .docx files *******************")
| true |
6db96cc20f311953ea69574c17472c8ba2fcf8ca | Python | supratikbose/Multi-modal-learning | /dltk/core/modules/base.py | UTF-8 | 6,811 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
import re
import os
class AbstractModule(object):
    """Superclass for DLTK core modules - strongly inspired by Sonnet: https://github.com/deepmind/sonnet

    Wraps `tf.make_template` for automatic variable sharing. Each subclass
    implements a `_build` function used for the template and calls this
    superclass' `__init__` to create the template.
    For the variable sharing to work, variables inside `_build` have to be
    created via `tf.get_variable` instead of `tf.Variable`.
    The created template is automatically called using `__call__`.
    """
    # Collection-name groups used by subclasses when registering variables.
    MODEL_COLLECTIONS = [tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.MODEL_VARIABLES]
    TRAINABLE_COLLECTIONS = MODEL_COLLECTIONS + [tf.GraphKeys.TRAINABLE_VARIABLES]
    WEIGHT_COLLECTIONS = TRAINABLE_COLLECTIONS + [tf.GraphKeys.WEIGHTS]
    BIAS_COLLECTIONS = TRAINABLE_COLLECTIONS + [tf.GraphKeys.BIASES]
    MOVING_COLLECTIONS = MODEL_COLLECTIONS + [tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
    def __init__(self, name=None):
        """Initialisation of the template and naming of the module

        Parameters
        ----------
        name : string
            name of the module (also used as the template's variable scope)
        """
        self.name = name
        # print('name',self.name )
        self.variables = []
        # create_scope_now_=True fixes the variable scope immediately so
        # `variable_scope` is valid before the first call.
        self._template = tf.make_template(name, self._build, create_scope_now_=True)
        # Update __call__ and the object docstrings to enable better introspection (from Sonnet)
        self.__doc__ = self._build.__doc__
        self.__call__.__func__.__doc__ = self._build.__doc__
        # print('Init')
    def _build(self, *args, **kwargs):
        """Abstract function used to make the template when building the module

        Raises
        -------
        NotImplementedError
            This is an abstract function
        """
        raise NotImplementedError('Not implemented in abstract class')
    def __call__(self, *args, **kwargs):
        """Wrapper to call the template when the module is called

        Returns
        -------
        object
            Returns output of _build function
        """
        out = self._template(*args, **kwargs)
        return out
    @property
    def variable_scope(self):
        """Getter to access variable scope of the built template"""
        return self._template.variable_scope
    def get_variables(self, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
        """Helper to get all variables of a given collection created within this module

        Parameters
        ----------
        collection : string, optional
            Identifier of the collection to get variables from. Defaults to `tf.GraphKeys.TRAINABLE_VARIABLES`

        Returns
        -------
        tuple
            Tuple of `tf.Variables` that are part of the collection and within the scope of this module
        """
        # Trailing "/" restricts matching to this module's own scope only.
        scope_name = re.escape(self.variable_scope.name) + "/"
        return tuple(tf.get_collection(collection, scope_name))
class SaveableModule(AbstractModule):
    """Abstract module whose graph and variables can be exported/restored.

    Subclasses declare `output_keys` (names for the tensors `_build`
    returns) and implement `_build_input_placeholder` to populate
    `self.input_placeholders`.
    """
    output_keys = []

    def __init__(self, name=None):
        self.input_placeholders = None
        self.saver = None
        super(SaveableModule, self).__init__(name)

    def _build_input_placeholder(self):
        """Create `self.input_placeholders`; must be overridden."""
        raise NotImplementedError('Not implemented in abstract class')

    def save_metagraph(self, path, clear_devices=False, **kwargs):
        """Exports the network structure to a meta graph file.

        Parameters
        ----------
        path : string
            path to save the metagraph to (a '.meta' suffix is appended)
        clear_devices : bool
            flag to toggle whether the meta graph saves device placement of tensors
        kwargs
            additional arguments to the module build function
        """
        g = tf.get_default_graph()
        # BUGFIX: these were `assert(cond, msg)` — a non-empty tuple, which
        # is always truthy, so the checks could never fire.
        assert not g.finalized, 'Graph cannot be finalized'
        assert self.input_placeholders is not None, 'Input placeholders need to be built'
        self.saved_inputs = self.input_placeholders
        print(self.input_placeholders)
        out = self._template(*self.saved_inputs, **kwargs)
        self.saved_outputs = out.values() if isinstance(out, dict) else [out]
        self.saved_var_list = list(self.get_variables(tf.GraphKeys.GLOBAL_VARIABLES))
        self.saver = tf.train.Saver(var_list=self.saved_var_list)
        # Rebuild the bookkeeping collections from scratch so stale entries
        # from a previous export cannot leak into the meta graph.
        g.clear_collection('saved_network')
        g.clear_collection('saved_inputs')
        g.clear_collection('saved_outputs')
        for i in self.saved_inputs:
            g.add_to_collections(['saved_inputs', 'saved_network'], i)
        for o in self.saved_outputs:
            g.add_to_collections(['saved_outputs', 'saved_network'], o)
        for tensor in self.saved_var_list:
            g.add_to_collection('saved_network', tensor)
        self.saver.export_meta_graph('{}.meta'.format(path), clear_devices=clear_devices)
        # Clean up the temporary collections after exporting.
        g.clear_collection('saved_network')
        g.clear_collection('saved_inputs')
        g.clear_collection('saved_outputs')

    def save_model(self, path, session):
        """Saves the network variables to a given path

        Parameters
        ----------
        path : string
            Path to the file to save the network in
        session : tf.Session
            Tensorflow Session holding the current variable states
        """
        # BUGFIX: was an always-true `assert(cond, msg)` tuple.
        assert self.saver is not None, 'Meta graph must be saved first'
        self.saver.save(session, path, write_meta_graph=False)

    @classmethod
    def load(cls, path, session):
        """Restores a previously saved network into *session*.

        Parameters
        ----------
        path : string
            Path to load the network from ('.meta' is appended for the graph)
        session : tf.Session
            Tensorflow Session to load the variables into

        Returns
        -------
        list : input placeholders stored in the 'saved_inputs' collection
        dict : outputs keyed by cls.output_keys
        """
        print('Masuk load class')
        saver = tf.train.import_meta_graph('{}.meta'.format(path))
        saver.restore(session, path)
        inputs = tf.get_collection('saved_inputs')
        loaded_outputs = tf.get_collection('saved_outputs')
        outputs = {key: output for key, output in zip(cls.output_keys, loaded_outputs)}
        return inputs, outputs
ee305ab066792f362ecabc1d62c2feaf6e662403 | Python | cacev000/hw5 | /kmean.py | UTF-8 | 5,188 | 3.015625 | 3 | [] | no_license | import math
from numpy import dot
from numpy.linalg import norm
import random
import time
from tkinter import *
def euclidean(instance1, instance2):
    """Euclidean distance between two instances; index 0 (the name) is
    skipped. Returns infinity when either instance is None."""
    if instance1 is None or instance2 is None:
        return float("inf")
    squared = sum((instance1[idx] - instance2[idx]) ** 2
                  for idx in range(1, len(instance1)))
    return math.sqrt(squared)
def sum_of_squares(instance1, instance2):
    """Squared Euclidean distance between two instances (index 0 is the
    name). Returns infinity when either instance is None."""
    if instance1 is None or instance2 is None:
        return float("inf")
    return sum((instance1[idx] - instance2[idx]) ** 2
               for idx in range(1, len(instance1)))
def cosine_sim(a, b):
    """Cosine distance (1 - cosine similarity) between two instances,
    ignoring the name at index 0. Returns infinity if either is None."""
    if a is None or b is None:
        return float('inf')
    vec_a, vec_b = a[1:], b[1:]
    similarity = dot(vec_a, vec_b) / (norm(vec_a) * norm(vec_b))
    return 1 - similarity
def jaccard(a, b):
    """Jaccard distance over the attribute values (index 0 is the name):
    1 - sum(min)/sum(max). Returns infinity if either instance is None."""
    if a is None or b is None:
        return float('inf')
    mins = sum(min(a[idx], b[idx]) for idx in range(1, len(a)))
    maxs = sum(max(a[idx], b[idx]) for idx in range(1, len(a)))
    return 1 - (mins / maxs)
def meanInstance(name, instanceList):
    """Return a new instance tuple named *name* whose attributes are the
    per-column means of *instanceList*; returns None for an empty list."""
    if not instanceList:
        return
    count = float(len(instanceList))
    width = len(instanceList[0])
    averages = [sum(inst[col] for inst in instanceList) / count
                for col in range(1, width)]
    return tuple([name] + averages)
def assign(instance, centroids, metric):
    """Return the index of the centroid closest to *instance* under
    *metric*. Ties keep the earliest centroid, matching a first-strict-
    minimum scan."""
    distances = [metric(instance, centroid) for centroid in centroids]
    return distances.index(min(distances))
def createEmptyListOfLists(numSubLists):
    """Return a list containing *numSubLists* distinct empty lists."""
    return [[] for _ in range(numSubLists)]
def assignAll(instances, centroids, metric):
    """Partition *instances* into one bucket per centroid, each instance
    going to its nearest centroid according to assign()."""
    buckets = createEmptyListOfLists(len(centroids))
    for inst in instances:
        buckets[assign(inst, centroids, metric)].append(inst)
    return buckets
def computeCentroids(clusters):
    """Return the mean instance of every cluster, named 'centroid<i>'."""
    return [meanInstance("centroid" + str(idx), members)
            for idx, members in enumerate(clusters)]
def kmeans(instances, k, animation=False, initCentroids=None, metric=sum_of_squares, stop_condition='centroids'):
    """Lloyd's k-means over instance tuples (element 0 of each tuple is a name).

    Parameters
    ----------
    instances : list of tuple
        Data points; attribute values start at index 1.
    k : int
        Number of clusters.
    animation : bool, optional
        If True, draws each assign/update step on a Tk canvas
        (requires the prepareWindow/paintClusters2D helpers).
    initCentroids : list, optional
        Initial centroids; if None or fewer than k, k random instances
        are sampled instead.
    metric : callable, optional
        Distance function (instance, centroid) -> float.
    stop_condition : {'centroids', 'SSE', 'max_iteration'}, optional
        'centroids' stops when centroids no longer change, 'SSE' stops
        when withinss stops decreasing, 'max_iteration' caps at 100.

    Returns
    -------
    dict or None
        Keys 'clusters', 'centroids', 'withinss', 'iterations';
        None if stop_condition is invalid.
    """
    if stop_condition not in ['centroids', 'SSE', 'max_iteration']:
        print('invalid stop condition, must be centroids, SSE, or max_iteration')
        return
    result = {}
    if (initCentroids == None or len(initCentroids) < k):
        # randomly select k initial centroids
        random.seed(time.time())
        centroids = random.sample(instances, k)
    else:
        centroids = initCentroids
    if animation:
        delay = 1.0 # seconds
        canvas = prepareWindow(instances)
        clusters = createEmptyListOfLists(k)
        clusters[0] = instances
        paintClusters2D(canvas, clusters, centroids, "Initial centroids")
        time.sleep(delay)
    prevCentroids = []
    withinss = float('inf')
    prev_withinss = float('inf')
    iteration = 0
    continue_flag = True
    while continue_flag:
        iteration += 1
        # Assignment step: bucket every instance with its nearest centroid.
        clusters = assignAll(instances, centroids, metric)
        if animation:
            paintClusters2D(canvas, clusters, centroids,
                            "Assign %d" % iteration)
            time.sleep(delay)
        prevCentroids = centroids
        # Update step: recompute each centroid as its cluster's mean.
        centroids = computeCentroids(clusters)
        prev_withinss = withinss
        withinss = computeWithinss(clusters, centroids)
        if animation:
            paintClusters2D(canvas, clusters, centroids,
                            "Update %d, withinss %.1f" % (iteration, withinss))
            time.sleep(delay)
        # Set stop condition
        if stop_condition == 'centroids':
            continue_flag = centroids != prevCentroids
        elif stop_condition == 'SSE':
            continue_flag = withinss > prev_withinss
        else:
            continue_flag = iteration < 100
    result["clusters"] = clusters
    result["centroids"] = centroids
    result["withinss"] = withinss
    result["iterations"] = iteration
    return result
def computeWithinss(clusters, centroids):
    """Total within-cluster sum of squared distances: each member's
    sum_of_squares distance to its own centroid, summed over all clusters."""
    return sum(sum_of_squares(centroids[idx], member)
               for idx in range(len(centroids))
               for member in clusters[idx])
# Repeats k-means clustering n times, and returns the clustering
# with the smallest withinss
def repeatedKMeans(instances, k, n):
    """Run kmeans n times and return the clustering with the smallest withinss."""
    bestClustering = {"withinss": float("inf")}
    for trial in range(1, n + 1):
        print("k-means trial %d," % trial)
        candidate = kmeans(instances, k)
        print("withinss: %.1f" % candidate["withinss"])
        if candidate["withinss"] < bestClustering["withinss"]:
            bestClustering = candidate
            minWithinssTrial = trial
    print("Trial with minimum withinss:", minWithinssTrial)
    return bestClustering
# Pide base y altura al usuario y calcula el área del triángulo.
# BUGFIX: el mensaje de bienvenida decía "perimetro", pero el cálculo
# realizado (base * altura / 2) es el área.
print("Hola, buen dia.\n Vamos a calcular el area de un triangulo")
altura = int(input("Ingrese la altura:" ))
base = int(input("Ahora ingresemos la base: "))
area = base * altura / 2
print(f"El Area es: {area}")
| true |
e9ef3fbae649c3e97d0c243c313ec39d97251dd1 | Python | Chriscaracach/python | /python.py | UTF-8 | 1,143 | 4.25 | 4 | [] | no_license | # #Funcion para generar letras random
# import random
# def textoRandom(texto):
# resultado = ""
# for letra in texto:
# if(letra == " "):
# resultado = resultado + " "
# else:
# resultado = resultado + random.choice("abcdefghijklmnopqrstuvwxyz")
# return resultado
# print(textoRandom("Hola que tal"))
#print(random.choice("abcdefghijklmnopqrstuvwxyz"))
#Recursion
# def buscar(lista, n):
# i = 0
# for elemento in lista:
# if(elemento == n):
# return i
# i += 1
# return None
# print("buscar 1")
# print(buscar([1,4,3,2,5,4,4,4], 6))
# def buscar2(lista, n, i=0):
# if lista[i] == n:
# return i
# return buscar2(lista, n, i+1)
# print("buscar2")
# print(buscar2([1,4,3,2,5,4,4,4], 6))
#Factorial
# def fibonacciNormal(n):
# contador = 2
# fib = 1
# seq = [1,1]
# punteroMenosDos = len(seq)-2
# punteroMenosUno = len(seq)-1
# while(contador<n):
# seq.append(punteroMenosDos + punteroMenosUno)
# contador +=1
# return seq[punteroMenosUno]
# print(fibonacciNormal(5))
| true |
deeb59288b301491297985c07fe192f161ab2497 | Python | Rich43/rog | /albums/3/challenge296_easy/code.py | UTF-8 | 897 | 3.90625 | 4 | [] | no_license | '''
Print out the lyrics of The Twelve Days of Christmas
'''
import re
# Digit-word lookup used to replace leading numerals in the lyric files.
# 'and 1' is a separate key because the regex below can capture "and 1".
num_to_text = {
    'and 1': 'and one',
    '1': 'one',
    '2': 'two',
    '3': 'three',
    '4': 'four',
    '5': 'five',
    '6': 'six',
    '7': 'seven',
    '8': 'eight',
    '9': 'nine',
    '10': 'ten',
    '11': 'eleven',
    '12': 'twelve'
}
# Pass 1: rewrite each line of 296.txt, spelling out a leading number
# (or a leading "word number" pair such as "and 1").
with open('296.txt') as file:
    for line in file:
        line = line.rstrip()
        sample = re.findall('^\d+|^[a-z]+\s\d+', line)
        if sample:
            sample = sample[0]
            sample2 = num_to_text[sample]
            line = re.sub(sample, sample2, line)
        print(line)
# Pass 2 (bonus): prefix line N of the bonus file with the word for N.
with open('296_bonus2.txt') as file:
    num = 1
    for line in file:
        line = line.rstrip()
        print(num_to_text[str(num)] + ' ' + line)
num += 1 | true |
9075070deb70d73eb06fe50fedcb0e24a3117845 | Python | duanmao/leetcode | /98. Validate Binary Search Tree.py | UTF-8 | 1,090 | 3.5625 | 4 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isValidBST(self, root: TreeNode) -> bool:
        """Iterative in-order traversal: a valid BST yields strictly
        increasing values, so any non-increasing pair fails."""
        stack = []
        prev = None
        curr = root
        while curr or stack:
            # Walk as far left as possible, stacking ancestors.
            while curr:
                stack.append(curr)
                curr = curr.left
            curr = stack.pop()
            # In-order predecessor must be strictly smaller.
            if prev and prev.val >= curr.val:
                return False
            prev = curr
            curr = curr.right
        return True
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Time: O(n), space: O(1)
class Solution:
    def isValidBST(self, root: TreeNode) -> bool:
        """Recursive range check: every node must lie strictly inside the
        open interval inherited from its ancestors."""
        def in_range(node, lo, hi):
            if not node:
                return True  # empty subtree is trivially valid
            if node.val <= lo or node.val >= hi:
                return False
            return in_range(node.left, lo, node.val) and in_range(node.right, node.val, hi)
        return in_range(root, float('-inf'), float('inf'))
| true |
3515192fd7bcea33261eaae9cff15631b0d1e9e0 | Python | MooooonaLuo/mona-hello-world-homework | /hello_world_w2.py | UTF-8 | 1,873 | 3.9375 | 4 | [] | no_license | # Name: Jiayu Luo
# Assignment 1
myMac = {
"model": "MacBook Pro",
"size": 15,
"year": 2015,
"system": "macOS Big Sur",
"systemVersion": "11.5.2",
"memory": 16
}
# size in inch, memory in GB
myMug = {
"color": "red",
"brand": "Starbucks",
"volume": 355
}
# volume in ml
myVitamin = {
"brand": "Emergen-C",
"netWeight": 273,
"price": 9.99,
"packNum": 30
}
# price in dollar, netWeight in g
myBed = {
"brand": "Zinus",
"size": "full",
"heigh(inch)": 14,
"material": "wood",
"color": "dark brown"
}
# heigh in inch
# Assignment 2
{
"name": "Momo",
"type": "Yorkie",
"age": 2,
"gender": "female",
"weight": 121
}
# weight in lbs
# Assignment 3
# let the user know what's going on
print ("Welcome to MadLibs!")
print ("Answer the questions below to play.")
print ("-----------------------------------")
# Collect the words that will be spliced into the story.
# NOTE(review): raw_input is Python 2 only; under Python 3 this raises
# NameError (the print calls work in both versions).
fruit = raw_input("Enter your favorate fruit in plural: ")
name = raw_input("Enter a name: ")
adj = raw_input("Enter an adjective: ")
noun1 = raw_input("Enter a noun: ")
object1 = raw_input("Enter an object: ")
# The story is one long string built from literals and the inputs above;
# the trailing backslashes continue the statement onto the next line.
story = "In 2050, super-intelligent " + fruit + ", mistakenly created by scientists, " \
    "have dominated the world. " + name + ", our bravest human leader, " \
    "starts a revolution against the " + fruit + ". We create an army called " + adj + " " + noun1 + " and " \
    "fight " + fruit + " with our newly designed weapon -- " + object1 + ". The war lasts for a decade, " \
    "and we eventually take back our planet."
# finally we print the story
print (story)
b6a40f52b4d37ce33019ac00f0d829546d54124e | Python | Aasthaengg/IBMdataset | /Python_codes/p03737/s721567692.py | UTF-8 | 173 | 2.875 | 3 | [] | no_license | # A - Three-letter acronym
# A - Three-letter acronym
# https://atcoder.jp/contests/abc059/tasks/abc059_a
# Read exactly three space-separated words; print their uppercased initials.
first, second, third = input().split()
print(''.join(word[0].upper() for word in (first, second, third)))
| true |
2e78bdec1ca9193e51580d5b9fd967a0efe5800b | Python | cornell-cup/cs-minibot | /minibot/botstate.py | UTF-8 | 733 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | """
BotState object.
"""
class BotState():
    """
    Snapshot of the minibot's pose (position and heading) and its size.
    """
    # Class-level defaults mirror the constructor defaults.
    x = 0
    y = 0
    angle = 0
    radius = 0

    def __init__(self, x=0, y=0, angle=0, radius=0):
        """
        Build a BotState; by default the bot sits at the origin with no
        orientation offset.

        Args:
            x (int): X coordinate of the bot.
            y (int): Y coordinate of the bot.
            angle (int): Heading of the robot measured from the x-axis.
            radius (int): Radius of the minibot.
        """
        self.x = x
        self.y = y
        self.angle = angle
        self.radius = radius
| true |
13ffeee0a606e6088aa1298174028066528db5b2 | Python | boun7yhunt3r/Pythoncodes | /pyimagesearch/11_CV_case_studies/measuring_distance/pyimagesearch/markers/distancefinder.py | UTF-8 | 1,971 | 3.25 | 3 | [] | no_license | import cv2
class DistanceFinder:
    """Triangle-similarity distance estimator for an object of known width."""

    def __init__(self, knownWidth, knownDistance):
        # Reference object dimensions captured at calibration time.
        self.knownWidth = knownWidth
        self.knownDistance = knownDistance
        # Focal length stays 0 until calibrate() is called.
        self.focalLength = 0

    def calibrate(self, width):
        """Derive the focal length from the perceived reference width."""
        self.focalLength = (self.knownDistance * width) / self.knownWidth

    def distance(self, perceivedWidth):
        """Estimate the distance to the object from its perceived width."""
        return (self.focalLength * self.knownWidth) / perceivedWidth
def findSquareMarker(image):
    """Locate a roughly square marker in `image`.

    Returns the bounding box (x, y, w, h) of the largest near-square
    contour, or None if no suitable contour is found.

    BUGFIX: the blur call was spelled cv2.GuassianBlur, which raises
    AttributeError at runtime; corrected to cv2.GaussianBlur.
    """
    # convert the image to grayscale, blur it and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find contours in the edged image and sort them largest-first so the
    # biggest square-ish contour wins
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    markerDim = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # ensure the contour is a quadrilateral
        if len(approx) == 4:
            (x, y, w, h) = cv2.boundingRect(approx)
            aspectRatio = w / float(h)
            # accept only near-square aspect ratios
            if aspectRatio > 0.9 and aspectRatio < 1.1:
                markerDim = (x, y, w, h)
                break
    return markerDim
def draw(image, boundingBox, dist, color=(0,255,0), thickness = 2):
    """Draw the marker bounding box and the distance (converted from inches
    to feet) onto `image`, returning the annotated image.

    BUGFIX: removed a stray duplicated cv2.rectangle call that was missing
    its second corner argument and would raise a TypeError on every call.
    """
    (x, y, w, h) = boundingBox
    cv2.rectangle(image, (x, y), (x + w, y + h), color, thickness)
    # dist is in inches; show it in feet with two decimals.
    cv2.putText(image, "%.2fft"% (dist/12),(image.shape[1]-200, image.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX, 2.0, color, 3)
    return image
| true |
f445212a16e85a8786fe6837e17f6bd16047b192 | Python | chbrown/topic-sentiment-authorship | /tsa/science/summarization.py | UTF-8 | 4,708 | 2.515625 | 3 | [
"MIT"
] | permissive | from viz import gloss, geom
import numpy as np
from sklearn import metrics, cross_validation
from tsa.science import numpy_ext as npx
from tsa import logging
logger = logging.getLogger(__name__)
def metrics_summary(y_true, y_pred, labels=None, pos_label=1, average=None):
    """Render accuracy, precision/recall and F1 as one readable line.

    `labels` and `average` are accepted for signature parity but are not
    forwarded to metrics_dict (matching the original behavior).
    """
    scores = metrics_dict(y_true, y_pred, pos_label=pos_label)
    return ('accuracy: {accuracy:.2%}, '
            'P/R: {precision:.4f}/{recall:.4f}, '
            'F1: {f1:.4f}').format(**scores)
def metrics_dict(y_true, y_pred, labels=None, pos_label=1, average=None):
    """Return {'precision', 'recall', 'f1', 'support', 'accuracy'} for the
    given gold/predicted labels (thin wrapper around sklearn.metrics)."""
    prfs_values = metrics.precision_recall_fscore_support(y_true, y_pred,
                                                          labels=labels,
                                                          pos_label=pos_label,
                                                          average=average)
    prfs_keys = ('precision', 'recall', 'f1', 'support')
    accuracy = metrics.accuracy_score(y_true, y_pred)
    # hamming loss is only different from 0-1 loss in multi-label scenarios
    # hamming_loss=metrics.hamming_loss(y_true, y_pred),
    # jaccard_similarity is only different from the accuracy in multi-label scenarios
    # jaccard_similarity=metrics.jaccard_similarity_score(y_true, y_pred),
    # zero_one_loss is (1.0 - accuracy) in multi-label scenarios
    # zero_one_loss=metrics.zero_one_loss(y_true, y_pred),
    return dict(zip(prfs_keys, prfs_values), accuracy=accuracy)
def explore_mispredictions(test_X, test_y, model, test_indices, label_names, documents):
    """Print every test document the model labeled incorrectly."""
    predictions = model.predict(test_X)
    for doc_index, gold, predicted in zip(test_indices, test_y, predictions):
        # Skip correct predictions; report only the mistakes.
        if gold == predicted:
            continue
        print('gold label (%s=%s) != predicted label (%s=%s)' % (
            gold, label_names[gold], predicted, label_names[predicted]))
        print('Document: %s' % documents[doc_index])
def explore_uncertainty(test_X, test_y, model):
    """Summarize how confident the model's probabilistic predictions are,
    split into correct vs. incorrect predictions (prints means and plots
    histograms via geom.hist). Logs a message for models that do not
    implement predict_proba."""
    if hasattr(model, 'predict_proba'):
        pred_probabilities = model.predict_proba(test_X)
        # predicts_proba returns N rows, each C-long, where C is the number of labels
        # hmean takes the harmonic mean of its arguments
        pred_probabilities_hmean = np.apply_along_axis(npx.hmean, 1, pred_probabilities)
        pred_certainty = 1 - (2 * pred_probabilities_hmean)
        # pred_certainty now ranges between 0 and 1,
        # a pred_certainty of 1 means the prediction probabilities were extreme,
        # 0 means they were near 0.5 each
        # with this, we can use np.array.argmax to get the class names we would have gotten with model.predict()
        # axis=0 will give us the max for each column (not very useful)
        # axis=1 will give us the max for each row (what we want)
        # find best guess (same as model.predict(...), I think)
        pred_y = pred_probabilities.argmax(axis=1)
        print('*: certainty mean=%0.5f' % np.mean(pred_certainty))
        geom.hist(pred_certainty, range=(0, 1))
        print('correct: certainty mean=%0.5f' % np.mean(pred_certainty[pred_y == test_y]))
        geom.hist(pred_certainty[pred_y == test_y], range=(0, 1))
        print('incorrect: certainty mean=%0.5f' % np.mean(pred_certainty[pred_y != test_y]))
        geom.hist(pred_certainty[pred_y != test_y], range=(0, 1))
    else:
        logger.info('predict_proba is unavailable for this model: %s', model)
def explore_topics(topic_model, tokens_per_topic=10):
    """Print the top tokens (with weights) of every topic in a gensim-style
    topic model, aligned via gloss.gloss."""
    # not exactly scikit...
    for topic_i in range(topic_model.num_topics):
        topic = topic_model.show_topic(topic_i, topn=tokens_per_topic)
        ratios, tokens = zip(*topic)
        # terminal.width()
        alignments = list(zip(tokens, ['%0.3f' % ratio for ratio in ratios]))
        # (%0.4f > ratio > %0.4f):' % (, ratios[0], ratios[-1])
        # print '  ', ', '.join(tokens)
        print('Topic %d' % topic_i)
        print(gloss.gloss(alignments, toksep=' ', prefixes=['  ', '  ']))
def average_accuracy(corpus, model, test_size=0.1, n_iter=10):
    """Mean accuracy of `model` over n_iter stratified shuffle-split folds
    of `corpus` (which must expose .X, .y and .subset(indices)).

    NOTE(review): uses the pre-0.18 sklearn `cross_validation` module API
    (StratifiedShuffleSplit(y, ...)); modern sklearn moved this to
    model_selection with a different calling convention.
    """
    folds = cross_validation.StratifiedShuffleSplit(corpus.y, test_size=test_size, n_iter=n_iter)
    accuracies = []
    for train_indices, test_indices in folds:
        train_corpus = corpus.subset(train_indices)
        test_corpus = corpus.subset(test_indices)
        model.fit(train_corpus.X, train_corpus.y)
        pred_y = model.predict(test_corpus.X)
        # pred_proba = model.predict_proba(test_corpus.X)
        accuracy = metrics.accuracy_score(test_corpus.y, pred_y)
        accuracies += [accuracy]
    return np.mean(accuracies)
| true |
def make_car(maker, size, **infos):
    """Return a dict describing a car: required maker/size plus any extras."""
    return {'maker': maker, 'size': size, **infos}

car = make_car('subaru', 'outback', color="blue", tow_package=True)
print(car) | true |
b25d93eecd4446e3011686f9b1365897bde0d469 | Python | chuncaohenli/leetcode | /Python/lc227. Basic Calculator II.py | UTF-8 | 1,523 | 3.28125 | 3 | [] | no_license | # def calculate(s):
# if not s:
# return "0"
# stack, num, sign = [], 0, "+"
# for i in range(len(s)):
# if s[i].isdigit():
# num = num*10+ord(s[i])-ord("0")
# if (not s[i].isdigit() and not s[i].isspace()) or i == len(s)-1:
# if sign == "-":
# stack.append(-num)
# elif sign == "+":
# stack.append(num)
# elif sign == "*":
# stack.append(stack.pop()*num)
# else:
# tmp = stack.pop()
# if tmp//num < 0 and tmp%num != 0:
# stack.append(tmp//num+1)
# else:
# stack.append(tmp//num)
# sign = s[i]
# num = 0
# return sum(stack)
def calculate(s):
    """Evaluate a basic arithmetic expression containing non-negative
    integers, the operators + - * /, and spaces (LeetCode 227).

    Division truncates toward zero via int(), e.g. "14-3/2" == 13.

    BUGFIX: the original multi-digit scanner never advanced its inner
    index (`j` was never incremented), and `i += 1` lived inside the
    operator branch, so any multi-digit input hung forever; a stray debug
    print(i) is also removed.
    """
    stack = []
    sign = '+'
    num = 0
    i = 0
    while i < len(s):
        c = s[i]
        if c.isdigit():
            # Accumulate multi-digit numbers one character at a time.
            num = num * 10 + int(c)
        if c in '+-*/' or i == len(s) - 1:
            # Flush the pending number under the previous operator.
            if sign == '+':
                stack.append(num)
            elif sign == '-':
                stack.append(-num)
            elif sign == '*':
                stack.append(stack.pop() * num)
            elif sign == '/':
                # int() truncates toward zero (unlike floor division).
                stack.append(int(stack.pop() / num))
            sign = c
            num = 0
        i += 1
    return sum(stack)
print(calculate("123")) | true |
411ca135ab868facac4deb0913d1de45a743d4ed | Python | XuYi-fei/HUST-EIC-MathematicalModeling | /final/Preprocessed.py | UTF-8 | 1,029 | 2.75 | 3 | [
"MIT"
] | permissive | import pandas as pd
import os
import csv
data = pd.read_csv(r"D:\GitRepos\EIC\MathmaticalModeling\HUST-EIC-MathematicalModeling\final\time_series_covid_19_confirmed.csv")
# Positional index -> column name.  NOTE(review): the code below treats
# column 1 as the country name, 2/3 as coordinates and 4+ as cumulative
# daily counts -- confirm against the CSV's actual layout.
columns = dict(zip(range(len(data.columns)), data.columns))
columnIndex = list(data.columns)[1:]
countries = {}
for index, row in data.iterrows():
    if row[1] not in countries.keys():
        # import pdb; pdb.set_trace()
        # First occurrence of the country: copy the coordinate columns and
        # zero out all date columns.
        countries[row[1]] = {}
        countries[row[1]][columns[2]] = row[2]
        countries[row[1]][columns[3]] =row[3]
        for j in range(4, len(columns)):
            countries[row[1]][columns[j]] = 0
    # Convert cumulative totals to day-over-day deltas, summed per country.
    for j in range(5, len(columns)):
        countries[row[1]][columns[j]] += (row[j] - row[j-1])
writer_columns = {}
for index in columnIndex:
    if index == 'Country/Region':
        writer_columns[index] = list(countries.keys())
    else:
        writer_columns[index] = [i[index] for i in countries.values()]
dataFrame = pd.DataFrame(writer_columns)
dataFrame.to_excel('Preprocessed_original.xlsx')
| true |
7a4c13f9fb21f5ada870d4625433677980db8e85 | Python | sharankonety/algorithms-and-data-structures | /linked_lists/linear_linked_lists/Insertion/insert_at_index.py | UTF-8 | 1,874 | 4.21875 | 4 | [] | no_license | # Insert a node at specified index position
class Node:
    """A singly-linked-list node carrying `data` and a `next` pointer."""
    def __init__(self, data=None):
        self.data = data
        self.next = None  # filled in when the node is linked into a list
class Linked_list:
    """A minimal singly linked list built from `Node` objects."""
    def __init__(self):
        # Empty list: head points at nothing.
        self.head = None
    def list_print(self):
        """Print each node's data from head to tail."""
        print_val = self.head
        while print_val:
            print(print_val.data)
            print_val = print_val.next
    def list_count(self):
        """Return the number of nodes currently in the list."""
        count = 0
        x = self.head
        while x:
            count += 1
            x = x.next
        return count
    # Function to insert a node at specific index
    # The same function can also be used for inserting a new_node in an empty list.
    def insert_at_index(self,index,data):
        """Insert `data` at position `index` (0..length); silently does
        nothing for an out-of-range index."""
        # Validate the index against the current length.
        count = self.list_count()
        if (index<0 or index>count):
            return
        # Two cases: inserting at the head, or splicing after a predecessor.
        new_node = Node(data)
        n = self.head
        # insert at head
        if index == 0:
            new_node.next = self.head
            self.head = new_node
        # Otherwise walk to the node at index-1 and splice the new node in
        # after it: point new_node at its successor first, then relink the
        # predecessor (order matters to avoid losing the tail).
        else:
            for i in range(0,index-1):
                n = n.next
            new_node.next = n.next
            n.next = new_node
list = Linked_list()
# list.head = Node(1)
# e2 = Node(2)
# e3 = Node(3)
# e4 = Node(5)
# list.head.next = e2
# e2.next = e3
# e3.next = e4
list.insert_at_index(0,4)
list.list_print()
# list.list_count() | true |
395bade3019e6804a7f55a582899ba424a2e4d20 | Python | danebista/LeetCode-Algorithms | /Algorithms-LeetCode/11-20/15. 3 Sums Problem.py | UTF-8 | 801 | 2.828125 | 3 | [] | no_license | class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
ans=[]
nums.sort()
for i,a in enumerate(nums):
if i>0 and a==nums[i-1]:
continue
l=i+1
r=len(nums)-1
while(l<r):
sums= nums[l]+nums[r]+ a
if sums>0:
r-=1
if sums<0:
l+=1
if sums==0:
ans.append([a,nums[l],nums[r]])
]+=1
r-=1
while(nums[l]==nums[l-1] and l<r):
l+=1
while(nums[r]==nums[r+1] and l<r):
r-=1
return ans
| true |
dae27393581ab5f5efa5185ea17df5c1bd7b4916 | Python | parkermac/ptools | /obs_ecy/station_map.py | UTF-8 | 1,429 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot locations of Ecology Stations.

Produces two figures: one for the coastal estuaries (Grays Harbor /
Willapa Bay) and one for the Salish Sea, each with station markers,
labels, a coastline and an equal-aspect projection.
"""
# imports
import pandas as pd
import matplotlib.pyplot as plt
# SSMSP import: make the sibling ../ssmsp package importable.
import os
import sys
pth = os.path.abspath('../ssmsp')
if pth not in sys.path:
    sys.path.append(pth)
import sfun
import pfun
dir0 = '../../ptools_data/ecology/'
# load processed station info and data
sta_df = pd.read_pickle(dir0 + 'sta_df.p')
# add Canadian data
dir1 = '../../ptools_data/canada/'
# load processed station info and data
sta_df_ca = pd.read_pickle(dir1 + 'sta_df.p')
sta_df = pd.concat((sta_df, sta_df_ca), sort=False)
# plotting
plt.close('all')
fig1 = plt.figure(figsize=(8,8))
fig2 = plt.figure(figsize=(8,8))
ax1 = fig1.add_subplot(111)
ax2 = fig2.add_subplot(111)
# Route each station to the coastal-estuary or Salish Sea figure by basin.
for station in sta_df.index:
    lon = sta_df.loc[station, 'Longitude']
    lat = sta_df.loc[station, 'Latitude']
    if sta_df.loc[station, 'Basin'] in ['Grays Harbor', 'Willapa Bay']:
        ax = ax1
    else:
        ax = ax2
    ax.plot(lon, lat, '*r')
    ax.text(lon+.01, lat, station, color='b', fontsize=8, fontweight='bold')
# Add coastlines and set a distance-preserving aspect ratio on both maps.
pfun.add_coast(ax1, dir0='../ssmsp/')
pfun.dar(ax1)
pfun.add_coast(ax2, dir0='../ssmsp/')
pfun.dar(ax2)
# Coastal Estuaries
ax1.set_title('Coastal Estuaries')
ax1.set_xlim(-124.3, -123.6)
ax1.set_ylim(46.3, 47.1)
# Puget Sound
ax2.set_title('Salish Sea')
ax2.set_xlim(-124, -122)
ax2.set_ylim(47, 49.5)
plt.show()
| true |
bc6d80b38bbd8c4e779b0352de2a9f3bee789140 | Python | mallika2011/Spotify-Recommender-System | /approach_A/preprocessing/for_artists/create_new_features.py | UTF-8 | 2,064 | 2.8125 | 3 | [] | no_license | import os
import pandas as pd
from itertools import chain
import ast
def load_artists(ids, emb):
    """Map each artist id (one per line in `ids`) to its embedding vector.

    The embedding file `emb` is read line-by-line in lockstep with `ids`;
    each embedding line is whitespace-split into a list of strings.

    Robustness fix: both files are now opened with `with`, so the handles
    are closed even if an exception occurs mid-read (the original leaked
    both handles on error).
    """
    dict_ret = {}
    with open(ids, 'r') as f1, open(emb, 'r') as f2:
        line1 = f1.readline().strip('\n')
        line2 = f2.readline().strip('\n')
        # Stop at the first empty id line, mirroring the original loop.
        while line1:
            dict_ret[line1] = line2.split()
            line1 = f1.readline().strip('\n')
            line2 = f2.readline().strip('\n')
    return dict_ret
def get_artists():
    """Load both combined_features CSVs (columns 1 and 5, i.e. 'id' plus
    the artists column) and merge them into a single dict keyed by id.

    The second file carries no header row, so the first file's column
    names are reused via `names=df_cols`. On duplicate ids the entry from
    the second file wins.
    """
    df = pd.read_csv("../../../data/custom/combined_features_1.csv", usecols=[1,5])
    print(df.head())
    dict1 = df.set_index('id').T.to_dict()
    df_cols = df.columns
    df = pd.read_csv("../../../data/custom/combined_features_2.csv", names=df_cols, usecols=[1,5])
    print(df.head())
    dict2 = ( df.set_index('id').T.to_dict() )
    dict_final = dict(chain.from_iterable(d.items() for d in (dict1, dict2)))
    print(len(dict_final))
    return dict_final
def append_to_file(infile, outfile, skip=False):
    """Copy `infile` to `outfile`, appending per-song averaged artist
    feature columns to every row.

    Relies on the module-level globals `art_dict` (artist id -> feature
    vector) and `a_dict` (song id -> row containing an 'artists' list
    literal). With skip=True the first line is treated as a header and
    echoed with generated column names a0..aN.

    Fixes: both file handles are now closed via `with` (the output handle
    was never closed, risking truncated/unflushed output), and the
    accumulator is sized from num_features instead of a hard-coded 4.
    """
    num_features = len(art_dict['3EvzCyNmY3kbUkMawjsndF'])
    with open(infile, 'r') as f1, open(outfile, 'w') as of:
        line1 = f1.readline().strip('\n')
        if skip:
            # Header row: echo it and append the new feature column names.
            of.write(line1)
            for i in range(num_features):
                of.write(",a" + str(i))
            of.write("\n")
            line1 = f1.readline().strip('\n')
        while line1:
            song_id = line1.split(",")[1]
            artists = ast.literal_eval(a_dict[song_id]['artists'])
            # Sum each feature over the song's artists, then average.
            vals = [0] * num_features
            for av in artists:
                for i in range(num_features):
                    vals[i] += float(art_dict[av][i])
            of.write(line1)
            for i in range(num_features):
                of.write("," + str(vals[i] / len(artists)))
            of.write("\n")
            line1 = f1.readline().strip('\n')
art_dict = load_artists("unique_artists.txt","outfile_ARTISTS524288.txt")
a_dict = get_artists()
append_to_file("../../../data/custom/combined_features_1.csv","file1_updated.csv", True)
append_to_file("../../../data/custom/combined_features_2.csv","file2_updated.csv")
| true |
3a153aafa1bef5787c193cd1083ea6e3be9e82d8 | Python | OneJane/OneJane.github.io | /2021/11/11/SO逆向之最右sign分析/md5.py | UTF-8 | 6,821 | 3.53125 | 4 | [] | no_license | # codeing=utf-8
# 引入math模块,因为要用到sin函数
import math
# Constants seeding the 128-bit MD5 state. Note the byte order: the spec's
# A = 0x01234567 is stored low-byte-first, so at run time A = 0x67452301,
# and similarly for B, C, D. Strings are used so the values match hex()'s
# string output ('0xA' style) during the later arithmetic.
# NOTE(review): these seeds deviate from RFC 1321 -- the standard values
# are shown in the trailing comments (e.g. A should be '0x67452301') --
# so this implementation will NOT reproduce standard MD5 digests.
A = '0x67552301' # '0x67452301'
B = '0xEDCDAB89' # '0xefcdab89'
C = '0x98BADEFE' # '0x98badcfe'
D = '0x16325476' # '0x10325476'
# The four per-round boolean functions; L is rotate-left. The rotate is
# masked with 0xffffffff because Python ints are unbounded and the shift
# can exceed 32 bits.
F = lambda x, y, z: ((x & y) | ((~x) & z))
G = lambda x, y, z: ((x & z) | (y & (~z)))
H = lambda x, y, z: (x ^ y ^ z)
I = lambda x, y, z: (y ^ (x | (~z)))
L = lambda x, n: (((x << n) | (x >> (32 - n))) & (0xffffffff))
# Per-round left-rotation amounts, one tuple of 16 per round.
shi_1 = (7, 12, 17, 22) * 4
shi_2 = (5, 9, 14, 20) * 4
shi_3 = (4, 11, 16, 23) * 4
shi_4 = (6, 10, 15, 21) * 4
# Message-word (M[i]) access orders for each of the four rounds.
m_1 = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)
m_2 = (1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12)
m_3 = (5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2)
m_4 = (0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9)
# 定义函数,用来产生常数T[i],常数有可能超过32位,同样需要&0xffffffff操作。注意返回的是十进制的数。
def T(i):
    """i-th MD5 additive constant: floor(2**32 * |sin(i)|), masked to 32 bits."""
    return int(4294967296 * abs(math.sin(i))) & 0xffffffff
# 定义函数,用来将列表中的元素循环右移。原因是在每轮操作中,先运算A的值,然后是D,C,B,16轮之后右恢复原来顺序,所以只要每次操作第一个元素即可。
def shift(shift_list):
    """Rotate the 4-element state right by one: [a, b, c, d] -> [d, a, b, c].

    Each MD5 step updates slot 0 only; rotating after every step restores
    the A, B, C, D ordering once 16 steps have run.
    """
    return [shift_list[-1]] + shift_list[:3]
# 定义主要的函数,参数为当做种子的列表,每轮用到的F,G,H,I,生成的M[],以及循环左移的位数。该函数完成一轮运算。
def fun(fun_list, f, m, shi):
    """Run one MD5 round (16 steps) over the state.

    Args:
        fun_list: 4-element list of hex-string state words [A, B, C, D].
        f: the round's boolean function (F, G, H or I).
        m: the round's 16 message words (hex strings, already ordered).
        shi: the round's 16 left-rotation amounts.

    Uses the module-global Ti_count, which walks the T(i) constants from
    1 to 64 across the four rounds.
    """
    count = 0
    global Ti_count
    # T(i) cycles from 1 to 64 over the whole block, hence the global.
    while count < 16:
        xx = int(fun_list[0], 16) + f(int(fun_list[1], 16), int(fun_list[2], 16), int(fun_list[3], 16)) + int(m[count],
                                                                                                             16) + T(
            Ti_count)
        xx = xx & 0xffffffff
        ll = L(xx, shi[count])
        # fun_list[0] = hex((int(fun_list[1],16) + ll)&(0xffffffff))[:-1]
        fun_list[0] = hex((int(fun_list[1], 16) + ll) & (0xffffffff))
        # The commented [:-1] above stripped Python 2's trailing 'L' from
        # long-integer hex strings; unnecessary on Python 3.
        fun_list = shift(fun_list)
        count += 1
        Ti_count += 1
    # print (fun_list)
    return fun_list
# 该函数生成每轮需要的M[],最后的参数是为了当有很多分组时,进行偏移。
def genM16(order, ascii_list, f_offset):
    """Build the 16 message words M[i] for one round.

    Args:
        order: the round's word-access order (m_1..m_4).
        ascii_list: the padded message as a list of '0x..' byte strings.
        f_offset: index of the 64-byte block being processed.

    Each word is four consecutive bytes concatenated, then byte-reversed
    (little-endian) via reverse_hex.
    """
    ii = 0
    m16 = [0] * 16
    f_offset = f_offset * 64
    for i in order:
        i = i * 4
        m16[ii] = '0x' + ''.join((ascii_list[i + f_offset] + ascii_list[i + 1 + f_offset] + ascii_list[
            i + 2 + f_offset] + ascii_list[i + 3 + f_offset]).split('0x'))
        ii += 1
    # NOTE(review): m16.index(c) returns the FIRST matching position; if
    # two words happened to be equal, both would still end up reversed
    # (equal inputs reverse identically), but the lookup is fragile.
    for c in m16:
        ind = m16.index(c)
        m16[ind] = reverse_hex(c)
    return m16
# 翻转十六进制数的顺序:'0x01234567' => '0x67452301'
def reverse_hex(hex_str):
    """Byte-reverse a '0x...' hex string: '0x01234567' -> '0x67452301'."""
    digits = hex_str[2:]
    # Split into two-character byte pairs and reverse their order.
    pairs = [digits[i:i + 2] for i in range(0, len(digits), 2)]
    return '0x' + ''.join(reversed(pairs))
# 显示结果函数,将最后运算的结果列表进行翻转,合并成字符串的操作。
def show_result(f_list):
    """Join the four state words into the final 32-character hex digest.

    Each word is byte-reversed (little-endian output) and its '0x' prefix
    stripped. Cleanup: the original filled a parallel f_list1 buffer via
    f_list.index(i) -- dead storage plus an O(n) lookup per element --
    which is removed here without changing the returned string.
    """
    result = ''
    for word in f_list:
        result += reverse_hex(word)[2:]
    return result
# 程序主循环
# Main driver: read a message, pad it per the MD5 layout, run the four
# rounds per 64-byte block, and print the digest.
while True:
    abcd_list = [A, B, C, D]
    Ti_count = 1
    # input_m = raw_input('msg>>>')
    input_m = input('msg>>>')
    # Message bytes as '0x..' strings; append the mandatory 0x80 pad byte.
    ascii_list = list((map(hex, map(ord, input_m))))
    # print('ascii_list:',ascii_list)
    msg_lenth = len(ascii_list) * 8
    ascii_list.append('0x80')
    # Zero-pad until the length is 64 bits short of a 512-bit boundary.
    while (len(ascii_list) * 8 + 64) % 512 != 0:
        ascii_list.append('0x00')
    # The final 64 bits hold the message bit-length, low byte first
    # (e.g. message 'a' -> '0x0800000000000000').
    msg_lenth_0x = hex(msg_lenth)[2:]
    msg_lenth_0x = '0x' + msg_lenth_0x.rjust(16, '0')
    msg_lenth_0x_big_order = reverse_hex(msg_lenth_0x)[2:]
    msg_lenth_0x_list = []
    for i in range(0, len(msg_lenth_0x_big_order), 2):
        msg_lenth_0x_list.append('0x' + msg_lenth_0x_big_order[i:i + 2])
    ascii_list.extend(msg_lenth_0x_list)
    # print (ascii_list)
    # Process each 64-byte block through the four rounds.
    for i in range(0, len(ascii_list) // 64):
        # Save the incoming 128-bit state for the final addition.
        aa, bb, cc, dd = abcd_list
        # Build each round's 16 message words in its access order.
        order_1 = genM16(m_1, ascii_list, i)
        order_2 = genM16(m_2, ascii_list, i)
        order_3 = genM16(m_3, ascii_list, i)
        order_4 = genM16(m_4, ascii_list, i)
        # The four main rounds; note fun() returns an already-rotated list.
        abcd_list = fun(abcd_list, F, order_1, shi_1)
        # --------------------------------------
        abcd_list = fun(abcd_list, G, order_2, shi_2)
        # --------------------------------------
        abcd_list = fun(abcd_list, H, order_3, shi_3)
        # --------------------------------------
        abcd_list = fun(abcd_list, I, order_4, shi_4)
        # --------------------------------------
        # Add the round output back onto the saved seed state. The saved
        # aa..dd are used (not abcd_list[0..3]) because abcd_list was
        # overwritten by the rounds above.
        output_a = hex((int(abcd_list[0], 16) + int(aa, 16)) & 0xffffffff)
        output_b = hex((int(abcd_list[1], 16) + int(bb, 16)) & 0xffffffff)
        output_c = hex((int(abcd_list[2], 16) + int(cc, 16)) & 0xffffffff)
        output_d = hex((int(abcd_list[3], 16) + int(dd, 16)) & 0xffffffff)
        # The block output becomes the seed for the next 512-bit block.
        abcd_list = [output_a, output_b, output_c, output_d]
        # Reset the T(i) counter for the next block's 64 steps.
        Ti_count = 1
    # Format and print the digest.
    print('md5>>>' + show_result(abcd_list))
# 0CC175B9C0F1B6A831C399E269772661
break | true |
4fc9ef846dbfdffe1fdba062a04c9682290b65f8 | Python | Krzyzaku21/Git_Folder | /_python_learning/Nauka Metod/met_list.py | UTF-8 | 3,831 | 4.625 | 5 | [] | no_license | # my_list = []
# 1. Zbiór wartości, 2. indeks od 0
# 3. Łatwe : dodawanie, odczytywanie, usówanie wartości, 4. Lista posiada swój indeks
# 5. Comprehension [expression for item in list] -
def list_comprehension():
    """Interactive demo: four list-comprehension examples chosen by menu."""
    point1 = "lista liter ze słowa human"
    point2 = "lista z mapowaniem funkcji lambda i lista2 z filtrowaniem funkcji lambda "
    point3 = "zaleznosci if w list comprehension"
    point4 = "lista liter ze słowa human"
    print(f'1: {point1}\n',f'2: {point2}\n',f'3: {point3}\n',f'4: {point4}\n')
    points=int(input("Podaj wartość od 1 do 4: "))
    if points == 1:
        # A comprehension over a string yields its characters.
        h_letters = [ letter for letter in 'human' ]
        print( h_letters)
    elif points == 2:
        print('Mapowanie')
        my_list = [1, 5, 4, 6, 8, 11, 3, 12]
        new_list = list(map(lambda x: x * 2 , my_list))
        print(new_list)
        print('Filtrowanie')
        my_list = [1, 5, 4, 6, 8, 11, 3, 12]
        new_list = list(filter(lambda x: (x%2 == 0) , my_list))
        print(new_list)
    elif points == 3:
        # Two chained ifs act like `and`: multiples of both 2 and 5.
        num_list = [y for y in range(100) if y % 2 == 0 if y % 5 == 0]
        print(num_list)
    else:
        # Conditional expression inside the comprehension body.
        obj = ["Even" if i%2==0 else "Odd" for i in range(10)]
        print(obj)
#ZAD1
#metoda ".count" - pregląda listę i zwraca ilość tych samych argumenów, zwraca int
def list_count1():
    """Demo of list.count(): ask for a value and print its occurrence count."""
    sample = ['dupa', '1', 1, 3.0, 'dupa', True, False, None, 1, 'dupa', "dwa", "dwa", 2, 2]
    print(sample)
    query = input('Podaj jeden ze znaków z listy powyżej: ')
    # input() always returns a str, so only string entries can match.
    print(sample.count(query))
#ZAD2
#metoda ".sorted" - sortuje liczby
def list_sorted1():
    """Demo of sorted(): original list, then descending, then ascending."""
    numbers = [4, 3, 5, 6, 9, 1, 2, 33, 65, 32, 41, 98, 102]
    snapshot = list(numbers)
    descending = sorted(numbers, reverse=True)
    ascending = sorted(numbers)
    print('Posortowana dana liczba: ', snapshot)
    print(descending, " : reverse - odwrotnie True")
    print(ascending)
#ZAD2
#metoda ".sort" - sortuje liczby jak w sorted
def list_sorted2(lista):
    """Print the list, sort it ascending in place, then reverse to descending."""
    print('Posortowana dana liczba: ', *lista)
    lista.sort()
    ascending = ', '.join(str(item) for item in lista)
    print("Wynik: ", ascending)
    lista.reverse()
    descending = ', '.join(str(item) for item in lista)
    print("Wynik liczby odwrotnie: ", descending)
#ZAD3
#metody typu sort() a sorted(), reverse() a reversed()
#różnią się tym, iż metody sort, reverse działają na jednym obiekcie (bez kopii),
#natomiast funkcje sorted, reversed mogą być uzywane wiele razy i tworzą nowe obiekty
def list_difference_sort_sorted():
    """Demo: sorted()/reversed() build new objects and can be reused, while
    sort()/reverse() mutate one list in place and return None."""
    numbers = [6, 9, 3, 1]
    print(id(numbers), numbers)
    num2 = [6, 9, 3, 1]
    #num2 = numbers.copy()
    print(id(num2), num2)
    num1 = sorted(numbers)  # new list; `numbers` stays unsorted
    num2.sort()  # in-place mutation of num2
def list_append1(my_list):
    """Demo of index assignment, append, concatenation and membership.

    NOTE(review): `my_list = my_list + my_list2` rebinds the LOCAL name, so
    the caller's list is not extended by [3, 20]; the earlier index
    assignment and the in-loop append(5) DO mutate the caller's list.
    Requires len(my_list) >= 5 or the `my_list[4] = 20` line raises
    IndexError.
    """
    Limit = (len(my_list))
    for v in range(0,Limit+1):
        if v == Limit:
            my_list[4] = 20 # overwrite position 4 (e.g. replaces 'True' with 20)
        my_list.append(5) # appends 5 on EVERY iteration, not just once
    my_list2 = [3, 20]
    my_list = my_list + my_list2
    print("First list: ",my_list)
    print("Is number 5 in frist list? :",5 in my_list)
    my_list3 = [v for v in range(9, 27) if v % 2 == 0 ] # keep only even values
    print("Second list: ",my_list3)
def One_ListValues_to_SecondListValues( list1, list2 ):
    """Print corresponding elements of the two lists side by side."""
    for position in range(len(list1)):
        print(list1[position], list2[position])
# list1 = ["red", "big", "tasty"]
# list2 = ["apple", "banana", "cherry"]
# One_ListValues_to_SecondListValues(list1, list2)
def ListEnumerate(list1, list2):
    """Print index, element of list1 and the matching element of list2."""
    for idx, value in enumerate(list1):
        print(idx, value, list2[idx])
persons =["adam", "wojtek"]
adres = ["nike", "słowackiego"]
# ListEnumerate(persons, adres)
def test_zip( foo, bar ):
    """Pair up foo and bar element-wise, printing each pair as it is built."""
    store = []
    for left, right in zip(foo, bar):
        store.append((left, right))
        print(left, right)
# test_zip(persons, adres) | true |
8eae838da7124b775432c4fa1eb6ea40a3601fac | Python | Akenne/CheckiO | /Home/How to find friends.py | UTF-8 | 2,205 | 3.65625 | 4 | [] | no_license | """
Sophia's drones are not soulless and stupid drones; they can make and have friends.
In fact, they are already are working for the their own social network just for drones!
Sophia has received the data about the connections between drones and she wants to know more about relations between them.
We have an array of straight connections between drones.
Each connection is represented as a string with two names of friends separated by hyphen.
For example: "dr101-mr99" means what the dr101 and mr99 are friends.
Your should write a function that allow determine more complex connection between drones.
You are given two names also. Try to determine if they are related through common bonds by any depth.
For example: if two drones have a common friends or friends who have common friends and so on.
"""
#I did this wrong I think but it works
def check_connection(network, first, second):
    """Return True if `first` and `second` are connected in `network`.

    `network` is an iterable of "a-b" strings, each an undirected
    friendship edge; connectivity is transitive to any depth.

    Rewritten as a plain breadth/depth-first search. The original built
    its adjacency map with broken heuristics (it compared a list to a
    string, so the `!= a[1]` guard was always true, and patched in extra
    edges via alphanumeric substring matching) and mutated the frontier
    list while iterating it.
    """
    # Build an undirected adjacency map: name -> set of direct friends.
    neighbours = {}
    for edge in network:
        a, b = edge.split('-')
        neighbours.setdefault(a, set()).add(b)
        neighbours.setdefault(b, set()).add(a)
    # Mirror the original's early exit for unknown start nodes.
    if first not in neighbours:
        return False
    frontier = [first]
    seen = set()
    while frontier:
        node = frontier.pop()
        if node in seen:
            continue
        seen.add(node)
        for friend in neighbours[node]:
            # Like the original, success is detected on a neighbour match.
            if friend == second:
                return True
            if friend not in seen:
                frontier.append(friend)
    return False
check_connection(
("dr101-mr99", "mr99-out00", "dr101-out00", "scout1-scout2",
"scout3-scout1", "scout1-scout4", "scout4-sscout", "sscout-super"),
"scout2", "scout3") | true |
7af74ef640faf9383a817e4dcc9f306b1cc3778e | Python | OScott19/TheMulQuaBio | /archived/silbiocomp/Practicals/Code/re1.py | UTF-8 | 642 | 3.671875 | 4 | [
"CC-BY-3.0",
"MIT"
] | permissive | import re
# NOTE(review): this file uses Python 2 print statements (`print match`)
# and will not run under Python 3 without conversion.
my_string = "a given string"
# find a space in the string
match = re.search(r'\s', my_string)
print match
# this should print something like
# <_sre.SRE_Match object at 0x93ecdd30>
# now we can see what has matched
match.group()
# r's\w*' matches an 's' followed by any word characters.
match = re.search(r's\w*', my_string)
# this should return "string"
match.group()
# NOW AN EXAMPLE OF NO MATCH:
# find a digit in the string
match = re.search(r'\d', my_string)
# this should print "None"
print match
# Further Example
#
my_string = 'an example'
# r'\w*\s' matches a word followed by whitespace ('an ').
match = re.search(r'\w*\s', my_string)
if match:
    print 'found a match:', match.group()
else:
    print 'did not find a match'
| true |
4cfa669563e70feca81598dd1bca1dbe0a953602 | Python | kksuresh25/mutator | /loop_builder.py | UTF-8 | 25,887 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env
from modeller import *
from modeller.optimizers import molecular_dynamics, conjugate_gradients
from modeller.automodel import *
from modeller.scripts import complete_pdb
from Bio import PDB as pdb
import re
import csv
import os, glob
import gromacs
"""
loop_builder.py
This program fills in missing residues from (correctly formatted) pdb files.
It reads in the coordinates and detects any missing residues and then
gives an alignment file that can be used for loop modeling. It currently
does not do a very good job, as modeller is both poorly executed and also
I do not have the practical knowledge to make it do exactly what I want.
Known issues:
the paths are hard coded, you will need to modify them
tested 3CQU-A and 3O96-A, got RMSD of 1.5 -- not as good as could be
when the first residue is missing, it can't write the alignment file
To do:
fix issues with knots!
*** (DONE) ***
*** Solution: make sure the dashes in the "active.ali" file are of
correct length ***
modify way it reads in sequence so that models are automatically given
the correct name including active/inactive designation *** (DONE) ***
put in loop to automatically put in regions to modify (now hard coded)
*** (DONE) ***
put it in a loop so that it reads the active/inactive templates and then
create models in a separate directory *** (DONE) ***
*** (DONE) ***
put in a sanity check to make sure there are no mutations in the pdb
structures (we know there are some) and use modeller to mutate
these back
*** how to call mutate_model.py? ***
*** should I make a new text file that will save the mutated models,
then feed that text file into mutate_model.py? ***
maybe put in a sanity check at the end that compares models of similar
sequence or structure
maybe autoload into pymol?
maybe a sanity check for whether protein_name is correct?
sanity check to make sure that it is actually complete or incomplete
"""
# Read structures.csv once; each row describes one PDB structure to process.
# A context manager closes the handle (the original leaked it), and list()
# replaces the manual append loop.
with open('./structures.csv', 'r') as datafile:
    data = list(csv.reader(datafile))
class PDB_info(object):
    """Gives named attributes to the positional columns of one structures.csv row."""
    def __init__(self, row):
        self.id = row[0]            # PDB identifier for this structure
        self.protein = row[1]       # protein name the PDB file belongs to
        self.complete = row[2]      # "yes", or a note describing missing residues
        self.conformation = row[3]  # "active" or "inactive"
        self.mutation = row[4]      # mutation details, or "no_mutation"
# Wrap every CSV row (including the header row at index 0) in a PDB_info.
pdb_info = [PDB_info(item) for item in data]
#print pdb_info[0].id
# Main driver: one iteration per structure row (row 0 is the CSV header,
# hence the range starting at 1). This first part extracts the resolved
# sequence, marks gaps with '-', and writes the PIR alignment file.
for i in range(1, len(pdb_info)):
    pdb_name = pdb_info[i].id #saves given pdb name as a variable
    protein_name = pdb_info[i].protein #saves given protein name as a variable
    complete = pdb_info[i].complete #saves yes or no for complete
    structure_conf = pdb_info[i].conformation #saves active or inactive for conformation
    mutation = pdb_info[i].mutation
    print pdb_name
    pdb_file = './PDBs/'+pdb_name+'.pdb'
    if os.path.isfile(pdb_file) != True: #if there is no pdb_file, then make a note of it and continue with next pdb
        missing = open('missing_PDBs.csv','a')
        missing.write(pdb_name+','+protein_name+','+complete+','+structure_conf+','+mutation+"\n")
    else:
        fp = open(pdb_file)
        parser = pdb.PDBParser()
        struct = parser.get_structure("name",pdb_file) #read in pdb file using PDBParser
        ppb = pdb.PPBuilder() #peptide class to get sequence
        last = 100000 #make sure first iter through loop has no dashes -
        structure_sequence = ''
        first_range = []
        last_range = []
        for seq in ppb.build_peptides(struct):
            print seq.get_sequence()
        #read in the full sequence from the pdb file
        full_sequence = ''
        lines = fp.readlines()
        first = lines[0]
        header = re.split('HEADER\W+',first)[1] #uses a modified version of PDB file
        header_list = header.split(':')
        first_res = int(header_list[1])
        last_res = int(header_list[3])
        # Walk the resolved peptide fragments; the gap between consecutive
        # fragments becomes a run of '-' placeholders in structure_sequence.
        for seq in ppb.build_peptides(struct):
            #use this re to get the chain breaks
            search = re.search('start=([0-9]{1,5}).+end=([0-9]{1,5})',"{0}".format(seq))
            first_range.append(search.groups()[0])
            last_range.append(search.groups()[1])
            first = search.groups()[0]
            diff = int(first)-int(last)-1
            if(diff > 0): #put in dashes for missing residues
                structure_sequence += diff*'-'
            last = search.groups()[1]
            structure_sequence += seq.get_sequence()
            #print (int(first)-21),(int(last)-21)
        print structure_sequence
        first_range = map(int, first_range) #makes this an integer array
        last_range = map(int, last_range) #makes this an integer array
        first_res_in_range = first_range.pop(0) #gets rid of the first element
        last_res_in_range = last_range.pop(-1) #gets rid of the last element
        first_missing = [x + 1 for x in last_range] #will use this to make missing residue ranges
        last_missing = [x - 1 for x in first_range] #will use this to make missing residue ranges
        # Pad with leading/trailing dashes when the first/last residues named
        # in the header were not resolved in the structure.
        if first_res != first_res_in_range:
            diff = first_res_in_range - first_res
            if (diff > 0):
                structure_sequence = diff*'-' + structure_sequence
        if last_res != last_res_in_range:
            diff = last_res - last_res_in_range
            if (diff > 0):
                structure_sequence = structure_sequence + diff*'-'
        #put in newlines into structure_sequence for proper PIR format
        # NOTE(review): this inner loop reuses the outer loop variable `i`
        # (harmless for the outer `for`, but confusing to read).
        for i in range(0,len(structure_sequence),70):
            structure_sequence = structure_sequence[:i] + "\n" + structure_sequence[i:]
        for index in range(1,10):
            split_line = re.split('REMARK 300 ',lines[index]) #appears that changing remark to 465 lowers the number of atoms
            if split_line[0] == '':
                full_sequence += split_line[1]
        #write the alignment file
        # NOTE(review): "-"[0] is simply '-', so this splits on the dash.
        pdb_code = (pdb_name.split("-"[0]))
        name = pdb_code[0] #changed this from hard coded 4F7S; it does not seem like this variable is used anywhere else
        chain = str(pdb_code[1])
        PIR = open('active.ali','w')
        PIR.write(">P1;{0}\n".format(pdb_name))
        PIR.write("structureX:{0}".format(header))
        PIR.write("{0}*\n\n".format(structure_sequence.strip()))
        PIR.write(">P1;{0}\n".format(protein_name))
        PIR.write("sequence:{0}".format(header))
        PIR.write("{0}*\n\n".format(full_sequence.strip()))
        PIR.close()
        #begin modeller stuff here
        log.verbose()
        #where to look for pdb files
        env = environ()
        env.io.atom_files_directory = ['.', './PDBs']
        # Create a new class based on 'loopmodel' so that we can redefine
        # select_loop_atoms
class MyLoop(automodel):
# This routine picks the residues to be refined by loop modeling
def select_loop_atoms(self): #need to fix this
#####make this easier to read by just doing if first case, add, if last case, add, etc.
if last_res_in_range != last_res and first_res_in_range == first_res:
if not first_missing:
return selection(self.residue_range(last_res_in_range + ':', last_res + ':'))
else:
return selection(self.residue_range(first_missing + ':', last_missing + ':'),
self.residue_range(last_res_in_range + ':', last_res + ':'))
elif first_res_in_range != first_res and last_res_in_range == last_res:
if not last_missing:
return selection(self.residue_range(first_res + ':', first_res_in_range + ':'))
else:
return selection(self.residue_range(first_res + ':', first_res_in_range + ':'),
self.residue_range(first_missing + ':', last_missing + ':'))
elif first_res_in_range != first_res and last_res_in_range != last_res:
if not first_missing: # can use first_missing because if first_missing is empty, so is last_missing
return selection(self.residue_range(first_res + ':', first_res_in_range + ':'),
self.residue_range(last_res_in_range + ':', last_res + ':'))
else:
return selection(self.residue_range(first_res + ':', first_res_in_range + ':'),
self.residue_range(first_missing + ':', last_missing + ':'),
self.residue_range(last_res_in_range + ':', last_res + ':'))
else: #if first_res_in_range == first_res and last_res_in_range == last_res:
return selection(self.residue_range(first_missing + ':', last_missing + ':'))
        # index of the last model
        # Build one model with loop refinement driven by MyLoop above.
        a = MyLoop(env,
                   alnfile = 'active.ali', # alignment filename
                   knowns = pdb_name, # codes of the templates
                   sequence = protein_name, # code of the target
                   library_schedule = autosched.slow,
                   deviation = 1,
                   assess_methods = assess.DOPE) # assess each loop with DOPE
        a.starting_model = 1 # index of the first model
        a.ending_model = 1
        a.make() #do modeling and loop refinement
        fp.close()
        #move directory and change name based on active/inactive and incomplete/complete designation
        if re.match("active", structure_conf) is not None:
            if re.match("yes", complete) is not None:
                os.rename(protein_name+'.B99990001.pdb', './actives/complete/'+protein_name+'_active.pdb')
                os.rename(protein_name+'.D00000001', './actives/complete/'+protein_name+'_active_logFile')
                modelname = ('./actives/complete/'+protein_name+'_active.pdb')
                briefname = ('./actives/complete/'+protein_name+'_active')
                briefername = (protein_name+'_active')
                dir_name = ('./actives/complete/')
                is_active = 1
            else:
                os.rename(protein_name+'.B99990001.pdb', './actives/incomplete/'+protein_name+'_active.pdb')
                os.rename(protein_name+'.D00000001', './actives/incomplete/'+protein_name+'_active_logFile')
                modelname = ('./actives/incomplete/'+protein_name+'_active.pdb')
                briefname = ('./actives/incomplete/'+protein_name+'_active')
                briefername = (protein_name+'_active')
                dir_name = ('./actives/incomplete/')
                is_active = 1
        if re.match("inactive", structure_conf) is not None:
            if re.match("yes", complete) is not None:
                os.rename(protein_name+'.B99990001.pdb', './inactives/complete/'+protein_name+'_inactive.pdb')
                os.rename(protein_name+'.D00000001', './inactives/complete/'+protein_name+'_inactive_logFile')
                modelname = ('./inactives/complete/'+protein_name+'_inactive.pdb')
                briefname = ('./inactives/complete/'+protein_name+'_inactive')
                briefername = (protein_name+'_inactive')
                dir_name = ('./inactives/complete/')
                is_active = 0
            else:
                os.rename(protein_name+'.B99990001.pdb', './inactives/incomplete/'+protein_name+'_inactive.pdb')
                os.rename(protein_name+'.D00000001', './inactives/incomplete/'+protein_name+'_inactive_logFile')
                modelname = ('./inactives/incomplete/'+protein_name+'_inactive.pdb')
                briefname = ('./inactives/incomplete/'+protein_name+'_inactive')
                briefername = (protein_name+'_inactive')
                dir_name = ('./inactives/incomplete/')
                is_active = 0
        # Clean up MODELLER's intermediate files for this protein.
        for filename in glob.glob("./"+protein_name+"*"):
            os.remove(filename)
        #mutate_model.py
        # Renumber residues so the model starts at the header's first residue.
        gromacs.editconf(f = modelname, resnr = first_res, o = modelname)
        if re.search('no_mutation', mutation) is not None:
            print 'No mutations here'
        else:
            print "Looks like we've got a mutation. Let's check it out. \n"
            # Mutations come as '&'-separated tokens shaped like "V559D".
            different_mutations = re.split('&', mutation)
            for mutant_res in range(0, len(different_mutations)):
                is_mutated = re.search(r"([a-z])([0-9]+)([a-z])", different_mutations[mutant_res], re.I)
                if is_mutated:
                    mutations_list = is_mutated.groups()
                    respos = mutations_list[1]
                    # NOTE(review): one_to_three expects an upper-case letter;
                    # a lower-case match (allowed by re.I) would fail -- confirm.
                    restyp = pdb.Polypeptide.one_to_three(mutations_list[0]) #get three letter code
                    #makes use of the optimize function in modeller
                    def optimize(atmsel, sched):
                        """Conjugate-gradient passes over `sched`, MD refinement, then a final CG pass."""
                        #conjugate gradient
                        for step in sched:
                            step.optimize(atmsel, max_iterations=200, min_atom_shift=0.001)
                        #md
                        refine(atmsel)
                        cg = conjugate_gradients()
                        cg.optimize(atmsel, max_iterations=200, min_atom_shift=0.001)
                    #molecular dynamics
                    def refine(atmsel):
                        """Simulated annealing on the selection: heat up, then cool down."""
                        # at T=1000, max_atom_shift for 4fs is cca 0.15 A.
                        md = molecular_dynamics(cap_atom_shift=0.39, md_time_step=4.0,
                                                md_return='FINAL')
                        init_vel = True
                        for (its, equil, temps) in ((200, 20, (150.0, 250.0, 400.0, 700.0, 1000.0)),
                                                    (200, 600,
                                                    (1000.0, 800.0, 600.0, 500.0, 400.0, 300.0))):
                            for temp in temps:
                                md.optimize(atmsel, init_velocities=init_vel, temperature=temp,
                                            max_iterations=its, equilibrate=equil)
                            init_vel = False
                    #use homologs and dihedral library for dihedral angle restraints
                    def make_restraints(mdl1, aln):
                        """Rebuild stereochemical and dihedral restraints for the whole model."""
                        rsr = mdl1.restraints
                        rsr.clear()
                        s = selection(mdl1)
                        for typ in ('stereo', 'phi-psi_binormal'):
                            rsr.make(s, restraint_type=typ, aln=aln, spline_on_site=True)
                        for typ in ('omega', 'chi1', 'chi2', 'chi3', 'chi4'):
                            rsr.make(s, restraint_type=typ+'_dihedral', spline_range=4.0,
                                     spline_dx=0.3, spline_min_points = 5, aln=aln,
                                     spline_on_site=True)
                    # Mutate residue `respos` to `restyp` with MODELLER, then
                    # energy-minimise just that residue and write the mutant PDB.
                    log.verbose()
                    # Set a different value for rand_seed to get a different final model
                    env = environ(rand_seed=-49837)
                    env.io.hetatm = True
                    #soft sphere potential
                    env.edat.dynamic_sphere=False
                    #lennard-jones potential (more accurate)
                    env.edat.dynamic_lennard=True
                    #https://salilab.org/modeller/manual/node127.html to learn more about contact_shell and update_dynamic
                    env.edat.contact_shell = 4.0
                    env.edat.update_dynamic = 0.39
                    # Read customized topology file with phosphoserines (or standard one)
                    env.libs.topology.read(file='$(LIB)/top_heav.lib')
                    # Read customized CHARMM parameter library with phosphoserines (or standard one)
                    env.libs.parameters.read(file='$(LIB)/par.lib')
                    # Read the original PDB file and copy its sequence to the alignment array:
                    mdl1 = model(env, file=modelname)
                    ali = alignment(env)
                    ali.append_model(mdl1, atom_files=modelname, align_codes=modelname)
                    #set up the mutate residue selection segment
                    # NOTE(review): `respos` is the *string* captured by the
                    # regex; presumably MODELLER accepts string residue ids here.
                    s = selection(mdl1.residues[respos])
                    #perform the mutate residue operation
                    s.mutate(residue_type=restyp)
                    #get two copies of the sequence. A modeller trick to get things set up
                    ali.append_model(mdl1, align_codes=modelname)
                    # Generate molecular topology for mutant
                    mdl1.clear_topology()
                    mdl1.generate_topology(ali[-1])
                    # Transfer all the coordinates you can from the template native structure
                    # to the mutant (this works even if the order of atoms in the native PDB
                    # file is not standard):
                    #here we are generating the model by reading the template coordinates
                    mdl1.transfer_xyz(ali)
                    # Build the remaining unknown coordinates
                    mdl1.build(initialize_xyz=False, build_method='INTERNAL_COORDINATES')
                    #yes model2 is the same file as model1. It's a modeller trick.
                    mdl2 = model(env, file=modelname)
                    #required to do a transfer_res_numb
                    #ali.append_model(mdl2, atom_files=modelname, align_codes=modelname)
                    #transfers from "model 2" to "model 1"
                    mdl1.res_num_from(mdl2,ali)
                    #It is usually necessary to write the mutated sequence out and read it in
                    #before proceeding, because not all sequence related information about MODEL
                    #is changed by this command (e.g., internal coordinates, charges, and atom
                    #types and radii are not updated).
                    mdl1.write(file=modelname+restyp+respos+'.tmp')
                    mdl1.read(file=modelname+restyp+respos+'.tmp')
                    #set up restraints before computing energy
                    #we do this a second time because the model has been written out and read in,
                    #clearing the previously set restraints
                    make_restraints(mdl1, ali)
                    #a non-bonded pair has to have at least as many selected atoms
                    mdl1.env.edat.nonbonded_sel_atoms=1
                    sched = autosched.loop.make_for_model(mdl1)
                    #only optimize the selected residue (in first pass, just atoms in selected
                    #residue, in second pass, include nonbonded neighboring atoms)
                    #set up the mutate residue selection segment
                    s = selection(mdl1.residues[respos])
                    mdl1.restraints.unpick_all()
                    mdl1.restraints.pick(s)
                    s.energy()
                    s.randomize_xyz(deviation=4.0)
                    mdl1.env.edat.nonbonded_sel_atoms=2
                    optimize(s, sched)
                    #feels environment (energy computed on pairs that have at least one member
                    #in the selected)
                    mdl1.env.edat.nonbonded_sel_atoms=1
                    optimize(s, sched)
                    s.energy()
                    #give a proper name
                    mdl1.write(file=briefname+restyp+respos+'.pdb')
                    #delete the temporary file
                    os.remove(modelname+restyp+respos+'.tmp')
                    # Point subsequent mutations / the .pml writer at the mutant file.
                    modelname = (briefname+restyp+respos+'.pdb')
                    briefname = (briefname+restyp+respos)
                    briefername = (briefername+restyp+respos)
                else:
                    print 'Mutation of '+pdb_name+' not recognized. Going to take note of this.'
                    bad_mutation = open('bad_mutation_PDBs.csv','a')
                    bad_mutation.write(pdb_name+','+protein_name+','+complete+','+structure_conf+','+mutation+"\n")
                    bad_mutation.close()
        # Append PyMOL commands that load the model and align it on the ABL1
        # reference structure.
        # NOTE(review): `is_active` and `modelname` keep their values from the
        # *previous* iteration if structure_conf matched neither "active" nor
        # "inactive" above -- confirm the CSV only ever contains those two.
        if (is_active == 1):
            template = ('./actives/complete/ABL1_active.pdb')
            template_briefname = ('ABL1_active')
            pml_viewer = open('./active_aligner.pml', 'a')
            pml_viewer.write('load '+modelname+"\n"+'hide lines, '+briefername+"\n"+'show cartoon, '
                +briefername+"\n"+'align '+briefername+', '+template_briefname+', '+'cycles=0'"\n"+"\n")
        else:
            template = ('./inactives/complete/ABL1_inactive.pdb')
            template_briefname = ('ABL1_inactive')
            pml_viewer = open('./inactive_aligner.pml', 'a')
            pml_viewer.write('load '+modelname+"\n"+'hide lines, '+briefername+"\n"+'show cartoon, '
                +briefername+"\n"+'align '+briefername+', '+template_briefname+', '+'cycles=0'"\n"+"\n")
#write the align-multiple.ali
"""
env = environ()
env.io.atom_files_directory = ['./']
env.libs.topology.read(file='$(LIB)/top_heav.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
template = ('./actives/complete/ABL1_active.pdb')
mdl = complete_pdb(env, modelname)
mdl2 = complete_pdb(env, template)
# Write out chain sequences:
for c in mdl.chains:
c.write(file='align-multiple1.ali', atom_file=modelname,
align_code=modelname)
for c in mdl2.chains:
c.write(file='align-multiple2.ali', atom_file=template,
align_code=template)
filenames = ['align-multiple1.ali', 'align-multiple2.ali']
with open('./align-multiple.ali', 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
log.verbose()
env = environ()
env.io.atom_files_directory = ['./']
aln = alignment(env)
for (code) in ((template), ('1uld', 'D'), ('1ulf', 'B'),
('1ulg', 'B'), ('1is5', 'A')):
mdl = model(env, file=code, model_segment=('FIRST:A', 'LAST:A'))
aln.append_model(mdl, atom_files=code, align_codes=code)
for (weights, write_fit, whole) in (((1., 0., 0., 0., 1., 0.), False, True),
((1., 0.5, 1., 1., 1., 0.), False, True),
((1., 1., 1., 1., 1., 0.), True, False)):
aln.salign(rms_cutoff=3.5, normalize_pp_scores=False,
rr_file='$(LIB)/as1.sim.mat', overhang=30,
gap_penalties_1d=(-450, -50),
gap_penalties_3d=(0, 3), gap_gap_score=0, gap_residue_score=0,
dendrogram_file='1is3A.tree',
alignment_type='tree', # If 'progresive', the tree is not
# computed and all structues will be
# aligned sequentially to the first
#ext_tree_file='1is3A_exmat.mtx', # Tree building can be avoided
# if the tree is input
feature_weights=weights, # For a multiple sequence alignment only
# the first feature needs to be non-zero
improve_alignment=True, fit=True, write_fit=write_fit,
write_whole_pdb=whole, output='ALIGNMENT QUALITY')
aln.write(file='1is3A.pap', alignment_format='PAP')
aln.write(file='1is3A.ali', alignment_format='PIR')
# The number of equivalent positions at different RMS_CUTOFF values can be
# computed by changing the RMS value and keeping all feature weights = 0
aln.salign(rms_cutoff=1.0,
normalize_pp_scores=False, rr_file='$(LIB)/as1.sim.mat', overhang=30,
gap_penalties_1d=(-450, -50), gap_penalties_3d=(0, 3),
gap_gap_score=0, gap_residue_score=0, dendrogram_file='1is3A.tree',
alignment_type='progressive', feature_weights=[0]*6,
improve_alignment=False, fit=False, write_fit=True,
write_whole_pdb=False, output='QUALITY')
log.verbose() # request verbose output
env = environ() # create a new MODELLER environment to build this model in
# directories for input atom files
env.io.atom_files_directory = ['./']
a = automodel(env,
alnfile = 'align-multiple.ali', # alignment filename
knowns = (template), # codes of the templates
sequence = modelname) # code of the target
a.starting_model= 1 # index of the first model
a.ending_model = 1 # index of the last model
# (determines how many models to calculate)
a.make() # do the actual comparative modeling
        """
ef6490b9000f00cef1d58fbc41d9cfc0a365ee93 | Python | RuoBingCoder/Python-Machine-Learning-Algorithm | /KNN/knn_plt.py | UTF-8 | 824 | 3 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module '
__author__ = 'clawpo'
from sklearn import datasets
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Load the Iris data set; class labels are already integer-coded:
    # 0 = Iris-Setosa, 1 = Iris-Versicolor, 2 = Iris-Virginica.
    # (Stripped the dataset residue fused onto the last line, which broke the
    # file's syntax, and simplified the `iris.data[:, [2]][i]` indexing.)
    iris = datasets.load_iris()
    y = iris.target
    colors = ['red', 'black', 'blue', 'green']
    marks = ['*', 'x', '+', 'o']
    fig, ax = plt.subplots()
    for idx, label in enumerate(y):
        # Columns 2 and 3 of iris.data are petal length and petal width.
        ax.scatter(iris.data[idx, 2], iris.data[idx, 3], c=colors[label],
                   marker=marks[label], label=label, alpha=0.5, edgecolors='none')
    plt.ylim(0.0, 3.0)
    plt.xlim(0.0, 8.0)
    plt.show()
12822a083dd79ca8ce3e16c8467743f2972ac815 | Python | bwaheed22/imessages | /Scripts/sentiment_scores.py | UTF-8 | 1,128 | 3.015625 | 3 | [] | no_license | # Sentiment Analysis
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import nltk
nltk.download('punkt')
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Instantiate the VADER sentiment analyzer.
analyzer = SentimentIntensityAnalyzer()

# pandas is used below but was never imported in the original file (NameError
# on `pd`); import it here so the script actually runs.
import pandas as pd

# Load Group Chat DataFrame:
greece_gang = pd.read_csv("/Users/mbp/Documents/Side-Projects/iMessage_Analysis/greece_gang.csv")

# Calculate sentiment scores for each text in the group chat.
greece_gang['text'] = greece_gang.text.astype(str)

# Hoist the English stopword list into a set once: O(1) membership tests and
# no re-reading of the corpus for every single message.
stopword_set = set(stopwords.words('english'))

scores = []
for texts in greece_gang.text:
    tokens = word_tokenize(texts)
    tokens_clean = [word for word in tokens if word not in stopword_set]
    tokens_sentence = " ".join(tokens_clean)
    scores.append(analyzer.polarity_scores(tokens_sentence))

# Keep only the compound (overall) score for each message.
compound_scores = [score['compound'] for score in scores]

# Add scores to the dataframe and write to csv for further analysis.
greece_gang['sentiment'] = compound_scores
greece_gang.to_csv('/Users/mbp/Documents/Side-Projects/iMessage_Analysis/greecegang_senti.csv')
| true |
db918b4af752c86d5d4dfca411da49e928aa2b48 | Python | ComputerVisionCourse/CV-UI | /Class3/src/video_dilation.py | UTF-8 | 548 | 2.75 | 3 | [] | no_license | import cv2
import numpy as np
if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    # The kernel never changes, so build it once instead of every frame.
    kernel = np.ones((10, 10), np.uint8)
    try:
        while True:
            # get frame from camera
            ret, frame = cap.read()
            if not ret:
                # camera unplugged / no frame -- stop instead of crashing in cvtColor
                break
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # dilate image with a 10x10 rectangular structuring element
            dilation = cv2.dilate(img, kernel, iterations=1)
            # show images; ESC (27) quits
            cv2.imshow('Original image', img)
            cv2.imshow('Transformed image', dilation)
            if cv2.waitKey(20) & 0xFF == 27:
                break
    finally:
        cap.release()  # release the camera device (missing in the original)
        cv2.destroyAllWindows()
| true |
7c152477ce5e066eb2d897f8be9301684e446aac | Python | Luolingwei/LeetCode | /String/Q777_Swap Adjacent in LR String.py | UTF-8 | 875 | 3.453125 | 3 | [] | no_license |
# Idea: the XL->LX move only lets an 'L' drift left, and RX->XR only lets an
# 'R' drift right; 'L' and 'R' can never cross each other. So `end` is
# reachable iff both strings have the same L/R letters in the same order,
# with every 'L' at an index <= its start index and every 'R' at an index >=.
class Solution:
    def canTransform(self, start: str, end: str) -> bool:
        src = [(i, c) for i, c in enumerate(start) if c in ('L', 'R')]
        dst = [(i, c) for i, c in enumerate(end) if c in ('L', 'R')]
        if len(src) != len(dst):
            return False
        for (si, sc), (di, dc) in zip(src, dst):
            if sc != dc:
                return False
            # 'L' may only move left; 'R' may only move right.
            if sc == 'L' and si < di:
                return False
            if sc == 'R' and si > di:
                return False
        return True
# Demo: this pair is not reachable (the first 'L' would have to move right),
# so this prints False. (Stripped the dataset residue fused onto the print
# line, which broke the file's syntax.)
a = Solution()
print(a.canTransform("LXXLXRLXXL", "XLLXRXLXLX"))
2a9bdfe234da206b963f596f75e64454f11cc1ce | Python | ytyaru/Python.Hatena.Authentication.20211014095441 | /src/oauth1_get_token.py | UTF-8 | 3,618 | 2.75 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env python3
#encoding:utf-8
# http://developer.hatena.ne.jp/ja/documents/auth/apis/oauth/consumer
# https://qiita.com/kosystem/items/7728e57c70fa2fbfe47c
#Temporary Credential Request URL https://www.hatena.com/oauth/initiate
#Resource Owner Authorization URL (PC) https://www.hatena.ne.jp/oauth/authorize
#Resource Owner Authorization URL (スマートフォン) https://www.hatena.ne.jp/touch/oauth/authorize
#Resource Owner Authorization URL (携帯電話) http://www.hatena.ne.jp/mobile/oauth/authorize
#Token Request URL https://www.hatena.com/oauth/token
import sys
from requests_oauthlib import OAuth1Session
from path import Path
from secret import Secret
import json
import urllib.parse
import webbrowser
import requests
from requests_oauthlib import OAuth1
class OAuth1Token:
    """Obtains a Hatena OAuth 1.0a access-token pair and caches it in secret.json."""
    @classmethod
    def from_json(cls, path, schema_path=None):
        """Load credentials from `path`; reuse a stored access token or run the flow."""
        secret = Secret.from_json(path, schema_path)
        if 'oauth_token' in secret and 'oauth_token_secret' in secret:
            print('AccessTokenは取得済みです。既存値を返します。', file=sys.stderr)
            return secret
        keys = ['oauth_consumer_key', 'oauth_consumer_secret']
        cls.__has_not_keys(path, secret, keys)
        return cls.from_str(*[secret[key] for key in keys], secret)
    @classmethod
    def __has_not_keys(cls, path, secret, keys):
        """Raise ValueError if any of `keys` is missing from `secret`."""
        for key in keys:
            if key not in secret:
                raise ValueError(f'[ERROR] {key} がありません。指定したファイルにセットしてください。: {path}')
    @classmethod
    def from_str(cls, client_key:str, client_secret:str, secret:dict=None) -> dict:
        """Run the OAuth dance with the given consumer key/secret and persist the result."""
        return cls.__write_token(*cls.__get_token(client_key, client_secret), secret)
    @classmethod
    def __get_token(self, consumer_key:str, consumer_secret:str) -> dict:
        """Interactive OAuth 1.0a flow: request token -> user PIN -> access token.

        Returns [oauth_token, oauth_token_secret]. NOTE(review): annotated as
        -> dict but actually returns a 2-element list.
        """
        # Obtain a request token (out-of-band / PIN-based callback).
        auth = OAuth1(consumer_key, consumer_secret, callback_uri='oob')
        res = requests.post('https://www.hatena.com/oauth/initiate', auth=auth)
        if not res.ok: res.raise_for_status()
        request_token = dict(urllib.parse.parse_qsl(res.text))
        print(request_token)
        # Open a browser on the OAuth authorization page; once the user grants
        # access, a PIN code is displayed.
        webbrowser.open(f"https://www.hatena.ne.jp/oauth/authorize?oauth_token={request_token['oauth_token']}")
        # Have the user type in the PIN code shown above.
        oauth_verifier = input("Please input PIN code:")
        auth = OAuth1(
            consumer_key,
            consumer_secret,
            request_token['oauth_token'],
            request_token['oauth_token_secret'],
            verifier=oauth_verifier)
        res = requests.post('https://www.hatena.com/oauth/token', auth=auth)
        if not res.ok: res.raise_for_status()
        access_token = dict(urllib.parse.parse_qsl(res.text))
        print(access_token)
        return [access_token['oauth_token'], access_token['oauth_token_secret']]
    @classmethod
    def __write_token(self, oauth_token:str, oauth_token_secret:str, secret:dict=None) -> dict:
        """Store the token pair into secret.json (next to this script) and return it."""
        if not secret: secret = Secret.from_json(Path.here('secret.json'))
        secret['oauth_token'] = oauth_token
        secret['oauth_token_secret'] = oauth_token_secret
        with open(Path.here('secret.json'), mode='w', encoding='utf-8') as f:
            json.dump(secret, f, ensure_ascii=False, indent=4)
        return secret
if __name__ == '__main__':
    # Resolve secret.json next to this script and obtain (or reuse) the token pair.
    secret = OAuth1Token.from_json(Path.here('secret.json'), Path.here('secret-schema.json'))
    print(secret)
| true |
c2f5f9b47b6820a97a5b439de1fdbf57979ae467 | Python | parth-jr/ML-AndrewNG-Data | /plot/plotData.py | UTF-8 | 521 | 3.046875 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
# Load the data file: two exam scores per row plus a 0/1 label column.
data = np.loadtxt("ex2data1.txt", delimiter=',', dtype='float')
X = data[:, 0:2]
y = data[:, 2]
m = len(y)
X = X.reshape((m, 2))
y = y.reshape((m, 1))

# Yellow circles for y == 0, black plus-signs for y == 1 (one plot call per
# point; plt.scatter would also work).
for i in range(m):
    if y[i] == 0:
        marker, colour = "o", '#FFDD3C'
    else:
        marker, colour = "+", 'black'
    plt.plot(X[i, 0], X[i, 1], marker, color=colour, markersize=10)

plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
plt.show()
| true |
59e653746c5d1c8cd033d6612d0d4f0c466c4112 | Python | chrisglencross/advent-of-code | /aoc2021/day17/day17.py | UTF-8 | 1,044 | 3.46875 | 3 | [] | no_license | #!/usr/bin/python3
# Advent of code 2021 day 17
# See https://adventofcode.com/2021/day/17
import re
# Parse the single input line "target area: x=a..b, y=c..d" into [a, b, c, d].
# NOTE(review): the '..' in the pattern are unescaped dots; they do match the
# literal ".." here, but r"\.\." would be stricter.
with open("input.txt") as f:
    bounds = [int(value) for value in re.match("^target area: x=([-0-9]+)..([-0-9]+), y=([-0-9]+)..([-0-9]+)$", f.readline().strip()).groups()]
def hit_target(fire_dx, fire_dy, bounds):
    """Simulate a probe launched with velocity (fire_dx, fire_dy).

    `bounds` is [x_min, x_max, y_min, y_max]. Returns the peak height reached
    if the probe ever lands inside the target area, otherwise None. Each step
    the probe moves by its velocity, drag pulls dx towards 0 and gravity
    decrements dy.
    """
    x_min, x_max, y_min, y_max = bounds
    x = y = 0
    dx, dy = fire_dx, fire_dy
    peak = 0
    while x <= x_max and y >= y_min:
        if dx == 0 and x < x_min:
            # Horizontal motion has stopped short of the target: can never hit.
            return None
        peak = max(peak, y)
        if x_min <= x <= x_max and y_min <= y <= y_max:
            return peak
        x, y = x + dx, y + dy
        dx = max(0, dx - 1)
        dy -= 1
    return None
# Brute-force every plausible launch velocity: part 1 is the best peak height,
# part 2 the number of distinct velocities that hit the target.
# (Stripped the dataset residue fused onto the final print, which broke the
# file's syntax.)
max_height = 0
count = 0
for fire_dx in range(0, bounds[1] + 1):
    for fire_dy in range(bounds[2], 1000):
        h = hit_target(fire_dx, fire_dy, bounds)
        if h is not None:
            max_height = max(max_height, h)
            count += 1
print(max_height)
print(count)
e66a1eb263074fd8f0f80957505b87a2497c91a9 | Python | sagar11010/PythonforEthicalHacking | /PyautoGuiTest.py | UTF-8 | 543 | 2.65625 | 3 | [] | no_license | import pyautogui
import time
pyautogui.FAILSAFE = False
def completeAction():
    """Pause for two seconds; the original alt-tab/click sequence is disabled below."""
    time.sleep(2)
    # Index Setup
    # pyautogui.hotkey('alt','tab')
    # time.sleep(1)
    # pyautogui.click() # Click on the program
    # time.sleep(1)
# NOTE(review): `i` and `process` are dead -- `i` is immediately rebound by
# the for loop below and `process` is never read.
i = 0
process = 10
time.sleep(3)
# Toggle between the two most recent windows with Alt+Tab, ten times,
# pressing the hotkey twice per iteration.
for i in range(10):
    time.sleep(1)
    pyautogui.hotkey('alt','tab')
    print("Rocking "+str(i))
    time.sleep(1)
    pyautogui.hotkey('alt','tab')
    print("Dashing "+str(i))
# Remnant of a disabled batch loop; note completeAction() takes no arguments,
# so the commented call below would not work as written.
accID = 10
accEnd = 20
# while accID < accEnd:
#     completeAction(accID)
#     accID = accID + 1
| true |
9f5b02bc2f617deae9254d67b9040795081bd41c | Python | Vanojx1/AdventOfCode2018 | /D8/part1.py | UTF-8 | 944 | 3.0625 | 3 | [
"MIT"
] | permissive | import re
# Read the whole puzzle input and extract every integer token.
# A context manager closes the file (the original leaked the handle), and the
# list comprehension replaces `map`, which on Python 3 would be a lazy
# iterator and break the index-based parsing below.
with open('input.txt', 'r') as puzzle_file:
    puzzleInput = puzzle_file.read()
rawTree = [int(n) for n in re.findall(r'\d+', puzzleInput)]
class Tree():
    """Parses the AoC day-8 license tree out of the module-level `rawTree`
    list and accumulates the sum of all metadata entries in `metaSum`.

    NOTE: `cursor` and `metaSum` are *class* attributes, so state is shared
    across every Tree() built in the same process (as in the original).
    """
    cursor = -1
    metaSum = 0

    class Node():
        def __init__(self, id=0):
            self.id = id
            self.tree = Tree
            self.startIndex = self.tree.next()   # index of the child-count header
            self.metaIndex = self.tree.next()    # index of the metadata-count header
            self.childs = self.getChilds()       # recursively consumes the stream
            self.meta = self.getMeta()
            # sum() replaces the Python-2-only builtin reduce().
            self.tree.metaSum += sum(self.meta)

        def getChilds(self):
            childNum = rawTree[self.startIndex]
            # Must be eager (list comprehension, not map) so the shared cursor
            # advances in document order on Python 3 as well.
            return [self.tree.Node(self.id + c + 1) for c in range(childNum)]

        def getMeta(self):
            metaNum = rawTree[self.metaIndex]
            return [rawTree[self.tree.next()] for _ in range(metaNum)]

    def __init__(self):
        self.root = self.Node()

    @classmethod
    def next(self):
        """Advance the shared stream cursor and return the new index."""
        self.cursor += 1
        return self.cursor
# Build the tree (which sums all metadata as a side effect) and report it.
# (Converted the Python-2 print statement to a call -- identical output on
# both versions -- and stripped the dataset residue fused onto the line.)
tree = Tree()
result = tree.metaSum
print('Result: %s' % result)
58cdf60cb7019d9eee16332576d5dfb6801c66f8 | Python | robbertvdzon/robotaansturing | /python/test4.py | UTF-8 | 1,736 | 3.0625 | 3 | [] | no_license | import time
from smbus import SMBus
from PCA9685 import PWM
# PWM frequency handed to PWM.setFreq() in setup().
fPWM = 50
i2c_address = 0x40 # (standard) adapt to your module
channel = 0 # adapt to your wiring
a = 8.5 # adapt to your servo
b = 2 # adapt to your servo
# Last commanded duty cycle per servo channel; kept in sync by moveTo().
current0 =6.393
current1 =5.908
current2 =5.985
current3 =2.514
def setup():
    """Create the global PWM driver on I2C bus 1 and set the PWM frequency."""
    global pwm
    bus = SMBus(1) # Raspberry Pi revision 2
    pwm = PWM(bus, i2c_address)
    pwm.setFreq(fPWM)
def setDirection(duty, ch):
    """Set the PWM duty cycle for servo channel `ch` (requires setup() first)."""
    pwm.setDuty(ch, duty)
def moveTo(duty0, duty1, duty2, duty3):
    """Glide all four servos from their current duty cycles to the targets.

    The move is split into 150 small steps so the arm moves smoothly. The
    module-level current0..current3 end up at the targets, and a one second
    pause lets the servos settle. Refactored: the original repeated the
    diff/step/update/setDirection code four times, once per channel.
    """
    global current0, current1, current2, current3
    steps = 150
    currents = [current0, current1, current2, current3]
    # Per-channel increment so every servo arrives after exactly `steps` steps.
    increments = [(target - cur) / steps
                  for target, cur in zip((duty0, duty1, duty2, duty3), currents)]
    for _ in range(steps):
        # Same ordering as the original: update all four currents, then issue
        # the four setDirection() writes in channel order 0..3.
        for ch in range(4):
            currents[ch] = currents[ch] + increments[ch]
        for ch in range(4):
            setDirection(currents[ch], ch)
    current0, current1, current2, current3 = currents
    time.sleep(1)
# Startup: initialise the PWM driver, then run a fixed choreography that
# raises and lowers the arm over four positions ("hoog"/"laag" is Dutch for
# high/low); each moveTo() glides from the previous pose.
print "starting"
setup()
moveTo (6.393 ,5.908 ,5.985, 2.514) # 4 hoog
moveTo (6.008 ,5.792 ,5.996 ,3.625) # 4 laag
moveTo (6.393 ,5.908 ,5.985,2.514) # 4 hoog
moveTo (7.088,5.104,5.936,2.273) # 3 hoog
moveTo (6.429, 5.096, 5.445, 3.239) # 3 laag
moveTo (7.088,5.104,5.936,2.273) # 3 hoog
moveTo (7.792, 4.547, 5.460, 2.677) # 2 hoog
moveTo (7.304 ,4.386 ,5.169 ,3.332) # 2 laag
moveTo (7.792, 4.547, 5.460, 2.677) # 2 hoog
moveTo (8.100, 5.155,3.809,4.652) # 1 hoog
moveTo (8.071, 4.868, 3.973, 5.414) # 1 laag
moveTo (8.100, 5.155,3.809,4.652) # 1 hoog
moveTo (6.393 ,5.908 ,5.985, 2.514) # 4 hoog
| true |
f9c5e2ea3408c333d07bea031eb17584634ff546 | Python | metinsenturk/ds620-motiondetection | /face-detect.py | UTF-8 | 1,882 | 3 | 3 | [] | no_license | import cv2 as cv
def detectAndDisplay(frame):
    """Detect faces (and eyes within each face) in `frame`, draw markers on a
    copy and show it. Uses the module-level face_cascade / eyes_cascade."""
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    gray = cv.equalizeHist(gray)
    # -- Detect faces on the equalized grayscale image
    detected_faces = face_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
    )
    for (fx, fy, fw, fh) in detected_faces:
        face_center = (fx + fw // 2, fy + fh // 2)
        # magenta ellipse around the face
        frame = cv.ellipse(frame, face_center, (fw // 2, fh // 2),
                           0, 0, 360, (255, 0, 255), 4)
        # -- In each face region, detect eyes and circle them in blue
        face_roi = gray[fy:fy + fh, fx:fx + fw]
        for (ex, ey, ew, eh) in eyes_cascade.detectMultiScale(face_roi):
            eye_center = (fx + ex + ew // 2, fy + ey + eh // 2)
            radius = int(round((ew + eh) * 0.25))
            frame = cv.circle(frame, eye_center, radius, (255, 0, 0), 4)
    cv.imshow('Capture - Face detection', frame)
def init_video():
    """Capture frames from the default camera and run face/eye detection
    on each until the stream ends or the user presses 'q'.
    """
    cap = cv.VideoCapture(0)
    # BUG FIX: the original tested `not cap.isOpened` -- a bound method is
    # always truthy, so the error branch could never fire.  Call it.
    if not cap.isOpened():
        print('--(!)Error opening video capture')
        exit(0)
    while True:
        ret, frame = cap.read()
        if frame is None:
            print('--(!) No captured frame -- Break!')
            break
        detectAndDisplay(frame)
        # waitKey(1) also lets the HighGUI window repaint between frames.
        if cv.waitKey(1) == ord('q'):
            break
def init_photo(path_to_photo):
    """Load an image from disk, run face detection on it, and block until
    a key is pressed.
    """
    frame = cv.imread(path_to_photo)
    # cv.imread returns None for an unreadable path; detectAndDisplay would
    # then crash inside cvtColor -- fail early with a clear message instead.
    # (Also removed a grayscale conversion whose result was never used.)
    if frame is None:
        print('--(!)Error loading image: %s' % path_to_photo)
        return
    detectAndDisplay(frame)
    cv.waitKey(0)
if __name__ == "__main__":
    # Load the Haar cascade models once at start-up; abort if either file
    # is missing so the detection functions never see an empty classifier.
    face_cascade = cv.CascadeClassifier()
    eyes_cascade = cv.CascadeClassifier()
    if not face_cascade.load('data/haarcascade_frontalface_default.xml'):
        print('--(!)Error loading face cascade')
        exit(0)
    if not eyes_cascade.load('data/haarcascade_eye.xml'):
        print('--(!)Error loading eyes cascade')
        exit(0)
    # init_photo('files/messi5.jpg')
    init_video() | true
eedf261cb71d214d77f81aff31325c7722481ef3 | Python | alexander92993/ATU_coordenadas | /ATU_puntos.py | UTF-8 | 1,196 | 2.5625 | 3 | [] | no_license | import requests
import pandas as pd
# Scrape bus-stop ("paradero") records from the ATU COVID endpoint and save
# one Excel sheet per stop-type code (PARTIPCOD 1..3) into BD_final.xlsx.
writer = pd.ExcelWriter('BD_final.xlsx', engine='openpyxl')
url = "https://sistemas.atu.gob.pe/paraderosCOVID/Home/traer_datos"
headers2 = {
    "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36",
}
for xx in range(1,4):
    payload = {'PARTIPCOD': xx}
    res = requests.post(url, data=payload, headers = headers2)
    # The endpoint returns a JSON array of stop records.
    r_dict = res.json()
    tamaño_dict = len(r_dict)
    # One column list per field of interest.
    Parnom = []
    Disnom = []
    Nivel = []
    Latitud = []
    Longitud = []
    Tipo = []
    for elem in range(tamaño_dict):
        Parnom.append(r_dict[elem]['PARNOM'])
        Disnom.append(r_dict[elem]['DISNOM'])
        Nivel.append(r_dict[elem]['NIVEL'])
        Latitud.append(r_dict[elem]['LATITUD'])
        Longitud.append(r_dict[elem]['LONGITUD'])
        Tipo.append(r_dict[elem]['TIPODET'])
    df = pd.DataFrame(list(zip(Parnom,Disnom,Nivel,Latitud,Longitud,Tipo)), columns = ['Nombre de paradero','Distrito','Nivel','Latitud','Longitud','Tipo'])
    # Sheet name is the numeric PARTIPCOD code.
    df.to_excel(writer, index=False, sheet_name=str(xx))
# NOTE(review): ExcelWriter.save() is deprecated in newer pandas in favour
# of close() / the context-manager form -- confirm the pinned pandas version.
writer.save()
| true |
9176c1c0feb97b4fab2a95ed9a8b1b1a9525f93b | Python | codiez/mobile.sniffer | /mobile/sniffer/chain.py | UTF-8 | 1,601 | 2.671875 | 3 | [] | no_license | """
Chained user agent detection.
Use several sources to sniff UAs for better accuracy.
"""
__author__ = "Mikko Ohtamaa <mikko.ohtamaa@twinapex.fi>"
__copyright__ = "2009 Twinapex Research"
__license__ = "GPL"
__docformat__ = "epytext"
import base
class ChainedSniffer(base.Sniffer):
    """ Chained sniffer.
    Use several sources to sniff UAs for better accuracy.
    """
    def __init__(self, sniffers):
        """
        @param sniffers: List of Sniffer instances, in priority order
            (earlier sniffers win -- see UserAgent.get_with_provider)
        """
        self.sniffers = sniffers
    def sniff(self, request):
        """ Get a multi-backend UserAgent property multiplexer.
        @return: UserAgent that lazily queries every backend sniffer
        """
        return UserAgent(self.sniffers, request)
class UserAgent(base.UserAgent):
    """ Multiplexing user agent.

    Queries every configured sniffer in order and answers each property
    request with the first non-None value found.
    """

    def __init__(self, sniffers, request):
        self.sniffers = sniffers
        self.request = request

    def get(self, name):
        """ Look *name* up across all backends; return only the value. """
        _, value = self.get_with_provider(name)
        return value

    def get_with_provider(self, name):
        """ Look *name* up and report which sniffer supplied it.

        @return: tuple(sniffer instance, value), or (None, None) when no
            backend knows the property
        """
        for sniffer in self.sniffers:
            agent = sniffer.sniff(self.request)
            if not agent:
                continue
            value = agent.get(name)
            if value is not None:
                return sniffer, value
        return None, None
| true |
a5368a3190c24e85f10ac57aeb503ede59a2263c | Python | dirkakrid/importceptor | /tests/test_importceptor.py | UTF-8 | 4,357 | 2.84375 | 3 | [
"MIT"
] | permissive | # coding: utf-8
"""
Tests for `importceptor` module.
"""
from __future__ import unicode_literals
import unittest
import sys
from types import ModuleType
from importceptor import importceptor as ic
class TestImportceptor(unittest.TestCase):
    """Unit tests for importceptor.Importceptor: which imports get
    intercepted, how replacement objects and fully-qualified attribute
    names are resolved, and strict-mode / relative-import behaviour.
    """
    # Sentinel used as a stand-in replacement for modules and attributes.
    marker = object()
    def setUp(self):
        # Note: this is most probably not necessary, but let's make sure
        mods = ['mod1', 'mod2', 'mod3', 'mod4', 'textwrap']
        for mod in mods:
            sys.modules.pop(mod, None)
    def test_1(self):
        """
        Imports inside a simple module are intercepted properly
        """
        with ic.Importceptor({'os': self.marker}):
            import mod1
        assert mod1.os is self.marker
    def test_2(self):
        """
        If an import is not defined in the replacements, it will be imported in the normal way
        """
        with ic.Importceptor({}):
            import mod1
        import os
        assert mod1.os is os
    def test_3(self):
        """
        Modules directly under the decorator are actually imported and not intercepted.
        It would be a non-sense to intercept the very first imports
        """
        with ic.Importceptor({'mod2': self.marker}):
            import mod2
        assert mod2 is not self.marker
        assert isinstance(mod2, ModuleType)
    def test_4(self):
        """
        All the modules directly imported under the context manager are imported and not intercepted
        """
        with ic.Importceptor({'mod1': None, 'mod2': None, 'mod3': None}):
            import mod1
            import mod2
            import mod3
        for mod in [mod1, mod2, mod3]:
            assert isinstance(mod, ModuleType)
    def test_5(self):
        """
        from ... import ... syntax also works
        """
        with ic.Importceptor({'os': self.marker}):
            from mod1 import os
        assert os is self.marker
    def test_6(self):
        """
        If a module has "from mod import obj" statements, if the replacement has the attributes,
        those are properly returned
        """
        mock = ic.Bunch(path=object(), defpath=object())
        with ic.Importceptor({'os': mock}):
            import mod4
        assert mod4.path is mock.path
        assert mod4.defpath is mock.defpath
    def test_7(self):
        """
        If a module has "from mod import obj" statements, and the fully qualified python name of **all** the objects
        from `mod` to import is defined in the replacements mapping, then the object passed in the replacements
        will be used.
        """
        path = object()
        defpath = object()
        with ic.Importceptor({'os.path': path, 'os.defpath': defpath}):
            import mod4
        assert mod4.path is path
        assert mod4.defpath is defpath
    def test_8(self):
        """
        If a module has "from mod import obj" statements, and the FQPN is provided for some object, but not
        for some others, then the explicitly defined objects will be imported, and the rest will be
        read from the module (may or may not be real module depending on strict mode).
        """
        fake_os = ic.Bunch(path=object(), defpath=object())
        with ic.Importceptor({'os.path': self.marker, 'os': fake_os}):
            import mod4
        assert mod4.path is self.marker
        assert mod4.defpath is fake_os.defpath
    def test_9(self):
        """
        On strict mode, if a module is not passed, an exception will be raised
        """
        with self.assertRaises(KeyError):
            with ic.Importceptor({}, strict=True):
                # mod1 imports os
                import mod1
    def test_12(self):
        """
        Import statements "from ... import *" work the expected way
        """
        # __builtins__.__import__
        pass
    def test_13(self):
        """
        Relative imports work fine
        """
        with ic.Importceptor({'pack1.mod11': self.marker}):
            from mod6 import mod11
        assert mod11 is self.marker
    def test_14(self):
        """
        Import from packages works as well
        """
        with ic.Importceptor({'pack1.mod11': self.marker}):
            from mod5 import mod11
        assert mod11 is self.marker
if __name__ == '__main__':  # allow running this test module directly
    unittest.main() | true
0091fbde37860f6ffd34ece1934b44475cdcd5c3 | Python | InSeong-So/Algorithm | /python/problem/prgrms/week00-pretest/01_완주하지못한선수.py | UTF-8 | 1,168 | 3.640625 | 4 | [] | no_license | # 원소의 수를 세어 주는 라이브러리
from collections import Counter # O(n)
# 단순 반복으로 풀기 : 효율성 테스트 통과 X
def solution(participant, completion):
# O(n^2)
for i in completion: # O(n)
participant.remove(i) # O(n)
return str(participant[0])
# 정렬을 사용하여 풀기 : 효율성 테스트 통과 O
def solution(participant, completion):
participant.sort() # 정렬은 시간복잡도가 O(n log n)이다.
completion.sort()
# Zip: 원소를 하나로 묶는 함수
for p, c in zip(participant, completion): # O(n)
if p != c: # 만약 다른게 나온다면 그게 완주하지 못한 사람임
return p
return participant[-1] # 마지막까지 간다면 그게 완주하지 못한 사람임
# 파이썬의 built-in 함수를 이용해서 풀기 : 효율성 테스트 통과 O
def solution(participant, completion):
result = Counter(participant) - Counter(completion)
return list(result.keys())[0]
# 더 간략하게 줄이기
def solution(participant, completion):
return [i for i in (Counter(participant) - Counter(completion)).keys()][0]
| true |
d29c2a5c7a0692f18d3aa52f9329725211fcee2e | Python | sandeepkumar11/Face-Mask-Detection-Recognition | /Face mask project.py | UTF-8 | 2,820 | 3.125 | 3 | [] | no_license | # Convolutional Neural Network
# Importing the libraries
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.callbacks import TensorBoard, ModelCheckpoint
# Part 1 - Building the classifier
# Binary face-mask classifier: two conv/pool stages, one dense hidden
# layer, and a sigmoid output (mask / no mask) over 64x64 RGB inputs.
# Initialising the classifier
classifier = Sequential()
# Step 1 - Convolution
# NOTE(review): Convolution2D(32,3,3, ...) is Keras-1 positional style;
# Keras 2 spells this Conv2D(32, (3, 3), ...) -- confirm the Keras version.
classifier.add(Convolution2D(32,3,3, activation="relu", input_shape=[64, 64, 3]))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2,2)))
# Adding a second convolutional layer
classifier.add(Convolution2D(32,3,3, activation="relu"))
classifier.add(MaxPooling2D(pool_size=(2,2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full Connection
classifier.add(Dense(units=128, activation='relu'))
# Step 5 - Output Layer
classifier.add(Dense(units=1, activation='sigmoid'))
# Part 2 - Training the classifier
# Compiling the classifier
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Part 3 - Fitting the CNN to the image
from keras.preprocessing.image import ImageDataGenerator
# Generating images for the Training set (with augmentation)
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
# Generating images for the Test set (rescale only, no augmentation)
test_datagen = ImageDataGenerator(rescale = 1./255)
# Creating the Training set
training_set = train_datagen.flow_from_directory('train',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
# Creating the Test set
test_set = test_datagen.flow_from_directory('test',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')
# Training the classifier on the Training set and evaluating it on the Test set
# NOTE(review): the model is trained TWICE below -- a 25-epoch run without
# checkpointing, then a second 20-epoch run with ModelCheckpoint.  Confirm
# the double training is intentional and not a leftover from editing.
classifier.fit_generator(training_set,
                         steps_per_epoch = 334,
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 334)
checkpoint = ModelCheckpoint('model2-{epoch:03d}.model',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')
classifier.fit_generator(training_set,
                         steps_per_epoch=40,
                         epochs=20,
                         validation_data=test_set,
                         callbacks=[checkpoint])
| true |
4d05c6f1968155cffbfdd102ffecf4690fd98f58 | Python | Nirol/LeetCodeTests | /Algorithms_questions/ez/count_primes.py | UTF-8 | 609 | 3.5625 | 4 | [] | no_license | import math
class Solution:
    def countPrimes(self, n: int) -> int:
        """Count the primes in [0, n), i.e. all primes strictly below n.

        Replaced the per-number trial-division loop (O(n*sqrt(n))) with a
        Sieve of Eratosthenes (O(n log log n)); results are unchanged.
        """
        if n < 3:
            # No primes below 2 (and range(n) excludes n itself).
            return 0
        sieve = [True] * n
        sieve[0] = sieve[1] = False
        for p in range(2, int(math.sqrt(n - 1)) + 1):
            if sieve[p]:
                # Mark every multiple of p from p*p upward as composite.
                sieve[p*p:n:p] = [False] * len(range(p*p, n, p))
        return sum(sieve)

    def is_prime(n):
        """Trial-division primality test for a single integer n.

        Kept for callers that use Solution.is_prime(x) directly
        (note: no self parameter, so it must be called on the class).
        """
        if n == 2:
            return True
        if n % 2 == 0 or n <= 1:
            return False
        sqr = int(math.sqrt(n)) + 1
        # Odd divisors only; evens were ruled out above.
        for divisor in range(3, sqr, 2):
            if n % divisor == 0:
                return False
        return True
if __name__ == "__main__":
    x = Solution()
    # Primes below 10 are 2, 3, 5, 7 -- expected output: 4
    ans = x.countPrimes(10)
    print(ans)
| true |
e6a7ad3b4c557568332758b56c25e18ca6f08620 | Python | mphuc/linux_c | /python/tt.py | UTF-8 | 84 | 2.65625 | 3 | [] | no_license |
# Tiny Python 2 demo: build a dict and print each key.
aaa = {}
aaa["123"] = 1
aaa["234"] = 2
# Iterates over the dict's keys (Python 2 print statement).
for keystr in aaa.keys():
    print keystr
| true |
be13642c7fb0d4dc2ef10db4fd2fd9a6f56b0882 | Python | elpablo/Design-Pattern-Book | /Python/3-Behavioural/Iterator/DemoIterator.py | UTF-8 | 420 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env python3
from Container import Container
def main():
    """Demonstrate the Iterator pattern: fill a Container, then walk it
    with the iterator it creates, printing each stored item.
    """
    container = Container()
    for value in (1, 2, 3, 4):
        container.push(value)
    it = container.create_iterator()
    while it.has_next():
        print("Item: %d" % it.current_item())
        it.next()
if __name__ == '__main__':
    main()
# The triple-quoted block below documents the expected console output;
# at module level it is a harmless no-op expression statement.
"""
Output:
Item: 1
Item: 2
Item: 3
Item: 4
"""
| true |
23aef07083cf0498e02af4ea523a704325ceffcb | Python | harry595/BaekJoon | /Programmers/42577.py | UTF-8 | 224 | 2.78125 | 3 | [] | no_license | def solution(phone_book):
    # After lexicographic sorting, any number that is a prefix of another
    # sorts immediately before some number it prefixes, so checking each
    # adjacent pair is sufficient.
    phone_book.sort()
    for i in range(1,len(phone_book)):
        before_len=len(phone_book[i-1])
        # Previous entry is a prefix of the current one -> inconsistent book.
        if(phone_book[i][:before_len]==phone_book[i-1]):
            return False
    return True | true
5d29206c954fb6955104e24834a770bafd93e4bf | Python | soberoy1112/Lintcode | /official_answer/test.py | UTF-8 | 724 | 3.40625 | 3 | [] | no_license | class Solution(object):
# @param nestedList a list, each element in the list
# can be a list or integer, for example [1,2,[1,2]]
# @return {int[]} a list of integer
def flatten(self, nestedList):
# Write your code here
if isinstance(nestedList, int):
return nestedList
S = str(nestedList)
s = ""
flag = False
L = []
for i in S:
if i == "-" or i.isdigit():
s += i
flag = True
else:
if flag is True:
L.append(int(s))
s = ""
flag = False
return L
x = Solution()
# Demo call; expected output: [1, 2, 1, 22]
print(x.flatten([1,2,[1,22]])) | true
985b4e3d3fbc930e2f7772643fa818d751616470 | Python | BenjaminTMilnes/UOWFourthYearProjectOriginal | /Code Archive/Analysis_116_2013-03-07/ProgressMeter.py | UTF-8 | 690 | 3.46875 | 3 | [] | no_license | # Progress Meter
#
# Modified 2013.01.16 22:50
# Last Modified 2013.01.16 23:16
#
import math
class ProgressMeter:
    """Console progress meter (Python 2): prints the whole-number
    percentage each time it advances, and "100%"/"Complete" at the end.
    """
    def __init__(self, EndCount):
        # EndCount is the count that corresponds to 100%.
        self.Count = 0
        self.EndCount = EndCount
    def Update(self, Count):
        """Record the new Count and print progress when it crosses a whole
        percent; returns True when something was printed.
        """
        self.Count = Count
        NewCount = False
        if (self.Count < self.EndCount):
            # Floored percentage for this count and the next one; only
            # report when the next count would reach a new whole percent.
            Progress1 = int(math.floor(100 * float(self.Count) / float(self.EndCount)))
            Progress2 = int(math.floor(100 * float(self.Count + 1) / float(self.EndCount)))
            if (Progress2 > Progress1):
                NewCount = True
                print str(Progress1), "%"
        elif (self.Count == self.EndCount):
            # NOTE(review): this branch fires on every call with
            # Count == EndCount, so "Complete" can be printed repeatedly.
            NewCount = True
            print "100%"
            print "Complete"
        return NewCount
| true |
5703ae0e68909e912a658fa7e64fb43cff545d12 | Python | Guessan/python01 | /Assignments/Answer_8.4.py | UTF-8 | 654 | 4.40625 | 4 | [] | no_license | #8.4
#Open the file romeo.txt and read it line by line. For each line, split the line into a list of words using the split() method.
#The program should build a list of words.
#For each word on each line check to see if the word is already in the list and if not append it to the list.
#When the program completes, sort and print the resulting words in alphabetical order.
#You can download the sample data at http://www.pythonlearn.com/code/romeo.txt
#Begin to write the program with the following code below:
fname = raw_input("Enter file name: ")
fh = open(fname)
fh = fh.split()
lst = list()
fh.append(lst)
for wrd in lst :
print wrd.rstrip()
| true |
013a604258057e07a3ff4c96f35e011f6bb5d73d | Python | k201zzang/ComputationalPhysics | /F5.4.py | UTF-8 | 547 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 12:01:32 2013
@author: akels
"""
from __future__ import division, print_function
from os import sys
sys.path.append('cpresources')
#from pylab import *
from cmath import exp
from math import factorial,pi
f = lambda x: exp(2*x)  # sample analytic function; its m-th derivative at 0 is 2**m
def derivative(fp, m, z0=0):
    """Return the m-th derivative of fp at z0 via Cauchy's integral formula.

    f^(m)(z0) = m!/(2*pi*i) * contour integral of f(z)/(z - z0)^(m+1) dz
    over |z - z0| = 1, evaluated with an N-point trapezoidal rule on the
    unit circle (spectrally accurate for analytic fp).  Returns a complex
    number; take .real / abs() as appropriate.
    """
    # Recentre so the unit circle around the origin samples fp on the unit
    # circle around z0.  BUG FIX: the original used fp(z - z0), which
    # evaluates the derivative at -z0 instead of z0; behaviour for the
    # default z0 = 0 is unchanged.
    f = lambda z: fp(z + z0)
    N = 10000
    s = 0
    for k in range(N):
        # k-th root of unity on the contour; the exp(-i*2*pi*k*m/N) factor
        # extracts the m-th Fourier coefficient of f on the circle.
        z_k = exp(1j*2*pi*k/N)
        s += f(z_k)*exp(-1j*2*pi*k*m/N)
    return s*factorial(m)/N
# Print the first ten derivatives of f at 0; expected values are 2**m.
for m in range(10):
	s = derivative(f,m)
	print('m={}\t I={}'.format(m,s)) | true
e442e9a5c088aaf0378d022abd6fc0fc588f9d6e | Python | Dimantarian/cryptoTradingBot | /interface/autocomplete_widget.py | UTF-8 | 3,620 | 3.453125 | 3 | [
"MIT"
] | permissive | import tkinter as tk
import typing
class Autocomplete(tk.Entry):
    """A tk.Entry with an attached drop-down Listbox that suggests symbols
    whose names start with the typed (uppercased) text.  Up/Down move the
    highlight, Right accepts the highlighted suggestion.
    """
    def __init__(self, symbols: typing.List[str], *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._symbols = symbols
        # Annotation only -- the Listbox widget is created lazily in _changed().
        self._lb: tk.Listbox
        self._lb_open = False # Used to know whether the Listbox is already open or not
        self.bind("<Up>", self._up_down)
        self.bind("<Down>", self._up_down)
        self.bind("<Right>", self._select)
        self._var = tk.StringVar()
        self.configure(textvariable=self._var) # Links the tk.Entry content to a StringVar()
        # NOTE(review): trace("w", ...) is the legacy Tcl trace API;
        # trace_add("write", ...) is the modern spelling -- confirm the
        # minimum supported Python/Tk version before changing it.
        self._var.trace("w", self._changed) # When the self._var value changes
    def _changed(self, var_name: str, index: str, mode: str):
        """
        Open a Listbox when the tk.Entry content changes and get a list of symbols matching this content
        :param var_name:
        :param index:
        :param mode:
        :return:
        """
        self._var.set(self._var.get().upper()) # Set the content of the tk.Entry widget to uppercase as you type
        if self._var.get() == "": # Closes the Listbox when the tk.Entry is empty
            if self._lb_open:
                self._lb.destroy()
                self._lb_open = False
        else:
            if not self._lb_open:
                self._lb = tk.Listbox(height=8) # Limits the number of items displayed in the Listbox
                self._lb.place(x=self.winfo_x() + self.winfo_width(), y=self.winfo_y() + self.winfo_height() + 40)
                self._lb_open = True
            # Finds symbols that start with the characters that you typed in the tk.Entry widget
            symbols_matched = [symbol for symbol in self._symbols if symbol.startswith(self._var.get())]
            if len(symbols_matched) > 0:
                # Clear previous suggestions; the Listbox may already be gone
                # if Tk destroyed it, hence the TclError guard.
                try:
                    self._lb.delete(0, tk.END)
                except tk.TclError:
                    pass
                for symbol in symbols_matched[:8]: # Takes only the first 8 elements of the list to match the Listbox
                    self._lb.insert(tk.END, symbol)
            else: # If no match, closes the Listbox if it was open
                if self._lb_open:
                    self._lb.destroy()
                    self._lb_open = False
    def _select(self, event: tk.Event):
        """
        Triggered with when the keyboard Right arrow is pressed, set the current Listbox item as a value of the
        tk.Entry widget.
        :param event:
        :return:
        """
        if self._lb_open:
            self._var.set(self._lb.get(tk.ACTIVE))
            self._lb.destroy()
            self._lb_open = False
            # Move the text cursor to the end of the accepted symbol.
            self.icursor(tk.END)
    def _up_down(self, event: tk.Event):
        """
        Move the Listbox cursor up or down depending on the keyboard key that was pressed.
        :param event:
        :return:
        """
        if self._lb_open:
            if self._lb.curselection() == (): # No Listbox item selected yet
                index = -1
            else:
                index = self._lb.curselection()[0]
            lb_size = self._lb.size()
            if index > 0 and event.keysym == "Up":
                self._lb.select_clear(first=index)
                index = str(index - 1)
                self._lb.selection_set(first=index)
                self._lb.activate(index)
            elif index < lb_size - 1 and event.keysym == "Down":
                self._lb.select_clear(first=index)
                index = str(index + 1)
                self._lb.selection_set(first=index)
                self._lb.activate(index)
| true |
9d490c47de55fd614fde4380308e5f99a40cb7d9 | Python | zhuyunning/zhuyunning | /Programming for Analytics/Assignment 3.py | UTF-8 | 14,261 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 9 13:51:08 2016
@author: nico
Programming Assignment 03: Web Scraping
Group 5
Yunning Zhu G39659638
Daniel Chen G25195689
Xinyi Wang G44230350
Tingting Ju
Abhinav Chandel G33895000
"""
#Question 1
#Mexican Restaurant
from bs4 import BeautifulSoup as bs
import urllib.request
import pandas as pd
import re
def step1():
    """Scrape up to 75 Yelp result pages for Mexican restaurants in
    Washington, DC; parse name, address, phone, review count, rating and
    price per listing; write the table to Mexican.csv and return it as a
    DataFrame.  Missing fields are recorded as the string 'NaN'.
    """
    rooturl="https://www.yelp.com/search?find_desc=mexican+food&find_loc=Washington,+DC"
    links=[]
    lists=[]
    # Yelp paginates with a &start= offset in steps of 10.
    for i in range(0,750,10):
        i=str(i)
        nexturl=rooturl+"&start="+i
        links.append(nexturl)
    for link in links:
        url=link
        request = urllib.request.Request(url)
        response = urllib.request.urlopen(request)
        data=response.read()
        response.close()
        soup=bs(data,"html.parser")
        each=soup.find_all('li',class_="regular-search-result")
        for i in each:
            x=[]
            #name of restaurants
            name=i.find('a',class_="biz-name js-analytics-click")
            name=name.getText().strip()
            x.append(name)
            #all address
            # Parse street/city/state/zip out of the raw <address> HTML with
            # regexes.  NOTE(review): the street-missing and street-present
            # branches below duplicate the same city/state/zip logic.
            addressall=i.find('address')
            addressall=str(addressall)
            street=re.search('\n (.+?)<br>',addressall)
            if street is None:
                x.append('NaN') #street
                city = re.search('<br>(.+?),',addressall)
                if city is None:
                    x.append('NaN')#city
                    x.append('NaN') #state
                    x.append('NaN') #zip
                elif city is not None:
                    city = re.search('<br>(.+?),',addressall).group(1)
                    x.append(city)
                    state = re.search(', (.+?) ',addressall)
                    if state is not None:
                        state = state.group(1)
                        x.append(state)
                        zip = re.search(' (\d{5})\n',addressall)
                        if zip is not None:
                            zip = zip.group(1)
                            x.append(zip)
                        elif zip is None:
                            x.append('NaN')
                    elif state is None:
                        x.append('NaN')
                        x.append('NaN')
            elif street is not None:
                street = re.search('\n (.+?)<br>',addressall).group(1)
                x.append(street)
                city = re.search('<br>(.+?),',addressall).group(1)
                city = re.search('<br>(.+?),',addressall)
                if city is None:
                    x.append('NaN') #city
                    x.append('NaN') #state
                    x.append('NaN') #zip
                elif city is not None:
                    city = re.search('<br>(.+?),',addressall).group(1)
                    x.append(city)
                    state = re.search(', (.+?) ',addressall)
                    if state is not None:
                        state = state.group(1)
                        x.append(state)
                        zip = re.search(' (\d{5})\n',addressall)
                        if zip is not None:
                            zip = zip.group(1)
                            x.append(zip)
                        elif zip is None:
                            x.append('NaN')
                    elif state is None:
                        x.append('NaN')
                        x.append('NaN')
            #phone
            phone=i.find('span',class_="biz-phone")
            if phone is None:
                x.append("NaN")
            else:
                phone=phone.getText()
                # Slice off the surrounding whitespace/prefix in the span text.
                phone=phone[9:23]
                x.append(phone)
            #number of reviews
            views=i.find('span',class_='review-count rating-qualifier')
            if views is None:
                x.append("NaN")
            else:
                views=views.getText().strip().split()
                x.append(int(views[0]))
            #Rate
            rate=i.find('i')
            if rate is None:
                x.append("NaN")
            else:
                # The star rating is the leading "X.Y" of the icon's title.
                rate=rate['title']
                rate=rate[0:3]
                rate=float(rate)
                x.append(rate)
            #price
            price=i.find("span",class_="business-attribute price-range")
            if price is None:
                x.append("NaN")
            if price is not None:
                price=price.getText()
                x.append(price)
            #price range
            # Map the $..$$$$ symbols onto a numeric scale of 10..40.
            if price =="$":
                x.append(10)
            if price =="$$":
                x.append(20)
            if price =="$$$":
                x.append(30)
            if price =="$$$$":
                x.append(40)
            lists.append(x)
    #print(lists)
    headings = ['Name', 'StreetAddress','City','State','Zip','Phone','Number of Reviews','Rating','Price','Price Range']
    df = pd.DataFrame(lists, columns=headings)
    df.to_csv('Mexican.csv')
    return df
#Chinese Restaurant
# NOTE(review): this section duplicates step1() for Chinese restaurants but
# appears to have lost its enclosing `def` -- the code sits one indentation
# level shallower than step1's body, and the trailing `return df` is outside
# any function, which is a SyntaxError at module level.  It was presumably a
# notebook cell / a step1-like function; restore the wrapper before running.
rooturl="https://www.yelp.com/search?find_desc=chinese+food&find_loc=Washington,+DC"
links=[]
lists=[]
# Yelp paginates with a &start= offset in steps of 10.
for i in range(0,990,10):
    i=str(i)
    nexturl=rooturl+"&start="+i
    links.append(nexturl)
for link in links:
    url=link
    request = urllib.request.Request(url)
    response = urllib.request.urlopen(request)
    data=response.read()
    response.close()
    soup=bs(data,"html.parser")
    each=soup.find_all('li',class_="regular-search-result")
    for i in each:
        x=[]
        #name of restaurants
        name=i.find('a',class_="biz-name js-analytics-click")
        name=name.getText().strip()
        x.append(name)
        #all address
        # Same regex-based street/city/state/zip parsing as step1().
        addressall=i.find('address')
        addressall=str(addressall)
        street=re.search('\n (.+?)<br>',addressall)
        if street is None:
            x.append('NaN') #street
            city = re.search('<br>(.+?),',addressall)
            if city is None:
                x.append('NaN')#city
                x.append('NaN') #state
                x.append('NaN') #zip
            elif city is not None:
                city = re.search('<br>(.+?),',addressall).group(1)
                x.append(city)
                state = re.search(', (.+?) ',addressall)
                if state is not None:
                    state = state.group(1)
                    x.append(state)
                    zip = re.search(' (\d{5})\n',addressall)
                    if zip is not None:
                        zip = zip.group(1)
                        x.append(zip)
                    elif zip is None:
                        x.append('NaN')
                elif state is None:
                    x.append('NaN')
                    x.append('NaN')
        elif street is not None:
            street = re.search('\n (.+?)<br>',addressall).group(1)
            x.append(street)
            city = re.search('<br>(.+?),',addressall).group(1)
            city = re.search('<br>(.+?),',addressall)
            if city is None:
                x.append('NaN') #city
                x.append('NaN') #state
                x.append('NaN') #zip
            elif city is not None:
                city = re.search('<br>(.+?),',addressall).group(1)
                x.append(city)
                state = re.search(', (.+?) ',addressall)
                if state is not None:
                    state = state.group(1)
                    x.append(state)
                    zip = re.search(' (\d{5})\n',addressall)
                    if zip is not None:
                        zip = zip.group(1)
                        x.append(zip)
                    elif zip is None:
                        x.append('NaN')
                elif state is None:
                    x.append('NaN')
                    x.append('NaN')
        #phone
        phone=i.find('span',class_="biz-phone")
        if phone is None:
            x.append("NaN")
        else:
            phone=phone.getText()
            phone=phone[9:23]
            x.append(phone)
        #number of reviews
        views=i.find('span',class_='review-count rating-qualifier')
        if views is None:
            x.append("NaN")
        else:
            views=views.getText().strip().split()
            x.append(int(views[0]))
        #Rate
        rate=i.find('i')
        if rate is None:
            x.append("NaN")
        else:
            rate=rate['title']
            rate=rate[0:3]
            rate=float(rate)
            x.append(rate)
        #price
        price=i.find("span",class_="business-attribute price-range")
        if price is None:
            x.append("NaN")
        if price is not None:
            price=price.getText()
            x.append(price)
        #price range
        # Map the $..$$$$ symbols onto a numeric scale of 10..40.
        if price =="$":
            x.append(10)
        if price =="$$":
            x.append(20)
        if price =="$$$":
            x.append(30)
        if price =="$$$$":
            x.append(40)
        lists.append(x)
    #print(lists)
    headings = ['Name', 'StreetAddress','City','State','Zip','Phone','Number of Reviews','Rating','Price','Price Range']
    df = pd.DataFrame(lists, columns=headings)
    df.to_csv('Chinese.csv')
    return df
#Question 2
#Histogram
import matplotlib.pyplot as plt
def step2():
    """Draw and save a histogram of the Rating column for each cuisine's
    CSV (Mexican, then Chinese), 8 bins each, as PDF files.
    """
    specs = [
        ('Mexican.csv', 'pink', 'Mexican Food Rating', 'histRatingMexican.pdf'),
        ('Chinese.csv', 'blue', 'Chinese Food Rating', 'histRatingChinese.pdf'),
    ]
    for csv_file, color, title, outfile in specs:
        ratings = pd.read_csv(csv_file)['Rating']
        ratings.hist(bins=8, color=color)
        plt.xlabel('Rating')
        plt.title(title)
        plt.savefig(outfile)
        plt.show()
    return
#Question 3
import numpy as np
def _plot_relationship(fig_num, csv_file, x_col, title, x_label, out_file,
                       point_style, line_style):
    """Scatter x_col vs. Rating from csv_file (NaN rows dropped) with a
    degree-1 least-squares fit line, then save the figure as out_file.
    """
    plt.figure(fig_num)
    data = pd.read_csv(csv_file).dropna()
    x = data[x_col]
    y = data['Rating']
    # np.polyfit returns the slope/intercept; poly1d makes it callable.
    line = np.poly1d(np.polyfit(x, y, 1))
    plt.plot(x, y, point_style, x, line(x), line_style)
    plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel('Rating')
    plt.savefig(out_file)
    plt.show()

def step3():
    """Plot Rating against Number of Reviews and against Price Range for
    both cuisines (four figures, saved as MXY2/MXY1/CXY2/CXY1 PDFs).

    The four previously copy-pasted plotting sections are factored into
    _plot_relationship; figure numbers, titles, styles and output file
    names are unchanged.
    """
    _plot_relationship(1, "Mexican.csv", 'Number of Reviews',
                       'Relationship between Mexican Rating and Number of Reviews',
                       'Number of Reviews', 'MXY2.pdf', 'bo', '--r')
    _plot_relationship(2, "Mexican.csv", 'Price Range',
                       'Relationship between Mexican Rating(Y) and Price Range (X)',
                       'Price range', 'MXY1.pdf', 'yo', '--b')
    _plot_relationship(3, "Chinese.csv", 'Number of Reviews',
                       'Relationship between Chinese Rating(Y) and Number of review (X)',
                       'Number of Reviews', 'CXY2.pdf', 'yo', '--r')
    _plot_relationship(4, "Chinese.csv", 'Price Range',
                       'Relationship between Chinese Rating(Y) and Price Range (X)',
                       'Price range', 'CXY1.pdf', 'yo', '--b')
    return
#Question 4
from mpl_toolkits.mplot3d import *
from sklearn import linear_model
from sklearn.metrics import r2_score
def step4():
    """Fit a two-variable linear regression of Rating on Price Range and
    Number of Reviews over the combined Mexican + Chinese data, print the
    coefficients and R-squared, and render a 3-D scatter plot with the
    fitted regression plane (saved as regression.pdf).
    """
    #Combine 2 csv
    data1 = pd.read_csv("Mexican.csv")
    data2 = pd.read_csv("Chinese.csv")
    data3 = pd.concat([data1, data2], axis=0)
    data3 = data3.dropna()
    # Regression
    set1 = pd.concat([data3["Price Range"], data3["Number of Reviews"]], axis=1)
    # NOTE(review): DataFrame.as_matrix was deprecated and later removed
    # from pandas; .values / .to_numpy() is the modern spelling.
    X = set1.as_matrix()
    model = linear_model.LinearRegression(fit_intercept = True)
    y = data3["Rating"]
    fit = model.fit(X,y)
    pred = model.predict(X)
    #Print out coefficients and R2
    print("Intercept: ",fit.intercept_)
    print("Slope: ", fit.coef_)
    r2 = r2_score(y,pred)
    print ('R-squared: %.2f' % (r2))
    #Plot a 3d scatter plot
    # NOTE(review): plt/np are re-imported here although the module already
    # imports them; harmless but redundant.  plt.hold was removed in
    # matplotlib 3 -- confirm the pinned matplotlib version.
    import matplotlib.pyplot as plt
    import numpy as np
    from matplotlib import cm
    f = plt.figure()
    ax = f.gca(projection='3d')
    plt.hold(True)
    x_max = max(data3["Price Range"])
    y_max = max(data3["Number of Reviews"])
    # Regression-plane coefficients: z = b0 + b1*x + b2*y
    b0 = float(fit.intercept_)
    b1 = float(fit.coef_[0])
    b2 = float(fit.coef_[1])
    x_surf=np.linspace(0, x_max, 100)
    y_surf=np.linspace(0, y_max, 100)
    x_surf, y_surf = np.meshgrid(x_surf, y_surf)
    z_surf = b0 + b1*x_surf +b2*y_surf
    # plot a 3d surface
    ax.plot_surface(x_surf, y_surf, z_surf, cmap=cm.hot, alpha=0.2);
    x=data3["Price Range"]
    y=data3["Number of Reviews"]
    z=data3["Rating"]
    # plot a 3d scatter plot
    ax.scatter(x, y, z);
    ax.set_xlabel('fit.coef_[0]')
    ax.set_ylabel('fit.coef_[1]')
    ax.set_zlabel('fit.intercept_')
    plt.title('regression')
    plt.xlabel('Price range')
    plt.ylabel('Number of reviews')
    plt.savefig('regression.pdf')
    plt.show()
    return
# In[ ]:
| true |
8312415e2d37049fd6db124c63c5ece7499f62fe | Python | apeden/brownieSorter | /camp.py | UTF-8 | 16,171 | 3.71875 | 4 | [] | no_license | """
Author: Alex Peden
email: apeden23@gmail.com
July 2019
A programme for sorting brownies* into groups (tents)
of equal size according to friendships. Brownies and
friendship choices are imported from a text file.
An algorithm is run to find the optimal (or near best)
groupings of brownies. The best camp can be printed to screen
as a list of tents, their occupants, and the associated
happiness scores.
*in the UK, a brownie is a junior girl-guide (or junior
girl-scout)
"""
import random
import math
class Brownie(object):
    """A camper who has to share a tent (a group) with other brownies.

    Her happiness score reflects how many friendship ties -- one-way or
    mutual -- she shares with the brownies she is grouped with.
    """

    def __init__(self, name):
        self.name = name          # display name, also used as the friendship key
        self.friends = []         # names this brownie says she likes
        self.happiness = 0        # running happiness tally

    def addFriend(self, friend):
        """Record that this brownie likes *friend* (a name string)."""
        self.friends.append(friend)

    def setHappiness(self, deltaHapp):
        """Add *deltaHapp* to this brownie's happiness tally."""
        self.happiness = self.happiness + deltaHapp

    def getHappiness(self):
        """Current happiness score of this brownie."""
        return self.happiness

    def getFriends(self):
        """Names this brownie has declared as friends."""
        return self.friends

    def bonding(self, other):
        """Strength of the tie with *other*: 0 (none), 1 (one-way,
        either direction), or 2 (mutual).
        """
        liked_by_other = self.name in other.friends
        likes_other = other.name in self.friends
        return int(liked_by_other) + int(likes_other)

    def getName(self):
        """This brownie's name."""
        return self.name

    def __str__(self):
        """Readable summary: name plus declared friends."""
        return self.name + " has friends " + str(self.friends)
class Tent(object):
    """A kind of grouping (which may actually be a tent)
    which will contain brownies. It will have a happiness,
    set according to the friendship statuses of the occupant
    brownies.
    """
    def __init__(self, num):
        self.num = num                  # identifying number (0-based)
        self.brownies = []              # occupant Brownie objects
        self.brownie_profiles = ()# brownie names and happinesses
        self.happiness = 0              # collective happiness, see setHappiness()
    def getNum(self):
        """Returning the identifying number of this tent"""
        return self.num
    def getCapacity(self):
        """Return the capacity of this tent."""
        # NOTE(review): self.capacity is never assigned anywhere in this
        # class, so calling this raises AttributeError -- dead/broken code?
        return self.capacity
    def addBrownie(self, brownie):
        """Add a brownie to this tent.
        """
        self.brownies.append(brownie)
    def favIndex(self, otherBrownies):
        """Return the index of the brownie in a list
        of brownies that is most favoured by the
        occupants of this tent.

        Falls back to index 0 when no candidate has a positive bond.
        """
        favIndex, topBond = 0,0
        for i in range(len(otherBrownies)):
            tentBond = 0
            # Sum the pairwise bond of this candidate with every occupant.
            for brownie in self.brownies:
                tentBond += brownie.bonding(otherBrownies[i])
            if tentBond > topBond:
                topBond = tentBond
                favIndex = i
        return favIndex
    def setHappiness(self):
        """Determine happiness of this tent
        on the basis of declared friendships
        amongst the brownies.
        For example, if a brownie in the
        tent likes another brownie in the tent,
        this increases tent happiness by 1. If
        the other brownie likes them back, happiness is
        increased by not 1, but 2 etc.
        """
        # Recomputed from scratch each call; also increments each occupant's
        # personal happiness.  Over the ordered-pair loop a single one-way
        # friendship adds 2 to the tent total (once from each direction),
        # a mutual friendship adds 4.
        self.happiness = 0
        for b1 in self.brownies:
            for b2 in self.brownies:
                if b1 == b2:
                    continue
                # b2 has declared b1 a friend -> b1 gains.
                if b1.getName() in b2.getFriends():
                    b1.setHappiness(1)
                    self.happiness += 1
                # b1 has declared b2 a friend -> b1 gains (duplicates in
                # the friends list count once per occurrence).
                for friend in b1.getFriends():
                    if friend == b2.getName():
                        b1.setHappiness(1)
                        self.happiness += 1
    def getHappiness(self):
        """Return happiness of the tent."""
        return self.happiness
    def getBrownies(self):
        """Return all brownies in the tent."""
        return self.brownies
    def __str__(self):
        """Print tent occupants and their happinesses"""
        # NOTE(review): brownie_profiles accumulates across calls, so
        # calling str() on the same tent twice duplicates the entries.
        for brownie in self.brownies:
            self.brownie_profiles += (brownie.getName().ljust(12, " ")\
                                      +": "+str(brownie.happiness),)
        summary = "Tent " + str(self.num + 1)+": "
        summary += " Happiness: "+ str(self.getHappiness()) +"\n"
        for profile in self.brownie_profiles:
            summary += profile + "\n"
        return summary
class Camp(object):
    """A camp of brownies organised into tents (groupings of brownies),
    each of which earns a 'happiness' score that depends on which
    brownies share the tent.
    """
    def __init__(self, camp_name, num_tents=4):
        self.name = camp_name
        self.num_tents = num_tents      # maximum number of tents allowed
        self.tents = []
        self.allBrownies = []           # every brownie registered with the camp
        self.availBrownies = []         # brownies not yet placed in a tent
        self.happiness = 0              # sum of all tent happinesses
        self.minHapp = 0                # happiness of the least happy brownie
    def getName(self):
        """Return the name of this camp."""
        return self.name
    def setTents(self, tent):
        """Append *tent* to the camp, provided the camp is not already
        holding its full complement of tents.
        """
        if len(self.tents) < self.num_tents:
            self.tents.append(tent)
    def getTents(self):
        """Return the tents in this camp."""
        # Fix: the original class defined getTents twice with identical
        # behaviour; the duplicate definition has been removed.
        return self.tents
    def addBrownie(self, brownie):
        """Register *brownie* with the camp and refresh the pool of
        brownies still awaiting a tent.
        """
        self.allBrownies.append(brownie)
        self.availBrownies = self.allBrownies[:]
    def randSeedTents(self):
        """Create the camp's tents, seeding each with one randomly
        chosen brownie, then fill the remaining places via voteFill().
        """
        for i in range(self.num_tents):
            t = Tent(i)
            numBrownies = len(self.availBrownies)
            randIndex = random.choice(range(numBrownies))
            t.addBrownie(self.availBrownies.pop(randIndex))
            self.setTents(t)
        self.voteFill()
    def voteFill(self):
        """Sequentially (tent by tent) let each tent's occupants vote on
        which of the remaining brownies they would collectively most
        like to join them; the chosen brownie is added to that tent.
        """
        i = 0
        while len(self.availBrownies) > 0:
            tent = self.tents[i % self.num_tents]
            favIndex = tent.favIndex(self.availBrownies)
            tent.addBrownie(self.availBrownies.pop(favIndex))
            i += 1
    def setHappiness(self):
        """Recompute camp happiness as the sum of tent happinesses
        (each tent in turn derives its score from its occupants).
        """
        self.happiness = 0
        for tent in self.tents:
            tent.setHappiness()
            self.happiness += tent.getHappiness()
    def getHappiness(self):
        """Return the camp's total happiness."""
        return self.happiness
    def getRangeHapp(self):
        """Return the spread between the happiest and least happy tent."""
        # Renamed locals so the max/min builtins are not shadowed.
        highest = self.tents[0].getHappiness()
        lowest = self.tents[0].getHappiness()
        for tent in self.tents:
            if tent.getHappiness() > highest:
                highest = tent.getHappiness()
            elif tent.getHappiness() < lowest:
                lowest = tent.getHappiness()
        return highest - lowest
    def setMinHapp(self):
        """Record the happiness of the least happy brownie in the camp."""
        minHapp = self.tents[0].brownies[0].getHappiness()
        for tent in self.tents:
            for brownie in tent.getBrownies():
                if brownie.getHappiness() < minHapp:
                    minHapp = brownie.getHappiness()
        self.minHapp = minHapp
    def getMinHapp(self):
        """Return the happiness of the least happy brownie in the camp."""
        return self.minHapp
    def __str__(self):
        """Return the camp name and its number of tents."""
        return "Hypothetical camp " \
               + self.name \
               + " has " + str(self.num_tents) + " tents."
class CampOrganiser(object):
    """Find a very good arrangement of brownies into equally-sized
    groups (tents) on the basis of declared friendships.

    Arrangements are driven either by the number of tents OR by the
    individual tent capacity (i.e. the size of the groups); the former
    overrides the latter.
    """
    def __init__(self, file=None, numTents=None, capacityTents=None):
        if file is not None:
            self.file = file
        self.brownies = []       # brownie names parsed from the file
        self.friendlist = []     # one [name, friend, friend, ...] list per line
        self.readFile()
        self.brownieObjs = []    # Brownie instances built by addBrownies()
        self.camps = []          # candidate camps generated by setCamps()
        if numTents is None:
            try:
                # Derive the tent count from how many brownies must fit.
                self.numTents = math.ceil(len(self.friendlist) / capacityTents)
            except (TypeError, ZeroDivisionError):
                print("Please declare the number of tents, ",
                      "or the capacity of the tents as ints")
        else:
            self.numTents = numTents
    def readFile(self):
        """Read brownies and their friends (comma-separated, one
        brownie per line) from self.file into self.friendlist.

        Fix: the original opened the file twice and closed neither
        handle; a single ``with`` block is used instead.
        """
        try:
            with open(self.file) as inFile:
                for l in inFile:
                    try:
                        brownieAndFriends = l.rstrip('\n').split(',')
                        self.friendlist.append(brownieAndFriends)
                    except Exception:
                        print("Error reading line")
        except OSError:
            # Preserve the original diagnostic, then propagate the
            # failure (the original also ended up raising here).
            print("Error opening brownie file")
            raise
    def are_brownies(self, friend_list):
        """Return True iff every name in *friend_list* is a brownie
        coming to the camp; print any unknown name and return False.
        """
        for friend in friend_list:
            if friend not in self.brownies:
                print(friend + " is not a named brownie")
                return False
        return True
    def addBrownies(self):
        """(Re)build the list of brownie names and the matching list of
        Brownie objects with their declared friends.

        Raises ValueError if any declared friend is not herself a
        listed brownie. (The original used a bare ``raise`` with no
        active exception, which produced an unhelpful RuntimeError.)
        """
        self.brownies, self.brownieObjs = [], []
        for brownieAndFriends in self.friendlist:
            self.brownies.append(brownieAndFriends[0])
        for brownieAndFriends in self.friendlist:
            b = Brownie(brownieAndFriends[0])
            if self.are_brownies(brownieAndFriends[1:]):
                for friend in brownieAndFriends[1:]:
                    b.addFriend(friend)
            else:
                raise ValueError("A friend of " + brownieAndFriends[0]
                                 + " is not listed as brownie: "
                                 + str(brownieAndFriends[1:]))
            self.brownieObjs.append(b)
    def setCamps(self, numTrials):
        """Generate *numTrials* alternative camps, each randomly seeded
        and then filled by tent-occupant voting, with all happiness
        statistics computed.
        """
        self.camps = []
        for x in range(numTrials):
            self.addBrownies()
            camp = Camp(str(x), self.numTents)
            for brownie in self.brownieObjs:
                camp.addBrownie(brownie)
            # Seed tents with random brownies; remaining places are
            # filled by voting of the tent occupants.
            camp.randSeedTents()
            # Compute brownie, tent and camp happiness, then the
            # happiness of the least happy brownie.
            camp.setHappiness()
            camp.setMinHapp()
            self.camps.append(camp)
    def happFilt(self):
        """Keep only the candidate camps whose overall happiness equals
        the maximum found across all candidates.
        """
        maxHapp = max(self.camps, key=lambda c: c.getHappiness()).getHappiness()
        self.camps = [camp for camp in self.camps
                      if camp.getHappiness() == maxHapp]
        print("Max Happiness: " + str(maxHapp))
        print("Num alternative camps after filtering"
              + " to maximise camp happiness: "
              + str(len(self.camps)))
    def maxMinBrownieHappFilt(self):
        """Keep only the candidate camps that maximise the happiness of
        their least happy brownie.
        """
        maxMinBrownHapp = max(self.camps, key=lambda c: c.getMinHapp()).getMinHapp()
        self.camps = [camp for camp in self.camps
                      if camp.getMinHapp() == maxMinBrownHapp]
        print("Max min brownie happiness: "
              + str(maxMinBrownHapp)
              + "\nNum alternative camps after filtering"
              + " for brownie happiness: "
              + str(len(self.camps)))
    def rangeFilt(self):
        """Keep only the candidate camps that minimise the variation in
        happiness between their tents.
        """
        minRangeHapp = min(self.camps, key=lambda c: c.getRangeHapp()).getRangeHapp()
        self.camps = [camp for camp in self.camps
                      if camp.getRangeHapp() == minRangeHapp]
        print("Min range of tent happinesses: ", str(minRangeHapp))
        print("Num alternative camps after filtering"
              + " to minimise range of tent happinesses: "
              + str(len(self.camps)))
    def happTrial(self, numTrials, priority=None):
        """Generate *numTrials* candidate camps and print an example of
        the best arrangement according to *priority*.

        priority -- None (default): favour highest overall camp
            happiness; "brownie": maximise the happiness of the least
            happy brownie; "evenTents": minimise the difference in
            happiness between tents. Any other value falls back to the
            default ordering (the original raised NameError instead).
        """
        print("NumTrials =", str(numTrials), "....")
        self.setCamps(numTrials)
        filters = {1: self.happFilt,
                   2: self.maxMinBrownieHappFilt,
                   3: self.rangeFilt}
        if priority == "brownie":
            print("Prioritising no unhappy brownies")
            filter_order = [2, 1, 3]
        elif priority == "evenTents":
            print("Prioritising evenly happy tents")
            filter_order = [3, 1, 2]
        else:
            print("Prioritising camp happiness")
            filter_order = [1, 2, 3]
        for x in filter_order:
            filters[x]()
        print("Chosen camp has the following arrangement....\n")
        for tent in self.camps[0].getTents():
            print(tent)
    def __str__(self):
        """Return a human-readable summary of who likes whom, e.g.
        'Anna         likes Susan        Jane         ...'.
        """
        summary = "Summary of Relationships\n"
        for line in self.friendlist:
            summary += line[0].ljust(12, " ") + " likes "
            for friend in line[1:]:
                summary += friend.ljust(12, " ") + " "
            summary += "\n"
        return summary
def sorter(file, numTents = None, capacityTents = None, numTrials = 10000, priority = None):
    """Build a CampOrganiser for *file* and run a happiness trial.

    file -- path to the CSV file of brownies and their friends
    numTents / capacityTents -- declare one of these (numTents wins)
    numTrials -- number of random candidate camps to generate
    priority -- None, "brownie" or "evenTents" (see happTrial)

    The organiser is stored in the module-global ``o`` so that
    showFriends() can print the relationship summary afterwards.
    """
    global o
    # Bug fix: the original ignored the *file* argument and always
    # opened "brownies193.txt".
    o = CampOrganiser(file, numTents, capacityTents)
    o.happTrial(numTrials, priority)
def showFriends():
    """Print the relationship summary held by the module-global
    organiser ``o`` (set up by a prior call to sorter()).
    """
    print(str(o))
if __name__ == "__main__":
    # Demo run: group the brownies listed in the data file into tents
    # of four, then print who declared liking whom.
    sorter("brownies193.txt", capacityTents = 4)
    showFriends()
| true |
da9f1be533944ae53e30bd8bfa1440d012780a0c | Python | ZXiaoheng/SHVC-bitrate-estimation | /SHVC_project/VIF/testvif.py | UTF-8 | 823 | 2.828125 | 3 | [] | no_license | import pandas as pd
import numpy as np
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Wide-format table: four measured series, transposed so each series is a column.
data = pd.DataFrame([[15.9, 16.4, 19, 19.1, 18.8, 20.4, 22.7, 26.5, 28.1, 27.6, 26.3]
                   , [149.3, 161.2, 171.5, 175.5, 180.8, 190.7, 202.1, 212.1, 226.1, 231.9, 239]
                   , [4.2, 4.1, 3.1, 3.1, 1.1, 2.2, 2.1, 5.6, 5, 5.1, 0.7]
                   , [108.1, 114.8, 123.2, 126.9, 132.1, 137.7, 146, 154.1, 162.3, 164.3, 167.6]]).T
# Explanatory variables (columns 1-3).
X = data[[1, 2, 3]]
# IMPORTANT: a constant (intercept) column must be added before computing VIF.
# NOTE(review): X is a slice of `data`, so this assignment may trigger pandas'
# SettingWithCopyWarning — confirm writing into the slice is intended.
X[4] = 1
# Compute the variance inflation factor of the 2nd variable (second column).
result=variance_inflation_factor(X[[1, 2, 3, 4]].values, 1)
print(X)
print(result)
# Without the constant column the result is wildly different — the VIF can be
# in the thousands.
8b665e68dfd9d05fca0b2b43ffda42398e7d53c7 | Python | gotsulyakk/Learn-OpenCV | /warp_perspective.py | UTF-8 | 412 | 2.5625 | 3 | [] | no_license | import numpy as np
import cv2

# Load the source image (replace the placeholder with an actual file path).
img = cv2.imread('images/**ANY_IMAGE**')

# Size (in pixels) of the rectified output card.
width, height = 250, 350

# Four corners of the card in the source image and where each should land
# in the output (order: top-left, top-right, bottom-left, bottom-right).
pts1 = np.float32([[505, 157], [583, 261], [340, 225], [411, 342]])
pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

# Compute the 3x3 perspective (homography) matrix mapping pts1 -> pts2
# and warp the image with it.
matrix = cv2.getPerspectiveTransform(pts1, pts2)
output = cv2.warpPerspective(img, matrix, (width, height))

cv2.imshow('Cards', img)
cv2.imshow('Output', output)
# Bug fix: the original final line was corrupted by extraction junk
# ("cv2.waitKey(0) | true"), which would raise NameError at runtime.
cv2.waitKey(0)
# Classic for/else demo: the else clause runs only when the inner loop
# completes without hitting `break`, i.e. when n has no divisor.
# Fix: ported from Python 2 (`print` statements were a SyntaxError under
# Python 3; `/` became `//` to keep the integer factor in the output).
for n in range(2, 10):
    for x in range(2, n):
        if n % x == 0:
            # Found a factor: n is composite.
            print(str(n) + ' equals ' + str(x) + ' * ' + str(n // x))
            break
    else:
        # No factor found: n is prime.
        print(str(n) + ' is a prime')
| true |