blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
581717301e0e48d3259f6b908b8701320f69505d | Python | vakhnenko2/Math-Modeling_10_class | /Лаба 13 Задача 1.py | UTF-8 | 3,661 | 2.9375 | 3 | [] | no_license | import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation
second_in_year = 365 * 24 * 60 * 60
second_in_day = 24 * 60 * 60
years = 4
t = np.arange(0, years*second_in_year, second_in_day)
def move_func(s, t):
    """ODE right-hand side for three charged, gravitating bodies in the plane.

    s packs the state as (x, vx, y, vy) for each of the three bodies; t is
    the time (unused, but required by scipy.integrate.odeint).  Returns the
    time derivatives in the same order.

    Each acceleration combines a Newtonian gravity term (-G*m_j, attractive)
    and a Coulomb term (k*q_i*q_j/m_i, repulsive for like charges), both via
    the inverse-square law with r**3 = (dx**2 + dy**2)**1.5 denominators.
    Relies on module-level constants G, k, m1..m3, q1..q3 defined below.
    """
    (x1, v_x1, y1, v_y1,
     x2, v_x2, y2, v_y2,
     x3, v_x3, y3, v_y3) = s
    # Body 1: velocity components and accelerations from bodies 2 and 3.
    dxdt1 = v_x1
    dv_xdt1 = (-G * m2 * (x1 - x2) / ((x1 - x2)**2+(y1 - y2)**2)**1.5
               - G * m3 * (x1 - x3) / ((x1 - x3)**2+(y1 - y3)**2)**1.5
               + k * q1 * q2 / m1 * (x1 - x2) / ((x1 - x2)**2 + (y1 - y2)**2)**1.5
               + k * q1 * q3 / m1 * (x1 - x3) / ((x1 - x3)**2 + (y1 - y3)**2)**1.5)
    dydt1 = v_y1
    dv_ydt1 = (-G * m2 * (y1 - y2)/((x1 - x2)**2 + (y1 - y2)**2)**1.5
               - G * m3 * (y1 - y3) / ((x1-x3)**2 + (y1-y3)**2)**1.5
               + k * q1 * q2 / m1 * (y1 - y2) / ((x1 - x2)**2 + (y1 - y2)**2)**1.5
               + k * q1 * q3 / m1 * (y1 - y3) / ((x1 - x3)**2 + (y1 - y3)**2)**1.5)
    # Body 2: same structure with bodies 1 and 3 as sources.
    dxdt2 = v_x2
    dv_xdt2 = (-G * m1 * (x2 - x1) / ((x2 - x1)**2+(y2 - y1)**2)**1.5
               - G * m3 * (x2 - x3) / ((x2 - x3)**2+(y2 - y3)**2)**1.5
               + k * q2 * q1 / m2 * (x2 - x1) / ((x2 - x1)**2 + (y2 - y1)**2)**1.5
               + k * q2 * q3 / m2 * (x2 - x3) / ((x2 - x3)**2 + (y2 - y3)**2)**1.5)
    dydt2 = v_y2
    dv_ydt2 = (-G * m1 * (y2 - y1)/((x2 - x1)**2 + (y2 - y1)**2)**1.5
               - G * m3 * (y2 - y3) / ((x2 - x3)**2 + (y2 - y3)**2)**1.5
               + k * q2 * q1 / m2 * (y2 - y1) / ((x2 - x1)**2 + (y2 - y1)**2)**1.5
               + k * q2 * q3 / m2 * (y2 - y3) / ((x2 - x3)**2 + (y2 - y3)**2)**1.5)
    # Body 3: same structure with bodies 1 and 2 as sources.
    dxdt3 = v_x3
    dv_xdt3 = (-G * m1 * (x3 - x1) / ((x3 - x1)**2+(y3 - y1)**2)**1.5
               - G * m2 * (x3 - x2) / ((x3 - x2)**2+(y3 - y2)**2)**1.5
               + k * q3 * q1 / m3 * (x3 - x1) / ((x3 - x1)**2 + (y3 - y1)**2)**1.5
               + k * q3 * q2 / m3 * (x3 - x2) / ((x3 - x2)**2 + (y3 - y2)**2)**1.5)
    dydt3 = v_y3
    dv_ydt3 = (-G * m1 * (y3 - y1)/((x3 - x1)**2 + (y3 - y1)**2)**1.5
               - G * m2 * (y3 - y2) / ((x3-x2)**2 + (y3-y2)**2)**1.5
               + k * q3 * q1 / m3 * (y3 - y1) / ((x3 - x1)**2 + (y3 - y1)**2)**1.5
               + k * q3 * q2 / m3 * (y3 - y2) / ((x3 - x2)**2 + (y3 - y2)**2)**1.5)
    return (dxdt1, dv_xdt1, dydt1, dv_ydt1,
            dxdt2, dv_xdt2, dydt2, dv_ydt2,
            dxdt3, dv_xdt3, dydt3, dv_ydt3)
# Initial positions (m) and velocities (m/s) of the three bodies.
x10 = -6 * 10 **(-14)
v_x10 = 0
y10 = 0
v_y10 = 100
x20 = -149 * 10**9
v_x20 = 1
y20 = 0
v_y20 = -300
x30 = 0
v_x30 = 150
y30 = 3 * 10**(-14)
v_y30 = 0
# Packed initial state, in the exact order move_func unpacks it.
s0 = (x10, v_x10, y10, v_y10,
      x20, v_x20, y20, v_y20,
      x30, v_x30, y30, v_y30)
# Masses (kg) and charges of the bodies; G is the gravitational constant,
# k the Coulomb constant.
m1 = 1.1 * 10 ** (-12)
q1 = -1.1 * 10 ** (20)
m2 = 2.1 * 10 ** (-12)
q2 = 2.1 * 10 ** (20)
m3 = 3.1 * 10 ** (-12)
q3 = -3.1 * 10 ** (20)
G = 6.67 * 10**(-11)
k = 8.98755 * 10 ** 9
# Integrate the equations of motion over the full time grid t.
sol = odeint(move_func, s0, t)
# Build one animation frame per time step: the trajectory drawn so far
# ('-' line) plus the current position marker ('o') for each body.
fig = plt.figure()
bodys = []
for i in range (0, len(t), 1):
    body1, = plt.plot(sol[:i,0], sol[:i,2], '-', color='r')
    body1_line, = plt.plot(sol[i, 0], sol[i,2], 'o', color='r')
    body2, = plt.plot(sol[:i,4], sol[:i,6], '-', color='g')
    body2_line, = plt.plot(sol[i, 4], sol[i,6], 'o', color='g')
    body3, = plt.plot(sol[:i,8], sol[:i,10], '-', color='b')
    body3_line, = plt.plot(sol[i, 8], sol[i,10], 'o', color='b')
    bodys.append([body1, body1_line, body2, body2_line, body3, body3_line])
ani = ArtistAnimation(fig, bodys, interval=50)
plt.axis('equal')
ani.save('гифка.gif') | true |
75c7f38d704b4d3eac344880bac09ff83ab89525 | Python | kaiduohong/imageProcessing | /DIP/homeWorks/hw2.py | UTF-8 | 4,991 | 2.59375 | 3 | [] | no_license | #-*-coding:utf8-*-
import numpy as np
from scipy.misc import imread, imsave
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import os
import skimage
from skimage import io
import sys
def getNewHistogram(histogram, map):
    """Redistribute a 256-bin histogram through an intensity mapping.

    Bin i of the input is added onto bin map[i] of the output, so several
    source bins may merge into one destination bin.
    """
    bins = 256
    remapped = np.zeros(bins)
    for src_level in range(bins):
        dst_level = int(map[src_level])
        remapped[dst_level] += histogram[src_level]
    return remapped
def getHistogram(im):
    """Return the normalised 256-bin grey-level histogram of a 2-D image."""
    rows, cols = np.shape(im)
    counts = np.zeros(256)
    for value in np.asarray(im).ravel():
        counts[int(value)] += 1
    # Divide by rows then cols (kept in this order to match callers exactly).
    return counts / rows / cols
def getHistogramMap(frequancyHistogram):
    """Build the equalisation lookup table for a 256-bin frequency histogram.

    Maps each grey level to round((256 - 1) * CDF(level)).
    """
    levels = 256
    # cumsum accumulates sequentially, matching the original running sum.
    cdf = np.cumsum(frequancyHistogram[:levels])
    return np.round((levels - 1) * cdf)
def histogramEqualized(im):
    """Histogram-equalise a grey-scale image via the CDF lookup table."""
    rows, cols = np.shape(im)
    lookup = getHistogramMap(getHistogram(im))
    equalised = np.zeros([rows, cols])
    for r in range(rows):
        for c in range(cols):
            # im must hold integer grey levels; they index the lookup table.
            equalised[r, c] = lookup[im[r, c]]
    return equalised
def getMatchingMap(im,targetHist):
    """Build the grey-level lookup table for histogram matching.

    Equalises both the image's own histogram and the target histogram,
    then for each source level picks the target level whose scaled CDF is
    closest.  Because the comparison uses <=, ties resolve to the LARGEST
    matching target level.
    NOTE(review): the inner zk loop rebuilds the target CDF from scratch
    for every source level, so this is O(level^2) — confirm acceptable.
    """
    level = 256
    [height, weight] = np.shape(im)  # NOTE(review): height/weight are unused here
    hist = getHistogram(im)
    map1 = getHistogramMap(hist)
    hist = getNewHistogram(hist,map1)
    map2 = getHistogramMap(targetHist)
    targetHist = getNewHistogram(targetHist,map2)
    map = np.zeros(level)
    sk = 0  # running CDF of the (equalised) source histogram
    for i in range(level):
        d = np.inf  # best distance found so far for source level i
        sk += hist[i]
        zk = 0  # running CDF of the (equalised) target histogram
        for j in range(level):
            zk += targetHist[j]
            newd = abs(np.round((level - 1) * (sk)) - np.round((level - 1) * (zk)))
            if newd <= d:
                d = newd
                map[i] = j
    return map
def histogramMatching(im, targetHist):
    """Remap *im* so its histogram approximates targetHist.

    Note: mutates the array in place and returns the same object.
    """
    lookup = getMatchingMap(im, targetHist)
    rows, cols = np.shape(im)
    for r in range(rows):
        for c in range(cols):
            im[r, c] = lookup[int(im[r, c])]
    return im
def filter2d(im, filter):
    """2-D convolution of image *im* with kernel *filter*.

    The kernel is flipped in both axes (true convolution, not correlation)
    and centred on each pixel; contributions falling outside the image are
    skipped, which is equivalent to zero padding.  Returns a float array of
    the same shape as *im*.

    Bug fix: the centre offsets used '/', which is float division in
    Python 3, so the index arithmetic produced floats and indexing crashed;
    use integer division '//' instead.
    """
    height, weight = np.shape(im)
    h, w = np.shape(filter)
    newIm = np.zeros([height, weight])
    for i in range(height):
        for j in range(weight):
            acc = 0
            for k in range(h):
                for l in range(w):
                    # '//' keeps the neighbour indices integral.
                    posi, posj = i + k - h // 2, j + l - w // 2
                    if posi < 0 or posi >= height or \
                            posj < 0 or posj >= weight:
                        continue  # outside the image: treated as zero
                    # Flipped kernel indices -> convolution semantics.
                    acc += im[posi, posj] * filter[h - k - 1, w - l - 1]
            newIm[i, j] = acc
    return newIm
def testFilter():
    """Demo of filter2d: mean filters of growing size, a Laplacian edge map
    and Laplacian sharpening, arranged in one 3x3 matplotlib figure."""
    filename = os.path.join('..', 'hw2_input', '97.png')
    im = imread(filename)
    [height, weight] = np.shape(im)  # NOTE(review): unused here
    plt.subplot(331)
    plt.imshow(im, 'gray')
    plt.title('origin', fontproperties='SimHei')
    # 3x3 box blur.
    filter = np.ones([3,3]) / 9
    newIm = filter2d(im,filter)
    plt.subplot(332)
    plt.imshow(newIm, 'gray')
    plt.title('3*3 mean filter', fontproperties='SimHei')
    # 7x7 box blur.
    filter = np.ones([7,7]) / 49
    newIm = filter2d(im,filter)
    plt.subplot(333)
    plt.imshow(newIm, 'gray')
    plt.title('7*7 mean filter', fontproperties='SimHei')
    # 11x11 box blur.
    filter = np.ones([11,11]) / 121
    newIm = filter2d(im,filter)
    plt.subplot(334)
    plt.imshow(newIm, 'gray')
    plt.title('11*11 mean filter', fontproperties='SimHei')
    # High-pass Laplacian kernel: extracts edges.
    laplacian = np.array([[-1,-1,-1],[-1,8,-1],[-1,-1,-1]])
    newIm = filter2d(im, laplacian)
    newIm[newIm > 255] = 255  # clamp to the displayable 0-255 range
    newIm[newIm < 0] = 0
    plt.subplot(335)
    plt.imshow(newIm, 'gray')
    plt.title('3*3 laplacian filter', fontproperties='SimHei')
    # Add the edge map back onto the original to sharpen it.
    newIm = im + newIm
    newIm[newIm > 255] = 255
    newIm[newIm < 0] = 0
    plt.subplot(336)
    plt.imshow(newIm, 'gray')
    plt.title('shapened image', fontproperties='SimHei')
    plt.show()
def hw2():
    # The triple-quoted block below is disabled Python-2-era histogram
    # equalisation experiments (note the `print im` statements); only the
    # spatial-filter demo is currently run.
    '''
    filename = os.path.join('../', 'hw2_input', '97.png')
    im = imread(filename)
    plt.subplot(231)
    print im,type(im[0,0])
    plt.imshow(im, 'gray')
    plt.title('origin', fontproperties='SimHei')
    plt.subplot(232)
    hist = getHistogram(im)
    plt.bar(np.linspace(0, 256, 256, endpoint=False), \
            hist, alpha=.8, color='r')
    plt.title('origin histogram', fontproperties='SimHei')
    newIm = histogramEqualized(im)
    hist = getHistogram(newIm)
    plt.subplot(233)
    plt.imshow(newIm, 'gray')
    plt.title('equalized image', fontproperties='SimHei')
    plt.subplot(234)
    plt.bar(np.linspace(0, 256, 256, endpoint=False), \
            hist, alpha=.8, color='b')
    plt.title('histogram', fontproperties='SimHei')
    plt.subplot(235)
    newIm = histogramEqualized(newIm)
    newHist = getHistogram(newIm)
    plt.bar(np.linspace(0, 256, 256, endpoint=False), \
            newHist, alpha=.8, color='b')
    plt.title('twice equalization', fontproperties='SimHei')
    print np.max(np.abs(hist - newHist))
    plt.show()
    '''
    testFilter()
if __name__ == '__main__':
hw2() | true |
e6b6150c9369067c32c85ca10145a48084279481 | Python | derekderie/challenges | /codechef/JUNE20/GUESSG/run_local.py | UTF-8 | 3,152 | 3.40625 | 3 | [] | no_license | from codechef.JUNE20.GUESSG.solution import search, SearchSpace
def truthful_answer(val, ans):
    """Honest verdict for a guess: 'E' if equal, 'L' if ans < val, 'G' otherwise."""
    if ans == val:
        return 'E'
    return 'L' if ans < val else 'G'
def lie_answer(val, ans):
    """Lying verdict: truthful on equality, otherwise 'L'/'G' swapped."""
    if ans == val:
        return 'E'
    return 'G' if ans < val else 'L'
class Asker:
    """Base oracle for the guessing game; counts how many queries are made."""

    def __init__(self, ans, verbose=False):
        self.ask_count = 0      # number of ask() calls so far
        self.ans = ans          # the hidden answer
        self.verbose = verbose  # subclasses may log each query

    def ask(self, val):
        # Only bookkeeping here; subclasses produce the actual reply.
        self.ask_count += 1
class Truthful(Asker):
    """Oracle that always answers honestly."""

    def ask(self, val):
        super().ask(val)
        reply = truthful_answer(val, self.ans)
        if self.verbose:
            print("asking ", val, "returning", reply)
        return reply
class LieEveryOther(Asker):
    """Oracle that strictly alternates between lying and telling the truth."""

    def __init__(self, lie_on_start, ans):
        self.lie = lie_on_start  # whether the NEXT reply is a lie
        super().__init__(ans)

    def ask(self, val):
        if self.lie:
            reply = lie_answer(val, self.ans)
        else:
            reply = truthful_answer(val, self.ans)
        if self.verbose:
            print("asking ", val, "returning", reply)
        self.lie = not self.lie
        super().ask(val)
        return reply
class MightLie(Asker):
    """Oracle that lies with probability *frac*, but never twice in a row."""

    def __init__(self, frac, ans):
        self.did_lie = False  # True right after a lie; forces an honest reply next
        self.frac = frac      # probability of lying on an eligible query
        super().__init__(ans)

    def ask(self, val):
        # Short-circuit keeps the random stream identical: no random draw
        # happens when the previous reply was already a lie.
        lie_now = (not self.did_lie) and random.random() < self.frac
        if lie_now:
            reply = lie_answer(val, self.ans)
        else:
            reply = truthful_answer(val, self.ans)
        self.did_lie = lie_now
        super().ask(val)
        return reply
def main():
    """Build the full [1, 10^9] search space and run the sampling experiment.

    The commented-out lines are earlier manual experiments (single-answer
    checks against each asker type) kept for reference.
    NOTE(review): start_space and ans are unused while those experiments
    stay commented out.
    """
    n = 10 ** 9
    # start_list = list(range(1, n + 1))
    start_space = SearchSpace([(1, n)])
    # for k in range(1, 10):
    #     ans = k
    #     assert ans == search(start_list, Truthful(ans)), "Error with Truthful and ans=%d" % ans
    #     assert (ans == search(start_list, LieEveryOther(True, ans))), "Error with LEO-T and ans=%d" % ans
    #     assert (ans == search(start_list, LieEveryOther(False, ans))), "Error with LEO-F and ans=%d" % ans
    ans = 5
    # print("Ans: ", ans)
    # print("Truthful", search(start_space, Truthful(ans)))
    # print("LieEveryOther (True)", search(start_space, LieEveryOther(True, ans)))
    # print("LieEveryOther (False)", search(start_space, LieEveryOther(False, ans)))
    # search(start_list, Gamer())
    do_sampling_experiment()
import random
def do_sampling_experiment():
    """Stress-test search(): 20000 random answers against every asker type.

    For each random answer it asserts that search() recovers the answer for
    a truthful oracle, both alternating liars, and a 50%-probability liar,
    printing the query count for each run.
    """
    n = 10 ** 9
    # start_list = list(range(1, n + 1))
    search_space = SearchSpace([(1, n)])
    # Factories so each trial gets a fresh asker with the new answer.
    make_askers = [lambda ans: Truthful(ans),
                   lambda ans: LieEveryOther(True, ans),
                   lambda ans: LieEveryOther(False, ans),
                   lambda ans: MightLie(0.5, ans)]
    for k in range(20000):
        ans = random.randint(1, n)
        for make_asker in make_askers:
            asker = make_asker(ans)
            assert ans == search(search_space, asker), "Error with %s" % asker
            print(f"Found {ans} with {asker} in {asker.ask_count} steps")
    print("Everything went okay")
main()
| true |
d6cc380caf8eb14f3867152ada677d905bd43ad4 | Python | Hubert51/leetcode | /Citadel-OA1-matrix-summation.py | UTF-8 | 523 | 3.125 | 3 | [] | no_license |
def solution(after_matrix):
    """Invert a 2-D prefix-sum (summed-area) matrix.

    after_matrix[i][j] is assumed to hold the sum of all original cells
    (k, l) with k <= i and l <= j.  Returns the original matrix as a list
    of lists.

    Improvement: each cell is recovered in O(1) with the inclusion-
    exclusion identity
        before[i][j] = after[i][j] - after[i-1][j]
                       - after[i][j-1] + after[i-1][j-1]
    so the whole inversion is O(rows * cols) instead of the original
    O(rows^2 * cols) double subtraction loop.
    """
    before_mat = []
    for i in range(len(after_matrix)):
        row = []
        for j in range(len(after_matrix[i])):
            val = after_matrix[i][j]
            if i > 0:
                val -= after_matrix[i - 1][j]
            if j > 0:
                val -= after_matrix[i][j - 1]
            if i > 0 and j > 0:
                val += after_matrix[i - 1][j - 1]
            row.append(val)
        before_mat.append(row)
    return before_mat
if __name__ == '__main__':
print(solution([[2,5], [7,17]]))
print(solution([[1,2], [3,4]])) | true |
e21a28d0a40517b8fa4297c921b3e51f20302ad8 | Python | Schnei1811/DotA2OptimizationStrategies | /WinnerPrediction.py | UTF-8 | 4,040 | 2.6875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import pickle
def datacreation(input_data):
    """One-hot encode the drafted heroes into the feature vector.

    Radiant picks set index (hero id - 1); Dire picks set index
    (hero id + 112).  Mutates and returns *input_data*.  Reads the
    module-level radhero1..5 / direhero1..5 pick constants.
    """
    for hero_id in (radhero1, radhero2, radhero3, radhero4, radhero5):
        input_data[hero_id - 1] = 1
    for hero_id in (direhero1, direhero2, direhero3, direhero4, direhero5):
        input_data[hero_id + 112] = 1
    return input_data
def PredictRandomForest(input_data):
    """Predict the match winner with the pickled random-forest classifier.

    Unpickles the model from SavedParameters/RFpickle.pickle on every call
    and returns the predicted class as an int (used as a key of the
    module-level Predict table: 1 = Radiant victory, 2 = Dire victory).
    """
    input_data = input_data.reshape(1,-1)  # classifier expects a 2-D (1, n_features) sample
    rfclf = pd.read_pickle('SavedParameters/RFpickle.pickle')
    rfpredict = int(rfclf.predict(input_data))
    return rfpredict
def PredictSimpleNeuralNetwork():
    """Predict the winner with the pickled one-hidden-layer network.

    Reads the module-level `input_data` feature vector plus the globals
    num_features, hidden_size and num_classes.  The flat optimiser result
    (fmin.x) is unpickled and split/reshaped into the two layer weight
    matrices before a forward pass; returns the arg-max class index
    (used as a key of the Predict table).
    """
    X = np.array([input_data])
    fmin = pd.read_pickle('SavedParameters/SNNpickle.pickle')
    # First hidden_size*(num_features+1) entries form theta1; the rest theta2.
    theta1 = np.matrix(np.reshape(fmin.x[:hidden_size * (num_features + 1)], (hidden_size, (num_features + 1))))
    theta2 = np.matrix(np.reshape(fmin.x[hidden_size * (num_features + 1):], (num_classes, (hidden_size + 1))))
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    snnPredict = np.array(np.argmax(h, axis=1))
    snnPredict = snnPredict[0]
    return snnPredict
def sigmoid(z):
    """Element-wise logistic activation: 1 / (1 + exp(-z))."""
    denom = 1 + np.exp(-z)
    return 1 / denom
def forward_propagate(X, theta1, theta2):
    """Single forward pass through a one-hidden-layer network.

    A bias column of ones is prepended before each layer.  Returns every
    intermediate (a1, z2, a2, z3) plus the output activations h.
    """
    n_samples = X.shape[0]
    bias = np.ones(n_samples)
    a1 = np.insert(X, 0, values=bias, axis=1)
    z2 = a1 * theta1.T  # np.matrix: '*' is matrix multiplication
    a2 = np.insert(sigmoid(z2), 0, values=bias, axis=1)
    z3 = a2 * theta2.T
    h = sigmoid(z3)
    return a1, z2, a2, z3, h
# Draft under evaluation: hero ids (keys of the Hero table below)
# for the five Radiant and five Dire picks.
radhero1 = 3
radhero2 = 9
radhero3 = 32
radhero4 = 63
radhero5 = 43
direhero1 = 73
direhero2 = 113
direhero3 = 22
direhero4 = 67
direhero5 = 87
Hero = {1:'Antimage',2:'Axe',3:'Bane',4:'Bloodseeker',5:'Crystal Maiden',6:'Drow Ranger',7:'Earthshaker',8:'Juggernaut',9:'Mirana',10:'Morphling',
11:'Shadow Fiend',12:'Phantom Lancer',13:'Puck',14:'Pudge',15:'Razor',16:'Sand King',17:'Storm Spirit',18:'Sven',19:'Tiny',20:'Vengeful Spirit',
21:'WindRanger',22:'Zeus',23:'Kunkka',24:'Blank',25:'Lina',26:'Lion',27:'Shadow Shaman',28:'Slardar',29:'Tidehunter',30:'Witch Doctor',
31:'Lich',32:'Riki',33:'Enigma',34:'Tinker',35:'Sniper',36:'Necrophos',37:'Warlock',38:'Beastmaster',39:'Queen of Pain',40:'Venomancer',
41:'Faceless Void',42:'Wraith King',43:'Death Prophet',44:'Phantom Assassin',45:'Pugna',46:'Templar Assassin',47:'Viper',48:'Luna',49:'Dragon Knight',50:'Dazzle',
51:'Clockwerk',52:'Leshrac',53:'Natures Prophet',54:'Lifestealer',55:'Dark Seer',56:'Clinkz',57:'Omniknight',58:'Enchantress',59:'Huskar',60:'Night Stalker',
61:'Brood Mother',62:'Bounty Hunter',63:'Weaver',64:'Jakiro',65:'Batrider',66:'Chen',67:'Spectre',68:'Ancient Apparition',69:'Doom',70:'Antimage',
71:'Spirit Breaker',72:'Gyrocopter',73:'Alchemist',74:'Invoker',75:'Silencer',76:'Outworld Devourer',77:'Lycan',78:'BrewMaster',79:'Shadow Demon',80:'Lone Druid',
81:'Chaos Knight',82:'Meepo',83:'Treant',84:'Ogre Magi',85:'Undying',86:'Rubick',87:'Disruptor',88:'Nyx Assassin',89:'Naga Siren',90:'Keeper of the Light',
91:'Wisp',92:'Visage',93:'Slark',94:'Medusa',95:'Troll Warlord',96:'Centaur Warrunner',97:'Magnus',98:'Timbersaw',99:'Bristleback',100:'Tusk',
101:'Skywrath Mage',102:'Abaddon',103:'Elder Titan',104:'Legion Commander',105:'Techies',106:'Ember Spirit',107:'Earth Spirit',108:'Abyssal Underlord',109:'TerrorBlade',110:'Pheonix',
111:'Oracle',112:'Winter Wyvern',113:'Arc Warden'}
Predict = {1:'Radiant Victory',2:'Dire Victory'}
num_features = 227
num_classes = 2
hidden_size = 1000
print("\nRadiant Team:\n",Hero[radhero1],",",Hero[radhero2],",",Hero[radhero3],",",Hero[radhero4],",",Hero[radhero5])
print("\nDire Team:\n",Hero[direhero1],",",Hero[direhero2],",",Hero[direhero3],",",Hero[direhero4],",",Hero[direhero5])
input_data = np.zeros((num_features-1,),dtype=np.int)
input_data = datacreation(input_data)
rfpredict = PredictRandomForest(input_data)
input_data = np.zeros((num_features,),dtype=np.int)
input_data = datacreation(input_data)
snnpredict = PredictSimpleNeuralNetwork()
print("\nRandom Forest Predicted", Predict[rfpredict])
print("\nSimple Neural Network Predicted", Predict[snnpredict])
| true |
e19d5b94b6c15f921e829d22d97c3456f95e56b2 | Python | SuzanaBhandari/Python_learning | /Strings/stringformatiing.py | UTF-8 | 602 | 3.953125 | 4 | [] | no_license | #string concatenation
a = "sujana"
b = "bhandari"
c = a + b
print(c)
age = 23
name = "sujana"
print("My age is " + str(age))
#manually insert
print("My age is " + str(age) +" " + "years")
#format(), dynamic procedure
print("My age is {0} years".format(age))
print ("My name is %s and My age is %d" % (name,age))
print("There are {0} days in {1},{2},{3},{4},{5},{6} and {7}".format(31,"Jan","May","Mar","July","August","October","December"))
print("""
Feb:{0}
March:{2}
Jan:{0}
April:{2}
may:{0}
Jun:{1}
""".format(28,230,31))
print("My age is %d %s %d %s" %(age ,"years",6,"month")) | true |
61974b1f6a11e8aed0eb49da548761368fdb3ff4 | Python | William-Mou/-py-1 | /Py大作業2 (1).py | UTF-8 | 1,757 | 2.953125 | 3 | [] | no_license |
# coding: utf-8
# In[ ]:
from PIL import Image
import random
K = 5 # number of colors
W = 800 # width of output image
H = 600 # height of output image
MAX_ITER = 3
def find_nearest(pixels, centroids):
    """Assign every pixel to the index of its nearest centroid.

    Distances are Euclidean in RGB space, except that each squared
    component difference is truncated with int() before summing.
    NOTE(review): the per-pixel distance buffer and the final scan are
    hard-coded to length 5, so this only works while K == 5 — confirm
    before changing K.  The scan uses <=, so among equally near
    centroids the LAST one wins.
    """
    re = []
    for pixcel in range(len(pixels)):
        a = [0,0,0,0,0]  # distance to each of the K centroids
        for cen in range(K):
            for rgb in range(3):
                a[cen] += int((pixels[pixcel][rgb]-centroids[cen][rgb])**2)
            a[cen]=a[cen]**0.5
        # Linear scan for the minimum distance index.
        cnt=a[0]
        ans=0
        for i in range(1,5):
            if a[i]<=cnt:
                ans = i
                cnt = a[i]
        re.append(ans)
    return (re)
def compute_centroid(pixels, clusters):
    """Recompute centroid colours from the current cluster assignment.

    First pass sums the RGB components and counts members per cluster
    (reads the module-level W and H image dimensions).
    NOTE(review): the second pass divides the running sum by the member
    count once PER PIXEL of that cluster (scom[cli][j] /= count[cli]
    executes repeatedly), so the returned values are not plain means —
    confirm this is the intended update rule.
    Returns one centroid triple per pixel (repeated per cluster member).
    """
    scom =[[0,0,0],[0,0,0],[0,0,0],[0,0,0],[0,0,0]]  # per-cluster RGB sums (K hard-coded to 5)
    count = [0,0,0,0,0]  # per-cluster member counts
    recom = []
    for i in range(W*H):
        for j in range(3):
            cli=clusters[i]
            scom[cli][j]+=pixels[i][j]
        count[cli]+=1
    for i in range(W*H):
        cli = clusters[i]
        rec = []
        for j in range(3):
            scom[cli][j]/=count[cli]
            rec.append(scom[cli][j])
        recom.append(rec)
    return (recom)
# Load and resize the input image to the working resolution.
im = Image.open('sample.jpg')
im = im.resize( (W, H) )
# Flatten the image into a list of RGB tuples (x outer, y inner).
pixels = []
for i in range(W):
    for j in range(H):
        pixels.append(im.getpixel((i, j)))
# K-means: random initial centroids, then alternate assign/update steps.
centroids = random.sample(pixels, K)
for t in range(MAX_ITER):
    print("Iter", t+1)
    clusters = find_nearest(pixels, centroids)
    centroids = compute_centroid(pixels, clusters)
clusters = find_nearest(pixels, centroids)
# Quantise centroids to integer RGB and paint every pixel with its
# cluster's colour; pixel (i, j) is at flat index i*H + j.
for i in range(K):
    centroids[i] = tuple(map(int, centroids[i]))
nim = Image.new('RGB', (W, H))
for i in range(W):
    for j in range(H):
        nim.putpixel((i, j), centroids[clusters[i*H+j]])
nim.save('output2.jpg')
| true |
0a67dc283e490b12e1539208bdc214e3579a86a8 | Python | songzhipengn/store | /京东登录.py | UTF-8 | 1,124 | 2.71875 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains  # action-chain (event chain) helper
# NOTE(review): account credentials are hard-coded in plain text below —
# move them to a config file or environment variables.
# Launch a Chrome browser session.
driver = webdriver.Chrome()
# Open JD.com.
driver.get("http://www.jd.com")
# Maximise the browser window.
driver.maximize_window()
# Locate and click "please log in".
driver.find_element_by_xpath('//*[@id="ttbar-login"]/a[1]').click()
# Click "account login".
driver.find_element_by_xpath('//*[@id="content"]/div[2]/div[1]/div/div[3]/a').click()
# Switch to the first window handle.
date = driver.window_handles # ["s001","s002"]
driver.switch_to.window(date[0])
# Enter the account name.
driver.find_element_by_xpath('//*[@id="loginname"]').send_keys("13780291681")
# Enter the password.
driver.find_element_by_xpath('//*[@id="nloginpwd"]').send_keys("13780291681a+")
# Click "log in".
driver.find_element_by_xpath('//*[@id="loginsubmit"]').click()
# Drag the slider captcha.
ac = ActionChains(driver)
ele = driver.find_element_by_xpath('//*[@id="JDJRV-wrap-loginsubmit"]/div/div/div/div[2]/div[3]').click() # click-and-hold the slider element
driver.implicitly_wait(2)
ac.click_and_hold(ele).move_by_offset(99,0).perform() # execute immediately
ac.release() # release the mouse
| true |
237b0af5d942c50bef24540ff0817a7017161d66 | Python | DavidRocha12/Tabela-de-calculos | /tabelasalarial.py | UTF-8 | 2,154 | 3.484375 | 3 | [] | no_license | #Meu primeiro projeto, estou aprendendo e procuro melhorar este programa e finalizar para
# ...to gain experience (continuation of the author's header note).
# Learning from mistakes along the way.
# Project for labour-law payroll calculations, aimed at employer or employee users.
print('Calculo Salárial')
print('')
escolha = str(input('O calculo é para a empresa ou funcionário? ')).strip().title()  # choose: company or employee
if escolha == 'Empresa':  # nested if/elif for the employer/employee choice
    # 'funcionario' variable added in this branch
    funcionario = str(input('Qual é o nome do funcionário? ')).strip().title()  # employee's name
    salario = float(input('Qual valor do salário do funcionário? R$'))
elif escolha == 'Funcionário' or 'Funcionario':  # NOTE(review): `or 'Funcionario'` is always truthy, so this branch catches EVERY non-'Empresa' answer — confirm intended
    usuario = str(input('Qual é seu nome? ')).strip().title()  # user's name
    # Collect the information below for the later calculations.
    salario = float(input('Qual é o valor do seu salário? R$'))
horastrabalhada = int(input('Qual é a carga horária do funcionário? '))
print('(Responda Sim ou Não se funcionário fez hora extra)')  # reworded prompt ('overtime: yes or no')
horaextra = str(input('Funcionário fez Hora extra? ')).strip().title()
if horaextra == 'Sim':  # simple condition: did the employee work overtime
    horasex = float(input('Quantas Horas extras foram feitas? '))
print('(Responda Sim ou Não se funcionário tem horas noturnas)')  # reworded prompt ('night-shift bonus: yes or no')
adicionalnot = str(input('Funcionário tem adicional noturno para receber? ')).strip().title()
if adicionalnot == 'Sim':  # simple condition: does the employee have a night-shift bonus
    # to receive
    adn = float(input('Quantas Horas de adicional noturno foi trabalhado? '))
salariominimo = float(input('Qual o salário minimo atual para cálculo do inss? R$'))  # ask for the minimum wage
# used in the INSS calculation under the new CLT payroll rules.
print('=' * 60)
9a303cd5f01bb57c072a2702fc539a9d117a823a | Python | sai-karthikeya-vemuri/PPP | /optimizers_comparision.py | UTF-8 | 5,132 | 3.671875 | 4 | [] | no_license | """
This is a comparision between the optimizers based on loss vs iterations
A simple loss function is defined commonly for all the optimizers .
The same Neural Network is instantiated individually for every optimizer and training is done for 1000 iterations.
Each optimizer object is created and loss is minimized for 50 data points
"""
#Importing required packages and functions
import numpy as np
import autodiff as ad
from NN_architecture_2 import *
from optimizers import *
plt.style.use('dark_background')
def loss_calc(model):
    """Sum-of-squared-residuals loss of *model* against sin(x)+cos(x)+x
    evaluated on 50 equally spaced points in [-pi, pi]."""

    def target(x):
        """Ground-truth curve the network is trained to fit."""
        return np.sin(x) + np.cos(x) + x

    xs = np.linspace(-np.pi, np.pi, 50)
    ys = target(xs)
    # Wrap the sample points as an autodiff variable shaped (50, 1).
    var = ad.Variable(xs, "x")
    var = ad.Reshape(var, (50, 1))
    # Residual vector at the data points, squared and summed to a scalar.
    residual = model.output(var) - ys
    return ad.ReduceSumToShape(ad.Pow(residual, 2), ())
def _train_and_plot(optimizer_factory, label, iterations=1000):
    """Train a fresh NeuralNetLSTM(5,0,1,1) with the optimizer produced by
    *optimizer_factory* and add its loss curve to the current figure.

    optimizer_factory receives the freshly built model (so optimizers that
    need the parameter count can query it) and returns the optimizer.
    Deduplicates the six previously copy-pasted training loops.
    """
    model = NeuralNetLSTM(5, 0, 1, 1)
    optimizer = optimizer_factory(model)
    loss_list = []
    for i in range(iterations):
        params = model.get_weights()
        loss = loss_calc(model)
        print("iteration ", i)
        loss_list.append(loss())
        grad_params = ad.grad(loss, params)
        # Optimizers work on raw values; call each node to materialise them.
        new_params = optimizer([p() for p in params], [g() for g in grad_params])
        model.set_weights(new_params)
    plt.plot(np.linspace(0, iterations, iterations), loss_list, label=label)


# One curve per optimizer, trained from scratch for 1000 iterations each.
_train_and_plot(lambda m: Adamax(len(m.get_weights())), "Adamax: lr=0.00146,b1=0.9,b2=0.99")
_train_and_plot(lambda m: SGD(lr=1e-6), "SGD:lr=1e-6")
_train_and_plot(lambda m: Momentum(len(m.get_weights()), lr=1e-6), "Momenta: lr=1e-6,gamma=0.9")
_train_and_plot(lambda m: Adagrad(len(m.get_weights())), "Adagrad:lr=0.00146")
_train_and_plot(lambda m: RMSProp(len(m.get_weights())), "RMSProp:lr=0.00146,decay_rate=0.9")
_train_and_plot(lambda m: Adam(len(m.get_weights())), "Adam:lr=0.00146,b1=0.9,b2=0.999")

plt.xlabel("Iterations", fontsize=10)
plt.ylabel("Loss", fontsize=10)
plt.title("Loss vs Iterations", fontsize=15)
plt.legend()
plt.show()
| true |
4436561ac0937d9fc29f97832c59f9746b73ae69 | Python | shaffi3000/MarsRoverAttempt | /Rovers_List.py | UTF-8 | 1,343 | 3.578125 | 4 | [] | no_license | '''The RoverList class allows storage of all rovers run, and to provide the scope to have unlimited rovers. '''
class RoversList():
    """Container for every rover run; each roverList entry is a tuple
    (rover_number, start_coords, instructions, end_coords)."""

    def __init__(self):
        self.roverList = []      # journeys recorded so far
        self.minSize = 0         # lower bound of the expected rover range
        self.maxSize = 0         # upper bound of the expected rover range
        self.currentRover = 0    # number of the rover currently running
        self.roverRemaining = self.maxSize - self.minSize  # rovers still to run
        self.roversDone = len(self.roverList)              # rovers completed

    def displayRoverJourneys(self):
        """Print a summary line for every recorded rover journey."""
        for rover in self.roverList:
            roverNum, roverStart, roverInstructions, roverEnd = rover
            print(f'Rover{roverNum} started at co-ordinates {roverStart}, the intructions given were {roverInstructions} so it ended at co-ordinates {roverEnd}. \n')

    def displayIndRover(self, roverReq):
        """Print the journey of rover *roverReq*, or a not-found message.

        Bug fix: the original tracked a counter `x` that was never
        incremented, so the "Rover could not be found" message could never
        be printed; a for/else expresses the intended search directly.
        """
        for rover in self.roverList:
            roverNum, roverStart, roverInstructions, roverEnd = rover
            if roverNum == roverReq:
                print(f'Rover{roverNum} started at co-ordinates {roverStart}, the intructions given were {roverInstructions} so it ended at co-ordinates {roverEnd}. \n')
                break
        else:
            # Loop finished without a break: no matching rover number.
            print("Rover could not be found")
'''Creates a rovers list object'''
roverL = RoversList()
| true |
f8b18d2c7c29476e7869ec13e28e83418de5e089 | Python | Kilmani/CryptoPrim | /ciphers/rol.py | UTF-8 | 1,821 | 2.8125 | 3 | [] | no_license | import saveKey, random, Double, grouper, math
lengthBlock = 8
def encodeRol(text, iter, round):
    """Encrypt *text* by rotating each character's 8-bit pattern by one.

    Also persists the (constant) key via saveKey for later decoding; *iter*
    and *round* are only passed through to saveKey for bookkeeping.
    Each character code is rendered as an 8-digit "binary" string via the
    project helper Double.double, rotated, regrouped into lengthBlock-sized
    chunks and converted back to characters.
    """
    # Generate the key and write it to file
    key = 1
    saveKey.saveKey(key, "ROL", round, iter)
    # Convert the text to ASCII codes
    asciiText = [ord(c) for c in text]
    binaryText = []
    for i in range(len(asciiText)):
        binaryTextBlock = int(Double.double(asciiText[i]))  # Double.double: project helper, presumably decimal->binary digits — confirm
        while len(str(binaryTextBlock)) != 8:
            binaryTextBlock = "0" + str(binaryTextBlock)  # left-pad to 8 "bits"
        binaryText.append(binaryTextBlock)
    encodeText = ""
    for i in binaryText:
        temp = i
        temp = shifttext(temp, 1)  # steps=1 moves the last bit to the front (rotate right)
        temp = ''.join(e for e in temp)
        encodeText += temp
    encodeText = grouper.grouper(encodeText, lengthBlock)  # regroup into 8-char blocks
    encodeText = ''.join(chr(int(e, 2)) for e in encodeText)  # bits -> characters
    return encodeText
def decodeRol(text, key):
    """Invert encodeRol: rotate each character's 8-bit pattern back by one.

    *key* is accepted for interface symmetry but is not used — the rotation
    amount is fixed at one bit (steps=-1).
    """
    # Convert the text to ASCII codes
    asciiText = [ord(c) for c in text]
    binaryText = []
    for i in range(len(asciiText)):
        binaryTextBlock = int(Double.double(asciiText[i]))
        while len(str(binaryTextBlock)) != 8:
            binaryTextBlock = "0" + str(binaryTextBlock)  # left-pad to 8 "bits"
        binaryText.append(binaryTextBlock)
    decodeText = ""
    for i in binaryText:
        temp = i
        # count += len(str(temp))
        temp = shifttext(temp, -1)  # steps=-1 moves the first bit to the end, undoing the encode rotation
        temp = ''.join(e for e in temp)
        decodeText += temp
    decodeText = grouper.grouper(decodeText, lengthBlock)
    decodeText = ''.join(chr(int(e, 2)) for e in decodeText)
    return decodeText
def shifttext(lst, steps):
    """Rotate the characters of str(lst) and return them as a list.

    Positive *steps* rotates right (last characters move to the front),
    negative rotates left — matching the original pop/insert behaviour.

    Improvements: O(n) slice rotation instead of |steps| pop/insert calls
    (list.insert(0, ...) is O(n) each), steps larger than the length are
    reduced modulo the length, and empty input is returned unchanged
    instead of raising on pop().
    """
    chars = list(str(lst))
    if not chars:
        return chars
    k = steps % len(chars)
    if k == 0:
        return chars
    return chars[-k:] + chars[:-k]
c814a2ef7be117843940220028cbbccf6613c1a2 | Python | Albinutte/football-prediction | /Extraction/season_2013_2014/form_extraction.py | UTF-8 | 1,895 | 3.078125 | 3 | [] | no_license | # Форма рассчитывается по формуле
# sum / 10, где
# sum - сумма очков за матч:
# 2 за победу
# 1 за ничью
# 0 за поражение
import useful_functions as uf
import re
def get_form(url):
    """Gets teams and their forms from url.

    Form = points over the last five matches / 10 (2 per win, 1 per tie,
    0 per loss), read from the page's _win/_tie/_lose CSS classes.
    Returns [name1, name2, form1, form2, results...] or None when fewer
    than 10 results are available.
    """
    soup = uf.get_soup(url)
    res = []
    #: adding names
    res += uf.get_names(soup)
    # : counting form
    history = []
    for i in soup.findAll(attrs={'class': re.compile('(_win)|(_tie)|(_lose)')}):
        history.append(i['class'])
    if len(history) < 10:
        return None
    elif len(history) < 12:
        # 10-11 entries: both teams' last-5 windows start immediately.
        start1 = 0
        start2 = 5
    else:
        # NOTE(review): offsets 1 and 7 skip leading entries and assume a
        # specific page layout — confirm against the scraped markup.
        start1 = 1
        start2 = 7
    form1 = 0
    form2 = 0
    for i in range(start1, start1 + 5):
        if history[i] == ['_win']:
            form1 += 2
        elif history[i] == ['_tie']:
            form1 += 1
    for i in range(start2, start2 + 5):
        if history[i] == ['_win']:
            form2 += 2
        elif history[i] == ['_tie']:
            form2 += 1
    # Normalise to the 0..1 range described in the file header formula.
    form1 /= 10
    form2 /= 10
    res = res + [form1] + [form2]
    #: adding result
    res += uf.get_results(soup)
    return res
def get_all_forms(path="./extracted_form_13_14.txt"):
"""Extracting all form to file"""
with open(path, "w", encoding='windows-1251') as handle:
soup = uf.get_soup()
cnt = 0
print("Starting extracting forms")
handle.write('name1\tname2\tform1\tform2\tresult\n')
for i in soup.findAll(attrs={'class': '_res'}):
cnt += 1
print(cnt)
form = get_form('http://www.championat.com' + i.findAll('a')[0]['href'])
if form is not None:
handle.write('\t'.join(str(e) for e in form) + '\n')
if cnt % 5 == 0:
handle.flush()
print("Forms extraction finished") | true |
ebdd04edd742b8d03fe3dc74c2eb854a8563ba8e | Python | gorilla-Kim/algorithm | /Basic/p1204.py | UTF-8 | 211 | 3.328125 | 3 | [] | no_license | strlist = {1:"st", 2:"nd", 3:"rd", 4:"th"}
# Read a number and print it with its English ordinal suffix
# (strlist maps 1->"st", 2->"nd", 3->"rd", 4->"th").
num = input()
# Tens digit 1 (i.e. 10-19) always takes "th".
# NOTE(review): num//10 == 1 only covers 10-19, so e.g. 111 still yields
# "111st" — confirm the intended input range.
if((int(num)//10)==1 ):
    print(num+strlist[4])
else:
    # Otherwise the suffix follows the last digit (1/2/3 -> st/nd/rd),
    # defaulting to "th" for 0 and 4-9.
    print("{0}{1}".format(num, strlist[int(num)%10 if int(num)%10<4 and int(num)%10!=0 else 4]))
| true |
c278ae9a2d89629fd38907f2ac626723c6781c00 | Python | wenwei-dev/motor-calibration | /evaluate.py | UTF-8 | 1,045 | 2.6875 | 3 | [] | no_license | import pandas as pd
import numpy as np
import os
import yaml
def evaluate(shapekey_values, x):
    """Apply a learned linear mapping to shape-key frames.

    shapekey_values: 2-D table (frames x shape-keys).  x: coefficient
    vector whose first shapekey-count entries weight the columns and whose
    last entry is a bias.  The bias is added to every weighted column
    before the row sum (preserved original behaviour).  Returns the
    per-frame sums.

    Improvement: the intermediate no longer shadows the built-in sum().
    """
    param_num = shapekey_values.shape[1]
    weighted = x[:param_num] * shapekey_values + x[-1]
    return weighted.sum(axis=1)
def run(motor_config_file, pau_data_file, model_file):
params_df = pd.read_csv(model_file, index_col=0)
pau_values = pd.read_csv(pau_data_file)
with open(motor_config_file) as f:
motor_configs = yaml.load(f)
motor_names = params_df.columns.tolist()
for motor_name in motor_names:
try:
motor = [motor for motor in motor_configs if motor['name'] == motor_name][0]
except Exception as ex:
print 'Motor is not found in configs'.format(motor_name)
continue
x = params_df[motor_name]
values = evaluate(pau_values, x)
values = values*(motor['max']-motor['min'])+motor['init']
print values
if __name__ == '__main__':
run('motors_settings.yaml', 'data/shkey_frame_data.csv', 'motor_mapping_model.csv')
| true |
f30d87cf055551e6e288f2530d75735d51fcb81e | Python | wanleung/linne-analyzer | /src/linne/analyzer/sound/sound.py | UTF-8 | 539 | 2.609375 | 3 | [] | no_license | # Sound Data Type
# Sound Data Type
class Sound:
    """A phonetic sound plus the audio-feature filter used to detect it."""

    def __init__(self):
        self.phonetic = None   # phonetic spelling
        self.ipa = None        # IPA symbol
        self.filter = None     # which feature to threshold: "RMS", "SV" or "ZCR"
        self.threshold = None  # detection threshold for that feature
        self.remarks = None    # free-form notes

    def passThreshold(self, frame):
        """Return True when *frame*'s selected feature exceeds the threshold."""
        frame_key_by_filter = {
            "RMS": "RMS",
            "SV": "Spectrum Variance",
            "ZCR": "ZCR",
        }
        key = frame_key_by_filter.get(self.filter)
        if key is None:
            return False  # unknown or unset filter never passes
        return frame[key] > self.threshold
| true |
cbae0736507d53f8e77f6e803b2402283fa61b3b | Python | lemduc/CSCI622-Advanced-NLP | /HW2/1.Create_bigram.py | UTF-8 | 2,218 | 2.703125 | 3 | [] | no_license | import collections
# Build a bigram (previous-state, tag) weighted FSA from "word/TAG" training
# lines.  States correspond to tags, arcs collect the words observed for each
# (state, tag) pair, and arc probabilities are relative word counts.
start_state = final_state = 0
lastest_state = 1
mapping_state = dict()
mapping_next = dict()
# Sentence-final punctuation tags return to state 0.
mapping_next['.'] = 0
mapping_next[','] = 0
total_per_state = dict()
with open('train-data') as f:
    content = f.readlines()
count = 0
current_state = 0
next_state = 0
for line in content:
    w = line.split('/')[0].lower()
    # [:-1] strips the trailing newline from the tag field.
    t = line.split('/')[1][:-1]
    # Skip bracket/quote/punctuation tokens and tags ('.' and ',' are kept
    # deliberately -- see the commented-out clauses below).
    if w == "#" or w == "''" or w == "'" or w == ":" or w == ";" or w == "$" or w == ")" or w == "(" or w == "?" or w == "!" or w == "}" or w == "{" or w == "``" or \
    t == "#" or t == "''" or t == "'" or t == ":" or t == ";" or t == "$" or t == ")" or t == "(" or t == "?" or t == "!" or t == "}" or t == "{" or t == "``":
        # w == "." or w == "," or
        # t == "." or t == "," or
        continue
    all_tags = list()
    # A token may carry alternative tags separated by '|'.
    split_t = t.split("|")
    for single_t in split_t:
        # Allocate a fresh state id for tags not seen before.
        if single_t in mapping_next:
            next_state = mapping_next[single_t]
        else:
            next_state = lastest_state
            mapping_next[single_t] = next_state
            lastest_state +=1
        # Append the word onto the (current_state, tag) arc's word list.
        if (current_state, single_t) in mapping_state.keys():
            all_tags = mapping_state[(current_state, single_t)]
        all_tags.append(w)
        mapping_state[(current_state, single_t)] = all_tags
        current_state = next_state
        count += 1
        #print(single_t,w)
print(count)
# write wfst file
f = open('bigram.wfsa', 'w')
f.write('%%%%%% Filename: bigram.wfsa %%%%%%\n')
f.write(str(final_state) + '\n')
output = list()
# First pass: total word count per source state (normalization constants).
for key in mapping_state.keys():
    state = key[0]
    total = 0
    if state in total_per_state.keys():
        total = total_per_state[state]
    total += len(mapping_state[key])
    total_per_state[state] = total
# Second pass: emit one arc per (state, tag) with its relative frequency.
for key in mapping_state.keys():
    state = key[0]
    tag = key[1]
    next_state = mapping_next[tag]
    p = len(mapping_state[key])/total_per_state[state]
    output.append((state, next_state, tag, p))
    #print("done")
# Sort arcs by source state for a stable, readable output file.
output.sort(key=lambda tup: tup[0])
for o in output:
    f.write('({} ({} "{}" {}))'.format(*o) + "\n")
    print('({} ({} "{}" {}))'.format(*o))
f.close() | true |
8eb0c9edc5a3b67ac03a09b276a661e73d9006c3 | Python | sapurvaa/HackerRank-Problems | /find_the_runner_up_score.py | UTF-8 | 343 | 2.84375 | 3 | [] | no_license | if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
largest = max(arr)
x = []
for i in arr:
if (largest-i) != 0:
x.append(largest-i)
if len(x) != 0:
smallest_diff = min(x)
print(largest-smallest_diff)
else:
print("no runner up")
| true |
742cd7a98d2afdf1d8899fa6f21356597451950a | Python | linter0663/EPS-Jetson-Nano | /visualize.py | UTF-8 | 5,702 | 2.609375 | 3 | [] | no_license | from keras.models import load_model
import numpy as np, pandas as pd, matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from keras.models import Sequential
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional
from sklearn.metrics import mean_squared_error, accuracy_score
from scipy.stats import linregress
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
import tensorflow as tf
import matplotlib.dates as mdates
import datetime
# Identifier of the data set to analyse (week tag used in the CSV name).
today = "data_2020W45"
print("Opening data...")
# NOTE(review): 'today' already starts with "data_", so this resolves to
# "data_data_2020W45.csv" -- confirm that is the intended file name.
fi = 'data_'+str(today)+'.csv'
raw = pd.read_csv(fi, delimiter=',', engine='python' )
# Drop the timestamp column; only the measurement columns are modelled.
raw = raw.drop('Time stamp', axis=1)
print("raw shape:")
print (raw.shape)
def plot(true, predicted, divider):
    """Plot true vs. predicted power (first feature only), both mapped back
    to physical units via the module-level 'scaler'.  If divider > 0, a
    vertical black line marks the train/test split at that time step."""
    # [0] drops the leading batch dimension; inverse_transform undoes the
    # MinMax scaling applied before training.
    predict_plot = scaler.inverse_transform(predicted[0])
    true_plot = scaler.inverse_transform(true[0])
    # Keep only the first feature (active power).
    predict_plot = predict_plot[:,0]
    true_plot = true_plot[:,0]
    plt.figure(figsize=(16,6))
    plt.plot(true_plot, label='True',linewidth=1)
    #plt.plot(true_plot, label='True PVPG',linewidth=1)
    plt.plot(predict_plot, label='CNN_LSTM_5',color='y',linewidth=1)
    if divider > 0:
        # Span the split marker over the full vertical data range.
        maxVal = max(true_plot.max(),predict_plot.max())
        minVal = min(true_plot.min(),predict_plot.min())
        plt.plot([divider,divider],[minVal,maxVal],label='train/test limit',color='k')
    plt.ylabel('Active power consumed [W]')
    plt.xlabel('Time [/min]')
    plt.legend()
    plt.show()
def plot2(true, predicted, divider):
    """Plot true vs. predicted power (first feature only) without the
    train/test divider line.

    ``divider`` is kept for call-site compatibility but is unused: the
    original body computed maxVal/minVal inside an ``if divider > 0``
    branch and never used them, so that dead code has been removed.
    """
    # [0] drops the batch dimension; inverse_transform restores watts.
    predict_plot = scaler.inverse_transform(predicted[0])
    true_plot = scaler.inverse_transform(true[0])
    # Keep only the first feature (active power).
    predict_plot = predict_plot[:,0]
    true_plot = true_plot[:,0]
    plt.figure(figsize=(16,6))
    plt.plot(true_plot, label='True',linewidth=1)
    plt.plot(predict_plot, label='CNN_LSTM_5',color='y',linewidth=1)
    plt.ylabel('Active power consumed [W]')
    plt.xlabel('Time [/min]')
    plt.legend()
    plt.show()
# Scale every feature to [-1, 1]; the same scaler is reused by plot()/plot2()
# to map predictions back to physical units.
scaler = MinMaxScaler(feature_range=(-1, 1))
raw = scaler.fit_transform(raw)
time_shift = 1 #shift is the number of steps we are predicting ahead
n_rows = raw.shape[0] #n_rows is the number of time steps of our sequence
n_feats = raw.shape[1]
# 80/20 chronological train/test split.
train_size = int(n_rows * 0.8)
train_data = raw[:train_size, :] #first train_size steps, all 5 features
test_data = raw[train_size:, :] #I'll use the beginning of the data as state adjuster
# Inputs: every step except the last 'time_shift' ones; targets: the same
# series shifted forward by 'time_shift' steps.
x_train = train_data[:-time_shift, :] #the entire train data, except the last shift steps
x_test = test_data[:-time_shift,:] #the entire test data, except the last shift steps
x_predict = raw[:-time_shift,:] #the entire raw data, except the last shift steps
y_train = train_data[time_shift:, :]
y_test = test_data[time_shift:,:]
y_predict_true = raw[time_shift:,:]
# Reshape to (1 sequence, n steps, n features) as the recurrent model expects.
x_train = x_train.reshape(1, x_train.shape[0], x_train.shape[1]) #ok shape (1,steps,5) - 1 sequence, many steps, 5 features
y_train = y_train.reshape(1, y_train.shape[0], y_train.shape[1])
x_test = x_test.reshape(1, x_test.shape[0], x_test.shape[1])
y_test = y_test.reshape(1, y_test.shape[0], y_test.shape[1])
x_predict = x_predict.reshape(1, x_predict.shape[0], x_predict.shape[1])
y_predict_true = y_predict_true.reshape(1, y_predict_true.shape[0], y_predict_true.shape[1])
# Report tensor shapes for sanity checking.
print("\nx_train:")
print (x_train.shape)
print("y_train")
print (y_train.shape)
print("x_test")
print (x_test.shape)
print("y_test")
print (y_test.shape)
# Load the trained network saved for this data set.
model_A = tf.keras.models.load_model('NN_'+str(today)+'.h5')
# Predictions over the full series, the test split, and the train split.
y_predict_model = model_A.predict(x_predict)
y_predict_model2 = model_A.predict(x_test)
y_predict_model3 = model_A.predict(x_train)
print("\ny_predict_true:")
print (y_predict_true.shape)
print("y_predict_model_global: ")
print (y_predict_model.shape)
print("y_predict_model_validation: ")
print (y_predict_model2.shape)
print("y_predict_model_train: ")
print (y_predict_model3.shape)
test_size = n_rows - train_size
print("test length: " + str(test_size))
#print("-------------------------------MSE------------------------------------------------")
# Error metrics comparing predictions with ground truth:
# suffix-less = global series, 2 = validation/test split, 3 = train split.
mse = np.square(np.subtract(y_predict_true,y_predict_model)).mean()
mse2 = np.square(np.subtract(y_test,y_predict_model2)).mean()
mse3 = np.square(np.subtract(y_train,y_predict_model3)).mean()
#print("-------------------------------RMSE---------------------------------------------")
rmse = np.sqrt(mse)
rmse2 = np.sqrt(mse2)
rmse3 = np.sqrt(mse3)
#print("-------------------------------MAE------------------------------------------------")
mae = np.abs(np.subtract(y_predict_true,y_predict_model)).mean()
mae2 = np.abs(np.subtract(y_test,y_predict_model2)).mean()
mae3 = np.abs(np.subtract(y_train,y_predict_model3)).mean()
print("--------------------------------MSE-----------------------------------------------")
print("MSE metrics for CNN_LSTM_5 model:")
print("MSE validation: " + str(mse2))
print("MSE train: " + str(mse3))
print("MSE global: " + str(mse))
print("--------------------------------RMSE-----------------------------------------------")
print("RMSE metrics for CNN_LSTM_5 model:")
print("RMSE validation: " + str(rmse2))
print("RMSE train: " + str(rmse3))
print("RMSE global: " + str(rmse))
print("--------------------------------MAE-----------------------------------------------")
print("MAE metrics for CNN_LSTM_5 model:")
print("MAE validation: " + str(mae2))
print("MAE train: " + str(mae3))
print("MAE global: " + str(mae))
# Full-series plot with train/test divider, a zoom on the tail, and a
# test-split-only plot.
plot(y_predict_true,y_predict_model,train_size)
plot(y_predict_true[:,-2*test_size:],y_predict_model[:,-2*test_size:],test_size)
plot2(y_test,y_predict_model2,test_size)
| true |
b4e4d5cec45b0417b02c2c653fc0b011bb91204f | Python | YuiTH/ML-lec4 | /ML_Lec4/plot.py | UTF-8 | 2,582 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 10:32:09 2019
@author: Lenovo
"""
from readFile import get3ClassData
import numpy as np
import matplotlib.pyplot as plt
from bi_logistic_reg_sgd import logistic_reg_predict
from preprocess import preprocess
# x, y = get3ClassData()
# x0, y0 = x[0:50], y[0:50]
# x1, y1 = x[50:100], y[50:100]
# x2, y2 = x[100:150], y[100:150]
# plt.scatter(x0[:,0], x0[:,1], c='green')
# plt.scatter(x1[:,0], x1[:,1], c='blue')
# plt.scatter(x2[:,0], x2[:,1], c='red')
def plot_step(total_acc, total_loss, x, y, num_class, w_list, b_list,pred_fun):
    """Redraw one animation frame: accuracy curve (top-left), loss curve
    (top-right), and the current decision boundary (bottom-left) computed
    from the most recent weights/bias in w_list/b_list."""
    # Nothing to draw before the first training step has produced weights.
    if len(w_list) == 0:
        return
    # plt.figure(1)
    plt.ion()
    plt.cla()
    plt.subplot(221)
    plt.title('Acc')
    # Markers on every 5th point, plus a continuous line underneath.
    plt.scatter(range(0, len(total_acc), 5),
                total_acc[::5], s=9,color='blue') # acc plot
    plt.plot(range(len(total_acc)), total_acc, color='blue')
    plt.subplot(222)
    plt.title('Loss')
    plt.scatter(range(0, len(total_loss), 5),
                total_loss[::5], s=9,color='red') # loss plot
    plt.plot(range(len(total_loss)), total_loss, color='red')
    plt.subplot(223)
    # plot_decision_boundary(logistic_reg_predict, x, w_list[-1], b_list[-1], y)
    plot_decision_boundary(pred_fun, x, w_list[-1], b_list[-1], y)
    # plt.plot([3,4],[4,5])
    # for i in range(num_class):
    #     xx = x[y == i]
    #     plt.scatter(xx[:, 0], xx[:, 1], s=5)
    # plt.scatter(x[:,0],x[:,1],s=5)
    # Short pause lets the GUI event loop repaint in interactive mode.
    plt.pause(0.005)
    # plt.ioff()
    plt.show()
def plot_steps(total_acc, total_loss, x, y, num_class, w_list, b_list,pred_fun):
    """Animate the whole training history one step at a time.

    pred_fun selects the prediction function: "per" for the multi-class
    perceptron, "logi" for logistic regression.
    NOTE(review): any other value leaves 'f' unbound and the plot_step call
    raises NameError -- consider validating pred_fun explicitly.
    """
    x = preprocess(x)
    if pred_fun == "per":
        f = predict_multi_perception
    elif pred_fun == "logi":
        f = logistic_reg_predict
    for i in range(len(total_acc)):
        # Replay history prefixes [0:i] so curves grow frame by frame.
        plot_step(total_acc[:i],
                  total_loss[:i],
                  x, y, num_class,
                  w_list[:i], b_list[:i],f)
    plt.ioff()
    plt.show()
def plot_decision_boundary(pred_func, X, w, b, y):
    """Shade pred_func(w, ., b) over a dense 2-D grid covering X (with a
    0.5 margin on each side), then scatter the labelled samples on top."""
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    # Grid resolution.
    h = 0.01
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    # Evaluate the classifier on every grid point, then restore grid shape.
    Z = pred_func(w, np.c_[xx.ravel(), yy.ravel()], b)
    Z = Z.reshape(xx.shape)
    # print(Z)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral,s=5)
def predict_multi_perception(w, x, b): # (N, 3)
    """Linear multi-class perceptron prediction: for 3 output columns,
    return the argmax class index per sample (the 2-column case is handled
    by the final return below this excerpt)."""
    z = x@w+b
    pred_index = z.argmax(axis=1)
    if z.shape[1] == 3:
        return pred_index
return z > 0 | true |
68c16c6568ac9f5b465147483359d62419a6332b | Python | raniels/01a-Exercises-Arithmatic | /exercises.py | UTF-8 | 4,785 | 4.84375 | 5 | [
"MIT"
] | permissive | '''
01a Exercises
These exercises should help you get the flavor of how to perform arithmetic and string operations in Python.
You will also get to play with (pseudo-)random generators and the range operator.
These skills will all be used in assignment 2.
To answer these exercises, open the IDLE program that came with your Python installation. IDLE is a line-by-line Python interpreter.
You can copy lines from this file into IDLE to interpret them and produce a result. Then copy the result back to the following line in this file (after the #).
You will also need to answer several questions to show you understand what is happening.
'''
# Math in Python is what you would expect. Add comments with the answers the IDLE returns. I'll do the first one for you.
10 + 15
#25
8 - 1
#7
10 * 2
#20
35 / 5
#7.0
35 / 4
#8.75
35 // 4
#8
# What is the difference between the / operator and the // operator?
# The / operator divides and gives the exact value (if there is an exact value), while the // operator divides then rounds down to the nearest whole number.
2 ** 5
#32
# What does the ** operator do?
# The operator ** takes the the first number to the 2nd number's power. (So 2 ** 3 would be 2 * 2 * 2)
5 % 3
#2
5 % 2
#1
5 % 4
#1
# What does the % operator do?
# The % operator divides then outputs the remainder
(1 + 3) * 2
#8
# What effect do the parenthesis have on this statement?
#It makes it so that 1 and 3 are added first instead of 3 being multiplied by 2 then add 1. Order of operations.
# Data in python is of different flavors or "types," each with its own characteristics
type(3)
#<class 'int'>
type(3.0)
#<class 'float'>
type("word")
#<class 'str'>
type(True)
#<class 'bool'>
type(False)
#<class 'bool'>
type(None)
#<class 'NoneType'>
# None is a special object in python. We will talk more about it later
# It is possible to convert from one type to another.
int(3.0)
#3
float(7)
#7.0
str(55)
#'55'
bool(1)
#True
# How can you tell the difference between these four different types?
#Float gives a decimal, int gives an integer, str gives a string (is surrounded by ' '), while bool outputs true or fales
# Strings are created with single or double-quotes
"This is a string."
'This is also a string.'
"Hello " + "world!"
# What does the + operator do here?
#It combines both strings and outputs them as one (ex. 'Helloworld')
"This is a string"[0]
#T
"This is a string"[5]
#i
"This is a string"[8]
#a
# What is happening as you change the number?
#It changes which character is returned: the number is the index (position) of the character, with 0 being first. Spaces count as characters too (index 4 here is the space). So in this example changing the number to 3 would give you 's'.
"This is a string"[-1]
#'g'
# What happens when you use a negative number?
#It starts from the end of the string
"%s can be %s" % ("strings", "interpolated")
# What is happening here?
#can be is being inserted in the string between strings and interpolated
# A more robust (and modern) way to put things into strings is using the format method
"{0} can be {1}".format("strings", "formatted")
#'strings can be formatted'
# You can use names instead of numbers to make it easier to keep things straight
"{name} wants to eat {food}".format(name="Bob", food="lasagna")
#'Bob wants to eat lasagna'
# You have already met the print method
print("I'm Python. Nice to meet you!")
# Here is its sibling, the input method
n = input("What is your name? ")
print("Hello, " + n)
#Hello, Python
# What just happened?
#After entering the input command it asked me to provde an input (what n would stand for), and after entering the 2nd line it replaced + n with my answer.
# For your next assignment, you will need to use random numbers
# first we need to get a few methods from the library called random
from random import random,randint,shuffle,sample
random()
# Run this line a few times. What is happening here?
# It is giving me random floating-point numbers between 0.0 (inclusive) and 1.0 (exclusive).
randint(1,100)
# How is this different?
#It changed the range to 1 to 100 as well as forcing the answer to be an integer
# The next few use a list of numbers from 0 to 9
items = [0, 1,2,3,4,5,6,7,8,9]
shuffle(items)
print(items)
# What just happened?
# It put the items in a random order, that does not change until you shuffle again.
print(sample(items, 1))
# What does this do?
# It gives me a random item
print(sample(items, 5))
# What does the second parameter control?
# The amount of items it will give me
for i in range(0,5):
print(i)
#0
#1
#2
#3
#4
# What is happening here? What happens if you change the two range parameters?
#It is giving me the integers between the 2 parameters, including the first number, but not the last one. If I change the parameters it will give me the integers between those, including the first parameter, but not the last one.
| true |
790da40149f7eaa72911c8403eec1007e39b8e6a | Python | gitdog01/AlgoPratice | /study/pratice/d.py | UTF-8 | 975 | 3.046875 | 3 | [] | no_license | def solve(snapshots, transactions):
my_snap = {}
my_tran = [False for _ in range(len(transactions))]
for snap in snapshots:
my_snap[snap[0]] = int(snap[1])
for tran in transactions:
if my_tran[int(tran[0])]:
continue
else:
my_tran[int(tran[0])] = True
if tran[2] not in my_snap:
my_snap[tran[2]] = 0
if tran[1] == "SAVE":
my_snap[tran[2]] += int(tran[3])
else:
my_snap[tran[2]] -= int(tran[3])
result = []
for key in my_snap:
result.append([key, my_snap[key]])
return result
# Sample fixture: two seeded accounts plus five transactions; the second
# transaction with id "1" is a duplicate and must be ignored by solve().
snapshots = [
    ["ACCOUNT1", "100"],
    ["ACCOUNT2", "150"]
]
transactions = [
    ["1", "SAVE", "ACCOUNT2", "100"],
    ["2", "WITHDRAW", "ACCOUNT1", "50"],
    ["1", "SAVE", "ACCOUNT2", "100"],
    ["4", "SAVE", "ACCOUNT3", "500"],
    ["3", "WITHDRAW", "ACCOUNT2", "30"]
]
# Expected: [['ACCOUNT1', 50], ['ACCOUNT2', 220], ['ACCOUNT3', 500]]
print(solve(snapshots, transactions))
| true |
9d08696a8a6eb2770aa2b4777a07db3f25ab3e90 | Python | p4telj/subnet-calculators | /networking/IPRange.py | UTF-8 | 3,539 | 3.375 | 3 | [] | no_license | """
IPRange.py
Contains class definition.
"""
import copy
from networking import IP
class IPRange:
    """Represents a range of IPv4 addresses."""
    def __init__(self, *, first_ip=None, second_ip=None, cidr=None):
        """
        Constructor.
        (1) Create an IP range given 2 IPs (order-insensitive; the range is
            stored sorted), or
        (2) Create an IP range given a CIDR block.
            - Utilizes netmask to determine IP range.
            - E.g. 10.0.0.0/18
                - Netmask = 11111111.11111111.11000000.00000000 = 255.255.192.0
                - Range = 10.0.0.0 to 10.0.63.255
        Raises ValueError for a malformed CIDR or non-IP endpoints.
        """
        if cidr is not None:
            try:
                base_ip = cidr.base_ip
                mask = cidr.mask
                # NOTE(review): 'hosts' is assigned but unused; cidr.hosts is
                # read again directly below.
                hosts = cidr.hosts
                # for each octet in an IP address
                primary_ip_octets = []
                secondary_ip_octets = []
                for i in range(IP.OCTETS):
                    # for each octet, grab <= 8 bits (# bits per octet) from mask to use
                    bits = IP.BITS_PER_OCTET if mask >= IP.BITS_PER_OCTET else mask
                    mask -= bits
                    # bitwise ^ (xor) to calculate netmask segment
                    netmask_octet = IP.MAX_OCTET_NUM ^ ((2**(IP.BITS_PER_OCTET-bits)) - 1)
                    # ip segment bitwise & (and) with netmask segment to calculate primary IP
                    ip_octet = netmask_octet & base_ip[i]
                    primary_ip_octets.append(ip_octet)
                    secondary_ip_octets.append(ip_octet)
                first_ip = IP(ip_list=primary_ip_octets)
                # Top of the range = network address advanced by (host count - 1).
                second_ip = IP(ip_list=secondary_ip_octets).add_hosts(cidr.hosts - 1)
                # now, gets evaluated by next "if" statement and gets placed into self.range
            except Exception as e:
                raise ValueError(f"({e}) Incorrect CIDR input to IPRange. Must be a valid instance of type CIDR.")
        if isinstance(first_ip, IP) and isinstance(second_ip, IP):
            # IPRange must be sorted at all times
            if first_ip < second_ip:
                self.range = [copy.deepcopy(first_ip), copy.deepcopy(second_ip)]
            else:
                self.range = [copy.deepcopy(second_ip), copy.deepcopy(first_ip)]
        else:
            raise ValueError("Incorrect IPRange inputs: " +
                             "Either pass in first_ip, second_ip of type IP or cidr of type CIDR.")
        # determine # of hosts (inclusive start/end IPs)
        # NOTE(review): plain IP subtraction with no +1 -- confirm IP.__sub__
        # already counts inclusively.
        self.hosts = self.range[1] - self.range[0]
        return
    def is_within(self, other):
        """Determines if current IPRange is within other IPRange."""
        # Inside iff our start >= their start and our end <= their end.
        return self.range[1] <= other.range[1] if self.range[0] >= other.range[0] else False
    def does_overlap(self, other):
        """Does current IPRange overlap with other IPRange."""
        # internal
        if self.is_within(other):
            return True
        # external
        # Ranges overlap unless one ends entirely before the other begins.
        return not (self.range[1] < other.range[0] or self.range[0] > other.range[1])
    def __str__(self):
        """String representation."""
        return "{} to {}".format(self.range[0], self.range[1])
    def __lt__(self, other):
        """< comparator. Assuming the IPRanges don't overlap."""
        return self.range[1] < other.range[0]
    def __le__(self, other):
        """<= comparator. Assuming the IPRanges don't overlap."""
        return self.range[1] <= other.range[0]
    def __getitem__(self, index):
        """[] override."""
        return self.range[index]
| true |
cea5245ccd42d107c9087c7b6865d8d597005ce5 | Python | yeomye/pyworks | /day25/customer_manage/main2.py | UTF-8 | 852 | 3.8125 | 4 | [] | no_license | # 객체(인스턴스)를 리스트로 관리
from customer_class import Customer, GoldCustomer, VIPCustomer
# Create customer objects (two regular, two gold, one VIP).
c1 = Customer(101, '흥부')
c2 = Customer(102, '놀부')
gold1 = GoldCustomer(201,'콩쥐')
gold2 = GoldCustomer(202,'팥쥐')
vip = VIPCustomer(301, '심청', 777)
# Manage all customers through a single list.
customer = [] # start with an empty list
customer.append((c1))
customer.append((c2))
customer.append((gold1))
customer.append((gold2))
customer.append((vip))
print('============ 구매가격과 보너스 포인트 계산 ============')
price = 10000 # product price: 10,000 won
for c in customer:
    cost=c.calc_price(price) # purchase price (with tier discount applied)
    print(c.getname()+'님의 지불 금액은 '+format(cost, ',d')+'원입니다.')
print('================== 고객 정보 출력 ==================')
for c in customer:
c.showInfo() | true |
5140d3852d28d72ec25295d7d963b8dc2297f4f5 | Python | mauricesandoval/Tech-Academy-Course-Work | /Python/Tkinter/Organizational_Widgets/01_frameOutput.py | UTF-8 | 525 | 2.640625 | 3 | [] | no_license | Python 3.5.1 (v3.5.1:37a07cee5969, Dec 6 2015, 01:54:25) [MSC v.1900 64 bit (AMD64)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> from tkinter import *
>>> from tkinter import ttk
>>> root = Tk()
>>>
>>> frame = ttk.Frame(root)
>>> frame.pack()
>>> frame.config(height = 100, width = 200)
>>> frame.config(relief = RIDGE)
>>> ttk.Button(frame, text = 'Click Me').grid()
>>> frame.config(padding = (30, 15))
>>> ttk.LabelFrame(root, height = 100, width = 200, text = 'My Frame').pack()
>>>
| true |
245deccba1032522c7e3d478e74164f4064e9da7 | Python | pierreCarvalho/Topicos_Avancados_em_Informatica | /CuboMagico/cubomagico.py | UTF-8 | 2,146 | 3.890625 | 4 | [] | no_license | #regra para a inserção dos numeros
# Rule for inserting the numbers (Siamese method for odd-order magic squares):
# Define cell 1 as the middle of the top row.
# Always place the numbers in sequence (1, 2, 3, 4, etc.), moving one cell up
# and one to the right.
# Conditions:
# - If the sequence lands one cell above the top row of the magic square,
#   stay in that column but place the number in its bottom row.
# - If the sequence lands one cell to the right of the rightmost column,
#   stay in that row but place the number in its leftmost column.
# - If the sequence lands on an already-numbered cell, go back to the last
#   numbered cell and place the next number directly below it.
from random import randint
n = int(input("Informe o n para o cubo:"))
# Magic constant: every row/column of an n x n magic square must sum to this.
c_magico = (n * (n*n + 1)) / 2
# Cell values range from 1 to n*n.
valor = 1
print("A constante é: ",c_magico)
print("Os valores poderão ir de 1 à {}".format(n*n))
print(input("Tecle algo para continuar...."))
# Brute force: shuffle 1..n*n into an n x n matrix until every row and every
# column sums to the magic constant (diagonals are not checked).
flag = True
while flag:
    matriz = []
    numeros = []
    for i in range(n*n):
        numeros.append(i+1)
    # Fill the matrix by drawing random remaining numbers without repetition.
    for i in range(n):
        linha = []
        for j in range(n):
            valor = numeros[randint(0,(len(numeros)-1))]
            linha.append(valor)
            numeros.remove(valor)
        matriz.append(linha)
    contador = 0
    # Check the row sums.
    for i in range(n):
        valor = 0
        for j in range(n):
            valor += matriz[i][j]
        if(valor == c_magico):
            #print("A linha {} passou".format(i))
            contador += 1
    # Generalized from the hard-coded '== 3': all n rows must match.
    if contador == n:
        print(matriz)
        flag = False
    # Check the column sums.  (Bug fix: the original had a stray 'p' on this
    # line, which raised NameError on every loop iteration.)
    for i in range(n):
        valor = 0
        for j in range(n):
            valor += matriz[j][i]
        if(valor == c_magico):
            #print("A coluna {} passou".format(j))
            contador += 1
    # Generalized from '== 6': n rows plus n columns must all match.
    if contador == 2 * n:
        flag = False
| true |
3fb5bd074b2d82f52fe077c7c18e739b64ec9b99 | Python | ddiazsouto/Sentencer | /Service1/test_ser1.py | UTF-8 | 2,271 | 2.71875 | 3 | [
"MIT"
] | permissive |
from unittest.mock import patch
from flask import url_for
from flask_testing import TestCase
from things import DanSQL, callme
from app import app
# pytest
# pytest --cov=app
# pytest --cov-config=.coveragec --cov=.
# pytest --cov=app --cov-report=term-missing
# pytest --cov . --cov-report html
class TestBase(TestCase): # base case that creates the app environment
    """Base Flask-Testing case; supplies the Flask app under test."""
    def create_app(self): # its configuration
        return app
class TestViews(TestBase): # This test confirms that the page loads
    """Smoke tests: the 'main' and 'data' routes answer with HTTP 200."""
    def test_home_get(self):
        response = self.client.get(url_for('main'))
        self.assertEqual(response.status_code, 200)
    def test_data_get(self):
        response = self.client.get(url_for('data'))
        self.assertEqual(response.status_code, 200)
class MyAlchemy():
    """Helpers wrapping DanSQL for connectivity and round-trip checks."""

    @staticmethod
    def connects():
        """Return True when a connection to the 'mysql' database succeeds."""
        try:
            attempt = DanSQL('mysql')
            attempt.off()
            return True
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return False

    @staticmethod
    def creates(value):
        """Round-trip *value* through a throwaway database and return the
        fetched rows as a string.

        NOTE(review): the INSERT interpolates *value* directly into SQL;
        only call this with trusted test values.
        """
        DanSQL('master').write('CREATE DATABASE IF NOT EXISTS testbase;')
        DanSQL('testbase').write('CREATE TABLE IF NOT EXISTS Test(column1 VARCHAR(10));')
        DanSQL('testbase').write(f'INSERT INTO Test(column1) values({str(value)});')
        var = DanSQL('testbase').get('SELECT * FROM Test;')
        DanSQL('master').write('DROP DATABASE testbase;')
        return str(var)
def test4(): # Is the connection with the database successful ?
    """Database connection smoke test."""
    assert MyAlchemy.connects() == True
def test5(): # Checks that the object can interact with the database using an integer
    """Round-trip an integer value through the throwaway database."""
    assert '127' in MyAlchemy.creates(127)
def test6(): # Checks that the object can interact with the database using a string
    """Round-trip a (quoted) string value through the throwaway database."""
    assert 'Dan' in MyAlchemy.creates("'Dan'")
# class TestResponse(TestBase):
# def test_one(self):
# # We will mock a response of 1 and test that we get football returned.
# with patch('requests.get') as g:
# g.return_value = 'dasdd'
# response = self.client.get(url_for('main'))
# self.assertIn(b'Dan', response.data) | true |
ad59813badacd0a7e9ca83866baf51bfd7a8fdde | Python | lab11/time_series_project | /plaid_data/plaid_analysis.py | UTF-8 | 4,601 | 2.640625 | 3 | [] | no_license | #! /usr/bin/env python3
import os
import sys
import json
# check if plaid dataset exists; bail out early when it has not been fetched
if not (os.path.exists("PLAID/") and os.path.isdir("PLAID/")):
    print("PLAID not downloaded yet. Run `plaid_serializer.py`")
    sys.exit()
# Metadata files to analyse (meta2.json is commented out -- presumably
# superseded by the StatusesRenamed copy).
metadata_filenames = ["PLAID/meta1.json",
                      #"PLAID/meta2.json",
                      "PLAID/meta2StatusesRenamed.json"]
# iterate through metadata files and each JSON blob in them
for infilename in sorted(metadata_filenames):
    print('\n\n' + infilename)
    locations = []       # unique measurement locations
    status_types = []    # unique instance statuses
    device_types = {}    # per device type: count, locations, statuses
    with open(infilename, 'r') as infile:
        metadata = json.load(infile)
        for item in metadata:
            # store data in a bunch of dicts!
            if item['meta']['location'] not in locations:
                locations.append(item['meta']['location'])
            if item['meta']['instances']['status'] not in status_types:
                status_types.append(item['meta']['instances']['status'])
            # First sighting of a device type initializes its record.
            if item['meta']['type'] not in device_types.keys():
                device_types[item['meta']['type']] = {}
                device_types[item['meta']['type']]['count'] = 0
                device_types[item['meta']['type']]['locations'] = []
                device_types[item['meta']['type']]['statuses'] = []
            device_types[item['meta']['type']]['count'] += 1
            if item['meta']['location'] not in device_types[item['meta']['type']]['locations']:
                device_types[item['meta']['type']]['locations'].append(item['meta']['location'])
            if item['meta']['instances']['status'] not in device_types[item['meta']['type']]['statuses']:
                device_types[item['meta']['type']]['statuses'].append(item['meta']['instances']['status'])
    # Summary report for this metadata file.
    print("")
    print("Locations: " + str(len(locations)))
    print("")
    print("Status Types: " + str(len(status_types)))
    print("")
    print("Unique device types: (count " + str(len(device_types)) + ")")
    for item in device_types.keys():
        # calculate unique locations for each device
        device_types[item]['unique'] = len(device_types[item]['locations'])
        # spacing to make the text line up
        space = "\t\t\t\t"
        if len(item) > 4:
            space = "\t\t\t"
        if len(item) > 12:
            space = "\t\t"
        if len(item) > 16:
            space = "\t"
        print(" - " + item + space + "(count " + str(device_types[item]['count']) + ",\t number of locs " + str(device_types[item]['unique']) + ")")
        print("\t" + str(device_types[item]['statuses']))
# special testing to answer some validity questions
# (disabled: this whole section is under 'if False')
if False:
    dev_dict = {}
    # Group appliance ids by location and device type.
    for loc in locations:
        dev_dict[loc] = {}
        for item in metadata:
            if item['meta']['location'] != loc:
                continue
            dev_type = item['meta']['type']
            if dev_type not in dev_dict[loc].keys():
                dev_dict[loc][dev_type] = {}
            # Build an appliance key by joining its sanitized metadata fields.
            dev_appliance = ''
            for app_key in sorted(item['meta']['appliance'].keys()):
                if app_key == 'notes':
                    continue
                if dev_appliance != '' and item['meta']['appliance'][app_key] != '':
                    dev_appliance += '_'
                dev_appliance += item['meta']['appliance'][app_key].replace(' ', '_').replace('-', '_').replace('.', '_').replace('(', '').replace(')', '').replace('/', '')
            if dev_appliance not in dev_dict[loc][dev_type]:
                dev_dict[loc][dev_type][dev_appliance] = []
            dev_dict[loc][dev_type][dev_appliance].append(int(item['id']))
    # Print ids per appliance, flagging id gaps and long runs.
    for loc in sorted(dev_dict.keys()):
        print(loc)
        for dev_type in sorted(dev_dict[loc].keys()):
            print(' ' + dev_type)
            for dev_appliance in sorted(dev_dict[loc][dev_type].keys()):
                ids = ''
                prev_id = 0
                for dev_id in sorted(dev_dict[loc][dev_type][dev_appliance]):
                    # NOTE(review): prev_id is never updated inside this loop,
                    # so the gap marker below can never trigger -- which may
                    # explain the "never actually occurs" observation.
                    if prev_id > 0 and dev_id != prev_id+1:
                        # Note, this was tested and never actually occurs
                        ids += '<-> '
                    ids += str(dev_id) + ' '
                special = ''
                if len(dev_dict[loc][dev_type][dev_appliance]) > 6:
                    special = ' RATHER LONG!!'
                print('   ' + str(dev_appliance) + ' ' + str(ids) + special)
| true |
0f9241739c5c73227df5e65afc6b6c7e28e39697 | Python | DaiHanpeng/CentralDB | /DBInterface/ResultFlagInterface.py | UTF-8 | 2,055 | 2.828125 | 3 | [] | no_license | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from Tables import BaseModel,ResultFlagTable
class ResultFlagInterface():
    """
    db interface for result flag table.
    """
    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider reading the
        # connection string from configuration instead.
        DB_CONNECT_STRING = 'mysql+mysqldb://root:root@localhost/sys_info'
        self.engine = create_engine(DB_CONNECT_STRING,echo=False)
        DB_Session = sessionmaker(bind=self.engine)
        self.session = DB_Session()
        self.init_database()
    def init_database(self):
        # Create missing tables and commit the resulting DDL.
        self.init_tables()
        self.session.commit()
    def init_tables(self):
        # Creates every table declared on BaseModel's metadata.
        BaseModel.metadata.create_all(self.engine)
    def add_new_record(self,code = None,rid = None):
        # Queue one ResultFlagTable row; call write_to_db() to persist it.
        #self.session.add(PatientTable(pid,fname,lname,birthday,sex,location))
        # use merge() instead of add() to avoid duplicated insert error from MySQL.
        self.session.add(ResultFlagTable(code = code,rid=rid))
    def add_new_records(self, result_list):
        # Queue every ResultFlagTable instance found in result_list, then flush.
        if isinstance(result_list, list):
            for item in result_list:
                if isinstance(item,ResultFlagTable):
                    self.add_new_record(code=item.code,rid=item.rid)
            self.write_to_db()
    def write_to_db(self):
        # Flush and commit queued rows; failures are printed, not re-raised.
        try:
            self.session.flush()
            self.session.commit()
        except Exception as ex:
            print 'database write failed!'
            print ex
def mytest01():
    """Manual smoke test: insert valid rows, then one that should violate
    the ResultFlagTable foreign-key constraint."""
    # NOTE(review): this datetime import is unused.
    from datetime import datetime
    db_interface = ResultFlagInterface()
    # insert normal data
    print 'normal testing:'
    db_interface.add_new_record(code='waived')
    db_interface.add_new_record(code='hello')
    db_interface.add_new_record(code='test',rid=228931)
    db_interface.write_to_db()
    # test insert abnormal data
    # should fail because the foreign key constrain
    print 'abnormal testing:'
    try:
        db_interface.add_new_record(code='test',rid=1122334455)
        db_interface.write_to_db()
    except Exception as e:
        print e
if __name__ == '__main__':
mytest01() | true |
188cccf5a6890d99180d835c419079eccbcf19e6 | Python | guille3218/HLC_2122 | /Introduccion/00b_formateo.py | UTF-8 | 216 | 3.484375 | 3 | [] | no_license | print("Hola")
print("Adios")
# end="" suppresses the newline, so the next print continues on the same line.
print("Sevilla", end="")
print("Cádiz", end="")
# end=" " separates items with a space instead of a newline.
print("Huelva", end=" ")
print("Granada", end=" ")
print("")
print("Córdoba", end=" ")
print("a")
i=3
print(f"valor de la variable {i}") | true |
40028d37afe5038adcc66e1b7438efa832f1139b | Python | alexssandroos/learn_formacaods_udmy | /scripts/testes_normal.py | UTF-8 | 252 | 2.546875 | 3 | [
"MIT"
] | permissive | from scipy import stats
from scipy.stats import norm
import matplotlib.pyplot as plt
# Draw 100 samples from a standard normal distribution.
dados = norm.rvs(size = 100)
# Q-Q plot of the sample against the normal distribution (via matplotlib).
stats.probplot(dados, plot = plt)
# Shapiro-Wilk normality test; returns (statistic, p-value).
stats.shapiro(dados)
import pandas as pd
import numpy as np
a = pd.DataFrame(np.arange(10)*10)
# NOTE(review): a bare expression only displays in a REPL/notebook; use
# print(a) in a script.
a
| true |
76b6c6d6c5a8221f67ead6154a3a67233ce259a5 | Python | Jonasori/Outdated-Disk-Modeling | /baseline_cutoff.py | UTF-8 | 4,126 | 2.984375 | 3 | [] | no_license | """Run the ICR process while cutting off baselines below b_max.
Testing a change.
"""
import numpy as np
import pandas as pd
import argparse as ap
import subprocess as sp
import matplotlib.pyplot as plt
from tools import icr, imstat, already_exists, remove
from constants import today
# baselines = np.arange(0, 130, 5)
baselines = np.arange(0, 250, 5)
default_mol = 'hco'
def get_baseline_rmss(mol, niters=1e4, baselines=baselines, remake_all=False):
    """Iterate through a range of baseline cutoffs and compare the results.
    Args:
        vis (str): the name of the core data file that this is pulling.
        baselines (list of ints): the baselines to check over.
    Returns a pandas DataFrame with one row per baseline: RMS, Mean, Baseline.
    """
    # Set up the symlink: work happens on /scratch, exposed under ./baselines.
    run_dir = './baselines/baseline_' + mol + str(int(niters)) + '/'
    scratch_dir = '/scratch/jonas/' + run_dir
    orig_vis = './data/' + mol + '/' + mol
    new_vis = run_dir + mol
    if remake_all is True or already_exists(new_vis) is False:
        remove(scratch_dir)
        # :-1 because a symlink with a deleted root isn't a directory anymore
        remove(run_dir[:-1])
        sp.call(['mkdir {}'.format(scratch_dir)], shell=True)
        sp.call(['ln', '-s', scratch_dir, './baselines/'])
        sp.call(['cp', '-r', '{}.vis'.format(orig_vis),
                 '{}/'.format(run_dir)])
        print "Made symlinked directory, copied core .vis over.\n\n"
    data_list = []
    for b in baselines:
        print '\n\n\n NEW ITERATION\nBaseline: ', b, '\n'
        # b == 0 means "no cutoff", which reuses the base name with no suffix.
        name = run_dir + mol + str(b) if b != 0 else run_dir + mol
        # Check if we've already icr'ed this one.
        if already_exists(name + '.cm') is True:
            print "File already exists; going straight to imstat"
            mean, rms = imstat(name, ext='.cm')
        else:
            # Clean/image the visibilities with this minimum baseline, then measure.
            icr(new_vis, mol=mol, min_baseline=b, niters=niters)
            mean, rms = imstat(name, ext='.cm')
        step_output = {'RMS': rms,
                       'Mean': mean,
                       'Baseline': b}
        data_list.append(step_output)
        print step_output
    data_pd = pd.DataFrame(data_list)
    return data_pd
def analysis(df, mol, niters):
    """Plot RMS and mean off-source noise against baseline cutoff.

    Saves the figure as imnoise_<mol><niters>.png and returns
    [Baseline, Mean, RMS] columns of *df*.
    """
    fig, (rms_ax, mean_ax) = plt.subplots(2, sharex=True)
    rms_ax.grid(axis='x')
    rms_ax.set_title('RMS Noise')
    rms_ax.plot(df['Baseline'], df['RMS'], '-b')
    mean_ax.grid(axis='x')
    mean_ax.set_title('Mean Noise')
    mean_ax.set_xlabel('Baseline length (k-lambda)')
    mean_ax.plot(df['Baseline'], df['Mean'], '-b')
    im_name = 'imnoise_%s%d.png' % (mol, int(niters))
    plt.savefig(im_name)
    return [df['Baseline'], df['Mean'], df['RMS']]
def run_noise_analysis(mol, baselines=baselines,
                       niters=1e4):
    """Run the above functions: sweep the baseline cutoffs, then plot."""
    print "Baseline range to check: ", baselines[0], baselines[-1]
    print "Don't forget that plots will be saved to /modeling, not here.\n\n"
    ds = get_baseline_rmss(mol, niters, baselines)
    analysis(ds, mol, niters)
"""
def main():
parser = ap.ArgumentParser(formatter_class=ap.RawTextHelpFormatter,
description='''Make a run happen.''')
parser.add_argument('-r', '--run',
action='store_true',
help='Run the analysis.')
parser.add_argument('-o', '--run_and_overwrite',
action='store_true',
help='Run the analysis, overwriting preexisting runs.')
args = parser.parse_args()
if args.run:
run_noise_analysis(default_mol, Baselines=baselines,
niters=1e4)
elif args.run_and_overwrite:
run_noise_analysis(default_mol, Baselines=baselines,
niters=1e4)
if __name__ == '__main__':
main()
"""
# The End
| true |
1834be7502a81538313ab138a52acb954c70cf90 | Python | nastevens/sandbox | /python/flushbot/oldcode/createlookup.py | UTF-8 | 1,666 | 2.875 | 3 | [] | no_license | import hands, stacks, sys, pickle
from card import card
def createdata(dataset):
    """Enumerate every 5-card hand from a 52-card deck, classify it, and
    index the resulting (c1..c5, class) tuples under each card id and "all".

    Class codes: R royal flush, T straight flush, 4 four-of-a-kind,
    H full house, F flush, S straight, 3 three-of-a-kind, X two pair,
    P pair, N nothing.
    """
    depth = 53  # card ids run 1..52
    dataset["all"] = set([])
    for i in range(1,depth):
        dataset[i] = set([])
    # C(52, 5) combinations -- the dots/numbers below are progress output.
    for i in range(1,depth):
        sys.stdout.writelines(["\n",str(i)])
        for j in range(i+1,depth):
            sys.stdout.write(".")
            for k in range(j+1,depth):
                for l in range(k+1,depth):
                    for m in range(l+1,depth):
                        res = "N"
                        st = stacks.stack([card(i),card(j),card(k),card(l),card(m)])
                        # Ordered strongest-first so the first match wins.
                        if hands.isRoyalFlush(st): res = "R"
                        elif hands.isStraightFlush(st): res = "T"
                        elif hands.isFourOAK(st): res = "4"
                        elif hands.isFullHouse(st): res = "H"
                        elif hands.isFlush(st): res = "F"
                        elif hands.isStraight(st): res = "S"
                        elif hands.isThreeOAK(st): res = "3"
                        elif hands.isTwoPair(st): res = "X"
                        elif hands.isPair(st): res = "P"
                        t = (i,j,k,l,m,res)
                        dataset[i].add(t)
                        dataset[j].add(t)
                        dataset[k].add(t)
                        dataset[l].add(t)
                        dataset[m].add(t)
                        dataset["all"].add(t)
if __name__ == '__main__':
    # Build the full lookup table and persist it with pickle.
    dataset = {}
    print "Creating data"
    createdata(dataset)
    print "Writing pickle"
    output = open('data2.pkl', 'wb')
    pickle.dump(dataset,output)
    output.close()
d0f98f2ca82274b5db273d6810c239e72e2ddeba | Python | BrianHicks/perch | /perch/utils.py | UTF-8 | 414 | 2.640625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from py.path import local
import os
class ClassRegistry(dict):
    """A dict of classes keyed by nickname, populated via a decorator."""

    def register(self, name):
        """Return a class decorator that stores the decorated class under *name*."""
        def _decorator(klass):
            self[name] = klass
            return klass
        return _decorator
def files_in_dir(target):
    """Yield every file under *target*, recursively, in sorted order."""
    for entry in local(target).visit(sort=True):
        if not entry.isfile():
            continue
        yield entry
| true |
0193fe59189d18af00503738ee4a6664e784d1e8 | Python | tangingw/python_pymetheus | /monitor/monitor_net.py | UTF-8 | 2,716 | 2.5625 | 3 | [] | no_license | import os
import psutil
import platform
from datetime import datetime
from socket import AF_INET, AF_INET6, SOCK_DGRAM, SOCK_STREAM
class MonitorNetwork:
    """Snapshot network interfaces and open connections via psutil."""

    def __init__(self):
        # Connections are sampled once at construction time.
        self.net_connections = psutil.net_connections()
        self.network_interface_info = psutil.net_if_addrs()
    def get_network_interface(self):
        """Return {interface_name: [address dicts]} for every interface.

        Each address dict carries ip_type ("ipv4"/"ipv6"/"mac-address") and
        either ip_address or mac_address, plus netmask/broadcast.
        """
        ip_layer_dict = {
            AF_INET: "ipv4",
            AF_INET6: "ipv6"
        }
        network_interface_dict = {
            interface_name: [
                {
                    "ip_type": ip_layer_dict[intf.family] if intf.family in ip_layer_dict.keys() else "mac-address",
                    "mac_address": intf.address if intf.family not in ip_layer_dict.keys() else None,
                    "ip_address": intf.address if intf.family in ip_layer_dict.keys() else None,
                    "netmask": intf.netmask, "broadcast": intf.broadcast
                } for intf in interfaces
            ] for interface_name, interfaces in self.network_interface_info.items()
        }
        return network_interface_dict
    def get_connection_process(self):
        """Return a netstat-like list of dicts for the sampled connections.

        Process details (name/status/start time) are attached only when
        running as root on Linux, or on Windows.
        """
        protocol_dict = {
            (AF_INET, SOCK_DGRAM): "udp",
            (AF_INET6, SOCK_DGRAM): "udp6",
            (AF_INET6, SOCK_STREAM): "tcp6",
            (AF_INET, SOCK_STREAM): "tcp"
        }
        connection_process = []
        current_process = {
            process.pid: process for process in psutil.process_iter(['pid', 'name', 'username'])
        }
        for p in self.net_connections:
            network_connection_dict = {
                "protocol": protocol_dict[(p.family, p.type)],
                "local_address": f"{p.laddr.ip}:{p.laddr.port}",
                "remote_address": f"{p.raddr.ip}:{p.raddr.port}" if p.raddr else "-"
            }
            # Per-process info requires elevated privileges on Linux.
            if ((platform.system() == "Linux" and os.geteuid() == 0) or platform.system() == "Windows"):
                if p.pid in current_process.keys():
                    network_connection_dict.update(
                        {
                            "process_name": current_process[p.pid].info["name"],
                            "status": current_process[p.pid].status(),
                            "started_at": datetime.fromtimestamp(
                                current_process[p.pid].create_time()
                            ).isoformat(),
                        }
                    )
            connection_process.append(network_connection_dict)
        return connection_process
    def get_all_info(self):
        """Combine interface and connection snapshots into one dict."""
        return {
            "network_interfaces": self.get_network_interface(),
            "network_netstats": self.get_connection_process()
        }
| true |
35ad3205182795026dc04666aae8d0c14c174731 | Python | marquesarthur/w2v-rest-api | /fasttext/write_so_corpus.py | UTF-8 | 2,601 | 2.5625 | 3 | [] | no_license | # https://radimrehurek.com/gensim/models/fasttext.html
# https://stackoverflow.com/questions/58876630/how-to-export-a-fasttext-model-created-by-gensim-to-a-binary-file
# from gensim.models.fasttext import FastText
# from gensim.test.utils import datapath
# from gensim.utils import tokenize
# from gensim import utils
# class MyIter(object):
# def __iter__(self):
# path = datapath('crime-and-punishment.txt')
# with utils.open(path, 'r', encoding='utf-8') as fin:
# for line in fin:
# yield list(tokenize(line))
#
#
# model4 = FastText(vector_size=100)
# model4.build_vocab(sentences=MyIter())
# total_examples = model4.corpus_count
# model4.train(sentences=MyIter(), total_examples=total_examples, epochs=5)
# sentences = [[" ", "Yes", "Who"], ["I", "Yes", "Chinese"]]
# model = FastText(sentences, size=4, window=3, min_count=1, iter=10, min_n=3, max_n=6, word_ngrams=0)
# model[' '] # The way the word vector is obtained
# model.wv['you'] # The way the word vector is obtained
#
# import postgresql
# db = postgresql.open('pq://w2v:password123@127.0.0.1:5432/crokage')
#
# get_table = db.prepare("SELECT processedtitle, processedbody from postmin")
#
# # Streaming, in a transaction.
# with db.xact():
# for x in get_table.rows("tables"):
# print(x)
#
#
#
#
# Connection.query.load_chunks(collections.abc.Iterable(collections.abc.Iterable(parameters)))
# https://rizwanbutt314.medium.com/efficient-way-to-read-large-postgresql-table-with-python-934d3edfdcc
import psycopg2
from datetime import datetime
start = datetime.now()
# NOTE(review): credentials are hardcoded -- move to env vars/config before sharing.
connection = psycopg2.connect(
    dbname='crokage',
    user='w2v',
    password='password123',
    host='127.0.0.1',
    port=5432
)
# https://stackoverflow.com/questions/49266939/time-performance-in-generating-very-large-text-file-in-python
# https://rizwanbutt314.medium.com/efficient-way-to-read-large-postgresql-table-with-python-934d3edfdcc
i = 0
data_file = open('corpus.txt', 'w', encoding='UTF-8')
# A named (server-side) cursor streams the table in chunks instead of
# loading all rows into memory at once.
with connection.cursor(name='SO_posts_cursor') as cursor:
    cursor.itersize = 3000 # chunk size
    query = 'SELECT processedtitle, processedbody from postsmin;'
    cursor.execute(query)
    for row in cursor:
        title, body = row[0], row[1]
        # Write title and body as separate corpus lines; skip empty fields.
        if title:
            line = f"{title}\n"
            data_file.write(line)
        if body:
            line = f"{body}\n"
            data_file.write(line)
        i += 1
        if i % 25000 == 0:
            print(f"{str(i)} rows processed")
data_file.close()
end = datetime.now()
print("-" * 10)
print("elapsed time %s" % (end - start))
| true |
a128fde5365dc3ea3c4c3dd788c8522858158fcc | Python | biggydbs/Sudoku-Solver | /sudoku.py | UTF-8 | 1,707 | 3.34375 | 3 | [] | no_license | import time
n = 9  # board dimension (n x n sudoku)
m = int(n**0.5)  # sub-box dimension (m x m)
def findNextCellToFill(grid, i, j):
    """Return (row, col) of the next empty cell (value 0) in *grid*.

    Searches forward from (i, j) first, then falls back to a full scan
    (the fast path skips columns < j on subsequent rows). Returns (-1, -1)
    when the board has no empty cell.

    The board size is taken from ``grid`` itself, so any square grid works
    (previously this relied on the module-level constant ``n``).
    """
    size = len(grid)
    # Fast path: resume scanning from the caller's last position.
    for x in range(i, size):
        for y in range(j, size):
            if grid[x][y] == 0:
                return x, y
    # Fallback: exhaustive scan to catch cells the fast path skipped.
    for x in range(size):
        for y in range(size):
            if grid[x][y] == 0:
                return x, y
    return -1, -1
def isValid(grid, i, j, e):
    """Return True if value *e* may legally be placed at (i, j).

    The value must not already appear in row i, column j, or the sub-box
    containing (i, j). Board and box sizes are derived from ``grid``
    (an n x n board with sqrt(n) x sqrt(n) boxes), so 4x4, 9x9, 16x16 all
    work without module globals.

    Bug fix: the original used true division (``i/m``) for the box corner,
    which produces a float under Python 3 and crashes ``range``; floor
    division is used instead.
    """
    size = len(grid)
    box = int(size ** 0.5)
    if any(grid[i][x] == e for x in range(size)):
        return False  # duplicate in row
    if any(grid[x][j] == e for x in range(size)):
        return False  # duplicate in column
    # Top-left corner of the sub-box containing (i, j).
    secTopX, secTopY = box * (i // box), box * (j // box)
    for x in range(secTopX, secTopX + box):
        for y in range(secTopY, secTopY + box):
            if grid[x][y] == e:
                return False  # duplicate in sub-box
    return True
def solveSudoku(grid, i=0, j=0):
    """Solve *grid* in place by backtracking; returns True once solved."""
    i, j = findNextCellToFill(grid, i, j)
    if i == -1:
        return True  # no empty cell remains: solved
    for candidate in range(1, n + 1):
        if not isValid(grid, i, j, candidate):
            continue
        grid[i][j] = candidate
        if solveSudoku(grid, i, j):
            return True
        # Undo the current cell for backtracking
        grid[i][j] = 0
    return False
# Read the puzzle: each valid line holds n space-separated digits (0 = empty).
read = open("sudoku_input","r")
output = open("sudoku_output","w")
inp = []
for i in read.readlines():
    temp = []
    line = i.split(" ")
    if len(line) == n:
        for j in line:
            temp.append(int(j))
        inp.append(temp)
# Time the solve and write the completed grid plus the elapsed time.
start = time.time()
solveSudoku(inp)
end = time.time()
for i in inp:
    for j in i:
        output.write(str(j)+" ")
    output.write("\n")
output.write("\n\n")
output.write("Time Elapsed : " + str(end - start))
read.close()
output.close()
fe57418dea247bd936572bb4f58354a770618c70 | Python | JeanPaiva42/recommendaJogos | /recommendaJogos/RecomendacaoJogos.py | UTF-8 | 7,651 | 2.96875 | 3 | [] | no_license | from numpy import *
import numpy as np
import Usuario
import Jogos
from Jogos import Jogos
from Usuario import Usuario
# Parse Jogos.txt in 5-line records: line 1 is the game name, lines 2-5 are
# its feature values. Each record becomes one Jogos object in jogosLista.
a = list()
j = 0
jogosLista = list()
with open("Jogos.txt", 'r+') as txtJogos:
    for line in txtJogos:
        if j < 5:
            line = line.strip('\n')
            a.append(line)
            j += 1
        else:
            # Record complete: build the game, then start collecting the next.
            aux = Jogos(str(a[0]).upper(), a[1:])
            jogosLista.append(aux)
            del a
            del aux
            a = list()
            line = line.strip('\n')
            a.append(line)
            j = 1
# Flush the final record (the loop only emits on the following record's start).
aux = Jogos(str(a[0]).upper(), a[1:])
jogosLista.append(aux)
del j
del a
# Hand-built test users with per-game ratings (0-10).
nomes = ["Jean", "Lukkas", "Daniel", "Newt"] #"Jales", "Felipe", "Samuka", "Thales", "Hugazzo", "Romario"]
# I know this could be done more automatically/simply, but fuck it
userJean = Usuario(nomes[0])
userJean.adicionaJogo("Silent Hill", 10)
userJean.adicionaJogo("Final Fantasy XII", 10)
userJean.adicionaJogo("Cory in the house", 10)
userJean.adicionaJogo("Crash Team Racing", 10)
usuariosLista = []
usuariosLista.append(userJean)
userLukkas = Usuario(nomes[1])
userLukkas.adicionaJogo("Silent Hill", 7)
userLukkas.adicionaJogo("Dragon Quest V", 10)
userLukkas.adicionaJogo("Crash Team Racing", 1)
userLukkas.adicionaJogo("NBA", 9)
usuariosLista.append(userLukkas)
userDaniel = Usuario(nomes[2])
userDaniel.adicionaJogo("Need for Speed", 9)
userDaniel.adicionaJogo("FIFA", 8)
userDaniel.adicionaJogo("The Walking Dead", 1)
userDaniel.adicionaJogo("Xenogears", 4)
usuariosLista.append(userDaniel)
userNewt = Usuario(nomes[3])
userNewt.adicionaJogo("Final Fantasy XII", 10)
userNewt.adicionaJogo("Dragon Quest V", 9)
userNewt.adicionaJogo("Crash Team Racing", 6)
userNewt.adicionaJogo("Silent hill", 8)
usuariosLista.append(userNewt)
numUsuarios = len(usuariosLista)
numJogos = len(jogosLista)
#print numJogos, numUsuarios
# matrix that will hold each user's rating for each game (filled below)
notasM =[]
def colocaNotas():
    """Fill the global notasM with one row per user: that user's rating for
    every game in jogosLista, 0 when the game was not rated."""
    for y in range(numUsuarios):
        b =[]
        for x in range(numJogos):
            nomeJogo = jogosLista[x].getNomeJogo()
            if nomeJogo in usuariosLista[y].getJogos():
                b.append(float(usuariosLista[y].getNota(nomeJogo)))
            else:
                b.append(0)  # 0 marks "not rated"
        notasM.append(b)
colocaNotas()
# Transpose so rows are games and columns are users.
notasM = np.asarray(notasM).transpose()
#print notasM
'''
se a nota de um usuario para um jogo for igual a zero isso significa que esse jogo nao
foi avaliado pelo usuario em questao. 5 colunas representando os usuarios, 10 linhas representando o numero de jogos
'''
# (Above, in Portuguese: a 0 rating means the user did not rate that game;
# columns are users, rows are games.)
deuNota = (notasM != 0 ) * 1
#print deuNota
#print notas
# Normalization helper: value - mean = normalized (used below).
def normalizaNotas(notasM, deuNota):
    """Mean-normalize each game's ratings, ignoring unrated entries.

    Returns (normalized_matrix, per_game_means); cells that were never
    rated stay 0 in the normalized matrix.
    """
    num_games = notasM.shape[0]
    means = zeros(shape=(num_games, 1))
    normalized = zeros(shape=notasM.shape)
    for game in range(num_games):
        # Columns (users) that actually rated this game.
        rated = where(deuNota[game] == 1)[0]
        means[game] = mean(notasM[game, rated])
        normalized[game, rated] = notasM[game, rated] - means[game]
    return normalized, means
notas, notasMedia = normalizaNotas(notasM, deuNota)
# Game feature vectors (what distinguishes each game).
numFeatures = len(jogosLista[0].getListaFeatures())
jogoFeatures =[]
def colocaFeatures():
    """Append every game's feature list to the global jogoFeatures and return it.

    (The previous version also built an unused temporary list; removed.)
    """
    for x in range(numJogos):
        jogoFeatures.append(jogosLista[x].getListaFeatures())
    return jogoFeatures
# Materialize the feature lists as a (numJogos x numFeatures) numpy array.
jogoFeatures = np.asarray(colocaFeatures())
print jogoFeatures
def usuarioPreferencias():
    """Derive each user's initial feature preferences: for every feature,
    sum over games of feature_value * (user_rating / 10), counting only
    games the user rated; the result is stored on the user object."""
    preferencias = []
    for y in range(numUsuarios):
        for x in range(numFeatures):
            b = []
            for z in range(numJogos):
                nomeJogo = jogosLista[z].getNomeJogo()
                if nomeJogo in usuariosLista[y].getJogos():
                    b.append(float(jogosLista[z].getFeature(x)*(usuariosLista[y].getNota(nomeJogo)/10.0)))
                else:
                    b.append(0)
            preferencias.append(b)
        # Collapse each feature's per-game contributions into one number.
        for i in range(len(preferencias)):
            preferencias[i] = sum(preferencias[i])
        usuariosLista[y].calculaPreferencias(preferencias)
        preferencias = []
usuarioPreferencias()
def matrizPreferencia():
    """Collect every user's preference vector into one list (one row per user)."""
    return [usuariosLista[idx].getPreferencias() for idx in range(numUsuarios)]
# Initial user-preference matrix, scaled down to act as a starting guess.
usuarioPref = (0.12)*np.asarray(matrizPreferencia())
#usuarioPref = randn(numUsuarios, numFeatures)
print usuarioPref
#print usuarioPref
# The name below comes from linear-regression notation (X and theta flattened together).
xInicialEteta = r_[jogoFeatures.T.flatten(), usuarioPref.T.flatten()]
# The next 3 functions were not developed by me.
def unroll_params(xInicialEteta, numUsuarios, numJogos, numFeatures):
    """Split the flat parameter vector back into the two matrices:
    X (numJogos x numFeatures) and theta (numUsuarios x numFeatures)."""
    split = numJogos * numFeatures
    game_part = xInicialEteta[:split]
    user_part = xInicialEteta[split:]
    X = game_part.reshape((numFeatures, numJogos)).T
    theta = user_part.reshape((numFeatures, numUsuarios)).T
    return X, theta
def calculate_gradient(xInicialEteta, notasM, deuNota, numUsuarios, numJogos, numFeatures, reg_param):
    """Gradient of the regularized collaborative-filtering cost, returned
    flattened in the same layout as the input parameter vector."""
    X, theta = unroll_params(xInicialEteta, numUsuarios, numJogos, numFeatures)
    # Prediction error, masked so only cells with an actual rating contribute.
    error = X.dot(theta.T) * deuNota - notasM
    grad_X = error.dot(theta) + reg_param * X
    grad_theta = error.T.dot(X) + reg_param * theta
    # Wrap both gradients back into one column vector.
    return r_[grad_X.T.flatten(), grad_theta.T.flatten()]
def calculate_cost(xInicialEteta, notasM, deuNota, numUsuarios, numJogos, numFeatures, reg_param):
    """Regularized squared-error cost, counting only rated cells."""
    X, theta = unroll_params(xInicialEteta, numUsuarios, numJogos, numFeatures)
    # Element-wise mask keeps only observations with a rating.
    squared_error = (X.dot(theta.T) * deuNota - notasM) ** 2
    data_cost = sum(squared_error) / 2
    penalty = (reg_param / 2) * (sum(theta ** 2) + sum(X ** 2))
    return data_cost + penalty
from scipy import optimize
regParam = 30
# Minimize the regularized cost with conjugate gradient, learning both the
# game features and the user preferences jointly.
custoMin_e_paramOtimizados = optimize.fmin_cg(calculate_cost, fprime=calculate_gradient, x0=xInicialEteta, args=(notasM, deuNota, numUsuarios, numJogos, numFeatures, regParam), maxiter=1000, disp=True, full_output=True)
cost, optimal_movie_features_and_user_prefs = custoMin_e_paramOtimizados[1], custoMin_e_paramOtimizados[0]
jogoFeatures, usuarioPref = unroll_params(optimal_movie_features_and_user_prefs, numUsuarios, numJogos, numFeatures)
#print jogoFeatures
# Predicted ratings for every (game, user) pair; add the means back to undo
# the earlier normalization. Column 0 is the first user (Jean).
allPrev = jogoFeatures.dot(usuarioPref.T)
#print allPrev
previsoesJean = allPrev[:, 0:1] + notasMedia
print previsoesJean
print usuariosLista[0].getJogos()
#print jogoFeatures
511d754f0a7520542df46aab91417faa9d61afc5 | Python | swakkhar/RNA-Editing | /source_code/gen 0/parser.py | UTF-8 | 870 | 2.765625 | 3 | [
"CC0-1.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 25 10:54:28 2018
@author: HiddenDimension
"""
import re
def createData(algo):
    """Read <algo>.txt and return one CSV row: "name,Sn(%),Sp(%),Accuracy(%)\n".

    The last two lines of the file are expected to contain the confusion
    counts (presumably TP/FN on the second-to-last line and FP/TN on the
    last -- confirm against the Weka output format).
    """
    with open(algo + '.txt') as handle:
        lines = handle.readlines()
    digits = re.compile(r"\d+")
    pos = digits.findall(lines[-2])
    neg = digits.findall(lines[-1])
    sensitivity = int(pos[0]) / (int(pos[0]) + int(pos[1]))
    specificity = int(neg[0]) / (int(neg[0]) + int(neg[1]))
    accuracy = (int(pos[0]) + int(neg[1])) / (int(pos[0]) + int(pos[1]) + int(neg[0]) + int(neg[1]))
    return algo + "," + str(sensitivity * 100) + "," + str(specificity * 100) + "," + str(accuracy * 100) + "\n"
# Collect one result row per classifier and write the combined CSV.
algo = ['nb' ,'ada' , 'ht' ,'rf' ,'smo' ,'bagg']
headers= ["Algorithm name", "Sn(%)","Sp(%)","Accuracy(%)"]
data=""
for x in headers:
    data=data+x+","
data=data[:-1]+"\n"  # drop the trailing comma, end the header line
for x in algo:
    data= data+createData(x)
f = open("compiled.csv", "w")
f.write(data)
f.close();
8fde556b30dead2bd9aac95dfb9f1391fb058857 | Python | WalidAshraf/ConvNet-Architectures | /VGG/data_utils.py | UTF-8 | 1,754 | 2.671875 | 3 | [] | no_license | import numpy as np
import matplotlib as plt
from scipy import misc
import os
def getNumImages(path):
    """Count the entries one level inside each class subdirectory of *path*."""
    total = 0
    for class_dir in os.listdir(path):
        total += len(os.listdir(path + '/' + class_dir))
    return total
def resizeImage(img, H, W):
    """Resize *img* to H x W using bicubic interpolation.

    NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 -- confirm
    the pinned SciPy version, or migrate to PIL/skimage equivalents.
    """
    return misc.imresize(img, (H, W), interp='cubic')
def loadDataSet(path):
    """Load every image under path/<class>/<file> into a single dataset.

    Returns (X, y, classes_names): X is (N, 224, 224, 3) float32, y holds
    the class index of each image, classes_names maps index -> class name.
    """
    classes = os.listdir(path)
    classes_names = {}
    num_images = getNumImages(path)
    # Pre-allocate to avoid growing arrays image by image.
    X = np.empty((num_images, 224, 224, 3), dtype=np.float32)
    y = np.empty((num_images,), dtype=np.uint8)
    j = 0
    for i, c in enumerate(classes):
        classes_names[i] = c
        imgs = os.listdir(path + '/' + c)
        for img_name in imgs:
            img = misc.imread(path + '/' + c + '/' + img_name, mode='RGB')
            img = resizeImage(img, 224, 224)
            X[j] = img
            y[j] = i
            j += 1
    return X, y, classes_names
def shuffleDataset(X, y):
    """Shuffle X and y in unison (the same random permutation for both)."""
    order = np.arange(X.shape[0])
    np.random.shuffle(order)
    return X[order], y[order]
def getDataSet(path, num_val=1000, num_test=1000):
    """Load, shuffle, and split the dataset into train/val/test parts.

    The training-set mean image is subtracted from all three splits; it is
    returned so test-time inputs can be preprocessed the same way.
    """
    X, y, classes_dic = loadDataSet(path)
    X, y = shuffleDataset(X, y)
    num_all = X.shape[0]
    num_train = num_all - num_val - num_test
    mask = range(num_train)
    X_train = X[mask]
    y_train = y[mask]
    mask = range(num_train, num_train + num_val)
    X_val = X[mask]
    y_val = y[mask]
    mask = range(num_train + num_val, num_train + num_val + num_test)
    X_test = X[mask]
    y_test = y[mask]
    # Mean-center using the training mean only (no leakage from val/test).
    mean = np.mean(X_train, axis=0, dtype=np.float32)
    X_train -= mean
    X_val -= mean
    X_test -= mean
    return X_train, y_train, X_val, y_val, X_test, y_test, classes_dic, mean
def deprocessImage(img, mean):
    """Undo mean subtraction: add the dataset mean back onto *img*."""
    return mean + img
36a9f746d195641165f9e6fa0097e332a5d8ed28 | Python | sandeep-skb/Algorithms | /Dynamic Programming/findLongestPath.py | UTF-8 | 1,440 | 3.8125 | 4 | [] | no_license | # LINK: https://www.geeksforgeeks.org/find-the-longest-path-in-a-matrix-with-given-constraints/
# Given a n*n matrix where all numbers are distinct, find the maximum length path (starting from any cell) such that
# all cells along the path are in increasing order with a difference of 1. We can move in 4 directions from a given
# cell (i, j), i.e., we can move to (i+1, j) or (i, j+1) or (i-1, j) or (i, j-1) with the
# condition that the adjacent cells have a difference of 1.
def findLongest(mat, i, j, dp, cur, max_):
    """DFS from (i, j) along neighbors whose value is exactly one greater.

    *cur* is the length of the path so far; returns the best length seen
    (max of *max_* and any path extended from here). Visited cells are
    recorded in *dp*, which the driver reads to skip already-covered starts.
    """
    rows, cols = len(mat), len(mat[0])
    if not (0 <= i < rows and 0 <= j < cols):
        return max_
    dp[i][j] = cur
    max_ = max(max_, cur)
    here = mat[i][j]
    # Explore up, down, left, right (same order as the original).
    for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        ni, nj = i + di, j + dj
        if 0 <= ni < rows and 0 <= nj < cols and mat[ni][nj] == here + 1:
            max_ = findLongest(mat, ni, nj, dp, cur + 1, max_)
    return max_
mat = [[ 1, 2, 9 ],
       [ 5, 3, 8 ],
       [ 4, 6, 7 ]]
max_ = 0
dp = []
for _ in mat:
    dp.append([0] * len(mat[0]))
# Start a search from every cell not yet touched by a previous search
# (dp[i][j] != 0 means an earlier DFS already passed through it).
for i in range(len(mat)):
    for j in range(len(mat[0])):
        if dp[i][j] == 0:
            cur = 0
            max_ = findLongest(mat, i, j, dp, cur+1, max_)
for x in dp:
    print(x)
print(max_)
| true |
98745ebff3411749cefb7b10b6a0fac1a46a614f | Python | ccc96360/Algorithm | /BOJ/Gold IV/BOJ1744.py | UTF-8 | 661 | 3.296875 | 3 | [] | no_license | #BOJ1744 수 묶기 20210515
import sys
input = sys.stdin.readline
def calc(li):
    """Greedy pairing score for BOJ 1744: trailing 1s are added as-is,
    the remaining numbers are multiplied in adjacent pairs, and an odd
    leftover element is added on its own.

    NOTE: trailing 1s are popped, so *li* is mutated.
    """
    total = 0
    while li and li[-1] == 1:
        total += li.pop()
    # Multiply consecutive pairs: (li[0]*li[1]) + (li[2]*li[3]) + ...
    for start in range(0, len(li) - 1, 2):
        total += li[start] * li[start + 1]
    if len(li) % 2 == 1:
        total += li[-1]  # unpaired tail element
    return total
def main():
    """Read n numbers from stdin, split into non-positives (ascending) and
    positives (descending), and print the maximum pairing sum."""
    n = int(input())
    li = [int(input()) for _ in range(n)]
    li.sort()
    minus = []
    plus = []
    for v in li:
        tmp = minus if v <= 0 else plus
        tmp.append(v)
    # Positives are paired largest-first; non-positives most-negative-first.
    plus.reverse()
    ans = calc(minus) + calc(plus)
    print(ans)
if __name__ == '__main__':
    main()
2ae129fce2f96db2ea73304b9cb6cfdce85aa2b9 | Python | CannonLock/PhotoDescrambler | /Timer.py | UTF-8 | 323 | 3.421875 | 3 | [] | no_license | import time
class Timer:
    """Tiny wall-clock stopwatch that prints elapsed seconds.

    ``s`` holds the last mark's timestamp; 0 means "not running".
    """

    def __init__(self):
        self.s = 0

    def start(self):
        """Begin timing from now."""
        self.s = time.time()

    def step(self, string=''):
        """Print elapsed time since the last mark, then restart the clock."""
        elapsed = time.time() - self.s
        print(string, elapsed)
        self.s = time.time()

    def end(self, string=''):
        """Print elapsed time since the last mark, then stop the clock."""
        elapsed = time.time() - self.s
        print(string, elapsed)
        self.s = 0
self.s = 0 | true |
4ddfbd22a58bed496bcaa2f92d7df63e0bfcc761 | Python | seungjulee/brush-up-algo-ds | /hackerrank/test/strings/bubblesort.py | UTF-8 | 175 | 2.9375 | 3 | [] | no_license | A=[1,5,4,3,5,3,4,3]
# bubble sort A
def bubbleSort(A):
for v, i in enumerate(A):
for vv, ii in enumerate(v):
if v > vv:
s
bubbleSort(A) | true |
e16c6ca84e39c5c9bd6b6407acc6c0b7212cee41 | Python | gokulvasan/CapacityShifting | /list.py | UTF-8 | 1,871 | 3.421875 | 3 | [] | no_license |
class list_node:
    """A doubly-linked-list node: a payload plus next/prev references."""
    def __init__(self, data, nxt, prev):
        self.data = data
        self.nxt = nxt
        self.prev = prev
    def get_nxt(self):
        return self.nxt
    def get_prev(self):
        return self.prev
    def get_data(self):
        return self.data
    def set_prev(self, prev):
        self.prev = prev
    def set_nxt(self, nxt):
        self.nxt = nxt
    def set_data(self, data):
        self.data = data
class locallist:
    """A minimal circular doubly linked list built from list_node objects.

    Fixes over the original: append/insert now also update the old
    successor's prev pointer (the list's prev links were left stale);
    insert passes the list_node constructor arguments in the right order
    (it previously swapped nxt and prev); go_prev on an empty list returns
    None instead of dereferencing None; prints unified to call syntax
    (identical output, valid on both Python 2 and 3).
    """

    def __init__(self):
        print("Creating a new list")
        self.head = None

    def append(self, data):
        """Insert *data* immediately after the head (the list is circular)."""
        node = list_node(data, None, None)
        if self.head == None:
            print("List seems empty")
            self.head = node
            node.set_nxt(node)
            node.set_prev(node)
        else:
            print("Adding new data")
            node.set_prev(self.head)
            node.set_nxt(self.head.get_nxt())
            # BUGFIX: the old successor's prev must point at the new node.
            self.head.get_nxt().set_prev(node)
            self.head.set_nxt(node)

    def insert(self, node, data):
        """Insert *data* right after *node*."""
        if node == None:
            print("Error: node data is empty")
            return None
        # BUGFIX: constructor signature is (data, nxt, prev); the original
        # passed (data, prev, next), corrupting the links.
        new_node = list_node(data, node.get_nxt(), node)
        node.get_nxt().set_prev(new_node)
        node.set_nxt(new_node)

    def get_data(self, node):
        """Return *node*'s payload (head's payload when node is None)."""
        if self.head == None:
            print("Error: Empty List")
            return None
        if node == None:
            return self.head.get_data()
        return node.get_data()

    def go_nxt(self, node):
        """Step forward from *node*; None starts from the head."""
        if node == None:
            if self.head != None:
                return self.head
            print("Error: Empty List")
            return None
        return node.get_nxt()

    def go_prev(self, node):
        """Step backward from *node*; None starts from the head."""
        if node == None:
            if self.head != None:
                return self.head
            print("Error: Empty List")
            # BUGFIX: previously fell through and called get_prev() on None.
            return None
        return node.get_prev()

    def get_head(self):
        return self.head
# Demo: build a 4-element circular list and walk it forward, then backward.
# NOTE(review): `i` shadows the conventional loop-index name.
i = locallist()
i.append(1)
i.append(2)
i.append(3)
i.append(4)
n = i.go_nxt(None)
print i.get_data(n)
n = i.go_nxt(n)
print i.get_data(n)
#i.insert(n,5)
#n = i.go_nxt(n)
print i.get_data(n)
n = i.go_nxt(n)
print i.get_data(n)
n = i.go_nxt(n)
print i.get_data(n)
# print i.get_data(None)
print "moving prev"
n = i.go_prev(n)
print i.get_data(n)
n = i.go_prev(n)
print i.get_data(n)
n = i.go_prev(n)
print i.get_data(n)
fc14938e2858909835d2e5b81f4b0c7d40afb79c | Python | yifanx0/project_euler_solutions | /0001-0100/euler_0019.py | UTF-8 | 1,583 | 4.1875 | 4 | [] | no_license | # date: 08/01/2018
# problem: how many Sundays fell on the first of the month during
# the 20th century (01/01/1901-12/31/2000)
# Dates keyed as yyyymmdd integers; January 1, 1900 was a Monday.
century = {19000101: "Monday"}

def create_key(year, month, day):
    """Register a date in *century* with an empty weekday (filled in later)."""
    century[year * 10000 + month * 100 + day] = ""
# add all dates to the dictionary, honoring 30/31-day months and leap-year
# February (divisible by 4 but not 100, or divisible by 400).
for year in range(1900, 2001) :
    for month in range(1, 13) :
        if month in [4, 6, 9, 11] :
            for day in range(1, 31) :
                create_key(year, month, day)
        elif month == 2 :
            if year % 4 == 0 and year % 100 != 0 :
                for day in range(1, 30) :
                    create_key(year, month, day)
            elif year % 400 == 0 :
                for day in range(1, 30) :
                    create_key(year, month, day)
            else :
                for day in range(1, 29) :
                    create_key(year, month, day)
        else :
            for day in range(1, 32) :
                create_key(year, month, day)
print(len(century)) # check whether the number of days during the 101 years seems right
# update the values for the dates
# NOTE(review): sorted(century) is recomputed inside the loop, re-sorting
# ~37k keys on every iteration -- hoisting the sort would give identical
# results far faster.
for date in century :
    # Day number 1 (1900-01-01) is a Monday; weekday cycles modulo 7.
    num_days = sorted(century).index(date) + 1
    weekday = num_days % 7
    if weekday == 1 :
        century[date] = "Monday"
    elif weekday == 2 :
        century[date] = "Tuesday"
    elif weekday == 3 :
        century[date] = "Wednesday"
    elif weekday == 4 :
        century[date] = "Thursday"
    elif weekday == 5 :
        century[date] = "Friday"
    elif weekday == 6 :
        century[date] = "Saturday"
    elif weekday == 0 :
        century[date] = "Sunday"
# find the answer: Sundays falling on the 1st of a month, 1901-2000
# (date % 100 == 1 picks out first-of-month keys).
i = 0
for date in century :
    if date >= 19010101 and date % 100 == 1 and century[date] == "Sunday" :
        print(date)
        i = i + 1
print(i)
7c695073dc2b770c4d857ecd46b385de7a9baefe | Python | wesleychristelis/python-basic-blockchain-poc | /blockchain.py | UTF-8 | 12,719 | 2.546875 | 3 | [] | no_license | import json
import pickle
import requests
# Own lib
from utility.verification import Verification
from utility.hash_util import hash_block
from utility.global_constants import MINING_REWARD
from utility.helpers import sum_reducer
from wallet import Wallet
from block import Block
from transaction import Transaction
print(__name__)
class Blockchain:
    def __init__(self, public_key, node_id):
        """Seed the chain with the genesis block and load any saved state
        for this node from disk."""
        print("Blockchain constructor")
        # Initialise 1st block (genesis block)
        genesis_block = Block(0, "", [], -1, 0)
        # Empty list for the blockchain
        self.node_id = node_id
        # Goes through the `chain` property setter.
        self.chain = [genesis_block]
        self.__open_transactions = []
        self.public_key = public_key ## Public key
        # NOTE(review): attribute name is misspelled ("resovle"); other
        # modules may rely on it, so rename with care.
        self.resovle_conflicts = False
        self.__peer_nodes = set()
        self.load_data()
    @property
    def chain(self):
        """Defensive copy of the block list (callers cannot mutate state)."""
        # returns a copy of the list
        return self.__chain[:]
    @chain.setter
    def chain(self, val):
        """Replace the internal block list wholesale."""
        self.__chain = val
    def get_open_transactions(self):
        """Return a copy of the not-yet-mined transactions."""
        return self.__open_transactions[:]
    def load_data(self):
        """Initialize blockchain + open transactions data from a file.

        The node's file holds three JSON lines: the chain, the open
        transactions, and the peer-node list. Missing/short files are
        tolerated (fresh node).
        """
        try:
            with open(f'blockchain-{self.node_id}.txt', mode='r') as file_store:
                file_content = file_store.readlines()
                blockchain = json.loads(file_content[0][:-1]) # first line without carriage return
                # We need to convert the loaded data because Transactions should use OrderedDict
                updated_blockchain = []
                for block in blockchain:
                    converted_tx = [Transaction(tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']]
                    updated_block = Block(block['index'], block['previous_hash'], converted_tx, block['proof'], block['timestamp'])
                    updated_blockchain.append(updated_block)
                self.chain = updated_blockchain
                open_transactions = json.loads(file_content[1][:-1])
                # We need to convert the loaded data because Transactions should use OrderedDict
                updated_transactions = []
                for tx in open_transactions:
                    updated_transaction = Transaction(tx['sender'], tx['recipient'], tx['signature'], tx['amount'])
                    updated_transactions.append(updated_transaction)
                self.__open_transactions = updated_transactions
                peer_nodes = json.loads(file_content[2])
                self.__peer_nodes = set(peer_nodes)
        except (IOError, IndexError):
            # No store yet (first run) or truncated file: start fresh.
            print("Handled exception ... no blockchain store found")
        finally:
            print("Finally lets move on !!!")
    # Using the JSON text version of saving data, so we can easily test the security by editeing the file and checking the chain fails
    def save_data(self):
        """Save blockchain + open transactions snapshot to a file.

        Writes three JSON lines (chain, open transactions, peer nodes) --
        the same layout load_data() reads back.
        """
        try:
            with open(f'blockchain-{self.node_id}.txt', mode='w') as file_store:
                # Rebuild each block with dict-form transactions so the whole
                # chain serializes via __dict__.
                saveable_chain = [block.__dict__ for block in [Block(block_el.index, block_el.previous_hash, [
                    tx.__dict__ for tx in block_el.transactions], block_el.proof, block_el.timestamp) for block_el in self.__chain]]
                file_store.write(json.dumps(saveable_chain))
                file_store.write('\n')
                saveable_tx = [tx.__dict__ for tx in self.__open_transactions]
                file_store.write(json.dumps(saveable_tx))
                file_store.write('\n')
                # Save node data
                file_store.write(json.dumps(list(self.__peer_nodes)))
        except IOError:
            print('Saving failed!')
    # Start: Use of Pickle instead. uses less code. not fully implemented yet.
    def save_data_pickle(self):
        """Save blockchain + open transactions snapshot to a file.

        Pickle-based alternative to save_data(); note the fixed filename
        (no node_id) -- not wired into the rest of the class yet.
        """
        with open("blockchain.pickle", mode='wb') as file_store:
            save_data = {
                'chain': self.__chain,
                'ot': self.__open_transactions
            }
            file_store.write(pickle.dumps(save_data))
    def load_data_pickle(self):
        """Load the pickle snapshot written by save_data_pickle()."""
        with open('blockchain.pickle', mode='rb') as file_store:
            file_content = pickle.loads(file_store.read())
            self.__chain = file_content['chain']
            self.__open_transactions = file_content['ot']
    # End: Use of Pickle instead. uses less code. not fully implemented yet.
    def proof_of_work(self):
        """Generate a proof of work for the open transactions, the hash of the previous block and a random number (which is guessed until it fits)."""
        last_block = self.__chain[-1]
        last_hash = hash_block(last_block)
        proof_nonce = 0
        # Brute-force the nonce until Verification accepts the hash.
        while not Verification.valid_proof(self.__open_transactions, last_hash, proof_nonce):
            proof_nonce += 1
        return proof_nonce
    def get_balance(self, sender=None):
        """ Get amount(s) sent and recieved for a sender in the blockchain

        Defaults to this node's public key; returns None when no sender is
        given and the node has no key. Sent amounts include still-open
        (unmined) transactions; received amounts count mined blocks only.
        """
        if(sender == None):
            if(self.public_key == None):
                return None
            participant = self.public_key
        else:
            participant = sender
        # Nested list comprehension
        tx_sender = [[tx.amount for tx in block.transactions if tx.sender == participant] for block in self.__chain]
        # Calculate open transaction not yet mined
        open_tx_sender = [open_tx.amount for open_tx in self.__open_transactions if open_tx.sender == participant]
        tx_sender.append(open_tx_sender)
        amount_sent = sum_reducer(tx_sender)
        # Todo: We can abstract this
        tx_recipient = [[tx.amount for tx in block.transactions if tx.recipient == participant] for block in self.__chain]
        amount_received = sum_reducer(tx_recipient)
        return amount_received - amount_sent
def get_last_blockchain_value(self):
""" Returns the last value of a curretn blick chain"""
# If list is empty
if len(self.__chain) < 1:
return None
return self.__chain[-1]
def add_transaction(self, recipient, sender, signature, amount = 1.0, is_receiving=False):
""" Adds transaction to open transactions
Arguments:
:sender: To Who
:recipient: By Whom
:amount: How much
"""
if self.public_key == None:
return False
transaction = Transaction(sender, recipient, signature, amount)
if Verification.verify_transaction(transaction, self.get_balance):
self.__open_transactions.append(transaction)
self.save_data()
if not is_receiving:
for node in self.__peer_nodes:
url = 'http://{}/broadcast-transaction'.format(node)
try:
response = requests.post(url, json={
'sender': sender, 'recipient': recipient, 'amount': amount, 'signature': signature})
if response.status_code == 400 or response.status_code == 500:
print('Transaction declined, needs resolving')
return False
except requests.exceptions.ConnectionError:
continue
return True
return False
def add_block(self, block):
transactions = [Transaction(tx['sender'],tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']]
is_valid_prood = Verification.valid_proof(transactions[:-1], block['previous_hash'],block['proof'])
hashes_match = hash_block(self.chain[-1]) == block['previous_hash']
if not is_valid_prood or not hashes_match:
return False
block_object = Block(block['index'], block['previous_hash'], transactions, block['proof'], block['timestamp'])
self.__chain.append(block_object)
# Make a copy becuaes we are manipkauting the original and dont wan to iterate on it
open_trns = self.__open_transactions[:]
# Could possibly refactor for better perfomance
# Update the open trnasaction on the peer node when a new block is braodcast
for incoming_trn in block['transactions']:
for open_trn in open_trns:
if(open_trn.sender == incoming_trn['sender'] and open_trn.recipient == incoming_trn['recipient'] and open_trn.amount == incoming_trn['amount'] and open_trn.signature == incoming_trn['signature'] ):
try:
self.__open_transactions.remove(open_trn)
except ValueError:
print("Item is already removed")
self.save_data()
return True
def resolve(self):
winner_chain = self.chain
replace = False
for node in self.__peer_nodes:
url = f'http://{node}/chain'
try:
response = requests.get(url)
node_chain = response.json()
node_chain = [Block(block['index'], block['previous_hash'], [Transaction(tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']],
block['proof'], block['timestamp']) for block in node_chain]
node_chain_length = len(node_chain)
local_chain_length = len(winner_chain)
if node_chain_length > local_chain_length and Verification.verify_chain(node_chain):
winner_chain = node_chain
replace = True
except requests.exceptions.ConnectionError:
continue
self.resolve_conflicts = False
self.chain = winner_chain
if replace:
# we assume transactions are correct after replace , we clear the transactions
self.__open_transactions = []
self.save_data()
return replace
def mine_block(self, node):
""" Adds all open transactions onto a block in the blockchain """
if self.public_key == None:
return None
last_block = self.get_last_blockchain_value()
# List comprehensions
hashed_block = hash_block(last_block)
proof = self.proof_of_work()
reward_transaction = Transaction("MINING", self.public_key, '', MINING_REWARD)
# Create copy of open transactions
copied_transactions = self.__open_transactions[:]
for tx in copied_transactions:
if not Wallet.verify_transaction(tx):
return None
# What if the append block fails. We use a copy of the list without affecting the original
copied_transactions.append(reward_transaction)
block = Block(len(self.__chain), hashed_block, copied_transactions, proof)
self.__chain.append(block)
self.__open_transactions = []
self.save_data()
# Broadcast to all registered peer nodes about mine
for node in self.__peer_nodes:
url = f'http://{node}/broadcast-block'
serializable_block = block.__dict__.copy()
serializable_block['transactions'] = [tx.__dict__ for tx in serializable_block['transactions']]
try:
result = requests.post(url, json={'block': serializable_block})
print(f'mine_block()-> Broadcast to url {url} with reponse of {result}')
if result.status_code == 400 or result.status_code == 500:
print("Block declined, needs resolving")
if result.status_code == 409:
self.resovle_conflicts = True
print(f'/mine_block() -> self.resovle_conflicts: {self.resovle_conflicts}')
except requests.exceptions.ConnectionError:
continue
return block
    def add_peer_node(self, node):
        """Add a new node to the peer node set and persist the change.

        Arguments:
            :node: the node URL that should be added
        """
        self.__peer_nodes.add(node)
        self.save_data()
    def remove_node(self, peer_node):
        """Remove a node from the peer node set (no error if absent) and persist.

        Arguments:
            :peer_node: the node URL that should be deleted
        """
        self.__peer_nodes.discard(peer_node)
        self.save_data()
    def get_peer_nodes(self):
        """Return a (copied) list of all connected peer nodes."""
        return list(self.__peer_nodes)
359d20871003863c4d0998b3d2aa20140093b80a | Python | McNoah/Educational-Data-Mining | /IP2IDMapper.py | UTF-8 | 795 | 2.640625 | 3 | [] | no_license | import csv
# Map the IP addresses listed in IP.csv to their IDs from Mapping2.csv and
# write each matched "ip,id" pair to test.txt. A pair is printed/written every
# time it appears in the mapping file (duplicates included), matching the
# original behaviour; mylist/myset track the distinct matched IPs.
mylist = []
myset = set()
with open('IP.csv', 'r') as ip_file, open('Mapping2.csv', 'r') as mapping_file:
    # A set gives O(1) membership tests instead of scanning a list per row.
    known_ips = set(ip_file.read().splitlines())
    mapping_rows = mapping_file.read().splitlines()
# The output file is now closed deterministically (the original leaked it).
with open('test.txt', 'w') as result:
    # Skip the header row of the mapping file.
    for row in mapping_rows[1:]:
        fields = row.split(',')
        if fields[0] in known_ips:
            if fields[0] not in myset:
                mylist.append(fields[0])
                myset.add(fields[0])
            print(fields[0] + ',' + fields[1])
            result.write(fields[0] + ',' + fields[1] + '\n')
| true |
4fe2fc234c0138063917b1bf72e4e0fa78f2f070 | Python | IsseBisse/adventcode20 | /10/AdapterArray.py | UTF-8 | 1,895 | 3.515625 | 4 | [] | no_license | def get_data(path):
with open(path) as file:
data = file.read().split("\n")
for i, entry in enumerate(data):
data[i] = int(entry)
data.append(0)
data.append(max(data) + 3)
return data
def part_one():
    """Count 1-jolt and 3-jolt gaps in the sorted adapter list and print their product."""
    data = get_data("input.txt")
    print(data)
    data.sort()
    print(data)
    # gap_counts[0] counts 1-jolt gaps, gap_counts[1] counts 3-jolt gaps.
    gap_counts = [0, 0]
    for lower, upper in zip(data, data[1:]):
        gap = upper - lower
        if gap == 1:
            gap_counts[0] += 1
        elif gap == 3:
            gap_counts[1] += 1
    print(gap_counts)
    print(gap_counts[0] * gap_counts[1])
class Node:
    """A node in the adapter graph: one joltage value plus the adapters
    (children) reachable within a +1, +2 or +3 jolt step."""

    def __init__(self, jolt):
        self.jolt = jolt           # joltage rating of this adapter
        self.children = list()     # directly reachable adapter nodes

    def __str__(self):
        text = "%s: " % self.jolt
        text += "%s" % [child.jolt for child in self.children]
        return text

    def add_connections(self, available_jolts):
        """Link this node to every existing node 1, 2 or 3 jolts above it."""
        for step in (1, 2, 3):
            candidate = self.jolt + step
            if candidate in available_jolts:
                self.children.append(available_jolts[candidate])

    def count_paths_to_end(self, end_jolt):
        """Count distinct paths from this node to the node rated end_jolt."""
        if not self.children:
            # A leaf counts as one path only if it IS the end node.
            return 1 if self.jolt == end_jolt else 0
        return sum(child.count_paths_to_end(end_jolt) for child in self.children)
def part_two():
    """Count the total number of valid adapter arrangements (AoC 2020 day 10, part 2)."""
    data = get_data("input.txt")
    data.sort()
    # Split the chain at every 3-jolt gap: arrangements on each side of a
    # mandatory 3-jolt step are independent, so their counts multiply.
    split_data = list()
    start_ind = 0
    for i in range(len(data) - 1):
        if data[i+1] - data[i] == 3:
            split_data.append(data[start_ind:i+1])
            start_ind = i+1
    # NOTE(review): the trailing segment after the last 3-jolt gap is never
    # appended; it holds a single value, contributing a factor of 1, so the
    # final product is unaffected.
    total_num_configs = 1
    for sub_data in split_data:
        # Build one graph node per joltage in this segment.
        available_jolts = dict()
        for jolt in sub_data:
            available_jolts[jolt] = Node(jolt)
        root = available_jolts[sub_data[0]]
        for key in available_jolts:
            available_jolts[key].add_connections(available_jolts)
        # Distinct paths from the segment's first to its last joltage.
        num_configurations = root.count_paths_to_end(sub_data[-1])
        total_num_configs *= num_configurations
    print(total_num_configs)
# Run part two when executed as a script (no effect on import).
if __name__ == '__main__':
    part_two()
74b023c3e38c7ce2d3661aa2aa3b4c5a292fe11e | Python | ericgiunta/nebp | /unfolding_tool/origami.py | UTF-8 | 3,619 | 3.1875 | 3 | [
"MIT"
] | permissive | import numpy as np
from numpy.linalg import norm
from scipy.optimize import basinhopping
def preprocess(N, sigma2, R, f_def, params):
    """Apply optional preprocessing steps to the unfolding inputs.

    When params['scale'] is truthy, the default spectrum f_def is rescaled
    (in place) so the readings it predicts match the measurements on average.
    """
    if params.get('scale'):
        # Readings the default spectrum would produce through the response matrix.
        predicted = np.sum(R * f_def, axis=1)
        f_def *= np.average(N / predicted)
    return N, sigma2, R, f_def, params
def MAXED(N, sigma2, R, f_def, params):
    """The MAXED unfolding algorithm.

    :param N: Measured detector readings.
    :param sigma2: Variances of the measurements.
    :param R: Response matrix, shape (len(N), len(f_def)).
    :param f_def: Default (a-priori) spectrum.
    :param params: Dict with 'Omega', the algorithm's constraint parameter.
    :return: The unfolded spectrum (numpy array).
    """
    # pull out algorithm-specific parameters
    Omega = params['Omega']
    # create the function that we will maximize, Z
    def Z(lam, N, sigma2, R, f_def, Omega):
        """A function, the maximization of which is equivalent to the
        maximization of the MAXED potential (minimized here, so negated)."""
        A = - np.sum(f_def * np.exp(- np.sum((lam * R.T).T, axis=0)))
        B = - (Omega * np.sum(lam**2 * sigma2))**(0.5)
        C = - np.sum(N * lam)
        # negate because basinhopping performs a minimization
        return - (A + B + C)
    # one Lagrange multiplier per measurement, initialised to 1
    lam = np.ones(len(N))
    # apply the simulated annealing (basin hopping) to Z
    mk = {'args': (N, sigma2, R, f_def, Omega)}
    lam = basinhopping(Z, lam, minimizer_kwargs=mk).x
    # back out the spectrum values from the optimal multipliers
    return f_def * np.exp(-np.sum((lam * R.T).T, axis=0))
def Gravel(N, sigma2, R, f_def, params):
    """The modified SAND-II iterative unfolding algorithm used in the Gravel code.

    :param N: Measured detector readings.
    :param sigma2: Variances of the measurements.
    :param R: Response matrix, shape (len(N), len(f_def)).
    :param f_def: Default (initial guess) spectrum.
    :param params: Dict with 'max_iter', 'tol' and optionally 'evolution'
        (truthy: also return the spectrum recorded at each iteration).
    :return: The unfolded spectrum f, or (f, evolution_list) when
        params['evolution'] is truthy.
    """
    # pull out algorithm-specific parameters
    max_iter = params['max_iter']
    tol = params['tol']
    # Bug fix: `evolution` was only bound when the key existed in params,
    # raising a NameError below whenever it was missing.
    evolution = params.get('evolution', False)
    evolution_list = []
    # initalize
    iteration = 0
    f = f_def
    N0 = np.sum(R * f, axis=1)
    # iterate until the predicted readings match the measurements
    while iteration < max_iter and norm(N0 - N, ord=2) > tol:
        message = 'Iteration {}: Error {}'.format(iteration, norm(N0 - N, ord=2))
        print(message)
        if evolution:
            evolution_list.append(f)
        # break down the update rule into simpler terms
        a = (R * f)
        b = np.sum(R * f, axis=1)
        c = (N**2 / sigma2)
        log_term = np.log(N / b)
        # numerator and denominator of the exponent of the multiplicative update
        top = np.sum((((a.T / b) * c) * log_term).T, axis=0)
        bot = np.sum(((a.T / b) * c).T, axis=0)
        coef = np.exp(top / bot)
        # multiplicative update of the spectrum estimate
        f = f * coef
        N0 = np.sum(R * f, axis=1)
        iteration += 1
    message = 'Final Iteration {}: Error {}'.format(iteration, norm(N0 - N, ord=2))
    print(message)
    if evolution:
        evolution_list.append(f)
        return f, evolution_list
    return f
def unfold(N, sigma2, R, f_def, method='MAXED', params=None):
    """A utility that deconvolutes (unfolds) neutron spectral data given
    typical inputs and a selection of unfolding algorithm.

    :param N: Measured detector readings.
    :param sigma2: Variances of the measurements (same length as N).
    :param R: Response matrix with shape (len(N), len(f_def)).
    :param f_def: Default (a-priori) spectrum.
    :param method: 'MAXED' or 'Gravel'.
    :param params: Algorithm-specific parameters (dict).
    :return: The unfolded spectrum (Gravel may also return an evolution list).
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if params is None:
        params = {}
    # check input
    available_methods = ('MAXED', 'Gravel')
    assert method in available_methods, 'method must by literal in {}'.format(available_methods)
    assert len(N) == len(sigma2), 'N and sigma2 must be the same length.'
    assert R.shape == (len(N), len(f_def)), 'Shape of R must be consistent with other inputs.'
    # preprocess the data
    N, sigma2, R, f_def, params = preprocess(N, sigma2, R, f_def, params)
    # dispatch to the selected algorithm
    if method == 'MAXED':
        return MAXED(N, sigma2, R, f_def, params)
    return Gravel(N, sigma2, R, f_def, params)
| true |
af5621eb9aaf33aaa690ead83a17208eac331fcb | Python | dangkim/FBScanTool | /Code/utils.py | UTF-8 | 2,352 | 2.578125 | 3 | [
"MIT"
] | permissive | import os
# Create Original URL to crawl Data
def create_original_link(url):
    """Return the canonical profile URL used for crawling.

    Numeric profiles (profile.php?id=...) are normalised onto the
    en-gb.facebook.com host; vanity URLs are returned unchanged.
    """
    if ".php" not in url:
        return url
    profile_id = url.split("=")[1]
    return "https://en-gb.facebook.com/profile.php?id=" + profile_id
# Get Section Route
def get_friend_section_route(url):
    """Return the friend-related section routes for a profile URL.

    Numeric profiles (".php" URLs) use query-string routes ("&sk=..."),
    vanity profiles use path routes ("/...").
    """
    names = ["friends",
             "friends_mutual",
             "following",
             "followers",
             "friends_work",
             "friends_college",
             "friends_current_city",
             "friends_hometown"]
    prefix = "&sk=" if ".php" in url else "/"
    return [prefix + name for name in names]
# Get Photos Section Route
def get_photos_section_route(url):
    """Return the photo section routes ("photos_by", "photos_of") for a profile URL."""
    prefix = "&sk=" if ".php" in url else "/"
    return [prefix + name for name in ("photos_by", "photos_of")]
# Get Video Section Route
def get_video_section_route(url):
    """Return the video section routes ("videos", "videos_of") for a profile URL."""
    prefix = "&sk=" if ".php" in url else "/"
    return [prefix + name for name in ("videos", "videos_of")]
# Get Video Section Route
def get_about_section_route(url):
    """Return the about-page section routes for a profile URL."""
    names = ["about_overview",
             "about_work_and_education",
             "about_places",
             "about_contact_and_basic_info",
             "about_family_and_relationships",
             "about_details",
             "about_life_events"]
    prefix = "&sk=" if ".php" in url else "/"
    return [prefix + name for name in names]
# Get Profile Folder
def get_profile_folder(folder, url):
    """Return the output directory for a profile, named after its numeric id
    (for profile.php URLs) or its vanity name (last URL segment)."""
    last_segment = url.split('/')[-1]
    if ".php" in url:
        last_segment = last_segment.replace('profile.php?id=', '')
    return os.path.join(folder, last_segment)
| true |
52829fca46bf28cb98a08a32b5e8aec6a6cb0630 | Python | mbr4477/frontpage | /frontpage/__main__.py | UTF-8 | 2,055 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | import argparse
import json
from subprocess import run
import os
import glob
import dropbox
import datetime
import random
def print_file(filename, printer_name):
    """Tile the PDF 2-up (poster mode), mark it as draft, and queue it on the printer."""
    # print this file
    # mutool poster -y 2: split each page vertically into two, writing out.pdf.
    run(['mutool', 'poster', '-y', '2', filename, 'out.pdf'])
    # cpdf -draft -boxes: draft mode with placeholder boxes (per the cpdf manual).
    run(['cpdf', 'out.pdf', '-draft', '-boxes', '-o', 'out.pdf'])
    # lp: send to the given printer, duplex (long edge), fitted and rotated.
    run([
        'lp', '-d', printer_name,
        '-o', 'fit-to-page',
        '-o', 'sides=two-sided-long-edge',
        '-o', 'orientation-requested=4',
        'out.pdf'])
def main():
    """Download today's front pages, optionally print one, and upload to Dropbox."""
    parser = argparse.ArgumentParser()
    parser.add_argument("config_file", type=str, help="path to sources JSON file with links to PDFs")
    parser.add_argument("--token", type=str, help="path to API token JSON file")
    parser.add_argument("--no-print", action='store_true', default=False, help="prevents print (overrides setting from config file")
    args = parser.parse_args()
    # Set up the Dropbox client only when a token file is supplied.
    if args.token:
        with open(args.token, "r") as token_file:
            token = json.loads(token_file.read())['dropbox']
        dbx = dropbox.Dropbox(token)
    with open(args.config_file, "r") as config_file:
        config = json.loads(config_file.read())
        print_key = config['print']
        printer_name = config['printer']
        sources = config['sources']
    # Remove leftover PDFs from a previous run.
    existing = glob.glob("*.pdf")
    for file in existing:
        os.remove(file)
    # Fetch today's front page for every configured source key.
    for page_key in sources:
        url = f"https://cdn.newseum.org/dfp/pdf{str(datetime.datetime.today().day)}/{page_key}.pdf"
        run(['wget', '-q', url])
    existing = glob.glob("*.pdf")
    # "$RANDOM" prints one randomly chosen page; otherwise pages whose
    # filename starts with the configured key are printed.
    if print_key == "$RANDOM" and not args.no_print:
        print_file(random.choice(existing), printer_name)
    for file in existing:
        if print_key and print_key != "$RANDOM" and file.startswith(print_key) and not args.no_print:
            print_file(file, printer_name)
        # Upload every downloaded page when Dropbox is configured.
        if args.token:
            with open(file, "rb") as pdf_file:
                dbx.files_upload(pdf_file.read(), f'/{file}', dropbox.files.WriteMode.overwrite)
if __name__ == "__main__":
    main()
| true |
53130b6184eef2761adc99b71d5d2ccd9e60d9ad | Python | MYlindaxia/Python | /HomeWorkSystem/main.py | UTF-8 | 461 | 2.515625 | 3 | [] | no_license | import easygui as gui
import CheckDemo
# Dialog text (Chinese): "N students have already submitted; M students have not".
# Button choices: "print students who did NOT submit" / "print students who did".
t = gui.buttonbox(msg="已经有:"+str(CheckDemo.Sum)+"交了作业\n还有:"+str(CheckDemo.Total)+"名同学没有交",title="MADE IN MYlindaxia",choices=('打印未交作业的同学','打印交了作业的同学'))
if(t=='打印未交作业的同学'):
    print("good")
    # Show the list of students who have not handed in their homework.
    gui.msgbox(str(CheckDemo.Fall),title='作业管理系统')
else:
    print("bad")
    # Show the list of students who have handed in their homework.
    gui.msgbox(str(CheckDemo.Good),title="作业管理系统")
6c2879e1d76dc6ac73af2255d25b79116a7d6cf0 | Python | zm-reborn/zmr-vpk-tools | /material_textures.py | UTF-8 | 4,076 | 2.96875 | 3 | [] | no_license | """Prints model's /possible/ materials to a file."""
import argparse
import os
import re
import sys
import vpk_generator
def get_mat_paths(mats, lowercase=False):
ret = []
for p in mats['paths']:
for tex in mats['textures']:
s = os.path.join(
'materials',
os.path.join(p, tex))
if lowercase:
s = s.lower()
ret.append(s)
return ret
def int_from_file(fp):
    """Read a 4-byte little-endian unsigned integer from an open binary file."""
    raw = fp.read(4)
    return int.from_bytes(raw, byteorder='little')
def read_string_from_file(fp):
    """Read bytes up to (but not including) a NUL terminator, decoded as UTF-8."""
    collected = bytearray()
    while True:
        current = fp.read(1)[0]
        if current == 0:
            break
        collected.append(current)
    return collected.decode('utf-8')
def get_mdl_data(file):
    """Parse texture names and texture directory paths out of an .mdl model file.

    :param file: Path to the .mdl file.
    :return: Dict with 'textures' (texture names) and 'paths' (directories).
    :raises Exception: If the file does not start with the 'IDST' magic.
    """
    ret = {
        'textures': [],
        'paths': []
    }
    with open(file, 'rb') as fp:
        # Check magic number
        if fp.read(4).decode('utf-8') != 'IDST':
            raise Exception('Not an mdl file!')
        # Texture names: count/offset pair read from fixed header offset 204.
        # NOTE(review): offsets 204/212 and the 64-byte record size presumably
        # follow the studiohdr_t layout -- confirm against the model SDK.
        fp.seek(204)
        texture_count = int_from_file(fp)
        texture_offset = int_from_file(fp)
        fp.seek(texture_offset)
        for i in range(0, texture_count):
            # Each texture record is 64 bytes; its first field is the name
            # offset relative to the start of the record.
            pos = texture_offset + (i * 64)
            fp.seek(pos)
            name_offset = int_from_file(fp)
            fp.seek(pos + name_offset)
            ret['textures'].append(read_string_from_file(fp))
        # Texture paths
        fp.seek(212)
        texturedir_count = int_from_file(fp)
        texturedir_offset = int_from_file(fp)
        fp.seek(texturedir_offset)
        for i in range(0, texturedir_count):
            # Each entry is a 4-byte offset to a NUL-terminated path string.
            pos = texturedir_offset + (i * 4)
            fp.seek(pos)
            name_offset = int_from_file(fp)
            fp.seek(name_offset)
            ret['paths'].append(read_string_from_file(fp))
    return ret
def create_argparser():
    """Build the command-line parser for the texture-extraction tool.

    :return: A configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser(
        description="Get material's texture paths.",
        fromfile_prefix_chars='@')
    parser.add_argument(
        'mats',
        nargs='+',
        help="""List of models.""")
    parser.add_argument(
        '--output', '-o',
        default='textures.txt',
        # Fixed help text: the actual default is textures.txt, not mats.txt.
        help="""Output file. textures.txt by default.""")
    parser.add_argument(
        '--dir', '-d',
        default=os.getcwd(),
        help="""Directory to set.""")
    parser.add_argument(
        '--lowercase',
        action='store_true',
        default=False,
        help='Lowercase the texture names')
    return parser
if __name__ == '__main__':
    parser = create_argparser()
    args = parser.parse_args()
    cwd = os.getcwd()
    # Work relative to the requested game directory.
    vpk_generator.change_cwd(args.dir)
    textures = []
    for mat in args.mats:
        try:
            with open(os.path.join('materials', mat + '.vmt'), 'r') as fp:
                data = fp.read()
            # Pull every texture referenced by the material's shader
            # parameters ($basetexture, $bumpmap, ...), skipping // comments.
            found_textures = re.findall(
                r'^(?:\t| ){0,}(?!\/\/.{0,})"?(?:\$(?:basetexture|envmapmask|bumpmap|phongexponenttexture|lightwarptexture))"?(?:\t| ){0,}"?([^"]+)',
                data,
                flags=re.MULTILINE | re.IGNORECASE)
            new_textures = []
            # Check for duplicates
            for tex in found_textures:
                if args.lowercase:
                    tex = tex.lower()
                if tex not in textures:
                    new_textures.append(tex)
            # Check if the file exists.
            for tex in new_textures[:]:
                fullpath = os.path.join('materials', tex + '.vtf')
                if not os.path.exists(fullpath):
                    print(
                        'Texture',
                        fullpath,
                        'does not exist! (Material: %s)' % mat)
                    new_textures.remove(tex)
            textures = textures + new_textures
        except IOError:
            print('Could not find material', mat)
    # Write them to file.
    # Restore the original working directory before writing the output.
    vpk_generator.change_cwd(cwd)
    with open(args.output, 'w') as fp:
        fp.write('\n'.join(textures))
| true |
66e1e86bd4ff53df9e3ac46892d9473e84ad159a | Python | kin5/react-flask-trivia | /db.py | UTF-8 | 668 | 2.78125 | 3 | [] | no_license | import sqlite3
class DB:
    """Thin data-access helper for the trivia-game SQLite database."""

    @staticmethod
    def query(query, data=None):
        """Run a parameterised query, creating the trivia_games table if needed.

        :param query: SQL statement with ? placeholders.
        :param data: A parameter tuple, or a list of tuples for executemany.
            (Bug fix: the original passed None straight to execute(), which
            sqlite3 rejects; an empty tuple is substituted instead.)
        :return: All fetched rows.
        """
        conn = sqlite3.connect("trivia-game.db")
        try:
            cur = conn.cursor()
            # Ensure the schema exists before running the caller's query.
            cur.execute("""
                CREATE TABLE IF NOT EXISTS trivia_games (
                    token PRIMARY KEY,
                    correct_answer,
                    lives,
                    score,
                    question_count,
                    multiplier,
                    time_stamp
                );
            """)
            if isinstance(data, list):
                cur.executemany(query, data)
            else:
                cur.execute(query, data if data is not None else ())
            result = cur.fetchall()
            conn.commit()
            return result
        finally:
            # Close the connection even when the query raises.
            conn.close()
d46a62f7c26c61598d3aa81a49fa7d90ac9b1684 | Python | krittinunt/RaspberryPi | /LED_Runing_I.py | UTF-8 | 393 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python3
# by krittinunt@gmail.com
# Running-light demo: eight LEDs switched on one by one, then off one by one.
from time import sleep
import RPi.GPIO as GPIO
# Use Broadcom (BCM) pin numbering.
GPIO.setmode(GPIO.BCM)
# BCM pin numbers of the eight LEDs, in running order.
LED = [26, 19, 13, 6, 5, 21, 20, 16]
for i in range(8):
    GPIO.setup(LED[i], GPIO.OUT)
GPIO.setwarnings(False)
try:
    while True:
        for i in range(8):
            GPIO.output(LED[i], True)
            sleep(0.5)
        for i in range(8):
            GPIO.output(LED[i], False)
            sleep(0.5)
finally:
    # Always release the GPIO pins, even on Ctrl-C.
    GPIO.cleanup()
| true |
787c4d52befdd17e5e743326dd3e6c60f3822b39 | Python | jyu001/New-Leetcode-Solution | /solved/457_circular_array_loop.py | UTF-8 | 1,581 | 3.734375 | 4 | [] | no_license | '''
457. Circular Array Loop
DescriptionHintsSubmissionsDiscussSolution
You are given an array of positive and negative integers. If a number n at an index is positive, then move forward n steps. Conversely, if it's negative (-n), move backward n steps. Assume the first element of the array is forward next to the last element, and the last element is backward next to the first element. Determine if there is a loop in this array. A loop starts and ends at a particular index with more than 1 element along the loop. The loop must be "forward" or "backward".
Example 1: Given the array [2, -1, 1, 2, 2], there is a loop, from index 0 -> 2 -> 3 -> 0.
Example 2: Given the array [-1, 2], there is no loop.
Note: The given array is guaranteed to contain no element "0".
Can you do it in O(n) time complexity and O(1) space complexity?
'''
class Solution(object):
    def circularArrayLoop(self, nums):
        """Return True if nums contains a single-direction cycle of length > 1.

        :type nums: List[int]
        :rtype: bool
        """
        n = len(nums)
        # Indices explored by earlier walks (known not to yield a valid loop).
        nl = set([])
        for i in range(n):
            # Indices visited during the walk starting at i.
            l = set([])
            if i in nl: continue
            j = i
            # Step values recorded along the walk (for the direction check).
            curr = [nums[j]]
            while True:
                if j in l:
                    # Revisited an index: a cycle exists. It only counts when
                    # every recorded step has the same sign (one direction).
                    # NOTE(review): curr also contains the walk's pre-cycle
                    # tail, not just the cycle -- verify against inputs whose
                    # tail has a different sign than the cycle.
                    if min(curr)*max(curr)>0: return True
                    else: break
                else: l.add(j)
                k = (j+nums[j])%n
                # A self-loop (single-element cycle) does not count as a loop.
                if j==k: break
                else:
                    j=k
                    curr.append(nums[j])
            # Nothing visited on this walk can start a valid loop later.
            for e in l: nl.add(e)
            #print(l, nl)
        return False
| true |
e8a3f8037a93463008ff26ba000b2a0cc14871b9 | Python | Verlanti2002/TepsitProject | /database.py | UTF-8 | 4,710 | 2.71875 | 3 | [] | no_license | import mariadb
import threading
class Database: # Classe Database
    """Data-access layer for the `dipendenti` (employees) and `zone_lavoro`
    (work zones) tables of a MariaDB database.

    All statements use parameter binding (%s placeholders) instead of the
    original f-string interpolation, removing the SQL-injection risk.
    A lock serialises mutating calls across threads.
    """
    # Costruttore
    def __init__(self, user, password, host, database, port=3306):
        # Connessione al database: one shared connection and cursor.
        self.conn = mariadb.connect(
            user=user,
            password=password,
            host=host,
            port=port,
            database=database,
        )
        self.cursor = self.conn.cursor()
        self.lock = threading.Lock()

    def get_zone_lavoro_list(self):
        """Return (id, nome_zona) for every work zone."""
        self.cursor.execute("SELECT id, nome_zona FROM zone_lavoro")
        return self.cursor.fetchall()

    # Metodo per l'inserimento dei record nella tabella dipendenti
    def insert_dipendenti(self, nome, cognome, posizione_lavoro, data_assunzione, stipendio, telefono, id_zone_lavoro):
        """Insert an employee after checking the referenced work zone exists.

        Bug fix: the original compared the *string literal* 'id' with the
        zone id (WHERE 'id' = ...) and then tested the cursor object itself
        (always truthy), so invalid zone ids were never rejected.
        """
        with self.lock:
            self.cursor.execute("SELECT id FROM zone_lavoro WHERE id = %s", (id_zone_lavoro,))
            if not self.cursor.fetchall():
                # Unknown work zone: skip the insert, matching original intent.
                return
            query = (
                "INSERT INTO dipendenti (nome, cognome, posizione_lavoro, data_assunzione, stipendio, telefono, id_zone_lavoro) "
                "VALUES (%s, %s, %s, %s, %s, %s, %s)"
            )
            parametri = (nome, cognome, posizione_lavoro, data_assunzione, stipendio, telefono, id_zone_lavoro)
            self.cursor.execute(query, parametri)
            self.conn.commit()  # Salva le modifiche nel database

    # Metodo per l'inserimento dei record nella tabella zone_lavoro
    def insert_zone_lavoro(self, nome_zona, numero_clienti):
        """Insert a work zone; numero_clienti must be convertible to int."""
        with self.lock:
            try:
                numero_clienti = int(numero_clienti)
            except ValueError:
                raise Exception("Valore inserito non valido")
            self.cursor.execute(
                "INSERT INTO zone_lavoro (nome_zona, numero_clienti) VALUES (%s, %s)",
                (nome_zona, numero_clienti),
            )
            self.conn.commit()

    # Metodo per la lettura dei record della tabella dipendenti
    def read_all_dipendenti(self):
        """Return every row of the dipendenti table."""
        self.cursor.execute("SELECT * FROM dipendenti")
        return self.cursor.fetchall()

    # Metodo per la lettura dei record della tabella zone_lavoro
    def read_all_zone_lavoro(self):
        """Return every row of the zone_lavoro table."""
        self.cursor.execute("SELECT * FROM zone_lavoro")
        return self.cursor.fetchall()

    # Metodo per l'aggiornamento dei record nella tabella dipendenti
    def update_dipendenti(self, name, last_name, pos_lav, date, salary, phone, id_zone_lavoro, id):
        """Update every field of the employee identified by `id`."""
        with self.lock:
            query = ("UPDATE dipendenti SET nome=%s, cognome=%s, posizione_lavoro=%s, "
                     "data_assunzione=%s, stipendio=%s, telefono=%s, id_zone_lavoro=%s WHERE id=%s")
            self.cursor.execute(query, (name, last_name, pos_lav, date, salary, phone, id_zone_lavoro, id))
            self.conn.commit()

    # Metodo per l'aggiornamento dei record nella tabella zone_lavoro
    def update_zone_lavoro(self, name, num_clienti, id):
        """Update name and client count of the work zone identified by `id`."""
        with self.lock:
            self.cursor.execute(
                "UPDATE zone_lavoro SET nome_zona=%s, numero_clienti=%s WHERE id=%s",
                (name, num_clienti, id),
            )
            self.conn.commit()

    # Metodo per l'eliminazione dei record dalla tabella dipendenti
    def delete_dipendenti(self, id_da_eliminare):
        """Delete the employee identified by `id_da_eliminare`."""
        with self.lock:
            # Parameterised: the original interpolated the id into an f-string.
            self.cursor.execute("DELETE FROM dipendenti WHERE id = %s", (id_da_eliminare,))
            self.conn.commit()

    # Metodo per l'eliminazione dei record dalla tabella zone_lavoro
    def delete_zone_lavoro(self, id_da_elimianare):
        """Delete the work zone identified by the given id."""
        with self.lock:
            self.cursor.execute("DELETE FROM zone_lavoro WHERE id = %s", (id_da_elimianare,))
            self.conn.commit()
| true |
51707f30eddc400adc71e5e63ad8a7b1759ea434 | Python | narutoben10af/cis | /PycharmProjects/Scraping/BeautifulSoup.py | UTF-8 | 3,013 | 3.15625 | 3 | [] | no_license | import requests
# Tutorial script: parsing a local HTML file and live web pages with BeautifulSoup.
from bs4 import BeautifulSoup
# Read the local sample page into a string.
htmlFile = open("home.html")
htmlData = htmlFile.read()
htmlFile.close()
soup = BeautifulSoup(htmlData, "html.parser")
print(soup)
# prettify output
print(soup.prettify())
#Get the title tag
title = soup.title
print(title)
#Get the title text
titleText = soup.title.text
print(titleText)
divTag = soup.div #Will only get the first div
print(divTag)
divTagText = soup.div.text #Will only get the first div
print(divTagText)
#Get the div content
divTagFind = soup.find("div")
print(divTagFind)
#Get the div content of biglink2
divTag = soup.find("div", id = "biglink2")
print(divTag)
print("hi")
#Get the div content of biglink2
divTag = soup.find("div", id = "biglink2")
linkText = divTag.h2.a.text
print(linkText)
linkDescription = divTag.p.text
print(linkDescription)
print("hmm")
# find_all returns every matching tag as a list.
listDivTag = soup.find_all("div")
print(listDivTag)
print("space \n")
for divTag in listDivTag:
    linkText = divTag.h2.a.text
    print(linkText)
    linkDescription = divTag.p.text
    print(linkDescription)
    print()
#must have underscore after class
divTag = soup.find("div", class_ = "testing")
print(divTag)
#Get The Table
tableTag = soup.find("table") #will return the table
tableData = []
tableRows = tableTag.find_all("tr")
print(tableTag.prettify())
print(tableRows)
print()
for row in tableRows:
    tableCols = row.find_all('td') #find all td (cells)
    # The result is now a table of tags, we must take the
    #use the strip() method to remove surrounding spaces.
    listData = []
    for col in tableCols:
        listData.append(col.text.strip())
    tableData.append(listData)
#table data is a 2d list now
print(tableData)
#Getting a file from a server
import requests
from bs4 import BeautifulSoup
htmlFile = requests.get("http://first-web-scraper.readthedocs.io/en/latest/").text
# htmlFile = requests.get("http://www.tuj.ac.jp/ug/academics/semester-info/schedule/2019-spring-schedule.html").text
# soup = BeautifulSoup(htmlFile, "html.parser") #use html parser
#
# # print(soup.prettify())
#
# section = soup.find("h1")
#
# print(section)
#
#
# title = section.text
# print(title)
#
# section = soup.find_all("div", class_ = "section")
# print(section[1]) #second element in the list
# print(section[2].h2)
# print(section[2].h2.text)
#
# print(section[1].p.text)
#TUJ website
# Scrape the TUJ schedule table (id="myTable") into a 2-D list of cell texts.
htmlFile = requests.get("http://www.tuj.ac.jp/ug/academics/semester-info/schedule/2019-spring-schedule.html").text
soup = BeautifulSoup(htmlFile, "html.parser") #use html parser
tableTag2 = soup.find("table", id = "myTable")
tableData2 = []
tableBody = tableTag2.find('tbody')
tableRows2 = tableTag2.find_all('tr')
for row2 in tableRows2:
    tableCols2 = row2.find_all('td')
    listData2 = []
    for col2 in tableCols2:
        listData2.append(col2.text.strip())
    tableData2.append(listData2)
# print(tableData2)
# print()
print(tableData2[1][10])
print(tableBody)
# for i in range(0, len(data)):
#     if(strName.lower() in data[i][8]
5162d5898bb20667a79b3309d3e6b3b8581614b3 | Python | LaryLopes/Exercicios-Python | /média.py | UTF-8 | 185 | 3.625 | 4 | [] | no_license | n1 = float(input("nota 1: "))
# Read the remaining grades ("nota" = grade, Portuguese); n1 was read above.
n2 = float(input("nota 2: "))
n3 = float(input("nota 3: "))
n4 = float(input("nota 4: "))
# Arithmetic mean of the four grades ("média" = average).
m =(n1+n2+n3+n4)/4
print ("média: ", m)
| true |
8f7726a441367c5bff74c4c60daff68fe2b205cc | Python | AmauryVanEspen/craiglist_scraper | /spiders/jobs-titles.py | UTF-8 | 3,006 | 3.4375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
class JobsSpider(scrapy.Spider):
# name of the spider.
name = 'jobs-titles'
# allowed_domains contains the list of the domains that the spider is allowed scrape.
allowed_domains = ['newyork.craigslist.org/search/egr']
# start_urls contains the list of one or more URL(s) with which the spider starts crawling.
"""
Warning: Scrapy adds extra http:// at the beginning of the URL in start_urls and it also adds a trailing slash.
As we here already added https:// while creating the spider, we must delete the extra http://.
=> $ scrapy genspider jobs-titles https://newyork.craigslist.org/search/egr
So double-check that the URL(s) in start_urls are correct or the spider will not work.
"""
# start_urls = ['http://https://newyork.craigslist.org/search/egr/']
start_urls = ['https://newyork.craigslist.org/search/egr/']
# the main function of the spider. Do NOT change its name; however, you may add extra functions if needed.
def parse(self, response):
# pass
"""
titles is a [list] of text portions extracted based on a rule.
response is simply the whole html source code retrieved from the page.
:param response:
:return:
- print(response) should return HTTP status code. 200 for OK.
see https://en.wikipedia.org/wiki/List_of_HTTP_status_codes.
- print(response.body) should return the whole source code of the page.
- response.xpath(). xpath is how we will extract portions of text and it has rules.
"""
titles = response.xpath('//a[@class="result-title hdrlnk"]/text()').extract()
"""
Inside response.xpath
- // means instead of starting from the <html>, just start from the tag that I will specify after it.
- /a simply refers to the <a> tag.
-[@class="result-title hdrlnk"] that is directly comes after /a means the <a> tag must have this class name in it.
- text() refers to the text of the <a> tag, which is”Chief Engineer”.
related methods
- extract() means extract every instance on the web page that follows the same XPath rule into a [list].
- extract_first() if you use it instead of extract() it will extract only the first item in the list.
"""
for title in titles:
yield {'Title': title}
"""
In order to store result into CSV file, run :
$ scrapy crawl -titles -o result-titles.csv
'downloader/response_status_count/200' tells you how many requests succeeded
'finish_reason and time tell the result and timestamp of the run.
'item_scraped_count' refers to the number of titles scraped from the page.
'log_count/DEBUG' and 'log_count/INFO' are okay;
however, if you received
'log_count/ERROR' you should find out which errors you get during scraping are fix your code.
""" | true |
1b41ab7d9675398b68758b2a510ed899ac28cadd | Python | gusye1234/PRank | /prank/object.py | UTF-8 | 8,135 | 2.53125 | 3 | [] | no_license | import spacy
from spacy.tokens.doc import Doc
from .world import *
from tqdm import tqdm
import numpy as np
import pickle
from .utils import pattern_match_backward, pattern_match_forward
from .utils import isLine, span2low, span2pos, span2tag, generate_wildcard, str2span, low2str
class Docs:
    """
    A wrapper for spacy.Doc: splits a text file into fixed-size blocks and
    keeps the spaCy Doc parsed from each block.
    """
    # Size (in characters) of each block read from the input file.
    block_num = 1000000
    @staticmethod
    def getMatchesDoc(matches, doc):
        # Convert (match_id, start, end) triples into the matched spans.
        texts = []
        for _, start, end in matches:
            texts.append(doc[start:end])
        return texts
    @staticmethod
    def partition(file, size = block_num):
        '''
        :return yield the input file in blocks of `size` characters
        '''
        with open(file, 'r') as f:
            while True:
                data = f.read(size)
                if not data:
                    break
                yield data
    # ----------------------------------------------------
    def __init__(self, filename, load=False):
        # NOTE(review): the `load` flag is accepted but never used here.
        self.dir = os.path.dirname(__file__)
        self._file = filename
        self._filebase = os.path.basename(filename)
        self._docs = []
        # Number of blocks parsed (kept in sync by initialize()/load()).
        self._readPtr = 0
    def __repr__(self):
        strs=f"<Docs> {self._filebase} => "
        strs = strs + f"have {self._readPtr} docs({Docs.block_num} bytes)"
        return strs
    def __getitem__(self, index):
        return self._docs[index]
    def __len__(self):
        return self._readPtr
    def load(self, name):
        # Restore previously pickled docs and resync the block counter.
        with open(name, 'rb') as f:
            self._docs = pickle.load(f)
        self._readPtr = len(self._docs)
    def save(self, name):
        # Pickle the parsed docs to disk.
        with open(name, 'wb') as f:
            pickle.dump(self._docs, f)
    def initialize(self, preload=None):
        # Parse the input file block by block; stop after `preload` blocks
        # when a limit is given.
        print(gstr("Start to load docs"))
        for i, data in tqdm(enumerate(Docs.partition(self._file))):
            if preload is not None and i >= preload:
                break
            doc = NLP(data)
            self._docs.append(doc)
            self._readPtr += 1
    def iter(self, shuffle=False):
        # Yield the parsed docs, optionally in a random order.
        index = np.arange(len(self._docs))
        if shuffle:
            np.random.shuffle(index)
        for i in index:
            yield self._docs[i]
    def match(self, patterns):
        # Run the spaCy Matcher with the given patterns over every doc and
        # return all matched spans.
        matcher = Matcher(NLP.vocab)
        matcher.add("_", patterns)
        match_span = []
        for doc in self._docs:
            match = matcher(doc)
            match_span.extend(Docs.getMatchesDoc(match, doc))
        return match_span
# -------------------------------------------
class Pattern:
    """
    Design for binary relationship.

    An extraction pattern of the shape::

        <left head> ENTITY <left tail> ... <right head> ENTITY <right tail>

    Instances are interned: constructing a Pattern whose four phrase texts
    have been seen before returns the cached instance (and bumps its
    ``appear`` counter) instead of creating a new object.
    """
    # Intern table: (left-head, left-tail, right-head, right-tail) texts -> Pattern.
    __Pattern_hash = {}
    # Count of distinct patterns created so far.
    __P_id = 0
    def __new__(cls, left_phrase, right_phrase):
        """
        Store patterns (interned by the four phrase texts).
        :param: left_phrase tuple(span, span)
        :param: right_phrase tuple(span, span)
        """
        label = (left_phrase[0].text, left_phrase[1].text, right_phrase[0].text, right_phrase[1].text)
        already = cls.__Pattern_hash.get(label, None)
        if already is None:
            cls.__P_id += 1
            self = object.__new__(cls)
            cls.__Pattern_hash[label] = self
            return self
        else:
            # Seen before: count the repeat occurrence and reuse the instance.
            already.appear += 1
            return already
    @staticmethod
    def patterns():
        # All distinct Pattern instances created so far.
        return list(Pattern.__Pattern_hash.values())
    @staticmethod
    def pattern_num():
        # Number of distinct patterns ever created.
        return Pattern.__P_id
    def __init__(self, left_phrase, right_phrase):
        # __init__ runs again when __new__ hands back a cached instance; the
        # hasattr guard turns that re-initialisation into a no-op.
        if not hasattr(self, "appear"):
            self._left = [span2low(left_phrase[0]), span2low(left_phrase[1])]
            self._right = [span2low(right_phrase[0]), span2low(right_phrase[1])]
            # Budget of wildcard tokens allowed between the two phrases.
            self.max_cards = MAX_RELATION - len(self._left[1]) - len(self._right[0])
            assert self.max_cards >= 0
            self.appear = 1
    def __repr__(self):
        # "#E" marks the entity slot inside each phrase.
        return f"<P> {low2str(self._left[0])} #E {low2str(self._left[1])}" \
            " ... " \
            f"{low2str(self._right[0])} #E {low2str(self._right[1])}"
    def attribute_pattern(self, one_tuple, left=True):
        """
        :method: phrase, generate matcher rules
        :param: tuple
        :param: left, if true, then use the left attribute to search
        :return: patterns [{...},{...},...]
        """
        if left:
            search = one_tuple[0]
            search = span2low(search)
            # form = one_tuple[1]
            # form = span2pos(form)
            patterns = []
            # Try every entity length from 1 to MAX_ENTITY; [{}] * i inserts
            # i wildcard tokens in spacy Matcher rule syntax.
            for i in range(1, MAX_ENTITY+1):
                form = [{}]*i
                patterns.extend(self._phrase(search, form))
        else:
            search = one_tuple[1]
            search = span2low(search)
            patterns = []
            for i in range(1, MAX_ENTITY+1):
                form = [{}]*i
                patterns.extend(self._phrase(form, search))
        return patterns
    def getTuple(self, span):
        # Extract the (left, right) entity pair from a span matched by this
        # pattern; returns None when the span's edges do not line up.
        # NOTE(review): the while loops assume the left tail / right head
        # phrases do occur inside the span — confirm matched spans always
        # satisfy this, otherwise the scan could run past the span bounds.
        if not pattern_match_forward(0, span, self._left[0]):
            return None
        left_start = len(self._left[0])
        left_end = left_start+1
        # Grow the left entity until the left tail phrase matches.
        while not pattern_match_forward(left_end, span, self._left[1]):
            left_end += 1
        left_entity = span[left_start:left_end]
        end = len(span)
        if not pattern_match_backward(end, span, self._right[1]):
            return None
        right_end = end - len(self._right[1])
        right_start = right_end - 1
        # Grow the right entity backwards until the right head phrase matches.
        while not pattern_match_backward(right_start, span, self._right[0]):
            right_start -= 1
        right_entity = span[right_start:right_end]
        return Tuple(left_entity, right_entity)
    def _phrase(self, left_tuple, right_tuple):
        # Assemble both phrases and expand them with up to max_cards
        # wildcard tokens between them.
        phrase1 = self._left[0] + left_tuple + self._left[1]
        phrase2 = self._right[0] + right_tuple + self._right[1]
        P_candi = generate_wildcard(phrase1, phrase2, cards=self.max_cards, minimal=0)
        return P_candi
class Tuple:
    """
    Design for binary relationship.

    A (left entity, right entity) pair.  Instances are interned by the text
    of both sides: constructing a Tuple seen before returns the cached
    instance.  Unlike Pattern, ``appear`` is NOT bumped in ``__new__``; it
    is incremented by :meth:`relate`.
    """
    # Intern table: (left text, right text) -> Tuple.
    __Tuple_hash = {}
    # Count of distinct tuples currently tracked.
    __T_id = 0
    def __new__(cls, tuple_left,tuple_right, seed=False):
        """
        Store tuples (interned by the pair of texts).
        :param: tuple_left str or Span
        :param: tuple_right str or Span
        """
        label = (str(tuple_left), str(tuple_right))
        already = Tuple.__Tuple_hash.get(label, None)
        if already is None:
            self = object.__new__(cls)
            Tuple.__Tuple_hash[label] = self
            Tuple.__T_id += 1
            return self
        else:
            # NOTE(review): a ``seed=True`` request for an already-cached
            # tuple is silently ignored here — confirm that is intended.
            return already
    @staticmethod
    def tuples():
        # All distinct Tuple instances currently cached.
        return list(Tuple.__Tuple_hash.values())
    @staticmethod
    def tuple_num():
        return Tuple.__T_id
    @staticmethod
    def remainTopK(topk):
        # Keep only the *topk* most frequent tuples (seeds are never evicted).
        # NOTE(review): __T_id is set to topk before skipping seeds, so the
        # counter can drift from the real table size when seeds are spared.
        if len(Tuple.__Tuple_hash) <= topk:
            pass
        else:
            # Ascending by appear count: index 0 is the rarest tuple.
            arg_sort = sorted(Tuple.__Tuple_hash.items(), key= lambda x : x[1].appear)
            Tuple.__T_id = topk
            for i in range(len(Tuple.__Tuple_hash) - topk):
                if arg_sort[i][1].is_seed():
                    continue
                arg = arg_sort[i][0]
                Tuple.__Tuple_hash.pop(arg)
        return list(Tuple.__Tuple_hash.values())
    def __init__(self, tuple_left, tuple_right, seed=False):
        # Guard makes re-initialisation of a cached instance a no-op.
        if not hasattr(self, 'appear'):
            # Accept plain strings by converting them to spacy Spans.
            tuple_left = tuple_left if isinstance(tuple_left, Span) else str2span(tuple_left)
            tuple_right = tuple_right if isinstance(tuple_right, Span) else str2span(tuple_right)
            self._tuple = (tuple_left, tuple_right)
            self.relationship = {}   # Pattern -> co-occurrence count
            self.appear = 1
            self.__seed = seed
    def is_seed(self): return self.__seed
    def relate(self, pat : Pattern):
        # Record one co-occurrence of this tuple with *pat*.
        already = self.relationship.get(pat, None)
        self.appear += 1
        if already is None:
            self.relationship[pat] = 1
        else:
            self.relationship[pat] += 1
    def __getitem__(self, index):
        return self._tuple[index]
    def __repr__(self): return "<T> " + str(self._tuple)
| true |
62a081d5dbe9e45841e0c122fa4122a431b4bc9a | Python | jim-schwoebel/voicebook | /chapter_5_generation/make_chatbot.py | UTF-8 | 3,783 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | '''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## MAKE_CHATBOT.PY ##
================================================
Scrape a Drupal FAQ page, then build a chatbot that can be used
to answer all the questions from a given query.
Following tutorial of http://chatterbot.readthedocs.io/en/stable/training.html
Trains using a list trainer.
More advance types of Q&A pairing are to come.
'''
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
import os, requests
from bs4 import BeautifulSoup
# works on Drupal FAQ forms
# NOTE: this script hits the network (requests.get) and requires the
# third-party packages requests, bs4 (with lxml) and chatterbot.
page=requests.get('http://cyberlaunch.vc/faq-page')
soup=BeautifulSoup(page.content, 'lxml')
# Each FAQ entry lives in a div with this Drupal class.
g=soup.find_all(class_="faq-question-answer")
y=list()  # accumulates [question, answer] pairs
# initialize chatbot parameters
chatbot = ChatBot("CyberLaunch")
chatbot.set_trainer(ListTrainer)
# parse through soup and get Q&A
for i in range(len(g)):
    # Strip non-breaking spaces, then split question from answer on the
    # blank-line separator Drupal renders between them.
    entry=g[i].get_text().replace('\xa0','').split(' \n\n')
    newentry=list()
    for j in range(len(entry)):
        if j==0:
            # First chunk is the question: drop embedded newlines entirely.
            qa=entry[j].replace('\n','')
            newentry.append(qa)
        else:
            # Remaining chunks are answer text: flatten newlines to spaces
            # and collapse the double spaces that produces.
            qa=entry[j].replace('\n',' ').replace('  ','')
            newentry.append(qa)
    y.append(newentry)
# train chatbot with Q&A training corpus
# NOTE(review): assumes every entry split into at least two chunks —
# an entry without the ' \n\n' separator would raise IndexError below.
for i in range(len(y)):
    question=y[i][0]
    answer=y[i][1]
    print(question)
    print(answer)
    chatbot.train([
        question,
        answer,
        ])
# now ask the user 2 sample questions to get response.
for i in range(2):
    question=input('how can I help you? \n')
    response = chatbot.get_response(question)
    print(response)
| true |
3ba7860335a0fa3dad1acb36c4b3e745a08b17fa | Python | raphaelbomeisel/VamoRachar2 | /Cardapio.py | UTF-8 | 1,033 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed May 27 15:18:05 2015
@author: Raphael
"""
class cardapio():
    """Simple restaurant menu: one name -> price mapping per section."""

    def __init__(self):
        self.bebidas = {}      # drinks
        self.pratos = {}       # dishes
        self.sobremesas = {}   # desserts

    def AdicionaBebida(self, bebida, preco):
        """Register a drink and its price."""
        self.bebidas[bebida] = preco

    def AdicionaPrato(self, prato, preco):
        """Register a dish and its price."""
        self.pratos[prato] = preco

    def AdicionaSobremesa(self, sobremesa, preco):
        """Register a dessert and its price."""
        self.sobremesas[sobremesa] = preco

    def __str__(self):
        return (f'Bebidas: {self.bebidas}\n'
                f'Lanches: {self.pratos}\n'
                f'Sobremesas: {self.sobremesas}')
# Build a demo menu and show it.  Insertion order matches the original
# one-call-per-item version, so the printed output is identical.
menu = cardapio()
for nome, preco in (('suco', 3.50), ('milkshake', 6.00), ('cerveja', 5.00)):
    menu.AdicionaBebida(nome, preco)
for nome, preco in (('xburger', 25.00), ('salada caeser', 23.00), ('batata-frita', 15.00)):
    menu.AdicionaPrato(nome, preco)
for nome, preco in (('sundae', 10.00), ('sorvete simples', 7.00)):
    menu.AdicionaSobremesa(nome, preco)
print(menu)
| true |
8ba934f4acf4b4c7b3f1ee321d41ae4a4a93ed57 | Python | Instagram/LibCST | /native/libcst/tests/fixtures/malicious_match.py | UTF-8 | 896 | 2.9375 | 3 | [
"Python-2.0",
"MIT",
"Apache-2.0"
] | permissive |

# Parser round-trip fixture: deliberately hostile whitespace and comment
# placement around ``match`` statements (PEP 634).  Do not reformat — the
# exact spacing is the thing under test.
# foo
match ( foo ) : #comment
    # more comments
    case False : # comment
        ...
    case ( True ) : ...
    case _ : ...
    case ( _ ) : ... # foo
    # bar
match x:
    case "StringMatchValue" : pass
    case [1, 2] : pass
    case [ 1 , * foo , * _ , ]: pass
    case [ [ _, ] , *_ ]: pass
    case {1: _, 2: _}: pass
    case { "foo" : bar , ** rest } : pass
    case { 1 : {**rest} , } : pass
    case Point2D(): pass
    case Cls ( 0 , ) : pass
    case Cls ( x=0, y = 2) :pass
    case Cls ( 0 , 1 , x = 0 , y = 2 ) : pass
    case [x] as y: pass
    case [x] as y : pass
    case (True)as x:pass
    case Foo:pass
    case (Foo):pass
    case ( Foo ) : pass
    case [ ( Foo ) , ]: pass
    case Foo|Bar|Baz : pass
    case Foo | Bar | ( Baz): pass
    case x,y , * more :pass
    case y.z: pass
09d2f138a7dbad38ea5e32d554ee45a3cb552857 | Python | behrouzmadahian/python | /Tesnorflow2_05-12-20/10_Images/03_transfer_learning_tfHuB.py | UTF-8 | 7,456 | 3.140625 | 3 | [] | no_license | """
TensorFlow Hub is a way to share pre-trained model components.
See the TensorFlow Module Hub for a searchable listing of pre-trained models.
This tutorial demonstrates:
- How to use TensorFlow Hub with tf.keras.
- How to do image classification using TensorFlow Hub.
- How to do simple transfer learning.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
import PIL.Image as Image
from matplotlib import pyplot as plt
import numpy as np
# NOTE: this tutorial script downloads models and images from the network
# and opens matplotlib windows; it is not meant to run headless.
# Download the classifier
"""
Use hub.module to load a mobilenet, and tf.keras.layers.Lambda to wrap it up as a keras layer.
Any TensorFlow 2 compatible image classifier URL from tfhub.dev will work here.
"""
classifier_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/classification/2"
IMAGE_SHAPE = (224, 224)
print(IMAGE_SHAPE + (3,))
# Wrap the hub classifier as a single-layer Keras model expecting 224x224 RGB input.
classifier = keras.Sequential([hub.KerasLayer(classifier_url, input_shape=IMAGE_SHAPE+(3,))])
print(classifier.summary())
# Download a single image and run the model on it!
img_address = 'https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg'
grace_hopper = tf.keras.utils.get_file('image.jpg', img_address)
# resizing the image to match input of the mobilenet!
grace_hopper = Image.open(grace_hopper).resize(IMAGE_SHAPE)
grace_hopper = np.array(grace_hopper)/255.0  # scale pixels to [0, 1]
print("Shape of the image: after resize: {}".format(grace_hopper.shape))
# np.newaxis adds the batch dimension the model expects.
pred_results = classifier.predict(grace_hopper[np.newaxis, ...])
# results are logits across 1001 classes!
print("Shape of prediction vector: {}".format(pred_results.shape))
pred_class = np.argmax(pred_results[0], axis=-1)
print('predicted class: {}'.format(pred_class))
# decode the predictions:
# We have the predicted class ID, Fetch the ImageNet labels, and decode the predictions
labels_path = 'https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt'
labels_path = tf.keras.utils.get_file('ImageNetLabels.txt', labels_path)
imagenet_labels = np.array(open(labels_path).read().splitlines())
print(imagenet_labels)
plt.imshow(grace_hopper)
plt.axis('off')
pred_class_name = imagenet_labels[pred_class]
print(pred_class_name)
plt.title('Prediction: ' + pred_class_name.title())  # Capitalize the first char in word
plt.show()
"""
Using TF Hub it is simple to retrain the top layer of the model to recognize the classes in our dataset
"""
new_data_path = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_root = tf.keras.utils.get_file('flower_photos', new_data_path, untar=True)
image_generator = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
image_data = image_generator.flow_from_directory(str(data_root), batch_size=32, target_size=IMAGE_SHAPE)
# the resulting object is an iterator that returns image_batch, labels_batch pairs
for image_batch, label_batch in image_data:
    print('Image batch shape: ', image_batch.shape)
    print('Label batch shape: ', label_batch.shape)
    break  # only peek at the first batch
# run the classifier on a batch of images:
result_batch = classifier.predict(image_batch)
print('result_batch shape: {}'.format(result_batch.shape))
predicted_class_names = imagenet_labels[np.argmax(result_batch, axis=-1)]
print('predicted classes:\n {}'.format(predicted_class_names))
plt.figure(figsize=(10, 9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
    plt.subplot(6, 5, n+1)
    plt.imshow(image_batch[n])
    plt.title(predicted_class_names[n])
    plt.axis('off')
plt.suptitle("ImageNet predictions")
plt.show()
"""
The results are far from perfect, but reasonable considering that
these are not the classes the model was trained for (except "daisy").
"""
"""
Download the headless model
TensorFlow Hub also distributes models without the top classification layer.
These can be used to easily do transfer learning.
Any Tensorflow 2 compatible image feature vector URL from tfhub.dev will work here.
"""
feature_extractor_url = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/2"
feature_extractor_layer = hub.KerasLayer(feature_extractor_url, input_shape=(224, 224, 3))
feature_batch = feature_extractor_layer(image_batch)
print("Shape of features out of headless mobilenet: {}".format(feature_batch.shape))
# Freeze the variables in the feature extractor layer, so that the training only modifies the new classifier layer
feature_extractor_layer.trainable = False
# attach a classifier head
model = keras.Sequential([feature_extractor_layer, keras.layers.Dense(image_data.num_classes)])
print(model.summary())
predictions = model(image_batch)
print('Shape of predictions -logits- before fine tuning final layer :{}'.format(predictions.shape))
# from_logits=True because the Dense head has no softmax activation.
model.compile(optimizer=keras.optimizers.Adam(),
              loss=keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# To visualize the training progress, use a custom callback to log the loss and
# accuracy of each batch individually, instead of the epoch average.
class CollectBatchStats(tf.keras.callbacks.Callback):
    """Keras callback that records per-batch loss and accuracy.

    The model's metrics are reset after every batch, so each recorded value
    reflects a single batch rather than the running epoch average.  Side
    effect: the usual epoch-end aggregated loss/metrics become meaningless.
    """

    def __init__(self):
        # Fix: the original skipped the base-class initialiser; Callback
        # subclasses must call super().__init__() so its internal state is set up.
        super().__init__()
        self.batch_losses = []  # loss of each individual training batch
        self.batch_acc = []     # accuracy of each individual training batch

    def on_train_batch_end(self, batch, logs=None):
        self.batch_losses.append(logs['loss'])
        self.batch_acc.append(logs['accuracy'])
        # Reset so the next batch's metrics are not averaged with this one;
        # this also resets the epoch-end loss and metrics.
        self.model.reset_metrics()
# One epoch should cover every sample once.
steps_per_epoch = np.ceil(image_data.samples/image_data.batch_size)
batch_stats_callback = CollectBatchStats()
# NOTE(review): fit_generator is deprecated in TF2 — model.fit accepts
# generators directly; confirm the installed TF version before changing.
model.fit_generator(image_data, epochs=2,
                    steps_per_epoch=steps_per_epoch,
                    callbacks=[batch_stats_callback])
print(batch_stats_callback.batch_losses)
print(batch_stats_callback.batch_acc)
# Plot the per-batch loss/accuracy collected by the callback.
fig, ax = plt.subplots(2, 1, sharex='col')
ax[0].plot(batch_stats_callback.batch_losses)
ax[0].set_title('batch Losses during training')
ax[1].plot(batch_stats_callback.batch_acc)
ax[1].set_title('batch Accuracy during training')
plt.show()
# To redo the plot from before, first get the ordered list of class names:
print(image_data.class_indices.items())
# Sort class names by their generator index so predictions map back correctly.
class_names = sorted(image_data.class_indices.items(), key=lambda pair: pair[1])
class_names = np.array([key.title() for key, value in class_names])
print(class_names)
predicted_batch = model.predict(image_batch)
predicted_id = np.argmax(predicted_batch, axis=-1)
predicted_label_batch = class_names[predicted_id]
label_id = np.argmax(label_batch, axis=-1)
plt.figure(figsize=(10, 9))
plt.subplots_adjust(hspace=0.5)
for n in range(30):
    plt.subplot(6, 5, n+1)
    plt.imshow(image_batch[n])
    # Green title for a correct prediction, red for a wrong one.
    color = "green" if predicted_id[n] == label_id[n] else "red"
    plt.title(predicted_label_batch[n].title(), color=color)
    plt.axis('off')
plt.suptitle("Model predictions (green: correct, red: incorrect)")
plt.show()
# Export your model:
import time
t = time.time()
# Timestamped directory so repeated runs do not clobber earlier exports.
export_path = "/tmp/saved_models/{}".format(int(t))
model.save(export_path, save_format='tf')
print('Model saved to: {}'.format(export_path))
# reload the model:
reloaded = keras.models.load_model(export_path)
result_batch = model.predict(image_batch)
reloaded_result_batch = reloaded.predict(image_batch)
print('Are there any difference in prediction of fine tuned model before and after reloading from file: ')
print(abs(reloaded_result_batch - result_batch).max())
# This saved model can be loaded for inference later, or converted to TFLite or TFjs.
df5931bd615b72bf63e045a6e6a497a6d40d81d1 | Python | Its-a-me-Ashwin/DBaaS | /Dbaas/dbass.py | UTF-8 | 3,822 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 16:07:42 2020
@author: 91948
"""
# import libraries
from flask import Flask,jsonify,request
import pymongo
import json
# set up the DB
# runs on port 27017 (default MongoDB port). NOTE(review): MongoClient
# connects lazily — a dead server surfaces on first operation, not here.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["mydatabase"]
# set up collections (tables)
# set up the names of the collections; MongoDB creates collections lazily
# on first write, so these handles alone create nothing
userDB = mydb["userDB"] # can be removed
rideDB = mydb["rideDB"] # can be removed
app = Flask(__name__)
# global declarations: address/port the Flask dev server binds to
port = 8000
ip = '127.0.0.1'
# Read API
'''
input {
"table" : "table name",
"columns" : ["col1","col2"],
"where" : ["col=val","col=val"]
}
'''
@app.route("/api/v1/db/read",methods=["POST"])
def ReadFromDB():
    """Read rows from a collection.

    Expects JSON: {"table": name, "columns": [...], "where": ["col=val", ...]}.
    Returns 200 with the selected columns, 204 when nothing matches,
    400 on failure.
    """
    # get the input query
    data = request.get_json()
    # decode the query
    collection = data["table"]
    columns = data["columns"]
    where = data["where"]
    query = dict()
    # Turn each "col=val" clause into an equality filter.
    for q in where:
        query[q.split('=')[0]] = q.split('=')[1]
    query_result = None
    # select the correct collection and apply the query
    # NOTE(review): find() does not raise for a missing collection (Mongo
    # creates them lazily), so this except likely never fires — confirm.
    try:
        query_result = mydb[collection].find(query)
    except:
        print("Table Not Pressent");
        return jsonify({}),400
    ## print the contents of the data (debug dump of the whole collection;
    ## uses a fresh cursor, so query_result is not consumed)
    if True:
        for i in mydb[collection].find({}): print(i)
    try:
        # data present: indexing the cursor raises IndexError when empty
        query_result[0]
    except IndexError:
        # no data present
        return jsonify({}),204
    # format the output (slice the data): keep only the requested columns
    try:
        num = 0
        res_list = list()
        for ret in query_result:
            result = dict()
            for key in columns:
                # Missing columns are silently skipped for this document.
                try:
                    result[key] = ret[key]
                except:
                    pass
            res_list.append(result)
            num += 1
    except KeyError:
        print("One of the coulumns given was not present in the data base")
        return jsonify({}),400
    # return the result as a JSON array
    return json.dumps(res_list),200
# write api
'''
input {
"method" : "write"
"table" : "table_name",
"data" : {"col1":"val1","col2":"val2"}
}
{
"method" : "delete"
"table" : "table_name",
"data" : {"col1":"val1","col2":"val2"}
}
{
"method" : "update"
"table" : "table_name",
"query" : {"col1":"val1","col2":"val2"},
"insert" : {"$set" :
{
"b" : "c"
}
}
}
'''
@app.route("/api/v1/db/write",methods=["POST"])
def WriteToDB():
    """Write/delete/update documents in a collection.

    Expects JSON with "method" in {"write", "delete", "update"} plus a
    "table" name and the payload fields shown in the module docstrings
    above.  Returns 200 on success, 400 on an unknown method or failure.
    """
    data = request.get_json()
    if (data["method"] == "write"):
        # insert method: insert data["data"] as one document
        collection = data["table"]
        insert_data = data["data"]
        try:
            mydb[collection].insert_one(insert_data)
        except:
            print("Table Not Pressent");
            return jsonify({}),400
        return jsonify(),200
    elif (data["method"] == "delete"):
        # delete method: remove every document matching data["data"]
        collection = data["table"]
        delete_data = data["data"]
        try:
            mydb[collection].delete_many(delete_data)
        except:
            print("Table not Present")
            return jsonify({}),400
        return jsonify(),200
    elif (data["method"] == "update"):
        # update method: apply data["insert"] (e.g. {"$set": {...}}) to the
        # first document matching data["query"]
        collection = data["table"]
        try:
            mydb[collection].update_one(data["query"],data["insert"])
        except:
            print("Table not present")
            return jsonify({}),400
        return jsonify(),200
    else:
        # bad method
        return jsonify({}),400
# start the application
# Runs the Flask development server on ip:port with debug mode enabled
# (auto-reload + interactive debugger; not suitable for production).
if __name__ == '__main__':
    app.debug=True
    app.run(host = ip, port = port)
| true |
73ab31094f3dcb85549fd28c3815a8b032a4c171 | Python | ADQF/tutorial | /L40爬虫入门/4urllib代理.py | UTF-8 | 826 | 2.71875 | 3 | [] | no_license | # urllib代理示例
# To avoid one IP being blocked for hitting the server too frequently, rotate
# IPs by routing requests through other machines (proxies).
"""
Where to find proxies?
1. Proxy listing platforms, e.g. http://www.xicidaili.com/nn/
   Free ones are unstable and some do not work; paid ones are stable.
2. Proxy pools scraped and shared by other users.
"""
import urllib.request
# Example of picking a random proxy from a pool:
# import random
#
# proxies = [
#     {},
#     {},
#     {},
# ]
# proxy = random.choice(proxies)
proxy = urllib.request.ProxyHandler({'http': 'http://125.40.29.100:8118'})
opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
# Install the proxied opener globally so urlopen() below uses it.
urllib.request.install_opener(opener)
# Request a Baidu search for the keyword "ip" (shows the outgoing address)
response = urllib.request.urlopen('http://www.baidu.com/s?wd=ip')
html_content = response.read().decode('utf-8')
print(html_content)
"""
Possible errors that may occur
"""
f1b8984ddf78ac929ca1519f98bf687a169c3a54 | Python | joshyfrott/exercises | /integer2words.pyw | UTF-8 | 6,177 | 3.59375 | 4 | [] | no_license | from tkinter import *
import tkinter.messagebox
def affix(string_aff, digit_aff):
    """Return the place-value words for the digit at position *digit_aff*.

    *digit_aff* counts digit positions from the right, starting at 1, within
    the string form of the whole number.  Positions 3/6/9/12/15 (the hundreds
    of each group) contribute " hundred"; positions 4-6, 7-9, 10-12 and 13-15
    additionally contribute " thousand,", " million,", " billion," and
    " trillion," respectively.  Positions 1 and 2 yield "".

    *string_aff* (the full number as a string) is kept for interface
    compatibility: the original implementation walked neighbouring digits in
    it to collapse zero groups, but that walk never changed the returned
    text, so the value is no longer consulted.
    """
    post_fix = ""
    # Hundreds place of any three-digit group.
    if digit_aff in (3, 6, 9, 12, 15):
        post_fix = " hundred"
    # Group suffix: one per three-digit group, trailing comma included.
    if 4 <= digit_aff <= 6:
        post_fix += " thousand,"
    elif 7 <= digit_aff <= 9:
        post_fix += " million,"
    elif 10 <= digit_aff <= 12:
        post_fix += " billion,"
    elif 13 <= digit_aff <= 15:
        post_fix += " trillion,"
    return post_fix
def normal(string_norm, digit_norm, value_norm):
    """Prepend the word for units digit *value_norm*, plus its place affix,
    to the accumulated output (which is built right-to-left)."""
    words = {
        "1": " one", "2": " two", "3": " three",
        "4": " four", "5": " five", "6": " six",
        "7": " seven", "8": " eight", "9": " nine",
    }
    # "0" (or any unmapped value) contributes no word, only the affix.
    add_normal = words.get(value_norm, "")
    output_number.set(add_normal + affix(string_norm, digit_norm) + output_number.get())
def teens(string_teens, digit_teen, value_teen):
    """Prepend the ten..nineteen word selected by units digit *value_teen*
    (the caller has already seen a '1' in the tens place)."""
    words = {
        "0": " ten", "1": " eleven", "2": " tweleve",
        "3": " thirteen", "4": " fourteen", "5": " fifteen",
        "6": " sixteen", "7": " seventeen", "8": " eighteen",
        "9": " nineteen",
    }
    add_teen = words.get(value_teen, "")
    # Output text is accumulated right-to-left.
    output_number.set(add_teen + affix(string_teens, digit_teen) + output_number.get())
def tyty(string_tyty, digit_ty, value_ty):
    """Prepend the twenty..ninety word for tens digit *value_ty* (plus its
    place affix) to the accumulated output."""
    words = {
        "2": " twenty", "3": " thirty", "4": " fourty",
        "5": " fifty", "6": " sixty", "7": " seventy",
        "8": " eighty", "9": " ninety",
    }
    # "0"/"1" tens are handled elsewhere (skipped or via teens), so they
    # contribute no word here.
    add_ty = words.get(value_ty, "")
    output_number.set(add_ty + affix(string_tyty, digit_ty) + output_number.get())
def converter():
    """Read the integer from the entry widget and render it in English words.

    Handles values up to 999,999,999,999,999 (hundreds of trillions).  Shows
    a message box and aborts on invalid or oversized input.  The words are
    accumulated right-to-left into ``output_number`` by normal/teens/tyty.
    """
    try:
        # Accept surrounding whitespace and thousands separators.
        in_val = int(input_number.get().strip().replace(",", ""))
    except Exception as ex:
        tkinter.messagebox.showerror("Error!", "Invalid Input!\n%s" % ex)
        # Fix: the original fell through here with in_val still unset/"",
        # which crashed below comparing a str to an int.
        return
    str_val = str(in_val)
    output_number.set("")
    if in_val <= 999999999999999:
        digit_count = 1  # digit position, counted from the right, 1-based
        while len(str_val) + 1 != digit_count:
            # Slice out the digit currently being converted.
            if digit_count == 1:
                current_number = str_val[-1:]
            else:
                current_number = str_val[-digit_count:(-digit_count) + 1]
            if digit_count in (1, 4, 7, 10, 13):
                # Units place of a group: check the tens digit for 10-19
                # first, since teens are single words that consume it too.
                if str_val[-(digit_count + 1):-digit_count] == "1":
                    teens(str_val, digit_count, current_number)
                    digit_count = digit_count + 2  # skip the consumed tens digit
                else:
                    normal(str_val, digit_count, current_number)
                    digit_count = digit_count + 1
            elif digit_count in (2, 5, 8, 11, 14):
                # Tens place (twenty..ninety) of a group.
                tyty(str_val, digit_count, current_number)
                digit_count = digit_count + 1
            else:
                # Hundreds place of a group.
                normal(str_val, digit_count, current_number)
                digit_count = digit_count + 1
    else:
        tkinter.messagebox.showerror("Error!", "Input Cannot be Larger than Trillions!")
#start of gooey
# Tkinter UI: a label that shows the generated words, an entry for the
# number, and a Convert button wired to converter().
app = Tk()
app.title("Mark Kupit's Integer to English Converter")
app.geometry('450x100+200+100')
# Holds the generated English text; converter() rebuilds it on each click.
output_number = StringVar()
output_number.set("")
Label(app, textvariable = output_number).pack()
Label(app, text = "Enter Number to Convert:").pack()
input_number = Entry(app, width = 50)
input_number.pack()
convert = Button(app, text = 'Convert', width = 10, command = converter)
convert.pack(side = 'bottom', padx = 10, pady = 10)
app.mainloop()
795928164d88439cb9dc5e072b5b84f2337c8424 | Python | leonardocroda/tcc | /transformacoes/pre_processamento.py | UTF-8 | 2,860 | 2.96875 | 3 | [] | no_license | import nltk
from nltk import tokenize
from string import punctuation
import unidecode
import pandas as pd
import re
def execute(dataframe, coluna_texto):
    """Run the tweet pre-processing pipeline over dataframe[coluna_texto].

    Adds one column per stage and returns the (mutated) dataframe:
    'sem_links' (URLs removed) -> 'sem_pontuacao' (punctuation removed) ->
    'sem_acentos' (accents stripped) -> 'lowercase' -> 'stopwords'
    (Portuguese stopwords removed) -> 'stemmer' (RSLP-stemmed).

    NOTE(review): the 'stemmer' column is built from 'sem_acentos', i.e.
    BEFORE lowercasing/stopword removal — confirm that is intentional.
    Requires nltk (downloads 'stopwords' and 'rslp') and unidecode.
    """
    def remove_links(dataframe,coluna_texto):
        # Strip every http(s) URL from each tweet.
        frase_processada = list()
        for tweet in dataframe[coluna_texto]:
            tweet_processado= re.sub(r"http\S+", "", tweet)
            frase_processada.append(tweet_processado)
        return frase_processada
    dataframe['sem_links']= remove_links(dataframe,coluna_texto)
    def remove_pontuacao(dataframe,coluna_texto):
        # Tokenize on word/punctuation boundaries and drop punctuation tokens.
        token_pontuacao = tokenize.WordPunctTokenizer()
        pontuacao = list()
        for ponto in punctuation:
            pontuacao.append(ponto)
        frase_processada = list()
        for tweet in dataframe[coluna_texto]:
            nova_frase = list()
            palavras_texto = token_pontuacao.tokenize(tweet)
            for palavra in palavras_texto:
                if palavra not in pontuacao:
                    nova_frase.append(palavra)
            frase_processada.append(' '.join(nova_frase))
        return frase_processada
    dataframe["sem_pontuacao"]=remove_pontuacao(dataframe,'sem_links')
    def remove_acentos(dataframe,coluna_texto):
        # Transliterate accented characters to plain ASCII.
        sem_acentos = [unidecode.unidecode(tweet) for tweet in dataframe[coluna_texto]]
        return sem_acentos
    dataframe['sem_acentos']=remove_acentos(dataframe,'sem_pontuacao')
    def lowercase(dataframe, coluna_texto):
        minusculos = list()
        for tweet in dataframe[coluna_texto]:
            minusculos.append(tweet.lower())
        return minusculos
    dataframe['lowercase']=lowercase(dataframe,'sem_acentos')
    def remove_stopwords(dataframe, coluna_texto):
        nltk.download('stopwords')
        # removing stopwords (plus domain-specific city-name terms)
        palavras_irrelevantes = nltk.corpus.stopwords.words("portuguese")
        palavras_irrelevantes.extend(['balneario','camboriu','Balneário',"Camboriú",'.'])
        frase_processada = list()
        token_espaco = nltk.tokenize.WhitespaceTokenizer()
        for tweet in dataframe[coluna_texto]:
            nova_frase = list()
            palavras_texto = token_espaco.tokenize(tweet)
            for palavra in palavras_texto:
                if palavra not in palavras_irrelevantes:
                    nova_frase.append(palavra)
            frase_processada.append(' '.join(nova_frase))
        return frase_processada
    dataframe["stopwords"]=remove_stopwords(dataframe,'lowercase')
    def stemmer(dataframe, coluna_texto):
        token_pontuacao = tokenize.WordPunctTokenizer()
        nltk.download('rslp')
        stemmer = nltk.RSLPStemmer()
        # apply the RSLP (Portuguese) stemmer to every token
        frase_processada = list()
        for tweet in dataframe[coluna_texto]:
            nova_frase = list()
            palavras_texto = token_pontuacao.tokenize(tweet)
            for palavra in palavras_texto:
                nova_frase.append(stemmer.stem(palavra))
            frase_processada.append(' '.join(nova_frase))
        return frase_processada
    dataframe["stemmer"] = stemmer(dataframe,'sem_acentos')
    return dataframe
| true |
d704ea44b2a51e8910c6f988cd6b7d8b4e3fc0f1 | Python | abhijeetjoshi0594/courseradatascience | /firstpython.py | UTF-8 | 235 | 3.234375 | 3 | [] | no_license | #python code to check duplicate in a string
# Count character occurrences in a string and report the duplicated ones.
check_string = "Abhijeemmt"
count = {}
for ch in check_string:
    count[ch] = count.get(ch, 0) + 1
# Print every character that appears more than once (insertion order).
for ch, times in count.items():
    if times > 1:
        print(ch, times)
| true |
072295e73df9f5e4b210f75f124dceb447e24fde | Python | malcolmmcswain/141l-project | /assembler/assembler.py | UTF-8 | 1,691 | 2.703125 | 3 | [] | no_license | import sys
from isa_map import (
opcode_dict, # opcode dictionary
std_reg_dict, # standard register dictionary
ext_reg_dict # extended register dictionary
)
### Converts a decimal integer n to its 6-bit unsigned binary string
### representation; values outside 0..63 map to the error marker "######"
def toBinary(n):
    return "######" if n >= 64 or n < 0 else format(n, "06b")
### Assembler thread
### - istream represents the input file passed by cmd line arg
### - ostream represents the output file "machinecode.txt"
with open(sys.argv[1]) as istream:
for line in istream: # read each line of assembly
# opens output file in append mode
ostream = open("machine_code.txt", 'a')
if (line == '\n'): continue
# write opcode
ostream.write(opcode_dict[line[0:3]].get("opcode", "###"))
# decode r-type instruction (standard registers only)
if opcode_dict[line[0:3]]["type"] == "r":
ostream.write(std_reg_dict.get(line[4:6], "##"))
ostream.write(std_reg_dict.get(line[7:9], "##"))
ostream.write(std_reg_dict.get(line[10:12], "##"))
# decode x-type instruction (standard & extended registers)
elif opcode_dict[line[0:3]]["type"] == "x":
ostream.write(std_reg_dict.get(line[4:6], "##"))
ostream.write(ext_reg_dict.get(line[7:10], "####"))
# decode i-type instruction (immediate)
elif opcode_dict[line[0:3]]["type"] == "i":
immediate = int(line[4:])
ostream.write(toBinary(immediate))
else:
ostream.write("######")
ostream.write('\n')
ostream.close()
| true |
e5476438749a1549c6362a39e1f600609e4aa0c1 | Python | gujie1216933842/codebase | /时间模块time和datetime/01_time.py | UTF-8 | 1,161 | 3.9375 | 4 | [] | no_license |
'''
time模块学习
time.time() 生成当前的时间戳,格式为10位整数的浮点数。
time.strftime()根据时间元组生成时间格式化字符串。
time.strptime()根据时间格式化字符串生成时间元组。time.strptime()与time.strftime()为互操作。
time.localtime()根据时间戳生成当前时区的时间元组。
time.mktime()根据时间元组生成时间戳。
区分 strftime()和strptime()的方法,方便记忆
strftime- str_format_time 格式化(format)
strptime- str_parse_time 解析(parse)
'''
import time
a = time.time()
b = time.localtime()
print(b)
print(int(a)) #当前时间戳取整
print(b[0])
# time.sleep(2)
print(b.tm_year)
#struct_time转换成 '2018-04-25 14:59:47'格式
d = time.strftime('%Y-%m-%d %H:%M:%S',b)
print(d)
#时间戳转换成'2018-04-25 14:59:47'格式
e = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(a))
#'2018-04-25 14:59:47'格式转化为struct_time
print("***********格式化时间转化成时间戳*************")
f = "2018-04-25 15:10:56"
g = time.strptime(f,'%Y-%m-%d %H:%M:%S')
# struct_time转换成时间戳
h = time.mktime(g)
print(int(h))
| true |
9ff4a0b00fc9ec725088fcaf93070966cf66dae1 | Python | hyunsang-ahn/algorithm | /문제풀이/홀수만 더하기/홀수만 더하기.py | UTF-8 | 267 | 2.890625 | 3 | [] | no_license | import sys
sys.stdin = open('input.txt', 'r')
T = int(input())
for tc in range(1, T+1):
arr = list(map(int, input().split()))
res = []
for i in range(10):
if arr[i] %2 != 0:
res.append(arr[i])
print("#{} {}".format(tc, sum(res))) | true |
c9b30321bde82b1a80ec64b3a7ce0e5b6465a50f | Python | gau-nernst/search-algos | /search.py | UTF-8 | 9,761 | 3.25 | 3 | [] | no_license | class Search():
valid_strat = {'bfs', 'dfs', 'ldfs', 'ids', 'ucs', 'greedy', 'a_star'}
def __init__(self, strategy):
assert strategy in self.valid_strat
self.strat = strategy
def __call__(self, start, end, adj_list, max_depth=3, heuristic=None):
print("Strategy:", self.strat)
print("Start:", start)
print("End:", end)
print()
if self.strat == 'dfs' or self.strat == 'bfs':
self.bfs_dfs(self.strat, start, end, adj_list)
elif self.strat == 'ldfs':
self.ldfs(start, end, adj_list, max_depth=max_depth)
elif self.strat == 'ids':
for i in range(1, max_depth+1):
print("Max depth:", i)
self.ldfs(start, end, adj_list, max_depth=i)
print()
print()
elif self.strat == 'ucs':
self.ucs(start, end, adj_list)
elif self.strat == 'greedy':
self.greedy(start, end, adj_list, heuristic=heuristic)
elif self.strat == 'a_star':
self.a_star(start, end, adj_list, heuristic=heuristic)
def bfs_dfs(self, strat, start, end, adj_list):
from collections import deque
assert strat == 'bfs' or strat == 'dfs'
if strat == 'dfs':
candidates = []
elif strat == 'bfs':
candidates = deque()
candidates.append(start)
visited = set()
parent = {}
step = 1
while candidates:
print("Step", step)
step += 1
if strat == 'dfs':
current_node = candidates.pop()
elif strat == 'bfs':
current_node = candidates.popleft()
print("Current node:", current_node)
if current_node == end:
print("Found the destination")
print()
self.print_path(start, end, parent, adj_list)
return
visited.add(current_node)
print("Visited nodes:", visited)
print(f"Neighbors of {current_node}: {adj_list[current_node]}")
print()
for x in adj_list[current_node]:
if x not in visited and x not in candidates:
candidates.append(x)
parent[x] = current_node
print("Candidates:", candidates)
if candidates:
print("Next node to examine:", candidates[-1] if strat == 'dfs' else candidates[0])
print()
print()
print(f"Does not found a path from {start} to {end}")
def ldfs(self, start, end, adj_list, max_depth=1):
candidates = []
candidates.append((start,0))
parent = {}
step = 1
print("start:", candidates)
print()
print()
while candidates:
print("Step", step)
step += 1
current_node, depth = candidates.pop()
print("Current node:", current_node)
print("Current depth:", depth)
print(f"Neighbors of {current_node}: {adj_list[current_node]}")
if current_node == end:
print("Found the destination")
print()
self.print_path(start, end, parent, adj_list)
return
if depth < max_depth:
for x in adj_list[current_node]:
if current_node in parent and x == parent[current_node]:
continue
candidates.append((x,depth+1))
parent[x] = current_node
else:
print("Reach max depth")
print(candidates)
print()
print()
print(f"Does not found a path from {start} to {end} with depth {depth}")
def ucs(self, start, end, adj_list):
candidates = set()
path_cost = {}
parent = {}
step = 1
candidates.add(start)
path_cost[start] = 0
while candidates:
print("Step", step)
step += 1
min_node = None
min_cost = float('inf')
for node in candidates:
if path_cost[node] < min_cost:
min_node = node
min_cost = path_cost[node]
candidates.remove(min_node)
current_node = min_node
print("Current node:", current_node)
if current_node == end:
print("Found the destination")
print()
self.print_path(start, end, parent, adj_list)
return
print(f"Neighbors of {current_node}: {adj_list[current_node]}")
print("Path cost:", path_cost)
print()
for x in adj_list[current_node]:
if x in parent and parent[x] == current_node:
continue
new_cost = path_cost[current_node] + adj_list[current_node][x]
if x not in path_cost or new_cost < path_cost[x]:
parent[x] = current_node
path_cost[x] = new_cost
candidates.add(x)
print("Candidates:", candidates)
print()
print()
print(f"Does not found a path from {start} to {end} with depth {depth}")
def greedy(self, start, end, adj_list, heuristic):
assert heuristic
current_node = start
path = []
step = 1
path.append(start)
while current_node != end:
print("Step", step)
step += 1
print("Current node:", current_node)
neighbors = list(adj_list[current_node].keys())
neighbors_est_cost = [heuristic(x, end) for x in neighbors]
if not neighbors:
print(f"Does not found a path from {start} to {end} with depth {depth}")
return
n = {neighbors[i]: round(neighbors_est_cost[i]) for i in range(len(neighbors))}
print(f"Neighbors of {current_node}: {n}")
next_node = None
min_est_cost = float('inf')
for i in range(len(neighbors)):
if neighbors_est_cost[i] < min_est_cost:
next_node = neighbors[i]
min_est_cost = neighbors_est_cost[i]
path.append(next_node)
current_node = next_node
print()
print()
print("Found the destination")
print()
print("Full path: ", end="")
print(*path, sep=' → ')
total = 0
for i in range(len(path)-1):
a = path[i]
b = path[i+1]
total += adj_list[a][b]
print(f"\t{a} → {b}: {adj_list[a][b]}")
print(f"Total cost: {total}")
def a_star(self, start, end, adj_list, heuristic):
assert heuristic
candidates = set()
path_cost = {}
heuristic_cost = {}
parent = {}
step = 1
candidates.add(start)
path_cost[start] = 0
while candidates:
print("Step", step)
step += 1
min_node = None
min_cost = float('inf')
for node in candidates:
if node not in heuristic_cost:
heuristic_cost[node] = heuristic(node, end)
total_cost = path_cost[node] + heuristic_cost[node]
if total_cost < min_cost:
min_node = node
min_cost = total_cost
candidates.remove(min_node)
current_node = min_node
print("Current node:", current_node)
if current_node == end:
print("Found the destination")
print()
self.print_path(start, end , parent, adj_list)
return
print(f"Neighbors of {current_node}: {adj_list[current_node]}")
print("Path cost:", path_cost)
n = {k: round(v) for k,v in heuristic_cost.items()}
print("Heuristic cost:", n)
print()
for x in adj_list[current_node]:
if x in parent and parent[x] == current_node:
continue
new_cost = path_cost[current_node] + adj_list[current_node][x]
if x not in path_cost or new_cost < path_cost[x]:
parent[x] = current_node
path_cost[x] = new_cost
candidates.add(x)
print("Candidates:", candidates)
print()
print()
print(f"Does not found a path from {start} to {end} with depth {depth}")
def print_path(self, start, end, parent, adj_list):
print("Full path: ", end="")
x = end
path = [x]
while x != start:
x = parent[x]
path.append(x)
path.reverse()
print(*path, sep=' → ')
total = 0
for i in range(len(path)-1):
a = path[i]
b = path[i+1]
total += adj_list[a][b]
print(f"\t{a} → {b}: {adj_list[a][b]}")
print(f"Total cost: {total}") | true |
47e43b6f86717f39898879a45abd91eb5e0bd4b9 | Python | joshearl/ThreadedPackageLister | /threaded_package_lister.py | UTF-8 | 1,985 | 2.890625 | 3 | [] | no_license | import sublime
import sublime_plugin
import threading
import os
class ListPackagesCommand(sublime_plugin.WindowCommand):
def __init__(self, window):
self.view = window.active_view()
def run(self):
threaded_package_lister = ThreadedPackageLister()
print "Starting thread ..."
threaded_package_lister.start()
print "Setting thread handler on main thread ..."
self.handle_thread(threaded_package_lister)
def handle_thread(self, thread, i=0, direction=1):
if thread.is_alive():
print "Thread is running ..."
before = i % 8
after = (7) - before
if not after:
direction = -1
if not before:
direction = 1
i += direction
if (self.view):
self.view.set_status('threaded_package_lister', 'PackageLister [%s=%s]' % \
(' ' * before, ' ' * after))
sublime.set_timeout(lambda: self.handle_thread(thread, i, direction), 20)
return
packages_list = thread.result
if (self.view):
self.view.erase_status('threaded_package_lister')
print "Thread is finished."
print "Installed packages: " + ", ".join(packages_list)
class ThreadedPackageLister(threading.Thread):
def __init__(self):
self.result = None
threading.Thread.__init__(self)
def run(self):
print "Starting work on background thread ..."
self.result = self.get_packages_list()
def get_packages_list(self):
package_set = set()
package_set.update(self._get_packages_from_directory(sublime.packages_path()))
return sorted(list(package_set))
def _get_packages_from_directory(self, directory):
package_list = []
for package in os.listdir(directory):
package_list.append(package)
print "Package list retrieved ..."
return package_list | true |
db158f2a1b3e13cdaa33ede47e547e3de132d5f8 | Python | oyuchangit/Competitive_programming_exercises | /algorithm_practices/ABC/ABC_exercises/B074.py | UTF-8 | 258 | 2.890625 | 3 | [] | no_license | # https://atcoder.jp/contests/abc074/tasks/abc074_b
N = int(input())
K = int(input())
x_list = list(map(int, input().split()))
ans = 0
for x in x_list:
K_x = K - x
if K_x >= x:
ans += x*2
elif K_x < x:
ans += K_x*2
print(ans) | true |
1d4c807f7d3039c78729f513dba4fa2532d6a170 | Python | AAbhishekReddy/Portfolio-Optimisation | /script.py | UTF-8 | 772 | 2.65625 | 3 | [
"MIT"
] | permissive | import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
plt.style.use("classic")
etf = pd.read_csv("/home/abhishek/Desktop/major/mutual/ETFs.csv")
mut = pd.read_csv("/home/abhishek/Desktop/major/mutual/Mutual Funds.csv")
# mut.head()
# mut.shape
# etf.shape
plot = plt.matshow(etf.corr())
plt.show()
plot.savefig("Correlation")
# Correlation matrix
etf_corr = etf.corr()
mut_corr = mut.corr()
# Writting the correlation matrix to a csv
etf_corr.to_csv(r"/home/abhishek/Desktop/major/Portfolio-Optimisation/mutual/etf_correlation.csv")
mut_corr.to_csv(r"mutual/mut_correlation.csv")
cor = mut.corr()
sns.heatmap(cor,robust=True,annot=True)
# sns.pairplot(cor) | true |
acc43980e4c6095cf47409bd7673a4a0a5cb3bdb | Python | jasperchn/bootstrap | /testerEntry.py | UTF-8 | 574 | 3 | 3 | [] | no_license | from utils.FileWriter import FileWriter
if __name__ == "__main__":
path = "C:/temp"
filename = "tester.txt"
# 第一次新建文件并且写入
fileWriter = FileWriter(path=path, filename=filename)
fileWriter.writeLine("this a test file")
fileWriter.writeLine("first writing")
fileWriter.destory()
# 第二次找到已有文件并追加
fileWriter = FileWriter(path=path, filename=filename)
fileWriter.writeLine("")
fileWriter.writeLine("this is a test file")
fileWriter.writeLine("second writing")
fileWriter.destory() | true |
f957235214561e15888f568caa263155057c4783 | Python | Project-X9/Testing | /Web_Testing/Pages/PlaylistSongs.py | UTF-8 | 6,026 | 3.203125 | 3 | [] | no_license | import time
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from Web_Testing.Pages.WebPlayerMenu import WebPlayerMenu
class PlaylistSongs(WebPlayerMenu):
"""
A class representing the Web Player's playlist songs
...
Attributes
----------
search_btn_xpath : string
A string containing the xpath of search button in home menu
search_textbox_xpath : string
A string containing the xpath of the search textbox in search page
song_xpath : string
A sting containing the xpath of the song appear after search in search page
context_menu_xpath : string
A string containing the xpath of context menu of the chosen song
remove_from_playlist_btn_xpath : string
A string containing the xpath of remove from playlist button in the context menu of the chosen song
add_to_playlist_btn_xpath : string
A string containing the xpath of add to playlist button in the context menu of the chosen song
playlist_xpath : string
A string containing the xpath of the playlist in the add to playlist modal
your_library_btn_xpath : string
A string containing the xpath of your library button in home menu
first_playlist_xpath : string
A sting containing the xpath of the first playlist in home menu
playlist_songs_list_xpath : string
A string containing the xpath of the list that contain all playlist songs
first_playlist_song_xpath : string
A sting containing the xpath of the first song in the playlist
song_name : string
A string containing the name of the song to be added to the playlist
Methods
-------
overview()
get number of songs before any action
add_song_to_playlist()
add new song to playlist
remove_song_from_playlist()
remove a song from playlist
"""
search_btn_xpath = "//*[@id='main']/div/div[2]/div[2]/nav/ul/li[2]/a"
search_textbox_xpath = "//*[@id='main']/div/div[2]/div[1]/header/div[3]/div/div/input"
song_xpath = "//*[@id='searchPage']/div/div/section[1]/div/div[2]/div/div/div/div[4]"
context_menu_xpath = "//*[@id='main']/div/nav[1]"
remove_from_playlist_btn_xpath = "// *[ @ id = 'main'] / div / nav[1] / div[5]"
add_to_playlist_btn_xpath = "//*[@id='main']/div/nav[1]/div[4]"
playlist_xpath = "// *[ @ id = 'main'] / div / div[3] / div / div[2] / div"
your_library_btn_xpath = "//*[@id='main']/div/div[2]/div[2]/nav/ul/li[3]/div/a"
first_playlist_xpath = "//*[@id='main']/div/div[2]/div[2]/nav/div[2]/div/div/ul/div[1]/li/div/div/div/a"
playlist_songs_list_xpath = "//*[@id='main']/div/div[2]/div[4]/div[1]/div/div[2]/section[1]/div[4]/section/ol/div"
first_playlist_song_xpath = "//*[@id='main']/div/div[2]/div[4]/div[1]/div/div[2]/section[1]/div[4]/section/ol/div[1]/div/li"
song_name="memories maroon 5"
def __init__(self, driver):
"""
Initializes the driver
:param driver : the driver to which the super class' driver is to be set
:type driver: WebDriver
"""
super().__init__(driver)
def overview(self):
"""get number of songs before any action"""
self.driver.find_element_by_xpath(self.first_playlist_xpath).click()
time.sleep(3)
self.no_of_playlist_songs_before_add = len(self.driver.find_elements(By.XPATH, self.playlist_songs_list_xpath))
def add_song_to_playlist(self):
"""
add new song to playlist
:return: boolean true if no. of songs before add is smaller than no. of songs after add
:rtype: bool
"""
self.driver.find_element_by_xpath(self.search_btn_xpath).click()
self.driver.find_element_by_xpath(self.search_textbox_xpath).send_keys(self.song_name)
time.sleep(15)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath(self.song_xpath)).context_click().context_click().perform()
ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath(self.context_menu_xpath))
time.sleep(5)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath(self.add_to_playlist_btn_xpath)).click().perform()
time.sleep(3)
self.driver.find_element_by_xpath(self.playlist_xpath).click()
time.sleep(3)
self.driver.find_element_by_xpath(self.first_playlist_xpath).click()
time.sleep(3)
no_of_playlist_songs_after_add = len(self.driver.find_elements(By.XPATH, self.playlist_songs_list_xpath))
if self.no_of_playlist_songs_before_add < no_of_playlist_songs_after_add:
return True
else:
return False
def remove_song_from_playlist(self):
"""
remove a song from playlist
:return: boolean true if no. of songs before remove is greater than no. of songs after remove
:rtype: bool
"""
if self.no_of_playlist_songs_before_add != 0:
ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath(self.first_playlist_song_xpath)).context_click().context_click().perform()
ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath(self.context_menu_xpath))
time.sleep(5)
ActionChains(self.driver).move_to_element(self.driver.find_element_by_xpath(self.remove_from_playlist_btn_xpath)).click().perform()
time.sleep(5)
no_of_playlist_songs_after_add = len(self.driver.find_elements(By.XPATH, self.playlist_songs_list_xpath))
if self.no_of_playlist_songs_before_add > no_of_playlist_songs_after_add:
return True
else:
return False
else:
print("there is no song to remove") | true |
2ea1beaea82c3f05425610d36b4eb0a6a67c14bc | Python | j-tyler/learnProgramming | /TheCProgrammingLanguage/python-celsiustofahr-e1p4.py | UTF-8 | 178 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
lower = -20
upper = 100
step = 5
celsius = lower
while celsius <= upper:
fahr = celsius * 9 / 5 + 32
print "%3d %6d" % (celsius, fahr)
celsius += step
| true |
ba673439e837ec4829dacdbb6cdb7f2c5b52d443 | Python | ashurzp/tradingpy | /candle_stick_plot.py | UTF-8 | 1,172 | 3.0625 | 3 | [] | no_license | import matplotlib
import matplotlib.pyplot as plt
import mpl_finance
import pandas
matplotlib.style.use('ggplot')
def stockPricePlot(ticker):
print('dsqdsq')
# Step 1. load data
history = pandas.read_csv(
'./Data/IntradayUS/' + ticker + '.csv', parse_dates=True, index_col=0)
# Step 2. Data manipulation
close = history['close']
close = close.reset_index()
close['timestamp'] = close['timestamp'].map(matplotlib.dates.date2num)
ohlc = history[['open', 'high', 'low', 'close']].resample('1H').ohlc()
ohlc = ohlc.reset_index()
ohlc['timestamp'] = ohlc['timestamp'].map(matplotlib.dates.date2num)
# Step 3. Plot Figures.
# Subplot 1. scatter plot.
subplot1 = plt.subplot2grid((2, 1), (0, 0), rowspan=1, colspan=1)
subplot1.xaxis_date()
subplot1.plot(close['timestamp'], close['close'], 'b.')
plt.title(ticker)
# Subplot 2. candle stick plot
subplot2 = plt.subplot2grid(
(2, 1), (1, 0), rowspan=1, colspan=1, sharex=subplot1)
mpl_finance.candlestick_ohlc(
ax=subplot2, quotes=ohlc.values, width=0.01, colorup='g', colordown='r')
plt.show()
stockPricePlot('AAWW')
| true |
9bbcb857bd64e6f58e3b01a910edb35b2b2254a4 | Python | Onodric/Bangazon-Orientation-Classes | /department.py | UTF-8 | 602 | 3.484375 | 3 | [] | no_license | class Department(object):
"""Parent class for all departments
Methods: __init__,meet , get_name, get_supervisor
"""
def __init__(self, name, supervisor, employee_count):
self.name = name
self.supervisor = supervisor
self.size = employee_count
def meet():
print("Everyone meet in {}'s office".format(self.supervisor))
def get_name(self):
"""Returns the name of the department"""
return self.name
def get_supervisor(self):
"""Returns the name of the supervisor"""
return self.supervisor
| true |
323894b202ed7d68c1f3c4f522025f2416829aca | Python | J-Seo/sgg | /lib/get_union_boxes.py | UTF-8 | 4,020 | 2.578125 | 3 | [
"MIT"
] | permissive | import torch
from torch.nn import functional as F
from lib.pytorch_misc import enumerate_by_image
from torch.nn.modules.module import Module
from torch import nn
from config import BATCHNORM_MOMENTUM
class UnionBoxesAndFeats(Module):
def __init__(self, pooling_size=7, stride=16, dim=256, concat=False, use_feats=True, SN=False):
"""
:param pooling_size: Pool the union boxes to this dimension
:param stride: pixel spacing in the entire image
:param dim: Dimension of the feats
:param concat: Whether to concat (yes) or add (False) the representations
"""
super(UnionBoxesAndFeats, self).__init__()
conv_layer = lambda n_in, n_out, ks, stide, pad, bias: nn.Conv2d(n_in, n_out,
kernel_size=ks,
stride=stride,
padding=pad, bias=bias)
self.pooling_size = pooling_size
self.stride = stride
self.dim = dim
self.use_feats = use_feats
self.conv = nn.Sequential(
conv_layer(2, dim //2, 7, 2, 3, True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(dim//2, momentum=BATCHNORM_MOMENTUM),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
conv_layer(dim // 2, dim, 3, 1, 1, True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(dim, momentum=BATCHNORM_MOMENTUM) # remove batch norm here to make features relu'ed
)
self.concat = concat
def forward(self, union_pools, rois, union_inds, im_sizes):
boxes = rois[:, 1:].clone()
# scale boxes to the range [0,1]
scale = boxes.new(boxes.shape).fill_(0)
for i, s, e in enumerate_by_image(rois[:, 0].long().data):
h, w = im_sizes[i][:2]
scale[s:e, 0] = w
scale[s:e, 1] = h
scale[s:e, 2] = w
scale[s:e, 3] = h
boxes = boxes / scale
try:
rects = draw_union_boxes_my(boxes, union_inds, self.pooling_size * 4 - 1) - 0.5
except Exception as e:
# there was a problem with bboxes being larger than images at test time, had to clip them
print(rois, boxes, im_sizes, scale)
raise
if self.concat:
return torch.cat((union_pools, self.conv(rects)), 1)
return union_pools + self.conv(rects)
def draw_union_boxes_my(boxes, union_inds, sz):
"""
:param boxes: in range [0,1]
:param union_inds:
:param sz:
:return:
"""
assert boxes.max() <= 1.001, boxes.max()
boxes_grid = F.grid_sample(boxes.new(len(boxes), 1, sz, sz).fill_(1), _boxes_to_grid(boxes, sz, sz))
out = boxes_grid[union_inds.reshape(-1)].reshape(len(union_inds), 2, sz, sz)
return out
def _boxes_to_grid(boxes, H, W):
# Copied from https://github.com/google/sg2im/blob/master/sg2im/layout.py#L94
"""
Input:
- boxes: FloatTensor of shape (O, 4) giving boxes in the [x0, y0, x1, y1]
format in the [0, 1] coordinate space
- H, W: Scalars giving size of output
Returns:
- grid: FloatTensor of shape (O, H, W, 2) suitable for passing to grid_sample
"""
O = boxes.size(0)
boxes = boxes.view(O, 4, 1, 1)
# All these are (O, 1, 1)
x0, y0 = boxes[:, 0], boxes[:, 1]
x1, y1 = boxes[:, 2], boxes[:, 3]
ww = x1 - x0
hh = y1 - y0
X = torch.linspace(0, 1, steps=W).view(1, 1, W).to(boxes)
Y = torch.linspace(0, 1, steps=H).view(1, H, 1).to(boxes)
X = (X - x0) / ww # (O, 1, W)
Y = (Y - y0) / hh # (O, H, 1)
# Stack does not broadcast its arguments so we need to expand explicitly
X = X.expand(O, H, W)
Y = Y.expand(O, H, W)
grid = torch.stack([X, Y], dim=3) # (O, H, W, 2)
# Right now grid is in [0, 1] space; transform to [-1, 1]
grid = grid.mul(2).sub(1)
return grid
| true |
871495337c13d2ce57af92f766145f7022ab01ed | Python | LinXueyuanStdio/EchoEA | /toolbox/DatasetSchema.py | UTF-8 | 19,358 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | # Dataset path schemas; downloads datasets on first use
# outline
# 1. utils function
# - extract_tar(tar_path, extract_path='.')
# - extract_zip(zip_path, extract_path='.')
# 2. remote dataset
# - RemoteDataset
# - fetch_from_remote(name: str, url: str, root_path: Path)
# 3. RelationalTriplet class
# - RelationalTriplet
# - RelationalTripletDatasetMeta
# - RelationalTripletDatasetCachePath
# - RelationalTripletDatasetSchema
# 1. FreebaseFB15k
# 2. DeepLearning50a
# 3. WordNet18
# 4. WordNet18_RR
# 5. YAGO3_10
# 6. FreebaseFB15k_237
# 7. Kinship
# 8. Nations
# 9. UMLS
# 10. NELL_995
# - get_dataset(dataset_name: str, custom_dataset_path=None)
# 4. custom dataset
import shutil
import tarfile
import pickle
import os
import zipfile
import urllib.request
from pathlib import Path
from typing import Dict
from toolbox.Log import Log
# region 1. utils function
def extract_tar(tar_path, extract_path='.'):
    """Extract a tar archive, recursively unpacking nested .tgz/.tar members.

    Most of the knowledge graph datasets are downloaded in a compressed
    tar format. This function is used to extract them.

    Args:
        tar_path (str): Location of the tar file.
        extract_path (str): Path where the files will be decompressed.
    """
    # Use a context manager so the archive handle is closed even when
    # extracting a member raises (the previous version leaked the handle).
    with tarfile.open(tar_path, 'r') as tar:
        for item in tar:
            tar.extract(item, extract_path)
            # A member that is itself a tarball gets unpacked in turn.
            # NOTE(review): the recursive call resolves item.name relative to
            # the current working directory, so nested archives are only found
            # when extract_path is '.' -- preserved as-is, confirm upstream.
            if item.name.find(".tgz") != -1 or item.name.find(".tar") != -1:
                extract_tar(item.name, "./" + item.name[:item.name.rfind('/')])
def extract_zip(zip_path, extract_path='.'):
    """Extract a zip archive into *extract_path*.

    Companion to :func:`extract_tar` for datasets that are distributed
    as .zip files instead of tarballs.

    Args:
        zip_path (str): Location of the zip file.
        extract_path (str): Path where the files will be decompressed.
    """
    archive = zipfile.ZipFile(zip_path, 'r')
    try:
        archive.extractall(extract_path)
    finally:
        # Explicit close mirrors what the context-manager form would do.
        archive.close()
# endregion
# region 2. remote dataset
class RemoteDataset:
    """Downloads a remote dataset archive and unpacks it under *root_path*.

    The archive is stored next to the extracted files as either
    ``<name>.tgz`` or ``<name>.zip``, depending on what the URL points at.
    """

    def __init__(self, name: str, url: str, root_path: Path):
        root_path.mkdir(parents=True, exist_ok=True)
        self._logger = Log(str(root_path / "fetch.log"))
        self.name = name
        self.url = url
        self.root_path: Path = root_path
        # Candidate on-disk locations for the downloaded archive.
        self.tar: Path = self.root_path / ('%s.tgz' % self.name)
        self.zip: Path = self.root_path / ('%s.zip' % self.name)

    def download(self):
        """Stream the archive at ``self.url`` to disk inside *root_path*."""
        self._logger.info("Downloading the dataset %s" % self.name)
        # Pick the target file from the URL's compression suffix.
        if self.url.endswith(('.tar.gz', '.tgz')):
            target = self.tar
        elif self.url.endswith('.zip'):
            target = self.zip
        else:
            raise NotImplementedError("Unknown compression format")
        with urllib.request.urlopen(self.url) as response, open(str(target), 'wb') as out_file:
            shutil.copyfileobj(response, out_file)

    def extract(self):
        """Unpack whichever archive exists (tar is checked before zip)."""
        try:
            for archive, unpack in ((self.tar, extract_tar), (self.zip, extract_zip)):
                if os.path.exists(archive):
                    self._logger.info("Extracting the downloaded dataset from %s to %s" % (archive, self.root_path))
                    unpack(str(archive), str(self.root_path))
                    return
        except Exception as e:
            self._logger.error("Could not extract the target file!")
            self._logger.exception(e)
            raise
def fetch_from_remote(name: str, url: str, root_path: Path):
    """Download dataset *name* from *url* and unpack it under *root_path*."""
    dataset = RemoteDataset(name, url, root_path)
    dataset.download()
    dataset.extract()
# endregion
# region 3. Relational Triplet
class RelationalTriplet:
    """A single (head, relation, tail) triple of a knowledge graph.

    Head, relation and tail may be given either as integer ids or as their
    string labels; the class stores whichever form it is handed.

    Args:
        h (str or int): Head entity.
        r (str or int): Relation.
        t (str or int): Tail entity.

    Examples:
        >>> from toolbox.DatasetSchema import RelationalTriplet
        >>> trip1 = RelationalTriplet(2,3,5)
        >>> trip2 = RelationalTriplet('Tokyo','isCapitalof','Japan')
    """

    def __init__(self, h, r, t):
        # Delegate to set_ids so initialisation and re-assignment share one path.
        self.set_ids(h, r, t)

    def set_ids(self, h, r, t):
        """Overwrite the stored head, relation and tail.

        Args:
            h (int): Integer head entity.
            r (int): Integer relation entity.
            t (int): Integer tail entity.
        """
        self.h, self.r, self.t = h, r, t
class BaseDatasetSchema:
    """Base schema for a dataset living under ``./<home>/<name>``.

    Creates ``./<home>`` on construction and exposes helpers for fetching
    the dataset from a remote URL exactly once (guarded by ``fetch.log``).
    """

    def __init__(self, name: str, home: str = "data"):
        self.name = name
        # Absolute path ./<home>/<name>; the <home> directory is created here.
        self.root_path = self.get_dataset_home_path(home)

    def get_dataset_home_path(self, home="data") -> Path:
        """Create ``./<home>`` if needed and return absolute ``./<home>/<name>``."""
        base = Path('.') / home
        base.mkdir(parents=True, exist_ok=True)
        return base.resolve() / self.name

    def force_fetch_remote(self, url):
        """Download and extract the dataset unconditionally."""
        fetch_from_remote(self.name, url, self.root_path)

    def try_to_fetch_remote(self, url):
        """Download only when no ``fetch.log`` marker exists yet."""
        marker = self.root_path / "fetch.log"
        if not marker.exists():
            self.force_fetch_remote(url)

    def dump(self):
        """Log every attribute of this schema to ``DatasetSchema.log``."""
        logger = Log(str(self.root_path / "DatasetSchema.log"), name_scope="DatasetSchema")
        for attr, value in self.__dict__.items():
            logger.info("%s %s" % (attr, value))
class RelationalTripletDatasetSchema(BaseDatasetSchema):
    """Schema for a dataset made of train/test/valid relational-triplet files.

    On-disk layout under ``./data``::

        <name>/
            <name>.zip (or .tgz)
            <name>/              (extracted from the archive)
                cache/
                    cache_xxx.pkl
                <prefix>train.txt
                <prefix>test.txt
                <prefix>valid.txt

    Subclasses that can be downloaded call ``self.try_to_fetch_remote(url)``
    at the end of their ``__init__``.

    Args:
        name (str): Name of the dataset.

    Examples:
        >>> from toolbox.DatasetSchema import RelationalTripletDatasetSchema
        >>> kgdata = RelationalTripletDatasetSchema("dL50a")
        >>> kgdata.dump()
    """

    def __init__(self, name: str, home: str = "data"):
        super(RelationalTripletDatasetSchema, self).__init__(name, home)
        self.dataset_path = self.get_dataset_path()
        self.cache_path = self.get_dataset_path_child("cache")
        self.cache_path.mkdir(parents=True, exist_ok=True)
        self.data_paths = self.get_data_paths()

    def get_dataset_path(self) -> Path:
        """Directory that holds the (extracted) dataset files."""
        return self.root_path / self.name

    def get_dataset_path_child(self, name) -> Path:
        """Path of *name* inside the dataset directory."""
        return self.dataset_path / name

    def get_data_paths(self) -> Dict[str, Path]:
        """Mapping split-name -> file path; subclasses override to add prefixes."""
        return self.default_data_paths()

    def default_data_paths(self, prefix="") -> Dict[str, Path]:
        """Conventional split files ``<prefix>{train,test,valid}.txt``.

        :param prefix: for example, "${self.dataset_path}/${prefix}train.txt"
        """
        return {
            split: self.get_dataset_path_child('%s%s.txt' % (prefix, split))
            for split in ('train', 'test', 'valid')
        }
class FreebaseFB15k(RelationalTripletDatasetSchema):
    """Schema for the FB15k Freebase benchmark.

    Fetched on first use from the UTC mirror; split files carry the
    ``freebase_mtr100_mte100-`` filename prefix.
    """

    def __init__(self, home: str = "data"):
        super(FreebaseFB15k, self).__init__("FB15k", home)
        self.try_to_fetch_remote(
            "https://everest.hds.utc.fr/lib/exe/fetch.php?media=en:fb15k.tgz")

    def get_data_paths(self) -> Dict[str, Path]:
        # FB15k names its splits with this long Freebase prefix.
        return self.default_data_paths(prefix="freebase_mtr100_mte100-")
class DeepLearning50a(RelationalTripletDatasetSchema):
    """Schema for the dL50a dataset.

    Fetched on first use from the KGppler GitHub repository; split files
    carry the ``deeplearning_dataset_50arch-`` filename prefix.
    """

    def __init__(self, home: str = "data"):
        super(DeepLearning50a, self).__init__("dL50a", home)
        self.try_to_fetch_remote(
            "https://github.com/louisccc/KGppler/raw/master/datasets/dL50a.tgz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths(prefix='deeplearning_dataset_50arch-')
class WordNet18(RelationalTripletDatasetSchema):
    """Schema for the WN18 WordNet benchmark.

    Fetched on first use from the UTC mirror; the archive unpacks into a
    ``wordnet-mlj12`` directory and prefixes its split files accordingly.
    """

    def __init__(self, home: str = "data"):
        super(WordNet18, self).__init__("WN18", home)
        self.try_to_fetch_remote(
            "https://everest.hds.utc.fr/lib/exe/fetch.php?media=en:wordnet-mlj12.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths(prefix='wordnet-mlj12-')

    def get_dataset_path(self):
        # The tarball extracts into "wordnet-mlj12", not into "<root>/WN18".
        return self.root_path / 'wordnet-mlj12'
class WordNet18_RR(RelationalTripletDatasetSchema):
    """Download descriptor for the WordNet18-RR (WN18RR) knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("WN18RR", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/WN18RR.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
class YAGO3_10(RelationalTripletDatasetSchema):
    """Download descriptor for the YAGO3-10 knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("YAGO3_10", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/YAGO3-10.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
class FreebaseFB15k_237(RelationalTripletDatasetSchema):
    """Download descriptor for the FB15k-237 knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("FB15K_237", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/fb15k-237.tgz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
class Kinship(RelationalTripletDatasetSchema):
    """Download descriptor for the Kinship knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("Kinship", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/kinship.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
class Nations(RelationalTripletDatasetSchema):
    """Download descriptor for the Nations knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("Nations", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/nations.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
class UMLS(RelationalTripletDatasetSchema):
    """Download descriptor for the UMLS knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("UMLS", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/umls.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
class NELL_995(RelationalTripletDatasetSchema):
    """Download descriptor for the NELL-995 knowledge-graph dataset.

    Inherits download/extract/path machinery from RelationalTripletDatasetSchema.
    """

    def __init__(self, home: str = "data"):
        super().__init__("NELL_995", home)
        self.try_to_fetch_remote("https://github.com/louisccc/KGppler/raw/master/datasets/NELL_995.zip")

    def get_data_paths(self) -> Dict[str, Path]:
        return self.default_data_paths()

    def get_dataset_path(self):
        # The archive has no wrapping directory; splits live under the root.
        return self.root_path
def get_dataset(dataset_name: str):
    """Instantiate a known dataset descriptor by case-insensitive name or alias.

    :param dataset_name: e.g. "FB15k", "wn18", "yago", "dbp15k".
    :return: a dataset schema instance (construction triggers the download).
    :raises ValueError: if the name matches no known dataset.
    """
    # Normalize once instead of calling .lower() in every comparison
    # (previously recomputed up to 22 times per call).
    name = dataset_name.lower()
    if name in ('freebase15k', 'fb15k'):
        return FreebaseFB15k()
    elif name in ('deeplearning50a', 'dl50a'):
        return DeepLearning50a()
    elif name in ('wordnet18', 'wn18'):
        return WordNet18()
    elif name in ('wordnet18_rr', 'wn18_rr'):
        return WordNet18_RR()
    elif name in ('yago3_10', 'yago'):
        return YAGO3_10()
    elif name in ('freebase15k_237', 'fb15k_237'):
        return FreebaseFB15k_237()
    elif name in ('kinship', 'ks'):
        return Kinship()
    elif name == 'nations':
        return Nations()
    elif name == 'umls':
        return UMLS()
    elif name == 'nell_995':
        return NELL_995()
    elif name == 'dbp15k':
        return DBP15k()
    elif name == 'dbp100k':
        return DBP100k()
    else:
        raise ValueError("Unknown dataset: %s" % dataset_name)
class DBP15k(RelationalTripletDatasetSchema):
    """Download descriptor for the cross-lingual DBP15k entity-alignment dataset."""

    def __init__(self, name="fr_en", home: str = "data"):
        """
        :param name: language pair, one of "fr_en", "ja_en", "zh_en"
        """
        self.dataset_name = name
        super().__init__("DBP15k", home)
        self.try_to_fetch_remote("http://ws.nju.edu.cn/jape/data/DBP15k.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        kg1, kg2 = self.dataset_name.split("_")
        paths = {
            'train': self.get_dataset_path_child('train.txt'),
            'test': self.get_dataset_path_child('test.txt'),
            'valid': self.get_dataset_path_child('valid.txt'),
            'seeds': self.get_dataset_path_child('ent_ILLs'),
        }
        # Per-language attribute/relational triple files for both KGs.
        for idx, kg in ((1, kg1), (2, kg2)):
            paths['kg%d_attribute_triples' % idx] = self.get_dataset_path_child('%s_att_triples' % kg)
            paths['kg%d_relational_triples' % idx] = self.get_dataset_path_child('%s_rel_triples' % kg)
        return paths

    def get_dataset_path(self):
        return self.root_path / self.name / self.dataset_name
class DBP100k(RelationalTripletDatasetSchema):
    """Download descriptor for the cross-lingual DBP100k entity-alignment dataset."""

    def __init__(self, name="fr_en", home: str = "data"):
        """
        :param name: language pair, one of "fr_en", "ja_en", "zh_en"
        """
        self.dataset_name = name
        super().__init__("DBP100k", home)
        self.try_to_fetch_remote("http://ws.nju.edu.cn/jape/data/DBP100k.tar.gz")

    def get_data_paths(self) -> Dict[str, Path]:
        kg1, kg2 = self.dataset_name.split("_")
        paths = {
            'train': self.get_dataset_path_child('train.txt'),
            'test': self.get_dataset_path_child('test.txt'),
            'valid': self.get_dataset_path_child('valid.txt'),
            'seeds': self.get_dataset_path_child('ent_ILLs'),
        }
        # Per-language attribute/relational triple files for both KGs.
        for idx, kg in ((1, kg1), (2, kg2)):
            paths['kg%d_attribute_triples' % idx] = self.get_dataset_path_child('%s_att_triples' % kg)
            paths['kg%d_relational_triples' % idx] = self.get_dataset_path_child('%s_rel_triples' % kg)
        return paths

    def get_dataset_path(self):
        return self.root_path / self.name / self.dataset_name
class SimplifiedDBP15k(RelationalTripletDatasetSchema):
    """Download descriptor for the simplified DBP15k entity-alignment dataset."""

    def __init__(self, name="fr_en", home: str = "data"):
        """
        :param name: language pair, one of "fr_en", "ja_en", "zh_en"
        """
        self.dataset_name = name
        super().__init__("SimplifiedDBP15k", home)
        self.try_to_fetch_remote("https://github.com/LinXueyuanStdio/KG_datasets/raw/master/datasets/SimplifiedDBP15k.zip")

    def get_data_paths(self) -> Dict[str, Path]:
        kg1, kg2 = self.dataset_name.split("_")
        paths = {
            'train': self.get_dataset_path_child('train.txt'),
            'test': self.get_dataset_path_child('test.txt'),
            'valid': self.get_dataset_path_child('valid.txt'),
            'seeds': self.get_dataset_path_child('ent_ILLs'),
        }
        # Per-language attribute/relational triple files for both KGs.
        for idx, kg in ((1, kg1), (2, kg2)):
            paths['kg%d_attribute_triples' % idx] = self.get_dataset_path_child('%s_att_triples' % kg)
            paths['kg%d_relational_triples' % idx] = self.get_dataset_path_child('%s_rel_triples' % kg)
        return paths

    def get_dataset_path(self):
        return self.root_path / self.name / self.dataset_name
# endregion
| true |
916e980408ffc41420083910d154c22b032d4c61 | Python | BiancaChirica/Lego-Framework | /Page4.py | UTF-8 | 4,141 | 2.796875 | 3 | [] | no_license | import pickle
import random
import numpy as np
import Pieces
from Configuration import Configuration
from Page import Page
import tkinter as tk
from Render import Render
from tkinter import messagebox
class Page4(Page):
    """Tkinter page for building a Lego piece configuration.

    Shows a scrollable list of available pieces, lets the user add pieces
    to a 3D voxel space via the Render engine, and save or reset the
    configuration under a user-chosen name.
    """
    def __init__(self, mainPage, data):
        # data provides SPACE_WIDTH / SPACE_HEIGHT / SPACE_LENGTH for the voxel grid.
        Page.__init__(self, mainPage)
        self.data = data
        self.name = ''
        self.configure(bg="#fcdfc7")
        # 3D occupancy grid that pieces are rendered into.
        self.space = np.zeros((self.data.SPACE_WIDTH, self.data.SPACE_HEIGHT, self.data.SPACE_LENGTH), dtype=int)
        # Background image; keep a reference on the widget so it is not GC'd.
        photo = tk.PhotoImage(file=r"images\b4.png")
        w = tk.Label(self, image=photo)
        w.place(x=0, y=0, relwidth=1, relheight=1)
        w.image = photo
        img_list = list(Pieces.pieces.keys())
        # Scrollable canvas hosting one (label, button, preview) group per piece.
        canvas = tk.Canvas(self)
        canvas.pack(side=tk.LEFT, fill='both', expand=True, padx=100)
        canvas.configure(bg="#d5e8d8")
        scroll = tk.Scrollbar(self, orient=tk.VERTICAL, command=canvas.yview)
        scroll.pack(side=tk.RIGHT, fill='y')
        scrollable_frame = tk.Frame(canvas)
        # Keep the scrollregion in sync with the frame's content size.
        scrollable_frame.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
        scrollable_frame.configure(bg='#d5e8d8')
        canvas.create_window(0, 0, window=scrollable_frame, anchor='nw')
        # NOTE(review): .place() returns None, so label_entry/saveConf/clear hold None.
        label_entry = tk.Label(canvas, text='Name your configuration:', font="Arial", fg="black",
                               bg="#d5e8d8").place(x=330, y=30)
        self.e = tk.Entry(canvas)
        self.e.pack(anchor='ne', pady=67, padx=80)
        saveConf = tk.Button(self, text="Save configuration", width=20, height=1, background='#b2ebe3', command=self.saveConfig,
                             font='Arial').place(x=420, y=100)
        clear = tk.Button(self, text="Reset configuration",command=self.newSpace, width=20, height=1, background='#b2ebe3',
                          font='Arial').place(x=420, y=150)
        for i in range(len(img_list)):
            # arg=i binds the loop index at definition time (late-binding fix).
            btn1 = tk.Button(scrollable_frame, text="Add", width=20, height=1, background='#b2ebe3',
                             font='Arial',
                             command=lambda arg=i: self.draw(img_list[arg]))
            label_img = tk.Label(scrollable_frame, text='{} :'.format(img_list[i]), font="Arial 14", fg="black",
                                 bg="#d5e8d8")
            label_img.pack(anchor='w', padx=30, pady=7, expand=True)
            btn1.pack(anchor='w', padx=30, pady=7, expand=True)
            photo = tk.PhotoImage(file=r"images\img\{}.PNG".format(img_list[i]))
            w = tk.Label(scrollable_frame, image=photo)
            w.pack(anchor='w', padx=30, pady=7, expand=True)
            # Keep an image reference per label to prevent garbage collection.
            w.image = photo
        canvas.config(yscrollcommand=scroll.set)
        canvas.pack()
    def draw(self, arg):
        """Add piece `arg` (a key of Pieces.pieces) to the space via the renderer.

        Requires a valid (non-empty, space-free) configuration name first.
        """
        if self.e.get() == '':
            messagebox.showwarning("Warning", "Please name your configuration first")
            return
        if ' ' in self.e.get():
            messagebox.showwarning("Warning", "Invalid name.\nDon't use spaces for name.")
            return
        render = Render(self.data.SPACE_WIDTH, self.data.SPACE_HEIGHT, self.data.SPACE_LENGTH, (7,6,-8))
        # Random color index in [0, 6] for the new piece.
        piece = Pieces.Piece(Pieces.pieces[arg], random.randint(0, 6))
        new_space = render.render("add", piece, self.space, save_name=self.e.get())
        # The renderer returns a list on success; keep the old space otherwise.
        if type(new_space) == list:
            self.space = new_space
    def newSpace(self):
        """Reset the voxel space to empty and clear the name entry."""
        self.space = np.zeros((self.data.SPACE_WIDTH, self.data.SPACE_HEIGHT, self.data.SPACE_LENGTH), dtype=int)
        self.e.delete(0, tk.END)
    def saveConfig(self):
        """Append the current configuration to ConfigurationsList.bin (pickle)."""
        if self.e.get() == '':
            tk.messagebox.showwarning("Warning", "Please set a name for your configuration.")
            return
        if ' ' in self.e.get():
            tk.messagebox.showwarning("Warning", "Invalid name.\nDon't use spaces for name.")
            return
        conf = Configuration(self.e.get(), self.space)
        # Read-modify-write the whole pickled list.
        with open('ConfigurationsList.bin', 'rb') as f:
            data_loaded = pickle.load(f)
        data_loaded.append(conf)
        with open('ConfigurationsList.bin', 'wb') as f:
            pickle.dump(data_loaded, f)
| true |
aa2d9d845c18716e0ca6b887d246f75f93f9f0d1 | Python | sashamerchuk/algo_lab_1 | /venv/training.py | UTF-8 | 4,831 | 3.46875 | 3 | [] | no_license | import random
# Sample integer lists used to exercise each sorting routine below.
a=[1,2,68,2,3,5]
b=[21,23,68,24,31,5]
c=[121,233,648,254,311,54]
q = [32,48,356,54,67,76]
z=[983,234,765,4321,342,23,12]
import time  # NOTE(review): imported but never used
def bubble_sort(arr):
    """Sort `arr` in place in ascending order (bubble sort, O(n^2)).

    Bug fix: the inner loop previously ranged over ``len(a)`` (a
    module-level list) instead of ``len(arr)``, so the function only
    worked correctly for lists of that particular length.
    """
    swapped = True
    while swapped:
        swapped = False
        for i in range(len(arr) - 1):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swapped = True
# Demo: sort list a in place (ascending) and show the result.
bubble_sort(a)
print("bubble sort",a)
def selection_sort(arr):
    """Selection sort, in place.

    NOTE: each pass selects the *largest* remaining element, so the
    result is in DESCENDING order (unlike bubble_sort above).
    """
    for pos in range(len(arr)):
        best = pos
        for candidate in range(pos + 1, len(arr)):
            if arr[candidate] > arr[best]:
                best = candidate
        arr[pos], arr[best] = arr[best], arr[pos]
# Demo: selection_sort orders descending (a was previously sorted ascending).
selection_sort(a)
print("selection sort",a)
def insertion_sort(arr):
    """Insertion sort, in place.

    NOTE: elements are shifted while the neighbour is *smaller*, so the
    result is in DESCENDING order.
    """
    for pos in range(1, len(arr)):
        current = arr[pos]
        gap = pos
        while gap > 0 and arr[gap - 1] < current:
            arr[gap] = arr[gap - 1]
            gap -= 1
        arr[gap] = current
# Demo: insertion_sort orders descending.
insertion_sort(b)
print("insertion sort",b)
def merge_sort(arr):
    """Recursive merge sort, in place, ascending order."""
    if len(arr) <= 1:
        return
    middle = len(arr) // 2
    left_half = arr[:middle]
    right_half = arr[middle:]
    merge_sort(left_half)
    merge_sort(right_half)
    write = li = ri = 0
    # Merge the two sorted halves back into arr.
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            arr[write] = left_half[li]
            li += 1
        else:
            arr[write] = right_half[ri]
            ri += 1
        write += 1
    # Copy any remaining tail from either half.
    while li < len(left_half):
        arr[write] = left_half[li]
        li += 1
        write += 1
    while ri < len(right_half):
        arr[write] = right_half[ri]
        ri += 1
        write += 1
# Demo: merge_sort orders ascending.
merge_sort(c)
print("merge sort",c)
def partition(arr, low, high):
    """Hoare partition around the middle element of arr[low..high].

    Returns an index j such that every element of arr[low..j] is <= every
    element of arr[j+1..high] (the pivot's final position is not fixed).
    """
    pivot = arr[(low + high) // 2]
    left = low - 1
    right = high + 1
    while True:
        left += 1
        while arr[left] < pivot:
            left += 1
        right -= 1
        while arr[right] > pivot:
            right -= 1
        if left >= right:
            return right
        arr[left], arr[right] = arr[right], arr[left]
def quick_sort(arr):
    """In-place quicksort (Hoare scheme), ascending order."""
    def _recurse(items, lo, hi):
        if lo < hi:
            split = partition(items, lo, hi)
            _recurse(items, lo, split)
            _recurse(items, split + 1, hi)
    _recurse(arr, 0, len(arr) - 1)
# Demo: quicksort orders ascending.
quick_sort(q)
print("quick sort",q)
def heapify(nums, heap_size, root_index):
    """Sift nums[root_index] down so the subtree rooted there satisfies
    the max-heap property (children at indices 2i+1 and 2i+2)."""
    top = root_index
    left_child = 2 * root_index + 1
    right_child = 2 * root_index + 2
    # Promote a child if it is inside the heap and larger than the current top.
    if left_child < heap_size and nums[left_child] > nums[top]:
        top = left_child
    if right_child < heap_size and nums[right_child] > nums[top]:
        top = right_child
    if top != root_index:
        nums[root_index], nums[top] = nums[top], nums[root_index]
        # Keep sifting down from the child we swapped into.
        heapify(nums, heap_size, top)
def heap_sort(nums):
    """In-place heap sort, ascending: build a max-heap, then repeatedly
    move the root (maximum) to the end and re-heapify the prefix."""
    n = len(nums)
    # Build the max-heap bottom-up over the whole list.
    for i in range(n, -1, -1):
        heapify(nums, n, i)
    # Swap the current maximum to the back of the unsorted region.
    for i in range(n - 1, 0, -1):
        nums[i], nums[0] = nums[0], nums[i]
        heapify(nums, i, 0)
# Demo: heap sort orders ascending.
heap_sort(z)
print("heap sort",z)
def partition(arr, low, high):
    """Hoare partition (duplicate re-definition; shadows the one above)."""
    i, j = low - 1, high + 1
    pivot = arr[(low + high) // 2]
    while True:
        i += 1
        while arr[i] < pivot:
            i += 1
        j -= 1
        while arr[j] > pivot:
            j -= 1
        if i >= j:
            return j
        arr[i], arr[j] = arr[j], arr[i]
def quick_sort(arr):
    """In-place quicksort (duplicate re-definition; shadows the one above)."""
    def _qs(items, lo, hi):
        if lo >= hi:
            return
        mid = partition(items, lo, hi)
        _qs(items, lo, mid)
        _qs(items, mid + 1, hi)
    _qs(arr, 0, len(arr) - 1)
# Demo: re-sort q (already ascending from the earlier run).
quick_sort(q)
print("quick sort",q)
def selection_sort1(arr):
    """Duplicate of selection_sort: in-place selection sort that yields
    DESCENDING order (largest remaining element chosen each pass)."""
    for pos, _ in enumerate(arr):
        best = pos
        for k in range(pos + 1, len(arr)):
            if arr[k] > arr[best]:
                best = k
        arr[pos], arr[best] = arr[best], arr[pos]
# Demo: re-sort z descending and show it.
selection_sort1(z)
print(z)
| true |
2fc2e9f44ea9babbe8f7f0b90e2a3ba4309070e5 | Python | hlfshell/pyimagesearch | /animals/dataset.py | UTF-8 | 882 | 2.96875 | 3 | [] | no_license | from torch.utils.data.dataset import Dataset
import os
from PIL import Image
import torch
import numpy as np
class AnimalsDataset(Dataset):
    """Cats-vs-dogs image dataset; the label comes from the filename prefix.

    Expects a flat directory of files named like "cat.0.jpg" / "dog.3.jpg".
    NOTE(review): the directory is re-listed on every access, so the file
    set must not change between calls.
    """

    def __init__(self, filepath, transforms=None):
        self.filepath = filepath
        self.transforms = transforms

    def __getitem__(self, index):
        filename = os.listdir(self.filepath)[index]
        # One-hot label: cat -> [0, 1], dog -> [1, 0].
        label = filename.split(".")[0]
        if label == "cat":
            label = [0, 1]
        elif label == "dog":
            label = [1, 0]
        label = torch.from_numpy(np.array(label))
        image = Image.open(self.filepath + "/" + filename)
        if self.transforms is not None:
            image = self.transforms(image)
        return (image, label)

    def __len__(self):
        return len(os.listdir(self.filepath))
c3438173f86322cf97e8d37208389b62933f79b6 | Python | Hwenhan/Physiological_signal_processing | /dataset_format/TxDatasetTable.py | UTF-8 | 964 | 2.71875 | 3 | [] | no_license | import numpy as np
import os
import pandas as pd
from pandas import DataFrame
class TxDatasetTable:
    """CSV-backed data table for one dataset, persisted at ``<path><datasetid>.csv``.

    NOTE(review): ``rowcount``/``colcount`` exist both as instance attributes
    (set in __init__/load) and as methods of the same name; once the
    attributes are set they shadow the methods on instances. Both are kept
    for interface compatibility.
    """

    def __init__(self, datasetid, path):
        self.id = []
        # Bug fix: keep the dataset id instead of discarding the argument
        # (previously ``self.datasetid = []``).
        self.datasetid = datasetid
        self.data = DataFrame([])
        self.rowcount = []
        self.colcount = []
        # Full path of the backing CSV file.
        self.__path = path + datasetid + '.csv'

    def load(self):
        """Load the CSV into self.data (if it exists) and cache its dimensions."""
        if os.path.exists(self.__path):
            self.data = pd.read_csv(self.__path)
            dims = self.data.shape
            self.rowcount = dims[0]
            self.colcount = dims[1]

    def save(self):
        """Write self.data to the table's CSV path (no header row).

        Bug fix: previously wrote to ``self.__path + tableid + '.csv'`` where
        ``tableid`` was undefined (NameError) and the path already ends in
        '.csv'; now writes to the path computed in __init__.
        """
        self.data.to_csv(self.__path, header=False)
        print('table.data is saved.')

    def get(self, x, y):
        # DataFrame [column][row] access, mirroring set().
        return self.data[x][y]

    def column(self, x):
        return self.data[:][x]

    def row(self, y):
        return self.data[y][:]

    def rowcount(self):
        # NOTE(review): shadowed by the instance attribute of the same name.
        shape = self.data.shape
        return shape[0]

    def colcount(self):
        # NOTE(review): shadowed by the instance attribute of the same name.
        shape = self.data.shape
        return shape[1]

    def set(self, x, y, value):
        self.data[x][y] = value

    def select(self, columns, rowbegin, rowend):
        # NOTE(review): plain DataFrame indexing does not accept this 2-D
        # tuple form; likely intended .iloc[rowbegin:rowend+1, columns].
        return self.data[rowbegin:rowend + 1, columns]
| true |
cb717044d964523f40f69230a99996b02350c976 | Python | nima14/Coursera_P4E_Specialization | /03. Using PythonTo Access Web Data/myurllib.py | UTF-8 | 220 | 2.578125 | 3 | [] | no_license | import urllib.request, urllib.parse, urllib.error
url = 'http://data.pr4e.org/romeo.txt'
# Bug fix: the original called urllib.request.urlopen(url) twice, issuing
# two HTTP requests for the same resource; fetch the body once and reuse it.
fhand = urllib.request.urlopen(url)
data = fhand.read()
print(data)  # raw bytes of the whole document
for line in data.splitlines():
    print(line.decode().strip())
871771dbd4036f9542ee9ece3b16511942c328dc | Python | anaswara-97/python_project | /function/func_with_args.py | UTF-8 | 68 | 3.140625 | 3 | [] | no_license | def add(n1,n2):
    # Print the two operands and their sum in one line.
    print("result :",n1,"+",n2," = ",n1+n2)
# Demo call.
add(3,5)
639c186cdda26133a724fb94e9f969747486a42a | Python | pwdemars/projecteuler | /josh/Problems/69.py | UTF-8 | 451 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 14 02:14:22 2017
@author: joshuajacob
"""
import numpy
num =1000000  # NOTE(review): defined but never used below
def primes_list(n):
    """Return all primes <= n, ascending (Sieve of Eratosthenes).

    Rewritten from the original, which filtered sieve candidates through an
    obscure 6k±1 test containing an operator-precedence trap
    (``... or i == 3 and i < n+1``) and recovered the primes via
    element-wise multiplication. This is the canonical boolean sieve.
    """
    if n < 2:
        return []
    sieve = numpy.ones(n + 1, dtype=numpy.bool_)
    sieve[:2] = False  # 0 and 1 are not prime
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            # Cross out every multiple of i starting at 2*i.
            sieve[2 * i::i] = False
    return list(numpy.nonzero(sieve)[0])
# Demo: all primes up to 1020.
print(primes_list(1020))
| true |
6d6bcf131ff76e11767ac1db028b3276c5c4c4b4 | Python | orriborri/AdventOfCode | /day10/main.py | UTF-8 | 699 | 3.09375 | 3 | [] | no_license |
from collections import defaultdict
def readfile():
    """Parse input.txt into a list of ints, one per line (no trailing newline)."""
    with open("input.txt", "r") as handle:
        return [int(token) for token in handle.read().split('\n')]
arr = readfile()
arr.sort()
arr2 = arr.copy()
# Prepend the outlet (0 jolts) and append the device adapter (max + 3).
arr = [0] + arr + [arr[-1] + 3]
i = 0
one = 0
three = 0
# Part 1: count 1-jolt and 3-jolt gaps between consecutive adapters.
while(i+1 < len(arr)):
    diff = arr[i+1]-arr[i]
    if(diff > 3):
        break
    if(diff == 1):
        one += 1
    elif(diff == 3):
        three += 1
    i += 1
print((one)*(three))
# No idea why this works, got the quite a lot of help from reddit
# NOTE(review): dyn[i] appears to count adapter arrangements reaching
# joltage i, summing ways from i-1, i-2, i-3 when those adapters exist;
# the [1, 2] seed values are unexplained — verify before reuse.
dyn = [1] + [2] + [0] + [0] * (max(arr2)-1)
for i in arr2:
    for j in [1, 2, 3]:
        if (i-j in arr2):
            dyn[i] += dyn[i-j]
print(dyn[-2])
| true |
71293520f2ef67c14b0fa7a2ffa9390751b693b1 | Python | KellyDeveloped/git-issue | /Git-Issue/git_issue/comment/comment.py | UTF-8 | 1,122 | 2.875 | 3 | [] | no_license | from git_issue.gituser import GitUser
from git_issue.utils import date_utils
from git_issue.utils.json_utils import JsonConvert
import uuid as unique_identifier
@JsonConvert.register
class Comment(object):
    """A single issue comment: text, author, creation date, and a unique id.

    The default date is the current time in UTC (via date_utils) rather than
    the local system time: contributors may be in any timezone, and using
    local time could make a later comment sort before an earlier one after
    the repository is synchronised.
    """

    def __init__(self, comment: str="", user: GitUser=None, date=None, uuid=None):
        self.comment = comment
        if user is None:
            user = GitUser()
        self.user = user
        if date is None:
            date = date_utils.get_date_now()
        self.date = date
        if uuid is None:
            uuid = unique_identifier.uuid4().int
        self.uuid = uuid
dd28c5b9cb528c0607027dacb2d9cb0c7281f6a2 | Python | vietanh125/cds_scripts | /test_mpu.py | UTF-8 | 2,569 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import Imu
from math import sin, asin,sqrt, atan2, pi
import time
# Gyro calibration accumulators (used by the commented-out fusion path in imu_cb).
gyro_x_cal = 0
gyro_y_cal = 0
gyro_z_cal = 0
# Integrated attitude estimates for the commented-out fusion path.
angle_pitch = 0
angle_roll = 0
# Number of initial messages reserved for calibration in the commented-out path.
skip = 1001
angle_pitch_output = 0
angle_roll_output = 0
set_gyro_angles = False
# Running extremes of the accelerometer-derived pitch, updated in imu_cb.
max_value = -1000000
min_value = 1000000
def imu_cb(imu):
    # IMU subscriber callback (Python 2 / ROS): compute pitch from the
    # accelerometer only and track the minimum/maximum pitch seen so far.
    # The gyro/accelerometer fusion variant below is commented out.
    # global skip, gyro_x_cal, gyro_y_cal, gyro_z_cal, angle_pitch, angle_roll, set_gyro_angles, angle_pitch_output, angle_roll_output
    # # get data from imu
    # gyro_x = imu.angular_velocity.x
    # gyro_y = imu.angular_velocity.y
    # gyro_z = imu.angular_velocity.z
    global min_value, max_value
    acc_x = imu.linear_acceleration.x
    acc_y = imu.linear_acceleration.y
    acc_z = imu.linear_acceleration.z
    # Pitch in degrees: atan2 of forward acceleration against the y-z magnitude.
    pitch = (atan2(acc_x, sqrt(acc_y * acc_y + acc_z * acc_z)) * 180) / pi
    max_value = max(pitch, max_value)
    min_value = min(pitch, min_value)
    print pitch, min_value, max_value
# #setup
# if skip > 1:
# gyro_x_cal += gyro_x
# gyro_y_cal += gyro_y
# gyro_z_cal += gyro_z
# skip -= 1
# return
# elif skip == 1:
# gyro_x_cal /= 1000
# gyro_y_cal /= 1000
# gyro_z_cal /= 1000
# skip -= 1
# return
# # substract offset values from raw gyro values
# gyro_x -= gyro_x_cal
# gyro_y -= gyro_y_cal
# gyro_z -= gyro_z_cal
# # gyro angle calculation: 0.000508905 = 1 / (30Hz x 65.5)
# angle_pitch += gyro_x * 0.000508905
# angle_roll += gyro_y * 0.000508905
# # 0.000008882 = 0.000508905 * pi / 180
# angle_pitch += angle_roll * sin(gyro_z * 0.000008882)
# angle_roll -= angle_pitch * sin(gyro_z * 0.000008882)
# acc_total_vector = sqrt((acc_x*acc_x)+(acc_y*acc_y)+(acc_z*acc_z))
# angle_pitch_acc = asin(acc_y/acc_total_vector) * 57.296
# angle_roll_acc = asin(float(acc_x/acc_total_vector)) * (-57.296)
# angle_pitch_acc -= 0.0
# angle_roll_acc -= 0.0
# if set_gyro_angles:
# angle_pitch = angle_pitch * 0.9996 + angle_pitch_acc * 0.0004
# angle_roll = angle_roll * 0.9996 + angle_roll_acc * 0.0004
# else:
# angle_pitch = angle_pitch_acc
# angle_roll = angle_roll_acc
# set_gyro_angles = True
# angle_pitch_output = angle_pitch_output * 0.9 + angle_pitch * 0.1
# angle_roll_output = angle_roll_output * 0.9 + angle_roll * 0.1
# print 'angle = ', angle_pitch_output
# Subscribe to the MPU-9250 IMU topic, start the node, and spin until shutdown.
imu_sub = rospy.Subscriber('/mpu_9250/imu', Imu, imu_cb, queue_size=1)
rospy.init_node('test')
rospy.spin()
| true |
1ff6166188ab309cfb293cdec847204aaa69647d | Python | reint-fischer/MAIOproject | /computedistance.py | UTF-8 | 2,861 | 2.78125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Chance pair distance timeseries
Created on Sat Oct 12 13:42:52 2019
@author: Gebruiker
"""
import numpy as np
import pandas as pd
def ComputeDistance(ID1, ID2, Data_Mediterrenean):
    """Separation time series for a chance pair of drifters.

    Data_Mediterrenean rows: [0]=drifter ID, [1]=latitude, [2]=longitude,
    [3]=time index (one column per observation).

    Returns (distance, time, d1, d2, t1, t2, mind): the full separation
    series and its time axis, the backward branch (d1, t1) running from
    the minimum separation towards earlier times, the forward branch
    (d2, t2), and the index of the minimum. Each branch is truncated at
    the first gap in its time axis.
    NOTE(review): the separation is sqrt(dlat^2 + dlon^2) in coordinate
    units (degrees), not kilometres as the original comment claimed.
    """
    track1, track2 = [], []
    # Collect (lat, lon, time) rows for each of the two drifter IDs.
    for col in range(len(Data_Mediterrenean[0])):
        record = [Data_Mediterrenean[1, col], Data_Mediterrenean[2, col], Data_Mediterrenean[3, col]]
        if Data_Mediterrenean[0, col] == ID1:
            track1.append(record)
        if Data_Mediterrenean[0, col] == ID2:
            track2.append(record)
    track1 = np.asarray(track1)
    track2 = np.asarray(track2)
    distance, time = [], []
    # Pair up observations of the two drifters taken at the same timestamp.
    for p in track1:
        for q in track2:
            if p[2] == q[2]:
                distance.append(np.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2))
                time.append(p[2])
    mind = distance.index(min(distance))
    # Backward branch: reversed series up to (and including) the minimum.
    d1 = distance[mind::-1]
    t1 = time[mind::-1]
    # Forward branch: series from the minimum onwards.
    d2 = distance[mind:]
    t2 = time[mind:]
    # Truncate each branch at the first discontinuity in its time axis
    # (backward steps must decrease by 1, forward steps increase by 1).
    for n in range(len(t1) - 1):
        if t1[n] - 1 != t1[n + 1]:
            t1, d1 = t1[:n], d1[:n]
            break
    for n in range(len(t2) - 1):
        if t2[n] + 1 != t2[n + 1]:
            t2, d2 = t2[:n], d2[:n]
            break
    return distance, time, d1, d2, t1, t2, mind
if __name__ == "__main__":
    # Load drifter observations and the list of chance pairs, then write
    # one backward and one forward separation series per pair to CSV.
    nd = np.genfromtxt('Data/MedSeaIDs.txt',delimiter=',')
    pairs = np.genfromtxt('Data/UnPair.txt', delimiter=',')
    for i in range(len(pairs)):
        d,t,d1,d2,t1,t2,mind = ComputeDistance(pairs[i,0],pairs[i,1],nd)
        np.savetxt('Data/BackwardsDistances/BDPair{0}.csv'.format(i),np.asarray((d1,t1)),delimiter = ',')
        np.savetxt('Data/ForwardDistances/FDPair{0}.csv'.format(i),np.asarray((d2,t2)),delimiter = ',')
| true |
a16c893cca35484d1adb7eb026ebf3e979c34abf | Python | itsmenick212/algorithm-in-leetcode | /lc_problems/137.SingleNumberII.py | UTF-8 | 1,568 | 3.734375 | 4 | [] | no_license | from typing import List
class Solution:
    """LeetCode 137 variants: find the element appearing once when every
    other element appears k times, using per-bit state machines."""

    def singleNumber(self, nums: List[int]) -> int:
        """Every other element appears exactly 3 times; return the single one.

        Per bit, (a, b) counts occurrences mod 3 through the states
        00 -> 01 -> 10 -> 00; after processing, b holds the bits whose
        count is 1 mod 3, i.e. the single number.
        """
        a, b = 0, 0
        for n in nums:
            b = (b ^ n) & ~a
            a = (a ^ n) & ~b
        return b

    def singleNumber_five(self, nums: List[int]) -> int:
        """Every other element appears exactly 5 times; return the single one.

        Per bit, (a, b, c) cycles 000 -> 001 -> 010 -> 011 -> 100 -> 000;
        c ends up holding the bits whose count is 1 mod 5.
        """
        a, b, c = 0, 0, 0
        for n in nums:
            b = b ^ (n & c)
            c = (n ^ c) & ~a
            a = (n ^ a) & ~c & ~b
        return c

    def singleNumber_seven(self, nums: List[int]) -> int:
        """Intended 7-occurrence variant (states 000 ... 110 -> 000).

        NOTE(review): looks unfinished — the (a, b, c) = (1, 0, 1)
        initialisation only affects bit 0, and the transition does not
        appear to return every bit's state to zero after 7 occurrences;
        verify before relying on the result.
        Bug fix: removed a leftover debug print from inside the loop.
        """
        a, b, c = 1, 0, 1
        for n in nums:
            old_b = b
            b = (c & b) ^ n
            c = (n ^ c) & ~(a & old_b)
            a = (a ^ n) & ~b & ~c
        return c
solution = Solution()
# NOTE(review): 3 appears seven times and 0 once here, so the intended output is 0.
print(solution.singleNumber_seven([3,0,3,3,3,3,3,3]))
| true |
d6560b5440923692c2775cef4781d5dd42ed9791 | Python | ashleighyslop/CFG | /session_2/arrays.py | UTF-8 | 734 | 3.328125 | 3 | [] | no_license | my_list = ['pc', 'clothes', 'food']
#for items in my_list:
# message = 'hello '
# print message + items
#print 'done shopping'
#print 'xxxxxxx ' + message
#print my_list[2]
#for x in range (0,9):
# print x
available_money = 300
running_total = 0
items_bought = []
money_spent =0
for item in my_list:
if (item =="pc"):
running_total = running_total + 240
elif item =='clothes' :
running_total = running_total + 50
elif item == 'food':
running_total = running_total + 20
if (running_total > available_money):
break
else :
items_bought.append(item)
money_spent = running_total
print(items_bought)
print 'money left ' + str(available_money - money_spent)
| true |
dbfeb95bec36e20049967d240db47a8df58c96f2 | Python | heihachi/Coding-Projects | /Python/upload.py | UTF-8 | 2,251 | 2.78125 | 3 | [] | no_license | import ClientForm
import urllib2
# ClientForm demo script (Python 2, third-party ClientForm library):
# fetch an upload form, inspect its controls, and submit it.
request = urllib2.Request(
    "http://jamez.dyndns.org/?p=custom&sub=upload")
response = urllib2.urlopen(request)
forms = ClientForm.ParseResponse(response, backwards_compat=False)
response.close()
## f = open("example.html")
## forms = ClientForm.ParseFile(f, "http://example.com/example.html",
##                              backwards_compat=False)
## f.close()
form = forms[0]
print form # very useful!
# A 'control' is a graphical HTML form widget: a text entry box, a
# dropdown 'select' list, a checkbox, etc.
# Indexing allows setting and retrieval of control values
## original_text = form["comments"] # a string, NOT a Control instance
## form["comments"] = "Blah."
# Controls that represent lists (checkbox, select and radio lists) are
# ListControl instances. Their values are sequences of list item names.
# They come in two flavours: single- and multiple-selection:
## form["favorite_cheese"] = ["brie"] # single
## form["cheeses"] = ["parmesan", "leicester", "cheddar"] # multi
# equivalent, but more flexible:
## form.set_value(["parmesan", "leicester", "cheddar"], name="cheeses")
# Add files to FILE controls with .add_file(). Only call this multiple
# times if the server is expecting multiple files.
# add a file, default value for MIME type, no filename sent to server
## form.add_file(open("data.dat"))
# add a second file, explicitly giving MIME type, and telling the server
# what the filename is
## form.add_file(open("data.txt"), "text/plain", "data.txt")
# All Controls may be disabled (equivalent of greyed-out in browser)...
control = form.find_control("comments")
print control.disabled
# ...or readonly
print control.readonly
# readonly and disabled attributes can be assigned to
control.disabled = False
# convenience method, used here to make all controls writable (unless
# they're disabled):
form.set_all_readonly(False)
control= form.find_control(label="accepted")
print "this is control! "
print control
request2 = form.click() # urllib2.Request object
# NOTE(review): the except clause rebinds response2 to the HTTPError object
# (HTTPError is file-like in urllib2), so the prints below work either way.
try:
    response2 = urllib2.urlopen(request2)
except urllib2.HTTPError, response2:
    pass
print response2.geturl()
print response2.info() # headers
print response2.read() # body
response2.close()
| true |
2255331631511c9878ed26be29f8a7d81c8a4d02 | Python | garydoranjr/mikernels | /src/convert_multiclass.py | UTF-8 | 2,033 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
import os
import numpy as np
import pylab as pl
from collections import defaultdict
# Output directory for the generated one-vs-one dataset files.
DATA_DIR = 'data'
NAT = 'data/natural_scene.data'
NAT_NAMES = 'data/natural_scene.names'
# Multi-label scene classes; the last NAT_N columns of the data file are
# their 0/1 indicator columns.
CLASSES = [
    'desert',
    'mountains',
    'sea',
    'sunset',
    'trees',
]
NAT_N = len(CLASSES)
def main():
    # Generate one binary dataset per ordered class pair (i, j): label 1
    # iff class i is present AND class j is absent. Python 2 script.
    with open(NAT, 'r') as f:
        data = [line.strip().split(',') for line in f]
    with open(NAT_NAMES, 'r') as f:
        names_file = list(f)
    # Drop the per-class label declarations from the names file.
    names_file = ''.join(names_file[:-NAT_N])
    # Boolean label matrix: one row per example, one column per class.
    labels = np.array([d[-NAT_N:] for d in data], dtype=int).astype(bool)
    for i in range(1, NAT_N + 1):
        for j in range(1, NAT_N + 1):
            if i == j: continue
            ci = CLASSES[-i]
            cj = CLASSES[-j]
            basename = ('%s_no_%s' % (ci, cj))
            namesfilename = os.path.join(DATA_DIR, basename + '.names')
            datafilename = os.path.join(DATA_DIR, basename + '.data')
            datalines = []
            pos = 0
            for di, li in zip(data, labels):
                # Features only (strip the NAT_N label columns), then append
                # the binary label "class i present and class j absent".
                datalines.append(','.join(di[:-NAT_N]))
                label = int(li[-i] & (li[-j] == 0))
                if label > 0: pos += 1
                datalines[-1] = ('%s,%d\n' % (datalines[-1], label))
            datastr = ''.join(datalines)
            with open(namesfilename, 'w+') as f: f.write(names_file)
            with open(datafilename, 'w+') as f: f.write(datastr)
    exit()
    # NOTE(review): everything below is dead code (after exit()); it computed
    # class co-occurrence statistics, with labels keyed by example id.
    labels = np.array(dict([(int(d[0]), d[-NAT_N:]) for d in data]).values(), dtype=int).astype(bool)
    counts = np.array(
        [[np.sum(labels[:, i] & labels[:, j]) for i in range(NAT_N)]
         for j in range(NAT_N)])
    counts2 = np.array(
        [[np.sum(labels[:, i] & (labels[:, j] == 0)) for i in range(NAT_N)]
         for j in range(NAT_N)])
    counts3 = np.array(
        [[np.sum(labels[:, i] == 0) for i in range(NAT_N)]
         for j in range(NAT_N)])
    pos = counts2
    neg = (counts + counts3)
    print pos
    print neg
    print np.sort(pos.astype(float) / neg)
# Script entry point.
if __name__ == '__main__':
    main()
| true |