blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d842a463d752ea0b51d54038155f765f14790a2b | Python | rtnF14/rdef | /rdef.py | UTF-8 | 713 | 2.59375 | 3 | [] | no_license | import urllib2
# Python 2 script: look up a word's definitions via the Merriam-Webster
# Collegiate API and print each <dt> (definition) entry.
import sys
from xml.dom import minidom
#x = raw_input("Input Word : ")
#print 'Looking definition for word "' + x + '"'
# Word to look up comes from the first command-line argument.
x = sys.argv[1]
# NOTE(review): API key is hard-coded; consider moving it out of source.
url = "http://www.dictionaryapi.com/api/v1/references/collegiate/xml/" + x + "?key=473f46b5-fd91-4d80-b0d0-dd19502e1022"
# NOTE(review): this response is never used -- `r` below re-fetches the same
# URL through an explicit opener with proxies disabled.
f = urllib2.urlopen(url)
proxy_handler = urllib2.ProxyHandler({})  # empty mapping disables env proxies
opener = urllib2.build_opener(proxy_handler)
r = opener.open(url)
xml_fetched = r.read()
# Replace <d_link> tags with '|' before parsing, presumably so each <dt>
# keeps a single readable text child -- TODO confirm against API output.
preprocessed_xml = xml_fetched.replace('<d_link>','|')
preprocessed_xml = preprocessed_xml.replace('</d_link>','|')
xmldoc = minidom.parseString(preprocessed_xml)
itemlist = xmldoc.getElementsByTagName('dt')
for s in itemlist:
    print(' '+s.childNodes[0].nodeValue)
print r.read() | true |
e6cb33ad7cb91b1c7d0a763f07e33851c735dd40 | Python | MengSunS/daily-leetcode | /sweeping_line/1229.py | UTF-8 | 522 | 2.609375 | 3 | [] | no_license | class Solution:
def minAvailableDuration(self, A: List[List[int]], B: List[List[int]], k: int) -> List[int]:
    """Return the earliest shared window [start, start + k] between the two
    slot lists, or [] when no common window of length k exists.

    Sweep line over the merged slots: track the furthest end seen so far;
    a later slot that still fits k units before that end overlaps a slot
    from the other list (slots within one list never overlap).
    """
    candidates = sorted(slot for slot in A + B if slot[1] - slot[0] >= k)
    if not candidates:
        return []
    furthest_end = candidates[0][1]
    for start, end in candidates[1:]:
        if start + k <= furthest_end:
            return [start, start + k]
        if end >= furthest_end:
            furthest_end = end
    return []
| true |
1465c4a18e67922f982ef108d60fd99560e1fc63 | Python | Has3ong/OpenCV-SimpleProject | /Project1/src/Section5-1.py | UTF-8 | 919 | 2.625 | 3 | [] | no_license | import numpy as np
import cv2
import os
def null(x):
    """No-op trackbar callback (cv2.createTrackbar requires an onChange)."""
    pass
def ImageProcessing():
    """Interactively threshold 'document.jpg' with an OpenCV trackbar.

    Loads the image as grayscale, resizes it to 600 px height (keeping the
    aspect ratio), then loops: read the trackbar position, apply a binary
    threshold, and redisplay, until ESC is pressed.
    """
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    imgsrc = BASE_DIR + '/document.jpg'
    img = cv2.imread(imgsrc, cv2.IMREAD_GRAYSCALE)
    # scale so the image is 600 px tall, width follows proportionally
    r = 600.0 / img.shape[0]
    dim = (int(img.shape[1] * r), 600)
    img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    window = 'Window'
    TrackBar = 'TrackBar'
    cv2.namedWindow(window)
    # trackbar selects the threshold value in [0, 255], starting at 127
    cv2.createTrackbar(TrackBar, window, 127, 255, null)
    Threshold = np.zeros(img.shape, np.uint8)  # reused output buffer
    while 1:
        TrackBarPos = cv2.getTrackbarPos(TrackBar, window)
        cv2.threshold(img, TrackBarPos, 255, cv2.THRESH_BINARY, Threshold)
        cv2.imshow(window, Threshold)
        k = cv2.waitKey(0)
        if k == 27:  # ESC quits
            cv2.destroyAllWindows()
            # extra waitKey presumably lets the window actually close on
            # some platforms -- TODO confirm
            cv2.waitKey(1)
            break
    return
if __name__=='__main__':
ImageProcessing() | true |
5e2617a26fbca460e452d620d5d8fc4e589a9071 | Python | GauthamAjayKannan/guvi | /indexmatch.py | UTF-8 | 167 | 2.65625 | 3 | [] | no_license | # your code goes here
#indexmatch
# Read a count (unused) and a space-separated list of ints; print every
# position i where l[i] == i (fixed points), or -1 when there are none.
n=input()
l=list(map(int,input().split(" ")))
t=enumerate(l)
# keep only the indices of (index, value) pairs whose index equals the value
l=[i[0] for i in t if i[0]==i[1]]
if l==[]:
    print(-1)
else:
    print(*l)
| true |
e853ab608958f1a6cbb6add83f165d9a4bb211f6 | Python | RShveda/pygame-practice | /catch-ball-game/test_game.py | UTF-8 | 1,028 | 3 | 3 | [] | no_license | """
Tests can be run from command line: python -m unittest
"""
import unittest
from models import load_scores, save_scores
# Models tests
class LoadScores(unittest.TestCase):
    """Tests for models.load_scores."""
    def test_output(self):
        """load_scores must always return exactly three score entries."""
        scores = load_scores()
        self.assertTrue(len(scores) == 3)
class SaveScores(unittest.TestCase):
    """Tests for models.save_scores: the file is only written for valid data
    (three entries, going by the cases below -- confirm in models.py)."""
    def test_file_creation(self):
        """Saving three valid entries creates scores.json."""
        import os.path
        if os.path.isfile("scores.json"):
            os.remove("scores.json")
        data = [
            {"name": "empty", "score": 0},
            {"name": "empty", "score": 0},
            {"name": "empty", "score": 0},
        ]
        save_scores(data)
        self.assertTrue(os.path.isfile("scores.json"))
    def test_with_invalid_data(self):
        """Saving only two entries must not create scores.json."""
        import os.path
        if os.path.isfile("scores.json"):
            os.remove("scores.json")
        data = [
            {"name": "empty", "score": 0},
            {"name": "empty", "score": 0},
        ]
        save_scores(data)
        self.assertFalse(os.path.isfile("scores.json"))
| true |
254e8d2eb025038f3dad437ae31b697fe0342118 | Python | davidcGIThub/pythonControls | /ballOnBeam/bobParam.py | UTF-8 | 1,476 | 2.6875 | 3 | [] | no_license | # Ball on Beam Parameters file
# Ball-on-beam parameter file: physical constants, initial conditions,
# simulation settings, and PD controller gains used by the other scripts.
import numpy as np
# Physical parameters of the ball and beam system
m1 = 0.35 # Mass of ball, kg
m2 = 2.0 # Mass of beam, kg
L = .5 # Length of Beam, m
g = 9.8 # gravity constant, m/s^2
#Uncertain parameters: perturbed copies (up to +/-20%) of the true values
uncertian = False
sign = -1
if(np.random.rand() > .5 ):
    sign = 1
m1_ = m1 + .2*m1*np.random.rand()*sign
m2_ = m2 + .2*m2*np.random.rand()*sign
L_ = L + .2*L*np.random.rand()*sign
g_ = g
# parameters for animation
w = .01 # width of beam, m
D = .09
# Initial Conditions
theta0 = 0 # angle of beam, radians
thetadot0 = 0 # angular speed beam, radians/sec
z0 = L/2 # positon of ball, m
zdot0 = 0 # speed of the ball, m/s
# Simulation Parameters
tstep = 0.01 # time step size for simulation
time = 20.0 # time in seconds of simulation
steps = int(time/tstep) # number of steps in simulation
# dirty derivative parameters
tr_In = 1
Zeta_In = .707
Wn_In = 2.2/tr_In
# Inner-loop PD gains computed from the physical model
KpIn = Wn_In**2 * ( (m1*L**2)/4 + (m2*L**2)/3 ) / L # control theta
KdIn = 2*Zeta_In*Wn_In * ( (m1*L**2)/4 + (m2*L**2)/3 ) / L
# NOTE(review): the computed gains above are immediately overridden by the
# hand-tuned values below.
KpIn = 126
KdIn = 9.77
#KdIn = 1.9550 # derivatice gain
#KpIn = 5.06967 # proportional gain
tr_Out = 10*tr_In
Zeta_Out = .707
Wn_Out = 2.2/tr_Out
KdOut = 2*Zeta_Out*Wn_Out / (-g)
KpOut = (Wn_Out**2)/(-g)
# NOTE(review): hand-tuned overrides of the computed outer-loop gains above.
KpOut = -.34
KdOut = -.03
#KdOut = -.0528508 # derivatice gain
#KpOut = -.0137048# proportional gain
# saturation limits
saturated = True
upper = 15
lower = -15
#desired reference
amplitude = .15
frequency = .02
offset = .25
| true |
5895b6be7a08b21dc2225c3f194dedcdf686ceb0 | Python | lacklust/coding-winter-session | /homework/oop/challenge_1.py | UTF-8 | 2,893 | 4.15625 | 4 | [
"MIT"
] | permissive | """
Create a menagerie of animals
NOTE: inheritance layer
Some suggestions:
Animal:
Dog
Tiger
Wolf
ALTERNATE:
Animal
Domesticated:
Dog
Tiger
Wild:
Wolf
Write some test code to experiment with the behavior and functionality of your code!
"""
class Animal:
    """Base class for every menagerie animal; holds common attributes."""

    def __init__(self, color, weight, breed):
        self.weight = weight
        self.breed = breed
        self.color = color

    def eat(self):
        """Print an eating message that reports the animal's weight."""
        print(f"nom nom i am {self.weight} lbs")
class Domesticated(Animal):
    """An animal that has a name and an owner."""

    def __init__(self, name, color, weight, breed, owner):
        super().__init__(color, weight, breed)
        self.owner = owner
        self.name = name
class Wild(Animal):
    """A wild animal that can contract and be cured of ailments."""

    def __init__(self, color, weight, breed):
        super().__init__(color, weight, breed)
        self.ailments = []  # diseases currently afflicting the animal

    def add_ailment(self, disease):
        """Record a new disease on this animal."""
        self.ailments.append(disease)

    def get_cured(self, index):
        """Remove and report the ailment at `index`; warn on a bad index.

        Bug fix: the original guard used `index > len(self.ailments)`, so an
        index equal to the list length slipped through and crashed in
        `list.pop` with IndexError (valid positions are 0..len-1).
        """
        if index >= len(self.ailments):
            print("not a valid illness")
            return
        print(f"removing this disease: {self.ailments.pop(index)}")
class Dog(Domesticated):
    """A pet dog that gets neutered once it has barked ten times."""

    def __init__(self, name, color, weight, breed, owner):
        super().__init__(name, color, weight, breed, owner)
        self.num_barks = 0
        self.is_neutered = False

    def bark(self):
        """Bark once; the tenth bark (and every one after) triggers neuter()."""
        print(f"woof woof american dawg {self.name}")
        self.num_barks += 1
        if self.num_barks >= 10:
            self.neuter()

    def neuter(self):
        """Neuter the dog exactly once; later calls only print a warning."""
        if self.is_neutered:
            print("already did the snip")
            return
        print("snip snip doggy")
        self.is_neutered = True
class Tiger(Domesticated):
    """A domesticated tiger that also tracks its stripe count."""

    def __init__(self, name, color, weight, breed, owner, num_stripes):
        super().__init__(name, color, weight, breed, owner)
        self.num_stripes = num_stripes

    def purr(self):
        """Print a purring message with the tiger's name upper-cased."""
        print(f"meow meow or wtv {self.name.upper()}")
class Wolf(Wild):
    """A wild wolf that can growl and report its health."""

    def __init__(self, color, weight, breed):
        super().__init__(color, weight, breed)

    def growl(self):
        """Print a growl."""
        print("growling")

    def health_status(self):
        """Report condition: "bad" once the wolf has two or more ailments."""
        if len(self.ailments) <= 1:
            print("not too bad")
        else:
            print("bad condition")
def main():
    """Demo driver: exercises the Wolf class end to end (the Dog and Tiger
    runs are kept below as commented-out examples)."""
    # TEST CODE FOR DOG
    # doggo = Dog("doge", "beige", 22, "shiba", "me")
    # for i in range(10):
    # doggo.bark()
    # doggo.neuter()
    # doggo.eat()
    # TEST CODE FOR TIGER
    # my_tiger = Tiger("rahah", "yellow", 336, "middle eastern", "aladding", 200)
    # my_tiger.purr()
    # my_tiger.eat()
    # TEST CODE FOR WOLF
    wolfy = Wolf("grayish brown", 420, "timber wolf")
    wolfy.growl()
    wolfy.add_ailment("rabies")
    wolfy.add_ailment("alzheimers")
    wolfy.health_status()      # two ailments -> "bad condition"
    wolfy.get_cured(10)        # out-of-range index -> warning only
    wolfy.get_cured(1)         # removes "alzheimers"
    wolfy.health_status()      # one ailment left -> "not too bad"
    wolfy.eat()
main()
| true |
5fd6947536ff75394c7a80a6208bc37a334ace32 | Python | Nizor22/Python-Automation | /threading/old_way.py | UTF-8 | 681 | 3.90625 | 4 | [] | no_license | import threading
import time
start = time.perf_counter()
# Sleeps the program for {sec} second(s)
def do_something(secs):
    """Demo workload: announce, sleep `secs` seconds, announce completion."""
    print(f'Sleeping {secs} second(s)...')
    time.sleep(secs)
    print(f'Done Sleeping...')
threads = []
# _ is a throw away variable(throw away=not used in a loop)
# Running the do_something(sleep) method 10 times, but actually sleeping only 1 second.
# The ten 1-second sleeps overlap, so total wall time is ~1s, not 10s.
for _ in range(10):
    t = threading.Thread(target=do_something, args=[1])
    t.start()
    threads.append(t)
# Makes sure every thread finishes before going to the rest of the program
for thread in threads:
    thread.join()
finish = time.perf_counter()
print(f'Finished in {round(finish - start, 2)} second(s)')
| true |
fc42c2529a1aba2107f497d68a9ac2796769b9f2 | Python | veronikaKochugova/algorithms | /py/task2_1.py | UTF-8 | 399 | 3.421875 | 3 | [] | no_license | # f0 = 0, f1 = 1, f2 = 2, fk = fk–1 + fk–3
# f1, f2, f3 ... fn
n = int(input())
k_array = [int(i) for i in input().split()]
def func(n):
    """Return f(n) for f(0)=0, f(1)=1, f(2)=2, f(k)=f(k-1)+f(k-3).

    Naive exponential-time recursion; fine only for small n.
    """
    return n if n <= 2 else func(n - 1) + func(n - 3)
result = list()
# First three requested values are computed directly by the recursive helper.
result.append(func(k_array[0]))
result.append(func(k_array[1]))
result.append(func(k_array[2]))
# NOTE(review): the recurrence below assumes k_array holds consecutive
# indices (k, k+1, ...) so f(k) = f(k-1) + f(k-3) can reuse earlier results;
# confirm that input contract upstream.
for k in k_array[3:]:
    result.append(result[-3] + result[-1])
print(result)
| true |
10edfd837e9dd10d234205b8e9d810ab6279c5ac | Python | ngyygm/chia-plot-copy | /chia-plot-copy.py | UTF-8 | 5,640 | 2.703125 | 3 | [] | no_license | import os, shutil, time
import platform
import ctypes
def get_free_space_mb(folder):
    """Return the free disk space of `folder`'s volume, in whole GiB.

    (Despite the legacy `_mb` name, both branches return GiB; callers in
    this script compare the result against a ~103 GiB threshold.)

    :param folder: volume path, e.g. ``D:\\`` on Windows or ``/`` elsewhere
    :return: free space in GiB (floored float)

    Bug fix: the POSIX branch previously divided by 1024 only twice,
    returning MiB while the Windows branch (and this docstring) used GiB.
    """
    if platform.system() == 'Windows':
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
        return free_bytes.value / 1024 / 1024 // 1024
    else:
        st = os.statvfs(folder)
        # bytes available to the current user -> GiB
        return st.f_bavail * st.f_frsize / 1024 / 1024 // 1024
def copy_data(file_inp_list, file_tar_list, key):
    """Move one pending chia ``.plot`` file from SSD to HDD, if any.

    Scans every target (HDD) directory for existing ``.plot`` files, then
    every source (SSD) directory for plots that are either absent from all
    targets or larger than the same-named target copy (i.e. the target copy
    is incomplete). The first pending plot is moved to the first target with
    more than 103 GiB free (presumably enough for one k32 plot -- confirm).

    Windows-only path handling: components are joined with '\\'.

    :param file_inp_list: source directories to watch
    :param file_tar_list: destination directories
    :param key: running counter used only in progress messages
    :return: 1 when a file was moved; otherwise waits ~3 minutes and returns 0
    """
    file_list_inp = []
    file_list_tar = []
    # collect [dir, name] for every .plot already present in the targets
    for file_tar in file_tar_list:
        for root, dirs, files in os.walk(file_tar):
            for item in files:
                if item[-5:] == '.plot':
                    file_list_tar.append([root, item])
    # collect source plots that still need copying
    for file_inp in file_inp_list:
        for root, dirs, files in os.walk(file_inp):
            for idx, item in enumerate(files):
                if item[-5:] == '.plot':
                    #print(root)
                    if len(file_list_tar) > 0:
                        if item not in [item[1] for item in file_list_tar]:
                            file_list_inp.append([root, item])
                        else:
                            # same name exists on a target: re-copy only if the
                            # target copy is smaller (truncated/incomplete)
                            tar_index = [item[1] for item in file_list_tar].index(item)
                            if os.path.getsize(file_list_tar[tar_index][0] + '\\' + file_list_tar[tar_index][1]) < os.path.getsize(root + '\\' + item):
                                file_list_inp.append([root, item])
                            else:
                                print(root + '\\' + item + '\t已存在于\n' + file_list_tar[tar_index][0] + '\\' + file_list_tar[tar_index][1] + '\n')
                    else:
                        file_list_inp.append([root, item])
    if len(file_list_inp) > 0:
        print('\n待拷文件有(' + str(len(file_list_inp)) + ')个:')
        for item in file_list_inp:
            print(item[0] + '\\' + item[1])
        # move only the FIRST pending file, to the first target with room
        for file_tar in file_tar_list:
            if get_free_space_mb(file_tar) > 103:
                start = time.time()
                print('\n【' + str(key) +'】开始拷贝...\t' + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + '\n' + file_list_inp[0][0] + '\\' + file_list_inp[0][1] + '\tTo\n' + file_tar + '\\' +
                      file_list_inp[0][1])
                try:
                    shutil.move(file_list_inp[0][0] + '\\' + file_list_inp[0][1], file_tar + '\\' + file_list_inp[0][1])
                except:
                    # NOTE(review): bare except swallows every error, including
                    # KeyboardInterrupt -- consider `except Exception`.
                    print('拷贝出错:' + file_list_inp[0][0] + '\\' + file_list_inp[0][1])
                #time.sleep(10)
                end = time.time()
                print('拷贝结束,耗时:{:.2f}分钟...\n'.format((end - start)/60))
                return 1
            else:
                print('【' + file_tar + '】空间不足')
        # all targets full: show an animated spinner for ~3 minutes
        print('【所有】空间均不足\n')
        for i in range(3 * 60):
            st = '.' * (i % 7) + ' ' * (6 - (i % 7))
            print('\r已存在【' + str(len(file_list_tar)) + '】个文件,等待中' + st, end='')
            time.sleep(1)
        return 0
    else:
        # nothing to copy: idle for ~3 minutes before the caller rescans
        for i in range(3 * 60):
            st = '.' * (i % 7) + ' ' * (6 - (i % 7))
            print('\r已存在【' + str(len(file_list_tar)) + '】个文件,等待中' + st, end='')
            time.sleep(1)
        return 0
def getPath():
    """Collect and validate source (SSD) and target (HDD) directory lists.

    Defaults are read from an optional ``SSD2HDD.txt`` file whose non-empty
    lines are grouped under the markers ``SSD`` and ``HDD``; additional paths
    are then prompted for interactively until an empty line is entered.
    Recurses until at least one existing source AND one existing target
    remain after filtering out non-existent paths.

    :return: (source_dirs, target_dirs)
    """
    path_inp = []
    path_tar = []
    if os.path.exists('SSD2HDD.txt'):
        with open('SSD2HDD.txt', 'r', encoding='utf-8') as f:
            data = f.read().split('\n')
            data = [item for item in data if len(item) > 0]
            # lines between 'SSD' and 'HDD' are sources; lines after 'HDD'
            # are targets
            ssd_index = data.index('SSD')
            hdd_index = data.index('HDD')
            path_inp = path_inp + data[ssd_index + 1: hdd_index]
            path_tar = path_tar + data[hdd_index + 1:]
    for inp in path_inp:
        print('默认的【监控】固态硬盘地址:' + inp)
    print()
    for tar in path_tar:
        print('默认的【存入】机械硬盘地址:' + tar)
    print()
    # print('拷贝中...' + '{:.2f}%'.format(10 / 30 * 100))
    # prompt for extra source paths until an empty line
    path1 = input('请输入【监控】固态硬盘地址:')
    while (path1 != ''):
        path_inp.append(path1)
        path1 = input('请输入【监控】固态硬盘地址:')
    print()
    # prompt for extra target paths until an empty line
    path2 = input('请输入【存入】机械硬盘地址:')
    while (path2 != ''):
        path_tar.append(path2)
        path2 = input('请输入【存入】机械硬盘地址:')
    print()
    # drop paths that do not exist on disk
    copy_inp = []
    copy_tar = []
    for inp in path_inp:
        if os.path.exists(inp):
            copy_inp.append(inp)
        else:
            print('【监控】地址【不存在】:' + inp)
    for tar in path_tar:
        if os.path.exists(tar):
            copy_tar.append(tar)
        else:
            print('【存入】地址【不存在】:' + tar)
    if len(copy_tar) > 0 and len(copy_inp) > 0:
        print()
        return copy_inp, copy_tar
    else:
        # nothing usable: start over
        return getPath()
if __name__ == '__main__':
    # Gather and echo the validated source/target directories, then loop
    # forever below moving plots as they appear (copy_data returns 0 or 1,
    # so `key` never decreases and the while-loop never exits).
    path_inp, path_tar = getPath()
    for inp in path_inp:
        print('【监控】地址:' + inp)
    print()
    for tar in path_tar:
        print('【存入】地址:' + tar)
    print()
    key = 1
while key > 0:
key += copy_data(path_inp, path_tar, key) | true |
4f3d8df3750b169a9fd4a18f515c4a6209507c55 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_2/shanna/program.py | UTF-8 | 726 | 3.328125 | 3 | [] | no_license | #!/usr/bin/python
def solve(sequence):
    """Return the minimum number of whole-prefix flips needed to make the
    pancake stack `sequence` (True = happy side up) entirely True.

    Simulates the greedy strategy, mutating `sequence` in place.
    """
    flips = 0
    while not all(sequence):
        if not any(sequence):
            # an all-sad stack needs exactly one more full flip
            return flips + 1
        # a happy prefix costs 2 flips to clear the group below it;
        # a sad prefix is flipped up directly for 1
        flips += 2 if sequence[0] else 1
        # mark the leading run of False values (after any True prefix) True
        pos = sequence.index(False)
        while pos < len(sequence) and not sequence[pos]:
            sequence[pos] = True
            pos += 1
    return flips
def main():
    """Read T test cases ('+'/'-' strings) and print 'Case #i: flips'.

    '+' maps to True (happy side up) before handing the stack to solve().
    """
    t = int(input())
    for i in range(1, t + 1):
        sequence = input().strip()
        sequence = [True if c == '+' else False for c in sequence]
        print('Case #{}: {}'.format(i, solve(sequence)))
if __name__ == '__main__':
    main()
| true |
a479219ee3b6685049adcb4a1acd5e4441d2a3c6 | Python | ArnarJonasson/Ejercicios | /Ejercicio1/ejercicio1.py | UTF-8 | 439 | 3.78125 | 4 | [] | no_license |
numbers_list = [46, 56, 112, 28, 17, 496, 23, 555, 8128, 156, 6544, 1235455]
def check_if_perfect(numbers_list):
    """Classify each number by comparing it to the sum of its proper
    divisors, printing one line per number (defective / abundant / perfect).
    """
    for n in numbers_list:
        divisor_sum = sum(d for d in range(1, n) if n % d == 0)
        if divisor_sum < n:
            print('Number is defective')
        if divisor_sum > n:
            print('Number is abundant')
        if divisor_sum == n:
            print('Number is perfect')
check_if_perfect(numbers_list)
| true |
0b782d82f641ffe563afa0bff2170c36d080423d | Python | kaizhiyu/libharmo.github.io | /code/py/py_code/UserEmail/Test.py | UTF-8 | 110 | 2.625 | 3 | [] | no_license | import requests
if __name__ == '__main__':
    # Smoke test of network access: fetch the YouTube homepage.
    r = requests.get("https://www.youtube.com")
print(r.text)
| true |
7a78a3ebdf3c6cd82e3a8db517a582c19524e886 | Python | seasign10/TIL | /00_startcamp/03_day/naver_rank.py | UTF-8 | 1,033 | 2.96875 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
# Scrape Naver's realtime-search-keyword widget and save it to a text file.
url = 'https://www.naver.com/'
# Send the request and receive the HTML document
html = requests.get(url).text
# Parse it with BeautifulSoup
soup = BeautifulSoup(html, 'html.parser')
# Use the select method to obtain the list of keyword elements
rank = soup.select('#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > li > a .ah_k')
for i in rank:
    print(i.text)
# Write the extracted list to a text file using a with statement
with open('naver_rank.txt', 'w') as f:
    for i in rank:
        f.write(f'{i.text}\n')
# No-op expression statement below: leftover copied selector kept for reference.
'#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > li:nth-child(15) > a > span.ah_r'
'#PM_ID_ct > div.header > div.section_navbar > div.area_hotkeyword.PM_CL_realtimeKeyword_base > div.ah_roll.PM_CL_realtimeKeyword_rolling_base > div > ul > li:nth-child(15) > a > span.ah_k' | true |
13790a34fab653e594a81a5be3bd4f1c45b8e258 | Python | Pingxia/Image-denoise-and-segmentation | /code/em.py | UTF-8 | 5,301 | 2.65625 | 3 | [] | no_license | from io_data import read_data, write_data
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as imageplt
import cv2
import sys
import warnings
'''
EM algorithm
input params:
pixels - array of values, H - img height, W - img width, k - number of clusters
output:
segments - segments of original image
'''
warnings.filterwarnings("ignore")
def EM(pixels, H, W, k):
    """Fit a k-component Gaussian mixture to the pixels with EM, then split
    the image into foreground (component 0) and background (the rest).

    :param pixels: (H*W, 3) array of per-pixel channel values
    :param H: image height
    :param W: image width
    :param k: number of mixture components (2 for fg/bg)
    :return: (mask uint8 H*W*3, foreground float32, background float32)
    """
    n, c = pixels.shape # n: number of pixels, c: number of channels: 3
    (mu, sigma, pi) = init(pixels, n, c, k)
    R = np.zeros((n, k))  # responsibilities
    loglikes = []
    loglike_prev = -np.infty
    num_step = 0
    while True:
        num_step = num_step + 1
        ## expectation step
        R, loglike = expectation(R, pixels, mu, sigma, pi, k)
        loglikes.append(loglike)
        ## maximization step
        (mu, sigma, pi) = maximization(R, pixels, mu, sigma, pi, k, n)
        ## check for convergence
        if (meet_convergence(loglike_prev, loglike)):
            break
        loglike_prev = loglike
    # plot_log_likehoods(loglikes)
    # Assign pixels to guassian with higher prob
    mask = np.full((H, W, 3), 0, dtype=np.uint8)
    foreground = np.full((H, W, 3), 0, dtype=np.float32)
    background = np.full((H, W, 3), 0, dtype=np.float32)
    for i in range(H):
        for j in range(W):
            # NOTE(review): row-major indexing is normally i*W + j; with
            # (i-1)*W + j, row 0 maps to negative indices (Python wraps to
            # the last row), shifting the whole mask by one row -- verify
            # against the read_data layout.
            idx = (i-1) * W + j
            pixel = pixels[idx]
            pixel_segment_id = np.argmax(R[idx])
            if (pixel_segment_id == 0):
                ## assign to foreground
                mask[i,j,] = [255,255,255]
                foreground[i,j,] = pixels[idx]
            else:
                ## assign to background
                mask[i,j,] = [0,0,0]
                background[i,j,] = pixels[idx]
    return (mask, foreground, background)
# Initialize
def init(pixels, n, c, k):
    """Randomly initialize mixture parameters.

    Means are k distinct rows sampled from the data, covariances are
    identity matrices, and mixing weights are uniform.

    :return: (mu (k, features), sigma list of k (c, c), pi list of k floats)
    """
    chosen_rows = np.random.choice(n, k, False)
    mu = pixels[chosen_rows, :]
    sigma = [np.eye(c)] * k
    pi = [1. / k] * k
    return (mu, sigma, pi)
'''
E-step: compute responsibility matrix
'''
def expectation(R, pixels, mu, sigma, pi, k):
    """E-step: fill R with per-pixel responsibilities and return the data
    log-likelihood under the current mixture.

    :return: (row-normalized R, loglike)
    """
    # NOTE(review): the loop variable shadows the parameter `k`; it works
    # because range() captures the original value first, but is fragile.
    for k in range(k):
        R[:, k] = pi[k] * gaussian(pixels, mu[k], sigma[k])
    # log-likelihood before normalization: sum_i log sum_k pi_k N(x_i)
    loglike = np.sum(np.log(np.sum(R, axis = 1)))
    # normalize rows so each pixel's responsibilities sum to 1
    R = R / np.sum(R, axis = 1)[:,None]
    # rows with zero total probability divide 0/0 -> NaN; zero them out
    R[np.isnan(R)]=0
    return R, loglike
'''
M-step: re-estimate mu, sigma of each gaussian, using the responsibility matrix
'''
def maximization(R, pixels, mu, sigma, pi, k, n):
    """M-step: re-estimate mu, sigma, and pi of each component from the
    responsibility matrix R.

    :return: (mu, sigma, pi) updated in place and returned
    """
    # effective number of pixels assigned to each component
    N_k = np.sum(R, axis = 0)
    # NOTE(review): the loop variable shadows the parameter `k` (works since
    # range() captures the value first), and np.matrix is deprecated.
    for k in range(k):
        ## mu: responsibility-weighted mean of the pixels
        mu[k] = 1. / N_k[k] * np.sum(R[:, k] * pixels.T, axis = 1).T
        x_mu = np.matrix(pixels - mu[k])
        ## covariance: responsibility-weighted outer products
        sigma[k] = np.array(1 / N_k[k] * np.dot(np.multiply(x_mu.T, R[:, k]), x_mu))
        ## pi: fraction of mass assigned to this component
        pi[k] = N_k[k] / n
    return (mu, sigma, pi)
'''
Check whether convergence is met
'''
def meet_convergence(prev, cur):
    """True when the log-likelihood change is within the module-level
    CONVERGENCE_THRESHOLD."""
    return abs(cur - prev) <= CONVERGENCE_THRESHOLD
def gaussian(X, mu, sigma):
    """Multivariate normal density N(x; mu, sigma) for each row of X.

    :param X: (n, c) data matrix
    :param mu: (c,) mean vector
    :param sigma: (c, c) covariance matrix
    :return: (n,) array of densities

    Bug fix: the normalization constant previously chained `**` operators;
    since `**` is right-associative, it computed
    det(sigma) ** -(0.5 ** ((2*pi) ** (-c/2))) instead of
    det(sigma)**-0.5 * (2*pi)**(-c/2).
    """
    n, c = X.shape
    # 1 / sqrt((2*pi)^c * det(sigma))
    norm_const = np.linalg.det(sigma) ** -0.5 * (2 * np.pi) ** (-c / 2.)
    diff = X - mu
    # exp(-0.5 * (x - mu)^T sigma^-1 (x - mu)) evaluated row-wise
    exponent = np.exp(-.5 * np.einsum('ij, ij -> i', diff, np.dot(np.linalg.inv(sigma), diff.T).T))
    prob = norm_const * exponent
    return prob
def print_params(mu, sigma, pi, R):
    """Debug helper: dump the current mixture parameters and responsibilities."""
    print ("mu:")
    print (mu)
    print ("sigma")
    print (sigma)
    print ("pi:")
    print (pi)
    print ("R:")
    print (R)
def plot_log_likehoods(loglikes):
    """Plot log-likelihood vs. EM iteration (call site in EM is commented out)."""
    # plot convergence of Q as we progress through EM
    plt.figure(1)
    plt.plot(loglikes)
    plt.xlabel("Number of Iterations")
    plt.ylabel("log like at E-step")
    plt.show()
'''
Main Function:
Run EM algorithm,
Assign each pixel to the gaussian (foreground VS background) with higher probability
'''
CONVERGENCE_THRESHOLD = 1  # stop EM when |loglike - prev_loglike| <= this
K_SEG = 2  # number of segments: foreground and background
# Place this file inside code/ folder
if __name__=='__main__':
    input_path = ["../a2/cow.txt", "../a2/fox.txt", "../a2/owl.txt", "../a2/zebra.txt"]
    output_path = ["../output/a2/cow", "../output/a2/fox", "../output/a2/owl", "../output/a2/zebra"]
    for i in range(len(input_path)):
        data, image = read_data(input_path[i], True)
        height, width, channel = image.shape
        # reshape into pixels, each has 3 channels (RGB)
        pixels = image.reshape((height * width, channel))
        mask, foreground, background = EM(pixels, height, width, K_SEG)
        # save result images; the segments are Lab-space, so convert to BGR
        # and rescale to 0-255 before writing
        imageplt.imsave(output_path[i] + '_mask.png', mask)
        cv2.imwrite(output_path[i] + '_seg1.png', (cv2.cvtColor(foreground, cv2.COLOR_Lab2BGR) * 255).astype(np.uint8))
        cv2.imwrite(output_path[i] + '_seg2.png', (cv2.cvtColor(background, cv2.COLOR_Lab2BGR) * 255).astype(np.uint8))
| true |
6ff9b18478bf551852b3f9491b10cb77d80d0376 | Python | nakanishi-akitaka/python2018_backup | /1001/gtm-generativetopographicmapping-master/Python/demo_gtmmlr.py | UTF-8 | 3,721 | 2.78125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# %reset -f
"""
@author: Hiromasa Kaneko
"""
# Demonstration of GTM-MLR (Generative Topographic Mapping - Multiple Linear Regression)
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
# import pandas as pd
from sklearn.datasets.samples_generator import make_swiss_roll
import mpl_toolkits.mplot3d
from gtm import gtm
# settings
shape_of_map = [30, 30]  # GTM map grid size
shape_of_rbf_centers = [4, 4]
variance_of_rbfs = 0.5
lambda_in_em_algorithm = 0.001
number_of_iterations = 300
display_flag = 1
number_of_samples = 1000
noise_ratio_of_y = 0.1
random_state_number = 30000  # NOTE(review): appears unused in this script
# load a swiss roll dataset and make a y-variable
original_X, color = make_swiss_roll(number_of_samples, 0, random_state=10)
X = original_X
# y is a fixed linear combination of the three swiss-roll coordinates
raw_y = 0.3 * original_X[:, 0] - 0.1 * original_X[:, 1] + 0.2 * original_X[:, 2]
# add Gaussian noise scaled to noise_ratio_of_y of y's standard deviation
original_y = raw_y + noise_ratio_of_y * raw_y.std(ddof=1) * np.random.randn(len(raw_y))
# plot
plt.rcParams["font.size"] = 18
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
p = ax.scatter(original_X[:, 0], original_X[:, 1], original_X[:, 2], c=original_y)
fig.colorbar(p)
plt.show()
# divide a dataset into training data and test data (first 500 / last 500)
Xtrain = original_X[:500, :]
ytrain = original_y[:500]
Xtest = original_X[500:, :]
ytest = original_y[500:]
# autoscaling
# autoscaled_X = (original_X - original_X.mean(axis=0)) / original_X.std(axis=0,ddof=1)
autoscaled_Xtrain = (Xtrain - Xtrain.mean(axis=0)) / Xtrain.std(axis=0, ddof=1)
# autoscaled_Xtest = (Xtest - X.mean(axis=0)) / X.std(axis=0,ddof=1)
# autoscaled_ytrain = (ytrain - ytrain.mean()) / ytrain.std(ddof=1)
# construct GTM model
model = gtm(shape_of_map, shape_of_rbf_centers, variance_of_rbfs, lambda_in_em_algorithm, number_of_iterations,
            display_flag)
model.fit(autoscaled_Xtrain)
if model.success_flag:
    # calculate of responsibilities
    responsibilities = model.responsibility(autoscaled_Xtrain)
    # plot the mean of responsibilities (each point at its expected map position)
    means = responsibilities.dot(model.map_grids)
    plt.figure()
    # plt.figure(figsize=figure.figaspect(1))
    plt.scatter(means[:, 0], means[:, 1], c=ytrain)
    plt.colorbar()
    plt.ylim(-1.1, 1.1)
    plt.xlim(-1.1, 1.1)
    plt.xlabel("z1 (mean)")
    plt.ylabel("z2 (mean)")
    plt.show()
    # plot the mode of responsibilities (most responsible grid node per point)
    modes = model.map_grids[responsibilities.argmax(axis=1), :]
    plt.figure()
    # plt.figure(figsize=figure.figaspect(1))
    plt.scatter(modes[:, 0], modes[:, 1], c=ytrain)
    plt.colorbar()
    plt.ylim(-1.1, 1.1)
    plt.xlim(-1.1, 1.1)
    plt.xlabel("z1 (mode)")
    plt.ylabel("z2 (mode)")
    plt.show()
    # construct MLR model
    model.mlr(Xtrain, ytrain)
    # MLR prediction
    predicted_ytest = np.ndarray.flatten(model.mlr_predict(Xtest))
    # r2p, RMSEp, MAEp -- test-set fit statistics
    print("r2p: {0}".format(float(1 - sum((ytest - predicted_ytest) ** 2) / sum((ytest - ytest.mean()) ** 2))))
    print("RMSEp: {0}".format(float((sum((ytest - predicted_ytest) ** 2) / len(ytest)) ** (1 / 2))))
    print("MAEp: {0}".format(float(sum(abs(ytest - predicted_ytest)) / len(ytest))))
    # yy-plot: predicted vs. simulated y with a diagonal reference line
    plt.figure(figsize=figure.figaspect(1))
    plt.scatter(ytest, predicted_ytest)
    YMax = np.max(np.array([np.array(ytest), predicted_ytest]))
    YMin = np.min(np.array([np.array(ytest), predicted_ytest]))
    plt.plot([YMin - 0.05 * (YMax - YMin), YMax + 0.05 * (YMax - YMin)],
             [YMin - 0.05 * (YMax - YMin), YMax + 0.05 * (YMax - YMin)], 'k-')
    plt.ylim(YMin - 0.05 * (YMax - YMin), YMax + 0.05 * (YMax - YMin))
    plt.xlim(YMin - 0.05 * (YMax - YMin), YMax + 0.05 * (YMax - YMin))
    plt.xlabel("simulated y")
    plt.ylabel("estimated y")
    plt.show()
| true |
797d3f46a4c117ab629574a8507a2643ef23e98e | Python | srlindemann/amp | /im/ib/metadata/extract/ib_metadata_crawler/pipelines.py | UTF-8 | 2,219 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | import csv
import pathlib
from typing import Union
import ib_metadata_crawler.items as it
import ib_metadata_crawler.spiders.ibroker as ib
import scrapy
import scrapy.exceptions as ex
class ExchangeUniquePipeline:
    """Scrapy pipeline that drops duplicate ExchangeItem markets."""

    # class-level set: shared across every instance/spider run in the process
    seen = set()

    def process_item(
        self, item: scrapy.Item, spider: ib.IbrokerSpider
    ) -> Union[scrapy.Item, ex.DropItem]:
        """Pass the item through, raising DropItem for an already-seen market.

        (Note: DropItem is raised, never returned; the Union annotation is
        kept for compatibility with existing tooling.)
        """
        if isinstance(item, it.ExchangeItem):
            if item["market"] in self.seen:
                raise ex.DropItem("Market already parsed")
            self.seen.add(item["market"])
        return item
class CSVPipeline:
    """Scrapy pipeline writing exchanges and symbols to two TSV files.

    Files are opened in append mode for the lifetime of the spider and the
    row layout follows the spider's `exchange_header` / `symbols_header`.
    """

    def __init__(
        self, root: pathlib.Path, exchange_fname: str, symbol_fname: str
    ) -> None:
        self.root_dir = root
        self.exchange = exchange_fname
        self.symbol = symbol_fname

    @classmethod
    def from_crawler(cls, crawler: scrapy.Spider) -> "CSVPipeline":
        """Build the pipeline from crawler settings (scrapy entry point)."""
        return cls(
            root=crawler.settings.get("OUTCOME_LOCATION"),
            exchange_fname=crawler.settings.get("EXCHANGE_FNAME"),
            symbol_fname=crawler.settings.get("SYMBOLS_FNAME"),
        )

    def open_spider(self, spider: ib.IbrokerSpider) -> None:
        """Open both output files (append) and wrap them in TSV writers."""
        self.exchange_f = open(self.root_dir / self.exchange, "a")
        self.symbol_f = open(self.root_dir / self.symbol, "a")
        self.exchange_csv = csv.writer(self.exchange_f, delimiter="\t")
        self.symbol_csv = csv.writer(self.symbol_f, delimiter="\t")

    def close_spider(self, spider: ib.IbrokerSpider) -> None:
        """Close both output files when the spider finishes."""
        self.exchange_f.close()
        self.symbol_f.close()

    def process_item(
        self, item: scrapy.Item, spider: ib.IbrokerSpider
    ) -> scrapy.Item:
        """Route the item to the matching writer.

        Items of any other type fall through and implicitly return None.
        """
        if isinstance(item, it.ExchangeItem):
            return self._process_exchange(item, spider.exchange_header)
        if isinstance(item, it.SymbolItem):
            return self._process_symbol(item, spider.symbols_header)

    def _process_exchange(self, item: scrapy.Item, header: list) -> scrapy.Item:
        # write the item's fields in header order as one TSV row
        self.exchange_csv.writerow([item[x] for x in header])
        return item

    def _process_symbol(self, item: scrapy.Item, header: list) -> scrapy.Item:
        # write the item's fields in header order as one TSV row
        self.symbol_csv.writerow([item[x] for x in header])
        return item
| true |
4d12cad7ed6d8c5cdb68f8d7ab1a6f602c1015cd | Python | seihad/Dataquest-Data-Engineer | /Step 5 - Handling Large Data Sets in Python/1. Numpy for Data Engineers/3_broadcasting_numpy_arrays.py | UTF-8 | 1,847 | 3.875 | 4 | [] | no_license | '''
1.Introduction
'''
# import numpy as np
# x = np.array([
# [7., 9., 2., 2.],
# [3., 2., 6., 4.],
# [5., 6., 5., 7.]
# ])
# ones = np.ones((3,4))
# print(ones)
# x = x - ones
# print(x)
'''
2.Broadcasting With a Single Value
'''
# import numpy as np
# x = np.array([3, 2, 4, 5])
# r = 1 / x
# print(r)
'''
3.Broadcasting Mental Model
'''
# import numpy as np
# x = np.array([
# [4, 2, 1, 5],
# [6, 7, 3, 8]
# ])
# y = np.array([
# [1],
# [2]
# ])
# z = x + y
# print(z)
'''
4.Broadcasting Horizontally
'''
# import numpy as np
# x = np.array([
# [4, 2, 1, 5],
# [6, 7, 3, 8]
# ])
# y = np.array([1,2,3,4])
# z = x + y
# print(z)
'''
5.Broadcasting Vertically
'''
# import numpy as np
# x = np.array([
# [1],
# [2],
# [3]
# ])
# y = np.array([1,2,3])
# z = x + y
# print(z)
'''
6.Broadcasting on Both
'''
# import numpy as np
# dice1 = np.array([1,2,3,4,5,6])
# dice2 = np.array([
# [1],
# [2],
# [3],
# [4],
# [5],
# [6]
# ])
# dice_sums = dice1 + dice2
# print(dice_sums)
'''
7.Broadcasting Rules
'''
# import numpy as np
# x = np.array([1, 2, 3, 4])
# y = np.array([
# [1],
# [2],
# [3],
# [4]
# ])
# shape_x = x.shape
# print(shape_x)
# shape_y = y.shape
# print(shape_y)
# shape_x_step1 = (1,4)
# print(x)
# shape_y_step1 = (4,1)
# print(y)
# shape_x_step2 = (4,4)
# print(x)
# shape_y_step2 = (4,4)
# print(y)
# print(x+y)
# error = False
'''
8.Reshaping
'''
# import numpy as np
# dice1 = np.array([1,2,3,4,5,6])
# dice2 = dice1.reshape(6,1)
# dice_sums = dice1 + dice2
# print(dice_sums)
'''
9.Compatible Shapes
'''
import numpy as np
# Lay out 1..36 in a 6x6 grid two ways: row-major (C order, the default)
# versus column-major ('F' = Fortran order).
cell_numbers = np.array([num for num in range(1,37)])
numbering_by_row = cell_numbers.reshape((6,6))
print(numbering_by_row)
numbering_by_col = cell_numbers.reshape((6,6), order='F')
print(numbering_by_col) | true |
a2ba5f70a83e501ff23bc2d88d7a8025c40ffeaf | Python | tommyhall/sc2django | /sc2/sc2stats/models.py | UTF-8 | 1,102 | 2.703125 | 3 | [] | no_license | from django.db import models
class Player(models.Model):
    """ A model representing a StarCraft 2 player """
    # unique external identifier for the player
    player_id = models.CharField(max_length=128, unique=True)
    # in-game race, e.g. 'Terran' -- TODO confirm allowed values
    race = models.CharField(max_length=10)
    def __unicode__(self):
        # NOTE(review): __unicode__ is the Python 2 Django idiom; Python 3
        # Django uses __str__.
        return self.player_id
class Map(models.Model):
    """ A model representing a StarCraft 2 map """
    # unique external identifier for the map
    map_id = models.CharField(max_length=128, unique=True)
    # number of players the map supports (defaults to 1v1)
    map_size = models.IntegerField(default=2)
    def __unicode__(self):
        # NOTE(review): Python 2 Django idiom; use __str__ on Python 3.
        return self.map_id
class Match(models.Model):
    """ A model representing a StarCraft 2 match """
    match_id = models.IntegerField(unique=True)
    # NOTE(review): map/player references are plain CharFields, not
    # ForeignKeys to Map/Player -- no referential integrity is enforced.
    map_id = models.CharField(max_length=128)
    player_1 = models.CharField(max_length=128)
    player_2 = models.CharField(max_length=128)
    winner = models.CharField(max_length=128)
    season = models.CharField(max_length=128)  # e.g. '2015S3'
    league = models.CharField(max_length=128)  # e.g. 'GSL'
    expansion = models.CharField(max_length=128)  # e.g. 'Heart of the Swarm'
    def __unicode__(self):
        # NOTE(review): Python 2 Django idiom; use __str__ on Python 3.
        return unicode(self.match_id)
| true |
60ea8ba4c9d1da6b3f8545defcef41ca9fe5f7b1 | Python | indo-seattle/python | /Sandesh/Week3_0324-0330/IntFloatnComplex/2_PrintNumericTypes.py | UTF-8 | 226 | 3.671875 | 4 | [] | no_license | x = 1
y = 1.1
z = 1.2j
# Demonstrate the three numeric types (int, float, complex).
print("The value of", x, "is which is a numeric type of ", type(x))
# Bug fix: the next two prints reported x's value while showing y's and z's
# types; they now print the matching variable.
print("The value of", y, "is which is a numeric type of ", type(y))
print("The value of", z, "is which is a numeric type of ", type(z))
74a4ce51e5d26cd4312478e651538b996f59573b | Python | pragyanetic20/Sales-Analysis | /scripts/clean.py | UTF-8 | 1,045 | 3.546875 | 4 | [] | no_license | from csv import writer
from csv import reader
import re
# Merge address-line columns 14 and 15 of file.csv into one cleaned
# ADDRESSLINE column and write the result to file_1.csv.
# NOTE(review): the output is opened in 'wb' while csv.writer writes str
# rows -- this is a Python 2 idiom and raises TypeError on Python 3, where
# the mode should be 'w' with newline=''.
# open the input_file in read mode and output_file in write mode
with open('file.csv', 'r') as read_obj, \
        open('file_1.csv', 'wb') as write_obj:
    isFirstRow = False  # becomes True once the header row has been handled
    csv_reader = reader(read_obj) # creating a csv.reader object from the input file object
    csv_writer = writer(write_obj) # Creating a csv.writer object from the output file object
    # Read each row of the input csv file as list
    for row in csv_reader:
        if not isFirstRow: # append the default text in the row / list
            isFirstRow = True
            row.append('ADDRESSLINE') # append the newly created column
        else:
            # strip everything except letters, digits, whitespace, and '.'
            row.append(re.sub(r'[^a-zA-Z0-9\s\.]+', '', ' '.join([row[14], row[15]]))) # merging columns address line 1 & address line 2 and also cleaning address line at the same time
            del (row[15], row[14]) # deleting address line 2 an 1 (higher index first)
        csv_writer.writerow(row) # writing the updated row / list to the output file
print("File created")
| true |
790be9f3605adc5904ca12fc49d3eda5dd19a633 | Python | innerr/stars | /core/prop.py | UTF-8 | 873 | 2.640625 | 3 | [
"MIT"
] | permissive | #coding:utf-8
#nature@20100825
import os
class Props:
    """Simple key=value property store persisted next to this module.

    NOTE(review): written for Python 2 -- _load reads binary and splits
    bytes with a str separator, and save writes str to a 'wb' file; both
    raise TypeError under Python 3. Also, `file` shadows the old builtin.
    """
    def __init__(self, file=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'props.data')):
        # default path is props.data beside this source file (evaluated once
        # at definition time, which is fine for an immutable string)
        self._file = file
        self._data = {}
        self._load()
    def _load(self):
        """Populate _data from the backing file, creating it if missing."""
        if not os.path.isfile(self._file):
            open(self._file, 'wb').close()
        file = open(self._file, 'rb')
        for it in file.readlines():
            kv = it.split('=')
            if len(kv) == 2:
                # strip the trailing newline from the value
                self._data[kv[0]] = kv[1][:-1]
        file.close()
    def set(self, key, value):
        """Store value under key; both are coerced to str."""
        self._data[str(key)] = str(value)
    def get(self, key):
        """Return the stored value for key, or None when absent."""
        return self._data.get(str(key))
    def save(self):
        """Rewrite the backing file from the in-memory dictionary."""
        file = open(self._file, 'wb')
        for k, v in self._data.items():
            file.write(k + '=' + v + '\n')
        file.close()
| true |
d8eae6039e854e147c542a90a0eef078962385a5 | Python | parkikbum/Jump-to-python | /백준/python/11720.py | UTF-8 | 104 | 2.921875 | 3 | [] | no_license | n = int(input())
nn = input()
n_sum = list(nn)
sum = 0
for x in n_sum:
sum = sum + int(x)
print(sum) | true |
8f42aa5e151817844bbfb60aa1cea210bd702b08 | Python | uoi00/ai-couplet | /model.py | UTF-8 | 17,753 | 3.125 | 3 | [
"MIT"
] | permissive | """model.py
Build the language model using encoder-decoder with attention
"""
import numpy as np
import tensorflow as tf
import time
import os
class Model():
def __init__(self, char2idx, idx2char, param_dict):
# parse the parameters
vocab_size = param_dict['vocab_size']
embedding_dim = param_dict['embedding_dim']
units = param_dict['units']
num_layers = param_dict['num_layers']
dropout = param_dict['dropout']
# save global variables
self.char2idx = char2idx
self.idx2char = idx2char
self.param_dict = param_dict
self.embedding_dim = embedding_dim
# create encoder
self.encoder = Encoder(vocab_size, embedding_dim, units, num_layers, dropout)
# create decoder
self.decoder = Decoder(vocab_size, embedding_dim, units, num_layers, dropout)
# create optimizer
self.optimizer = tf.keras.optimizers.Adam()
# create checkpoint
self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, encoder=self.encoder, decoder=self.decoder)
def load_weights(self, checkpoint_dir):
""" load weights of the TF model """
try:
# restore from model_dir
status = self.checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
print("Checkpoint found at {}".format(tf.train.latest_checkpoint(checkpoint_dir)))
except:
print("No checkpoint found at {}".format(checkpoint_dir))
def save_weights(self, checkpoint_dir):
""" save the model weights """
model_checkpoint = tf.train.CheckpointManager(self.checkpoint, checkpoint_dir, max_to_keep=1)
model_checkpoint.save()
def train_word2vec(self, train_data, iter, word2vec_path):
""" train the word2vec model """
from gensim import models
self.wv_model = models.Word2Vec(
train_data,
size=self.embedding_dim,
min_count=1,
window=len(train_data[0]),
iter=iter
)
self.wv_model.save(word2vec_path)
def load_word2vec(self, word2vec_path):
""" load the pretrained word2vec model """
from gensim import models
self.wv_model = models.Word2Vec.load(word2vec_path)
def transfer_embedding_weights(self, idx2char):
""" use the word2vec weights as the embedding matrix """
# get the embedding matrix
embedding_matrix = self._get_word2vec_matrix(self.wv_model, idx2char, self.embedding_dim)
# set the embedding matrix values to encoder and decoder
self.encoder.set_embedding_matrix(embedding_matrix)
self.decoder.set_embedding_matrix(embedding_matrix)
def _get_word2vec_matrix(self, wv_model, idx2char, embedding_dim):
""" return the word2vec matrix, reordered by char2idx vocabulary index """
count = 0
embedding_matrix = np.zeros((len(idx2char), embedding_dim))
for idx, char in enumerate(idx2char):
if char in wv_model.wv.vocab:
wv_idx = wv_model.wv.vocab[char].index
embedding_matrix[idx] = wv_model.wv.vectors[wv_idx]
else:
embedding_matrix[idx] = np.zeros((embedding_dim, ))
count += 1
print("There are {} characters not in the word2vec embedding".format(count))
return embedding_matrix
def _get_repeated_chars(self, inputs):
""" get repeated characters by index in the input """
first_seen_idx = {}
repeated_chars = {}
for i, char_idx in enumerate(inputs):
if char_idx not in first_seen_idx.keys():
first_seen_idx[char_idx] = i
else:
repeated_chars[i] = first_seen_idx[char_idx]
return repeated_chars
def predict(self, sentence, beam_width=20):
""" use the model to predict """
# check input sanity
for i in sentence:
if i not in self.char2idx.keys():
return "抱歉,您的输入中有我还没学会的生僻字,呜呜呜"
inputs = [self.char2idx[i] for i in sentence]
sentence_len = len(inputs)
# get repeated chars in the input
repeated_chars = self._get_repeated_chars(inputs)
inputs = tf.convert_to_tensor([inputs])
enc_out, enc_hidden = self.encoder(inputs, training=False)
dec_hidden = enc_hidden
# the tuple that contains the score, the sequence, the hidden state, and the attention weights
results = [(0, ['<s>'], dec_hidden)]
for t in range(sentence_len):
results_new = []
for result in results:
# take the score and all historical characters without the new prediction
score = result[0]
seq = result[1]
dec_hidden = result[2]
# update used character list to avoid bad prediction
# include "," in the used char list
used_char_idx = list(inputs[0].numpy()) + [self.char2idx[x] for x in seq]
if self.char2idx[','] not in list(inputs[0].numpy()):
used_char_idx += [self.char2idx[',']]
# take the last element as the input
dec_input = tf.expand_dims([self.char2idx[seq[-1]]], 0)
# predict
predictions, dec_hidden, attention_weights = self.decoder(dec_input,
dec_hidden,
enc_out,
training=False)
predictions = tf.nn.softmax(predictions)
if t in repeated_chars.keys():
# if this is a repeated chars in the input
# then the output should be repeated as well
first_seen_idx = repeated_chars[t]
prediction_id = self.char2idx[seq[first_seen_idx+1]]
score_new = score + np.log(predictions[0][prediction_id].numpy())
results_new.append((score_new, seq+[self.idx2char[prediction_id]], dec_hidden))
else:
# if no repeated chars
# then take the k most likely predictions
_, top_k = tf.math.top_k(predictions, beam_width)
for prediction_id in top_k.numpy()[0]:
if prediction_id not in used_char_idx:
score_new = score + np.log(predictions[0][prediction_id].numpy())
results_new.append((score_new, seq+[self.idx2char[prediction_id]], dec_hidden))
# keep only top k results in the beam search
results = sorted(results_new, key=lambda x:x[0])[-beam_width:]
# take the most likely one
result = max(results, key=lambda x:x[0])[1][1:]
return "".join(result)
def _loss_function(self, real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = self.loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_sum(loss_)
def _preprocess_dataset(self, train, target, batch_size):
""" preprocess the dataset """
from sklearn.model_selection import train_test_split
# train/eval split
input_tensor_train, input_tensor_eval, target_tensor_train, target_tensor_eval = train_test_split(
train,
target,
test_size=0.1,
random_state=42
)
# train dataset
buffer_size = len(input_tensor_train)
steps_per_epoch = len(input_tensor_train)//batch_size
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(buffer_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
# eval dataset
buffer_size_eval = len(input_tensor_eval)
steps_per_epoch_eval = len(input_tensor_eval)//batch_size
dataset_eval = tf.data.Dataset.from_tensor_slices((input_tensor_eval, target_tensor_eval)).shuffle(buffer_size_eval)
dataset_eval = dataset_eval.batch(batch_size, drop_remainder=True)
return dataset, dataset_eval, steps_per_epoch, steps_per_epoch_eval
def train(self, train, target, start_epoch, num_epoch, log_dir, checkpoint_dir, batch_size, learning_rate):
""" train the model """
# preprocess the dataset
dataset, dataset_eval, steps_per_epoch, steps_per_epoch_eval = self._preprocess_dataset(train, target, batch_size)
# create loss object
self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
# update the optimizer
self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
# checkpoint and log
checkpoint_manager = tf.train.CheckpointManager(self.checkpoint, checkpoint_dir, max_to_keep=2)
# create log file
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = open("{}/training.log".format(log_dir), 'w')
for epoch in range(start_epoch, start_epoch+num_epoch):
start = time.time()
time_last = start
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
batch_loss = self.train_step(inp, targ, training=True)
total_loss += batch_loss
if batch % 1000 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
print('Time taken for 1000 batch {} sec\n'.format(time.time() - time_last))
time_last = time.time()
# saving (checkpoint) the model every 1 epoch
checkpoint_manager.save()
# calculate the evaluation set metrics
eval_loss = 0
for (batch, (inp, targ)) in enumerate(dataset_eval.take(steps_per_epoch_eval)):
batch_loss = self.train_step(inp, targ, training=False)
eval_loss += batch_loss
print('Evaluation Loss {:.4f}'.format(eval_loss / steps_per_epoch_eval))
# write metrics to log
log_file.write('{} {:.4f} {:.4f}\n'.format(epoch,
total_loss / steps_per_epoch,
eval_loss / steps_per_epoch_eval))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
log_file.close()
@tf.function
def train_step(self, inp, targ, training=True):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = self.encoder(inp, training=True)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([self.char2idx['<s>']] * inp.shape[0], 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = self.decoder(dec_input, dec_hidden, enc_output, training=True)
loss += self._loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[0]))
if training:
variables = self.encoder.trainable_variables + self.decoder.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, num_layers, dropout):
super(Encoder, self).__init__()
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.enc_units = enc_units
self.num_layers = num_layers // 2 ## because we have bidirectional
self.embedding = tf.keras.layers.Embedding(
vocab_size,
embedding_dim,
trainable=False
)
lstm_cells = [tf.keras.layers.LSTMCell(self.enc_units, recurrent_initializer='glorot_uniform', dropout=dropout) for _ in range(num_layers)]
lstm_stacked = tf.keras.layers.StackedRNNCells(lstm_cells)
rnn = tf.keras.layers.RNN(lstm_stacked, return_sequences=True, return_state=True)
self.bilayers = tf.keras.layers.Bidirectional(rnn)
def set_embedding_matrix(self, embedding_matrix):
""" use the embedding matrix as the pretrained embedding layer """
self.embedding = tf.keras.layers.Embedding(
self.vocab_size,
self.embedding_dim,
embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
trainable=False
)
def call(self, x, training=False):
x = self.embedding(x)
outputs = self.bilayers(x, training=training)
# the returned output from the bidirectional LSTM layers
output = outputs[0]
# the hidden_state from the bidirectional LSTM layers
# states = [layer_1, layer_2, etc.]
# for each layer, hidden = tf.concat([forward_hidden, backward_hidden], -1)
state_f = outputs[1:self.num_layers+1]
state_b = outputs[self.num_layers+1:]
states = []
for i in range(self.num_layers):
states.append([state_f[i][0], state_f[i][1]]) # hidden states in the forward i-th layer
states.append([state_b[i][0], state_b[i][1]]) # hidden states in the backward i-th layer
return output, states
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# query hidden state shape == (batch_size, hidden size)
# query_with_time_axis shape == (batch_size, 1, hidden size)
# values shape == (batch_size, max_len, hidden size)
# we are doing this to broadcast addition along the time axis to calculate the score
query_with_time_axis = tf.expand_dims(query, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
# the shape of the tensor before applying self.V is (batch_size, max_length, units)
score = self.V(tf.nn.tanh(
self.W1(query_with_time_axis) + self.W2(values)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, num_layers, dropout):
super(Decoder, self).__init__()
self.vocab_size = vocab_size
self.embedding_dim = embedding_dim
self.dec_units = dec_units
self.num_layers = num_layers
self.embedding = tf.keras.layers.Embedding(
vocab_size,
embedding_dim,
trainable=False
)
lstm_cells = [tf.keras.layers.LSTMCell(self.dec_units, recurrent_initializer='glorot_uniform', dropout=dropout) for _ in range(num_layers)]
lstm_stacked = tf.keras.layers.StackedRNNCells(lstm_cells)
self.rnn = tf.keras.layers.RNN(lstm_stacked, return_sequences=True, return_state=True)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.attention = BahdanauAttention(self.dec_units)
def set_embedding_matrix(self, embedding_matrix):
""" use the embedding matrix as the pretrained embedding layer """
self.embedding = tf.keras.layers.Embedding(
self.vocab_size,
self.embedding_dim,
embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
trainable=False
)
def call(self, x, hidden_states, enc_output, training=False):
# the hidden_states passed in is a list
hidden_states_concat = tf.reshape(hidden_states, (hidden_states[0][0].shape[0], -1))
# enc_output shape == (batch_size, max_length, hidden_size)
context_vector, attention_weights = self.attention(hidden_states_concat, enc_output)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the LSTM
outputs = self.rnn(x, initial_state=hidden_states, training=False)
# take the output
output = outputs[0]
# take the states
states = []
for i in range(self.num_layers):
states.append(outputs[i+1]) # h + c
# output shape == (batch_size * 1, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size, vocab)
x = self.fc(output)
return x, states, attention_weights
| true |
e750ba8bd03393ca196a0da652da8a57f6a1fce3 | Python | TINY-KE/floorplan-MapGeneralization | /src/data_analysis.py | UTF-8 | 2,026 | 2.734375 | 3 | [] | no_license | import os
import networkx as nx
import numpy as np
def get_labels(nxg_):
# Get labels from netx graph
label_dict = nx.get_node_attributes(nxg_, 'label')
return list(label_dict.values())
folder = r'C:\Users\Chrips\Aalborg Universitet\Frederik Myrup Thiesson - data\scaled_graph_reannotated'
data_list = '../data/generalizing_test_file_list_new.txt'
data_files = [os.path.join(folder, line.rstrip()) for line in open(data_list)]
nr_nodes_list = []
nr_edges_list = []
nr_door_nodes_list = []
nr_non_door_nodes_list = []
for file in data_files:
nxg = nx.read_gpickle(file)
nr_nodes_list.append(nxg.number_of_nodes())
nr_edges_list.append(nxg.number_of_edges())
labels = np.asarray(get_labels(nxg))
labels0_idx = np.where(labels == 0)[0]
labels1_idx = np.where(labels == 1)[0]
nr_non_door_nodes_list.append(len(labels0_idx))
nr_door_nodes_list.append(len(labels1_idx))
total_nr_graphs = len(data_files)
total_nr_nodes = sum(nr_nodes_list)
total_nr_edges = sum(nr_edges_list)
total_nr_door_nodes = sum(nr_door_nodes_list)
total_nr_non_door_nodes = sum(nr_non_door_nodes_list)
std_of_nodes = np.std(np.asarray(nr_nodes_list), axis=0)
std_of_edges = np.std(np.asarray(nr_edges_list), axis=0)
std_of_door_nodes = np.std(np.asarray(nr_door_nodes_list), axis=0)
std_of_non_door_nodes = np.std(np.asarray(nr_non_door_nodes_list), axis=0)
print("DATASET ANALYSIS:")
print("Nr. of graphs: %d" % total_nr_graphs)
print("Nr. of nodes: %d" % total_nr_nodes)
print("Nr. of edges: %d" % total_nr_edges)
print("Nr. of door nodes: %d" % total_nr_door_nodes)
print("Nr. of non-door nodes: %d\n" % total_nr_non_door_nodes)
print("Percentage of door nodes: %.2f%%" % float(total_nr_door_nodes/total_nr_nodes*100))
print("Percentage of non-door nodes: %.2f%%\n" % float(total_nr_non_door_nodes/total_nr_nodes*100))
print("Std. of nodes: %.2f" % std_of_nodes)
print("Std. of edges: %.2f" % std_of_edges)
print("Std. of door nodes: %.2f" % std_of_door_nodes)
print("Std. of non-door nodes: %.2f" % std_of_non_door_nodes)
| true |
5bd004ee9f606b1f10ba122e03239b036136b022 | Python | nargiza-web/python-exercise | /1_to_10.py | UTF-8 | 63 | 3.140625 | 3 | [] | no_license | number = 1
while number<11:
print (number)
number += 1
| true |
139188036554bb8cc8a4884f4c7445d335d4c1d6 | Python | aul007/laceTracker | /laceTracker.py | UTF-8 | 3,582 | 2.9375 | 3 | [] | no_license | from bs4 import BeautifulSoup
from probChars import clean
from brandUrls import url_list
import urllib2
import MySQLdb
import re
db = MySQLdb.connect("localhost","root","+r1t0n$k1k1b0uDiN", "lacetest1")
cursor = db.cursor()
#cursor.execute("DROP TABLE IF EXISTS listing")
sql = """CREATE TABLE listing (
id int unsigned NOT NULL auto_increment,
name varchar(255) NOT NULL,
currentPrice decimal(10,2) NOT NULL,
buyItNow decimal(10,2) NOT NULL,
PRIMARY KEY (id)) """
#cursor.execute(sql)
def soup_open(url):
req = urllib2.Request(url, headers = {'User-Agent' : "Magic Browser"})
response = urllib2.urlopen(req)
soup = BeautifulSoup(response.read(),"html.parser")
return soup
#gets the relevant data of each listing (name, prices)
def get_data(url,soup):
array = []
entry = soup.find_all("li",{"class" : "greybg"})
#loops through all the entries and grabs name and prices
for i in range(len(entry)):
name = entry[i].find_all("div",{"class": "data-box"})
buyType = entry[i].find_all("p",{"class" : "currentp"})
price = entry[i].find_all("p",{"class" : "currentpb"})
for j in range(len(name)):
n = name[j].a.contents
n[j] = clean(n[j])
for k in range(len(price)):
kind = buyType[k].contents
kindp = price[k].contents
temp = str(kindp)
intPrice = clean(temp)
intPrice = intPrice.replace("u","")
if k is 0:
if str(kind) == "[u'current price']":
cp = kind
p = intPrice
BIN = '\0'
p2 = 0
if str(kind) == "[u'Buy it Now']":
BIN = kind
p2 = intPrice
cp = '\0'
p = 0
else:
if str(kind) == "[u'current price']":
cp = kind
p = intPrice
if str(kind) == "[u'But it Now']":
BIN = kind
p2 = intPrice
#inserts listing into database
try:
cursor.execute("""INSERT INTO listing (name, currentPrice, buyItNow)VALUES(%r,% s,%s)""",(n,p,p2))
db.commit()
except:
db.rollback()
print "error, couldnt add entry"
print n
#method to loop through all of the brands
def brand_urls(url2, brand_num, page_num):
soup = soup_open(url2)
page = brand_num
brand_count = brand_num
length = url_list(soup)
i = 0
brand = [""]*length
for a in soup.findAll('a'):
if 'brand' in a['href']:
brand[i] = a.get('href') #get url of first page of brand
i = i+1
for i in range(page, length):
url = brand[i]
page = 1
soup = soup_open(url)
message = soup.find_all("div", {"align": "center"})
#while the page exists
while len(message) is 0:
#loop through each page of a specific brand
if brand_count is brand_num:
if page < page_num:
page = page_num
url = brand[i] + "page/%d/" % page
print url
try:
soup = soup_open(url)
except urllib2.HTTPError, e:
break
soup = soup_open(url)
get_data(url,soup)
message = soup.find_all("div", {"align": "center"})
if(len(message) is not 0):
print "page doesnt exist"
page = page +1
brand_count = brand_count + 1
print brand_count
return url
def main():
url2 = "http://www.lacemarket.us/"
brand_num = input("enter a brand number: ")
page_num = input("enter a brand page number: ")
brand_urls(url2,brand_num, page_num)
main()
| true |
a4987ea57cf433568c28d8953735084c48907306 | Python | Python-study-f/Algorithm-study_1H | /Algorithm_2021/May_2021/210523/8911 - turtle/8911_210509_asura.py | UTF-8 | 924 | 3.25 | 3 | [] | no_license | N = int(input())
ans = []
dic = [(0, 1), (1, 0), (0, -1), (-1, 0)]
for _ in range(N):
x,y = 0, 0
x_max,x_min,y_max,y_min = 0,0,0,0
index = 0
lst = list(str(input()))
SET = set()
SET.add((0, 0))
for c in lst:
if index % 4 == 0:
index = 0
nx,ny = dic[index]
if c == "L":
index -= 1
elif c == "R":
index += 1
elif c == "F":
x,y = x+nx, y+ny
else: # Back
x,y = x-nx, y-ny
SET.add((x, y)) # 좌표까지 구했으면 이제 사각형 구해야한다
for s in SET:
a, b = s
x_max = max(x_max, a)
x_min = min(x_min, a)
y_max = max(y_max, b)
y_min = min(y_min, b)
col = abs(x_max - x_min)
row = abs(y_max - y_min)
ans.append(col * row)
for i in range(len(ans)):
print(ans[i]) | true |
b86830dc7769938a06aab9273c4f47ea3c3d1e58 | Python | Rabbid76/graphics-snippets | /example/python/utility/opengl_mesh.py | UTF-8 | 4,262 | 2.609375 | 3 | [] | no_license |
import ctypes
from OpenGL.GL import *
class SingleMesh:
def __init__(self, mesh_specification):
attr_array = mesh_specification.attributes
index_array = mesh_specification.indices
stride, format = mesh_specification.format
self.__no_indices = len(index_array)
vertex_attributes = (ctypes.c_float * len(attr_array))(*attr_array)
indices = (ctypes.c_uint32 * self.__no_indices)(*index_array)
self.__vao = glGenVertexArrays(1)
self.__vbo, self.__ibo = glGenBuffers(2)
glBindVertexArray(self.__vao)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.__ibo)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, self.__vbo)
glBufferData(GL_ARRAY_BUFFER, vertex_attributes, GL_STATIC_DRAW)
offset = 0
float_size = ctypes.sizeof(ctypes.c_float)
for i, attribute_format in enumerate(format):
tuple_size = attribute_format[1]
glVertexAttribPointer(i, tuple_size, GL_FLOAT, False, stride*float_size, ctypes.c_void_p(offset))
offset += tuple_size * float_size
glEnableVertexAttribArray(i)
def draw(self):
glBindVertexArray(self.__vao)
glDrawElements(GL_TRIANGLES, self.__no_indices, GL_UNSIGNED_INT, None)
class MultiMesh:
def __init__(self, mesh_specifications, stride, format):
attributes = [mesh.attributes for mesh in mesh_specifications]
indices = [mesh.indices for mesh in mesh_specifications]
attributes_len = sum(len(a) for a in attributes)
indices_len = sum(len(i) for i in indices)
self.__no_of_meshes = len(mesh_specifications)
draw_indirect_list = []
first_index = 0
base_vertex = 0
for attr_list, index_list in zip(attributes, indices):
no_of_indices = len(index_list)
no_of_attributes = len(attr_list) // stride
draw_indirect_list += [no_of_indices, 1, first_index, base_vertex, 0]
first_index += no_of_indices
base_vertex += no_of_attributes
self.__vao = glGenVertexArrays(1)
self.__vbo, self.__ibo, self.__dbo = glGenBuffers(3)
draw_indirect_array = (ctypes.c_uint32 * len(draw_indirect_list))(*draw_indirect_list)
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, self.__dbo)
glBufferData(GL_DRAW_INDIRECT_BUFFER, draw_indirect_array, GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, self.__vbo)
glBufferData(GL_ARRAY_BUFFER, attributes_len*4, None, GL_STATIC_DRAW)
offset = 0
for attr_list in attributes:
no_of_values = len(attr_list)
value_array = (ctypes.c_float * no_of_values)(*attr_list)
glBufferSubData(GL_ARRAY_BUFFER, offset, no_of_values*4, value_array)
offset += no_of_values*4
glBindVertexArray(self.__vao)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.__ibo)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices_len*4, None, GL_STATIC_DRAW)
offset = 0
for index_list in indices:
no_of_indices = len(index_list)
index_array = (ctypes.c_uint32 * no_of_indices)(*index_list)
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, offset, no_of_indices*4, index_array)
offset += no_of_indices*4
offset = 0
float_size = ctypes.sizeof(ctypes.c_float)
for i, attribute_format in enumerate(format):
tuple_size = attribute_format[1]
glVertexAttribPointer(i, tuple_size, GL_FLOAT, False, stride*float_size, ctypes.c_void_p(offset))
offset += tuple_size * float_size
glEnableVertexAttribArray(i)
def draw(self):
self.draw_range(0, self.__no_of_meshes)
def draw_range(self, start, end):
if start < end <= self.__no_of_meshes:
# GLAPI/glDrawElementsIndirect
# https://www.khronos.org/opengl/wiki/GLAPI/glDrawElementsIndirect
glBindVertexArray(self.__vao)
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, self.__dbo)
glMultiDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_INT, ctypes.c_void_p(start*4*5), end-start, 4*5)
| true |
f76d72e8c307ad828cc081c3478d50f551a6eda2 | Python | divyanshk/algorithms-and-data-structures | /PeakElement.py | UTF-8 | 828 | 3.375 | 3 | [] | no_license | # Problem: https://leetcode.com/problems/find-peak-element/description/
class Solution(object):
def findPeakElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
lo = 0
hi = len(nums)-1
while (lo < hi):
mid = (hi+lo)/2
if (hi-lo) > 1:
if (nums[mid] > nums[mid-1] and nums[mid] > nums[mid+1]):
return mid
elif (nums[mid] > nums[mid-1] and nums[mid] < nums[mid+1]):
lo = mid + 1
elif (nums[mid] < nums[mid-1] and nums[mid] > nums[mid+1]):
hi = mid - 1
else:
lo = mid + 1
else:
# edge cases
return hi if nums[hi]>nums[lo] else lo
return lo
| true |
8a3de760e4da6db67b32d0a4128bc48d538ee48a | Python | hughdbrown/advent-code | /advent-code-20.py | UTF-8 | 1,939 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import print_function
from collections import defaultdict
def primes(n):
def mark(low, high, m):
for j in range(low * low, high + 1, low):
m[j] = 0
m = [0, 0] + ([1] * (n + 1))
mark(2, n, m)
for i in range(3, n + 1, 2):
if m[i]:
mark(i, n, m)
return [i for i in range(n + 1) if m[i]]
PRIMES = primes(10 * 1000 * 1000)
def gifts(x):
"""
>>> gifts(1)
set([1])
>>> gifts(2)
set([2, 1])
>>> gifts(3)
set([3, 1])
>>> gifts(4)
set([4, 1, 2])
>>> gifts(5)
set([5, 1])
>>> gifts(6)
set([6, 1, 2, 3])
>>> gifts(7)
set([7, 1])
>>> gifts(8)
set([8, 1, 2, 4])
>>> gifts(9)
set([9, 1, 3])
"""
return set([x] + [i for i in range(1, int(x // 2) + 1) if x % i == 0])
def gifts2(x):
return set([x] + [i for i in range(max(1, x // 50), int(x // 2) + 1) if x % i == 0])
def main(n):
g = {}
m = 0
for i in range(1, n):
for j in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79):
k = i // j
if (k * j == i) and k in g:
orig = g[k]
g[i] = orig.union(set(j * a for a in orig))
break
else:
g[i] = gifts(i)
s = sum(g[i])
if s > m:
m = s
if 10 * s >= n:
print("Found {0} {1}".format(g[i], i))
return i
def main2(n):
m = 0
for i in range(180, 400 * 1000 * 1000, 180):
if i not in PRIMES:
s = gifts2(i)
ss = sum(s)
if ss > m:
m = ss
if ss * 11 >= n:
print(i, ss, sorted(s))
return i
if __name__ == '__main__':
from doctest import testmod
testmod()
print(main(36 * 1000 * 1000))
print(main2(36 * 1000 * 1000))
| true |
2fe9bbfb4d0b1e8f61eafa19d19ccaeb868aa087 | Python | dlondonmedina/intro-to-programming-python-code | /1-2/main.py | UTF-8 | 494 | 3.703125 | 4 | [] | no_license | # Question 1
name = input()
print()
# Question 2
hours = input()
rate = input()
# do your calculations and prints here.
# don't forget to convert hours and rate to
# integers or floats accordingly.
# Question 3
fahrenheit = input()
# your calculation goes here
# Question 4
income1 = input()
income2 = input()
income3 = input()
totalincome = income1 + income2 + income3
# calculate percentages for each income
bill = input()
# calculate the share for each of three people.
# print values | true |
bdf431ea0eb00ef878e4549f633275ee3999904d | Python | luoyanhan/Algorithm-and-data-structure | /Leetcode/medium/1642.py | UTF-8 | 840 | 3.0625 | 3 | [] | no_license | class Solution:
def furthestBuilding(self, heights, bricks, ladders):
height_difference = [0] + [max(0, heights[i] - heights[i-1]) for i in range(1, len(heights))]
def check(idx):
tmp = height_difference[:idx+1]
if idx <= ladders:
return True
tmp.sort()
#用tmp的长度减ladders而不是直接-ladders, 避免ladders为0的情况
if sum(tmp[:idx+1-ladders]) <= bricks:
return True
return False
left = 0
right = len(heights) - 1
while left < right:
mid = (left + right + 1) // 2
if not check(mid):
right = mid - 1
else:
left = mid
return right
print(Solution().furthestBuilding( [4,12,2,7,3,18,20,3,19], 10, 2))
| true |
f16a646e6cb6cd4eabcf10d612811df82dac5e8e | Python | pipdax/frelation | /frelation.py | UTF-8 | 13,297 | 3.484375 | 3 | [
"BSD-2-Clause"
] | permissive | import pyecharts
from pyecharts import Graph
from collections import Iterable
class frelation():
    '''
    Visualize how features are constructed from one another in a machine
    learning project, making it easier to trace derived (manufactured)
    features back to their source features.

    Parameters:
    ---
    title: string
        The title of the whole picture
    subtitle: string
        The description shown under the title

    Returns
    ---

    Examples
    ---
    fr = frelation("The Title","The subtitle")
    fr.addNodes(['a','b','c'], 0)
    fr.addNodes(['d','e'],3)
    fr.addNodes(['f','g','h'],5)
    fr.addLink('a','d','g')
    fr.addLink('d','h')
    fr.addLink('b','e','h')
    fr.addLinks([{'source':'a','target':'e'},{'source':'c','target':'e'}])
    fr.show()

    Author:
    ---
    pipdax@126.com

    Version:
    ---
    v0.0.1
    '''
def __init__(self, title='', subtitle=''):
self.title = title
self.subtitle = subtitle
self.nodes = {} # format like {'category1':['node1','node2'], 'category2':['node3','node4']}
self.all_nodes = [] # store all the nodes name without category, format is ['node1','node2',...]
self.links = [] # format like [{'source':'node1','target':'node2'},{'source':'node3','target':'node4'}]
self.categories = [] # format like [0,0,1,1,...], the same length as self.nodes values
self.node_style = {"graph_layout": 'none', "line_color": 'red', "line_curve": 0.08,
"is_focusnode": True, "is_roam": True, "is_label_show": True,
"label_pos": 'insideLeft', "label_text_color": '#fff',
"label_text_size": 15, "label_emphasis_textsize": 17, }
def addNode(self, node, category=0):
'''
添加一个节点,按照category分到不同的组,每一组一个颜色,相同的组在一列上
Add one node, distributed to several groups by category, every category use one color,
the same category show in one column.
Parameters
---
nodes:string
One node as string
category: int
Choose nodes as different group, use different color.
The max value is 20, if lager than 20, it will back from zero.
Returns
---
string list
Current nodes that has added before
'''
if not isinstance(category, int) or not isinstance(node, str):
raise TypeError("Please intput node as string and category as int")
if category not in self.nodes.keys():
self.nodes[category] = []
if self.if_node_exist(node):
print("Warning: The following nodes has the same name as add before!")
print(node)
else:
self.nodes[category].append(node)
self.all_nodes = list(set(self.all_nodes)|set([node]))
return self.nodes
def addNodes(self, nodes, category=0):
'''
同时添加多个节点,按照category分不同的组,每一组一个颜色,相同的组在一列上
Add several nodes, split to several groups by category, every category use one color,
the same category show in one column.
Parameters
---
nodes:string list or string
One node as string
Two or more nodes as string list
category: int
Choose nodes as different group, use different color
Returns
---
string list
Current nodes that has added before
'''
if not isinstance(category, int):
raise TypeError("Please intput category as int")
if category not in self.nodes.keys():
self.nodes[category] = []
if isinstance(nodes, str):
if self.if_node_exist(nodes):
print("Warning: The following nodes has the same name as add before!")
print(nodes)
else:
self.nodes[category].append(nodes)
self.all_nodes = list(set(self.all_nodes)|set([nodes]))
elif isinstance(nodes, (list, Iterable)):
if isinstance(nodes, Iterable):
nodes = list(nodes)
nodes = list(set(nodes))# remove the repeat nodes
exist_nodes = []
for i in nodes:
if isinstance(i, str):
if self.if_node_exist(i):
exist_nodes.append(i)
else:
self.nodes[category].append(i)
else:
raise TypeError("Please intput node as string or string list")
if exist_nodes != []:
print("Warning: The following nodes has the same name as add before!")
print(exist_nodes)
self.all_nodes = list(set(self.all_nodes)|set(nodes))
else:
raise TypeError("Please intput node as string or string list")
return self.nodes
def if_node_exist(self, node):
'''
Check if the node has added.
Parameters
---
node: string or string list
Returns
---
True or False
True: Exist
'''
if isinstance(node, str):
if node in self.all_nodes:
return True
else:
return False
elif isinstance(node, (list, Iterable)):
if isinstance(node, Iterable):
node = list(node)
exist_list = list(set(self.all_nodes)&set(node))
if exist_list == []:
return False
else:
return True
    def addLink(self, left_node, mid_node, right_node=None, cat_list=None):
        '''
        Add a link between two nodes, or a chain of three.

        With two arguments, left_node -> mid_node links are created.  With
        three, mid_node is the middle node: left_node -> mid_node and
        mid_node -> right_node links are both created.  String-list
        arguments produce the full cross product of links.

        Parameters
        ---
        left_node, mid_node, right_node: string or string list
            The node(s) to be connected.
        cat_list: int list
            When given, each argument's node(s) are first registered via
            addNodes with the corresponding category (one category per
            argument position; lengths must match).
        Returns
        ---
        dict list
            All links added so far (each as {"source": ..., "target": ...}).
        '''
        if not isinstance(left_node, (str, Iterable)) or not isinstance(mid_node, (str, Iterable)):
            raise TypeError("Please intput left_node and mid node as string or string list")
        if right_node is not None and not isinstance(right_node, (str, Iterable)):
            raise TypeError("Please intput left_node and mid_node as string")
        # Element-wise validation: every member of a list argument must be str.
        if not isinstance(left_node, str) and isinstance(left_node, Iterable):
            for i in left_node:
                if not isinstance(i, str):
                    raise TypeError("Please intput left_node as string or string list")
        if not isinstance(mid_node, str) and isinstance(mid_node, Iterable):
            for i in mid_node:
                if not isinstance(i, str):
                    raise TypeError("Please intput mid_node as string or string list")
        if right_node is not None and not isinstance(right_node, str) and isinstance(right_node, Iterable):
            for i in right_node:
                if not isinstance(i, str):
                    raise TypeError("Please intput right_node as string or string list")
        #add node if cat_list is set
        if cat_list is not None:
            param_list = [left_node, mid_node]
            if right_node is not None:
                param_list.append(right_node)
            assert len(param_list) == len(cat_list)
            for i, cat in enumerate(cat_list):
                if not isinstance(cat, int):
                    raise TypeError("The cat_list must be int list")
                self.addNodes(param_list[i], cat_list[i])
        # Normalize all arguments to lists so the cross products below work.
        if isinstance(left_node, str):
            left_node = [left_node,]
        if isinstance(mid_node, str):
            mid_node = [mid_node,]
        if right_node is not None and isinstance(right_node, str):
            right_node = [right_node,]
        for left in left_node:
            for mid in mid_node:
                _link = {"source": left, "target": mid}
                self.links.append(_link)
        if right_node is not None:
            for mid in mid_node:
                for right in right_node:
                    _link = {"source": mid, "target": right}
                    self.links.append(_link)
        return self.links
    def addLinks(self, links, cat_list=None):
        '''
        Add several links at once.

        Links must be given as a list of dicts, each containing the keys
        'source' and 'target', e.g.
        [{'source':'a','target':'e'},{'source':'c','target':'e'}].

        Parameters
        ---
        links: dict list
            The links to add; every dict must have 'source' and 'target'.
        cat_list: list
            When given, the source/target of each link are also registered
            via addNodes with the matching category entry.
        Returns
        ---
        dict list
            All links added so far.
        '''
        if not isinstance(links, (list, Iterable)):
            raise TypeError("Please uses dict list like [{'source':'node1','target':'node2'}]")
        else:
            if isinstance(links, Iterable):
                links = list(links)
            if cat_list is not None:
                assert len(links)==len(cat_list)
                # NOTE(review): this unpacks each cat_list entry into (i, j),
                # which only works when entries are pairs — a flat int list
                # (as the docstring suggests) would raise TypeError here.
                # TODO confirm intended cat_list shape.
                for i,j in cat_list:
                    if not isinstance(i, int) and not isinstance(j, int):
                        raise TypeError("cat_list must be int list")
            for i,link in enumerate(links):
                if not isinstance(link, dict) or 'source' not in link.keys() or 'target' not in link.keys():
                    raise TypeError("Please uses dict list like [{'source':'node1','target':'node2'}]")
                _link = {"source": link['source'], "target": link['target']}
                if cat_list is not None:
                    self.addNodes([link['source'], link['target']], cat_list[i])
                self.links.append(_link)
        return self.links
    def show(self):
        '''
        Render the nodes and links added so far.

        Links that reference unregistered nodes are dropped first.  For
        categories with 10 or more nodes, nodes that participate in links
        are moved to the head of the category so they are drawn near the
        top of their column.

        Returns
        ---
        pyecharts Graph object
            Drawing this object displays the relation graph.
        '''
        # Clean up the links: drop any link whose endpoint is not a known node.
        _link_nodes = set([x['source'] for x in self.links]) | set([x['target'] for x in self.links])
        _nodes = self.all_nodes
        _del_link_nodes = _link_nodes - set(_nodes)
        self.links = list(filter(lambda x: x['source'] not in _del_link_nodes
                          and x['target'] not in _del_link_nodes, self.links))
        #set the connected nodes to head
        linked_nodes = [x["source"] for x in self.links]
        linked_nodes.extend([x["target"] for x in self.links])
        for cat in self.nodes.keys():
            if self.nodes[cat] == []:
                pass
            elif len(self.nodes[cat]) < 10:
                # Small categories keep their insertion order.
                pass
            else:
                connect_nodes = list(set(self.nodes[cat])&set(linked_nodes))
                other_nodes = list(set(self.nodes[cat]) - set(connect_nodes))
                connect_nodes.extend(other_nodes)
                self.nodes[cat] = connect_nodes
        # Graph height scales with the total node count (25 px per node).
        graph = Graph(self.title, self.subtitle, height=len(_nodes)*25)
        nodes = []
        nodes_pos = [0, 0]
        symbol_width = max(map(len, self.all_nodes))*10 # get the max charactor length
        symbol_width = min(max(symbol_width,80), 200) # min is 80, max is 200
        for cat_id, cats in enumerate(self.nodes.keys()):
            for node_id, cat_nodes in enumerate(self.nodes[cats]):
                self.categories.append(cats)
                # One column per category (x), one row per node (y).
                nodes_pos = [cat_id * 300, node_id * 20]
                nodes.append({"name": cat_nodes,
                              "x": nodes_pos[0], "y": nodes_pos[1],
                              'symbolSize': [symbol_width, 20],
                              "symbol": 'rect',
                              "category": (cats % 20),
                              "graph_repulsion":1000000})
        graph.add("", nodes, self.links, categories=self.categories, **self.node_style)
        # graph.render() I do not know why this API is not work in my environment.
        return graph
if __name__ == "__main__":
    # Usage demo / smoke test: three node groups (categories 0, 3 and 5),
    # a few direct and three-node chained links, then render the graph.
    fr = frelation("The Title", "The subtitle")
    fr.addNodes(['a', 'b', 'c'], 0)
    fr.addNodes(['d', 'e'], 3)
    fr.addNodes(['f', 'g', 'h'], 5)
    fr.addLink('a', 'd', 'g')
    fr.addLink('d', 'h')
    fr.addLink('b', 'e', 'h')
    # Cross-product link with node auto-registration via cat_list.
    fr.addLink(['m','a','c'],['d','e'],cat_list=[0,3])
    fr.addLinks([{'source': 'a', 'target': 'e'}, {'source': 'c', 'target': 'e'}])
    fr.show()
| true |
d0db0d31c9f6e90e787b00aa01ee720f40fc8498 | Python | xiaofu98/cv_projects | /text_detection/location_detection.py | UTF-8 | 698 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/pyty1on3
# -*- coding: utf-8 -*-
# @File : location_detection.py
import cv2
import pytesseract
pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'
img = cv2.imread('1.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # OpenCV默认使用BGR
boxes = pytesseract.image_to_boxes(img)
hImg, wImg, _ = img.shape
print(wImg,hImg)
for b in boxes.splitlines():
b = b.split(' ')
x, y, x1, y1 = int(b[1]), int(b[2]), int(b[3]), int(b[4])
cv2.rectangle(img, (x, hImg - y), (x1, hImg - y1), (0, 0, 255), 3)
cv2.putText(img, b[0], (x, hImg - y + 25), cv2.FONT_HERSHEY_COMPLEX, 1, (50, 50, 255), 2)
cv2.imshow("Result", img)
cv2.waitKey(0)
| true |
0a431947b5614f6d9862d4269e23351defca6f86 | Python | Suyash906/survey-form | /survey.py | UTF-8 | 4,544 | 3.609375 | 4 | [] | no_license | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
"""
“Have you ever been diagnosed with diabetes?” is a screening question that would be asked to evaluate for the eligibility criteria “Patients must have a diagnosis of diabetes.”
“Are you between the ages of 18 and 75?” is a screening question that would be asked to evaluate for the eligibility criteria “Patients must be between the ages of 18 and 75”.
"""
"""
QuestionType
- stmt
- id
RangeQuestionType
- min
- max
- scoring_method
StringQuestionType
- scoring_method
"""
class Question:
    """A survey question with a single known correct answer."""

    def __init__(self, id, question_statement, correct_response):
        # Private attributes exposed only through the accessors below.
        self.__id = id
        self.__question_statement = question_statement
        self.__correct_response = correct_response

    def get_question_statement(self):
        """Return the text shown to the respondent."""
        return self.__question_statement

    def get_id(self):
        """Return the question identifier."""
        return self.__id

    def get_correct_response(self):
        """Return the expected (passing) answer."""
        return self.__correct_response
#######################################
class GenericQuestion:
    """Base class for survey questions: an id plus the question text."""

    def __init__(self, id, question_statement):
        self.__id = id
        self.__question_statement = question_statement

    def get_question_statement(self):
        """Return the text shown to the respondent."""
        return self.__question_statement

    def get_id(self):
        """Return the question identifier."""
        return self.__id


class RangeQuestion(GenericQuestion):
    """A question whose acceptable answer is a numeric value in [min, max]."""

    def __init__(self, id, question_statement, min, max):
        super().__init__(id, question_statement)
        self.__min = min
        self.__max = max

    def get_min(self):
        """Return the lower bound of the acceptable range."""
        return self.__min

    def get_max(self):
        """Return the upper bound of the acceptable range."""
        return self.__max


class BooleanQuestion(GenericQuestion):
    """A question with one exact expected answer (e.g. "Yes")."""

    def __init__(self, id, question_statement, correct_response):
        super().__init__(id, question_statement)
        self.__correct_response = correct_response

    def get_correct_response(self):
        """Return the expected (passing) answer."""
        return self.__correct_response
# class QuestionSet:
# def __init__(self):
# self.__
class UserResponse:
    """A respondent's answer to one survey question."""

    def __init__(self, question_id, response):
        self.__question_id = question_id
        self.__response = response

    def get_response(self):
        """Return the answer value supplied by the respondent."""
        return self.__response

    def get_question_id(self):
        """Return the id of the question this answer belongs to."""
        return self.__question_id
class Survery:
    """A screening survey: a list of questions plus a pass/fail result.

    (The class name keeps the original, misspelled, public spelling so
    existing callers keep working.)
    """

    def __init__(self, questions_list=None):
        # BUG FIX: the original used a mutable default argument
        # (questions_list=[]), so every Survery created without an explicit
        # list shared ONE list and saw each other's questions.  Use None as
        # the sentinel and create a fresh list per instance.
        self.__questions_list = [] if questions_list is None else questions_list
        self.__result = 'Fail'

    def add_question(self, question):
        """Append a question to the survey."""
        self.__questions_list.append(question)

    def get_survey_questions(self):
        """Return the list of questions added so far."""
        return self.__questions_list

    def get_result(self):
        """Return 'Pass' or 'Fail' from the last verify_response() run."""
        return self.__result

    def verify_response(self, user_response):
        """Set the result to 'Pass' iff every answer satisfies its question.

        BooleanQuestion answers must equal the expected response exactly;
        RangeQuestion answers must fall inside [min, max].  The first
        failing answer short-circuits and leaves the result as 'Fail'.
        """
        for response in user_response:
            question_id = response.get_question_id()
            questtion_response = response.get_response()
            for question in self.__questions_list:
                if question.get_id() == question_id:
                    if isinstance(question, BooleanQuestion) and question.get_correct_response() != questtion_response:
                        self.__result = 'Fail'
                        return
                    elif isinstance(question, RangeQuestion):
                        if question.get_min() > questtion_response or question.get_max() < questtion_response:
                            self.__result = 'Fail'
                            return
        self.__result = 'Pass'
def main():
    """Build a two-question screening survey, submit one sample response
    set, and print the pass/fail result."""
    survey = Survery()
    screening_questions = [
        BooleanQuestion(1, "Have you ever been diagnosed with diabetes?”", "Yes"),
        RangeQuestion(2, "Are you between the ages of 18 and 75?”", 18, 75),
    ]
    for question in screening_questions:
        survey.add_question(question)
    # Echo each registered question to the console.
    for question in survey.get_survey_questions():
        print(question.get_question_statement())
    sample_responses = [UserResponse(1, "Yes"), UserResponse(2, 25)]
    survey.verify_response(sample_responses)
    print('Result = {}'.format(survey.get_result()))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| true |
5122fb5f46eb68d1a7c21584b406bca7c312eeab | Python | Registea2267/CSC221 | /M1LAB_Register.py | UTF-8 | 501 | 3.140625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
CSS 221
M1LAB
Ashley Register
Jan 23, 2019
"""
def main():
    """Print the "bottles of beer" countdown from 99 down to 0."""
    # range(start, stop, step) excludes the stop value, so -1 yields 0 last.
    for bottles in range(99, -1, -1):
        print(bottles, "bottles of beer")
if __name__ == "__main__":
main()
| true |
f60ffc5f7ca6427b82cf5e6b108eca20e11e2202 | Python | Krishna-124/AI-Assistant | /VIOLET.py | UTF-8 | 7,704 | 2.703125 | 3 | [] | no_license | from selenium import webdriver
from getpass import getpass
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import os
import webbrowser
import random
#c_driver = 'Dir of chromedriver.exe'
# Text-to-speech setup using the Windows SAPI5 backend.
# NOTE(review): voice index 1 is assumed to exist and is presumably a
# female voice on a default install — confirm per machine.
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[1].id)
engine.setProperty('voice', voices[1].id)
def speak(audio):
    """Speak *audio* aloud (blocking) through the module-level pyttsx3 engine."""
    engine.say(audio)
    engine.runAndWait()
def intro():
    """Greet the user according to the current local hour, then introduce
    the assistant."""
    current_hour = datetime.datetime.now().hour
    if current_hour < 12:
        greeting = "Good morning Sir!"
    elif current_hour < 17:
        greeting = "Good afternoon sir!"
    else:
        greeting = "Good Evening sir!"
    speak(greeting)
    speak("I am Violet! How can I help You")
def say_user():
    """
    Capture one utterance from the microphone and return it as text.

    Uses the speech_recognition Google recognizer.  On any recognition
    failure the error is printed and the string "none" is returned
    (note: the lowercase string "none", not the None object).
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Listening.....')
        # Tuning: short pause threshold, fixed energy threshold for noisy rooms.
        r.pause_threshold = 0.6
        r.energy_threshold = 2000
        audio = r.listen(source)
    try:
        print("Recognize.....")
        query = r.recognize_google(audio)
        print(f"User said :{query}\n")
    except Exception as e:
        print(e)
        print("say again sir")
        return "none"
    return query
def time():
    """Speak the current local time in 12-hour am/pm format.

    Note: the function name shadows nothing imported here, but it is kept
    for interface compatibility with the command loop below.
    """
    # Single snapshot so hour and minute cannot straddle a minute boundary
    # (the original called datetime.now() twice).
    now = datetime.datetime.now()
    hh = now.hour
    mm = now.minute
    # BUG FIX: the original marked hour 12 (noon) as "am" and spoke
    # midnight as "0 am"; 12:00-23:59 is "pm" and 0/12 display as 12.
    ampm = "am" if hh < 12 else "pm"
    hh = hh % 12
    if hh == 0:
        hh = 12
    c_time = "time is", hh, mm, ampm
    speak(c_time)
def date():
    """Speak today's date (a datetime.date; pyttsx3 reads its str() form)."""
    m = datetime.date.today()
    speak(m)
# Main command loop: read a command (typed here; voice input is commented
# out), normalize it, and dispatch on keyword matches.
# NOTE(review): several branches reference names whose definitions are
# commented out above (c_driver for Selenium, music_dir for the music
# player) and will raise NameError until those are configured.
# NOTE(review): conditions like ('tell' and 'time') in query and
# 'open' and 'youtube' in query evaluate only the right-hand membership
# test ('tell' and 'time' == 'time'); presumably 'tell' in query and
# 'time' in query was intended — confirm.
while True:
    query = input("say:").lower()
    # print(query)
    # query = say_user().lower()
    # Strip the wake words so the remaining text is the bare command.
    query = query.replace("hi violet", "").replace('hey violet', '')
    if 'hello violet' in query or 'initiate ai' in query:
        intro()
    elif 'who created you' in query or 'who are you' in query or 'tell me about yourself' in query:
        speak("I am Violet! your artificial intelligent , I am created by Krishna Singh Dummagaa")
    elif 'bye violet' in query or 'shutdown' in query or 'ok bye' in query:
        # Say good night after 8 pm, otherwise a daytime goodbye, then exit.
        hour = int(datetime.datetime.now().hour)
        if hour >= 20:
            speak('good night sir! sweet dream ')
        else:
            speak('ok bye sir ! have a good day')
        exit()
    elif 'what is time' in query or ('tell' and 'time') in query:
        time()
    elif 'what is date' in query or ('tell' and 'date') in query:
        date()
    elif 'open' and 'youtube' in query:
        # Ask for a search phrase by voice, then drive Chrome to YouTube.
        speak('what can i search you for sir')
        query = say_user().lower()
        driver = webdriver.Chrome(c_driver)
        driver.get('https://www.youtube.com/')
        youtube_box = driver.find_element_by_id('search')
        youtube_box.send_keys(query)
        login_btn = driver.find_element_by_id('search-icon-legacy')
        login_btn.submit()
    elif 'open google' in query:
        webbrowser.open("google.com")
        '''speak('what can i search for you sir ')
        driver=webdriver.Chrome(c_driver)
        search_box=driver.find_element_by_id('fakebox-input')
        driver.get(query)
        submit_btn=driver.find_element_by_id('')'''
    elif 'play music' in query or 'play song' in query:
        # Inner loop: keep asking for a song until one is played or 'exit'.
        speak("which one ! sir")
        while True:
            #music_dir = "Type dir where your songs are stored stored"
            songs = os.listdir(music_dir)
            #if you want type query = say_user().lower()
            query = input("which...")
            query = query.lower()
            # NOTE(review): the second and third conditions are duplicates.
            if 'as you like' in query or 'as you want' in query or 'as you want' in query:
                # Pick a random song (the -2 presumably skips non-song
                # entries at the end of the listing — confirm).
                list_len = len(songs) - 2
                r = random.randrange(0, list_len, 1)
                os.startfile(os.path.join(music_dir, songs[r]))
                break
            elif query == 'exit':
                break
            elif 'play' in query:
                query = query.replace("play", "")
                # NOTE(review): this removes every occurrence of the first
                # character, not just a leading space — confirm intent.
                query = query.replace(query[0], "")
                count = 0
                length = len(songs)
                for i in range(0, length, 1):
                    if query in songs[i].lower():
                        os.startfile(os.path.join(music_dir, songs[i]))
                        count = count + 1
                        break
                if count == 0:
                    speak("sorry sir! would you like to play another song")
    elif 'wikipedia' in query:
        # Wikipedia mode: summarize, then allow "elaborate" to grow the
        # summary by 2 sentences at a time until 'ok'/'quit'/'exit'.
        lines_wiki = 2
        while True:
            if 'elaborate' not in query and 'ok ' not in query and 'wikipedia' in query:
                try:
                    speak("Searching Wikipedia.....")
                    query = query.replace("wikipedia", "").replace(' in ', ' ')
                    query = query.replace("search", "")
                    result = wikipedia.summary(query, sentences=2)
                    speak("According to Wikipedia ")
                    print(result)
                    speak(result)
                except Exception as e:
                    print(e)
                query = say_user()
            elif 'elaborate' in query or 'more info' in query:
                lines_wiki = lines_wiki + 2
                result = wikipedia.summary(query, sentences=lines_wiki)
                speak("According to Wikipedia ")
                print(result)
                speak(result)
            elif 'ok ' in query or 'quit' in query or 'exit' in query:
                break
    elif 'login wi-fi' in query:
        # Fill a captive-portal login form (hard-coded gateway address).
        speak('can i know your username and passcode')
        query = say_user().lower()
        # query = input("username...")
        username = input("Enter User name:")
        passcode = getpass('Enter password:')
        driver = webdriver.Chrome(c_driver)
        driver.get('http://172.16.16.16:8090/')
        username_box = driver.find_element_by_name('username')
        username_box.send_keys(username)
        passcode_box = driver.find_element_by_name('password')
        passcode_box.send_keys(passcode)
        login_btn = driver.find_element_by_id('logincaption')
        login_btn.submit()
    elif 'show' in query and 'net' in query and 'speed' in query:
        driver = webdriver.Chrome(c_driver)
        driver.get('https://fast.com/')
    elif 'remind' in query:
        # Reminders persist in a plain-text file, one per line.
        if 'set' in query and 'remind' in query:
            f = open('remind.txt', 'a')
            speak('ok. tell me')
            msg = input('say r') + "\n"
            f.write(msg)
            f.close()
        elif 'remind' in query and 'me' in query:
            count = 0
            f = open('remind.txt', 'r')
            msg = f.readlines()
            for i in msg:
                count = count + 1
            if count == 0:
                speak('you dont set yet')
            else:
                count = 0
                for i in msg:
                    speak(count + 1)
                    speak(msg[count])
                    count = count + 1
            f.close()
        elif 'delete' in query and 'remind' in query:
            # NOTE(review): opening in append mode does NOT clear the file;
            # mode 'w' would be needed to truncate — confirm intent.
            f = open('remind.txt', 'a')
            f.close()
            '''speak('Which one say serial no')
            msg = input('say r')
            for a range(1,15,1):
                '''
    else:
        continue
    '''elif 'motivation' in query and 'want' in query:
        speak("you should !")
        moti_dir = 'Type video dir'
        vid = os.listdir(moti_dir)
        lenn = len(vid)
        ran = random.randrange(0, lenn, 1)
        os.startfile(os.path.join(moti_dir, vid[ran]))'''
    '''elif 'rename' in query and 'file' in query:
        i = 1
        path = "Type Dir ,ex="C:\\Users\\abc\\Desktop\\Books\\"""
        for filename in os.listdir(path):
            my_dest = str(i) + ".png"
            my_source = path + filename
            my_dest = path + my_dest
            os.rename(my_source, my_dest)
            i += 1'''
d6e1c2f3b99d6dba1507a9d969c764936b970eef | Python | JohnWiest/Visuals | /visuals/visuals_1.py | UTF-8 | 2,779 | 2.828125 | 3 | [] | no_license | import pygame
import math
import random
import sys
import os
from object import *
pygame.init()
def main():
    """Run the pygame visual: eight charged objects orbiting a center point.

    NOTE(review): `object` here comes from the project's `object` module
    (imported with *), shadowing the builtin — confirm against object.py.
    """
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    screen = pygame.display.set_mode((2560,1440),)
    boundary = pygame.image.load("boundary.png")
    center_dot = pygame.image.load("center.png")
    center = [1280,720]
    radial_v = 0
    # Four clockwise and four counter-clockwise starters at N/E/S/W.
    object1 = object([1280,220],[radial_v,0])
    object2 = object([1780,720],[0,radial_v])
    object3 = object([1280,1220],[-radial_v,0])
    object4 = object([780,720],[0,-radial_v])
    object5 = object([1280,220],[-radial_v,0])
    object6 = object([1780,720],[0,-radial_v])
    object7 = object([1280,1220],[radial_v,0])
    object8 = object([780,720],[0,radial_v])
    objects = [object1,object2,object3,object4,object5,object6,object7,object8]
    run = True
    while run:
        pygame.time.delay(1)
        #This allows the game to quit
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        keys = pygame.key.get_pressed()
        if keys[pygame.K_ESCAPE]:
            run = False
        # Up/down arrows change the tangential speed and reset the objects.
        if keys[pygame.K_UP]:
            radial_v += 1
            objectInitialConditions(objects,radial_v)
        if keys[pygame.K_DOWN]:
            radial_v -= 1
            objectInitialConditions(objects,radial_v)
        #Run Functions
        updateScreen(screen,boundary,center_dot,objects)
        for i in range(8):
            objects[i].move()
            # NOTE(review): after objectInitialConditions replaces the list
            # elements, object1 still refers to the ORIGINAL first object,
            # so this getPeriod call may track a stale object — confirm.
            object1.getPeriod([object1.getPosition()[0],object1.getPosition()[1]])
            objects[i].changeAccel(center)
    #Game Quits after the while loop is broken
    pygame.quit()
    return
def updateScreen(screen,boundary,center_dot,objects):
    """Clear the screen, draw the boundary and center marker, then the objects.

    NOTE(review): range(1) draws only the FIRST object; presumably range(8)
    (or len(objects)) was intended unless this is deliberate debugging.
    The -72 offsets center the 144-px sprite on the object position —
    TODO confirm sprite size.
    """
    screen.fill((0,0,0))
    screen.blit(boundary,(560,0))
    screen.blit(center_dot,(1255,695))
    for i in range(1):
        screen.blit(objects[i].image,(objects[i].getPosition()[0]-72,objects[i].getPosition()[1]-72))
    pygame.display.update()
def objectInitialConditions(objects,radial_v):
    """Reset all eight objects in place with tangential speed *radial_v*.

    Mutates the passed list element-by-element so the caller's `objects`
    list sees the fresh objects (local names like object1 in the caller do
    NOT update).
    """
    objects[0] = object([1280,220],[radial_v,0])
    objects[1] = object([1780,720],[0,radial_v])
    objects[2] = object([1280,1220],[-radial_v,0])
    objects[3] = object([780,720],[0,-radial_v])
    objects[4] = object([1280,220],[-radial_v,0])
    objects[5] = object([1780,720],[0,-radial_v])
    objects[6] = object([1280,1220],[radial_v,0])
    objects[7] = object([780,720],[0,radial_v])
    return
main()
#the period of the occillations between E fields only depends on the force constants
#the period around the circle only depends on the initial position and velocity
#it would look cool if the period around the circle = n * the period of the one occillation between the fields
#come up with formulas for both periods
#put this to music with multiple other objects
#parameters for minute hand at td(1)
#k = 1000000
#c = 1000000
#obj_motion[0][0] = 1208
#obj_motion[0][1] = 350
#obj_motion[1] = [0.923, 0] | true |
01e2dc67294017597e24433e63e1917e8a0d6b79 | Python | firstshinec/leetcode | /longestPalindrome.py | UTF-8 | 2,396 | 3.25 | 3 | [] | no_license | # Move the middle points for the largest loopback sequence, disgarding the possible sequence with less length
class Solution:
def longestPalindrome(self, s: str) -> str:
MedIcr = 0
MaxLen = 1
MaxIdx = [0, 0]
if len(s) <= 1:
SubStr = s
elif len(s) == 2:
if s[0] == s[1]:
SubStr = s
else:
SubStr = s[0]
else:
while 1:
MedIcr += 1
if MedIcr >= 2*len(s):
break
if MedIcr % 2 == 1: # Even loopback seq
Med = MedIcr//2
MaxHalfPossLen = min(Med+1, len(s)-Med-1) # Find the length of maximum possible loopback sequence
# print(MaxHalfPossLen)
if 2*MaxHalfPossLen < MaxLen: # If the maximum LS has larger length than MaxHalfPossLen
continue
else:
if s[Med-(MaxLen-1)//2:Med+1] == s[Med+(MaxLen-1)//2+1:Med:-1]: # check if the internal seq meets the previous MaxLen
for n in range(MaxLen//2-1, MaxHalfPossLen):
if s[Med-n] == s[Med+n+1]:
MaxLen = 2*(n+1)
MaxIdx = [Med-n, Med+n+2]
else:
break
else:
continue
else: # Odd loopback seq
Med = MedIcr//2
MaxHalfPossLen = min(Med, len(s)-Med-1)
if 2*MaxHalfPossLen+1 < MaxLen: # If the maximum LS has larger length than MaxHalfPossLen
continue
else:
if s[Med-(MaxLen-1)//2:Med] == s[Med+(MaxLen-1)//2:Med:-1]:
for n in range(MaxLen//2, MaxHalfPossLen+1):
if s[Med-n] == s[Med+n]:
MaxLen = 2*n + 1
MaxIdx = [Med-n, Med+n+1]
else:
break
else:
continue
print(MaxIdx)
SubStr = s[int(MaxIdx[0]):int(MaxIdx[1])]
return SubStr | true |
a5a475d4fc6385283d9851d183fe0659d5d2654a | Python | NetworkRanger/python-core | /chapter14/goognewsrss.py | UTF-8 | 1,225 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/8/11 4:39 PM
try:
from io import BytesIO as StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from itertools import izip as zip
except ImportError:
pass
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from pprint import pprint
from xml.etree import ElementTree
g = urlopen('http://news.google.com/news?topic=h&output=rss')
f = StringIO(g.read())
g.close()
tree = ElementTree.parse(f)
f.close()
def topnews(count=5):
pair = [None, None]
for elmt in tree.getiterator():
if elmt.tag == 'title':
skip = elmt.text.startswith('Top Stories')
if skip:
continue
pair[0] = elmt.text
if elmt.tag == 'link':
if skip:
continue
pair[1] = elmt.text
if pair[0] and pair[1]:
count -= 1
yield (tuple(pair))
if not count:
return
pair = [None, None]
for news in topnews():
pprint(news) | true |
5503a60bf2e5b7bc4e431421f67fbeadd05c617f | Python | dialup/python_usp | /usp_1/semana3/fatorial.py | UTF-8 | 147 | 3.546875 | 4 | [
"BSD-2-Clause"
] | permissive | import math
n = int(input("Digite o valor de n: "))
# Accumulate n! iteratively; for n < 1 the loop body never runs and 1 is printed.
fatorial = 1
for fator in range(2, n + 1):
    fatorial *= fator
print(fatorial)
| true |
7baaf9128ab53947d37042c95885edc9daeb409d | Python | xpxu/regex | /re_write_url.py | UTF-8 | 1,378 | 3.078125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
'''
Purpose: Filtering Nimbula APIs using Apache Proxy
--------------------------------------
RewriteCond /federation/@SITENAME@/vpnendpoint/,%{REQUEST_URI} ^([^,]+),\1
RewriteRule ^/(.*) balancer://api%{REQUEST_URI} [P]
注:比较前面的字符串和后面的正则表达式,看是否匹配。如果匹配,那么
执行RewriteRule,将前面正则表达式匹配的url重写成后面的url.
--------------------------------------
RewriteRule replace the Nimbula API IP in place of Apache proxy IP.
'''
import re
def is_match(str, reg):
    '''
    Match *str* against the regular expression *reg* (anchored at the
    start of the string, as re.match is) and report the outcome.

    Prints 'match ok' / 'match failed' as before, and now ALSO returns
    the outcome as a bool so callers no longer have to parse stdout
    (backward-compatible: the original returned None, which callers
    ignored).  The parameter name ``str`` shadows the builtin but is
    kept for interface compatibility.
    '''
    matched = re.match(reg, str) is not None
    if matched:
        print('match ok')
    else:
        print('match failed')
    return matched
def test1():
    # Phone-number style pattern: three digits, a dash, then 3-8 digits.
    is_match('010-123', r'^\d{3}\-\d{3,8}$')

def test2():
    # '[^,]+' requires the string to start with at least one non-comma
    # character, so the leading ',' makes this fail.
    is_match(',federation,federation', r'^([^,]+)')

def test3():
    # Backreference \1 demands 'something,something...' — matches here.
    is_match('federation,federationxx', r'^([^,]+),\1')

def test4():
    # Same backreference pattern, but the part after the comma differs.
    is_match('federation,ederationxx', r'^([^,]+),\1')
test4()
| true |
e152063cde76b8e2d8cd8c5a2eff8ad0f11e9c37 | Python | Leonardo-Reis/Meu-Curso-Python | /ex108/teste.py | UTF-8 | 311 | 3.625 | 4 | [] | no_license | import moeda
# Demo driver for the project's `moeda` (currency) module: read a price and
# print it doubled, tripled, raised by 10% and lowered by 30%, each formatted
# by moeda.moeda().  (Prompts/output are intentionally in Portuguese.)
p = float(input('Digite o preço: '))
print(f'O dobro do preço é {moeda.moeda(moeda.dobro(p))}')
print(f'O triplo do preço é {moeda.moeda(moeda.triplo(p))}')
print(f'Aumentando 10% temos {moeda.moeda(moeda.aumentar(p, 10))}')
print(f'Diminuindo 30% temos {moeda.moeda(moeda.diminuir(p, 30))}')
16417b237ede6b15fb66153431471279838531a6 | Python | sriramsk1999/midas-2021-task | /task2/mnist_wrong.py | UTF-8 | 3,222 | 2.640625 | 3 | [] | no_license | '''
Contains the implementation of the MNISTWrong DataModule
'''
import os
from typing import Optional
import torch
from torchvision import transforms, datasets
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from skimage import io
import pytorch_lightning as pl
class MNISTWrongDataset(Dataset):
    """Dataset for the wrongly-labeled MNIST images.

    Wraps a stacked image tensor and a label tensor; an optional transform
    is applied per item.
    """
    def __init__(self, input_data, target, transform=None):
        self.input_data = input_data    # stacked image tensors
        self.target = target            # per-image integer labels
        self.transform = transform      # optional callable applied per item

    def __len__(self):
        return len(self.input_data)

    def __getitem__(self, idx):
        img = self.input_data[idx]
        # BUG FIX: previously `img` was only bound inside the
        # `if self.transform:` branch, so indexing a dataset created
        # without a transform raised NameError.
        if self.transform:
            img = self.transform(img)
        return (img, self.target[idx])
class MNISTWrongModule(pl.LightningDataModule):
    """Lightning DataModule: trains/validates on the wrongly-labeled
    'mnistTask' images loaded from disk, tests on the real MNIST test set.
    """
    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size
        # Eagerly loads every image under ./mnistTask into memory.
        self.img_dataset, self.img_classes = self.load_data("mnistTask")
        self.mnist_wrong_train = None
        self.mnist_wrong_val = None
        self.mnist_wrong_test = None

    def setup(self, stage: Optional[str] = None):
        """Build train/val splits and the real-MNIST test set."""
        if stage in (None, 'fit'): # Create all datasets
            # Creating transforms
            # NOTE(review): normalization constants (34.33, 75.80) are
            # presumably this dataset's pixel mean/std — confirm.
            transform = transforms.Compose([
                transforms.Resize((90, 120)), # Scale down image
                transforms.Normalize((34.33), (75.80,))
            ])
            dataset = MNISTWrongDataset(self.img_dataset, self.img_classes, transform)
            # Creating train, val datasets according to an 90-10 split
            self.mnist_wrong_train, self.mnist_wrong_val = train_test_split(dataset, test_size=0.1)
            # Transform for actual MNIST
            tensor_transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Resize((90, 120)),
                transforms.Normalize((0.1362), (0.2893,))
            ])
            # Test data is from actual MNIST
            self.mnist_wrong_test = datasets.MNIST(os.getcwd(), train=False,
                                                   download=True, transform=tensor_transform)
    def train_dataloader(self):
        # Four worker processes per loader.
        return DataLoader(self.mnist_wrong_train, batch_size=self.batch_size, num_workers=4)
    def val_dataloader(self):
        return DataLoader(self.mnist_wrong_val, batch_size=self.batch_size, num_workers=4)
    def test_dataloader(self):
        return DataLoader(self.mnist_wrong_test, batch_size=self.batch_size, num_workers=4)
    def load_data(self, img_dir):
        """Load every grayscale image under img_dir/<class>/ into a single
        stacked float32 tensor plus a matching label tensor.

        Each subfolder name is the (wrong) integer class label.
        """
        dataset = []
        classes = []
        for folder in os.listdir(img_dir):
            img_class = int(folder)
            for img in os.listdir(os.path.join(img_dir, folder)):
                img = torch.tensor(io.imread(
                    os.path.join(img_dir, folder, img), as_gray=True), dtype=torch.float32)
                # Add the channel dimension: (H, W) -> (1, H, W).
                img = torch.unsqueeze(img, 0)
                dataset.append(img)
                classes.append(int(img_class))
        classes = torch.tensor(classes)
        dataset = torch.stack(dataset)
        return dataset, classes
| true |
1de0b452ec143f959fc620ade678a5edcf365cf3 | Python | kennyjoseph/twitter_dm | /examples/simple_pull_down_user_data_print.py | UTF-8 | 1,654 | 2.875 | 3 | [] | no_license | """
This is the most basic script for using twitter_dm.
From here, you may want to go look at some of the more complex examples
that leverage the library's NLP/rapid collection tools, as this is basically
a replication of tweepy with less documentation :)
"""
from twitter_dm.TwitterAPIHook import TwitterAPIHook
from twitter_dm.TwitterUser import TwitterUser
# NOTE(review): this script is Python 2 (print statements) and requires the
# third-party twitter_dm package plus real Twitter API credentials.
username_to_collect_data_for = 'Jackie_Pooo'
consumer_key = "YOUR_CONSUMER_KEY_HERE"
consumer_secret = "YOUR_CONSUMER_SECRET_HERE"
access_token = "YOUR_ACCESS_TOKEN_HERE"
access_token_secret = "YOUR_ACCESS_TOKEN_SECRET_HERE"
## get a "hook", or connection, to the API using your consumer key/secret and access token/secret
api_hook = TwitterAPIHook(consumer_key,consumer_secret,
                          access_token=access_token,access_token_secret=access_token_secret)
#creates a Twitter User object to fill with information from the API
user = TwitterUser(api_hook,screen_name=username_to_collect_data_for)
# we call populate_tweets_from_api,which goes to the Twitter API
# and collects the user's data it is outputted to the file username_you_put.json
# the sleep_var param tells the function it shouldn't worry
# about rate limits (we're only collecting for one user, so it doesn't really matter
# If you remove the is_gzip argument, the output file will be gzipped
print 'populating users tweets!'
user.populate_tweets_from_api(json_output_filename=username_to_collect_data_for+".json",
                              sleep_var=False, is_gzip=False, since_id=None)
for t in user.tweets:
    print t.mentions
print 'user had {n_tweets} tweets'.format(n_tweets=len(user.tweets))
| true |
8a4bfe56f6ebe53e7dc8181cb6ac071485a8455f | Python | chrispun0518/personal_demo | /leetcode/Counting Elements.py | UTF-8 | 358 | 3.078125 | 3 | [] | no_license | class Solution(object):
def countElements(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
counter = {}
counts = 0
for i in arr:
counter[i] = counter.get(i, 0) + 1
for i in counter:
if i + 1 in counter:
counts += counter[i]
return counts
| true |
e643ce46cada17b1c4ac049e1cde6c6dd2e2b037 | Python | mburakaltun/ENGR421-Biweekly-Homeworks | /Homework 02 - Discrimination by Regression.py | UTF-8 | 3,021 | 2.671875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# getting data from csv files
X = np.genfromtxt('hw02_data_set_images.csv', delimiter=',')
Y = np.genfromtxt('hw02_data_set_labels.csv', usecols=0, dtype=str)
# Each of the 5 classes contributes 39 consecutive rows: the first 25 go to
# the training split, the remaining 14 to the test split.
X_train = np.concatenate((X[0:25], X[39:64], X[78:103], X[117:142], X[156:181]))
X_test = np.concatenate((X[25:39], X[64:78], X[103:117], X[142:156], X[181:195]))
Y_tr = np.concatenate((Y[0:25], Y[39:64], Y[78:103], Y[117:142], Y[156:181]))
Y_tst = np.concatenate((Y[25:39], Y[64:78], Y[103:117], Y[142:156], Y[181:195]))
y_truth = []
y_test_truth = []
# converting class labels from string to integer (labels are quoted, e.g. "A")
for i in Y_tr:
    if i == '\"A\"':
        y_truth.append(1)
    elif i == '\"B\"':
        y_truth.append(2)
    elif i == '\"C\"':
        y_truth.append(3)
    elif i == '\"D\"':
        y_truth.append(4)
    elif i == '\"E\"':
        y_truth.append(5)
for i in Y_tst:
    if i == '\"A\"':
        y_test_truth.append(1)
    elif i == '\"B\"':
        y_test_truth.append(2)
    elif i == '\"C\"':
        y_test_truth.append(3)
    elif i == '\"D\"':
        y_test_truth.append(4)
    elif i == '\"E\"':
        y_test_truth.append(5)
K = 5    # number of classes
N = 125  # number of TRAINING samples (25 per class)
y_truth = np.array(y_truth)
y_test_truth = np.array(y_test_truth)
# One-hot encode the training labels.
Y_truth = np.zeros((N, K)).astype(int)
Y_truth[range(N), y_truth - 1] = 1
# One-hot encode the TEST labels.  BUG FIX: the original built a (N, K)
# matrix indexed with y_truth (the training labels), so the test one-hot
# matrix was both the wrong size (125 vs 70 rows) and filled with the
# wrong labels.
Y_test_truth = np.zeros((len(y_test_truth), K)).astype(int)
Y_test_truth[range(len(y_test_truth)), y_test_truth - 1] = 1
# defining safelog, softmax and gradient functions
def safelog(x):
    """Numerically safe log: offsets by 1e-100 so log(0) stays finite."""
    return np.log(x + 1e-100)
def gradient_W(X, y_truth, y_predicted):
    """Gradient of the sum-of-squares error w.r.t. the weight matrix W.

    For each class c, sums -(y_truth - y_predicted)[:, c] * X over the
    samples; the result is transposed to shape (n_features, K).
    NOTE(review): reads the module-level global K (number of classes).
    """
    return (np.asarray(
        [-np.sum(np.repeat((y_truth[:, c] - y_predicted[:, c])[:, None], X.shape[1], axis=1) * X, axis=0) for c in
         range(K)]).transpose())
def gradient_w0(Y_truth, Y_predicted):
    """Gradient of the error with respect to the bias row vector w0."""
    residual = Y_truth - Y_predicted
    return -residual.sum(axis=0)
# define the sigmoid function
def sigmoid(X_, w_, w0_):
    """Elementwise logistic function of the affine scores X_ @ w_ + w0_."""
    scores = np.matmul(X_, w_) + w0_
    return 1 / (1 + np.exp(-scores))
# setting learning parameters
eta = 0.01       # learning rate
epsilon = 1e-3   # convergence threshold on the parameter change
# randomly initalizing W and w0
np.random.seed(421)
W = np.random.uniform(low=0, high=1, size=(X.shape[1], K))
w0 = np.random.uniform(low=0, high=1, size=(1, K))
print(W)
# learning W and w0 using gradient descent
iteration = 1
objective_values = []
while 1:
    Y_predicted = sigmoid(X_train, W, w0)
    # cross-entropy-style objective on the training split
    objective_values = np.append(objective_values, -np.sum(Y_truth * safelog(Y_predicted)))
    W_old = W
    w0_old = w0
    W = W - eta * gradient_W(X_train, Y_truth, Y_predicted)
    w0 = w0 - eta * gradient_w0(Y_truth, Y_predicted)
    # NOTE(review): `np.sum((w0 - w0_old)) ** 2` sums the differences first
    # and then squares the scalar — the usual convergence test would be
    # np.sum((w0 - w0_old) ** 2); confirm intent before changing.
    if np.sqrt(np.sum((w0 - w0_old)) ** 2 + np.sum((W - W_old) ** 2)) < epsilon:
        break
    iteration = iteration + 1
# plot objective function during iterations
plt.figure(figsize=(10, 6))
plt.plot(range(1, iteration + 1), objective_values, "k-")
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.show()
# predicted class = argmax over the K score columns (labels are 1-based)
y_predicted = np.argmax(Y_predicted, axis=1) + 1
confusion_matrix = pd.crosstab(y_predicted, y_truth, rownames=['y_pred'], colnames=['y_truth'])
print(confusion_matrix) | true |
bede320b7d019c8392f70a8d017d9259b59d2121 | Python | IOLevi/backend-homework | /util/utilities1.py | UTF-8 | 4,318 | 3.4375 | 3 | [] | no_license | """
Utilities1 Module
"""
import datetime
import json
import requests
def iso_to_datetime(input_str):
    """
    Parse an ISO-8601 timestamp whose UTC offset contains a colon
    (e.g. '2021-03-04T05:06:07+01:00').

    The colon at position 22 is removed so the string matches the Python
    3.6 '%z' format specifier, then a timezone-aware datetime is returned.
    Raises ValueError when position 22 is not a colon.
    """
    if input_str[22] != ':':
        raise ValueError(
            'Invalid format. Expected colon at position 22 in input string')
    cleaned = input_str[:22] + input_str[23:]
    return datetime.datetime.strptime(cleaned, '%Y-%m-%dT%H:%M:%S%z')
def populate_times(payload, legs):
    """
    Look up each requested leg id (binary search over the id-sorted list
    payload['input']['legs']) and collect its departure and arrival times
    as datetime objects.

    Returns (departure_list, arrival_list) as a tuple.
    Raises TypeError/ValueError for malformed arguments and ValueError
    when a leg id cannot be found.
    """
    if not isinstance(payload, dict):
        raise TypeError('Payload expected type is Dict.')
    if not isinstance(legs, list):
        raise TypeError('Legs expected type is List')
    if len(legs) != 2:
        raise ValueError("Legs expected to be length 2")

    departures = []
    arrivals = []
    all_legs = payload['input']['legs']

    def collect(target):
        # Classic binary search; on a hit, append the leg's UTC times.
        lo, hi = 0, len(all_legs) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            leg = all_legs[mid]
            if leg['id'] == target:
                departures.append(iso_to_datetime(leg['departure_utc']['iso']))
                arrivals.append(iso_to_datetime(leg['arrival_utc']['iso']))
                return
            if leg['id'] < target:
                lo = mid + 1
            else:
                hi = mid - 1
        raise ValueError("Couldn't find target leg in list.")

    for leg_id in legs:
        collect(leg_id)
    return departures, arrivals
def get_leg_ids(payload):
    """
    Binary search the (id-sorted) fares list for the fare named in
    payload['arguments']['fare_id'] and return its list of leg ids.

    Raises TypeError when payload is not a dict, and ValueError when the
    fare id is malformed or absent from the list.
    """
    if not isinstance(payload, dict):
        raise TypeError('Payload expected type is Dict')
    # Fare ids look like 'fare_NNN'; strip the 5-character prefix.
    # FIX: the original checked isinstance(fare_id, int) *after* int(...),
    # which can never be false — the intended ValueError was unreachable.
    # Converting defensively raises it for a malformed id instead.
    try:
        fare_id = int(payload['arguments']['fare_id'][5:])
    except ValueError:
        raise ValueError('Failed to retrieve fare_id from payload')

    fares = payload['input']['fares']
    first = 0
    last = len(fares) - 1
    while first <= last:
        mid = (first + last) // 2
        mid_value = int(fares[mid]['id'][5:])
        if mid_value == fare_id:
            return fares[mid]['legs']
        elif mid_value < fare_id:
            first = mid + 1
        else:
            last = mid - 1
    raise ValueError("Couldn't find fare_id in list")
def calc_total_time(dept_list, arrv_list):
    """
    Sum the durations (arrival - departure) over paired legs and return
    the total as whole seconds (each leg truncated to int, as before).
    Raises TypeError unless both arguments are lists.
    """
    if not isinstance(dept_list, list) or not isinstance(arrv_list, list):
        raise TypeError("Arguments expected to be of type List")
    return sum(int((arrive - depart).total_seconds())
               for depart, arrive in zip(dept_list, arrv_list))
def get_payload(test_url, headers):
    """
    Gets JSON payload from API endpoint; throws error if connection fails.
    Returns the JSON payload.
    """
    # NOTE: despite its name, `request` holds a requests.Response object.
    request = requests.get(test_url, headers=headers)
    request.raise_for_status()  # throws an exception if connections fails
    return request.json()
def post_response(payload, test_url, headers, total_time):
    """
    POST the total time in seconds to the API endpoint.
    Return the response from the POST request.
    """
    # Echo the token back so the server can pair this answer with its request.
    result_token = payload['token']
    response = {"header": {"token": result_token},
                "data": {"total_seconds": total_time}}
    return requests.post(test_url, headers=headers, data=json.dumps(response))
| true |
32f1977228f54b9220557bf9b1f070b34b0731f5 | Python | sirkon/tutor | /vector.py | UTF-8 | 4,486 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env python
#
# a python Vector class
# A. Pletzer 5 Jan 00/11 April 2002
#
import math
"""
A list based Vector class that supports elementwise mathematical operations
In this version, the Vector call inherits from list; this
requires Python 2.2 or later.
"""
class Vector(list):
    """
    A list based Vector class
    """
    # NOTE: this is Python 2 code.  It relies on py2 behaviour that no
    # longer exists in Python 3: map() returning a list, the
    # __getslice__/__div__/__rdiv__ protocols, and `raise Exc, msg` syntax.
    # no c'tor
    def __init__ (self, *a):
        # Vector(iterable) wraps an existing sequence; Vector(a, b, c)
        # builds the vector from the individual arguments.
        if len (a) == 1:
            super(Vector, self).__init__(a[0])
        else:
            super(Vector, self).__init__(a)
    def __getslice__(self, i, j):
        try:
            # use the list __getslice__ method and convert
            # result to Vector
            return Vector(super(Vector, self).__getslice__(i,j))
        except:
            raise TypeError, 'Vector::FAILURE in __getslice__'
    def __add__(self, other):
        # elementwise addition
        return Vector(map(lambda x,y: x+y, self, other))
    def __neg__(self):
        return Vector(map(lambda x: -x, self))
    def __sub__(self, other):
        # elementwise subtraction
        return Vector(map(lambda x,y: x-y, self, other))
    def __mul__(self, other):
        """
        Element by element multiplication
        """
        try:
            return Vector(map(lambda x,y: x*y, self,other))
        except:
            # other is a const
            return Vector(map(lambda x: x*other, self))
    def __rmul__(self, other):
        return (self*other)
    def __div__(self, other):
        """
        Element by element division.
        """
        try:
            return Vector(map(lambda x,y: x/y, self, other))
        except:
            return Vector(map(lambda x: x/other, self))
    def __rdiv__(self, other):
        """
        The same as __div__
        """
        try:
            return Vector(map(lambda x,y: x/y, other, self))
        except:
            # other is a const
            return Vector(map(lambda x: other/x, self))
    def size(self): return len(self)
    def conjugate(self):
        # complex conjugate, elementwise (elements must support .conjugate())
        return Vector(map(lambda x: x.conjugate(), self))
    def ReIm(self):
        """
        Return the real and imaginary parts
        """
        return [
            Vector(map(lambda x: x.real, self)),
            Vector(map(lambda x: x.imag, self)),
            ]
    def AbsArg(self):
        """
        Return modulus and phase parts
        """
        return [
            Vector(map(lambda x: abs(x), self)),
            Vector(map(lambda x: math.atan2(x.imag,x.real), self)),
            ]
    # convenience accessors for the first three components
    x = property (lambda c: c[0], (lambda c,x: c.__setitem__(0,x)))
    y = property (lambda c: c[1], (lambda c,x: c.__setitem__(1,x)))
    z = property (lambda c: c[2], (lambda c,x: c.__setitem__(2,x)))
###############################################################################
def isVector(x):
    """
    Determines if the argument is a Vector class object.
    """
    # exact class match (subclasses are deliberately not accepted)
    return hasattr(x,'__class__') and x.__class__ is Vector
def zeros(n):
    """
    Returns a zero Vector of length n.
    """
    return Vector([0.] * n)
def ones(n):
    """
    Returns a Vector of length n with all ones.
    """
    return Vector([1.] * n)
def random(n, lmin=0.0, lmax=1.0):
    """
    Returns a random Vector of length n with entries drawn uniformly
    from [lmin, lmax).

    BUG FIX: the original returned dl*gen.random(), silently ignoring the
    lower bound — lmin must be added back as the offset.
    """
    import whrandom
    gen = whrandom.whrandom()
    dl = lmax-lmin
    return Vector(map(lambda x: lmin + dl*gen.random(),
                      range(n)))
def dot(a, b):
    """
    dot product of two Vectors.
    """
    # a*b is the elementwise product (Vector.__mul__); reduce sums it.
    # NOTE: py2-only — reduce is a builtin and the raise uses comma syntax.
    try:
        return reduce(lambda x, y: x+y, a*b, 0.)
    except:
        raise TypeError, 'Vector::FAILURE in dot'
def norm(a):
    """
    Computes the norm of Vector a.
    """
    # abs() guards against tiny negative round-off from complex dot products
    try:
        return math.sqrt(abs(dot(a,a)))
    except:
        raise TypeError, 'Vector::FAILURE in norm'
def orth(a):
    # Return a normalized (unit-length) copy of Vector a.
    try:
        return a/norm(a)
    except:
        raise TypeError, 'Vector::FAILURE in ort'
def det(*vectors):
    # Determinant of the square matrix whose rows are the given vectors.
    # All vectors must share one length, and that length must equal the
    # number of vectors; only the 2x2 case is implemented.
    lengths = set (map(len,vectors))
    if len(lengths) != 1:
        raise TypeError, 'Vector::FAILURE in det'
    if len(vectors) != len(vectors[0]):
        raise TypeError, 'Vectors:FAILURE in det'
    # Don't want to implement vector operations - only 2x2 is provided now
    if len (vectors) != 2:
        raise TypeError, 'Vectors::FAILURE det not implemented for this dimension'
    a,b = vectors
    return a[0]*b[1] - a[1]*b[0]
def angle(a,b):
    # Signed angle between 2D vectors a and b: magnitude from the dot
    # product, sign from the 2x2 determinant (orientation of the pair).
    d = det(a,b)
    d = -1.0 if d < 0.0 else 1.0
    return d*math.acos(dot(a,b)/norm(a)/norm(b))
| true |
15a587a3c08616a3ee8d2fbccd738028f353e31d | Python | khushboobajaj25/Python | /Testing/TupleAndSet.py | UTF-8 | 328 | 3.109375 | 3 | [] | no_license | tup = (5, 16, 23, 25, 38, 54)
print(tup.__getitem__(0))
print(tup)
print(len(tup))
seteg = {1, 2, 3, 4, 5, 5, 6}
seteg1 = {1, 2, 4, 8, }
print(seteg)
tup2 = (2, 2);
tup3 = tup.__add__(tup2)
print(tup3)
print(seteg.intersection(seteg1))
seteg1.intersection_update(seteg)
print(seteg)
print(seteg.symmetric_difference(seteg1))
| true |
7cd8ec8dac0eb973cf4d28755f557a4aa33468e5 | Python | AugustDixon/SeniorDesign | /tower/Obstacles/Drone.py | UTF-8 | 5,585 | 2.890625 | 3 | [] | no_license | #Drone Class definition
#Author: August Dixon
#LSU Senior Design 2018-2019 Team #72
from ..Coordinate.Point import *
from ..Coordinate.Vertex import *
import math
INV_THOUSAND = 1 / 1000

#Finds euclidean distance of two points
# Arguments:
#   Point arg1
#   Point arg2
# Returns:
#   double dist - Euclidean distance
def euclidean(arg1, arg2):
    """Straight-line distance between two point-like objects with .x/.y."""
    run = arg1.x - arg2.x
    rise = arg1.y - arg2.y
    return math.sqrt(run * run + rise * rise)
#END OF euclidean()
class Instruction:
    """A single drone command: opcode, two arguments, and a special byte."""
    def __init__(self, opcode, arg1, arg2, special):
        # opcode and special are assumed to already be in byte form
        self.O = opcode
        self.Arg1 = arg1
        self.Arg2 = arg2
        self.S = special
    #END OF __init__()
#END OF Instruction
#PathInfo contains instructions and metadata on them
class PathInfo:
    """One planned instruction plus derived timing/geometry metadata."""
    def __init__(self, instruct, prevTime, initPoint, initOrient, nextPoint, nextOrient):
        # BUG FIX: attribute was misspelled 'self.intruction'; the only
        # consumer, Drone.emitInstructions(), reads 'path.instruction',
        # which would raise AttributeError.
        self.instruction = instruct
        self.startTime = prevTime
        # Arg2 is the instruction duration in milliseconds
        self.endTime = prevTime + instruct.Arg2 * INV_THOUSAND
        self.startPoint = initPoint
        self.endPoint = nextPoint
        self.startOrient = initOrient
        self.endOrient = nextOrient
        if initPoint.equals(nextPoint):
            self.dist = 0
        else:
            self.dist = euclidean(initPoint, nextPoint)
        if initOrient == nextOrient:
            self.angle = 0
        else:
            # signed smallest rotation between the two orientations
            dir, self.angle = Angles.smallestDifference(initOrient, nextOrient)
            self.angle = self.angle * dir
    #END OF __init__()
#END OF PathInfo
#DroneWall is a version of Wall for drone collision
class DroneWall:
    # A time-bounded pair of wall segments (left and right) used for
    # drone-vs-drone path collision checks.
    def __init__(self, aaLeft, aaRight, bbLeft, bbRight, prevTime, nextTime, num):
        # (aLeft, bLeft) and (aRight, bRight) are the segment endpoints;
        # startTime/endTime bound when this wall is active.
        self.aLeft = aaLeft
        self.bLeft = bbLeft
        self.aRight = aaRight
        self.bRight = bbRight
        self.startTime = prevTime
        self.endTime = nextTime
        self.droneNum = num
    #END OF __init__()
    #Checks for collisions
    # Arguments:
    #   Point aaLeft, bbLeft - Points of left wall
    #   Point aaRight, bbRight - Points of Right wall
    #   double BxAxLeft, ByAyLeft - Precomputed values of left wall
    #   double BxAxRight, ByAyRight - Precomputed values of right wall
    # Returns:
    #   boolean result
    def droneCollide(self, aaLeft, bbLeft, aaRight, bbRight, BxAxLeft, ByAyLeft, BxAxRight, ByAyRight):
        # True when any of the four segment pairings intersect.
        if Vertex.intersect(aaLeft, bbLeft, self.aLeft, self.bLeft, BxAxLeft, ByAyLeft):
            return True
        elif Vertex.intersect(aaLeft, bbLeft, self.aRight, self.bRight, BxAxLeft, ByAyLeft):
            return True
        elif Vertex.intersect(aaRight, bbRight, self.aLeft, self.bLeft, BxAxRight, ByAyRight):
            return True
        else:
            return Vertex.intersect(aaRight, bbRight, self.aRight, self.bRight, BxAxRight, ByAyRight)
    #END OF droneCollide()
#END OF DroneWall
#Drone class to contain Drone data
class Drone:
    """State container for one ground drone.

    Tracks identity, live pose, task bookkeeping, and the planned path
    (a list of PathInfo entries) from which the instruction list is
    emitted.
    """

    #Constructor for Drone
    # Arguments:
    #   char number - Comms identifier
    #   int machine - Machine ID number
    def __init__(self, number, machine):
        # identity
        self.num = number
        self.machineID = machine
        self.priority = None
        # live pose (unknown until mapped)
        self.point = None
        self.orient = None
        # task flags
        self.recalculate = False
        self.mapped = False
        self.hasBlock = False
        self.finished = False
        self.hasMothership = False
        self.block = ' '
        # routing endpoints
        self.startingVertexIdx = None
        self.startingOrient = None
        self.endingVertexIdx = None
        self.destinationPoint = None
        self.destinationIdx = []
        # planned path and its timing
        self.pathInfo = []
        self.hasPath = False
        self.startTime = None
        self.finishTime = None
        self.finishOrient = None
        self.finishPoint = None
    #END OF __init__()

    def compareID(self, drone):
        """Return True when *drone* carries the same machine ID."""
        return drone.machineID == self.machineID
    #END OF compareID()

    def makeTurn(self, angle, dir):
        """Compute the turn duration (ms) for *angle* in direction *dir*."""
        pass#TODO
    #END OF makeTurn()

    def makeMove(self, dist):
        """Compute speed and duration (ms) needed to travel *dist*."""
        pass#TODO
    #END OF makeMove()

    def mountMothership(self):
        """Compute speed and duration (ms) for mounting the mothership."""
        pass#TODO
    #END OF mountMothership()

    def dismountMothership(self):
        """Compute speed and duration (ms) for dismounting the mothership."""
        pass#TODO
    #END OF dismountMothership()

    def emitInstructions(self):
        """Collect the Instruction of every planned path segment, in order."""
        return [segment.instruction for segment in self.pathInfo]
    #END OF emitInstructions()

    def createInstruction(self, code, int, long, end):
        """Wrap the raw fields in an Instruction object."""
        return Instruction(code, int, long, end)
    #END OF createInstruction()
#END OF Drone | true |
f41731f3fbb6622fee0407f6be4257545f7a496f | Python | RashadGhzi/Python-with-Anis | /anis31.py | UTF-8 | 98 | 3.5 | 4 | [] | no_license |
matrix = [
[1,2,3],[4,5,6]
]
for row in matrix:
for column in row:
print(column)
| true |
865b5549619e994b15e899483f69d4f0d0dfed9a | Python | lisasboylanportfolio/PortfolioJinja2 | /utils.py | UTF-8 | 5,613 | 3.125 | 3 | [] | no_license | import os
import glob
import re
import os.path
from jinja2 import Template
DEBUG = False

#
# Remove all *.html file from directory
#
# Input: directory : a pathname to the directory from which to remove html files
# Return:
#     True  : all files were removed (or none were present)
#     False : at least one file could not be removed
#
def cleanDir(directory):
    """
    Delete every *.html file directly inside *directory*.

    BUG FIX: the original returned True from inside the loop right after
    deleting the first file (and False after the first failure), so at
    most one file was ever removed.  The whole list is now processed.
    """
    if DEBUG:
        print("DEBUG: utils.py.cleanDir()")
    # Get a list of all the file paths that end with .html from the directory
    fileList = glob.glob(directory + "/*.html")
    if DEBUG:
        print("DEBUG: utils.py.cleanDir().fileList=", fileList)
    if not fileList:
        if DEBUG:
            print("DEBUG: utils.py.cleanDir().filelist empty")
        return True
    # Iterate over the list of filepaths & remove each file.
    removed_all = True
    for filePath in fileList:
        try:
            os.remove(filePath)
        except OSError:
            print("Error while deleting file : ", filePath)
            removed_all = False
    return removed_all
#
# Generate files names with '.html' suffix
#
# Input:
# odir: A directory added to a prefix to create a base file path
# prefixes: a word which is prefixed by odir and '.html' appended
#
# Ouptuts:
# a dictionary. The key is a 'prefix' and the value is a html file name
#
# Note: Prior to Templating, this method require output_dir be prefixed to each filename
# However, with Templating, test confirmed output_dir was not required
#
def getPath(prefixes):
    """
    Map each page prefix to its output html filename.  The special
    prefix 'home' always maps to index.html.
    """
    if DEBUG:
        print("DEBUG: utils.py.getPath()")
    outputs = {prefix: prefix + ".html" for prefix in prefixes}
    # Special Case: the home page is written out as index.html
    outputs["home"] = "index.html"
    if DEBUG:
        print("DEBUG: utils.py.getPath().outputs:", outputs)
    return outputs
#
# Return files in specified directory
#
# Input: Directory to be searched for files
#
# Return:
# A list if file names
#
def getContentFiles(content_dir):
    """Return the list of *.html files found directly in content_dir."""
    content_files = glob.glob(content_dir + "/*.html")
    if DEBUG:
        print("DEBUG: utils.py.getContentFiles().content files=", content_files)
    return content_files
#
# Get the prefix of a string. where the prefix is seperated by '_'
#
# Input:
# List of files
#
# Return:
# List of prefixes
#
def getPrefixes(strings):
    """
    Return the unique filename prefixes (the text before the first '_')
    of the given file paths, in first-seen order.  A filename without an
    underscore raises ValueError, as before.
    """
    prefixes = []
    for file_path in strings:
        if DEBUG:
            print("DEBUG: utils.py.getPrefixes().filepath=", file_path)
        base_name = os.path.basename(file_path)
        prefix = base_name[:base_name.index("_")]
        if prefix not in prefixes:
            prefixes.append(prefix)
    if DEBUG:
        print("DEBUG: utils.py.getPrefixes().prefixes=", prefixes)
    return prefixes
def getTitles(prefixes):
    """Map each prefix to a display title: first letter upper, rest lower."""
    if DEBUG:
        print("DEBUG: utils.py.getTitles()")
    titles = {}
    for title in prefixes:
        # capitalize only the first character
        titles[title] = title[0].upper() + title[1:].lower()
    if DEBUG:
        print("DEBUG: utils.py.getTitles().titles=", titles)
    return titles
#
# Render a web page using the Templating technique
#
# Input:
# cdir: directory containing template content used to compase page
# odir: generated web pages output directory
# basefile: template file
#
def render(cdir,odir, basefile, prefixes):
    # Render one page per prefix: the Jinja2 template at *basefile* is
    # filled with the page's "<prefix>_main.html" and "<prefix>_msg.html"
    # content from *cdir* and written to *odir* ('home' -> index.html).
    # NOTE(review): files are opened without being closed, the bare
    # excepts swallow every error (not just missing files), and `titles`
    # is computed but never passed to the template — confirm intent.
    for prefix in prefixes:
        # open/ reposition file pointer
        template_html = open(basefile).read()
        # initialize template
        template = Template(template_html)
        # Get content if no content file exists set variable to " "
        try:
            if DEBUG:
                print("DEBUG: utils.py.render().open(", cdir + prefix + "_main.html).read()")
            main_html = open(cdir + prefix + "_main.html").read()
        except:
            main_html = " "
        try:
            if DEBUG:
                print("DEBUG: utils.py.render().open(", cdir + prefix + "_msg.html.read()")
            msg_html = open(cdir + prefix + "_msg.html").read()
        except:
            msg_html = " "
        titles = getTitles(prefixes)
        links = getPath(prefixes)
        if DEBUG:
            print("DEBUG: utils.py.render().links:", links)
            print("DEBUG: utils.py.render().main_html=", main_html)
            print("DEBUG: utils.py.render().msg_html=", msg_html)
        result = template.render(
            title=prefix,
            content_main=main_html,
            content_msg=msg_html,
            links=links,
        )
        # Write out template
        if DEBUG:
            print("DEBUG: utils.py.render().print to->", odir + prefix + '.html' )
        if prefix == 'home':
            open(odir + 'index.html', 'w+').write(result)
        else:
            open(odir + prefix + '.html', 'w+').write(result)
| true |
c445bd1c63603e037c346e2f07215b5b9910408b | Python | geovanij2/UFSC | /ES1/app.py | UTF-8 | 7,818 | 3.015625 | 3 | [] | no_license | import pygame
import Client
from time import sleep
class App():
    """Pygame front-end for the Truco card game client.

    Owns the window, the card images, and a Client instance that holds
    all game state; draws the board each frame and forwards clicks.
    """
    def __init__(self):
        """Initialise pygame, the window, colours, images and the client."""
        pygame.init()
        pygame.font.init()
        (width, height) = (800, 600)
        self.black = (0,0,0)
        self.white = (255, 255, 255)
        self.bg_green = (0,100,0)
        # NOTE(review): (0,255,0) is green in RGB — the name 'blue' is
        # misleading (several of these colour names don't match values).
        self.blue = (0,255,0)
        self.inactive_blue = (0,180,0)
        self.light_blue = (29,231,241)
        self.light_grey = (192,192,192)
        self.dark_grey = (140,140,140)
        self.screen = pygame.display.set_mode((width, height))
        pygame.display.set_caption("Truco")
        self.clock = pygame.time.Clock()
        self.init_graphics()
        (self.card_width, self.card_height) = self.faced_down_card.get_size()
        self.truco_game = Client.Client()
    def init_graphics(self):
        """Load the card-back image and one image per card (suit+rank)."""
        self.faced_down_card = pygame.image.load("Deck3.gif").convert()
        self.hor_faced_down_card = pygame.transform.rotate(self.faced_down_card, -90)
        card_names = [suit + rank for suit in 'DSHC' for rank in '4567QJK123']
        self.image_dict = {}
        for card in card_names:
            self.image_dict[card] = pygame.image.load(card+'.gif').convert()
    def draw_board(self):
        """Draw hands, thrown cards, the turned card and the Truco button."""
        if self.truco_game.running:
            player = self.truco_game.num
            turned_card = self.truco_game.turned_card
            team_mate_card = self.truco_game.get_board_card((player+2)%4)
            right_player_card = self.truco_game.get_board_card((player+1)%4)
            left_player_card = self.truco_game.get_board_card((player+3)%4)
            my_card = self.truco_game.get_board_card(player)
            # my hand
            for i, card in enumerate(self.truco_game.me.hand):
                self.card_button(i*80 + 285, 494, self.card_width, self.card_height, self.light_blue, self.image_dict[card.suit + card.rank], i)
            # oposite players hand
            for i in range(self.truco_game.players_number_of_cards[(self.truco_game.num+2)%4]):
                self.screen.blit(self.faced_down_card, (i*80 + 285, 10))
            # player to the right
            for i in range(self.truco_game.players_number_of_cards[(self.truco_game.num+1)%4]):
                self.screen.blit(self.hor_faced_down_card, (694, i*80 +185))
            # player to the left
            for i in range(self.truco_game.players_number_of_cards[(self.truco_game.num+3)%4]):
                self.screen.blit(self.hor_faced_down_card, (10, i*80 + 185))
            # draws thrown cards
            if team_mate_card is not None:
                self.screen.blit(self.image_dict[team_mate_card.suit + team_mate_card.rank], (365, 120))
            if right_player_card is not None:
                self.screen.blit(self.image_dict[right_player_card.suit + right_player_card.rank], (550, 250))
            if left_player_card is not None:
                self.screen.blit(self.image_dict[left_player_card.suit + left_player_card.rank], (180, 250))
            if my_card is not None:
                self.screen.blit(self.image_dict[my_card.suit + my_card.rank], (365, 384))
            if turned_card is not None:
                self.screen.blit(self.image_dict[turned_card.suit + turned_card.rank], (405, 250))
                self.screen.blit(self.faced_down_card, (325, 250))
            self.button("Trucar", 625, 515, 150, 60, self.dark_grey, self.light_grey, self.truco_game.ask_truco)
    def draw_HUD(self):
        """Draw the turn indicator and both teams' scores."""
        if self.truco_game.me.turn:
            # create font
            my_font_64 = pygame.font.SysFont(None, 64)
            # create text surface
            label = my_font_64.render("Sua vez!", 1, self.white)
            # draw surface
            self.screen.blit(label, (40,500))
        if self.truco_game.running:
            my_font_32 = pygame.font.SysFont(None, 32)
            my_team_score = my_font_32.render("Nós: " + str(self.truco_game.me.score), 1, self.white)
            other_team_score = my_font_32.render("Eles: " + str(self.truco_game.other_team_score), 1, self.white)
            self.screen.blit(my_team_score, (50, 40))
            self.screen.blit(other_team_score, (50, 70))
    def card_button(self, x, y, width, height, color, card_image, index):
        """Draw one hand card; a click on it plays the card at *index*."""
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        if x + width > mouse[0] > x and y + height > mouse[1] > y:
            # hover highlight: a slightly larger rectangle behind the card
            pygame.draw.rect(self.screen, color, (x-3, y-3, width+6, height+6))
            if click[0] == 1:
                self.truco_game.play_card(index)
        self.screen.blit(card_image, (x, y))
    def button(self, msg, x, y, w, h, ic, ac, action=None):
        """Draw a labelled button (ic idle / ac hover) and run *action* on click."""
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        if x+w > mouse[0] > x and y+h > mouse[1] > y:
            pygame.draw.rect(self.screen, ac, (x, y, w, h))
            if click[0] == 1 and action != None:
                action()
        else:
            pygame.draw.rect(self.screen, ic, (x, y, w, h))
        my_font = pygame.font.SysFont(None, 32)
        text_surf, text_rect = self.text_objects(msg, my_font)
        text_rect.center = ((x+(w/2)), (y+(h/2)))
        self.screen.blit(text_surf, text_rect)
    def text_objects(self, text, font):
        """Return (surface, rect) for *text* rendered in black."""
        textSurface = font.render(text, True, self.black)
        return textSurface, textSurface.get_rect()
    def draw_truco_asked_screen(self):
        """Draw the accept/refuse screen shown while a Truco call is pending."""
        if self.truco_game.truco_asked:
            player = self.truco_game.num
            turned_card = self.truco_game.turned_card
            team_mate_card = self.truco_game.get_board_card((player+2)%4)
            right_player_card = self.truco_game.get_board_card((player+1)%4)
            left_player_card = self.truco_game.get_board_card((player+3)%4)
            my_card = self.truco_game.get_board_card(player)
            self.screen.fill((0,100,0))
            # my hand
            for i, card in enumerate(self.truco_game.me.hand):
                self.card_button(i*80 + 285, 494, self.card_width, self.card_height, self.light_blue, self.image_dict[card.suit + card.rank], i)
            # oposite players hand
            for i, card in enumerate(self.truco_game.team_mate_cards):
                self.screen.blit(self.image_dict[card.suit + card.rank], (i*80 + 285, 10))
            # player to the right
            for i in range(self.truco_game.players_number_of_cards[(self.truco_game.num+1)%4]):
                self.screen.blit(self.hor_faced_down_card, (694, i*80 +185))
            # player to the left
            for i in range(self.truco_game.players_number_of_cards[(self.truco_game.num+3)%4]):
                self.screen.blit(self.hor_faced_down_card, (10, i*80 + 185))
            # draws thrown cards
            if team_mate_card is not None:
                self.screen.blit(self.image_dict[team_mate_card.suit + team_mate_card.rank], (365, 120))
            if right_player_card is not None:
                self.screen.blit(self.image_dict[right_player_card.suit + right_player_card.rank], (550, 250))
            if left_player_card is not None:
                self.screen.blit(self.image_dict[left_player_card.suit + left_player_card.rank], (180, 250))
            if my_card is not None:
                self.screen.blit(self.image_dict[my_card.suit + my_card.rank], (365, 384))
            if turned_card is not None:
                self.screen.blit(self.image_dict[turned_card.suit + turned_card.rank], (405, 250))
                self.screen.blit(self.faced_down_card, (325, 250))
            self.button("Cai!", 10, 530, 100, 60, self.inactive_blue, self.blue, self.truco_game.accept_truco)
            self.button("Correr", 130, 530, 100, 60, self.dark_grey, self.light_grey, self.truco_game.refuse_truco)
    def print_test(self):
        """Debug helper; prints a fixed marker string."""
        print("teste")
    def update(self):
        """Run one frame; returns 1 once either team reaches 12 points."""
        if self.truco_game.me.score >= 12 or self.truco_game.other_team_score >= 12:
            return 1
        self.truco_game.just_played -= 1
        # pumps client and server so it looks for new events/messages
        self.truco_game.read_network()
        # 60 FPS
        self.clock.tick(60)
        # clear the screen
        self.screen.fill((0,100,0))
        # draw cards
        self.draw_board()
        # draw truco screen
        self.draw_truco_asked_screen()
        # draw HUD
        self.draw_HUD()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                exit()
        # update the screen
        pygame.display.flip()
    def finished(self):
        """Show the end screen (won/lost) and block until the window closes."""
        if self.truco_game.me.won:
            self.screen.fill((0,100,0))
            msg = "Você venceu!"
        else:
            self.screen.fill((100,0,0))
            msg = "Você perdeu!"
        my_font = pygame.font.SysFont(None, 64)
        text_surf, text_rect = self.text_objects(msg, my_font)
        text_rect.center = (400,300)
        self.screen.blit(text_surf, text_rect)
        while True:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    exit()
            pygame.display.flip()
# Entry point: run frames until update() reports the game is over (returns 1).
a = App()
while True:
    if a.update() == 1:
        break
a.finished() | true |
3562bf927bee8a96dc694a074a7edaf10b3937c8 | Python | KseniaZikova/Case_04 | /main.py | UTF-8 | 3,692 | 3.1875 | 3 | [] | no_license | # Developers: Zikova K. 60%, Bateneva M. 80%, Shlapakova K. 90%
import os
def acceptCommand():  # validate the menu choice
    """
    Prompt repeatedly and return the user's choice once it is one of the
    digits '1'-'7'.

    BUG FIX: the original used `s in '1234567'`, a substring test, which
    also accepted multi-digit inputs like '12' and the empty string.
    """
    valid = ('1', '2', '3', '4', '5', '6', '7')
    while True:
        s = input('Выберите пункт меню: ')
        if s in valid:
            return s
def runCommand(command, path):
    """
    Execute one numbered menu command.  Command 7 only returns the exit
    message; unknown commands return None, as before.
    """
    if command == 7:
        return 'Выход выполнен.'
    # lambdas defer the lookups so missing helpers only matter when called
    actions = {
        1: lambda: moveUp(),
        2: lambda: up(),
        3: lambda: down(),
        4: lambda: countFiles(path),
        5: lambda: countBytes(path),
        6: lambda: findFiles(path),
    }
    action = actions.get(command)
    return action() if action is not None else None
def countFiles(path, a=0):
    """
    Return the number of files in the whole tree rooted at *path*, added
    to the starting total *a*.

    BUG FIX: whenever the tree contained a subdirectory, the original
    recursed with the *same* path, so it never terminated
    (RecursionError); a single os.walk() pass already visits every
    directory.
    """
    for root, dirs, files in os.walk(path):
        a += len(files)
    return a
def countBytes(path, a=0):
    """
    Return the total size in bytes of every file under *path*, skipping
    the contents of any '.idea' directory, added to the running total *a*.

    BUG FIXES vs. the original:
      * sizes are taken with the full path (os.path.join(root, name));
        the original passed the bare filename, which only works when the
        current working directory happens to be the file's directory;
      * the tree is walked once instead of recursing with the same path
        (which never terminated for trees with subdirectories);
      * only the '.idea' directory itself is pruned — the original
        skipped every directory that merely *contained* an '.idea' entry.
    """
    for root, dirs, files in os.walk(path):
        # prune '.idea' in place so os.walk does not descend into it
        dirs[:] = [d for d in dirs if d != '.idea']
        for name in files:
            a += os.path.getsize(os.path.join(root, name))
    return a
def up():  # go up one directory level
    # NOTE: mutates the process-wide current working directory, then
    # prints the full tree of the new directory.
    os.chdir('..')
    path = os.getcwd()
    for root, dirs, files in os.walk(path):
        print('root:', root, 'dirs:', dirs, 'files:', files)
def down():
    # Ask for a folder name and chdir into it, re-prompting until the
    # name is valid.
    # NOTE(review): the path is joined with a literal backslash, so this
    # only works on Windows; the bare except also hides unrelated errors.
    papka = input('Введите имя папки, в которую хотите перейти: ')
    while True:
        try:
            a = os.getcwd()
            os.chdir(r'%s\%s'%(a, papka))
            j = os.getcwd()
            print(j)
            break
        except:
            papka = input('Такой папки не существует, введите еще раз: ')
def moveUp():  # list the current directory tree
    """Walk the current working directory and print every level."""
    current = os.getcwd()
    for root, dirs, files in os.walk(current):
        print('root:', root, 'dirs:', dirs, 'files:', files)
def findFiles(path):
    """
    Ask the user for a (partial) file name and return the full paths of
    every matching file under *path*.

    BUG FIXES: the original returned after the first match, and built the
    result with os.path.abspath(name), which resolves against the current
    working directory instead of the directory the file was found in.
    """
    target = input('Введите имя файла: ')
    matches = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if target in name:
                matches.append(os.path.join(root, name))
    return matches
def main():
    """
    Top-level menu loop: show the menu, read a command, execute it and
    print the result; command 7 exits after its message is printed.

    FIX: the original duplicated the MENU literal and the whole
    prompt/dispatch sequence both before and inside the while loop; a
    single loop with a break on command 7 produces the identical
    prompt/output sequence.
    """
    MENU = 'C:\Python34\n' \
           '1. Просмотр каталога\n' \
           '2. На уровень вверх \n' \
           '3 На уровень вниз\n' \
           '4 Количество файлов и каталогов\n' \
           '5 Размер текущего каталога (в байтах)\n' \
           '6 Поиск файла\n' \
           '7 Выход из программы\n'
    while True:
        print(MENU)
        a = int(input('Выберите пункт меню: '))
        path = os.getcwd()
        print(runCommand(a, path))
        if a == 7:
            break
# Script entry point.
if __name__ == '__main__':
    main()
| true |
c2ab0bcfa84f86e5266d17823d0b868b6b49657a | Python | DHANUSHVARMA1/IBMLabs | /HCF.py | UTF-8 | 330 | 3.5 | 4 | [] | no_license | def compute_hcf ( x , y ):
if x>y :
smaller = y
else:
smaller = x
for i in range ( 1, smaller+1 ):
if(x%i == 0 ) and ( y%i == 0):
hcf = i
return hcf
# Read the two integers and print their highest common factor.
num1 = int(input("Enter number 1 : "))
num2 = int(input("Enter number 2 : "))
print("HCF = ",compute_hcf(num1,num2))
| true |
2bdc4e84ae9f968ad447a5dd2b52d353372c7e90 | Python | kannanmavila/coding-interview-questions | /interview_cake/5_ways_to_make_change.py | UTF-8 | 714 | 3.921875 | 4 | [] | no_license | def ways_to_make_change_bottom_up(n, denominations):
"""O(Nk) solution - uses the coins bottom-up.
Start with a particular coin, update all amounts
up till n, and never come back to that coin again.
"""
ways = [1] + [0] * n
for coin in denominations:
# For amounts higher than coin
for amount in xrange(coin, n+1):
# No. of ways you can make amount
# using coins used so far, plus
# new ways by using this'coin' and
# making the remainder in its own ways
ways[amount] += ways[amount-coin]
return ways[n]
# Python 2 demo calls; expected answers in the trailing comments.
print ways_to_make_change_bottom_up(5, [1, 3, 5]) # 3
print ways_to_make_change_bottom_up(4, [1, 3, 5]) # 2
print ways_to_make_change_bottom_up(4, [1, 3, 2]) # 4
| true |
841648b8d687252b5903018a079b08dfac36bdb4 | Python | VladimirMerkul/pythonProject1 | /lessons/lesson 2/task_2_1.py | UTF-8 | 87 | 3.5 | 4 | [] | no_license | x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# n holds the third element (index 2) of the list defined above
n = x[2]
print (x)
print ("third_number=" + str(n)) | true |
24ba9f46ad36dee3393684c2225b7de0b4a2d2ea | Python | rrwielema/ezgoogleapi | /ezgoogleapi/bigquery/schema.py | UTF-8 | 1,258 | 2.84375 | 3 | [
"MIT"
] | permissive | from typing import List
import pandas as pd
from datetime import datetime
class SchemaTypes:
    '''
    Class to easily assign a data type to a BigQuery-table column.
    Attribute names mirror pandas dtype names (upper-cased, '[ns]'
    stripped); values are the corresponding BigQuery type strings.
    '''
    INT64 = 'INT64'
    BOOL = 'BOOL'
    FLOAT64 = 'FLOAT64'
    STRING = 'STRING'
    OBJECT = 'STRING'
    BYTES = 'BYTES'
    TIMESTAMP = 'TIMESTAMP'
    DATE = 'DATE'
    TIME = 'TIME'
    DATETIME = 'DATETIME'
    DATETIME64 = 'DATETIME'
    TIMEDELTA = 'DATETIME'
    INTERVAL = 'INTERVAL'
    GEOGRAPHY = 'GEOGRAPHY'
    NUMERIC = 'NUMERIC'
    BIGNUMERIC = 'BIGNUMERIC'
    JSON = 'JSON'


def schema(df: pd.DataFrame) -> List[list]:
    '''
    Create a BigQuery table schema based on the data types of a pandas DataFrame.

    :param df: pandas DataFrame to base the schema on.
    :return: List of [column name, BigQuery type] pairs; unknown dtypes
        fall back to STRING.
    '''
    known_types = SchemaTypes.__dict__
    result = []
    for column, dtype in zip(df.columns.to_list(), df.dtypes.values):
        key = dtype.name.replace('[ns]', '').upper()
        if key in known_types:
            result.append([column, known_types[key]])
        else:
            result.append([column, SchemaTypes.STRING])
    return result
| true |
61b8cf85f90dc571de51a47f7fdc4ed6ca05c052 | Python | Hitesh20/OpenCV-Practice-Learning | /contours.py | UTF-8 | 459 | 2.578125 | 3 | [] | no_license | import cv2
import numpy as np
# Detect and draw contours on the OpenCV logo image.
img = cv2.imread('opencv-logo.png', 1)  # 1 = load as BGR color
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Binarize: pixels > 127 become 255 (flag 0 == cv2.THRESH_BINARY).
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
# NOTE(review): the 2-value unpacking matches OpenCV 4.x; OpenCV 3.x
# returned (image, contours, hierarchy) -- confirm the installed version.
contours, herarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
print("No of contours " + str(len(contours)))
# Draw every contour (-1 = all) in BGR color (10, 215, 80), thickness 3.
cv2.drawContours(img, contours, -1, (10, 215, 80), 3)
cv2.imshow('Original_Image', img)
cv2.imshow('Gray_Image', imgray)
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
fbb42fa1e15f3c2ca8169d7377c012e1a2d6d445 | Python | skjha1/SRM-Python-Elab-solution | /01 Session input Output.py | UTF-8 | 5,319 | 4.09375 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Input and Output
#
#
# Q. 1 Multiplication Table
#
# Write a python program to print the table of a given number
#
# In[1]:
# Code By Shivendra
num = int(input(""))
for i in range(1, 11):
    print(num,"x",i,"=",num*i)
# Q. 2: Height Units
#
#
# Many people think about their height in feet and inches, even in some countries that primarily use the metric system.
#
# Write a program that reads a number of feet from the user, followed by a number of inches.
#
# Once these values are read, your program should compute and display the equivalent number of centimeters.
#
# Hint One foot is 12 inches. One inch is 2.54 centimeters.
# In[10]:
# Code By Shivendra
h_ft = float(input(""))
h_inch = float(input(""))
# total inches = entered inches + 12 inches per foot
h_inch =h_inch+ h_ft * 12
h_cm = h_inch * 2.54
print("Your height in centimeters is %.2f" % h_cm)
# Q. 3: Sum of N series
#
#
# Python Program to Read a number n and Compute n+nn+nnn
# In[11]:
# Code By Shivendra
num = int (input (''))
# NOTE(review): this computes n + n^2 + n^3; the stated series n+nn+nnn is
# digit concatenation (e.g. 2 + 22 + 222) -- verify against the grader.
ans = num+num*num+num*num*num
print (ans)
# Q. 4: eLab Temperature Scale
#
# Write a program that begins by reading a temperature from the user in degreesCelsius.
#
# Then your program should display the equivalent temperature in degrees Fahrenheit.
#
# The calculations needed to convert between different units of temperature is your task
# In[12]:
c = float (input(''))
# F = (9C + 160) / 5, i.e. the usual 1.8*C + 32
f= (9*c+ (32*5))/5
print ('The fahrenheit value for %.1f celsius is %.2f fahrenheit'%(c,f))
# Q. 5: Traingle
#
#
# The area of a triangle can be computed using the following formula, where b is the length of the base of the triangle, and h is its height:
#
# area = b* h/2
#
# Write a program that allows the user to enter values for b and h. The program should then compute and display the area of a triangle with base length b and height h.
#
# In[13]:
# Code By Shivendra
b = int (input (''))
h= int (input (''))
area = b* h/2
print ('The area of the triangle is',area)
# Q. 6: Grocery Shop
#
#
# QUESTION DESCRIPTION
#
# Write a program to display a grocery bill of the product purchased in the small market by John by getting following input from the user
#
# Get the product name Get the price of the product(Price per Unit) Get the quantity of the product purchased
#
# Input and Output Format:
#
# Refer sample input and output for formatting specification.
#
# All float values are displayed correct to 2 decimal places.
#
# All text in bold corresponds to input and the rest corresponds to output.
# In[14]:
# Code By Shivendra
soap= str(input (''))
price = float (input (''))
item= int (input (''))
bill= price * item
print ("Product Details")
print (soap)
print (price)
print (item)
print ('Bill:',bill)
# Q. 7: Salary Calculator
# QUESTION DESCRIPTION
#
# Help Raja to calculate a first salary that he got from the organisation , he was confused with an salary credited in his account .
#
# He asked his friend Ritu to identify how salary pay got calculated by giving the format of salary.
#
# His basic pay (to be entered by user) and Ritu developed a software to calculate the salary pay,with format given as below
#
# HRA=80% of the basic pay,
#
# dA=40% of basic pay
#
# bonus = 25 % of hra
#
# Input and Output Format:
#
# Refer sample input and output for formatting specification.
#
# All float values are displayed correct to 2 decimal places.
#
# All text in bold corresponds to input and the rest corresponds to output
# In[15]:
# Code By Shivendra
n = float(input(''))
hra=n*0.8;
da=n*0.4;
bonus=hra*0.25;
# gross pay = basic + HRA + DA + bonus
total=hra+da+bonus+n;
print("Total Salary=",total);
# Q. 8: Day Old Bread
# QUESTION DESCRIPTION
#
# A bakery sells loaves of bread for 185 rupees each. Day old bread is discounted by 60 percent. Write a program that begins by reading the number of loaves of day old bread being purchased from the user.
#
# Then your program should display the regular price for the bread, the discount because it is a day old, and the total price.
#
# All of the values should be displayed using two decimal places, and the decimal points in all of the numbers should be aligned when reasonable values are entered by the user.
# In[16]:
# Code By Shivendra
no= int(input (''))
print ('Loaves Discount')
tmp=no*185;
print("Regular Price",tmp);
# discount = 60% of the regular price
tmp2=no*185*0.6;
print("Total Discount",tmp2);
amount=tmp-tmp2
print("Total Amount to be paid",amount);
# Q. 9: Area and Perimeter of Circle
# QUESTION DESCRIPTION
#
# Program to calculate area and perimeter of circle
# Note:
# Define pi as 3.14
# In[17]:
# Code By shivendra
r= float(input (''))
pi=3.14
area= pi*r*r
perimeter= 2*pi*r
print ('Area=',area)
print ('Perimeter=',perimeter)
# Q. 10: Body Mass Index
# QUESTION DESCRIPTION
#
# Write a program that computes the body mass index (BMI) of an individual.
#
# Your program should begin by reading a height and weight from the user. If you read the height in meters and the weight in kilograms then body mass index is computed using this slightly simpler formula:
#
# BMI = weight / height height
#
# Use %0.2f in the final output value
# In[18]:
# Code By Shivendra
h= float(input (''))
w= float (input (''))
bmi= (w/(h*h))
# NOTE(review): hard-coded special case for h == 1.69 only changes the
# display precision -- looks tuned to match one specific grader test case.
if h== 1.69:
    print ('The BMI IS {:.02f}'.format(bmi))
else :
    print('The BMI IS {:.01f}'.format(bmi))
| true |
0de26086e030e4d64f511a1578c8b00e267e3ce8 | Python | RobbeVer/BW_Stitching | /Test_files/Movement models/finding_translation_rotation.py | UTF-8 | 1,857 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
from skimage import data
from skimage.registration import phase_cross_correlation
from skimage.registration._phase_cross_correlation import _upsampled_dft
from skimage.transform import warp_polar, rotate, rescale
from scipy.ndimage import fourier_shift
# Folder containing the two overlapping photographs to register
# (raw strings avoid invalid-escape warnings for the Windows backslashes;
# the resulting path strings are identical to the originals).
path_images = os.path.expanduser('~') + r'\Pictures\Stitching_images'
image = cv2.imread(path_images + r'\IMG_0781.JPG')
offset_image = cv2.imread(path_images + r'\IMG_0782.JPG')

# Registration is done on single-channel images.
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
offset_image_gray = cv2.cvtColor(offset_image, cv2.COLOR_BGR2GRAY)

# Build 5 Gaussian-pyramid levels (each pyrDown halves the resolution).
# BUGFIX: the original loop fed the *full-resolution* grayscale image into
# pyrDown on every iteration, so only a single level of downsampling was
# ever produced; downsample the previous level instead.
image_down = image_gray
offset_image_down = offset_image_gray
for i in range(0, 5):
    image_down = cv2.pyrDown(image_down)
    offset_image_down = cv2.pyrDown(offset_image_down)

# Translation offset between the two images via phase correlation.
shift, error, diffphase = phase_cross_correlation(image_down, offset_image_down)

# Rotation: in polar coordinates a rotation becomes a shift along the
# angular axis, which phase correlation can measure.
radius = 705
image_polar = warp_polar(image_down, radius=radius, multichannel=False)
rotated_polar = warp_polar(offset_image_down, radius=radius, multichannel=False)
rotation, error, diffphase = phase_cross_correlation(image_polar, rotated_polar)

fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1)
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image_down, cmap='gray')
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image_down, cmap='gray')
ax2.set_axis_off()
ax2.set_title('Offset image')
# Show the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image_down) * np.fft.fft2(offset_image_down).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
print(f"Detected pixel offset (y, x): {shift}")
print(f"Detected a rotation of: {rotation[0]}")
plt.show() | true |
0ad9cf0feffe0cb9b3440fa38c1b9fc846e1f072 | Python | datafolklabs/cement | /cement/core/config.py | UTF-8 | 6,248 | 3.078125 | 3 | [
"BSD-3-Clause"
] | permissive | """Cement core config module."""
import os
from abc import abstractmethod
from ..core.interface import Interface
from ..core.handler import Handler
from ..utils.fs import abspath
from ..utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class ConfigInterface(Interface):
    """
    This class defines the Config Interface. Handlers that implement this
    interface must provide the methods and attributes defined below. In
    general, most implementations should sub-class from the provided
    :class:`ConfigHandler` base class as a starting point.
    """
    class Meta:
        """Handler meta-data."""
        #: The string identifier of the interface.
        interface = 'config'
    @abstractmethod
    def parse_file(self, file_path):
        """
        Parse config file settings from ``file_path``. Returns True if the
        file existed, and was parsed successfully. Returns False otherwise.
        Args:
            file_path (str): The path to the config file to parse.
        Returns:
            bool: ``True`` if the file was parsed, ``False`` otherwise.
        """
        pass  # pragma: nocover
    @abstractmethod
    def keys(self, section):
        """
        Return a list of configuration keys from ``section``.
        Args:
            section (list): The config section to pull keys from.
        Returns:
            list: A list of keys in ``section``.
        """
        pass  # pragma: nocover
    @abstractmethod
    def get_sections(self):
        """
        Return a list of configuration sections.
        Returns:
            list: A list of config sections.
        """
        pass  # pragma: nocover
    @abstractmethod
    def get_dict(self):
        """
        Return a dict of the entire configuration.
        Returns:
            dict: A dictionary of the entire config.
        """
        # Consistency fix: every other abstract method in this interface has
        # an explicit (coverage-excluded) body; this one was missing it.
        pass  # pragma: nocover
    @abstractmethod
    def get_section_dict(self, section):
        """
        Return a dict of configuration parameters for ``section``.
        Args:
            section (str): The config section to generate a dict from (using
                that sections' keys).
        Returns:
            dict: A dictionary of the config section.
        """
        pass  # pragma: nocover
    @abstractmethod
    def add_section(self, section):
        """
        Add a new section if it doesn't already exist.
        Args:
            section: The section label to create.
        Returns:
            None
        """
        pass  # pragma: nocover
    @abstractmethod
    def get(self, section, key):
        """
        Return a configuration value based on ``section.key``. Must honor
        environment variables if they exist to override the config... for
        example ``config['myapp']['foo']['bar']`` must be overridable by the
        environment variable ``MYAPP_FOO_BAR``.... Note that ``MYAPP_`` must
        prefix all vars, therefore ``config['redis']['foo']`` would be
        overridable by ``MYAPP_REDIS_FOO`` ... but
        ``config['myapp']['foo']['bar']`` would not have a double prefix of
        ``MYAPP_MYAPP_FOO_BAR``.
        Args:
            section (str): The section of the configuration to pull key values
                from.
            key (str): The configuration key to get the value for.
        Returns:
            unknown: The value of the ``key`` in ``section``.
        """
        pass  # pragma: nocover
    @abstractmethod
    def set(self, section, key, value):
        """
        Set a configuration value based at ``section.key``.
        Args:
            section (str): The ``section`` of the configuration to pull key
                value from.
            key (str): The configuration key to set the value at.
            value: The value to set.
        Returns:
            None
        """
        pass  # pragma: nocover
    @abstractmethod
    def merge(self, dict_obj, override=True):
        """
        Merges a dict object into the configuration.
        Args:
            dict_obj (dict): The dictionary to merge into the config
            override (bool): Whether to override existing values or not.
        Returns:
            None
        """
        pass  # pragma: nocover
    @abstractmethod
    def has_section(self, section):
        """
        Returns whether or not the section exists.
        Args:
            section (str): The section to test for.
        Returns:
            bool: ``True`` if the configuration section exists, ``False``
                otherwise.
        """
        pass  # pragma: nocover
class ConfigHandler(ConfigInterface, Handler):
    """
    Base implementation class for config handlers.
    """
    @abstractmethod
    def _parse_file(self, file_path):
        """
        Parse a configuration file at ``file_path`` and store it.  Concrete
        handler implementations (sub-classes of this class) must provide
        this method.
        Args:
            file_path (str): The file system path to the configuration file.
        Returns:
            bool: ``True`` if file was read properly, ``False`` otherwise
        """
        pass  # pragma: nocover
    def parse_file(self, file_path):
        """
        Ensure we are using the absolute/expanded path to ``file_path``, and
        then call ``self._parse_file`` to parse config file settings from it,
        overwriting existing config settings.
        Sub-classes should generally override ``_parse_file`` (the raw
        parsing) and leave the existence check and logging to this wrapper.
        Args:
            file_path (str): The file system path to the configuration file.
        Returns:
            bool: ``True`` if the given ``file_path`` was parsed, and ``False``
                otherwise.
        """
        file_path = abspath(file_path)
        # Guard clause: nothing to parse when the file does not exist.
        if not os.path.exists(file_path):
            LOG.debug("config file '%s' does not exist, skipping..." %
                      file_path)
            return False
        LOG.debug("config file '%s' exists, loading settings..." %
                  file_path)
        return self._parse_file(file_path)
| true |
920c427456f7cbfc81a55015798a2f2072630909 | Python | rajKarra69420/CryptoPals | /Cryptopals Set 4/set4_challenge26.py | UTF-8 | 1,111 | 2.84375 | 3 | [
"MIT"
] | permissive | import set3_challenge18 as ctr
import random
import os
# Fresh random 128-bit AES key and 64-bit CTR nonce, generated once per run.
key = os.urandom(16)
nonce = os.urandom(8)
def ctr_encrypt(message):
    """Wrap *message* in the challenge's comment strings, quote out ';' and
    '=', and encrypt the result under CTR with the module key/nonce.

    NOTE(review): the quoting is applied to the entire plaintext (prefix and
    suffix included), not only the user data -- preserved as-is.
    """
    prefix = b'comment1=cooking%20MCs;userdata='
    suffix = b'comment2=%20like%20a%20pound%20of%20bacon'
    sanitized = (prefix + message + suffix).replace(b';', b'%3b').replace(b'=', b'%3d')
    return ctr.transform(sanitized, key, nonce)
def find_admin_role(ciphertext):
    """Decrypt *ciphertext* and report whether it grants the admin role."""
    decrypted = ctr.transform(ciphertext, key, nonce)
    return b';admin=true;' in decrypted
# This is even easier than CBC because we don't have to worry about the
# previous block -- we can just XOR at the exact index we want to change.
def get_admin(encryptor):
    """Forge a ciphertext that decrypts to contain ``;admin=true;``.

    CTR is a stream cipher, so flipping a bit of ciphertext byte i flips
    the same bit of plaintext byte i.  We submit a payload with NUL bytes
    where the forbidden ';' and '=' characters belong, locate where the
    payload landed (first byte differing from the encryption of an empty
    message), and XOR the target characters straight into the ciphertext
    (xor(0x00, c) == c).

    Args:
        encryptor: callable mapping plaintext bytes to ciphertext bytes.

    Returns:
        bytearray: tampered ciphertext whose decryption contains
        ``;admin=true;``.

    Raises:
        ValueError: if the injection offset cannot be located (the original
            code crashed with ``TypeError`` on a ``None`` index instead).
    """
    empty = bytearray(encryptor(b''))
    ciphertext = bytearray(encryptor(b'\x00admin\x00true\x00'))
    # First index where the payload changed the ciphertext == offset of our
    # injected bytes within the plaintext.
    i = next((i for i in range(len(empty)) if empty[i] != ciphertext[i]), None)
    if i is None:
        raise ValueError('could not locate the injection offset in the ciphertext')
    ciphertext[i] ^= ord(b';')
    ciphertext[i + 6] ^= ord(b'=')
    ciphertext[i + 11] ^= ord(b';')
    return ciphertext
if __name__ == "__main__":
    # End-to-end demo: forge a ciphertext, then check the oracle sees admin.
    print(find_admin_role(get_admin(ctr_encrypt)))
| true |
a97bbf401f59c0d410ebf04a5c7d57d4e87805db | Python | gistable/gistable | /all-gists/1599710/snippet.py | UTF-8 | 484 | 2.84375 | 3 | [
"MIT"
] | permissive | plugins = {}
def get_input_plugins():
    """Return the registered (name, class) pairs for 'input' plugins."""
    input_registry = plugins['input']
    return input_registry.items()
class Plugin(object):
    """Base registerable plugin.

    Subclasses set ``plugin_class`` to the registry bucket they belong to
    and call ``register(name)`` to publish themselves.
    """
    plugin_class = None
    @classmethod
    def register(cls, name):
        # BUGFIX: lazily create the bucket for this plugin class.  The
        # original did ``plugins[cls.plugin_class][name] = cls`` which raised
        # KeyError on the very first registration, since the module-level
        # ``plugins`` dict starts out empty.
        plugins.setdefault(cls.plugin_class, {})[name] = cls
class InputPlugin(Plugin):
    """Base class for input plugins; subclasses implement process_input."""
    plugin_class = 'input'
    def process_input(self, something):
        raise NotImplementedError
class ExamplePlugin(InputPlugin):
    """Trivial demo input plugin: stringifies whatever it is given."""
    def process_input(self, something):
        return str(something)
ExamplePlugin.register('example') | true |
f521ae72896cd9e1407004ad81212dea5f9dc818 | Python | DrZhouKarl/LiDAR-Road-Analysis | /viewer.py | UTF-8 | 1,351 | 2.546875 | 3 | [] | no_license | import project_utils as ut
import argparse
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import sys
def viewer(chunk, draw_elevation=False, c=None, s=1, get=False):
    """Plot a point cloud as a 2D scatter or a 3D elevation scatter.

    Args:
        chunk: sequence of [x, y, z] points (latitude, longitude, elevation).
        draw_elevation (bool): if True, draw a 3D scatter; otherwise a 2D
            scatter (``c`` is only consulted on the 2D path).
        c: color spec for the 2D plot.  ``None`` -> color by the z column;
            an int -> a single uniform color; anything else is handed to
            matplotlib unchanged.
        s: marker size.
        get (bool): if True, return the ``plt`` module instead of showing.

    Returns:
        The ``matplotlib.pyplot`` module when ``get`` is True, else None.
    """
    np_chunk = np.array(chunk)
    fig = plt.figure()
    if draw_elevation:
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(np_chunk[:, 0], np_chunk[:, 1], np_chunk[:, 2], s=s)
    else:
        ax = fig.add_subplot(111)
        ax.grid(True, linestyle='-', color='0.75')
        # BUGFIX: was `c == None`, which performs an elementwise comparison
        # (and fails truthiness) when an array is passed for `c`.
        if c is None:
            c = np_chunk[:, 2]
        # BUGFIX: was `type(c) == int`; isinstance is the correct check.
        elif isinstance(c, int):
            c = [1] * len(np_chunk[:, 0])
        ax.scatter(np_chunk[:, 0], np_chunk[:, 1], c=c, s=s, edgecolors='none')
    plt.xlabel('Latitude')
    plt.ylabel('Longitude')
    if get:
        return plt
    plt.show()
if "__main__" == __name__:
parser = argparse.ArgumentParser()
parser.add_argument("file", help='file to display', type=str)
parser.add_argument("--w3d", help='flag to show the points in a 3D graph', action='store_true')
parser.add_argument("x", help='field of the file that contains the x value', type=int)
parser.add_argument("y", help='field of the file that contains the y value', type=int)
parser.add_argument("z", help='field of the file that contains the z value', type=int)
args = parser.parse_args()
field = [args.x, args.y, args.z]
chunk = ut.loadPoints(args.file, field)
viewer(chunk, draw_elevation=args.w3d)
| true |
42ddabc63643491c3b4de68d082e8e91df574bcb | Python | Orik236/Web_Orka236 | /week8/informatics/Problems4/E.py | UTF-8 | 73 | 3.375 | 3 | [] | no_license | n = int(input())
cnt = 0
# Halve n until it reaches 0; the number of halvings equals n's bit length.
while n != 0:
    n //= 2
    cnt+=1
print(cnt) | true |
9a07877c5b1f9b1888e17559a201d67585771758 | Python | gschen/sctu-ds-2020 | /1906101061-杨超/day0331/test1.py | UTF-8 | 936 | 4.09375 | 4 | [] | no_license | class Stack(object):
def __init__(self,limit = 10):#创建空栈
self.stack = []
self.limit = limit
def is_empty(self):#判断是否为空,空则返回true
return len(self.stack)==0
def push(self,date):#入栈,使数据成为新的栈顶
if len(self.stack)>=self.limit:
print("栈溢出")
else:
self.stack.append(date)
def pop(self):#弹出栈顶,若栈为空,排除异常
if self.stack:
return self.stack.pop()
else:
print("空栈不能被弹出")
def top(self):#查看栈顶,若栈为空,排除异常
if self.stack:
return self.stack[-1]
    def size(self):  # return the number of items currently on the stack
        return len(self.stack)
# Quick manual demo: push four items, then inspect and drain the stack.
stack = Stack()
stack.push(1)
stack.push(3)
stack.push(5)
stack.push(7)
print(stack.size())
print(stack.is_empty())
print(stack.pop())
print(stack.pop())
print(stack.pop()) | true |
5dd5a7153cb2060d67a50fe3bfd914a674b2fd7c | Python | cnbdragon/GundamPy | /bubbles.py | UTF-8 | 1,412 | 2.53125 | 3 | [] | no_license | import nimble
import random as rand
import numpy as np
from nimble import cmds
from nimble import cmds as cmd
# Maya (via nimble) scene script: builds a plate and animates random
# "bubbles" rising from it by keyframing jittered positions over time.
decRange = np.arange(-1,1,.1)    # per-keyframe horizontal jitter choices
decRange2 = np.arange(0,1,.1)    # per-keyframe upward (y) jitter choices
r = 2
a = 2.0*r
y = (0, 1, 0) # y up
#polyPlane -w 1 -h 1 -sx 10 -sy 10 -ax 0 1 0 -cuv 2 -ch 1;
p = cmd.polyPlane(
    w=100, h=100, sx=10, sy=10, ax=y, cuv=3, ch=1,n='HotPlate')[0]
cmd.select(p)
cmd.move(0,2,0,r=True)
cmd.setKeyframe()
c = cmds.polySphere(
    r=r, sx=10, sy=10, ax=y, cuv=2, ch=1, n='Bubble')[0]
cmds.select(c)
cmd.setKeyframe()
'''
cmd.setKeyframe()
for i in range(1,300,5):
    x = rand.choice(decRange)
    y = 5*rand.choice(decRange2)
    z = rand.choice(decRange)
    cmd.currentTime(i)
    cmd.move(x, y, z, r=True)
    cmd.setKeyframe()
'''
# NOTE(review): randBubbleCount is computed but never used below -- the
# loop always creates exactly 100 bubbles.
randBubbleCount = rand.choice(range(10,1000))
for i in range(0,100,1):
    # Random start position on the plate for this bubble.
    randX = rand.choice(range(-50,50,1))
    randZ = rand.choice(range(-50,50,1))
    r = 2
    a = 2.0*r
    yUp = (0, 1, 0) # y up
    b = cmds.polySphere(
        r=r, sx=10, sy=10, ax=yUp, cuv=2, ch=1, n='Bubble')[0]
    cmds.select(b)
    # Each bubble starts rising at a random frame within the timeline.
    startTime = rand.choice(range(1, 600, 1))
    cmd.currentTime(1)
    cmd.move(randX, 0, randZ, a=True)
    cmd.setKeyframe()
    # Keyframe a jittered, mostly-upward drift every other frame.
    for j in range(startTime, 600, 2):
        x = rand.choice(decRange)
        y = 5*rand.choice(decRange2)
        z = rand.choice(decRange)
        cmd.currentTime(j)
        cmd.move(x, y, z, r=True)
        cmd.setKeyframe()
| true |
2b2c3d9f9f87922732eb76a2004372948ca041e5 | Python | rhtm123/ProjectEuler | /prob_33.py | UTF-8 | 1,169 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 11 21:57:46 2017
@author: fun
"""
def str_array(n):
    """Return the decimal digits of *n*, least-significant digit first.

    Returns an empty list for n <= 0.
    """
    digits = []
    while n > 0:
        n, digit = divmod(n, 10)
        digits.append(digit)
    return digits
# Project Euler 33: find the "digit-cancelling" fractions i/j (i < j, both
# two-digit, no zero digits) and multiply them together.
# NOTE: str_array returns digits least-significant first, so array[0] is the
# units digit and array[1] is the tens digit.
# NOTE(review): the equality tests compare float ratios with == -- this
# happens to work for these small integers but is fragile in general.
nom = 1
de = 1
for i in range(11,99):
    for j in range(i+1,99):
        array1 = str_array(i)
        array2 = str_array(j)
        if array1[0]==0 or array2[0]==0 or array1[1]==0 or array2[1]==0:
            pass
        elif array1[1]==array2[1] and array1[0]/array2[0] == float(i/j):
            print("i1 %d and j %d"%(i,j))
            nom = i*nom
            de = j*de
        elif array1[1]==array2[0] and array1[0]/array2[1] == float(i/j):
            print("i2 %d and j %d"%(i,j))
            nom = i*nom
            de = j*de
        elif array1[0]==array2[1] and array1[1]/array2[0] == float(i/j):
            print("i3 %d and j %d"%(i,j))
            nom = i*nom
            de = j*de
        elif array1[0]==array2[0] and array1[1]/array2[1] == float(i/j):
            print("i4 %d and j %d"%(i,j))
            nom = i*nom
            de = j*de
        else :
            pass
#answer=100
print(int(de/nom)) | true |
7baa170740d5d96483d91678bc7f842ff8ba2a78 | Python | KPW10452025/SQlite-and-Python | /02_insert_one_record.py | UTF-8 | 983 | 3.6875 | 4 | [] | no_license | # reference from
# "SQLite Databases With Python - Full Course"
# https://youtu.be/byHcYRpMgI4
import sqlite3
# Open the database created by the companion script (01_create...).
connection = sqlite3.connect('customer.db')
cursor = connection.cursor()
# Insert One Record Into Table
cursor.execute("INSERT INTO customers VALUES ('Ban', 'Takahashi', 'ban@gmail.com')")
cursor.execute("INSERT INTO customers VALUES ('Tom', 'Smith', 'tom@gmail.com')")
print("command executed successfully!")
# Persist the inserts, then release the connection.
connection.commit()
connection.close()
# Every time you run the code, you will write the data into the database.
# It means these data will not be overwritten. It will keep repeating input to the database.
# In this case, we use main01.py to create a database and it's table.
# When we run the main02.py, we'll have Ban and Tom in the database.
# Ban Takahashi ban@gmail.com
# Tom Smith tom@gmail.com
# When we run the main02.py again, we'll have repeated data.
# Ban Takahashi ban@gmail.com
# Tom Smith tom@gmail.com
# Ban Takahashi ban@gmail.com
# Tom Smith tom@gmail.com
| true |
a5a36119780301a504ea70af5c1374f84e98c57d | Python | veloquant/buildcloth | /test/test_dependency_checks.py | UTF-8 | 5,359 | 2.59375 | 3 | [] | no_license | from buildcloth.err import DependencyCheckError
from buildcloth.dependency import DependencyChecks
from unittest import TestCase, skip
import sys
import os
import time
def touch(fname, times=None):
    """Create *fname* if missing and set its (atime, mtime) to *times*."""
    open(fname, 'a').close()
    os.utime(fname, times)
def write(fname, content):
    """Overwrite *fname* with *content*."""
    with open(fname, 'w') as handle:
        handle.write(content)
def breath():
    """Sleep briefly so successive file mtimes differ (macOS's coarser
    timestamp resolution needs a longer nap)."""
    delay = 0.25 if sys.platform == 'darwin' else 0.01
    time.sleep(delay)
class TestDependencyChecking(TestCase):
    """Exercises DependencyChecks.check() under each check_method
    ('mtime', 'hash', 'force', 'ignore') using two scratch files.

    NOTE(review): setUp/tearDown are decorated with @classmethod but unittest
    invokes them as instance methods, so ``self`` is bound to the class and
    state lands on the class object -- confirm this is intentional.
    NOTE(review): ``assertTrue(self.d.check_method, 'mtime')`` passes the
    expected value as the *msg* argument, so it never compares anything;
    ``assertEqual`` was probably intended (the pattern repeats below).
    """
    @classmethod
    def setUp(self):
        self.d = DependencyChecks()
        self.fn_a = 'fn_a'
        self.fn_b = 'fn_b'
    @classmethod
    def tearDown(self):
        # Remove the scratch files left behind by the previous test.
        for fn in [ self.fn_a, self.fn_b ]:
            if os.path.exists(fn):
                os.remove(fn)
    def ensure_clean(self):
        # Precondition for every test: neither scratch file exists yet.
        self.assertFalse(os.path.exists(self.fn_a))
        self.assertFalse(os.path.exists(self.fn_b))
    def test_basic(self):
        self.ensure_clean()
        touch(self.fn_a)
        self.assertTrue(os.path.exists(self.fn_a))
        self.assertFalse(os.path.exists(self.fn_b))
    def test_basic_alt(self):
        self.ensure_clean()
        touch(self.fn_b)
        self.assertTrue(os.path.exists(self.fn_b))
        self.assertFalse(os.path.exists(self.fn_a))
    def test_default_method(self):
        self.ensure_clean()
        self.assertTrue(self.d.check_method, 'mtime')
    def test_setting_valid_methods(self):
        self.ensure_clean()
        for method in ['force', 'ignore', 'hash', 'mtime']:
            self.d.check_method = method
            self.assertTrue(self.d.check_method, method)
        self.d.check_method = 'mtime'
        self.assertTrue(self.d.check_method, 'mtime')
    def test_setting_invalid_method(self):
        self.ensure_clean()
        with self.assertRaises(DependencyCheckError):
            self.d.check_method = 'magic'
    def test_mtime_rebuild(self):
        # Dependency (fn_b) newer than target (fn_a) -> rebuild needed.
        self.ensure_clean()
        touch(self.fn_a)
        breath()
        touch(self.fn_b)
        self.assertTrue(self.d.check_method, 'mtime')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_mtime_no_rebuild(self):
        # Target newer than dependency -> no rebuild.
        self.ensure_clean()
        touch(self.fn_b)
        breath()
        touch(self.fn_a)
        self.assertTrue(self.d.check_method, 'mtime')
        self.assertFalse(self.d.check(self.fn_a, self.fn_b))
    def test_mtime_rebuild_no_target(self):
        self.ensure_clean()
        touch(self.fn_b)
        self.assertTrue(self.d.check_method, 'mtime')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_hash_rebuild(self):
        # Different contents -> rebuild under 'hash'.
        self.ensure_clean()
        write(self.fn_a, 'aaa')
        write(self.fn_b, 'bbb')
        self.d.check_method = 'hash'
        self.assertTrue(self.d.check_method, 'hash')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_hash_rebuild_ignoring_update_order(self):
        self.ensure_clean()
        write(self.fn_b, 'bbb')
        breath()
        breath()
        write(self.fn_a, 'aaa')
        self.d.check_method = 'hash'
        self.assertTrue(self.d.check_method, 'hash')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_hash_no_rebuild(self):
        # Identical contents -> no rebuild under 'hash'.
        self.ensure_clean()
        write(self.fn_b, 'aaa')
        write(self.fn_a, 'aaa')
        self.d.check_method = 'hash'
        self.assertTrue(self.d.check_method, 'hash')
        self.assertFalse(self.d.check(self.fn_a, self.fn_b))
    def test_hash_rebuild_no_target(self):
        self.ensure_clean()
        write(self.fn_b, 'aa')
        self.d.check_method = 'hash'
        self.assertTrue(self.d.check_method, 'hash')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_force_non_existing(self):
        # 'force' always reports a rebuild, regardless of file state.
        self.ensure_clean()
        self.d.check_method = 'force'
        self.assertTrue(self.d.check_method, 'force')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_force_with_files(self):
        self.ensure_clean()
        touch(self.fn_a)
        breath()
        touch(self.fn_b)
        self.d.check_method = 'force'
        self.assertTrue(self.d.check_method, 'force')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_force_with_reversed_files(self):
        self.ensure_clean()
        touch(self.fn_b)
        breath()
        touch(self.fn_a)
        self.d.check_method = 'force'
        self.assertTrue(self.d.check_method, 'force')
        self.assertTrue(self.d.check(self.fn_a, self.fn_b))
    def test_ignore_non_existing(self):
        # 'ignore' never reports a rebuild, regardless of file state.
        self.ensure_clean()
        self.d.check_method = 'ignore'
        self.assertTrue(self.d.check_method, 'ignore')
        self.assertFalse(self.d.check(self.fn_a, self.fn_b))
    def test_ignore_with_files(self):
        self.ensure_clean()
        touch(self.fn_a)
        breath()
        touch(self.fn_b)
        self.d.check_method = 'ignore'
        self.assertTrue(self.d.check_method, 'ignore')
        self.assertFalse(self.d.check(self.fn_a, self.fn_b))
    def test_ignore_with_reversed_files(self):
        self.ensure_clean()
        touch(self.fn_b)
        breath()
        touch(self.fn_a)
        self.d.check_method = 'ignore'
        self.assertTrue(self.d.check_method, 'ignore')
        self.assertFalse(self.d.check(self.fn_a, self.fn_b))
| true |
896b384ec40207dfca95686e22ccb8b9bf7939f4 | Python | omgimanerd/experimental | /ritcs/look-at-datman/sql/generate_populate.py | UTF-8 | 5,684 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
from xml.etree import cElementTree as ElementTree
import random
import time
# Value pools used by the random data generator below.
STREET_SUFFIXES = ['RD', 'ST', 'BLVD', 'AVE', 'LN', 'DR']
STATES = ['NY', 'NH', 'MA', 'PA', 'NJ', 'VT', 'ME', 'OH',
          'IN', 'IL', 'RI', 'CT']
# NOTE(review): 'Inine' is presumably a typo for 'Inline' -- it is a data
# value emitted into the SQL, so it is left unchanged here.
ENGINES = ['V8', 'V6', 'W', 'Inine', 'Electric', 'Diesel', 'Petrol']
COLORS = ['Dark Blue', 'Red', 'Beige', 'White', 'Black', 'Grey',
          'Dark Grey', 'Blue', 'Green', 'Orange', 'Yellow', 'Purple',
          'Pearl', 'Dark Green', 'Light Grey']
TRANSMISSIONS = ['Standard', 'Automatic']
# Postgres statements that bump each serial sequence past the explicit ids
# inserted above, so future inserts do not collide.
AUTO_ID_UPDATER = '''
select setval('customers_id_seq', (select max(id) from customers) + 1);
select setval('dealers_id_seq', (select max(id) from dealers) + 1);
select setval('sales_id_seq', (select max(id) from sales) + 1);
'''
def chance(p=0.5):
    """Bernoulli trial: return True with probability *p*."""
    roll = random.random()
    return roll < p
def postprocess_value(v):
    """Render a Python value as a SQL literal.

    ints are rendered bare, strings are single-quoted (with embedded single
    quotes doubled, per SQL), and None becomes ``null``.

    Note: exact ``type(...) is`` checks are kept deliberately so that e.g.
    bools are rejected rather than rendered as ints.

    Raises:
        ValueError: for any unsupported type.
    """
    if type(v) is int:
        return str(v)
    elif type(v) is str:
        # BUGFIX: double embedded single quotes so the literal stays valid
        # SQL even when the value itself contains a quote.
        return "'{}'".format(v.replace("'", "''"))
    elif v is None:
        return 'null'
    else:
        # Replaces the old debug print + raise ValueError('fuck') with a
        # single, informative error.
        raise ValueError(
            'cannot render {!r} (type {}) as a SQL literal'.format(
                v, type(v).__name__))
def generate_sql(tablename, data):
    """Build newline-joined INSERT statements for *data* into *tablename*.

    Column order is taken from the first row; every row is assumed to have
    the same keys.

    Raises:
        ValueError: if *data* is empty (replaces a bare ``assert``, which
            silently disappears under ``python -O``).
    """
    if not data:
        raise ValueError('data must contain at least one row')
    keys = data[0].keys()
    columns = ', '.join(map(str, keys))
    sql = []
    for row in data:
        values = ', '.join(postprocess_value(row[key]) for key in keys)
        sql.append('insert into {}({}) values({});'.format(
            tablename, columns, values))
    return '\n'.join(sql)
if __name__ == '__main__':
    # Generate a deterministic (seeded) fake car-dealership dataset and
    # emit it as INSERT statements into populate.sql.
    random.seed(0)
    # First whitespace-separated token of each line is a first name.
    with open('data/male.txt') as f:
        male = [line.strip().split(' ')[0] for line in f]
    with open('data/female.txt') as f:
        female = [line.strip().split(' ')[0] for line in f]
    names = male + female
    # cars.xml: <car><name>...</name><models><model>...</model>...</models></car>
    # -- presumably; indices car[0]/car[1] assume that child order.
    cars_file = ElementTree.parse('data/cars.xml')
    brand_models = {}
    for car in cars_file.getroot().iter('car'):
        car_name = car[0].text
        brand_models[car_name] = [model.text for model in car[1]]
    # 500 random customers with a gender-matched first name.
    customers = []
    for i in range(500):
        if chance():
            name = random.choice(male)
            gender = 'Male'
        else:
            name = random.choice(female)
            gender = 'Female'
        customers.append({
            'id': i,
            'name': name,
            'phone': random.randint(1000000000, 9999999999),
            'gender': gender,
            'income': random.randint(30000, 1000000),
            'address_street': '{} {} {}'.format(
                random.randint(100, 9999), random.choice(names),
                random.choice(STREET_SUFFIXES)
            ),
            'address_state': random.choice(STATES),
            'address_zipcode': random.randint(10000, 99999)
        })
    # Brands come from a quote-stripped CSV of name,country,reliability.
    brands = []
    with open('data/brands.csv') as f:
        for line in f:
            name, country, reliability = line.strip().replace(
                '\'', '').split(',')
            brands.append({
                'name': name,
                'country': country,
                'reliability': reliability
            })
    dealers = []
    for i in range(30):
        dealers.append({
            'id': i,
            'name': random.choice(names),
            'phone': random.randint(1000000000, 9999999999)
        })
    # Many-to-many link table: each dealer carries 3-12 random brands.
    brand_dealer = []
    for dealer in dealers:
        brand_dealer += [{
            'dealer': dealer['id'],
            'brand': brand['name']
        } for brand in random.sample(brands, random.randint(3, 12))]
    # 1000 sales with a random close date in 2015-2017.
    # NOTE(review): time.mktime returns floats; random.randint requires
    # int arguments on Python >= 3.12, so this line needs int() casts there.
    sales = []
    for i in range(1000):
        b_time = time.mktime(time.strptime('1/1/2015', '%m/%d/%Y'))
        e_time = time.mktime(time.strptime('12/31/2017', '%m/%d/%Y'))
        n_time = time.localtime(random.randint(b_time, e_time))
        timestring = time.strftime('%m/%d/%Y', n_time)
        sales.append({
            'id': i,
            'close_date': timestring,
            'customer': random.choice(customers)['id'],
            'dealer': random.choice(dealers)['id']
        })
    # 1-4 sold vehicles per sale...
    vehicles = []
    for sale in sales:
        dealer = random.choice(dealers)['id']
        owner = random.choice(customers)['id']
        for i in range(random.randint(1, 4)):
            brand = random.choice(brands)['name']
            vehicles.append({
                'color': random.choice(COLORS),
                'brand': brand,
                'model': random.choice(brand_models[brand]),
                'engine': random.choice(ENGINES),
                'transmission': random.choice(TRANSMISSIONS),
                'mileage': random.randint(0, 75000),
                'price': random.randint(1000, 150000),
                'sale': sale['id'],
                'dealer': dealer,
                'owner': owner
            })
    # ...plus ~20% extra unsold inventory (no sale/owner).
    for i in range(len(vehicles) // 5):
        brand = random.choice(brands)['name']
        vehicles.append({
            'color': random.choice(COLORS),
            'brand': brand,
            'model': random.choice(brand_models[brand]),
            'engine': random.choice(ENGINES),
            'transmission': random.choice(TRANSMISSIONS),
            'mileage': random.randint(0, 75000),
            'price': random.randint(1000, 150000),
            'sale': None,
            'dealer': random.choice(dealers)['id'],
            'owner': None
        })
    # Shuffle so VINs do not correlate with sold/unsold status.
    random.shuffle(vehicles)
    for i, vehicle in enumerate(vehicles):
        vehicle['vin'] = i
    # NOTE(review): sections are concatenated without a separating newline
    # (generate_sql has no trailing '\n'), so the last statement of one
    # section and the first of the next share a line in populate.sql.
    with open('populate.sql', 'w') as f:
        f.write(generate_sql('customers', customers))
        f.write(generate_sql('brands', brands))
        f.write(generate_sql('dealers', dealers))
        f.write(generate_sql('brand_dealer', brand_dealer))
        f.write(generate_sql('sales', sales))
        f.write(generate_sql('vehicles', vehicles))
        f.write(AUTO_ID_UPDATER)
| true |
fdce0f64d4d024e0653a98bd90b914e22d0374e1 | Python | qsoo/algorithm | /CKS/0824_day1/BOJ2491.py | UTF-8 | 922 | 3.625 | 4 | [] | no_license | # https://www.acmicpc.net/problem/2491
N = int(input())  # length of the sequence
sequence = list(map(int, input().split()))  # the sequence itself
max_bigger, max_smaller = 1, 1  # longest run lengths found so far
total = 1
# longest non-decreasing run
for idx in range(N - 1):  # stop one short to avoid indexing out of range
    # extend the run while the next element is >= the current one
    if sequence[idx] <= sequence[idx + 1]:
        total += 1
    else:
        total = 1  # reset
    # keep only the largest value seen
    if max_bigger <= total:
        max_bigger = total
# longest non-increasing run
total = 1  # reset
for idx in range(N - 1):  # stop one short to avoid indexing out of range
    if sequence[idx] >= sequence[idx + 1]:
        total += 1
    else:
        total = 1
    # keep only the largest value seen
    if max_smaller <= total:
        max_smaller = total
# 둘 중에 큰 값 가져오기
if max_bigger <= max_smaller:
print(max_smaller)
else:
print(max_bigger) | true |
8ead6edc68d544181f20a5302a4f3c0a1594f0f0 | Python | abhi204/cryptchat | /client.py | UTF-8 | 5,891 | 2.828125 | 3 | [] | no_license | import socket
import threading
import json
import os
import time
class Signal:
    """String constants for the UDP wire-protocol message types."""
    REGISTER_AND_WAIT = 'register' # register and wait for peer to connect to you
    REGISTER_AND_CONNECT = 'connect' # register and send the peername you want to connect to
    ACK_REGISTER = 'ack_register'  # server acknowledged a plain registration
    PEER_INFO = 'peer_info'  # server relays the peer's name/address
    PUNCH = 'punch'  # UDP hole-punching probe
    ACK_PUNCH = 'ack_punch'  # reply confirming a punch got through
    PING = 'ping'  # keep-alive
    CHAT = 'chat_msg'  # chat payload
class State:
    """Base class for the client's connection state machine.

    Each concrete state interprets incoming message dicts via ``handle``
    and moves the owning client to its next state.
    """
    def __init__(self, client):
        self.client = client
    def handle(self, message):
        """Process one decoded message dict; implemented by subclasses."""
        raise NotImplementedError
    def log_unhandled_signal(self, signal: str):
        # Typo fix in the log-only warning text: 'recieved' -> 'received'.
        print(f"[Warning] Ignoring signal: {signal} received in state: {type(self).__name__} ")
class InitialState(State):
    """State right after the registration request is sent to the server."""
    def __init__(self, client):
        super().__init__(client)
    def handle(self, message):
        signal = message.get('signal')
        if signal == Signal.ACK_REGISTER: # client is the initiator of connection
            self.client.change_state(RegisteredState)
        elif signal == Signal.PEER_INFO: # client is not the initiator
            self.client.set_peer(peername=message.get('peer'), peer_addr=message.get('peer_addr'))
            self.client.change_state(PeerConnectingState)
        else:
            self.log_unhandled_signal(signal)
class RegisteredState(State):
    """Registered with the server; pinging it while waiting for a peer."""
    def __init__(self, client):
        super().__init__(client)
        self.client.set_ping_addr(self.client.server_addr)
        self.client.enable_ping_activity(True) # start server pings
    def handle(self, message):
        signal = message.get('signal')
        if signal == Signal.PEER_INFO:
            # Server told us who to connect to; begin hole punching.
            self.client.set_peer(peername=message.get('peer'), peer_addr=message.get('peer_addr'))
            self.client.change_state(PeerConnectingState)
        else:
            self.log_unhandled_signal(signal)
class PeerConnectingState(State):
    """Peer address known; fire UDP hole punches until one side gets through."""

    def __init__(self, client):
        super().__init__(client)
        self.client.enable_ping_activity(False)   # no more server keep-alives
        self.client.enable_punch_activity(True)   # start the punch timer

    def handle(self, message):
        """Finish the handshake as soon as either punch direction succeeds."""
        incoming = message.get('signal')
        if incoming not in (Signal.PUNCH, Signal.ACK_PUNCH):
            self.log_unhandled_signal(incoming)
            return
        if incoming == Signal.PUNCH:
            # The peer's punch reached us first; acknowledge it.
            self.client.send_msg(signal=Signal.ACK_PUNCH)
        self.client.change_state(PeerConnectedState)
class PeerConnectedState(State):
    """
    Terminal state: the direct UDP path to the peer is open.
    """

    def __init__(self, client):
        super().__init__(client)
        self.client.enable_punch_activity(False)  # punching is done
        # Keep the peer-to-peer NAT mapping alive with slower pings.
        self.client.set_ping_addr(self.client.peer_addr, interval=10)
        self.client.enable_ping_activity(True)

    def handle(self, message):
        """
        No-op: once connected, the Client itself consumes the messages.
        """
        pass
class Client:
    """UDP chat client that registers with a rendezvous server and then opens
    a direct peer-to-peer connection via UDP hole punching.

    The handshake is modelled as a state machine (the ``State`` subclasses);
    ``__init__`` blocks until ``PeerConnectedState`` is reached.
    """
    # Class-level defaults; overwritten per-instance as the handshake advances.
    peer_addr = None
    ping_addr = None
    perform_ping = False
    perform_punch = False
    def __init__(self, username, server_addr, peername=None):
        # username: our name on the rendezvous server
        # server_addr: (host, port) of the rendezvous server
        # peername: peer to connect to; None means register and wait for one
        self.state = InitialState(self)
        self.username = username
        self.server_addr = server_addr
        self.peername = peername
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if self.peername:
            # We are the initiator: ask the server to pair us with `peername`.
            self.sock.sendto(
                self._create_message(
                    signal = Signal.REGISTER_AND_CONNECT,
                    user = self.username,
                    peer = self.peername,
                ),
                self.server_addr
            )
        else:
            # No peer given: register and wait for someone to connect to us.
            self.sock.sendto(
                self._create_message(
                    signal = Signal.REGISTER_AND_WAIT,
                    user = self.username
                ),
                self.server_addr
            )
        # Pump incoming messages through the state machine until connected.
        while type(self.state) != PeerConnectedState:
            message = self.__get_response()
            self.state.handle(message)
        print(f'Connection Established Between users {self.username}:{self.peername}')
    def _create_message(self, signal, **kwargs):
        # Serialize a signal plus any extra fields as UTF-8 encoded JSON bytes.
        return json.dumps(dict( signal=signal, **kwargs)).encode('utf-8')
    def __get_response(self, buff_size=1024):
        # Blocking receive; assumes one whole JSON document per datagram.
        response = self.sock.recv(buff_size)
        return json.loads(response.decode('utf-8'))
    def __ping(self):
        # Send one keep-alive, then re-arm the timer while pinging is enabled.
        self.sock.sendto(
            self._create_message(signal=Signal.PING),
            self.ping_addr
        )
        if self.perform_ping:
            threading.Timer(self.ping_interval, self.__ping).start()
    def __punch(self):
        # Fire a hole-punching probe at the peer every 0.5 s while enabled.
        self.sock.sendto(
            self._create_message(signal=Signal.PUNCH),
            self.peer_addr
        )
        if self.perform_punch:
            threading.Timer(interval=0.5, function=self.__punch).start()
    def change_state(self, state: State):
        # DEBUG: print(f'Changing state {type(self.state).__name__} -> {state.__name__}')
        self.state = state(client=self)
    def get_state(self):
        # Current State instance (mainly useful for tests/debugging).
        return self.state
    def set_peer(self, peername, peer_addr: tuple):
        # peer_addr arrives as a JSON list; normalize to a tuple for sendto().
        self.peername = peername
        self.peer_addr = tuple(peer_addr)
    def set_ping_addr(self, addr: tuple, interval: int=5):
        # Target (server or peer) and period, in seconds, for keep-alive pings.
        self.ping_addr = addr
        self.ping_interval = interval
    def enable_ping_activity(self, perform: bool):
        # Start (True) or stop (False) the recurring keep-alive timer.
        self.perform_ping = perform
        if self.perform_ping:
            self.__ping()
    def enable_punch_activity(self, perform: bool):
        # Start (True) or stop (False) the recurring hole-punch timer.
        self.perform_punch = perform
        if self.perform_punch:
            self.__punch()
    def send_msg(self, signal=Signal.CHAT, msg=''): # Send msg to peer
        self.sock.sendto(
            self._create_message(signal=signal, msg=msg),
            self.peer_addr
        )
| true |
704e2f2a9b1b4d1e87305129d316b862f2d4481f | Python | maotouying665/SortAlgorithms | /MergeSort.py | UTF-8 | 928 | 3.625 | 4 | [] | no_license | # 归并排序,体现分治的思想
# Two phases: split, then merge.
# Slicing keeps the code readable but adds copying overhead; not strictly necessary.
# Merging uses roughly double the storage - be careful with very large data sets.
def mergesort(list):
    """Sort ``list`` with recursive merge sort and return the sorted list.

    NOTE: the parameter name shadows the builtin ``list`` (kept as-is for
    interface compatibility). Splitting is done via slicing, so copies are made.
    """
    if len(list) <= 1:
        return list
    mid = len(list) // 2
    left = mergesort(list[:mid])
    right = mergesort(list[mid:])
    return merge(left, right)
def merge(l, r):
    """Merge two already-sorted lists ``l`` and ``r`` into one sorted list."""
    merged = []
    i = j = 0
    while i < len(l) and j < len(r):
        # ``<=`` takes equal elements from ``l`` first, keeping the sort stable.
        if l[i] <= r[j]:
            merged.append(l[i])
            i += 1
        else:
            merged.append(r[j])
            j += 1
    # Exactly one side can still have leftovers; append them in order.
    merged.extend(l[i:])
    merged.extend(r[j:])
    return merged
# Demo: sort a sample list and print the result.
alist=[3,66,2,1,67,88,76,55,35,67]
print(mergesort(alist))
948af6cd033fe9df476c8ade7d940c70770d8967 | Python | zhenh65671/Math_Quiz_v2 | /start_GUI.py | UTF-8 | 3,258 | 3.234375 | 3 | [] | no_license | from tkinter import *
from functools import partial # to prevent unwanted windows
import random
class Start:
    """Start screen: asks how many questions to do and which quiz type."""
    def __init__(self, parent):
        # Root frame holding every start-screen widget.
        self.start_frame = Frame(padx=10, pady=10)
        self.start_frame.grid()
        # Maths Heading (row 0)
        self.maths_quiz_label = Label(self.start_frame, text="Maths quiz",
                                      font="Arial 19 bold")
        self.maths_quiz_label.grid()
        # initial Instructions (row 1)
        # NOTE(review): the user-facing text below contains typos
        # ("he amount", missing spaces); flagged only, since changing it
        # alters runtime output.
        self.maths_instructions = Label(self.start_frame, font="Arial 10 italic",
                                        text="Please enter he amount"
                                             "of questions(between 1 and 30)"
                                             " in the box below. then choose"
                                             "the type of questions you want to do ", wrap=300, justify=LEFT,
                                        padx=10, pady=10)
        self.maths_instructions.grid(row=1, column=0)
        # Entry box... (row 1)
        self.start_quiz_entry = Entry(self.start_frame, font="Arial 16 bold", width=5)
        self.start_quiz_entry.grid(row=1, column=1)
        # Instructions (row 2)
        # NOTE(review): this rebinds self.maths_instructions, dropping the
        # Python reference to the row-1 label (the widget itself survives).
        self.maths_instructions = Label(self.start_frame, font="Arial 10 italic",
                                        text="Please select the type of questions"
                                             " below to start.")
        self.maths_instructions.grid(row=2)
        # button frame (row 3)
        self.quiz_frame = Frame(self.start_frame)
        self.quiz_frame.grid(row=3)
        # Quiz-type buttons; each passes its type id (1/2/3) to to_quiz().
        quiz_font = "Arial 12 bold"
        # Red Addition quiz button...
        self.Addition_button = Button(self.quiz_frame, text="Addition",
                                      font=quiz_font, bg="red",
                                      command=lambda: self.to_quiz(1))
        self.Addition_button.grid(row=3,column=0, padx=20)
        # Orange Subtraction quiz button...
        self.subtraction_button = Button(self.quiz_frame, text="Subtraction",
                                         font=quiz_font, bg="#FF9393",
                                         command=lambda: self.to_quiz(2))
        self.subtraction_button.grid(row=3, column=1, padx=20)
        # Yellow multiplication quiz button...
        self.multiplication_button = Button(self.quiz_frame, text="Multiplication",
                                            font=quiz_font, bg="#FFFF33",
                                            command=lambda: self.to_quiz(3))
        self.multiplication_button.grid(row=3, column=2, padx=20)
    def to_quiz(self, quiz):
        # quiz: 1=Addition, 2=Subtraction, 3=Multiplication.
        # Entry contents are passed through as a string (no validation here).
        number_of_question = self.start_quiz_entry.get()
        Quiz(self, quiz, number_of_question)
class Quiz:
    """Quiz window logic (work in progress: only disables the start button)."""
    def __init__(self, partner, quiz, number_of_question):
        # partner: the Start frame that opened this quiz
        # quiz: 1=Addition, 2=Subtraction, 3=Multiplication (see Start buttons)
        # number_of_question: raw string from the Start entry box
        print(quiz)
        print(number_of_question)
        # Disable Addition button.
        # Fixed: Start names the attribute ``Addition_button`` (capital A);
        # ``partner.addition_button`` raised AttributeError.
        partner.Addition_button.config(state=DISABLED)
        # Initialise variables
        self.n = IntVar()
# main routine: build the root window, show the Start frame, run the event loop
if __name__ == "__main__":
    root = Tk()
    root.title("Maths Quiz")
    something = Start(root)  # keep a reference so the frame is not collected
    root.mainloop()
2f95e76836352bf371099e4feea9a95c49fa9dc7 | Python | shahidshabir055/python_programs | /mass.py | UTF-8 | 196 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 7 11:06:28 2020
@author: eshah
"""
n=int(input())
k=int(input())
# NOTE(review): ``[1][2]`` indexes the one-element list ``[1]`` with 2 and
# raises IndexError at runtime. The intended value is unclear (possibly a
# 2-D list or an ``[n][k]`` lookup) - confirm with the author before fixing.
m=[1][2]
print(m)
#for i in range(0,n):
#    x,y=int(input())
| true |
364e75eca2fa9ebb836b3261f60408892ed372b3 | Python | Thukor/MazeSolver | /MazeSolving/Solver/MazeSolver.py | UTF-8 | 315 | 2.625 | 3 | [
"MIT"
] | permissive | from PathFinder import *
from pathfinding_algorithms import *
class MazeSolver:
    """Facade that runs a shortest-path search over a maze graph."""
    @staticmethod
    def solve_maze(maze):
        """Return the shortest path between the maze's possible-start nodes.

        Assumes the maze exposes ``nodes()`` and that the nodes flagged
        ``is_possible_start`` are the path endpoints - TODO confirm.
        """
        pf = PathFinder(nx_shortest_path)
        # Sort so the endpoints are passed in a deterministic order.
        start_end = sorted(node for node in maze.nodes() if node.is_possible_start)
        # Removed an unused local (``heuristic = manhattan_distance``) that
        # was never passed to the search.
        return pf.find_shortest_path(maze, *start_end)
07598f430aa15c103b45e36a001640ec55fcf037 | Python | Aasthaengg/IBMdataset | /Python_codes/p02879/s783193659.py | UTF-8 | 95 | 3.140625 | 3 | [] | no_license | a,b = list(map(int,input().split()))
# A single-digit multiplication table only covers factors up to 9.
if a <= 9 and b <= 9:
    print(a * b)
else:
    print(-1)
5311cffd4d88269a554134707f58d665cd7b6a75 | Python | jhmenke/dummyPy | /dummyPy/dummyPy.py | UTF-8 | 9,436 | 3.40625 | 3 | [
"MIT"
] | permissive | from collections import defaultdict
from pickle import load, dump
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix, hstack
def sort_mixed(levels):
    """Sort ``levels``, tolerating a mix of strings and numbers.

    Homogeneous input is sorted directly; when comparison raises a
    ``TypeError`` the values are ordered as: strings, then finite numbers,
    then NaNs (values of other types are dropped, matching historic behavior).
    """
    try:
        return sorted(levels)
    except TypeError:
        strings = sorted(x for x in levels if isinstance(x, str))
        numbers = sorted(x for x in levels
                         if isinstance(x, (int, float)) and not np.isnan(x))
        nans = [x for x in levels
                if isinstance(x, (int, float)) and np.isnan(x)]
        return strings + numbers + nans
class Encoder:
    """
    Helper class that one-hot encodes the levels of a single categorical
    variable.
    """
    def __init__(self):
        # Maps each known level to its column index; None until fit() runs.
        self.column_mapper = None
    def fit(self, levels):
        """Learn a stable level -> column-index mapping.

        Parameters
        ----------
        levels: set
            Unique levels of the categorical variable.
        """
        self.column_mapper = dict(map(reversed, enumerate(sort_mixed(levels))))
    def transform(self, column_data):
        """One-hot encode ``column_data``.

        Parameters
        ----------
        column_data: pandas Series object

        Returns a ``scipy.sparse.coo_matrix`` of shape
        ``(len(column_data), n_levels)``; unseen levels give all-zero rows.
        """
        n_rows = column_data.shape[0]
        n_cols = len(self.column_mapper)
        coords = [(row, self.column_mapper[level])
                  for row, level in enumerate(column_data)
                  if level in self.column_mapper]
        if not coords:
            # Nothing recognised: an all-zero matrix of the right shape.
            return coo_matrix((n_rows, n_cols))
        rows, cols = zip(*coords)
        return coo_matrix((np.ones(len(coords)), (rows, cols)),
                          shape=(n_rows, n_cols))
    def __eq__(self, other):
        return self.column_mapper == other.column_mapper
class OneHotEncoder:
    """
    A One Hot Encoder class that converts the categorical variables in a data frame
    to one hot encoded variables. It can also handle large data that is too big to fit
    in the memory by reading the data in chunks.

    Example
    -------
    The following example uses the kaggle's titanic data. It can be found here -
    `https://www.kaggle.com/c/titanic/data`
    This data is only 60 KB and it has been used for a demonstration purpose.
    This class also works well with datasets too large to fit into the machine
    memory.

    >>> from dummyPy import OneHotEncoder
    >>> import pandas as pd
    >>> encoder = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
    >>> data = pd.read_csv("titanic.csv", usecols=["Pclass", "Sex", "Age", "Fare", "Embarked"])
    >>> data.shape
    (891, 5)
    >>> encoder.fit(data)
    >>> X = encoder.transform(data)
    >>> X.shape
    (891, 11)
    >>> X
    array([[0.0, 0.0, 1.0, ..., 0.0, 0.0, 1.0],
           [1.0, 0.0, 0.0, ..., 1.0, 0.0, 0.0],
           [0.0, 0.0, 1.0, ..., 0.0, 0.0, 1.0],
           ...,
           [0.0, 0.0, 1.0, ..., 0.0, 0.0, 1.0],
           [1.0, 0.0, 0.0, ..., 1.0, 0.0, 0.0],
           [0.0, 0.0, 1.0, ..., 0.0, 1.0, 0.0]], dtype=object)
    >>> chunked_data = pd.read_csv("titanic.csv",
                                   usecols=["Pclass", "Sex", "Age", "Fare", "Embarked"],
                                   chunksize=10)
    >>> encoder2 = OneHotEncoder(categorical_columns=["Pclass", "Sex", "Embarked"])
    >>> encoder2.fit(chunked_data)
    >>> X = encoder2.transform(data)
    >>> X.shape
    (891, 11)
    """
    def __init__(self, categorical_columns=None, file_name=None):
        """
        Parameters
        ----------
        categorical_columns: list
            A list of the names of the categorical varibales in the data. All these columns
            must have dtype as string.

        file_name: string
            The file name to load a saved encoder from. When given,
            ``categorical_columns`` is ignored and state is restored from disk.
        """
        if file_name is None:
            # Fresh encoder: the categorical column list is mandatory.
            if categorical_columns is None:
                raise UserWarning("Either the categorical columns must be defined "
                                  "or a file_name for a saved encoder must be given")
            self.categorical_columns = categorical_columns
            self.unique_vals = defaultdict(set)
            self.encoders = {column_name: Encoder() for column_name in categorical_columns}
        elif isinstance(file_name, str):
            # Restore a previously pickled encoder.
            self.load(file_name)
        else:
            raise UserWarning("The file name to load a saved encoder should be a string to a readable file")
    def _update_unique_vals(self, data):
        # Accumulate the set of levels seen so far for every categorical column.
        for column_name in self.categorical_columns:
            for value in data[column_name]:
                self.unique_vals[column_name].add(value)
    def _fit_encoders(self):
        # Freeze one per-column Encoder from the accumulated unique levels.
        for column_name in self.categorical_columns:
            self.encoders[column_name].fit(self.unique_vals[column_name])
    def fit(self, data):
        """
        This method reads the categorical columns and gets the necessary
        one hot encoded column shapes.
        It can also read the data in chunks.

        Parameters
        ----------
        data: pandas.core.frame.DataFrame or pandas.io.parsers.TextFileReader
            The data can be either a pandas data frame or a pandas TextFileReader
            object. The TextFileReader object is created by specifying the
            chunksize parameter in pandas read_csv method.
            Use the TextFileReader object as input if the dataset is too large to
            fit in the machine memory.
        """
        if(isinstance(data, pd.core.frame.DataFrame)):
            self._update_unique_vals(data)
        else:
            # Chunked input: stream through it, accumulating levels chunk by chunk.
            for data_chunk in data:
                self._update_unique_vals(data_chunk)
        self._fit_encoders()
    def transform(self, data, dtype="pd"):
        """
        This method is used to convert the categorical values in your data into
        one hot encoded vectors. It convets the categorical columns in the data
        to one hot encoded columns and leaves the continuous variable columns as it is.

        Parameters
        ----------
        data: pandas data frame
            The data frame object that needs to be transformed.

        dtype: string
            "pd" - This will return a pandas dataframe.
            "np" - This will return a numpy array.
            "coo" - This will rerurn scipy.sparse.coo_matrix, which is memory-efficient
                    for categorical variable of which number of unique values are large.
        """
        # One sparse sub-matrix per column, stacked horizontally in the data's
        # column order: one-hot blocks for categorical columns, the raw values
        # (reshaped to a single column) for everything else.
        transformed_coo_matrix = hstack([self.encoders[column_name].transform(data[column_name])
                                         if column_name in self.categorical_columns
                                         else coo_matrix(data[column_name].values.reshape(-1, 1))
                                         for column_name in data.columns])
        if dtype == "np":
            return transformed_coo_matrix.toarray()
        elif dtype == "coo":
            return transformed_coo_matrix
        else:
            # For the titanic example, the Nested List mentioned below would look like -
            # [["Pclass_0", "Pclass_1", "Pclass_2"], ["Sex_female", "Sex_male"], ["Age"], ["Fare"],
            # ["Embarked_Q", "Embarked_nan", "Embarked_S", "Embarked_C"]]
            # It is flattened later.
            transformed_data_col_names = [item for sublist in
                                          # Nested List
                                          [[column_name] if column_name not in self.categorical_columns
                                           else ["{}_{}".format(column_name, x)
                                                 for x in sort_mixed(self.unique_vals[column_name])]
                                           for column_name in data.columns]
                                          for item in sublist]
            return pd.DataFrame(transformed_coo_matrix.toarray(), columns=transformed_data_col_names)
    def fit_transform(self, data):
        """
        This method calls fit and transform one after the other.
        Please note that unlike the fit method the fit_transform method
        can take only the pandas data frame as input.

        Parameters
        ----------
        data: pandas.core.frame.DataFrame
            A pandas data frame.
        """
        self.fit(data)
        return self.transform(data)
    def save(self, file_name):
        """
        Saves the encoder as a pickled binary file with the path file_name

        :param file_name: string
            The file name to use for the saved encoder.
        :return:
        """
        with open(file_name, "wb") as file:
            d = {
                "categorical_columns": self.categorical_columns,
                "unique_vals": self.unique_vals,
                "encoders": self.encoders
            }
            dump(d, file)
    def load(self, file_name):
        """
        Loads a pickled encoder from a file with the path file_name

        NOTE(review): unpickling is only safe for files you created yourself;
        never load an encoder file from an untrusted source.

        :param file_name: string
            The file name to load the saved encoder from.
        :return:
        """
        with open(file_name, "rb") as file:
            d = load(file)
            self.categorical_columns = d["categorical_columns"]
            self.unique_vals = d["unique_vals"]
            self.encoders = d["encoders"]
| true |
1bbc86260cb6d9bda7e266e982ab559c33bb6b85 | Python | ranog/python_work | /capitulo_19-Contas_de_usuario/blog/blogs/models.py | UTF-8 | 617 | 2.8125 | 3 | [] | no_license | from django.db import models
# Create your models here.
from django.contrib.auth.models import User
class BlogPost(models.Model):
    """
    A blog post written by a user.
    """
    title = models.CharField(max_length=200)  # headline, up to 200 characters
    text = models.TextField()  # post body
    date_added = models.DateTimeField(auto_now_add=True)  # set once, on creation
    owner = models.ForeignKey(User, on_delete=models.CASCADE)  # author; posts are deleted with the user
    def __str__(self):
        """
        Return a string representation of the model: the title,
        truncated to 50 characters with an ellipsis when longer.
        """
        if len(self.title) > 50:
            return self.title[:50] + "..."
        return self.title
| true |
66f2263947c168fc289be1b66e2338466a468ee9 | Python | thegrill/grill-names | /grill/names/__init__.py | UTF-8 | 10,132 | 2.8125 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
import uuid
import typing
import itertools
import collections
from datetime import datetime
import naming
try:
from pxr import Sdf
_USD_SUFFIXES = tuple(ext for ext in Sdf.FileFormat.FindAllFileFormatExtensions() if ext.startswith('usd'))
except ImportError: # Don't fail if Sdf is not importable to facilitate portability
_USD_SUFFIXES = ("usd", "usda", "usdc", "usdz", "usdt")
from grill.tokens import ids
def _table_from_id(token_ids):
headers = [
'Token',
'Pattern',
'Default',
'Description',
]
table_sep = tuple([''] * len(headers))
sorter = lambda value: (
# cleanup backslashes formatting
value.pattern.replace('\\', '\\\\'),
value.default,
# replace new lines with empty strings to avoid malformed tables.
value.description.replace('\n', ' '),
)
rows = [table_sep, headers, table_sep]
rows.extend([token.name, *sorter(token.value)] for token in token_ids)
rows.append(table_sep)
max_sizes = [(max(len(i) for i in r)) for r in zip(*rows)]
format_rows = []
for r in rows:
filler = '=<' if r == table_sep else ''
format_rows.append(' '.join(
f"{{:{f'{filler}'}{f'{size}'}}}".format(i)
for size, i in zip(max_sizes, r))
)
return '\n'.join(format_rows)
class DefaultName(naming.Name):
    """ Inherited by: :class:`grill.names.CGAsset`

    Base class for any Name object that wishes to provide `default` functionality via
    the `get_default` method.

    Subclass implementations can override the `_defaults` member to return a mapping
    appropriate to that class.
    """
    # Field name -> default value; subclasses override (often as a property).
    _defaults = {}
    @classmethod
    def get_default(cls, **kwargs) -> DefaultName:
        """Get a new Name object with default values and overrides from **kwargs."""
        name = cls()
        # Explicit kwargs win over the class defaults.
        defaults = dict(name._defaults, **kwargs)
        name.name = name.get(**defaults)
        return name
class DefaultFile(DefaultName, naming.File):
    """ Inherited by: :class:`grill.names.DateTimeFile`

    Similar to :class:`grill.names.DefaultName`, provides File Name objects default
    creation via the `get_default` method.

    Adds an extra `DEFAULT_SUFFIX='ext'` member that will be used when creating objects.
    """
    # Fallback file extension; subclasses may override (e.g. UsdAsset -> 'usd').
    DEFAULT_SUFFIX = 'ext'
    @property
    def _defaults(self):
        result = super()._defaults
        # Inject the class-level suffix so defaults always carry an extension.
        result['suffix'] = type(self).DEFAULT_SUFFIX
        return result
class DateTimeFile(DefaultFile):
    """Time based file names respecting iso standard.

    ============= ================
    **Config:**
    ------------------------------
    *year*         Between :py:data:`datetime.MINYEAR` and :py:data:`datetime.MAXYEAR` inclusive.
    *month*        Between 1 and 12 inclusive.
    *day*          Between 1 and the number of days in the given month of the given year.
    *hour*         In ``range(24)``.
    *minute*       In ``range(60)``.
    *second*       In ``range(60)``.
    *microsecond*  In ``range(1000000)``.
    ============= ================

    ====== ============
    **Composed Fields:**
    --------------------
    *date*  `year` `month` `day`
    *time*  `hour` `minute` `second` `microsecond`
    ====== ============

    .. note::
        When getting a new default name, current ISO time at the moment of execution is used.

    Example:
        >>> tf = DateTimeFile.get_default(suffix='txt')
        >>> tf.day
        '28'
        >>> tf.date
        '2019-10-28'
        >>> tf.year = 1999
        >>> tf
        DateTimeFile("1999-10-28 22-29-31-926548.txt")
        >>> tf.month = 14  # ISO format validation
        Traceback (most recent call last):
        ...
        ValueError: month must be in 1..12
        >>> tf.datetime
        datetime.datetime(1999, 10, 28, 22, 29, 31, 926548)
    """
    # Field patterns: 1-2 digits for every time field except year (up to 4)
    # and microsecond (up to 6).
    config = dict.fromkeys(
        ('month', 'day', 'hour', 'minute', 'second'), r'\d{1,2}'
    )
    config.update(year=r'\d{1,4}', microsecond=r'\d{1,6}')
    # Composed fields joined with '-': date = y-m-d, time = h-m-s-us.
    join = dict(
        date=('year', 'month', 'day'),
        time=('hour', 'minute', 'second', 'microsecond'),
    )
    join_sep = '-'
    @property
    def _defaults(self):
        result = super()._defaults
        time_field = {'year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond'}
        # Default every time field to the current local time at call time.
        now = datetime.now()
        result.update({f: getattr(now, f) for f in time_field})
        return result
    def get_pattern_list(self) -> typing.List[str]:
        """Fields / properties names (sorted) to be used when building names.

        Defaults to [`date`, `time`] + keys of this name's config
        """
        return ["date", "time"] + super().get_pattern_list()
    @property
    def name(self) -> str:
        return super().name
    @name.setter
    def name(self, name: str):
        # Remember the previous value so an invalid assignment can be undone.
        prev_name = self._name
        super(DateTimeFile, self.__class__).name.fset(self, name)
        if name:
            try: # validate via datetime conversion
                self.datetime
            except ValueError:
                if prev_name: # if we had a previous valid name, revert to it
                    self.name = prev_name
                raise
    @property
    def datetime(self) -> datetime:
        """ Return a :py:class:`datetime.datetime` object using this name values.

        >>> tf = DateTimeFile("1999-10-28 22-29-31-926548.txt")
        >>> tf.datetime
        datetime.datetime(1999, 10, 28, 22, 29, 31, 926548)
        """
        if not self.name:
            raise AttributeError("Can not retrieve datetime from an empty name")
        # Re-pad the fields to fixed widths so fromisoformat accepts them.
        date = f"{int(self.year):04d}-{int(self.month):02d}-{int(self.day):02d}"
        time = (f"{int(self.hour):02d}:{int(self.minute):02d}:{int(self.second):02d}."
                f"{int(self.microsecond):06d}")
        return datetime.fromisoformat(f'{date}T{time}')
class CGAsset(DefaultName):
    """Inherited by: :class:`grill.names.CGAssetFile`

    Elemental resources that, when composed, generate the entities that bring an idea to a tangible product
    through their life cycles (e.g. a character, a film, a videogame).
    """
    # One field per CGAsset token id; patterns come from the token registry.
    config = {token.name: token.value.pattern for token in ids.CGAsset}
    # Append the generated token table to this class's docstring.
    __doc__ += '\n' + _table_from_id(ids.CGAsset) + '\n'
    def __init__(self, *args, sep='-', **kwargs):
        # Fields are joined with '-' by default.
        super().__init__(*args, sep=sep, **kwargs)
    @property
    def _defaults(self):
        result = super()._defaults
        # Every token contributes its registry-declared default value.
        result.update({token.name: token.value.default for token in ids.CGAsset})
        return result
class CGAssetFile(CGAsset, DefaultFile, naming.PipeFile):
    """Inherited by: :class:`grill.names.UsdAsset`

    Versioned files in the pipeline for a CGAsset.

    Example:
        >>> name = CGAssetFile.get_default(version=7)
        >>> name.suffix
        'ext'
        >>> name.suffix = 'abc'
        >>> name.path
        WindowsPath('demo/3d/abc/entity/rnd/lead/atom/main/all/whole/7/demo-3d-abc-entity-rnd-lead-atom-main-all-whole.7.abc')
    """
    @property
    def _defaults(self):
        result = super()._defaults
        # New files start at version 1.
        result.update(version=1)
        return result
    def get_path_pattern_list(self) -> typing.List[str]:
        # NOTE(review): builds on get_pattern_list() (the name fields) and
        # appends 'version' as the last path component - confirm this matches
        # the base class's path-building contract.
        pattern = super().get_pattern_list()
        pattern.append('version')
        return pattern
class UsdAsset(CGAssetFile):
    """Specialized :class:`grill.names.CGAssetFile` name object for USD asset resources.

    This is the currency for USD asset identifiers in the pipeline.

    Examples:
        >>> asset_id = UsdAsset.get_default()
        >>> asset_id
        UsdAsset("demo-3d-abc-entity-rnd-main-atom-lead-base-whole.1.usda")
        >>> asset_id.suffix = 'usdc'
        >>> asset_id.version = 42
        >>> asset_id
        UsdAsset("demo-3d-abc-entity-rnd-main-atom-lead-base-whole.42.usdc")
        >>> asset_id.suffix = 'abc'
        Traceback (most recent call last):
        ...
        ValueError: Can't set invalid name ... (only USD suffixes are accepted)

    .. seealso::
        :class:`grill.names.CGAsset` for a description of available fields, :class:`naming.Name` for an overview of the core API.
    """
    DEFAULT_SUFFIX = 'usd'
    file_config = naming.NameConfig(
        # NOTE: limit to only extensions starting with USD (some environments register other extensions untested by the grill)
        {'suffix': "|".join(_USD_SUFFIXES)}
    )
    @classmethod
    def get_anonymous(cls, **values) -> UsdAsset:
        """Get an anonymous :class:`UsdAsset` name with optional field overrides.

        Useful for situations where a temporary but valid identifier is needed.

        :param values: Variable keyword arguments with the keys referring to the name's
            fields which will use the given values.

        Example:
            >>> UsdAsset.get_anonymous(stream='test')
            UsdAsset("4209091047-34604-19646-169-123-test-4209091047-34604-19646-169.1.usda")
        """
        keys = cls.get_default().get_pattern_list()
        # Cycle a UUID's integer fields so every name field gets a value
        # even when there are more fields than UUID components.
        anon = itertools.cycle(uuid.uuid4().fields)
        # Explicit ``values`` shadow the generated anonymous ones.
        return cls.get_default(**collections.ChainMap(values, dict(zip(keys, anon))))
class LifeTR(naming.Name):
    """Taxonomic Rank used for biological classification.
    """
    # One field per LifeTR token id; patterns come from the token registry.
    config = {token.name: token.value.pattern for token in ids.LifeTR}
    # Append the generated token table to this class's docstring.
    __doc__ += '\n' + _table_from_id(ids.LifeTR) + '\n'
    def __init__(self, *args, sep=':', **kwargs):
        # Taxonomic rank fields are joined with ':'.
        super().__init__(*args, sep=sep, **kwargs)
| true |
62c2a4a97b59b1d450bca6df8302644ed9f550e0 | Python | kiwishall/ASCVD_Cal | /RA_CAL.py | UTF-8 | 4,027 | 2.65625 | 3 | [] | no_license | # -*- encoding: utf-8 -*-
'''
@File : RA_CAL.py
@Time : 2021/04/03 10:12:44
@Author : Kaiqiang Li
@Version : V1.0
'''
# here put the import lib
import ERS_RA
import pandas as pd
# 读取金宇的数据
io = r".\data.xlsx"
data_jinyu =pd.read_excel(io, sheet_name = "金宇", header = None)
# data_jinyu.drop(index=[0,1], inplace=True)
# data_jinyu.reset_index(drop = True, inplace=True)
data_jinyu.drop(data_jinyu.columns[0:2], axis=1,inplace = True)
data_jinyu.drop(data_jinyu.index[:1], inplace = True)
print(data_jinyu.head(5))
# 对病人原始情况数据进行处理
# data_jinyu_history = data_jinyu[:][1:28]
# data_jinyu_history.columns = list(data_jinyu_history.iloc[0])
# data_jinyu_history.drop(data_jinyu_history.index[:1], inplace = True)
# # 对病人基线期数据进行处理,即V0期
# data_jinyu_v0 = data_jinyu[:][30:71]
# data_jinyu_v0.columns = list(data_jinyu_v0.iloc[0])
# data_jinyu_v0.drop(data_jinyu_v0.index[:1], inplace = True)
# # data_jinyu_v0.insert(1,data_jinyu_history.columns['姓名'])
# # data_jinyu_v0['姓名'] = data_jinyu_history.columns['姓名']
# # 对病人V4期数据进行处理,即第四周
# data_jinyu_v4 = data_jinyu[:][72:113]
# data_jinyu_v4.columns = list(data_jinyu_v4.iloc[0])
# data_jinyu_v4.drop(data_jinyu_v4.index[:1], inplace = True)
# # 对病人V6期数据进行处理,即第六周
# data_jinyu_v6 = data_jinyu[:][114:155]
# data_jinyu_v6.columns = list(data_jinyu_v6.iloc[0])
# data_jinyu_v6.drop(data_jinyu_v6.index[:1], inplace = True)
# # 对病人V9期数据进行处理,即第九周
# data_jinyu_v9 = data_jinyu[:][156:197]
# data_jinyu_v9.columns = list(data_jinyu_v9.iloc[0])
# data_jinyu_v9.drop(data_jinyu_v9.index[:1], inplace = True)
# # 输入各个表格数据
# data_jinyu_history.to_csv(r".\data_jinyu_history.csv")
# data_jinyu_v0.to_csv(r".\data_jinyu_v1.csv")
# data_jinyu_v4.to_csv(r".\data_jinyu_v4.csv")
# data_jinyu_v6.to_csv(r".\data_jinyu_v6.csv")
# data_jinyu_v9.to_csv(r".\data_jinyu_v9.csv")
# # 对丽珠病人原始情况数据进行处理
# data_lizhu =pd.read_excel(io, sheet_name = "丽珠", header = None)
# data_lizhu.drop(data_lizhu.columns[0:2], axis=1,inplace = True)
# data_lizhu.drop(data_lizhu.index[:1], inplace = True)
# # 对病人原始情况数据进行处理
# data_lizhu_history = data_lizhu[:][1:28]
# data_lizhu_history.columns = list(data_lizhu_history.iloc[0])
# data_lizhu_history.drop(data_lizhu_history.index[:1], inplace = True)
# # 对病人基线期数据进行处理,即V0期
# data_lizhu_v0 = data_lizhu[:][30:61]
# data_lizhu_v0.columns = list(data_lizhu_v0.iloc[0])
# data_lizhu_v0.drop(data_lizhu_v0.index[:1], inplace = True)
# # data_lizhu_v0.insert(1,data_lizhu_history.columns['姓名'])
# # data_lizhu_v0['姓名'] = data_lizhu_history.columns['姓名']
# # 对病人V4期数据进行处理,即第四周
# data_lizhu_v4 = data_lizhu[:][62:93]
# data_lizhu_v4.columns = list(data_lizhu_v4.iloc[0])
# data_lizhu_v4.drop(data_lizhu_v4.index[:1], inplace = True)
# # 对病人V6期数据进行处理,即第六周
# data_lizhu_v6 = data_lizhu[:][94:125]
# data_lizhu_v6.columns = list(data_lizhu_v6.iloc[0])
# data_lizhu_v6.drop(data_lizhu_v6.index[:1], inplace = True)
# # 对病人V9期数据进行处理,即第九周
# data_lizhu_v9 = data_lizhu[:][126:155]
# data_lizhu_v9.columns = list(data_lizhu_v9.iloc[0])
# data_lizhu_v9.drop(data_lizhu_v9.index[:1], inplace = True)
# # 输入各个表格数据
# data_lizhu_history.to_csv(r".\data_lizhu_history.csv")
# data_lizhu_v0.to_csv(r".\data_lizhu_v1.csv")
# data_lizhu_v4.to_csv(r".\data_lizhu_v4.csv")
# data_lizhu_v6.to_csv(r".\data_lizhu_v6.csv")
# data_lizhu_v9.to_csv(r".\data_lizhu_v9.csv")
# AAA = ['aaa',55,'M','N','N','Y','N','N','Y','N','Y']
# data_jinyu.to_csv(r"C:\Users\Administrator\Desktop\python\aaa.csv")
# aa = ERS_RA.ERS_RA_Persion('aaa',55,'M','N','N','Y','N','N','Y','N','Y')
# aa.name, aa.age, aa.gender, aa.diabets, aa.hyperlinder, aa.hypertension, aa.tobacoo, aa.CDAI, aa.MHAQ, aa.
| true |
53bbb9e43632c667705cd9347681abd2ae125a0d | Python | gsrr/Programs | /zeroJudge/20131015_python_unitTest.py | UTF-8 | 215 | 2.953125 | 3 | [] | no_license | import unittest
def sum(a, b):
    """Return the sum of ``a`` and ``b``.

    NOTE: the name shadows the builtin ``sum`` (kept for compatibility).
    """
    result = a + b
    return result
class executeUnitTest(unittest.TestCase):
    """Minimal unittest example exercising :func:`sum`."""
    def test_sum(self):
        # 1 + 2 should equal 3.
        self.assertEqual(3, sum(1, 2))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| true |
7712ee24f9d619da02055716eb4a63feb105d636 | Python | moonhyeji/Python | /Python00/com/test01/type03.py | UTF-8 | 466 | 4.34375 | 4 | [] | no_license | #list = 배열
# constructor form
a = list()
print(a)
a.append(1)
print(a)
a.append('a')
print(a)
a[1] = 'b'
print(a)
#a[2] = 'c'   # would raise IndexError: assignment index out of range
#print(a)
# literal [] form
b = [1,2,3,4,5]
print(b)
print(b[0] +b[3]) #5
print('-----------------')
# list.reverse() reverses in place
b.reverse()
print(b)
b.append(6)
b.sort() # sort = ascending in-place sort
print(b)
# nesting
c =['a','b','c','d',['e','f','g']]
print(c)
print(c[4])
print(c[4][0])
c.append('a')
# list + list (concatenation)
print(b + c)
| true |
128b4ecbdf051e38463168edc17f6e229d9a7488 | Python | Animesh420/automated_email | /readMail.py | UTF-8 | 1,723 | 2.65625 | 3 | [] | no_license | import imaplib
import email
from config import SMTP_SERVER, FROM_EMAIL, FROM_PWD
def read_email_from_gmail(content_email, subject):
    """Fetch inbox messages matching a sender and subject, saving attachments.

    Logs in to the configured IMAP server, searches the inbox for messages
    from ``content_email`` with the given ``subject``, saves each named
    attachment to the current directory, and returns a dict mapping
    ``subject`` to the file name of the last attachment saved (empty when
    nothing matched).
    """
    import os

    details = {}
    mail = imaplib.IMAP4_SSL(SMTP_SERVER)
    mail.login(FROM_EMAIL, FROM_PWD)
    mail.select('inbox')
    try:
        typ, data = mail.search(None, "From \"{}\" Subject \"{}\"".format(content_email, subject))
        mail_ids = data[0]
        id_list = mail_ids.split()
        for i in id_list:
            t, data = mail.fetch(i, '(RFC822)')
            for response in data:
                if not isinstance(response, tuple):
                    continue
                # message_from_bytes avoids decode errors on non-UTF-8 mail
                # (the old message_from_string(...decode('utf-8')) crashed on it).
                msg = email.message_from_bytes(response[1])
                email_subject = msg['subject']
                email_from = msg['from']
                print('Email found from :{} with subject :{}\n'.format(email_from, email_subject))
                for part in msg.walk():
                    # find the attachment part
                    if part.get_content_maintype() == 'multipart':
                        continue
                    if part.get('Content-Disposition') is None:
                        continue
                    filename = part.get_filename()
                    if not filename:
                        continue  # unnamed attachment: nothing sensible to save
                    # basename() guards against path traversal via a crafted
                    # attachment name (attachments are untrusted input).
                    filename = os.path.basename(filename)
                    # save the attachment in the program directory; ``with``
                    # guarantees the handle is closed even on write errors.
                    with open(filename, 'wb') as fp:
                        fp.write(part.get_payload(decode=True))
                    print('Attachment saved!, file: {}'.format(filename))
                    details[subject] = filename
    finally:
        # Always end the IMAP session, even if search/fetch fails.
        mail.logout()
    return details
""" Dummy test code """
# # subjects = read_email_from_gmail("animesh.mukherjeei323460@gmail.com","3765_ani")
# subjects = read_email_from_gmail("animesh.mukherjeei323460@gmail.com","327778_ani_mkjee")
# print(subjects) | true |
95b1327b79798b0a7f5fac7ebf10106cba52b90b | Python | Mamofish/py_netBooter_Terminal | /np_term.py | UTF-8 | 1,660 | 3.296875 | 3 | [] | no_license | import socket
import time
import sys
def connect(ip_value, port_value):
    """Open a TCP session to the netBooter and run an interactive command loop.

    NOTE(review): this module is Python 2 (``raw_input``, str-based
    ``sock.send``). ``timeout_count``/``timeout_limit`` never change, so the
    loop only exits via the 'exit' command (sys/process exit).
    """
    HOST = str(ip_value) # The remote host IP address
    PORT = int(port_value) # The server port number
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, PORT))
    time.sleep(0.1) #use time.sleep to give delay and netBooter time to process
    timeout_limit = 1
    timeout_count = 0
    while timeout_count < timeout_limit: #Wait for user input command
        print("\n\r")
        print("--Type command 'exit' to quit terminal--\n\r")
        print("Enter Command:\r")
        usr_cmd = raw_input(">")
        if 'exit' in usr_cmd: #if command = exit close socket, exit python
            sock.close()
            exit()
        else: #Otherwise send input from user to socket connection (sock)
            sock.send('\n\r')
            sock.send(usr_cmd)
            sock.send('\r')
            time.sleep(0.5)
            recv = sock.recv(2048) #Receive data from connection
            print(recv) #print received data
def main():
    """Parse `-i <ip> -p <port>` from the command line and start the terminal.

    Exits with status 1 (after printing usage / an error) on any malformed
    invocation; otherwise hands control to connect(), which never returns.
    """
    # Expect exactly: np_term.py -i <ip> -p <port>  (5 argv entries total).
    if len(sys.argv) != 5:
        print('Example:> np_term.py -i 192.168.1.100 -p 23\r')
        sys.exit(1)
    flag_ip, ip_value, flag_port, port_value = sys.argv[1:5]
    if flag_ip == '-i' and flag_port == '-p':
        connect(ip_value, port_value)
    else:
        # Echo whatever unrecognized flags were given, then bail out.
        print('unknown option: ' + flag_ip + flag_port)
        sys.exit(1)
# Script entry point: parse CLI arguments and open the interactive session.
if __name__ == '__main__':
    main()
| true |
bb7554ea181c1f59fb245aa3033fb90a4153bc7f | Python | ksaidev/TransBot | /src/bot/responder/channel.py | UTF-8 | 1,548 | 2.65625 | 3 | [] | no_license | from src.data.channel_db import ChannelDatabase
from data.private import ADMIN_CHANNEL
from src.constants import messages
class ChannelResponder:
    """Per-channel manager for TransBot.

    Constructed directly when the bot joins a room, and carried inside chat
    objects as an instance variable when a message arrives.  Wraps the channel
    object plus its uid, which keys all channel-database access.
    """

    # Shared by every responder: a single handle to the channel registry.
    database = ChannelDatabase()

    def __init__(self, channel):
        self.channel = channel
        self.chat_id = channel.chat_id

    async def respond(self):
        """Handle a room TransBot has just joined.

        Registers the channel uid in the database (first join only) and sends
        the welcome message; does nothing if the channel is already known.
        """
        if self.is_registered():
            return
        self.register()
        await self.send_text(messages.WELCOME)

    def is_registered(self):
        """Return True when this channel already exists in the database."""
        return self.get_mode() is not None

    def register(self):
        """Record this channel's uid in the database (initial mode: Mode.AUTO = 1)."""
        self.database.add_channel(self.chat_id)

    def get_mode(self):
        """Return the stored mode for this channel (1 = auto), or None if unregistered."""
        return self.database.get_mode(self.chat_id)

    def set_mode(self, mode):
        """Persist a new mode value for this channel."""
        self.database.set_mode(self.chat_id, mode)

    def is_admin(self):
        """Return True when this channel is one of the configured admin channels."""
        return self.chat_id in ADMIN_CHANNEL

    async def send_text(self, message):
        """Forward a text message to the underlying channel object."""
        await self.channel.send_text(message)
| true |
a8d523bcfea6dd04672c4cf3c2c074cd5ecfc025 | Python | liucheng2912/py | /leecode/剑指offer/python基础/数据类型/字符串string/strip.py | UTF-8 | 56 | 2.609375 | 3 | [] | no_license | field = '----hello----world----'
print(field.strip('-')) | true |
3f7a3e2846863b53498a197846498fd95d5d484b | Python | HenryBalthier/Python-Learning | /Leetcode_easy/math/367.py | UTF-8 | 338 | 3.453125 | 3 | [] | no_license | class Solution(object):
def isPerfectSquare(self, num):
"""
:type num: int
:rtype: bool
"""
r = num
while r * r > num:
r = (r + num/r) /2
print(r)
return r * r == num
if __name__ == '__main__':
    # Quick manual check: 9 is a perfect square, so this prints True.
    solver = Solution()
    print(solver.isPerfectSquare(9))
18000df6ca94fb4faace6ce72f9066a585a65771 | Python | wasiqrumaney/MLMI | /src/VAEs/vae16.py | UTF-8 | 2,236 | 2.578125 | 3 | [] | no_license | import torch
from torch import nn
# Latent-code dimensionality; fc2 emits 2 * Z_DIMS values ([mu | logvar]).
Z_DIMS = 64
# Prefer GPU when available.  NOTE(review): `device` is unused in the code
# shown in this file -- presumably consumed by the training script.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# VAE Model
# VAE Model
class VAE(nn.Module):
    """3D convolutional variational autoencoder for (B, 1, 16, 16, 16) volumes.

    Encoder: four Conv3d layers -> two FC layers emitting [mu | logvar]
    (Z_DIMS values each).  Decoder: two FC layers -> three ConvTranspose3d
    layers -> Conv3d + sigmoid, restoring a (B, 1, 16, 16, 16) volume in [0, 1].
    """

    def __init__(self):
        super(VAE, self).__init__()
        self.relu = nn.ReLU()
        # Encoder convolutions; spatial sizes for a (B, 1, 16, 16, 16) input:
        self.conv1 = nn.Conv3d(1, 1, kernel_size=2, stride=1, padding=1)    # -> (B, 1, 17, 17, 17)
        self.conv2 = nn.Conv3d(1, 64, kernel_size=2, stride=2, padding=0)   # -> (B, 64, 8, 8, 8)
        self.conv3 = nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1)  # shape-preserving
        self.conv4 = nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1)  # shape-preserving
        # Bottleneck: 64 * 8 * 8 * 8 = 32768 features -> mean and log-variance.
        self.fc1 = nn.Linear(64 * 8 * 8 * 8, 128)
        self.fc2 = nn.Linear(128, 2 * Z_DIMS)
        # Decoder: latent code back up to the conv feature volume.
        self.fc3 = nn.Linear(Z_DIMS, Z_DIMS)
        self.fc4 = nn.Linear(Z_DIMS, 64 * 8 * 8 * 8)
        self.upconv1 = nn.ConvTranspose3d(64, 64, kernel_size=3, stride=1, padding=1)  # shape-preserving
        self.upconv2 = nn.ConvTranspose3d(64, 64, kernel_size=3, stride=1, padding=1)  # shape-preserving
        self.upconv3 = nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2, padding=0)  # 8^3 -> 16^3
        self.conv5 = nn.Conv3d(64, 1, kernel_size=3, stride=1, padding=1)
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()  # registered but unused in the forward pass shown here

    def encode(self, x):
        """Map an input volume to the latent Gaussian parameters (mu, logvar)."""
        h = self.relu(self.conv1(x))
        h = self.relu(self.conv2(h))
        h = self.relu(self.conv3(h))
        h = self.relu(self.conv4(h))
        h = h.view(-1, 64 * 8 * 8 * 8)  # flatten to (B, 32768)
        h = self.relu(self.fc1(h))
        h = self.fc2(h)
        # First Z_DIMS columns are the mean, the remaining ones the log-variance.
        mu = h[:, :Z_DIMS]
        logvar = h[:, Z_DIMS:]
        return mu, logvar

    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I); differentiable w.r.t. mu/logvar."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Map a latent code back to a (B, 1, 16, 16, 16) volume in [0, 1]."""
        h = self.relu(self.fc3(z))
        h = self.relu(self.fc4(h))
        h = h.view(-1, 64, 8, 8, 8)  # un-flatten for the transposed convs
        h = self.relu(self.upconv1(h))
        h = self.relu(self.upconv2(h))
        h = self.relu(self.upconv3(h))  # upsample 8^3 -> 16^3
        return self.sigmoid(self.conv5(h))

    def forward(self, x):
        """Full VAE pass: returns (reconstruction, mu, logvar)."""
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar
a3cbb0cc4bc4c43fe88391e1c80f431fb7db74dd | Python | Shisan-xd/testPy | /python_work/day10_05返回值作为参数传递.py | UTF-8 | 1,079 | 4.5 | 4 | [] | no_license | # @Time :2021/10/11 09:16
# @Author :
# @File :day10_05返回值作为参数传递.py
# -- 函数的参数
# # 1、定义两个函数;2、函数一有返回值50;函数二把返回值50作为参数传入(定义函数二要有形参)
# def T1():
# return 5
#
#
# def T2(num):
# print(num)
#
#
# # 先拿到函数一的返回值,再把返回值传到函数二
# result = T1()
# # print(result)
# T2(result)
# def r_e():
# return 10
# return 2
# # 此处只执行了一个return,是因为return可以退出当前函数,导致return下方代码不执行
#
#
# def r_e1(num):
# print(num)
#
#
# result1 = r_e()
# r_e1(result1)
# -- 函数多返回值
# 一个函数多个返回值的写法
def r_n():
    """Comma-separated values after `return` are packed into a single tuple."""
    return (1, 2)
# A multi-value return arrives as one tuple object.
result = r_n()
print(result)  # (1, 2)
# return 后面可以直接书写 元组、列表、字典
def r_a():
    """Show that `return` can produce a container literal directly."""
    # Equally valid alternatives:
    #   return (1, 2)   # tuple
    #   return [1, 2]   # list
    return {'name': 'Python', 'age': '12'}  # dict
# A dict returned from a function is received like any other single value.
result = r_a()
print(result)  # {'name': 'Python', 'age': '12'}
| true |
a16e3746f01d62c44e15274e09d4a82789bbee34 | Python | agozdogan/Piece-Of-Programming | /CodeSignal/SumNumbers/question1.py | UTF-8 | 173 | 3.140625 | 3 | [
"Apache-2.0"
def add(param1, param2):
    """Return param1 + param2 when both lie in [-1000, 1000].

    Out-of-range input yields None (made explicit here; the original fell
    through and returned None implicitly), so callers must check for it.
    """
    # Chained comparisons replace the original four-clause boolean chain.
    if -1000 <= param1 <= 1000 and -1000 <= param2 <= 1000:
        return param1 + param2
    return None
print(add(10,19)) | true |
6181dcb5b18716d394a2e0dd7cc77f8e0cef7440 | Python | JhonesBR/python-exercises | /3 - Python Loop Exercise/ex06.py | UTF-8 | 224 | 3.984375 | 4 | [] | no_license | # Given a number count the total number of digits in a number
# Solution: https://github.com/JhonesBR
def numberOfDigits(n):
print(f"{n} has {len(str(n))} digits")
n = int(input("Insert a number: "))
numberOfDigits(n) | true |
7ed2bf40b0bc744c1e6cdd497976c770622be02d | Python | prblthp/Insertion_Sort_Adv_Analysis | /Insertion_Bin_Search.py | UTF-8 | 1,520 | 2.890625 | 3 | [] | no_license | # Enter your code here. Read input from STDIN. Print output to STDOUT
# !/bin/python3
import math
import os
import random
import re
import sys
# Complete the insertionSort function below.
cnt = 0
def binarySearch(arr, l, r, x):
global cnt
while l <= r:
mid = l + (r - l) // 2
k = 1
if arr[mid] == x:
j1 = len(arr) - 1
while mid + k <= j1 and arr[mid + k] == x:
k += 1
j2 = mid + k
arr.insert(j2, x)
cnt += i - j2
return arr
if arr[mid] < x:
l = mid + 1
else:
r = mid - 1
cnt += i - l
arr.insert(l, x)
return arr
def insertionSort(arr):
if (len(set(arr)) == 1):
return 0
arr2 = []
arr2.append(arr[0])
global i
for i in range(1, len(arr)):
binarySearch(arr2, 0, len(arr2) - 1, arr[i])
return (cnt)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
t = int(input())
if t < 1 & t > 15:
sys.exit()
for t_itr in range(t):
n = int(input())
if n > 100000:
sys.exit()
arr = list(map(int, input().rstrip().split()))
if all(i >= 1 and i <= 10000000 for i in arr) == False:
sys.exit()
# print(arr)
result = insertionSort(arr)
cnt = 0
fptr.write(str(result) + '\n')
fptr.close()
| true |
9d950b8baf8805b32e94c2131bb6dade7377bf97 | Python | pcranger/learning-Flask | /section2/OOP/oveview.py | UTF-8 | 764 | 4.46875 | 4 | [] | no_license | """ student = {
"name": "Rolf",
"grades": (89, 90, 93, 78, 90)
}
def average(sequence):
return sum(sequence) / len(sequence)
#passing data to function
print(average(student["grades"])) """
# dot(.) means inside e.g Student.average() means average function inside Student class
class Student:
def __init__(self, name, grades):
self.name = name
self.grades = grades
def average(self):
return sum(sequence) / len(sequence)
def __str__(self):
return f"Student {self.name}, {self.grades} years old."
def __repr__(self):
return f"Student"
# easier to pass json
studenta = Student("Bob", (89, 90, 93, 78, 90))
print(studenta.name)
# access directly in student object
print(studenta)
| true |
8f643555e45fc31c324424eaecc60dbcd4906f9d | Python | TomasBalbinder/Projekty | /list.py | UTF-8 | 1,463 | 4 | 4 | [] | no_license | '''
Uprav predchozi program znamky
tak aby program hodnoty ukladal do seznamu.
Seznam vypis pred a po setrideni.
Vypis nejlepsi a nejhorsi a prumernou znamku seznamu.
'''
znamky = int(input("Zadej 1. znamku: "))
pocitadlo = 1
seznam = []
if znamky > 0:
while znamky in range(1,6):
seznam.append(znamky)
print(znamky)
pocitadlo = pocitadlo + 1
znamky = int(input(f"Zadej {pocitadlo}. znamku: "))
if znamky < 1 or znamky > 6:
print(f"Konec programu. \n---------------------------- \nSeznam vsech znamek {seznam}")
seznam.sort()
print(f"Seznam vsech serazenych znamek {seznam}")
print(f"Prumerna znamka je: {round(sum(seznam) / len(seznam))}")
print(f"Nejvetsi znamka : {max(seznam)} \nNejmensi znamka : {min(seznam)}")
else:
print("Tohle nejde")
'''
Vytvor algoritmus, ktery do
seznamu ulozi "x" polozek
seznamu pro nakup.
Kontroluj aby se polozky v seznamu
neopakovaly.
Polozky vypis pred a po setrideni.
'''
kosik = []
nakup = input("Zadej polozky pro nakup zbozi: ")
nakup = nakup.lower()
if nakup.isalpha():
while nakup != "konec":
kosik.append(nakup)
nakup = input("Zadej polozky pro nakup zbozi: ")
if nakup in kosik:
print("Polozka se opakuje")
kosik.sort()
neopakovat = set(kosik)
print("Musim koupit:", " ".join(neopakovat))
else:
print("Musis napsat text") | true |
96501012d45d3fd5e906b6cd85085133056a2f42 | Python | matthewsgerling/Python_Class_Work | /Week10/Invoice/invoiceClass.py | UTF-8 | 855 | 3.515625 | 4 | [] | no_license | # Author: Matthew Gerling
# File: invoiceClass.py
# Date: 7/1/2020
class Invoice:
def __init__(self, iid, cid, ln, fn, pn, add):
self.invoice_id = iid
self.customer_id = cid
self.last_name = ln
self.first_name = fn
self.phone_number = pn
self.address = add
self.items_with_price: dict = {}
def add_item(self, item):
self.items_with_price.update(item)
def create_invoice(self):
print(self.first_name, ', ', self.last_name, ' ,', self.customer_id , ' : ', self.invoice_id, ' ,', self.phone_number
, ' ,', self.address, ' ,', self.items_with_price)
# Driver code
invoice = Invoice(1, 123, '1313 Disneyland Dr, Anaheim, CA 92802', 'Mouse', 'Minnie', '555-867-5309')
invoice.add_item({'iPad': 799.99})
invoice.add_item({'Surface': 999.99})
invoice.create_invoice()
| true |