blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d614b4df5b82b2695748ae5ba513bd8b2e8e2009 | Python | HyeonGyuChi/2019_1st_Semester | /Python_Programing/Exercise10/T1.py | UTF-8 | 1,586 | 3.203125 | 3 | [] | no_license | import sqlite3
# create the connection and cursor for the SQLite database file "new_testDB"
con = sqlite3.connect("new_testDB")
cursor = con.cursor()
# create the table; DDL is committed via the connection object
try :
    sql = "CREATE TABLE IF NOT EXISTS productTable(num integer primary key autoincrement, pCode char(5), pName char(15), price integer, amount integer)"
    cursor.execute(sql)
except :
    # NOTE(review): bare except hides the real error; prefer `except sqlite3.Error as e`
    print("오류 테이블 CREATE 실패")
else :
    print("성공 테이블 CREATE 성공")
# insert data
try :
    sql = "INSERT INTO productTable(pCode, pName, price, amount) VALUES('p0001', '노트북', 110, 5)"
    cursor.execute(sql)
    sql = "INSERT INTO productTable(pCode, pName, price, amount) VALUES('p0003', '마우스', 3, 22)"
    cursor.execute(sql)
    sql = "INSERT INTO productTable(pCode, pName, price, amount) VALUES('p0004', '키보드', 2, 11)"
    cursor.execute(sql)
    con.commit() # commit after INSERT -- it is connection.commit(), not cursor.commit()
except :
    print("오류 데이터 INSERT 실패")
else :
    print("성공 데이터 INSERT 성공")
# select all rows and print them one by one via cursor.fetchone()
try :
    print("num\tpCode\tpName\tprice\tamount\t")
    print("--------------------------------------------------------")
    sql = "SELECT * FROM productTable"
    cursor.execute(sql)
    while True :
        row = cursor.fetchone() # one row per iteration; None when the result set is exhausted
        if not row :
            break
        else :
            num = row[0]
            pCode = row[1]
            pName = row[2]
            price = row[3]
            amount = row[4]
            print("%5d %5s %15s %5d %5d" %(num, pCode, pName, price, amount))
except :
print("INSERT 오류") | true |
e454cca79602b7c4354485d5f723b72ed975967c | Python | Jinxiatucla/Clustering | /b.py | UTF-8 | 2,341 | 2.703125 | 3 | [] | no_license | import a
import numpy as np
from scipy.sparse.linalg import svds
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import NMF
import pylab as pl
r = [1, 2, 3, 5, 10, 20, 50, 100, 300]
data = a.retrieve_data()
# plot the cumulative explained variance vs. the number of singular values r
def get_svd(tfidf):
    """Compute a 1000-component sparse SVD of the tf-idf matrix and plot the
    cumulative fraction of total variance retained by the top components.

    NOTE(review): Python 2 only -- relies on `print` statements, list-returning
    `map`, and tuple-parameter lambdas `lambda(x)`.
    """
    number = 1000
    U, s, V = svds(tfidf, number)
    # print tfidf.shape
    # print U.shape
    # print s.shape
    # print V.shape
    # total variance = trace(A * A^T)
    total = np.trace((tfidf.dot(np.transpose(tfidf))).toarray())
    s = s[::-1]  # svds yields singular values in ascending order; flip to descending
    s = map(lambda(x): x * x, s)
    # prefix sums: s[i] becomes the variance captured by the top i+1 components
    for i in range(1, number):
        s[i] += s[i - 1]
    s = map(lambda(x): x / total, s)
    x = range(1, number + 1)
    print s[number - 1]
    pl.plot(x, s)
    pl.show()
    return U, s, V
# get nmf
def get_nmf(tfidf, rank):
    """Return the rank-`rank` non-negative matrix factorization of `tfidf`."""
    return NMF(n_components=rank).fit_transform(tfidf)
# get the truncated svd
def get_truncated_svd(tfidf, rank):
    """Reduce `tfidf` to `rank` dimensions with truncated SVD (LSI)."""
    reducer = TruncatedSVD(n_components=rank)
    reduced = reducer.fit_transform(tfidf)
    return reduced
# use LSI to do the reduction
# u, s, v are not in use
# reduction_type is a function to determine which reduction method is used,get_truncated_svd or get_nmf
def reduce_plot(U, s, V, reduction_type, tfidf, k = 2):
    """For every rank in the module-level list `r`, reduce the tf-idf matrix
    with `reduction_type`, k-means-cluster the reduced data into `k` clusters,
    and plot the five clustering scores against the rank.

    NOTE(review): U, s, V are unused (kept from the older code path shown in
    the commented lines below); `labels` is derived from the module-level
    `data` loaded at import time.
    """
    U = np.array(U)
    V = np.array(V)
    s = np.array(s)
    labels = a.get_class(data)
    homo_list = []
    complete_list = []
    vscore_list = []
    rand_list = []
    mutual_list = []
    for rank in r:
        # get the reducted U, s, V
        # r_U = U[:, 0:rank]
        # r_s = s[0:rank]
        # x = r_U.dot(np.diag(r_s))
        x = reduction_type(tfidf, rank)
        km = a.k_means_cluster(x, k)
        # result holds: homogeneity, completeness, v-score, rand index, mutual info
        result = a.get_result(km, labels)
        homo_list.append(result[0])
        complete_list.append(result[1])
        vscore_list.append(result[2])
        rand_list.append(result[3])
        mutual_list.append(result[4])
    # plot
    pl.plot(r, homo_list, label = "homo")
    pl.plot(r, complete_list, label = "complete")
    pl.plot(r, vscore_list, label = "vscore")
    pl.plot(r, rand_list, label = "rand")
    pl.plot(r, mutual_list, label = "mutual")
    pl.legend(loc = "upper right")
    pl.show()
if __name__ == "__main__":
    # load the corpus, build tf-idf, inspect SVD variance, then evaluate NMF
    data = a.retrieve_data()
    tfidf = a.get_TFIDF(data)
    U, s, V = get_svd(tfidf)
    # reduce_plot(U, s, V, get_truncated_svd, tfidf)
    reduce_plot(U, s, V, get_nmf, tfidf)
| true |
4134fcb65b1c33170fe00a8d77a4f49afc9671c0 | Python | micahjones13/Sprint-Challenge--Hash-BC | /hashtables/ex1/ex1.py | UTF-8 | 1,617 | 3.71875 | 4 | [] | no_license | # Hint: You may not need all of these. Remove the unused functions.
from hashtables import (HashTable,
hash_table_insert,
hash_table_remove,
hash_table_retrieve,
hash_table_resize)
def get_indices_of_item_weights(weights, length, limit):
    """Find two indices in `weights` whose values sum to `limit`.

    Each weight is inserted into a hash table keyed by weight with its index
    as the value (duplicate weights keep the last index), then the list is
    scanned for a complement.  Returns (complement_index, i) or None.

    NOTE(review): a weight equal to limit/2 matches its own table entry, so a
    single such element pairs with itself -- confirm whether the spec requires
    two distinct items.
    """
    ht = HashTable(16)
    # add all of the weights into the ht
    v = 0
    for k in weights:
        # Makes the HT look like: [{4: 0}, {6: 1}, {10:2}, {15:3}, {16:4}]
        # makes it so we can return the correct indicies, since thats what the answer wants
        # print(k, 'K')
        hash_table_insert(ht, k, v)
        v += 1
    # find where limit - weight is in the HT
    for i in range(len(weights)):
        # this is where the value is that we want.
        # print(weights[i], 'weights[i]')
        # each = hash_table_retrieve(ht, weights[i])
        # print(each, 'each')
        #limit: 21
        match = limit - weights[i]
        # print(match, 'MATCH')
        # if the ideal pair exists in the HT, return it
        is_there = hash_table_retrieve(ht, match)
        if is_there != None:
            # print(is_there, i, 'ANSWER')
            # return the indexes of the perfect match
            return (is_there, i)
        # else:
        #     print('not there.')
    # print(ht.storage)
    return None
def print_answer(answer):
    """Print the two indices separated by a space, or "None" if no pair exists.

    Fix: the original evaluated `answer[0] + " " + answer[1]`, which raises
    TypeError because the indices are ints; each element is now converted
    with str() before concatenation.
    """
    if answer is not None:
        print(str(answer[0]) + " " + str(answer[1]))
    else:
        print("None")
# smoke test: 6 + 15 == 21, so the pair of indices (3, 1) should be found
weights = [4, 6, 10, 15, 16]
length = 5
limit = 21
get_indices_of_item_weights(weights, length, limit)
| true |
1de9797d66191f8331ddfc7d1e3d4db201a1258c | Python | Surja1997/Python-assignments-1 | /data structures/LinkedList.py | UTF-8 | 1,184 | 4.1875 | 4 | [] | no_license | class Node:
def __init__(self, value):
self.value = value
self.next = None
class LList:
def __init__(self):
self.head = None
# creating an empty LL
LL = LList()
LL.head = Node("Surja")
sec = Node("Rohit")
third = Node("Mohit")
fourth = Node("Chandan")
LL.head.next = sec
sec.next = third
third.next = fourth
# Now we'll be traversing
def printList():
val = LL.head
while val:
if val.next:
print("Value= ", val.value, end=", ")
else:
print("Value= ", val.value)
val = val.next
printList()
# insterting a new node at the head of the Linked List
def changeHead(value):
newHead = Node(value)
newHead.next = LL.head
LL.head = newHead
changeHead("Mohor")
printList()
# inserting a node after a certain Node
def insertNode(where, what):
val = LL.head
newNode = Node(what)
while (val):
if val.value == where:
newNode.next = val.next
val.next = newNode
break
if val.next is None:
print(" No such element found")
break
val = val.next
printList()
insertNode("Mohit", "Dada")
| true |
6b244bf7b3bd40985a32b6abc92e7be9cf7b7e13 | Python | sjy9412/startcamp | /day1/lotto.py | UTF-8 | 98 | 2.703125 | 3 | [] | no_license | import random
# numbers = range(1, 46)
# draw 6 distinct numbers from 1..45 without replacement
lotto = random.sample(range(1, 46), 6)
print(sorted(lotto)) | true |
d45f166f9c84562656aac0585c8ca1c7b902c01e | Python | hemal507/CS-Algorithms | /test_arrayPacking.py | UTF-8 | 421 | 2.546875 | 3 | [] | no_license | import arrayPacking
def test_case1():
    # three-element input
    packed = arrayPacking.arrayPacking([24, 85, 0])
    assert packed == 21784
def test_case2():
    packed = arrayPacking.arrayPacking([23, 45, 39])
    assert packed == 2567447
def test_case3():
    # powers of two
    packed = arrayPacking.arrayPacking([1, 2, 4, 8])
    assert packed == 134480385
def test_case4():
    # single element packs to itself
    packed = arrayPacking.arrayPacking([5])
    assert packed == 5
def test_case5():
    packed = arrayPacking.arrayPacking([187, 99, 42, 43])
    assert packed == 724198331
| true |
45a1713689638f97c996942a43475d6ce57157ae | Python | frandres/aletheia | /bills/download_spanish.py | UTF-8 | 1,713 | 2.71875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
import unicodedata
import re
import wget
import urllib
failed_articles = []
def get_url_soup(url,items_present,max_tries=100):
    """Fetch `url` until every selector dict in `items_present` is found in
    the page (at most `max_tries` attempts); return the parsed soup or None.

    On final failure the url is appended to the module-level `failed_articles`.
    NOTE(review): Python 2 only (`print` statements); verify=False disables
    TLS certificate checks; the page is parsed twice on the success path.
    """
    try_again = True
    tries = 0
    while try_again and tries<max_tries:
        tries+=1
        try:
            try_again = False
            print ' Trying'
            page = requests.get(url,verify=False,timeout=10)
            print ' Fetched'
            soup = BeautifulSoup(page.content)
            # retry if any required element is missing from the page
            for x in items_present:
                try_again = try_again or soup.find(**x) is None
        except Exception:
            # network/parse errors count as a failed attempt
            try_again = True
    if not try_again:
        print ' Voila'
        return BeautifulSoup(page.content)
    else:
        print 'Ooops'
        failed_articles.append(url)
        return None
def strip_accents(s):
    """Return `s` with combining accent marks removed (NFD + drop category Mn)."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
regexp = re.compile('.*?(Llei.*?),.*')
def process_url(url,folder):
    """Download every Spanish-language (hreflang='es') law document linked
    from one listing page into `folder`, named after the 'Llei ...' title.

    NOTE(review): Python 2 only (`print` statement, `urllib.urlretrieve`).
    """
    # retry-fetch the page until the listing container div is present
    soup = get_url_soup(url,[{'name':'div', 'attrs':{'class':'llista_completa'}}])
    soup = soup.find('div',{'class':'llista_completa'})
    for d in soup.find_all('li',{'class':'word'}):
        for x in d.find_all('a',{'hreflang':'es'}):
            url = 'http://www.parlament.cat'+ x['href']
            # file name = the 'Llei ...' part of the link title, sanitized for the filesystem
            name = regexp.findall(x['title'])[0]
            name = name.replace(' ','_')
            name = name.replace('/','-')
            name = folder+name+'.docx'
            urllib.urlretrieve (url, name)
            print url
def main():
    """Crawl all 32 listing pages of the parliament law index."""
    base_url = 'http://www.parlament.cat/web/activitat-parlamentaria/lleis?p_pagina='
    for page in range(1, 33):
        process_url(base_url + str(page), './spanish/')
main()
| true |
2f2cdfd135e10c6bf9914fb274fb53c5949a9686 | Python | DanaSergali/Programming | /project1/csv_writer.py | UTF-8 | 1,493 | 2.984375 | 3 | [] | no_license | import csv
from bs4 import BeautifulSoup
def parse_soup(article_path, text, source):
    """Parse one article page (HTML in `text`) and build its corpus metadata row.

    The Russian literals in `row` are fixed corpus category labels and must
    stay as-is.  The exact column meanings (genre, tone, scope, ...) are
    presumed from the labels -- verify against the corpus schema.
    """
    soup = BeautifulSoup(text, 'html.parser')
    main_info = soup.find('div', {'class': 'news-info'})  # main information block of the article
    header = main_info.find('strong').get_text()
    header = header.replace("\t", "")
    header = header.replace("\n", "")
    author = main_info.find('a').get_text()
    author = author.replace(" ", "")
    # creation date: 4th text line, first 10 chars (looks like 'dd.mm.yyyy' -- TODO confirm page layout)
    text_div = main_info.get_text().split('\n')
    created = text_div[3].replace("\t", "")[:10]
    audience_age_div = soup.find('div', {'class': 'header-paper'})
    audience_age = audience_age_div.find('img').get('title')
    audience_age = audience_age.replace('.jpg', '')
    publication = soup.find('div', {'class': 'title'}).get_text()
    # publication year = last dot-separated component of the date string
    year = created.split('.')
    publ_year = year[len(year) - 1]
    # topic and audience_level could not be extracted from the article page
    row = [article_path, author, header, created, "публицистика", "None", "нейтральный", audience_age, "None",
           "районная", source, publication, publ_year, "газета", "Россия", "ru"]
    return row
def save_to_csv(csv_path, article_path, text, source):
    """Append the parsed article row to `csv_path` (tab-separated) and return it."""
    row = parse_soup(article_path, text, source)
    with open(csv_path, "a", newline='') as out_file:
        csv.writer(out_file, delimiter='\t').writerow(row)
    return row
| true |
fa2083b9ee41195cd71bc6ed83f5656ab5f1c85b | Python | RajaSekar1311/Data-Science-and-Visualization-KITS | /Data Frame & Load Excel File/DescribeDataFrame.py | UTF-8 | 654 | 2.828125 | 3 | [] | no_license | import pandas
myFileName = 'Session2-KITS-Guntur-DataSet.xls'
with pandas.ExcelFile(myFileName) as myExcelFileReadObject:
myDataFrame1 = pandas.read_excel(myExcelFileReadObject,'Sem1-Marks')
myDataFrame2 = pandas.read_excel(myExcelFileReadObject,'Sem2-Marks')
#print(myDataFrame1.describe())
#print(myDataFrame2.describe())
#print(myDataFrame1.head(6))
#print(myDataFrame2.head(6))
DF1TotalRowsCols = myDataFrame1.shape
print(DF1TotalRowsCols)
print(DF1TotalRowsCols[0])
print(DF1TotalRowsCols[1])
DF2TotalRowsCols = myDataFrame1.shape
print(DF2TotalRowsCols)
print(DF2TotalRowsCols[0])
print(DF2TotalRowsCols[1]) | true |
89a1c38f4bb97e2ed9544fe9ff84329c9345b89f | Python | curieuxjy/DS-for-PPM | /day1/Day1_PythonCode/day1_python_programming_11.py | UTF-8 | 423 | 3.34375 | 3 | [] | no_license | # Module
#
import myFunctions
# exercise the three functions exported by the local myFunctions module
x = 1
y = 2
z = myFunctions.sum(x,y); print('sum: ', z)
z = myFunctions.average(x,y); print('average: ', z)
z = myFunctions.power(x,y); print('power: ', z)
#from myFunctions import sum, average, power
#
#x = 1
#y = 2
#
#z = sum(x,y); print('sum: ', z)
#z = average(x,y); print('average: ', z)
#z = power(x,y); print('power: ', z)
| true |
59b1fe00c53f9920d99c4b54462ce587c564ffd7 | Python | marikoll/FYS4150_projects | /project_4/python_code/ising_run4c.py | UTF-8 | 5,957 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Calculates and plots expectation values as function of MC-cycles for a
20x20 lattice with temperature T = 1.0 and T = 2.4
"""
import numpy as np
from numba import prange
import time
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from Ising_model import MC
# lattice size, number of Monte Carlo cycles, and the two temperatures
spins = 20
trials = int(1e6)
temp = [1.0, 2.4]
# per-cycle expectation values, one column per temperature
sampled_energies = np.zeros((trials, len(temp)))
sampled_absmagn = np.zeros((trials, len(temp)))
start = time.time()
# first pass: random (disordered) initial spin configuration
for k in prange(len(temp)):
    grid = np.random.choice([-1,1],size=(spins, spins))
    energy_avg, magnet_avg, C_v, susceptibility, abs_magnet, c = MC(grid, trials, temp[k])
    sampled_energies[:, k] = energy_avg
    sampled_absmagn[:, k] = abs_magnet
# second pass: ordered initial configuration (all spins up, np.ones)
o_sampled_energies = np.zeros((trials, len(temp)))
o_sampled_absmagn = np.zeros((trials, len(temp)))
for k in prange(len(temp)):
#    for i in range(len(trials)):
    grid = np.ones((spins, spins))
    energy_avg, magnet_avg, C_v, susceptibility, abs_magnet, c = MC(grid, trials, temp[k])
    o_sampled_energies[:, k] = energy_avg
    o_sampled_absmagn[:, k] = abs_magnet
stop = time.time()
print('CPU: ', stop-start)
T = np.linspace(0, trials, trials/10)
plt.figure(1)
fig, ax = plt.subplots()
ax.plot(T, o_sampled_energies[::10,0], 'r', label= 'ordered configuration')
ax.plot(T, sampled_energies[::10,0], 'b', label= 'random configuration')
ax.legend(loc = 9)
ax.set_title('Expectation values for energy, T = 1.0', fontsize = 15)
ax.set_xlabel('MC-cycles', fontsize = 10)
ax.set_ylabel(r'$\langle E\rangle$', fontsize = 10)
axins = zoomed_inset_axes(ax, 19, loc=1)
axins.plot(T, o_sampled_energies[::10,0], 'r')
axins.plot(T, sampled_energies[::10,0], 'b')
x1, x2, y1, y2 = 0, 10000, -2.01, -1.95
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.savefig('figs/expectations_2020lattice_temp1_energy.pdf', bbox_inches = 'tight')
plt.draw()
plt.show()
plt.figure(2)
fig, ax = plt.subplots()
ax.plot(T, o_sampled_absmagn[::10,0], 'r',label= 'ordered configuration')
ax.plot(T, sampled_absmagn[::10,0], 'b', label= 'random configuration')
ax.legend(loc = 8)
ax.set_title('Expectation values for magnetization, T = 1.0', fontsize = 15)
ax.set_xlabel('MC-cycles', fontsize = 10)
ax.set_ylabel(r'$\langle |M|\rangle$', fontsize = 10)
axins = zoomed_inset_axes(ax, 11, loc=7)
axins.plot(T, o_sampled_absmagn[::10,0], 'r')
axins.plot(T, sampled_absmagn[::10,0], 'b')
x1, x2, y1, y2 = -10, 15000, 0.94, 1.01
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.savefig('figs/expectations_2020lattice_temp1_magnet.pdf', bbox_inches = 'tight')
plt.draw()
plt.show()
plt.figure(3)
fig, ax = plt.subplots()
ax.plot(T, o_sampled_energies[::10,1], 'r', label= 'ordered configuration')
ax.plot(T, sampled_energies[::10,1], 'b', label= 'random configuration')
ax.legend(loc = 9)
ax.set_title('Expectation values for energy, T = 2.4', fontsize = 15)
ax.set_xlabel('MC-cycles', fontsize = 10)
ax.set_ylabel(r'$\langle E\rangle$', fontsize = 10)
axins = zoomed_inset_axes(ax, 8, loc=7)
axins.plot(T, o_sampled_energies[::10,1], 'r')
axins.plot(T, sampled_energies[::10,1], 'b')
x1, x2, y1, y2 = 0, 55000, -1.3, -1.2
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.savefig('figs/expectations_2020lattice_temp24_energy.pdf', bbox_inches = 'tight')
plt.draw()
plt.show()
plt.figure(4)
fig, ax = plt.subplots()
ax.plot(T, o_sampled_absmagn[::10,1], 'r',label= 'ordered configuration')
ax.plot(T, sampled_absmagn[::10,1], 'b', label= 'random configuration')
ax.legend(loc = 9)
ax.set_title('Expectation values for magnetization, T = 2.4', fontsize = 15)
ax.set_xlabel('MC-cycles', fontsize = 10)
ax.set_ylabel(r'$\langle |M|\rangle$', fontsize = 10)
axins = zoomed_inset_axes(ax, 4, loc=7)
axins.plot(T, o_sampled_absmagn[::10,1], 'r')
axins.plot(T, sampled_absmagn[::10,1], 'b')
x1, x2, y1, y2 = -10, 170000, 0.4, 0.56
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="0.5")
plt.savefig('figs/expectations_2020lattice_temp24_magnet.pdf', bbox_inches = 'tight')
plt.draw()
plt.show()
#plt.figure()
#ax1 = plt.subplot(211)
#ax1.set_title('Expectation values for energy and magnetization as functions of MC-cycles, T = 1.0', fontsize = 12)
#ax1.plot(T, o_sampled_energies[::10,0], 'r', label= 'ordered configuration')
#ax1.plot(T, sampled_energies[::10,0], 'b', label = 'random configuration')
#ax1.legend()
#ax1.set_ylabel(r'$\langle E \rangle$', fontsize = 10)
#ax1.grid()
#ax2 = plt.subplot(212)
#ax2.plot(T, o_sampled_absmagn[::10,0], 'r', label = 'ordered configuration')
#ax2.plot(T, sampled_absmagn[::10,0], 'b', label = 'random configuration')
#ax2.legend()
#ax2.set_ylabel(r'$\langle |M|\rangle$', fontsize = 10)
#ax2.set_xlabel('MC-cycles', fontsize = 10)
#ax2.grid()
#plt.savefig('figs/expectations_2020lattice_temp1.pdf', bbox_inches = 'tight')
#plt.show()
#
#
#
#plt.figure()
#ax1 = plt.subplot(211)
#ax1.set_title('Expectation values for energy and magnetization as functions of MC-cycles, T = 2.4', fontsize = 12)
#ax1.plot(T, o_sampled_energies[::10,1], 'r', label= 'ordered configuration')
#ax1.plot(T, sampled_energies[::10,1], 'b', label = 'random configuration')
#ax1.legend()
#ax1.set_ylabel(r'$\langle E \rangle$', fontsize = 10)
#ax1.grid()
#ax2 = plt.subplot(212)
#ax2.plot(T, o_sampled_absmagn[::10,1], 'r', label = 'ordered configuration')
#ax2.plot(T, sampled_absmagn[::10,1], 'b', label = 'random configuration')
#ax2.legend()
#ax2.set_ylabel(r'$\langle |M|\rangle$', fontsize = 10)
#ax2.set_xlabel('MC-cycles', fontsize = 10)
#ax2.grid()
#plt.savefig('figs/expectations_2020lattice_temp24.pdf', bbox_inches = 'tight')
#plt.show()
| true |
8241b067042683930266ec0d5ac343978fb540f8 | Python | shaan2348/hacker_rank | /playfair_cipher_2.py | UTF-8 | 635 | 3.4375 | 3 | [] | no_license | def matrix(key):
    # Build the 5x5 Playfair key square: key letters first (deduplicated),
    # then the rest of the alphabet. The alphabet omits 'J' (I/J are merged).
    m = []
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    for i in key.upper():
        if i not in m:
            m.append(i)
    for i in alphabet:
        if i not in m:
            m.append(i)
    # NOTE(review): a key containing 'J' or non-letters is appended as-is,
    # pushing the square past 25 cells -- confirm keys are J-free letters only.
    # split the flat 25-letter list into five rows of five
    m_group = []
    for i in range(5):
        m_group.append('')
    m_group[0] = m[0:5]
    m_group[1] = m[5:10]
    m_group[2] = m[10:15]
    m_group[3] = m[15:20]
    m_group[4] = m[20:25]
    return m_group
def groups(text):
    # stub: presumably meant to split `text` into letter pairs -- currently does nothing
    for i in range(len(text)):
        pass
def encrypt():
    # stub: not implemented yet
    pass
def decrypt():
    # stub: not implemented yet
    pass
# quick manual test: prompt for the message and key, then show the key square
text = input("Enter Your message here:")
key = input("Enter your key:")
print(matrix(key))
| true |
8cbbb763f42c828db008ffd6af774e2c814978fe | Python | simdax/synchroVid | /capture.py | UTF-8 | 1,156 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import cv2
import os
class Capture():
    """Minimal OpenCV video player with a seek trackbar.

    NOTE(review): Python 2 only (`print filename` statement).
    """
    def __init__(self, filename=os.path.abspath("video.mkv") ):
        # the default path is resolved once, when the class statement runs
        print filename
        self.c = cv2.VideoCapture(str(filename))
        print(self.c)
        self.go = False  # playback flag, flipped by togglePause()
    #helper: trackbar callback -- seek to frame n and display it
    def tbCallback(self, n):
        self.c.set(1,n)  # property id 1 == CAP_PROP_POS_FRAMES
        ret, frame = self.c.read()
        cv2.imshow("Capture", frame)
    def togglePause(self):
        # flip the playback flag
        if self.go == False:
            self.go = True
        else:
            self.go=False
    def createTB(self):
        # trackbar range covers the whole clip; property id 7 == CAP_PROP_FRAME_COUNT
        nbFrames= self.c.get(7)
        cv2.createTrackbar("tb", "Capture", cv2.getTrackbarPos("tb", "Capture"), int(nbFrames), self.tbCallback)
    def startCapture(self):
        # play frames until stopCapture()/togglePause() clears self.go
        self.togglePause()
        # 0 uis for window normal
        cv2.namedWindow("Capture",0)
        self.createTB()
        while(self.go):
            ret, frame = self.c.read()
            if frame is None:
                self.tbCallback(0)  # end of stream: rewind to the first frame
            else:
                cv2.imshow("Capture", frame)
            cv2.waitKey(25)
    def stopCapture(self):
        self.go=False
        cv2.destroyAllWindows()
| true |
a2d2e4491f3d2f32dcfe3e55e28f8ce25f0bfad0 | Python | zuoguoqing/gqfacenet_recognition | /test_facenet_recognition.py | UTF-8 | 5,516 | 2.515625 | 3 | [
"MIT"
] | permissive | import cv2
from test_facenet_register import FaceRecognition
from PIL import Image, ImageDraw
import multiprocessing as mp
import time
face_recognition = FaceRecognition("config_facenet.yaml")
def recognition_photo():
    """Run face recognition on a sample image and save an annotated copy."""
    frame = Image.open('datasets/multiface.jpg')
    results = face_recognition.recognition(frame)
    print(results)
    frame_draw = frame.copy()
    draw = ImageDraw.Draw(frame_draw)
    for result in results:
        draw.rectangle(result["bbox"], outline=(255, 255, 255))
        if len(result["userid"]) > 0:
            # drop the trailing "_<suffix>" segment of the stored user id for display
            userid = result["userid"].split("_")
            userid.pop(len(userid) - 1)
            draw.text((int(result["bbox"][0]), int(result["bbox"][1])), str("_".join(userid)), fill=(255, 255, 255),
                      font=face_recognition.font)
        if result.get("emotion") is not None and len(result["emotion"]) > 0:
            # emotion label drawn just below the user id
            draw.text((int(result["bbox"][0]), int(result["bbox"][1] + 20)), str(result["emotion"]),
                      fill=(255, 255, 255),
                      font=face_recognition.font)
    frame_draw.save('output/multiface_facenet.jpg')
def recognition_video():
    """Recognize faces on the local webcam feed and display annotated frames
    until 'q' is pressed."""
    camara = cv2.VideoCapture(0)
    camara.set(cv2.CAP_PROP_FRAME_WIDTH, 500)
    camara.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)
    camara.set(cv2.CAP_PROP_FPS, 25)
    while True:
        # read a frame from the camera
        ret, frame = camara.read()
        if ret:
            frame = cv2.flip(frame, 1)  # mirror horizontally
            results = face_recognition.recognition(Image.fromarray(frame))
            print(results)
            if results is not None:
                for result in results:
                    cv2.rectangle(frame, (int(result['bbox'][0]), int(result['bbox'][1])),
                                  (int(result['bbox'][2]), int(result['bbox'][3])), (255, 255, 255), 2)
                    if len(result["userid"]) > 0:
                        # drop the trailing "_<suffix>" segment of the stored user id
                        userid = result["userid"].split("_")
                        userid.pop(len(userid) - 1)
                        cv2.putText(frame, str("_".join(userid)), (int(result['bbox'][0]), int(result['bbox'][1])),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
                    if result.get("emotion") is not None and len(result["emotion"]) > 0:
                        cv2.putText(frame, str(result["emotion"]),
                                    (int(result['bbox'][0]), int(result['bbox'][1] + 20)), cv2.FONT_HERSHEY_SIMPLEX,
                                    0.7, (255, 255, 255))
            cv2.imshow('recognition_face', frame)
            if (cv2.waitKey(1) & 0xFF) == ord('q'):
                break
    camara.release()
    cv2.destroyAllWindows()
def camera_put(queue, url):
    """Producer: grab frames from the capture device `url` and push them onto
    `queue` (the 10 ms sleep caps the loop at roughly 100 iterations/s)."""
    cap = cv2.VideoCapture(url)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 500)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 500)
    cap.set(cv2.CAP_PROP_FPS, 25)
    if cap.isOpened():
        print(f"视频地址:{url}")
    while True:
        ret, frame = cap.read()
        if ret:
            queue.put(frame)
        time.sleep(0.01)
def camera_get(queue, winname):
    """Consumer: pop frames from `queue`, run face recognition, and display
    the annotated frames in window `winname`."""
    cv2.namedWindow(winname, flags=cv2.WINDOW_FREERATIO)
    while True:
        frame = queue.get()
        frame = cv2.flip(frame, 1)  # mirror horizontally
        results = face_recognition.recognition(Image.fromarray(frame))
        print(results)
        if results is not None:
            for result in results:
                cv2.rectangle(frame, (int(result['bbox'][0]), int(result['bbox'][1])),
                              (int(result['bbox'][2]), int(result['bbox'][3])), (255, 255, 255), 2)
                if len(result["userid"]) > 0:
                    # drop the trailing "_<suffix>" segment of the stored user id
                    userid = result["userid"].split("_")
                    userid.pop(len(userid) - 1)
                    cv2.putText(frame, str("_".join(userid)), (int(result['bbox'][0]), int(result['bbox'][1])),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))
                if result.get("emotion") is not None and len(result["emotion"]) > 0:
                    cv2.putText(frame, str(result["emotion"]),
                                (int(result['bbox'][0]), int(result['bbox'][1] + 20)), cv2.FONT_HERSHEY_SIMPLEX,
                                0.7, (255, 255, 255))
        cv2.imshow(winname, frame)
        cv2.waitKey(1)
def run_single_camera():
    """Spawn one producer/consumer process pair for the default camera (index 0)."""
    mp.set_start_method(method='spawn')  # init
    frame_queue = mp.Queue(maxsize=2)
    camera_url = 0
    workers = [
        mp.Process(target=camera_put, args=(frame_queue, camera_url)),
        mp.Process(target=camera_get, args=(frame_queue, f"{camera_url}")),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def run_multi_camera():
    """Spawn a daemon producer/consumer process pair for each camera URL.

    Fix: the original passed the undefined names image_put/image_get to
    mp.Process -- the worker functions in this module are camera_put and
    camera_get -- so starting the processes raised NameError.
    """
    camera_urls = [
        "rtsp://username:password@192.168.1.100/h264/ch1/main/av_stream",
        "rtsp://username:password@192.168.1.101//Streaming/Channels/1",
        "rtsp://username:password@192.168.1.102/cam/realmonitor?channel=1&subtype=0"
    ]
    mp.set_start_method(method='spawn')  # init
    queues = [mp.Queue(maxsize=4) for _ in camera_urls]
    processes = []
    for queue, camera_url in zip(queues, camera_urls):
        processes.append(mp.Process(target=camera_put, args=(queue, camera_url)))
        # camera_get's second parameter is the display window name; reuse the URL
        processes.append(mp.Process(target=camera_get, args=(queue, camera_url)))
    for process in processes:
        process.daemon = True
        process.start()
    for process in processes:
        process.join()
if __name__ == '__main__':
    # recognition_photo()
    # recognition_video()
    run_single_camera()  # default entry point: single local camera
f90a697387c5932f87b9163de47257f1d0193f49 | Python | Lalala-xnk/Machine-Learning-in-Finance | /preprocessing/preprocessing.py | UTF-8 | 3,470 | 2.984375 | 3 | [] | no_license | import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def refresh(df):
    """Normalize the raw loan-stats columns to numeric-friendly values.

    Keeps 11 columns and rewrites them in place: '60 months' -> '60',
    '13.5%' -> '0.135', grade 'A'.. -> 1.., verification status -> -1/0/1,
    issue month -> 3..7, and loan_status -> 1 (good) / -1 (bad) / 0 (grey).
    """
    # remove meaningless and defective data, for example, remove NAN and change '60 months' to '60'
    # only the first 10 lines are read for testing
    newdf = df[['loan_amnt', 'term', 'int_rate', 'installment', 'grade', 'sub_grade', 'emp_length', 'annual_inc',\
                'verification_status', 'issue_d', 'loan_status']].iloc[0: 10]
    for i in range(newdf.shape[0]):
        newdf.loc[i, 'term'] = newdf.loc[i, 'term'].split(' ')[1]
        newdf.loc[i, 'int_rate'] = str(float(newdf.loc[i, 'int_rate'][0:-1])/100)
        newdf.loc[i, 'grade'] = ord(newdf.loc[i, 'grade']) - ord('A') + 1
        newdf.loc[i, 'sub_grade'] = newdf.loc[i, 'sub_grade'][-1]
        # '10+ years' -> '10' (the '+' suffix is stripped below)
        newdf.loc[i, 'emp_length'] = newdf.loc[i, 'emp_length'].split(' ')[0]
        if newdf.loc[i, 'emp_length'][-1] == '+':
            newdf.loc[i, 'emp_length'] = newdf.loc[i, 'emp_length'][0:-1]
        if newdf.loc[i, 'verification_status'] == 'Not Verified':
            newdf.loc[i, 'verification_status'] = -1
        elif newdf.loc[i, 'verification_status'] == 'Source Verified':
            newdf.loc[i, 'verification_status'] = 1
        else:
            newdf.loc[i, 'verification_status'] = 0
        # issue_d like 'Jun-2016': keep the month part, then map it to a number
        newdf.loc[i, 'issue_d'] = newdf.loc[i, 'issue_d'].split('-')[0]
        # NOTE(review): only Mar-Jul are mapped; any other month keeps its string name
        if newdf.loc[i, 'issue_d'] == 'Jun':
            newdf.loc[i, 'issue_d'] = 6
        elif newdf.loc[i, 'issue_d'] == 'Jul':
            newdf.loc[i, 'issue_d'] = 7
        elif newdf.loc[i, 'issue_d'] == 'Apr':
            newdf.loc[i, 'issue_d'] = 4
        elif newdf.loc[i, 'issue_d'] == 'May':
            newdf.loc[i, 'issue_d'] = 5
        elif newdf.loc[i, 'issue_d'] == 'March':
            newdf.loc[i, 'issue_d'] = 3
        if newdf.loc[i, 'loan_status'] in ['Current', 'Fully Paid']:
            newdf.loc[i, 'loan_status'] = 1 # good loan
        elif newdf.loc[i, 'loan_status'] in ['Charged Off', 'Late (31-120 days)', 'Late (16-30 days)']:
            newdf.loc[i, 'loan_status'] = -1 # bad loan
        # note: plain `if` (not elif) -- safe only because the branches above replaced the string with an int
        if newdf.loc[i, 'loan_status'] in ['Default', 'In Grace Period']:
            newdf.loc[i, 'loan_status'] = 0 # grey loan
    return newdf
def scale(df):
    """Standardize every column of `df` (zero mean, unit variance)."""
    scaled_values = StandardScaler().fit_transform(df)
    return pd.DataFrame(scaled_values, columns=df.columns)
def PCA_progress(df):
    """Project `df` onto its principal components.

    Returns a DataFrame indexed like `df` with one column per retained
    component, labelled PC1..PCk.  Generalized: the column labels are now
    derived from the transformed shape instead of a hard-coded 10-name list,
    so inputs with a different number of features no longer break the
    DataFrame construction (behavior is unchanged for 10-feature input).
    """
    # PCA progress
    pca = PCA()
    pca.fit(df)
    transformed = pca.transform(df)
    columns = ["PC{}".format(i + 1) for i in range(transformed.shape[1])]
    return pd.DataFrame(transformed, index=df.index, columns=columns)
def main():
    # load the raw CSV, clean it, standardize the features, run PCA, save all stages
    df = pd.read_csv("LoanStats_2016Q2.csv", low_memory=False)
    df_refreshed = refresh(df)
    train_scaled = scale(df_refreshed.drop(['loan_status'], axis=1, inplace=False))
    train_PCA = PCA_progress(train_scaled)
    # NOTE(review): train_result is assigned but never used or saved
    train_result = df_refreshed['loan_status']
    df_refreshed.to_csv('new_LoanStats_2016Q2.csv')
    train_scaled.to_csv('train_scaled_2016Q2.csv')
    train_PCA.to_csv('train_PCA_2016Q2.csv')
    print(df_refreshed.head())
    print(train_scaled.head())
    print(train_PCA.head())
if __name__ == '__main__':
main() | true |
7b2a334c5672abdac69ace296768a424fe32f141 | Python | ananthkalki/melange-colour-detector | /crop.py | UTF-8 | 854 | 2.734375 | 3 | [] | no_license | from PIL import Image
import cv2
import imutils
# NOTE(review): r".jpg" looks like a placeholder path (no file name before the
# extension), so cv2.imread returns None and the resize below will fail.
image = cv2.imread(r".jpg")
resized = imutils.resize(image, width=300)
ratio = image.shape[0] / float(resized.shape[0])
# convert the resized image to grayscale, blur it slightly,
# and threshold it
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
# find contours in the thresholded image and initialize the
# shape detector
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[0]
cnts = imutils.grab_contours(cnts)
image2=Image.open('cloth.jpg')
# NOTE(review): `c` below is undefined (NameError) -- this block was likely
# lifted from a per-contour loop (`for c in cnts:`), and arcLength is given
# the whole contour list rather than a single contour. Needs the loop restored.
peri = cv2.arcLength(cnts, True)
approx = cv2.approxPolyDP(c, 0.04 * peri, True)
(x, y, w, h) = cv2.boundingRect(approx)
croppedIm = image2.crop((x,y,x+w,y+h))
croppedIm.save('cropped.jpg') | true |
78be12b58049a6ec9ba0a57832001e1df52324d1 | Python | BrankoFurnadjiski/ProteinProteinInteractions | /prepareFullGO_v2.py | UTF-8 | 2,885 | 2.71875 | 3 | [] | no_license | """
This file connects string-db protenins with GO annotations according to GO Consortium
"""
import gzip
import time
# Counter for skipping rows
counter = 1
# Flag for skipping first row
flag = True
# Dictionary for mapping from stringID to uniprotIDs
stringMapping = dict()
# Dictionary for mapping from uniprotID to GO annotation
uniMapping = dict()
# Reading from:
GOAHuman = open("../data/goa_human_164.gaf", "r")
HumanPPI700Links = open("../data/HumanPPI700_Links.txt", "r")
HumanPPI900Links = open("../data/HumanPPI900_Links.txt", "r")
# Writing into:
HumanPPI700GO = open("../data/HumanPPI700_GO_v2.txt", "w")
HumanPPI900GO = open("../data/HumanPPI900_GO_v2.txt", "w")
# Reading from these to create child-parent relationships:
BPGOfull = open("../data/BPGOfull.txt", "r")
MFGOfull = open("../data/MFGOfull.txt", "r")
CCGOfull = open("../data/CCGOfull.txt", "r")
print("(" + time.strftime("%c") + ") Reading Uniprot...")
# Filling stringMapping dictionary: stringID ("<taxid>.<protein>") -> uniprotID
with gzip.open('../data/9606_reviewed_uniprot_2_string.04_2015.tsv.gz','r') as fin:
    for line in fin:
        # `flag` skips the single header row of the TSV
        if flag:
            flag = False
            continue
        line = line.decode('ascii')
        line = line[:-1]
        parts = line.split("\t")
        stringId = parts[0] + "." + parts[2]
        fullUniprotId = parts[1]
        # the uniprot column looks like "<id>|<name>"; keep the id part
        uniprotId = fullUniprotId.split("|")[0]
        stringMapping[stringId] = uniprotId
print("(" + time.strftime("%c") + ") Reading GOA...")
# Filling uniMapping dictionary: uniprotID -> set of GO term ids
for line in GOAHuman:
    # `counter` skips the 12-line GAF file header
    if counter <= 12:
        counter += 1
        continue
    parts = line.split("\t")
    uniprotId = parts[1]
    goId = parts[4]
    if uniprotId not in uniMapping:
        uniMapping[uniprotId] = set()
    uniMapping[uniprotId].add(goId)
print("(" + time.strftime("%c") + ") Writing into HumanPPI700_GO_v2...")
# Writing GO annotations for proteins in HumanPPI700 into HumanPPI700_GO
for line in HumanPPI700Links:
parts = line.split(" -> ")
stringId = parts[0]
if stringId not in stringMapping:
continue
uniprotId = stringMapping[stringId]
if uniprotId not in uniMapping:
continue
else:
goTerms = uniMapping[uniprotId]
for go in goTerms:
HumanPPI700GO.write(stringId + "\t" + uniprotId + "\t" + go + "\t\n")
print("(" + time.strftime("%c") + ") Writing into HumanPPI900_GO_v2...")
# Writing GO annotations for proteins in HumanPPI700 into HumanPPI700_GO
for line in HumanPPI900Links:
parts = line.split(" -> ")
stringId = parts[0]
if stringId not in stringMapping:
continue
uniprotId = stringMapping[stringId]
if uniprotId not in uniMapping:
continue
else:
goTerms = uniMapping[uniprotId]
for go in goTerms:
HumanPPI900GO.write(stringId + "\t" + uniprotId + "\t" + go + "\t\n")
GOAHuman.close()
HumanPPI700Links.close()
HumanPPI900Links.close()
HumanPPI700GO.close()
HumanPPI900GO.close()
BPGOfull.close()
MFGOfull.close()
CCGOfull.close()
print("(" + time.strftime("%c") + ") You done now!!!") | true |
cda37b6322660a5b9bdbb27127f084055d793c56 | Python | aaronmorgenegg/cs5665 | /final_project/src/stats/states.py | UTF-8 | 2,856 | 2.984375 | 3 | [] | no_license | from src.data_processing.classifier import STATE_NAMES
def getStateRatios(state_data):
    """Tally per-player state occurrences across frames.

    :param state_data: sequence of frames; each frame holds one 1-based
        state id (a STATE_NAMES key) per player.
    :return: one dict per player mapping state name -> [tally, percent],
        with percent rounded to the nearest integer.
    """
    frame_count = len(state_data)
    per_player = []
    for player_idx in range(len(state_data[0])):
        tally = [0] * len(STATE_NAMES)
        for frame in state_data:
            tally[frame[player_idx] - 1] += 1
        per_player.append({
            name: [tally[state_id - 1],
                   round(100 * tally[state_id - 1] / frame_count)]
            for state_id, name in STATE_NAMES.items()
        })
    return per_player
def getStateRatioAirGround(state_ratios):
"""
:param state_ratios:
:return: [{air: [tally, ratio]}, {ground: [tally, ratio]}, {other: [tally, ratio]}]
"""
ratios = []
air_states = ['moving_air', 'attack_air', 'downed_air']
ground_states = ['moving_ground', 'attack_ground', 'downed_ground', 'neutral']
for i in range(len(state_ratios)):
ratios.append({'air': [0, 0], 'ground': [0, 0], 'other': [0, 0]})
for state in state_ratios:
for name, data in state.items():
if name in air_states:
ratios[i]['air'][0] += data[0]
ratios[i]['air'][1] += data[1]
elif name in ground_states:
ratios[i]['ground'][0] += data[0]
ratios[i]['ground'][1] += data[1]
else:
ratios[i]['other'][0] += data[0]
ratios[i]['other'][1] += data[1]
return ratios
def getStateRatioAttackDefend(state_ratios):
"""
:param state_ratios:
:return: [{attack: [tally, ratio]}, {defense: [tally, ratio]}, {other: [tally, ratio]}]
"""
ratios = []
attack_states = ['attack_ground', 'attack_air']
defense_states = ['downed_ground', 'downed_air', 'dead', 'shield', 'dodge']
for i in range(len(state_ratios)):
ratios.append({'attack': [0, 0], 'defense': [0, 0], 'other': [0, 0]})
for state in state_ratios:
for name, data in state.items():
if name in attack_states:
ratios[i]['attack'][0] += data[0]
ratios[i]['attack'][1] += data[1]
elif name in defense_states:
ratios[i]['defense'][0] += data[0]
ratios[i]['defense'][1] += data[1]
else:
ratios[i]['other'][0] += data[0]
ratios[i]['other'][1] += data[1]
return ratios
def printStateRatios(state_ratios):
string = "---State Ratios---\n"
for i, player in enumerate(state_ratios):
string += "Player {}:\n".format(i)
for state, data in player.items():
string += " {}: {}%\n".format(state, data[1])
return string
| true |
4888ec684f92589be035aecf8663985caba52b10 | Python | NaokiEto/CS171 | /hw7/keyframe.py~ | UTF-8 | 21,656 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
from OpenGL.GL import *
from OpenGL.GL.shaders import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import sys
import pyparsing as pp
from math import pi, sin, cos, acos, sqrt
import numpy as np
def idle():
global Initial
if (counterframe == -1 and Initial == 0):
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
Initial = 1
drawfunc()
# use catmull-rom
# for some frame number, let's say frame 33, use frame 30, frame 45, frame 60 and frame 0
# like wise, if say frame 56, use frame 45, frame 60, frame 0, and frame 30
# the deltas will always be 15 since the difference between frames is 15
# to pause or not to pause, that is the question (or key hehe lame alert)
if (pause == 0 and toggle == 0):
drawfunc()
if (pause == 0 and toggle == 1):
if (counterframe < 75):
drawfunc()
else:
glutLeaveMainLoop()
exit(0)
def drawfunc():
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
mvm = glGetFloatv(GL_MODELVIEW_MATRIX)
global counterframe
global rotatecam
global zcoord
print counterframe
counterframe += 1
frameIdx = counterframe % 75
print "TTTTTTTTTTTTTTTTTTTTTTTthe frame number is: ", frameIdx
index = 0
frameBlock = framenum[index]
# while the index is less than the number of frames in the sample script
# and also with the condition that the frame block number is greater than
# the frame index that we want
while index < len(framenum) and frameBlock < frameIdx:
frameBlock = framenum[index]
index += 1
# unfortunately, the above gives an index that is 1 too much in the case of 1-45
if (frameIdx > 0 and index != len(framenum)):
index -= 1
# unfortunately again, the above above gives an index that is 1 too much in the case of
# 46-60
elif (frameIdx > framenum[len(framenum) - 2] and frameIdx <= framenum[len(framenum) - 1]):
index -= 1
if (frameIdx % 15 > 0):
index -= 1
print "the index of the frame ", frameIdx, "is in the the frameBlock ", framenum[index - 1]
u = (frameIdx % 15)/15.0
# This is for the translations, we use catmull-rom spline interpolation
prevTranslation = np.array(translationsFram[index - 1])
currTranslation = np.array(translationsFram[index % len(framenum)])
nextTranslation = np.array(translationsFram[(index + 1) % len(framenum)])
nextnextTranslation = np.array(translationsFram[(index + 2) % len(framenum)])
print "the prevTranslation is: ", prevTranslation
print "the currTranslation is: ", currTranslation
print "the nextTranslation is: ", nextTranslation
print "the nextnextTranslation is: ", nextnextTranslation
kprime0Translate = 0.5*(currTranslation - prevTranslation)/15.0 + 0.5*(nextTranslation - currTranslation)/15.0
kprime1Translate = 0.5*(nextTranslation - currTranslation)/15.0 + 0.5*(nextnextTranslation - nextTranslation)/15.0
frameUTranslate = currTranslation * (2 * u * u * u - 3 * u * u + 1) + \
nextTranslation * (3 * u * u - 2 * u * u * u) + \
kprime0Translate * (u * u * u - 2 * u * u + u) + \
kprime1Translate * (u * u * u - u * u)
# This is for the scale factors, we use catmull-rom spline interpolation
prevScaleFactor = np.array(scalesFram[index - 1])
currScaleFactor = np.array(scalesFram[index % len(framenum)])
nextScaleFactor = np.array(scalesFram[(index + 1) % len(framenum)])
nextnextScaleFactor = np.array(scalesFram[(index + 2) % len(framenum)])
kprime0Scale = 0.5*(currScaleFactor - prevScaleFactor)/15.0 + 0.5*(nextScaleFactor - currScaleFactor)/15.0
kprime1Scale = 0.5*(nextScaleFactor - currScaleFactor)/15.0 + 0.5*(nextnextScaleFactor - nextScaleFactor)/15.0
frameUScale = currScaleFactor * (2 * u * u * u - 3 * u * u + 1) + \
nextScaleFactor * (3 * u * u - 2 * u * u * u) + \
kprime0Scale * (u * u * u - 2 * u * u + u) + \
kprime1Scale * (u * u * u - u * u)
# This is for the rotations
# previous frame, rotation, convert to quaternion
preprevRotate = rotationsFram[index - 1]
preprevX = preprevRotate[0]/(sqrt(preprevRotate[0]**2 + preprevRotate[1]**2 + preprevRotate[2]**2))
preprevY = preprevRotate[1]/(sqrt(preprevRotate[0]**2 + preprevRotate[1]**2 + preprevRotate[2]**2))
preprevZ = preprevRotate[2]/(sqrt(preprevRotate[0]**2 + preprevRotate[1]**2 + preprevRotate[2]**2))
preprevAngle = preprevRotate[3] * pi/(2.0 * 180.0)
#print "the previous frame rotate axis is: ", preprevX, ", ", preprevY, ", ", preprevZ
#print "the previous angle is: ", preprevAngle
prevqx = preprevX * sin(preprevAngle)
prevqy = preprevY * sin(preprevAngle)
prevqz = preprevZ * sin(preprevAngle)
prevqw = cos(preprevAngle)
prevnormalizing = sqrt(prevqx**2 + prevqy**2 + prevqz**2 + prevqw**2)
prevQuaternion = np.array([prevqx/prevnormalizing, prevqy/prevnormalizing,
prevqz/prevnormalizing, prevqw/prevnormalizing])
# current frame, rotation, convert to quaternion
precurrRotate = rotationsFram[index % len(framenum)]
precurrX = precurrRotate[0]/(sqrt(precurrRotate[0]**2 + precurrRotate[1]**2 + precurrRotate[2]**2))
precurrY = precurrRotate[1]/(sqrt(precurrRotate[0]**2 + precurrRotate[1]**2 + precurrRotate[2]**2))
precurrZ = precurrRotate[2]/(sqrt(precurrRotate[0]**2 + precurrRotate[1]**2 + precurrRotate[2]**2))
precurrAngle = precurrRotate[3] * pi/(2.0*180.0)
#print "the current frame rotate axis is: ", precurrX, ", ", precurrY, ", ", precurrZ
#print "the current angle is: ", precurrAngle
currqx = precurrX * sin(precurrAngle)
currqy = precurrY * sin(precurrAngle)
currqz = precurrZ * sin(precurrAngle)
currqw = cos(precurrAngle)
currnormalizing = sqrt(currqx**2 + currqy**2 + currqz**2 + currqw**2)
currQuaternion = np.array([currqx/currnormalizing, currqy/currnormalizing,
currqz/currnormalizing, currqw/currnormalizing])
#print "the current quaternion is: ", currQuaternion
# next frame, rotation, convert to quaternion
prenextRotate = rotationsFram[(index + 1) % len(framenum)]
prenextX = prenextRotate[0]/(sqrt(prenextRotate[0]**2 + prenextRotate[1]**2 + prenextRotate[2]**2))
prenextY = prenextRotate[1]/(sqrt(prenextRotate[0]**2 + prenextRotate[1]**2 + prenextRotate[2]**2))
prenextZ = prenextRotate[2]/(sqrt(prenextRotate[0]**2 + prenextRotate[1]**2 + prenextRotate[2]**2))
prenextAngle = prenextRotate[3] * pi/(2.0*180.0)
nextqx = prenextX * sin(prenextAngle)
nextqy = prenextY * sin(prenextAngle)
nextqz = prenextZ * sin(prenextAngle)
nextqw = cos(prenextAngle)
nextnormalizing = sqrt(nextqx**2 + nextqy**2 + nextqz**2 + nextqw**2)
nextQuaternion = np.array([nextqx/nextnormalizing, nextqy/nextnormalizing,
nextqz/nextnormalizing, nextqw/nextnormalizing])
# next next frame, rotation, convert to quaternion
prenextnextRotate = rotationsFram[(index + 2) % len(framenum)]
prenextnextX = prenextnextRotate[0]/(sqrt(prenextnextRotate[0]**2 + prenextnextRotate[1]**2 + prenextnextRotate[2]**2))
prenextnextY = prenextnextRotate[1]/(sqrt(prenextnextRotate[0]**2 + prenextnextRotate[1]**2 + prenextnextRotate[2]**2))
prenextnextZ = prenextnextRotate[2]/(sqrt(prenextnextRotate[0]**2 + prenextnextRotate[1]**2 + prenextnextRotate[2]**2))
prenextnextAngle = prenextnextRotate[3] * pi/(2.0*180.0)
nextnextqx = prenextnextX * sin(prenextnextAngle)
nextnextqy = prenextnextY * sin(prenextnextAngle)
nextnextqz = prenextnextZ * sin(prenextnextAngle)
nextnextqw = cos(prenextAngle)
nextnextnormalizing = sqrt(nextnextqx**2 + nextnextqy**2 + nextnextqz**2 + nextnextqw**2)
nextnextQuaternion = np.array([nextnextqx/nextnextnormalizing, nextnextqy/nextnextnormalizing,
nextnextqz/nextnextnormalizing, nextnextqw/nextnextnormalizing])
# This is for the quaternions, we use catmull-rom spline interpolation
kprime0Rotate = 0.5*(currQuaternion - prevQuaternion)/15.0 + 0.5*(nextQuaternion - currQuaternion)/15.0
kprime1Rotate = 0.5*(nextQuaternion - currQuaternion)/15.0 + 0.5*(nextnextQuaternion - nextQuaternion)/15.0
#print "the u-value is: ", u
frameURotate = currQuaternion * (2 * u * u * u - 3 * u * u + 1) + \
nextQuaternion * (3 * u * u - 2 * u * u * u) + \
kprime0Rotate * (u * u * u - 2 * u * u + u) + \
kprime1Rotate * (u * u * u - u * u)
#print "the inverse cosine of: ", frameURotate[3]
# to make sure that the ratio is less than or equal to 1, to be able to do inverse cosine
if (frameURotate[3] <= 1.0 and frameURotate[3] >= -1.0):
rotateAngle = 2.0*acos(frameURotate[3])
elif (frameURotate[3] < -1.0):
rotateAngle = 2.0 * pi
elif (frameURotate[3] > 1.0):
rotateAngle = 2.0*0.0
#print "the axes before dividing by sine are: ", frameURotate[0], ", ", frameURotate[1], ", ", frameURotate[2], ") with angle ", rotateAngle
#print "the difference is: ", sin(rotateAngle) - 0.001
# to make sure the sine of the angle is greater than 0
if (abs(sin(rotateAngle)) > 0.001):
rotateX = frameURotate[0]/sin(rotateAngle)
#print "the y-axis is: ", frameURotate[1]
#print "the angle is: ", rotateAngle
rotateY = frameURotate[1]/sin(rotateAngle)
rotateZ = frameURotate[2]/sin(rotateAngle)
else:
rotateX = frameURotate[0]
rotateY = frameURotate[1]
rotateZ = frameURotate[2]
rotateAngle = rotateAngle * 180.0/pi
#print "the rotate angle is: ", rotateAngle
#print "the rotate axis is: (", rotateX, ", ", rotateY, ", ", rotateZ, ")"
if (abs(rotateX) < 1e-10):
rotateX = 0.0
print "the translate array is: ", frameUTranslate
print "scale factor array is: ", frameUScale
print "the rotate array is: ", rotateX, ", ", rotateY, ", ", rotateZ, ", ", rotateAngle
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
rotatecam = rotatecam * Zoom
zcoord = zcoord * Zoom
# this is to help with the camera rotation around the origin
gluLookAt(rotatecam, 0.0, zcoord, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
glTranslatef(frameUTranslate[0], frameUTranslate[1], frameUTranslate[2])
glRotatef(rotateAngle, rotateX, rotateY, rotateZ)
glScalef(frameUScale[0], frameUScale[1], frameUScale[2])
#glMultMatrixf(mvm)
glEnable (GL_POLYGON_SMOOTH)
#glLoadIdentity()
glPushMatrix()
glColor3f(1.0, 1.0, 0.0)
yellowcylinder = gluNewQuadric()
gluQuadricDrawStyle(yellowcylinder, GLU_FILL)
# to make it far away
glTranslatef(-2.0, 0.0, -2.0)
glRotatef(90.0,0.0,1.0,0.0)
# gluQuadric object, base, top, height, slices, stacks
gluCylinder(yellowcylinder, 0.2, 0.2, 2.0, 8, 10)
glTranslatef(0.0, 0.0, 2.0)
glColor3f(0.0, 1.0, 0.0)
glRotatef(0.0,0.0,1.0,0.0)
greencylinder = gluNewQuadric()
gluQuadricDrawStyle(greencylinder, GLU_FILL)
gluCylinder(greencylinder, 0.2, 0.2, 2.0, 8, 10)
glTranslatef(0.1, 0.2, 0.0)
glColor3f(0.0, 0.0, 1.0)
glRotatef(-90.0,1.0,0.0,0.0)
bluecylinder = gluNewQuadric()
gluQuadricDrawStyle(bluecylinder, GLU_FILL)
gluCylinder(bluecylinder, 0.2, 0.2, 4.0, 8, 10)
glTranslatef(0.0, 2.0, 4.1)
glRotatef(90.0, 1.0, 0.0, 0.0)
glColor3f(1.0, 0.0, 1.0)
pinkcylinder = gluNewQuadric()
gluQuadricDrawStyle(pinkcylinder, GLU_FILL)
gluCylinder(pinkcylinder, 0.2, 0.2, 2.0, 8, 10)
glTranslatef(0.0, 0.0, 2.0)
glColor3f(0.0, 1.0, 1.0)
cyancylinder = gluNewQuadric()
glRotatef(0.0, 1.0, 0.0, 0.0)
gluQuadricDrawStyle(cyancylinder, GLU_FILL)
gluCylinder(cyancylinder, 0.2, 0.2, 2.0, 8, 10)
glPopMatrix()
# delete the created objects
gluDeleteQuadric(yellowcylinder)
gluDeleteQuadric(greencylinder)
gluDeleteQuadric(bluecylinder)
gluDeleteQuadric(pinkcylinder)
gluDeleteQuadric(cyancylinder)
glutSwapBuffers()
# GLUT calls this function when a key is pressed. Here we just quit when ESC or
# 'q' is pressed.
def keyfunc(key, x, y):
global pause
global counterframe
global toggle
mod = glutGetModifiers()
# To exit the program
if key == 27 or key == 'q' or key == 'Q':
glutLeaveMainLoop()
exit(0)
# To stop (pause) the program
if key == 'S' or key == 's':
pause = 1
# To play (start) the program
if key == 'P' or key == 'p':
pause = 0
# To forward one frame
if key == 'F' or key == 'f':
pause = 0
drawfunc()
pause = 1
# suppose to decrement the time by 1
if key == 'R' or key == 'r':
pause = 0
counterframe -= 2
print "the counterframe is: ", counterframe
drawfunc()
pause = 1
# Toggle Loop mode on/off.
# Loop mode means that the animation will restart at the
# beginning upon reaching the end
if key == 'T' or key == 't':
toggle = 1 - toggle
# Jump to frame. After pressing this key, the program should
# ask the user to input the frame number to jump to.
if key == 'J' or key == 'j':
# take the user-input for the frame number
DesiredFrame = input("Please input the frame number you would like to see: ")
# convert the string input into integer
DesiredFrame = int(DesiredFrame)
# pause the frame
pause = 1
# set the frame to the desired frame number
counterframe = DesiredFrame - 1
# draw
drawfunc()
# Zero. Reset to the first frame.
if key == '0':
pause = 1
counterframe = -1
drawfunc()
def processSpecialKeys(key, x, y):
global Zoom
global counterframe
global rotatecam
global zcoord
global case1
global case2
# zoom in
if key == GLUT_KEY_UP:
#print "the up key was pressed!"
Zoom = 0.9
counterframe -= 1
#print "the new zoom is: ", Zoom
drawfunc()
# reset Zoom
Zoom = 1.0
# zoom out
elif key == GLUT_KEY_DOWN:
#print "the down key was pressed!"
Zoom = 10.0/9.0
counterframe -= 1
#print "the new Zoom is: ", Zoom
drawfunc()
# reset Zoom
Zoom = 1.0
# rotate left around the origin (0, 0, 0)
elif key == GLUT_KEY_LEFT:
# do pythagorean theorem/circle thingy
originalsum = rotatecam ** 2 + zcoord ** 2
# if we subtracting 0.5 from the x-coordinate as we go to the left,
# and if the original sum is going to be less than the new x-coordinate
# squared, then we have to switch cases to adding 0.5
if (case1 == 1 and originalsum < (rotatecam - 0.5)**2):
case1 = 1 - case1
case2 = 1 - case2
elif (case2 == 1 and originalsum < (rotatecam + 0.5)**2):
case1 = 1 - case1
case2 = 1 - case2
if (case1 == 1):
rotatecam -= 0.5
zcoord = sqrt(originalsum - rotatecam**2)
elif (case2 == 1):
rotatecam += 0.5
zcoord = -1.0*sqrt(originalsum - rotatecam**2)
print "the x-coordinate is: ", rotatecam
print "the zcoord is: ", zcoord
counterframe -= 1
drawfunc()
# rotate right around the origin (0, 0, 0)
elif key == GLUT_KEY_RIGHT:
# do pythagorean theorem/circle thingy
originalsum = rotatecam ** 2 + zcoord ** 2
if (case1 == 1 and originalsum < (rotatecam + 0.5)**2):
print "cases switch!"
case1 = 1 - case1
case2 = 1 - case2
elif (case2 == 1 and originalsum < (rotatecam - 0.5)**2):
case1 = 1 - case1
case2 = 1 - case2
if (case1 == 1):
rotatecam += 0.5
zcoord = sqrt(originalsum - rotatecam**2)
elif (case2 == 1):
print "we are now in case 2"
rotatecam -= 0.5
zcoord = -1.0*sqrt(originalsum - rotatecam**2)
print "the x-coordinate is: ", rotatecam
print "the zcoord is: ", zcoord
counterframe -= 1
drawfunc()
if __name__ == "__main__":
glutInit(sys.argv)
# .script file name to input
samplescript = sys.argv[1]
glutInitDisplayMode(GLUT_RGBA | GLUT_SINGLE | GLUT_DEPTH)
glutInitWindowSize(500, 500)
glutInitWindowPosition(300, 100)
glutCreateWindow("CS171 HW7")
# define grammar
# number is float form
# does +/-, 0., .2, and exponentials
number = pp.Regex(r"[-+]?([0-9]*\.[0-9]*|[0-9]+)([Ee][+-]?[0-9]+)?")
number.setParseAction(lambda toks:float(toks[0]))
leftBrace = pp.Literal("{")
rightBrace = pp.Literal("}")
leftBracket = pp.Literal("[")
rightBracket = pp.Literal("]")
comma = pp.Literal(",")
period = pp.Literal(".")
sharp = pp.Literal("#")
# Optional added for the additional number for rotation
parameter = pp.Optional(sharp) + pp.Optional(pp.Word( pp.alphas )) + \
pp.Optional(pp.Word( pp.alphas ) + period + pp.Word(pp.alphas)) + \
pp.Optional(leftBracket) + pp.Optional(leftBrace) + \
pp.Optional(rightBracket) + pp.Optional(rightBrace) + \
pp.ZeroOrMore(number + pp.Optional(comma))
# Open a file
fo = open(samplescript, "r")
first = fo.readline()
# the total number of frames parsed here
firstparse = parameter.parseString(first)
totalframes = firstparse[0]
first = fo.readline()
# The frame numbers in the .script file
framenum = []
# The translations accumulated into a list
translationsFram = []
# The scale factors accumulated into a list
scalesFram = []
# The rotations accumulated into a list
rotationsFram = []
# count the number of frame
global counterframe
counterframe = -1
# Pause, set to no pause
global pause
pause = 1
# Toggle, set to only 1 run
global toggle
toggle = 1
# To zoom in (press the arrow up key to zoom in)
# (press the arrow down key to zoom out)
global Zoom
Zoom = 1.0
# How far away is the camera from the object
global zcoord
zcoord = 60.0
# To rotate the camera around the origin
# (press the arrow left key to rotate around left about origin)
# (press the arrow right key to rotate around right about origin)
global rotatecam
rotatecam = 0
# To initialize the window at t = 0
global Initial
Initial = 0
# This is for going left or right by more than 90 degrees
global case1
global case2
case1 = 1
case2 = 0
# Enable depth-buffer test.
#glEnable(GL_DEPTH_TEST)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
#glOrtho(-15.0, 15.0, -15.0, 15.0, 1.0, 30.0)
#glFrustum(-15.0, 15.0, -20.0, 20.0, 0.5, 30.0)
#glFrustum(-.3, 0.3, -0.3, 0.3, 1.0, 30.0)
#gluLookAt(0.0, 0.0, -3.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
# viewing angle is 88 degrees, distance between viewer and nearest clipping plane is 0
# distance between viewer and furthest clipping plane is 10
gluPerspective(65.0, 2.0, 0.01, 500.0);
#glOrtho(0.0, 20.0, 0.0, 20.0, -20.0, 20.0)
while (first != ''):
firstparse = parameter.parseString(first)
# if we reach a Frame block, then store the translation, scale, and rotation, or whatever is available.
if (firstparse[0] == "Frame"):
# add the frame number associated with the Frame term
framenum.append(firstparse[1])
# now let's investigate this Frame block
first = fo.readline()
firstparse = parameter.parseString(first)
# investigate this particular frame block until we reach the next frame block or the end of the file
while (first != '' and firstparse[0] != "Frame"):
if (firstparse[0] == "translation"):
translation = [firstparse[1], firstparse[2], firstparse[3]]
translationsFram.append(translation)
elif (firstparse[0] == "scale"):
scale = [firstparse[1], firstparse[2], firstparse[3]]
scalesFram.append(scale)
elif (firstparse[0] == "rotation"):
rotation = [firstparse[1], firstparse[2], firstparse[3], firstparse[4]]
rotationsFram.append(rotation)
first = fo.readline()
firstparse = parameter.parseString(first)
print "the total amount of frames is: ", totalframes
print "the frame numbers in the file are: ", framenum
print "The translations for the frames are: ", translationsFram
print "the scale factors for the frame are: ", scalesFram
print "the rotations for the frame are: ", rotationsFram
fo.close()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# this is to help with the camera rotation around the origin
gluLookAt(0.0, 0.0, zcoord, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)
glutDisplayFunc(idle)
glutIdleFunc(idle)
glutKeyboardFunc(keyfunc)
glutSpecialFunc(processSpecialKeys)
glutMainLoop()
| true |
48fb7d2b450ca7d111bc170baf0c8c56a3351d99 | Python | jlnerd/pyDSlib | /pyDSlib/ML/postprocessing/transform.py | UTF-8 | 395 | 3.03125 | 3 | [
"MIT"
] | permissive |
def one_hot_proba_to_class(y_proba, proba_threshold = 0.5 ):
"""
Transform a one-hot encoded style numpy array of probablities into 0, 1 class IDs
Arguments:
----------
y_proba: numpy array
"""
for i in range(y_proba.shape[1]):
y_proba[:,i][y_proba[:,i]>=proba_threshold] = 1
y_proba[:,i][y_proba[:,i]<proba_threshold] = 0
return y_proba | true |
49f6f86d7d6f4ff4dc76c87dfd92ae720bf8bcf7 | Python | WeddingCandy/Huaat_2018 | /label_auto/labels_pre_classify_to_independent_doc_v2.py | UTF-8 | 3,735 | 2.703125 | 3 | [] | no_license | # -*- coding:UTF-8 -*
import pandas as pd
import re
import numpy as np
from jieba import posseg as pg
import jieba
import jieba.analyse
import os
"""
用来将新扒下来的标签切词分类。
其中有:
1.过滤词,过滤网页专业术语词;
2.文档只包含一级大类和HEAD信息
"""
# jieba.enable_parallel()
def modify_output(s):
pattern1 = re.compile('[ \[\]\'《》\<\>‘’“”\"\(\)]+')
line1 = pattern1.sub(' ',s)
line1.replace("\[",' ').replace("\'",' ').replace("\]",' ')
pattern2 = re.compile('\s{2,}')
line2 = pattern2.sub(' ',line1)
line2.strip()
return line2
def stop_words(stop_words_list):
stopwords_list = [line.strip() for line in open(stop_words_list, 'r', encoding='utf-8').readlines()]
l1 = sorted(set(stopwords_list), key=stopwords_list.index)
print(l1)
return l1
def line_to_words(s,stopwords_list):
strxx = " "
try:
line = pg.cut(s)
allowPOS = ['n', 'v', 'a', 'ns', 'ad', 't', 's', 'vn', 'nr', 'nt']
for word in line:
if word.flag in allowPOS:
if len(word.word) > 1 and word.word not in stopwords_list:
strxx += word.word + " "
else :
continue
return strxx
except Exception as e:
print(e)
return 'NOTHING'
def load_docs(aim_excel,stop_words_list,label_list):
stopwords_list = stop_words(stop_words_list)
data = pd.read_excel(aim_excel, sheet_name='Sheet2', header=0, encoding='utf-8')
data_length = len(data)
with open(label_list, 'r', encoding='utf-8') as f:
lines = f.readlines()
label_list_dic = {}
for line in lines:
line = line.split(' ')
label_list_dic[line[0]] = line[1].strip()
print(label_list_dic)
return data,label_list_dic,data_length ,stopwords_list
def main_process(aim_excel,stop_words_list,label_list):
data, label_list_dic, data_length,stopwords_list = load_docs(aim_excel,stop_words_list,label_list)
COUNT = 0
for i in range(data_length):
try:
if data.iloc[i:i+1,0:1].values != data.iloc[i+1:i+2,0:1].values:
COUNT = 0
file_class = label_list_dic[(data.iloc[i:i+1,0:1].values).tolist()[0][0]]
contents = str((data.iloc[i:i+1,1:2].values).tolist()[0][0])
contents_modified = modify_output(contents)
contents_fenci = line_to_words(contents_modified,stopwords_list)
if contents != 'nan' and contents !='NOTHING':
COUNT += 1
file_name =path_output+os.sep+file_class+'_'+str(COUNT)+'.txt'
with open(file_name,'w',encoding='utf-8') as f:
f.write(contents_fenci)
except Exception as e:
file_class = (data.iloc[-2:-1, 0:1].values).tolist()[0][0]
contents = str((data.iloc[-2:-1, 1:2].values).tolist()[0][0])
contents = modify_output(contents)
if contents != 'nan' or contents != 'NOTHING':
file_name = path_output + os.sep + file_class + '_' + str(999) + '.txt'
with open(file_name, 'w', encoding='utf-8') as f:
f.write(contents)
print('done')
path_output = r'C:\Users\thinkpad\Desktop\crawlers\labels\level1\output'
aim_excel = r"C:\Users\thinkpad\Desktop\crawlers\labels\level1\labels_original_trainset.xlsx"
stop_words_list =r'C:\Users\thinkpad\Desktop\crawlers\labels\level1\filter_voc.txt'
label_list = r'C:\Users\thinkpad\Desktop\crawlers\labels\level1_lablelist_with_original_name.txt'
if not os.path.exists(path_output):
os.mkdir(path_output)
do = main_process(aim_excel,stop_words_list,label_list)
| true |
8eb56af2c2ef4fdf6a04c3e251fd0eecdb3c63bb | Python | alshamiri5/makerfaire-booth | /2018/burger/generator/label_burger.py | UTF-8 | 2,269 | 3.078125 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | import sys
sys.path.insert(0, "../constants")
from constants import MAX_BURGER_HEIGHT
from burger_elements import BurgerElement
def label_burger(burger, debug=False):
if len(burger) != MAX_BURGER_HEIGHT:
if debug: print("Burger is wrong size")
return False
for i in range(len(burger)):
if (burger[i] == BurgerElement.shoe.value or
burger[i] == BurgerElement.banana.value or
burger[i] == BurgerElement.book.value):
if debug: print("Cannot have shoe, banana, or book")
return False
topbun_pos = None
for i in range(len(burger)):
layer = burger[i]
# base case
if layer == BurgerElement.empty.value:
continue
if layer == BurgerElement.topbun.value:
topbun_pos = i
break
if debug: print("prefix elements must be empty or topbun")
return False
if topbun_pos is None:
if debug: print("Burger does not have topbun")
return False
if burger[-1] != BurgerElement.bottombun.value:
if debug: print("Burger does not have bottombun bottom")
return False
for i in range(topbun_pos+1, len(burger)-1):
if burger[i] == BurgerElement.topbun.value or burger[i] == BurgerElement.bottombun.value:
if debug: print("Cannot have internal buns")
return False
for i in range(topbun_pos+1, len(burger)):
if burger[i] == BurgerElement.empty.value:
if debug: print("Cannot have internal or terminal empty")
return False
for i in range(len(burger)-1):
first, second = burger[i], burger[i+1]
if first != BurgerElement.empty.value and second != BurgerElement.empty.value and first == second:
if debug: print("Cannot have identical sequential items")
return False
for i in range(1, len(burger)-1):
if burger[i] == BurgerElement.patty.value:
break
else:
if debug: print("Must have at least one patty")
return False
last_cheese_index = None
for i in range(1, len(burger)-1):
if burger[i] == BurgerElement.cheese.value:
last_cheese_index = i
if last_cheese_index is not None:
for i in range(last_cheese_index+1, len(burger)):
if burger[i] == BurgerElement.patty.value:
break
else:
if debug: print("Must have at patty under last cheese.")
return False
return True
| true |
9793cbd62ebe18f8582b62df40c742082faf74cb | Python | Delayless/TL740D_Gyro | /ASCII_conv_hex.py | UTF-8 | 1,695 | 3.640625 | 4 | [] | no_license | class Converter:
@staticmethod
# 比如这里实参只能是不包含前导符0x的十六进制数(0-F)的字符串,如'6805000B0212'
# 可以转换成以这些十六进制数为ASCII码值所对应的字符串返回
def hex_to_ascii(h):
"""
转换成ASCII码值对应的字符串
这次使用我是将字符串'6805000B0212'转换,其中的68转换成h
因为68对应的十进制为104
104对应的ascii字符为h
:return str类型: h (,
"""
list_s = []
# i每次增长2,len为字符串长度
for i in range(0, len(h), 2):
# upper()是转换为大写
# append()是在方法用于在列表(List)末尾添加新的对象
list_s.append(chr(int(h[i:i+2].upper(), 16)))
return ''.join(list_s)
@staticmethod
def str_to_hexstr(s):
list_h = []
for c in s:
# ord()返回单个字符的ASCII码值,如字符a返回的是97
list_h.append(str(hex(ord(c)))[-2:]) # 取hex转换16进制的后两位
return ''.join(list_h)
"""
a1 = Converter.hex_to_ascii('6805000B0212')
b1 = a1.encode()
decode_b1 = b1.decode()
# 跟上面一样的效果
a2 = '\x68\x05\x00\x0B\x02\x12'
b2 = a2.encode()
d1 = Converter.str_to_hex(a1)
c2 = bytes.fromhex('6805000B0212')
d2 = bytes.hex(c2)
e = a1
print(a1)
"""
a = b'h'
b = b'h\x69'
c = b[1:] == b'i'
a = chr(68)
a = Converter.str_to_hexstr('hi')
a = '680400282C'
b = a.encode()
clear_Sensor_angle = bytes.hex(b'h')
b = clear_Sensor_angle.encode()
a = 'h' + 'ello'
b = a.encode()
c = 'hello'
d = c.encode()
e = b'h\x02\x1f'
i = 2
f = b'\x02\x1f'
g = b'h'
h = g + f
| true |
b2f84a31a709a01647c2a33bf32f2e1faf4afdff | Python | diego-aquino/competitive-programming | /OBI/Exercices/dp/dequeProblem.py | UTF-8 | 902 | 3.296875 | 3 | [] | no_license | # Working solution!
def main():
n = int(input())
seq = tuple(map(lambda x: int(x), input().split()))
points = []
for i in range(n - 1):
points.append([0] * n)
if n == 1:
print(seq[0])
return
for i in range(n - 1):
points[i][i + 1] = (
max(seq[i], seq[i + 1]),
min(seq[i], seq[i + 1])
)
for k in range(2, n):
for i in range(n - k):
currLeft = seq[i] + points[i + 1][i + k][1]
currRight = seq[i + k] + points[i][i + k - 1][1]
nextLeft = points[i + 1][i + k][0]
nextRight = points[i][i + k - 1][0]
if currLeft > currRight:
points[i][i + k] = (currLeft, nextLeft)
else:
points[i][i + k] = (currRight, nextRight)
points.pop()
print(points[0][n - 1][0] - points[0][n - 1][1])
main()
| true |
21c8c544344ff219c2064e210252b16f82e7f56f | Python | Kirishima21/yosei | /lib/medicinesName.py | UTF-8 | 697 | 2.875 | 3 | [
"MIT"
] | permissive | import PySimpleGUI as sg
import pandas as pd
def add_medicines_name(data):
print(data)
df = pd.read_excel('data.xlsx', sheet_name=None, index_col=0)
bool = not any(df["Sheet2"]["name"].str.contains(str(data)))
if bool:
df_add = pd.DataFrame([data], columns=['name'], index=['index'])
df1 = df["Sheet1"]
df2 = df["Sheet2"].append(df_add)
with pd.ExcelWriter('data.xlsx') as writer:
df1.to_excel(writer, sheet_name='Sheet1', index='name')
df2.to_excel(writer, sheet_name='Sheet2', index='index', header="name")
sg.popup("登録が完了しました。")
else:
sg.popup("登録済みの医薬品名です")
| true |
39ff9426351030edaa0cdc09aec0c04671f5558e | Python | AlexFue/Interview-Practice-Problems | /dynamic_programming/fibonacci_number.py | UTF-8 | 1,486 | 4.3125 | 4 | [] | no_license | Problem:
The Fibonacci numbers, commonly denoted F(n) form a sequence, called the Fibonacci sequence, such that each number is the sum of the two preceding ones, starting from 0 and 1. That is,
F(0) = 0, F(1) = 1
F(n) = F(n - 1) + F(n - 2), for n > 1.
Given n, calculate F(n).
Example 1:
Input: n = 2
Output: 1
Explanation: F(2) = F(1) + F(0) = 1 + 0 = 1.
Example 2:
Input: n = 3
Output: 2
Explanation: F(3) = F(2) + F(1) = 1 + 1 = 2.
Example 3:
Input: n = 4
Output: 3
Explanation: F(4) = F(3) + F(2) = 2 + 1 = 3.
Constraints:
0 <= n <= 30
Process:
class Solution:
def fib(self, n: int) -> int:
if n < 2: return n
dp = [0,1]
for x in range(2, n+1):
dp += [dp[x-1] + dp[x-2]]
return dp[-1]
Solution:
The way we are going to solve this problem is by using dynamic programming(bottom-up approach)
Improve upon the recursive option by using iteration, still solving for all of the sub-problems and returning the answer for N, using already computed Fibonacci values. In using a bottom-up approach, we can iteratively compute and store the values, only returning once we reach the result.
Algorithm
If N is less than or equal to 1, return N
Otherwise, iterate through N, storing each computed answer in an array along the way.
Use this array as a reference to the 2 previous numbers to calculate the current Fibonacci number.
Once we've reached the last number, return it's Fibonacci number.
| true |
d4e85b1b47d8080b72540bcc073758b4c601b6b9 | Python | CXY-YSL/MGZDTS | /Python/PythonCode/Chapter08/批量修改文件名.py | UTF-8 | 490 | 3.5 | 4 | [
"MIT"
] | permissive | # 批量在文件名前加前缀
import os
funFlag = 1 # 1表示添加标志 2表示删除标志
folderName = './'
# 获取指定路径的所有文件名字
dirList = os.listdir(folderName)
# 遍历输出所有文件名字
for name in dirList:
print(name)
if funFlag == 1:
newName = '[黑马程序员]-' + name
elif funFlag == 2:
num = len('[黑马程序员]-')
newName = name[num:]
print(newName)
os.rename(folderName+name, folderName+newName)
| true |
b62d453092f5ff3935d8ba0a36a72e4c28e29568 | Python | an2050/titop-tactic | /_lib/sequenceUtils.py | UTF-8 | 4,210 | 2.625 | 3 | [] | no_license | import re
class sequenceError(BaseException):
pass
class sequenceFileObject:
"""docstring for sequenceFileObject"""
def __init__(self, countType="houdini"):
self.active = False
self.countType = countType
self.countTypes = {"houdini": "${F%p%}", "nuke": "%0%p%d"}
# self.pattern = r"(.+?\.\D*?)(\d+)(\D*?(\..+))"
self.pattern = r"^(?P<body>.+?(?P<ver>[vV]\d{1,4})?(\.|_))(?P<counter>\d+?)(?P<ext>\.[\w\d]{2,4})$"
self.body = ""
self.ext = ""
self.counter = ""
self.counterList = []
self.extensions = [".exr", ".dpx", ".jpeg", ".jpg"]
def addSequenceElement(self, fileName):
matchPattern = re.match(self.pattern, fileName)
if not matchPattern:
raise sequenceError("File does not match for sequence: " + fileName)
ext = matchPattern.group('ext')
if ext not in self.extensions:
print("Unexpected file extension '{}'".format(ext))
return False
if self.active:
match = self.checkProperties(matchPattern)
if not match:
return False
else:
self.active = True
self.setSeqProperties(matchPattern)
self.counterList.append(matchPattern.group('counter'))
return True
def setSeqProperties(self, matchPattern):
self.body = matchPattern.group('body')
self.ext = matchPattern.group('ext')
def checkProperties(self, matchPattern):
body = self.body == matchPattern.group('body')
if body:
return True
else:
return False
def getCountGroups(self):
countGroups = []
first = None
last = None
self.counterList.sort()
for idx, element in enumerate(self.counterList):
elementInt = int(element)
if first is None:
first = elementInt
try:
nextElement = int(self.counterList[idx + 1])
if nextElement - elementInt != 1:
last = elementInt
countGroups += self.__closeSeqGroup(first, last)
first = None
last = None
except IndexError:
last = elementInt
countGroups += self.__closeSeqGroup(first, last)
countGroups = sorted(countGroups, key=lambda x: int(re.match(r"\d+", x).group(0)))
return countGroups
def __closeSeqGroup(self, first, last):
if first != last:
return ["{}-{}".format(first, last)]
else:
return [str(last)]
def getPadding(self):
padd = list(set([len(x) for x in self.counterList]))
if len(padd) == 1:
return padd[0]
else:
return ""
def getSequenceTemplate(self):
padding = self.getPadding()
counter = self.countTypes[self.countType].replace("%p%", str(padding))
return "{}{}{}".format(self.body, counter, self.ext)
def getSequenceRepr(self):
return "{} ({})".format(self.getSequenceTemplate(), " :: ".join(self.getCountGroups()))
def __repr__(self):
return self.getSequenceRepr()
def getSeqObj(fileList, processedFiles, noSequencefiles, countType='houdini'):
seqObj = sequenceFileObject(countType)
for fileName in fileList:
if fileName in processedFiles:
continue
try:
if seqObj.addSequenceElement(fileName):
processedFiles.append(fileName)
except sequenceError:
processedFiles.append(fileName)
noSequencefiles.append(fileName)
return seqObj
def getSequecneRepresentation(fileNameList, countType='houdini'):
processedFiles = []
noSequencefiles = []
seqObjList = []
while len(processedFiles) < len(fileNameList):
seqObjList.append(getSeqObj(fileNameList, processedFiles, noSequencefiles, countType))
sequenceReprDict = {}
for seq in seqObjList:
sequenceReprDict[seq.getSequenceRepr()] = seq.getSequenceTemplate()
for file in noSequencefiles:
sequenceReprDict[file] = file
return sequenceReprDict
| true |
5a9046c1eba9d6e7ef42ff7b3bfd75d5bf0775ba | Python | microsoftgraph/msgraph-sdk-python | /msgraph/generated/models/password_credential.py | UTF-8 | 4,546 | 2.625 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
import datetime
from dataclasses import dataclass, field
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
from uuid import UUID
@dataclass
class PasswordCredential(AdditionalDataHolder, BackedModel, Parsable):
# Stores model information.
backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additional_data: Dict[str, Any] = field(default_factory=dict)
# Do not use.
custom_key_identifier: Optional[bytes] = None
# Friendly name for the password. Optional.
display_name: Optional[str] = None
# The date and time at which the password expires represented using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. Optional.
end_date_time: Optional[datetime.datetime] = None
# Contains the first three characters of the password. Read-only.
hint: Optional[str] = None
# The unique identifier for the password.
key_id: Optional[UUID] = None
# The OdataType property
odata_type: Optional[str] = None
# Read-only; Contains the strong passwords generated by Azure AD that are 16-64 characters in length. The generated password value is only returned during the initial POST request to addPassword. There is no way to retrieve this password in the future.
secret_text: Optional[str] = None
# The date and time at which the password becomes valid. The Timestamp type represents date and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 is 2014-01-01T00:00:00Z. Optional.
start_date_time: Optional[datetime.datetime] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> PasswordCredential:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: PasswordCredential
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return PasswordCredential()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
fields: Dict[str, Callable[[Any], None]] = {
"customKeyIdentifier": lambda n : setattr(self, 'custom_key_identifier', n.get_bytes_value()),
"displayName": lambda n : setattr(self, 'display_name', n.get_str_value()),
"endDateTime": lambda n : setattr(self, 'end_date_time', n.get_datetime_value()),
"hint": lambda n : setattr(self, 'hint', n.get_str_value()),
"keyId": lambda n : setattr(self, 'key_id', n.get_uuid_value()),
"@odata.type": lambda n : setattr(self, 'odata_type', n.get_str_value()),
"secretText": lambda n : setattr(self, 'secret_text', n.get_str_value()),
"startDateTime": lambda n : setattr(self, 'start_date_time', n.get_datetime_value()),
}
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
writer.write_bytes_value("customKeyIdentifier", self.custom_key_identifier)
writer.write_str_value("displayName", self.display_name)
writer.write_datetime_value("endDateTime", self.end_date_time)
writer.write_str_value("hint", self.hint)
writer.write_uuid_value("keyId", self.key_id)
writer.write_str_value("@odata.type", self.odata_type)
writer.write_str_value("secretText", self.secret_text)
writer.write_datetime_value("startDateTime", self.start_date_time)
writer.write_additional_data_value(self.additional_data)
| true |
7d1cda0ec7e3da6448121cbab5a32fe52ec6664a | Python | acv0209/HancomMDS | /BigData/강의자료/1. 파이썬 입문/6 함수 만들기.py | UTF-8 | 182 | 3.75 | 4 | [] | no_license | '''
>>> add_num1(1,3)
1 + 3 = 4 (함수 안에서 출력되는 값)
4 (리턴값)'''
def add_num1(a, b):
c = a+b
print("{} + {} = {}".format(a, b, c))
return c
| true |
0b31f9da62f81219927cf65b085be664cc9c7bff | Python | jaychan09070339/Python_Basic | /practice_8/list1.py | UTF-8 | 204 | 3.5 | 4 | [] | no_license |
a=int(input("请输入第一个数:"))
b=int(input("请输入第二个数:"))
c=int(input("请输入第三个数:"))
L=[a,b,c]
print("average:",sum(L)/3)
print("max:",max(L))
print("min:",min(L))
| true |
cd9e0fff0d91e0efda90d647b596c60bae0f2d63 | Python | Aasthaengg/IBMdataset | /Python_codes/p02573/s558448832.py | UTF-8 | 3,012 | 2.765625 | 3 | [] | no_license | from __future__ import print_function
from functools import reduce
from operator import mul
from collections import Counter
from collections import deque
from itertools import accumulate
from queue import Queue
from queue import PriorityQueue as pq
from heapq import heapreplace
from heapq import heapify
from heapq import heappushpop
from heapq import heappop
from heapq import heappush
import heapq
import time
import random
import bisect
import itertools
import collections
from fractions import Fraction
import fractions
import string
import math
import operator
import functools
import copy
import array
import re
import sys
sys.setrecursionlimit(500000)
input = sys.stdin.readline
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
return
# from fractions import gcd
# from math import gcd
# def lcm(n, m):
# return int(n * m / gcd(n, m))
# def coprimize(p, q):
# common = gcd(p, q)
# return (p // common, q // common)
# def find_gcd(list_l):
# x = reduce(gcd, list_l)
# return x
def combinations_count(n, r):
r = min(r, n - r)
numer = reduce(mul, range(n, n - r, -1), 1)
denom = reduce(mul, range(1, r + 1), 1)
return numer // denom
mod = 1000000007
def combinations_count_mod(n, r):
r = min(r, n - r)
numer = reduce(lambda x, y: x * y % mod, range(n, n - r, -1), 1)
denom = pow(reduce(lambda x, y: x * y % mod, range(1, r + 1), 1), mod - 2, mod)
return numer * denom % mod
class UnionFind():
def __init__(self, n):
self.n = n
self.parents = [-1] * n
def find(self, x):
if self.parents[x] < 0:
return x
else:
self.parents[x] = self.find(self.parents[x])
return self.parents[x]
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.parents[x] > self.parents[y]:
x, y = y, x
self.parents[x] += self.parents[y]
self.parents[y] = x
def size(self, x):
return -self.parents[self.find(x)]
def same(self, x, y):
return self.find(x) == self.find(y)
def members(self, x):
root = self.find(x)
return [i for i in range(self.n) if self.find(i) == root]
def roots(self):
return [i for i, x in enumerate(self.parents) if x < 0]
def group_count(self):
return len(self.roots())
def all_group_members(self):
return {r: self.members(r) for r in self.roots()}
def __str__(self):
return '\n'.join('{}: {}'.format(r, self.members(r)) for r in self.roots())
def solve():
pass
def main():
n, m = map(int, input().strip().split())
uf = UnionFind(n)
ans = 1
for i in range(m):
a, b = map(lambda x: int(x)-1, input().strip().split())
if a > b:
c = a
a = b
b = c
uf.union(a,b)
ans = max(ans,uf.size(a))
print(ans)
if __name__ == '__main__':
main()
| true |
544c29119eb974b0ef38aa9815bc92138f60ba79 | Python | s-tefan/python-exercises | /plottalistor.py | UTF-8 | 941 | 3.328125 | 3 | [] | no_license | import graphics, math
def plottalistor(xlist,ylist,win,color='black'):
xmin=min(xlist)
xmax=max(xlist)
ymin=min(ylist)
ymax=max(ylist)
#w=win.getWidth()
#h=win.getHeight()
win.setCoords(xmin,ymin,xmax,ymax)
x0,y0=xlist[0],ylist[0]
for n in range(1,len(xlist)):
x1,y1 = xlist[n],ylist[n]
xs0,ys0 = win.toScreen(x0,y0)
xs1,ys1 = win.toScreen(x1,y1)
dx,dy = x1-x0, y1-y0
dxs,dys = xs1-xs0,ys1-ys0
steps=max(abs(dxs),abs(dys))
for k in range(0,steps):
x,y = x0+dx*k/steps, y0+dy*k/steps
win.plot(x,y,color)
x0,y0=x1,y1
ints=100
xl=[j*2*2*math.pi/ints for j in list(range(ints+1))]
yl=[math.sin(x) for x in xl]
print(xl)
print(yl)
win=graphics.GraphWin('x:[{:0.2f},{:0.2f}], y:[{:0.2f},{:0.2f}]'\
.format(min(xl),max(xl),min(yl),max(yl)),\
600, 400)
plottalistor(xl,yl,win)
| true |
1f760666a12360e616b554da41348852a70d6c2b | Python | aish2028/stack | /s1.py | UTF-8 | 1,453 | 4.09375 | 4 | [] | no_license | class Stack:
def __init__(self):
self.st=[]
def push(self,ele):
self.st.append(ele)
def pop(self):
if self.is_empty():
print("stack is empty")
else:
ele=self.st.pop()
print(f"element {ele} is removed from the stack")
def search(self,searchEle):
if self.is_empty():
print("stack is empty")
else:
for index,ele in enumerate(self.st):
if ele==searchEle:
return index
return -1
def show(self):
if self.is_empty:
print("stack is empty")
else:
print(self.st)
def is_empty(self):
return len(self.st)==0
if __name__=="__main__":
st=Stack()
opt_dict={1:st.push,2:st.pop,3:st.search,4:st.show,5:exit}
while True:
print("1.push 2.pop 3.search 4.display 5.exit")
try:
ch=int(input("enter your choice:"))
if ch==1:
ele=int(input('enter the elemnt to be pushed:'))
st.push(ele)
elif ch==2:
st.pop()
elif ch==3:
ele=input("enter the element to search:")
res=st.search(ele)if res!= -1
print(f"element {ele} found in location")
ref=opt_dict[ch]
ref()
except (ValueError,KeyError):
print("enter only numbers from 1 to 5") | true |
4c2b30486593c81649a964d7107c030efd59e88f | Python | robinsingh-rs/Python | /EmailSender/emailsender.py | UTF-8 | 358 | 2.984375 | 3 | [] | no_license | import smtplib
to = input("Enter the email of receiver:\n") # email address
content = input("Enter the message:\n") # message
def sendEmail(to, content):
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.login('sender@email','password')
server.sendmail('sender email', to, content)
server.quit()
sendEmail(to, content) | true |
0f622474b0a797b2fd000a63dd87538109494e7d | Python | jemtca/CodingBat | /Python/String-2/repeat_separator.py | UTF-8 | 459 | 4.09375 | 4 | [] | no_license |
# given two strings, word and a separator sep, return a big string made of count occurrences of the word, separated by the separator string
def repeat_separator(word, sep, count):
s = ''
if count > 1:
for _ in range(count-1):
s = s + word + sep
if count >= 1:
s = s + word
return s
print(repeat_separator('Word', 'X', 3))
print(repeat_separator('This', 'And', 2))
print(repeat_separator('This', 'And', 1))
| true |
412b5f3d44677c281d30bf01b23c9057157d2444 | Python | uilleand/PHY494 | /03_python/list_practice.py | UTF-8 | 229 | 3.015625 | 3 | [] | no_license | # homework assignment one, lol for Hitchiker references
bag = ["guide", "towel", "tea", 42]
ga = "Four score and seven years ago."
# work
for essentials in bag:
most_important = essentials in range(2,4)
print(most_important)
| true |
66ec35b9b6e6ac434c9945457dcdb3b682bcdae4 | Python | shmundada93/InstamojoTweetBot | /worker.py | UTF-8 | 3,256 | 2.5625 | 3 | [] | no_license | import tweepy
from tweepy import Stream
from tweepy import OAuthHandler
from instamojo import Instamojo
import re
import os
import psycopg2
import urlparse
# Twitter Consumer keys and access tokens, used for OAuth
consumer_key = 'nZEzUToqKZcMIWu4nSNXnq6Kq'
consumer_secret = 'xZpwdeiE4FnhQ5E4SE7O3KKa3FCzNWiPfDGRvrIPyHZKvpo4ZH'
access_token = '3183114434-w4opn1VCE4DONH3aTybI0BjMVcdfphNrlbBVP9R'
access_token_secret = 'bz5RRCPpg1qCsRehFCzgoeqlKtKq45rSvyZ9fMGuLCYwb'
#Connecting to database
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse("postgres://jstgsgfleazuvu:nJGYg0dT6AYMNbqdBkV3bIf-Q8@ec2-184-73-165-195.compute-1.amazonaws.com:5432/dbaklh7r4800dg")
#Database Open/Close methods
def opendb():
return psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
def closedb(conn):
conn.commit()
conn.close()
#Tweet pattern
pattern = r'#sell (.*) which is (.*) and costs (\d+) (\w+)'
class StdOutListener(tweepy.streaming.StreamListener):
''' Handles data received from the stream. '''
def on_status(self, status):
text = status.text.encode('ascii','ignore')
if "#sell " in text:
twitter_id = str(status.author.id_str)
twitter_handle = str(status.author.screen_name)
try:
m = re.match(pattern, text)
params = m.groups()
instamojo_auth = user_details[twitter_id]["instamojo_auth"]
api = Instamojo(api_key='4dcb3d45a65808a290e7b79336b4c5be',
auth_token=instamojo_auth)
# Create a Instamojo Link.
response = api.link_create(title=params[0],
description=params[1],
base_price=params[2],
currency=params[3])
# URL of the created link
url = str(response['link']['url'])
#Printing to console
print twitter_handle
print text
print url
# Saving details to Tweets database
conn = opendb()
c = conn.cursor()
c.execute("INSERT INTO Tweets VALUES ('%s','%s','%s')"%(twitter_handle,text,url))
closedb(conn)
print "Database Updated"
except:
pass
return True
def on_error(self, status_code):
print('Got an error with status code: ' + str(status_code))
return True # To continue listening
def on_timeout(self):
print('Timeout...')
return True # To continue listening
#Select twitter_ids to follow
userconn = opendb()
userdb = userconn.cursor()
userdb.execute('SELECT * FROM Users')
users = userdb.fetchall()
user_details = {}
twitter_ids = []
for user in users:
twitter_ids.append(str(user[1]))
user_details[str(user[1])] = {"twitter_handle":str(user[0]),"instamojo_auth":str(user[2])}
closedb(userconn)
#Initializing stream
listener = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
stream = Stream(auth, listener)
stream.filter(follow=twitter_ids)
| true |
7d3c06cb73eef1e258450d3a2c9c61751f645005 | Python | Michael-DaSilva/HEIGVD-SWI21-Labo1-WEP | /files/manual-fragmentation.py | UTF-8 | 2,243 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Manually encrypt a wep message given the WEP key and fragment the packet"""
__author__ = "Michaël da Silva, Nenad Rajic"
__copyright__ = "Copyright 2021, HEIG-VD"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "michael.dasilva@heig-vd.ch, nenad.rajic@heig-vd.ch"
__status__ = "Prototype"
from scapy.all import *
import binascii
from rc4 import RC4
# Fonction de chiffrement des données
def encryption(data, key, arp):
# Calcul du ICV + passage en bytes au format little endian
icv_numerique = binascii.crc32(data.encode()) & 0xffffffff
icv_enclair = icv_numerique.to_bytes(4, byteorder='little')
# rc4 seed est composé de IV+clé
seed = arp.iv + key
# chiffrement rc4
cipher = RC4(seed, streaming=False)
ciphertext = cipher.crypt(data.encode() + icv_enclair)
return ciphertext
# Fonction de création de frame à partir de fragment du message
def createFrame(data, key, arp):
frame = arp
# ICV du corps de la trame chiffré au format Big endian
frame.icv = struct.unpack("!L", data[-4:])[0]
# Corps de la trame chiffré sans l'ICV
frame.wepdata = data[:-4]
return frame
# Cle wep AA:AA:AA:AA:AA
key = b'\xaa\xaa\xaa\xaa\xaa'
# Message à utiliser dans notre paquet forgé (min. 36 caractères)
data = "HEIG-VD 2021, SWI: laboratoire 2 WEP"
# Nom du fichier contenant les paquets
fileName = "step3.pcap"
# Division du message en N string (pour N paquets)
numberFrags = 3
sizeData = len(data)//numberFrags
dataChunks = [data[i:i+sizeData] for i in range(0, len(data), sizeData)]
packets = []
for i in range(numberFrags):
# lecture de message chiffré - rdpcap retourne toujours un array, même si la capture contient un seul paquet
arp = rdpcap('arp.cap')[0]
# Fragment du message du paquet
text = dataChunks[i]
# Bit de "more fragment" à 1 sauf le dernier
if i != numberFrags-1:
arp.FCfield |= 0x4
# Numéro du packet
arp.SC = i
# Reset de taille de packet
arp[RadioTap].len = None
# Création du paquet
cipherText = encryption(text, key, arp)
frame = createFrame(cipherText, key, arp)
packets.append(frame)
wrpcap(fileName, packets) | true |
d904a5cbbdf64387868c0124cfd3153800a536aa | Python | escottrose01/pyGravSim | /engine.py | UTF-8 | 2,107 | 2.796875 | 3 | [] | no_license | import pygame
class SceneBase:
def __init__(self):
self.next = self
def ProcessInput(self, events, pressed_keys):
# Put anything that involves input in here
print("uh-oh, you didn't override this in the child class")
def Update(self):
# Put anything that happens regardless of input here
print("uh-oh, you didn't override this in the child class")
def Render(self, screen):
# Draw the screen!
print("uh-oh, you didn't override this in the child class")
def SwitchToScene(self, next_scene):
self.next = next_scene
def Terminate(self):
self.SwitchToScene(None)
def main(width, height, fps, start_scene):
pygame.init()
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Gravity')
clock = pygame.time.Clock()
active_scene = start_scene
while active_scene != None:
pressed_keys = pygame.key.get_pressed()
# Filter events
filtered_events = []
for event in pygame.event.get():
quit_attempt = False
if event.type == pygame.QUIT:
quit_attempt = True
elif event.type == pygame.KEYDOWN:
alt_pressed = pressed_keys[pygame.K_LALT] or \
pressed_keys[pygame.K_RALT]
if event.key == pygame.K_ESCAPE:
quit_attempt = True
elif event.key == pygame.K_F4 and alt_pressed:
quit_attempt = True
if quit_attempt:
active_scene.Terminate()
else:
filtered_events.append(event)
active_scene.ProcessInput(filtered_events, pressed_keys) # determine and process inputs
active_scene.Update() # ???
active_scene.Render(screen) # Draw
active_scene = active_scene.next
pygame.display.flip()
clock.tick(fps)
fonts = {}
def textBox(screen, pos, message, bgcolor=(255,255,255), fgcolor=(0,0,0), sfont='freesansbold.ttf', ftsize=32):
if sfont in fonts.keys():
font = fonts[sfont]
else:
font = pygame.font.Font(sfont, ftsize)
fonts[sfont] = font
rendered = font.render(message, True, fgcolor)
rendered_rect = rendered.get_rect()
rendered_rect.x = pos[0]
rendered_rect.y = pos[1]
pygame.draw.rect(screen, bgcolor, rendered_rect)
screen.blit(rendered, rendered_rect) | true |
fe8f495ccba7666cd019c30f1061cdf5d7d0e5fe | Python | hlee131/todoer | /todoproject/accounts/tests.py | UTF-8 | 2,513 | 2.9375 | 3 | [] | no_license | import json
from rest_framework.test import APIClient, APITestCase
from django.contrib.auth.models import User
from django.urls import reverse
# Create your tests here.
class TestUserAPI(APITestCase):
def setUp(self):
"""
First part, all sets up client, url, user that will be used in both test cases.
Second part, sets up for test_destroy, creates user and grabs token.
"""
# All
self.client = APIClient()
self.url = reverse('user')
self.user = {
"username": "asilcoiu314d",
"password": "dfjerfoda9328",
"email": "qfui49781@dfj.com"
}
# Setup for test_destroy
# Create User
user = User.objects.create(username=self.user['password'], email=self.user['email'])
user.set_password(self.user['username'])
user.save()
# Get Token
response = self.client.post(reverse('token'), json.dumps({'username': self.user['password'],
'password': self.user['username']}), content_type="application/json")
self.token = response.data['token']
def test_create(self):
"""
Test to check user creation endpoint works, tests:
1. Response code = 201
2. Response data = user without password
3. User in database
"""
response = self.client.post(self.url, json.dumps(self.user), content_type="application/json")
self.assertEqual(response.status_code, 201, f'Expected 201 but recieved {response.status_code}\n INFO: {response.data}')
self.assertEqual(response.data, {"username": self.user['username'],
"email": self.user['email']}, f'Expected user info without password, but received {response.data}')
self.assertEqual(User.objects.all().filter(username=self.user['username']).exists(),
True, f'Expected user but recieved no user')
def test_destroy(self):
"""
Test to check that user deletion works, tests:
1. Response code = 204
2. User no longer in database
"""
# Attatch token
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
# Destroy User
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 204, f'Expected 204 but recieved {response.status_code}')
self.assertEqual(User.objects.all().filter(username=self.user['username']).exists(),
False, f'Expected no user but recieved user')
| true |
a843e2472a7f68bcdc03e35c6f8d4c94b6c8fffc | Python | ipa-rar/pipeline | /tests/test_storage.py | UTF-8 | 2,104 | 2.6875 | 3 | [
"MIT"
] | permissive | from .common import make_temp_path
from pipeline.storage.state import StateStorageEmpty, StateStorageFile
from pipeline.core import PipelineError
import pytest
class TestStateStorageEmpty:
def test_set_value(self):
state_storage = StateStorageEmpty()
state_storage.set_value("key_name", 123)
def test_get_value(self):
state_storage = StateStorageEmpty()
with pytest.raises(PipelineError):
state_storage.get_value("some_key")
state_storage.set_value("some_key", 123)
with pytest.raises(PipelineError):
state_storage.get_value("some_key")
def test_has_key(self):
state_storage = StateStorageEmpty()
assert not state_storage.has_key("key")
state_storage.set_value("key", "abacaba")
assert not state_storage.has_key("key")
def test_remove_key(self):
state_storage = StateStorageEmpty()
with pytest.raises(PipelineError):
state_storage.remove_key("abacaba")
state_storage.set_value("abacaba", 9.23)
with pytest.raises(PipelineError):
state_storage.remove_key("abacaba")
class TestStateStorageFile:
def test_basic(self):
path = make_temp_path()
state_storage = StateStorageFile(path)
assert not state_storage.has_key("key")
with pytest.raises(PipelineError):
state_storage.remove_key("abacaba")
with pytest.raises(PipelineError):
state_storage.get_value("some_key")
def test_save_load(self):
path = make_temp_path()
state_storage = StateStorageFile(path)
state_storage.set_value("aba", 123)
assert state_storage.get_value("aba") == 123
assert state_storage.has_key("aba")
state_storage = StateStorageFile(path)
assert state_storage.get_value("aba") == 123
assert state_storage.has_key("aba")
state_storage.remove_key("aba")
assert not state_storage.has_key("aba")
state_storage = StateStorageFile(path)
assert not state_storage.has_key("aba")
| true |
f5acecd14d3c9b241b2b255ca6e9a3ffa0d1a1a4 | Python | BhanuPrakash-07/app-lock-with-random-password-daily | /RandomPassword/rpsg.py | UTF-8 | 1,466 | 2.8125 | 3 | [] | no_license | import time
from time import ctime
import subprocess
import tkinter
import random as r
from tkinter import PhotoImage
top=tkinter.Tk()
top.geometry('400x400')
var1=tkinter.StringVar()
prev='12'
def cur_time():
import requests as req
tot=req.get('http://worldtimeapi.org/api/timezone/Asia/Kolkata.txt').text
loc=tot.find('2021')
return tot[loc:loc+10]
print('Current Date: ',cur_time())
def gen():
import string
s=''
for i in range(3):
s+=r.choice(string.ascii_letters)
s+=r.choice(string.punctuation)
s+=r.choice(string.digits)
return s
pw=gen()
i=PhotoImage(file=r'B:\Python\ML\MiniProjects\RandomPassword\image.png')
print('Password: ',pw)
def verification():
import os
global prev
name=var1.get()
now=cur_time()
if(now[-2:]!=prev):
if name==pw:
prev=now[-2:]
p=subprocess.call(r'C:\Program Files (x86)\Mozilla Firefox\firefox.exe')
time.sleep(20) #after 20 secs automatically app closed
os.system('taskkill /im firefox.exe /f')
else:
print('incorrect')
else:
print('try tomorrow')
lab=tkinter.Label(top,text='AppLock',font=("Arial", 25)).pack(pady=10)
text=tkinter.Entry(top,textvariable=var1,bd=5,width=30).pack(pady=10)
B=tkinter.Button(top,image=i,command=verification,activebackground='red',bg='light green',fg='black').pack(pady=10)
top.mainloop()
| true |
3491b309bc26e884f04f0e0029ed2e9b66ef3b6c | Python | king-11/Information-Technology-Workshop | /python assignments/Assignment1/assignment1/23.py | UTF-8 | 195 | 3.3125 | 3 | [] | no_license | # function arguments two lists
# iterates over both simulatenously
# print both list until least lenght one exhusted
def fun23(a: list, b: list):
for x, y in zip(a, b):
print(x, y)
| true |
9510b68c81b031d2d691ced327b0c48de5b72cef | Python | Aasthaengg/IBMdataset | /Python_codes/p02970/s670787418.py | UTF-8 | 63 | 2.59375 | 3 | [] | no_license | a = list(map(int,input().split()))
print(-(-a[0]//(2*a[1]+1)))
| true |
6877315225589aba2dce2d32f7d8638da30b108e | Python | kball/ambry | /ambry/library/util.py | UTF-8 | 1,343 | 2.5625 | 3 | [
"BSD-2-Clause"
] | permissive | """A Library is a local collection of bundles. It holds a database for the configuration
of the bundles that have been installed into it.
"""
# Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
# Revised BSD License, included in this distribution as LICENSE.txt
# Setup a default logger. The logger is re-assigned by the
# bundle when the bundle instantiates the logger.
import logging
import logging.handlers
import threading
import time
class DumperThread (threading.Thread):
"""Run a thread for a library to try to dump the database to the retome at regular intervals"""
lock = threading.Lock()
def __init__(self,library):
self.library = library
threading.Thread.__init__(self)
#self.daemon = True
self.library.logger.setLevel(logging.DEBUG)
self.library.logger.debug("Initialized Dumper")
def run (self):
self.library.logger.debug("Run Dumper")
if not self.library.upstream:
self.library.logger.debug("No remote")
return
with DumperThread.lock:
time.sleep(5)
backed_up = self.library.backup()
if backed_up:
self.library.logger.debug("Backed up database")
else:
self.library.logger.debug("Did not back up database")
| true |
d637d32aa22f14cf52bd5df7f2bcc5510b647050 | Python | NiranjanaDeviA/guvi | /codekata/91surface.py | UTF-8 | 72 | 2.515625 | 3 | [] | no_license | l,b,h=map(int,input().split())
vol=l*h*b
s=2*(l*b+l*h+h*b)
print(s,vol)
| true |
569b2fb27b97d36df38c9ffe68386b8440f8927f | Python | sathwikacharya/Automated-Essay-Grading | /code.py | UTF-8 | 8,202 | 2.859375 | 3 | [] | no_license | #Importing the libraries
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
#import seaborn as sns
import streamlit as st
from textblob import TextBlob
import numpy as np
import pandas as pd
import nltk
import re
from nltk.corpus import stopwords
from gensim.models import Word2Vec
from keras.layers import Embedding, LSTM, Dense, Dropout, Lambda, Flatten,BatchNormalization
from keras.models import Sequential, load_model, model_from_config
from sklearn.model_selection import train_test_split
#from sklearn.metrics import mean_squared_error
#from sklearn.metrics import cohen_kappa_score
import random
from PIL import Image
import language_check
#Reading the dataset
data = pd.read_csv("training_set_rel3.tsv", sep='\t', encoding = "ISO-8859-1")
col_to_keep =['essay','domain1_score']
data = data[col_to_keep]
#Adding the image widget
image = Image.open('nlp.png')
st.image(image, use_column_width=True)
#Adding the title and subsequent text
st.title("Welcome to Automated Essay Grading System")
st.header("""
This is a system which lets you input a given essay for a given prompt and the system will return a score for the same
""")
st.warning("The following is the essay prompt. It belongs to the persuasive form of essay")
st.success("""
More and more people use computers, but not everyone agrees that this benefits society. Those who support advances in technology believe that computers have a positive effect on people. They teach hand-eye coordination, give people the ability to learn about faraway places and people, and even allow people to talk online with other people. Others have different ideas. Some experts are concerned that people are spending too much time on their computers and less time exercising, enjoying nature, and interacting with family and friends.
Write an essay to your local newspaper in which you state your opinion on the effects computers have on people. Persuade the readers to agree with you.
""")
#Basic input
name = st.text_input("Before we proceed, enter your name in the box below: ")
organ = ['Academic', 'Non Academic','Corporate','Other']
organisation = st.multiselect("Enter the purpose of using this software:", organ)
st.write("All righty then, Click on this button to view the dataset")
click = st.button("CLICK")
if click == True:
st.dataframe(data)
filter_data = data
#Preprocessing the dataset
def preprocessing():
    """Add hand-crafted feature columns to the global ``filter_data`` frame.

    Derives, per essay: word count, sentence count, sentiment polarity,
    average word length and grammar-error count.  Mutates ``filter_data``
    in place (which aliases ``data``); returns nothing.
    """
    #Calculates word count
    def word_counting(x):
        return (len(TextBlob(x).words))
    filter_data['word_length'] = filter_data['essay'].apply(word_counting)
    #Calculates sentence count
    def sentence_counting(x):
        sentence_len = len([len(sentence.split(' ')) for sentence in TextBlob(x).sentences])
        return sentence_len
    filter_data['no_of_sentence'] = filter_data['essay'].apply(sentence_counting)
    #Calculates sentiment of sentence
    def avg_sentence_sentiment(x):
        # TextBlob polarity is in [-1.0, 1.0] for the whole essay.
        sentiment_essay = TextBlob(x).sentiment.polarity
        return sentiment_essay
    filter_data['sentiment_essay'] = filter_data['essay'].apply(avg_sentence_sentiment)
    #Calculates average length of words
    def avg_length_of_words(x):
        word_len = [len(word) for word in TextBlob(x).words]
        return (sum(word_len) / len(word_len))
    filter_data['avg_word_len'] = filter_data['essay'].apply(avg_length_of_words)
    #Checks the grammatical error
    def grammar_check(x):
        # NOTE(review): a LanguageTool instance is created per essay -- this is
        # the slow step mentioned in the UI text below.
        tool = language_check.LanguageTool('en-US')
        matches = tool.check(x)
        return len(matches)
    filter_data['Grammar_check'] = filter_data['essay'].apply(grammar_check)
#The next 5 lines can be commented as they take some time to load. If time is not an issue, feel free to go ahead
st.write("If you wish to, Click on this button to view the features of the essay set")
click_1 = st.checkbox("Check this box")
if click_1 == True:
    # Feature extraction runs only on demand because grammar checking is slow.
    preprocessing()
    st.dataframe(filter_data)
#Enter theessay to be graded here
essay_to_be_graded = st.text_area("Enter here the essay to be graded")
# Append the user's essay with a placeholder score; it becomes the last row of `data`.
data = data.append({'essay': essay_to_be_graded, 'domain1_score': random.randint(2,12)}, ignore_index=True)
#Processing of essay to be graded
def processing():
    """Train a Word2Vec + LSTM scoring pipeline and display one prediction.

    Steps: split the global ``data`` frame, tokenize essays into stop-word
    free sentences, train Word2Vec embeddings, average word vectors per
    essay, fit an LSTM regressor, and write one rounded test prediction to
    the Streamlit page.  Uses only the global ``data``; returns nothing.
    """
    y = data['domain1_score']
    #splitting into train and test set
    X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.3, random_state=42)
    train_e = X_train['essay'].tolist()
    test_e = X_test['essay'].tolist()
    train_sents = []
    test_sents = []
    stop_words = set(stopwords.words('english'))
    def sent2word(x):
        # Strip non-letters, split, and drop English stop words.
        x = re.sub("[^A-Za-z]", " ", x)
        # NOTE(review): str.lower() returns a new string; this result is
        # discarded, so filtering below is effectively case-sensitive.
        x.lower()
        filtered_sentence = []
        words = x.split()
        for w in words:
            if w not in stop_words:
                filtered_sentence.append(w)
        return filtered_sentence
    def essay2word(essay):
        # Sentence-split an essay with the punkt tokenizer, then word-filter
        # each sentence via sent2word.
        essay = essay.strip()
        tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
        raw = tokenizer.tokenize(essay)
        final_words = []
        for i in raw:
            if (len(i) > 0):
                final_words.append(sent2word(i))
        return final_words
    for i in train_e:
        train_sents += essay2word(i)
    for i in test_e:
        test_sents += essay2word(i)
    #Layout of the LSTM Model
    def get_model():
        model = Sequential()
        model.add(LSTM(300, dropout=0.4, recurrent_dropout=0.4, input_shape=[1, 300], return_sequences=True))
        # NOTE(review): BatchNormalization() is instantiated but never added
        # to the model -- presumably model.add(...) was intended.
        BatchNormalization()
        model.add(LSTM(64, recurrent_dropout=0.4))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='relu', kernel_initializer='he_normal'))
        model.compile(loss='mean_squared_error', optimizer='RMSProp', metrics=['mae'])
        model.summary()
        return model
    # Training Word2Vec model
    num_features = 300
    min_word_count = 20
    num_workers = -1
    context = 10
    downsampling = 1e-3
    model = Word2Vec(train_sents,
                     workers=num_workers,
                     size=num_features,
                     min_count=min_word_count,
                     window=context,
                     sample=downsampling)
    model.init_sims(replace=True)
    model.wv.save_word2vec_format('word2vecmodel.bin', binary=True)
    def makeVec(words, model, num_features):
        # Average the embeddings of the in-vocabulary words of one essay.
        vec = np.zeros((num_features,), dtype="float32")
        noOfWords = 0.
        index2word_set = set(model.wv.index2word)
        for i in words:
            if i in index2word_set:
                noOfWords += 1
                vec = np.add(vec, model[i])
        # NOTE(review): divides by zero (NaN vector) if no word is in vocab.
        vec = np.divide(vec, noOfWords)
        return vec
    def getVecs(essays, model, num_features):
        c = 0
        essay_vecs = np.zeros((len(essays), num_features), dtype="float32")
        for i in essays:
            essay_vecs[c] = makeVec(i, model, num_features)
            c += 1
        return essay_vecs
    clean_train = []
    for i in train_e:
        clean_train.append(sent2word(i))
    training_vectors = getVecs(clean_train, model, num_features)
    clean_test = []
    for i in test_e:
        clean_test.append(sent2word(i))
    testing_vectors = getVecs(clean_test, model, num_features)
    training_vectors = np.array(training_vectors)
    testing_vectors = np.array(testing_vectors)
    # Reshaping train and test vectors to 3 dimensions. (1 represnts one timestep)
    training_vectors = np.reshape(training_vectors, (training_vectors.shape[0], 1, training_vectors.shape[1]))
    testing_vectors = np.reshape(testing_vectors, (testing_vectors.shape[0], 1, testing_vectors.shape[1]))
    lstm_model = get_model()
    #fitting the model
    lstm_model.fit(training_vectors, y_train, batch_size=64, epochs=150)
    y_pred = lstm_model.predict(testing_vectors)
    y_pred = np.around(y_pred)
    # NOTE(review): index 8 is hard-coded -- presumably meant to locate the
    # user's appended essay in the shuffled test split; confirm this.
    st.write("Your score is", y_pred[8])
#button widget to calcuate score
button_two=st.button("Calculate Score")
#while click==True:
if button_two==True:
    # Recompute features and retrain the model on every click (no caching).
    preprocessing()
    processing()
| true |
f039461af072f605cbba79c23c088874154fe7ff | Python | ghjm/advent2019 | /p12.py | UTF-8 | 2,617 | 3.125 | 3 | [] | no_license | #!/bin/env python
import sys
import re
import copy
import math
def lcm(a):
    """Return the least common multiple of a non-empty sequence of ints.

    Uses the identity lcm(x, y) = x * y // gcd(x, y), folded left to right.
    (The accumulator was previously named ``lcm``, shadowing this function.)
    """
    result = a[0]
    for n in a[1:]:
        result = result * n // math.gcd(result, n)
    return result
if __name__ == '__main__':
    # Parse moon positions, one "<x=.., y=.., z=..>" line per body.
    bodies = list()
    with open("inputs/input12.txt", "r") as file:
        r = re.compile('\< *x=([+-]?\d+), *y=([+-]?\d+), *z=([+-]?\d+) *\>')
        for line in (line.rstrip() for line in file):
            m = r.match(line)
            if not m:
                print("Error:", line, "did not match")
                sys.exit(1)
            bodies.append([int(n) for n in m.groups()])
    # Keep a pristine copy of the initial state for the part-2 cycle search.
    bodies0 = copy.deepcopy(bodies)
    velocities = [[0,0,0] for i in range(len(bodies))]
    # Part 1: simulate gravity + movement for a fixed number of steps.
    # NOTE(review): the puzzle statement typically uses 1000 steps; this runs
    # 10 -- confirm whether 10 is intentional.
    for step in range(10):
        # gravity
        for b1 in range(len(bodies)):
            for b2 in range(len(bodies)):
                if b1==b2:
                    continue
                # Each axis velocity moves one unit toward the other body.
                for axis in range(3):
                    if bodies[b1][axis] < bodies[b2][axis]:
                        velocities[b1][axis] += 1
                    elif bodies[b1][axis] > bodies[b2][axis]:
                        velocities[b1][axis] -= 1
        # movement
        for b in range(len(bodies)):
            for axis in range(3):
                bodies[b][axis] += velocities[b][axis]
    # Total energy = potential (sum |pos|) * kinetic (sum |vel|) per body.
    pot = [sum([abs(n) for n in b]) for b in bodies]
    kin = [sum([abs(n) for n in v]) for v in velocities]
    tot = [pot[i] * kin[i] for i in range(len(bodies))]
    print("Part 1:", sum(tot))
    # Part 2: the axes evolve independently, so find each axis' cycle length
    # separately and combine them with the LCM.
    cycle_lengths = [0] * 3
    bodies = copy.deepcopy(bodies0)
    velocities = [[0,0,0] for i in range(len(bodies))]
    for axis in range(3):
        step = 0
        while True:
            # gravity
            for b1 in range(len(bodies)):
                for b2 in range(len(bodies)):
                    if b1==b2:
                        continue
                    if bodies[b1][axis] < bodies[b2][axis]:
                        velocities[b1][axis] += 1
                    elif bodies[b1][axis] > bodies[b2][axis]:
                        velocities[b1][axis] -= 1
            # movement
            for b in range(len(bodies)):
                bodies[b][axis] += velocities[b][axis]
            step += 1
            # The axis has cycled when every velocity is zero and every
            # position matches the initial state again.
            match = True
            for b in range(len(bodies)):
                if velocities[b][axis] != 0:
                    match = False
                    break
                if bodies[b][axis] != bodies0[b][axis]:
                    match = False
                    break
            if match:
                cycle_lengths[axis] = step
                break
    print("Part 2:", lcm(cycle_lengths))
| true |
6000cda8682cbb2852bcc64e00fc23315ec3a4c6 | Python | Vigneshwaran07/HackerRank-Random-Problem-Solving-Solutions | /Jumping on the Clouds.py | UTF-8 | 164 | 3 | 3 | [] | no_license | n = int(input())
# Greedy cloud jumping: from each cloud prefer a jump of 2 when the landing
# cloud is safe (0), otherwise jump 1.  `n` is read from stdin just above.
c = list(map(int,input().strip().split()))
c.insert(n,0)  # sentinel so c[i+2] is always readable near the end
count = 0
i = 0
while (i < n-1):
    # (c[i+2] == 0) is 1 for a safe double jump, 0 otherwise -> step is 2 or 1.
    i += (c[i+2] == 0) + 1
    count += 1
print (count)
| true |
ed0b3312322112426e8b8a23c9b6cfaf39182c76 | Python | PhilBaird/SYSC3010_phil_baird | /Else/Initials.py | UTF-8 | 1,117 | 2.546875 | 3 | [] | no_license | from sense_emu import SenseHat
import time
#import keyboard
# Blink a letter-like pattern on the Sense HAT emulator, alternating the
# drawing colour every second.  `keydown`/`keyup` and the commented keyboard
# lines are leftovers from an abandoned key-triggered variant.
sense = SenseHat()
keydown = 0
keyup = 0
p = True
while True:
    time.sleep(1)
    sense.clear()
    #key = keyboard.read_key()
    #print( key )
    #if key == keydown || key == keyup:
    if p:
        # Cyan frame (0, 255, 255): top row, left column, middle row,
        # and the top half of the right column.
        for i in range(7):
            sense.set_pixel(i , 0 , 0 , 255 , 255)
        for i in range(8):
            sense.set_pixel(0 , i , 0 , 255 , 255)
        for i in range(7):
            sense.set_pixel(i , 3 , 0 , 255 , 255)
        for i in range(4):
            sense.set_pixel(7 , i , 0 , 255 , 255)
    else:
        # Green variant (0, 255, 0).  NOTE(review): this branch additionally
        # draws the full right column and a bottom row, so the two shapes
        # differ -- confirm whether that asymmetry is intended.
        for i in range(7):
            sense.set_pixel(i , 0 , 0 , 255 , 0)
        for i in range(8):
            sense.set_pixel(0 , i , 0 , 255 , 0)
        for i in range(7):
            sense.set_pixel(i , 3 , 0 , 255 , 0)
        for i in range(8):
            sense.set_pixel(7 , i , 0 , 255 , 0)
        for i in range(7):
            sense.set_pixel(i , 7 , 0 , 255 , 0)
    p = not p
| true |
9df20c36373453098946f92b07f18d0ec912ca28 | Python | Gandi/dnsknife | /dnsknife/challenge.py | UTF-8 | 853 | 2.75 | 3 | [] | no_license | """
POC for a stateless challenge/response TXT domain ownership validation.
"""
import hashlib
import hmac
import time
def valid_tokens(domain, secret, validity=86400):
    """Return the HMAC-SHA256 tokens currently accepted for *domain*.

    A token is the hex digest of HMAC(secret, idna(domain) + bucket_number),
    where the bucket number is the current time divided into half-validity
    intervals.  Tokens from up to three half-intervals in the past are also
    returned, so a freshly issued token never expires immediately.
    """
    if isinstance(secret, str):
        secret = secret.encode()
    if isinstance(domain, str):
        domain = domain.encode('idna')

    # Work in half-validity buckets (see note in the original module: we
    # deliberately accept slightly stale tokens rather than fresh-but-expired).
    interval = int(validity / 2)

    def token_for(bucket_start):
        mac = hmac.HMAC(secret, digestmod=hashlib.sha256)
        mac.update(domain)
        mac.update(str(int(bucket_start / interval)).encode())
        return mac.hexdigest()

    now = int(time.time())
    return [token_for(t) for t in range(now - 3 * interval, now, interval)]
| true |
643cd636ebed0588e787dc269ef2cd2698f55009 | Python | Yang-Jianlin/python-learn | /python_BB/demo12.py | UTF-8 | 158 | 3.375 | 3 | [] | no_license | import re
# Demonstrate `re` substitution and multi-delimiter splitting next to
# plain str.split.
sample = 'I am is str'
print(re.sub('str', 'as', sample))
phrase = 'I am is yang, and:are you Li'
print(re.split(r'[,:]', phrase))
print(phrase.split(' '))
| true |
6ae1383b46cd0453a091723fd7cc8bae6f5b6d4c | Python | jamesmkrieger/ProDy | /prody/atomic/nbexclusion.py | UTF-8 | 4,042 | 2.984375 | 3 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-biopython",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""This module defines :class:`NBExclusion` for dealing with bond information provided
by using :meth:`.AtomGroup.setNBExclusions` method."""
from numbers import Integral
import numpy as np
__all__ = ['NBExclusion']
class NBExclusion(object):
    """A pointer class for a pair of nonbonded-exclusion atoms. Following
    built-in functions are customized for this class:

    * :func:`iter` yields :class:`~.Atom` instances"""

    # _ag: parent AtomGroup; _indices: the two atom indices; _acsi: active
    # coordinate-set index captured at construction time.
    __slots__ = ['_ag', '_acsi', '_indices']

    def __init__(self, ag, indices, acsi=None):
        self._ag = ag
        self._indices = np.array(indices)
        if acsi is None:
            # Snapshot the group's active coordinate set if none was given.
            self._acsi = ag.getACSIndex()
        else:
            self._acsi = acsi

    def __repr__(self):
        one, two = self._indices
        names = self._ag._getNames()
        return '<NBExclusion: {0}({1})--{2}({3}) from {4}>'.format(
            names[one], one, names[two], two, str(self._ag))

    def __str__(self):
        one, two = self._indices
        names = self._ag._getNames()
        return '{0}({1})--{2}({3})'.format(
            names[one], one, names[two], two)

    def __eq__(self, other):
        # Equal when both point into the same AtomGroup and cover the same
        # atom pair, in either order.
        return (isinstance(other, NBExclusion) and other.getAtomGroup() is self._ag
                and (np.all(other.getIndices() == self._indices) or
                     np.all(other.getIndices() == list(reversed(self._indices)))))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __iter__(self):
        for index in self._indices:
            yield self._ag[index]

    def getAtomGroup(self):
        """Returns atom group."""

        return self._ag

    def getAtoms(self):
        """Returns the pair of excluded atoms."""

        return (self._ag[self._indices[0]], self._ag[self._indices[1]])

    def getIndices(self):
        """Returns a copy of the indices of the excluded atoms."""

        return self._indices.copy()

    def getVector(self):
        """Returns vector that originates from the first atom."""

        one, two = self._indices
        acsi = self.getACSIndex()
        return self._ag._coords[acsi, two] - self._ag._coords[acsi, one]

    def getACSIndex(self):
        """Returns index of the coordinate set."""

        acsi = self._acsi
        # The group may have lost coordinate sets since this pointer was made.
        if acsi >= self._ag._n_csets:
            raise ValueError('{0} has fewer coordsets than assumed by {1}'
                             .format(str(self._ag), str(self)))
        return acsi

    def setACSIndex(self, index):
        """Set the coordinate set at *index* active."""

        if self._ag._coords is None:
            raise AttributeError('coordinates are not set')
        if not isinstance(index, Integral):
            raise TypeError('index must be an integer')
        n_csets = self._ag._n_csets
        if n_csets <= index or n_csets < abs(index):
            raise IndexError('coordinate set index is out of range')
        if index < 0:
            # Negative indices count from the end, Python style.
            index += n_csets
        self._acsi = index
def evalNBExclusions(exclusions, n_atoms):
    """Returns an array mapping atoms to their nonbonded exclusion neighbors
    and an array that stores number of nonbonded exclusions made by each atom.

    :arg exclusions: (n, 2) integer array of excluded atom-index pairs
    :arg n_atoms: total number of atoms in the group

    The neighbor map has shape (n_atoms, max_exclusions) and is padded
    with -1.  ``minlength=n_atoms`` guarantees the count array always has
    one entry per atom (previously it stopped at the largest index seen)
    and makes an empty ``exclusions`` array work instead of crashing on
    ``max()`` of an empty array.
    """
    numexclusions = np.bincount(
        exclusions.reshape(exclusions.shape[0] * 2), minlength=n_atoms)
    # One row per atom, wide enough for the busiest atom, padded with -1.
    nbemap = np.zeros((n_atoms, numexclusions.max()), int)
    nbemap.fill(-1)
    # index[a] = next free slot in atom a's row.
    index = np.zeros(n_atoms, int)
    for nbexclusion in exclusions:
        a, b = nbexclusion
        nbemap[a, index[a]] = b
        nbemap[b, index[b]] = a
        index[nbexclusion] += 1
    return nbemap, numexclusions
def trimNBExclusions(exclusions, indices):
    """Returns nonbonded exclusions between atoms at given indices,
    renumbered into the subset's 0-based ordering.  Returns ``None``
    when no exclusion survives the trim."""

    keep = set(indices)
    surviving = [pair for pair in exclusions
                 if pair[0] in keep and pair[1] in keep]
    if not surviving:
        return None
    # Old atom index -> position within `indices`.
    renumber = np.zeros(indices.max()+1, int)
    renumber[indices] = np.arange(len(indices))
    return renumber[np.array(surviving)]
| true |
12ea5ba39c1863bc7416b2895ced62109252ce7f | Python | awk001/pytest | /web/test_cation.py | UTF-8 | 629 | 2.546875 | 3 | [] | no_license | from time import sleep
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
# Open Baidu in Firefox and demonstrate ActionChains modifier keys:
# text typed while SHIFT is held arrives uppercased.
d = webdriver.Firefox()
d.get("http://www.baidu.com")
d.implicitly_wait(10)
d.maximize_window()
# "kw" is Baidu's search input field.
element = d.find_element(By.ID, "kw")
action = ActionChains(d)
# action.send_keys("selenium")
# action.send_keys_to_element(d.find_element(By.ID,"kw"),"selenium",Keys.ENTER)
# Letters typed while the SHIFT key is held down are entered as uppercase.
action.key_down(Keys.SHIFT).send_keys("abc").key_up(Keys.SHIFT).send_keys("abc")  # ABCabc
action.perform()
sleep(3)
d.quit()
9e0911406954b20c3699c8e692809ea0a3913e26 | Python | sngjuk/fuzzy-flow | /src/client.py | UTF-8 | 14,113 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import pickle
import re
from collections import OrderedDict
from time import sleep
import zmq
import node
class FuzzyClient:
    """Interactive ZeroMQ REQ client for the fuzzy-flow glossary server.

    Keeps a local glossary of :class:`node.Node` objects (plus a parallel
    list of their word vectors) and talks to the server with pickled
    request dicts built by :meth:`req`.
    """

    def __init__(self, ip='localhost', port=5555):
        """Connect a REQ socket to the server and start with an empty glossary."""
        self.ip = ip
        self.port = port
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.connect('tcp://%s:%d' % (self.ip, self.port))
        self.glossary = OrderedDict()
        self.glossary_vector = []
        # Default path-search parameters, editable via the 'st' menu entry.
        self.setting = {'depth_limit': 9, 'jump_limit': 1, 'num': 30, 'sim_th': 0.39}

    @staticmethod
    def req(rep_name, glossary=None, glossary_vector=None, name1=None, name2=None, setting=None):
        """Build the request dict the server expects; unused fields stay None."""
        req_json = {
            'req': rep_name,
            'glossary': glossary,
            'glossary_vector': glossary_vector,
            'name1': name1,
            'name2': name2,
            'setting': setting
        }
        return req_json

    @staticmethod
    def preprocess_input():
        """Read one line from stdin with trailing tabs/spaces and leading whitespace stripped."""
        in_text = input()
        in_text = re.sub(r'[ \t]+$', '', in_text)
        re_res = re.search(r'\s*(.*)', in_text)
        if re_res:
            in_text = re_res.group(1)
        return in_text

    def list_names(self):
        """Print every name currently in the local glossary."""
        print('// list names')
        for i in self.glossary:
            print(self.glossary[i].name)

    def add_name(self, name):
        """Add *name* with its server-side word vector; False if it already exists."""
        if name in self.glossary:
            return False
        neo = node.Node(name, self.get_word_vector(name))
        self.glossary[name] = neo
        self.glossary_vector.append(neo.vector)
        print(f'\n//{name} added')
        return True

    def delete_name(self, name):
        """Remove *name* and its vector; returns False when the name is unknown."""
        print('// delete name')
        if name not in self.glossary:
            return False
        # The vector list is parallel to the OrderedDict's insertion order.
        rm_idx = list(self.glossary.keys()).index(name)
        del self.glossary[name]
        del self.glossary_vector[rm_idx]
        print(f'{name} deleted')

    def add_implication(self, source_name, target_name, probability):
        """Add/strengthen an implication edge source -> target (auto-creates nodes)."""
        print('// add implication')
        self.add_name(source_name)
        self.add_name(target_name)
        res_prob = self.glossary[source_name].add_implication(target_name, self.get_word_vector(target_name), probability)
        self.glossary[source_name].sort_reason()
        print(f'\n//{source_name} -> {target_name}; {res_prob[0]}, count: {res_prob[1]}')

    def add_belief(self, source_name, target_name, probability):
        """Add/strengthen a belief edge source -> target (auto-creates nodes)."""
        print('// add belief')
        self.add_name(source_name)
        self.add_name(target_name)
        res_prob = self.glossary[source_name].add_belief(target_name, self.get_word_vector(target_name), probability)
        self.glossary[source_name].sort_reason()
        print(f'\n//{source_name} -> {target_name}; {res_prob[0]}, count: {res_prob[1]}')

    def add_membership(self, source_name, target_name, target_prob, source_prob):
        """Add a membership relation in both directions with separate strengths."""
        print('// add membership')
        self.add_name(source_name)
        self.add_name(target_name)
        res_prob = self.glossary[source_name].add_membership(target_name, self.get_word_vector(target_name), target_prob)
        self.glossary[source_name].sort_reason()
        print(f'\n//{source_name} -> {target_name}; {res_prob[0]}, count: {res_prob[1]}')
        res_prob = self.glossary[target_name].add_membership(source_name, self.get_word_vector(source_name), source_prob)
        self.glossary[target_name].sort_reason()
        print(f'\n//{target_name} -> {source_name}; {res_prob[0]}, count: {res_prob[1]}')

    def show_name(self, name):
        """Print the membership/implication/belief relations of one node."""
        if len(self.glossary) == 0 or name not in self.glossary:
            print('empty glossary or not exist name')
            return
        print(f'//// {name}')
        print('=== membership ===')
        for i in self.glossary[name].membership:
            print(f'->{i}; prob {self.glossary[name].membership[i][0]}, count {self.glossary[name].membership[i][1]}')
        print('\n=== implication ===')
        for i in self.glossary[name].implication:
            print(f'->{i}; prob {self.glossary[name].implication[i][0]}, count {self.glossary[name].implication[i][1]}')
        print('\n=== belief ===')
        for i in self.glossary[name].belief:
            print(f'->{i}; prob {self.glossary[name].belief[i][0]}, count {self.glossary[name].belief[i][1]}')

    def get_word_vector(self, name):
        """Ask the server ('gw') for the embedding vector of *name*."""
        req_x = self.req('gw', name1=name)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        return loaded_res['res_data']

    def get_glossary_list(self):
        """Print the list of glossaries stored server-side ('sl')."""
        req_x = self.req('sl')
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def load_glossary(self, file_name):
        """Replace the local glossary with one loaded from the server ('lg')."""
        req_x = self.req('lg', name1=file_name)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        if loaded_res['res_data']:
            self.glossary, self.glossary_vector = loaded_res['res_data']
            print(f'{file_name} loaded!')
        else:
            print(f'\'{file_name}\' file not found in \'save\' folder ;)\n')

    def save_glossary(self, file_name):
        """Upload the local glossary to the server for saving ('x')."""
        req_x = self.req('x', glossary=self.glossary, glossary_vector=self.glossary_vector, name1=file_name)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        if loaded_res['res_data']:
            print(loaded_res['res_data'])

    def find_path(self, source, dest):
        """Ask the server ('fp') for a reasoning path from source to dest."""
        if not len(self.glossary):
            print('empty glossary')
            return
        req_x = self.req('fp', glossary=self.glossary, glossary_vector=self.glossary_vector,
                         name1=source, name2=dest, setting=self.setting)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def across_space(self, source, dest):
        """Ask the server ('cr') to cross vector space between two words."""
        req_x = self.req('cr', name1=source, name2=dest)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def search_possible_path(self, source, length):
        """Ask the server ('sp') for all paths of *length* from *source*."""
        if not len(self.glossary):
            print('empty glossary')
            return
        # Override only the depth limit; jump limit and similarity threshold
        # come from the current instance settings.
        setting = {'depth_limit': length, 'jump_limit': self.setting['jump_limit'], 'sim_th': self.setting['sim_th']}
        req_x = self.req('sp', glossary=self.glossary, glossary_vector=self.glossary_vector,
                         name1=source, name2=None, setting=setting)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def show_nearest_neighbor(self, name, num=30, sim_th=0.39):
        """Print the *num* nearest embedding neighbors of *name* ('nn')."""
        setting = {'num': num, 'sim_th': sim_th}
        req_x = self.req('nn', name1=name, name2=None, setting=setting)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def show_similarity(self, text1, text2):
        """Print the server-computed similarity between two texts ('ss')."""
        req_x = self.req('ss', name1=text1, name2=text2)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def show_all_names(self):
        """Print the full relation listing for every glossary entry."""
        for name in self.glossary:
            print('\n')
            self.show_name(name)

    def clear_glossary(self):
        """Drop the local glossary and its vectors (server copy untouched)."""
        self.glossary = OrderedDict()
        self.glossary_vector = []

    def user_select(self):
        """Run the interactive REPL menu until the user saves ('x') or quits ('xx')."""
        print('~ welcome ~')
        print('load file name? (example \'bug.p\')')
        save_filename = self.preprocess_input()
        self.load_glossary(save_filename)
        self.list_names()
        while True:
            print('===== select ===== \nsl: server glossaries list \nlg: load server glossary\
                \nln; list names\na; add name\ndn: delete name\nsa: show all names\nai; add implication\
                \nab: add belief \nam: add membership \nnn: show nearest neighbor \nst: find path setting\
                \nsn: show name \nss: show similarity \n\n-=-=- paths -=-=-\
                \nfp; find path \ncr: cross vector space \ncg: clear current glossary\
                \nsp: search possible path \n                      \
                \n----- exit -----\
                \nx; save &exit \nxx; exit without save')
            try:
                sel = self.preprocess_input()
                if sel == 'sl':
                    self.get_glossary_list()
                elif sel == 'lg':
                    self.get_glossary_list()
                    print('input; load file name')
                    name = self.preprocess_input()
                    # add name
                    self.load_glossary(name)
                    self.list_names()
                elif sel == 'ln':
                    # list glossary
                    self.list_names()
                elif sel == 'a':
                    print('input; name')
                    name = self.preprocess_input()
                    # add name
                    res = self.add_name(name)
                    if not res:
                        print('already exist')
                elif sel == 'dn':
                    print('input; name to delete')
                    name = self.preprocess_input()
                    # delete name
                    self.delete_name(name)
                elif sel == 'sa':
                    # show all names
                    self.show_all_names()
                elif sel == 'ai':
                    print('input; source_name ')
                    source_name = self.preprocess_input()
                    print('input; target_name ')
                    target_name = self.preprocess_input()
                    print('input; probability')
                    probability = float(self.preprocess_input())
                    # add implication
                    self.add_implication(source_name, target_name, probability)
                elif sel == 'ab':
                    print('input; source_name ')
                    source_name = self.preprocess_input()
                    print('input; target_name ')
                    target_name = self.preprocess_input()
                    print('input; probability')
                    probability = float(self.preprocess_input())
                    # add belief
                    self.add_belief(source_name, target_name, probability)
                elif sel == 'am':
                    print('input; source_name ')
                    source_name = self.preprocess_input()
                    print('input; target_name ')
                    target_name = self.preprocess_input()
                    print(f'input; {source_name}->{target_name} similarity')
                    target_prob = float(self.preprocess_input())
                    print(f'input; {target_name}->{source_name} similarity')
                    source_prob = float(self.preprocess_input())
                    # add membership
                    self.add_membership(source_name, target_name, target_prob, source_prob)
                elif sel == 'sn':
                    print('input; name')
                    name = self.preprocess_input()
                    # show relations
                    self.show_name(name)
                elif sel == 'st':
                    print('input; depth limit')
                    self.setting['depth_limit'] = int(self.preprocess_input())
                    print('input; jump limit')
                    self.setting['jump_limit'] = int(self.preprocess_input())
                elif sel == 'fp':
                    print('input; source ')
                    source = self.preprocess_input()
                    print('input; dest ')
                    dest = self.preprocess_input()
                    # find path
                    self.find_path(source, dest)
                elif sel == 'cr':
                    print('input; source ')
                    source = self.preprocess_input()
                    print('input; dest ')
                    dest = self.preprocess_input()
                    # find path
                    self.across_space(source, dest)
                elif sel == 'sp':
                    print('input; source')
                    source = self.preprocess_input()
                    print('input; length')
                    length = int(self.preprocess_input())
                    # search possible paths with length
                    self.search_possible_path(source, length)
                elif sel == 'nn':
                    print('input; name')
                    name = self.preprocess_input()
                    # show nearest neighbor
                    self.show_nearest_neighbor(name)
                elif sel == 'ss':
                    print('input; word1')
                    word1 = self.preprocess_input()
                    print('input; word2')
                    word2 = self.preprocess_input()
                    # show word distance
                    self.show_similarity(word1, word2)
                elif sel == 'cg':
                    # clear current glossary
                    self.clear_glossary()
                elif sel == 'x':
                    print('save file name?')
                    save_filename = self.preprocess_input()
                    # save
                    self.save_glossary(save_filename)
                    break
                elif sel == 'xx':
                    print('exit without save')
                    print('see ya')
                    break
                print('\nok\n')
            except KeyboardInterrupt:
                # Ctrl-C inside the menu only nudges the user toward a clean exit.
                print(' \n\n### Plz Enter \'x\' or \'xx\' to exit ###\n')
                sleep(0.33)
def main():
    """Connect to the public demo server and run the interactive menu."""
    fc = FuzzyClient(ip='35.200.11.163', port=8888)
    fc.user_select()
# Script entry point.
if __name__ == '__main__':
    main()
| true |
46983acf6284034c88a1964a3919e95dd7030c01 | Python | YogPanjarale/RDB-RandomDiscordBot- | /discordbot/my_utils/get_covid_data.py | UTF-8 | 912 | 2.546875 | 3 | [] | no_license | import json
import requests
from dataclasses import dataclass
@dataclass()
class CovResponse():
    """Typed view of the disease.sh ``/v3/covid-19/all`` JSON payload.

    Field names mirror the API's response keys exactly, so the decoded
    dict can be splatted straight into the constructor
    (``CovResponse(**r_json)``).
    """
    updated: int
    cases:int
    active: int
    recovered: int
    deaths: int
    todayCases: int
    todayRecovered: int
    todayDeaths: int
    critical: int
    casesPerOneMillion: int
    deathsPerOneMillion: int
    tests: int
    testsPerOneMillion: int
    population: int
    oneCasePerPeople: int
    oneDeathPerPeople: int
    oneTestPerPeople: int
    undefined: int
    activePerOneMillion: int
    recoveredPerOneMillion: int
    criticalPerOneMillion: int
    affectedCountries: int
def getCovidData(mode="world") -> CovResponse:
    """Fetch COVID-19 statistics from disease.sh.

    :param mode: only ``"world"`` is implemented.  ``"india"`` is an
        unfinished stub, and any other value falls through -- both
        currently return ``None`` despite the annotation.
    :return: a populated :class:`CovResponse` when ``mode == "world"``.
    """
    if mode == "world":
        r = requests.get("https://disease.sh/v3/covid-19/all")
        r_json = r.json()
        # print(r_json)
        rs = CovResponse(**r_json)
        # print(r_json)
        return rs
    elif mode =="india":
        # TODO: not implemented -- bare return yields None.
        return
        pass
| true |
a04dedee57e20d31d2c4fe3fc7a5ab57342f61d2 | Python | zeenat19/dictionary_question | /addlist.py | UTF-8 | 147 | 3.515625 | 4 | [] | no_license |
list1=["one","two","three","four","five"]
list2=[1,2,3,4,5,]
dict1={}
i=0
while i<len(list1):
dict1[list1[i]]=list2[i]
i=i+1
print(dict1) | true |
4cb7c76311fad538a960170aa4eeb1c8b6229429 | Python | targeton/LeetCode-cn | /Solutions/678.有效的括号字符串.py | UTF-8 | 1,614 | 3.640625 | 4 | [] | no_license | #
# @lc app=leetcode.cn id=678 lang=python3
#
# [678] 有效的括号字符串
#
# https://leetcode-cn.com/problems/valid-parenthesis-string/description/
#
# algorithms
# Medium (32.23%)
# Likes: 104
# Dislikes: 0
# Total Accepted: 5.7K
# Total Submissions: 17.6K
# Testcase Example: '"()"'
#
# 给定一个只包含三种字符的字符串:( ,) 和 *,写一个函数来检验这个字符串是否为有效字符串。有效字符串具有如下规则:
#
#
# 任何左括号 ( 必须有相应的右括号 )。
# 任何右括号 ) 必须有相应的左括号 ( 。
# 左括号 ( 必须在对应的右括号之前 )。
# * 可以被视为单个右括号 ) ,或单个左括号 ( ,或一个空字符串。
# 一个空字符串也被视为有效字符串。
#
#
# 示例 1:
#
#
# 输入: "()"
# 输出: True
#
#
# 示例 2:
#
#
# 输入: "(*)"
# 输出: True
#
#
# 示例 3:
#
#
# 输入: "(*))"
# 输出: True
#
#
# 注意:
#
#
# 字符串大小将在 [1,100] 范围内。
#
#
#
# @lc code=start
class Solution:
    def checkValidString(self, s: str) -> bool:
        """Greedy two-stack validity check.

        ``open_stack`` holds indices of unmatched '(' and ``star_stack``
        indices of unconsumed '*'.  A ')' consumes a '(' first, falling
        back to a '*'.  Leftover '(' are acceptable only when a later
        '*' (larger index) can stand in for a ')'.
        """
        open_stack = []
        star_stack = []
        for pos, ch in enumerate(s):
            if ch == '(':
                open_stack.append(pos)
            elif ch == '*':
                star_stack.append(pos)
            elif open_stack:
                open_stack.pop()
            elif star_stack:
                star_stack.pop()
            else:
                # ')' with nothing left to match it.
                return False
        # Pair remaining '(' with '*' that occur after them.
        while open_stack and star_stack:
            if open_stack.pop() > star_stack.pop():
                return False
        return not open_stack
# @lc code=end
| true |
d81b346c0fd0901d2be9408140063ad42a876b4f | Python | SamuelLellis/TextBasedAdventureGame | /sam.py | UTF-8 | 1,248 | 3.59375 | 4 | [] | no_license | def scenario1(choice):
print("As you begin to approach the car sounds of scratching begin to eminate from the truck of the car. Would you like to investigate?")
choice1 = input("Would you like to investigate? Yes or no")
if(choice1.lower() == yes):
death1()
def death1():
    """Print the opening line of the trunk-investigation ending."""
    print("You manage to pull open the truck of the car...")
def main():
    """Opening scene: wake up outside and optionally inspect the abandoned car."""
    print("You shield your eyes as they are suddenly assaulted by the sunlight upon the opening of the door")
    print('"What on Earth?"')
    print("You say to yourself as you take in the surrondings around you.")
    print("You look to your right and observe an abandoned car.")
    input1 = input("Would you like to investigate it? Yes or No?")
    print()
    # NOTE(review): the branches are mutually exclusive, so the second `if`
    # could be `elif`; also scenario1() is never called from here -- confirm
    # whether the "yes" branch was meant to invoke it.
    if (input1.lower() == "yes"):
        print("Deciding to give it a chance you approach the car with caution with your fists up in self defense,\n with the windows being blown out and all you don't want to take any chances.")
    if (input1.lower() == "no"):
        print("With a shake of your head you decide that the abandoned car can't do much for you, so you turn your head in the opposite direction to see if there is anything else outside of this desolate house that you were trapped in.")
| true |
30d84725a970f8a4df365dda018a17f863e48215 | Python | augustin-barillec/google-pandas-load | /tests/utils/pandas_normalize.py | UTF-8 | 307 | 2.796875 | 3 | [
"MIT"
] | permissive | from copy import deepcopy
def sort(df):
    """Return a copy of *df* sorted by all of its columns, left to right.

    ``DataFrame.sort_values`` already returns a new frame, so the former
    ``deepcopy`` was a redundant full copy of the input.
    """
    return df.sort_values(list(df.columns))
def reset_index(df):
    """Return a copy of *df* with a fresh 0..n-1 integer index.

    ``DataFrame.reset_index`` already returns a new frame (the old index
    is dropped, not inserted as a column), so the former ``deepcopy``
    was redundant.
    """
    return df.reset_index(drop=True)
def normalize(df):
    """Sort *df* by all columns, then renumber its index from zero."""
    return reset_index(sort(df))
| true |
f3baeb009bdc6ccdd734e72330bca26ec69e2e4c | Python | dromakin/substringAlgorithms | /src/libs/rabin_karp.py | UTF-8 | 2,183 | 3.625 | 4 | [
"MIT"
] | permissive | from src.libs.timing import *
# @speed_test
class Hash:
    '''
    Rolling hash over a fixed-size window of a string, used by rabin_karp.

    The digest is the sum of the ordinals of the characters currently in
    the window; update() slides the window one character to the right.
    '''
    def __init__(self, string, size):
        self.str = string
        self.init = 0
        self.end = size
        self.hash = sum(ord(self.str[i]) for i in range(size))

    def update(self):
        # Slide only while a character remains to enter on the right.
        if self.end <= len(self.str) - 1:
            self.hash += ord(self.str[self.end]) - ord(self.str[self.init])
            self.init += 1
            self.end += 1

    def digest(self):
        return self.hash

    def text(self):
        return self.str[self.init:self.end]
@speed_test
def rabin_karp(text, sub):
    '''
    Rabin-Karp substring search.

    Algorithm:
    1. Compute the hash of the pattern.
    2. Slide a window of length len(sub) over the text.
    3. Maintain the window's rolling hash.
    4. Compare the window hash against the pattern hash.
    5. Only when the hashes match, compare the actual strings.

    Complexity (sigma = alphabet size, t = len(text), p = len(pattern)):
    Worst case: O(p * t)
    Average: O(p + t)
    Preprocessing: O(p)
    Extra memory: O(1)

    :param text: the text to search in
    :param sub: the substring (pattern) to find
    :return: index of the first match, or -1 if none is found
    '''
    if text == "" or sub == "":
        return -1
    len_text = len(text)
    len_sub = len(sub)
    if len_text < len_sub:
        return -1
    htext = Hash(text, len_sub)
    hsub = Hash(sub, len_sub)
    # NOTE(review): this update() is a no-op -- hsub's window already covers
    # the whole pattern (end == len(sub)), so Hash.update's guard rejects the
    # slide.  Presumably a leftover; confirm and remove.
    hsub.update()
    for i in range(len_text - len_sub + 1):
        if htext.digest() == hsub.digest():
            # Hash collision is possible (plain ordinal sum), so verify.
            if htext.text() == sub:
                return i
        htext.update()
    return -1
285e51b35c1e91abc7500dc94c2c7f05ef066919 | Python | robinelting/gevprofp | /test_finalproject.py | UTF-8 | 2,562 | 3.234375 | 3 | [] | no_license | import unittest
import finalproject
class test_tokenizer(unittest.TestCase):
    # Coverage for finalproject.tokenizer (lowercasing + punctuation removal).
    def test_tokenizer(self):
        '''Checks if function returns a clean and lowercased sentence'''
        sentence = finalproject.tokenizer('My mama always said life was like a box of chocolates. You never know what you\'re gonna get.')
        # Punctuation (including apostrophes) becomes spaces; case is folded.
        self.assertEqual(sentence, 'my mama always said life was like a box of chocolates you never know what you re gonna get ')
class tagger(unittest.TestCase):
    # Coverage for finalproject.tagger: C = character, D = dialogue,
    # M = scene/stage direction (per the expected output below).
    def test_tagger(self):
        '''Checks if function returns a list with the first element
        being the tag and the second element being the script text.'''
        sentence = finalproject.tagger([' KLAUE', ' All of it? I took a tiny piece of it. They have a mountain full of it. They\'ve been mining it for thousands of years and still haven\'t scratched the surface.', ' IN THE SEATING AREA'])
        self.assertEqual(sentence, [['C', ' KLAUE'],
                                    ['D',
                                     ' All of it? I took a tiny piece of it. They have a mountain '
                                     "full of it. They've been mining it for thousands of years and still haven't "
                                     'scratched the surface.'],
                                    ['M', ' IN THE SEATING AREA']])
    def test_value(self):
        '''Checks if the function will raise a TypeError when necessary, and checks if the value is a list'''
        sentence = finalproject.tagger([' KLAUE', ' All of it? I took a tiny piece of it. They have a mountain full of it. They\'ve been mining it for thousands of years and still haven\'t scratched the surface.', ' IN THE SEATING AREA'])
        # Non-iterable / boolean inputs must be rejected.
        self.assertRaises(TypeError, finalproject.tagger, -2)
        self.assertRaises(TypeError, finalproject.tagger, True)
        self.assertIsInstance(sentence, list, msg=None)
class test_script_aligner(unittest.TestCase):
    def test_script_aligner(self):
        '''Checks if the function raise a TypeError when necessary, and checks if the value is a list'''
        tagged_sentences = finalproject.script_aligner([['C', ' OKOYE', 'D', ' Where is Agent Ross? ']], ['the apology from', 'where is'], ['01:44:24,308 --> 01:44:27,895', '01:45:24,308 --> 01:44:27,895'])
        self.assertIsInstance(tagged_sentences, list, msg=None)
        # NOTE(review): these raise-checks target finalproject.tagger, not
        # script_aligner -- likely a copy-paste leftover from the class above.
        self.assertRaises(TypeError, finalproject.tagger, -2)
        self.assertRaises(TypeError, finalproject.tagger, True)
unittest.main()
| true |
12dfb8861afd30d27154bfe698a9a3504bcae291 | Python | lucasrodrigues10/processamento_imagens | /lab_2/ex_2.py | UTF-8 | 606 | 3.078125 | 3 | [] | no_license | import numpy as np
import cv2
from matplotlib import pyplot as plt
# Read the image as single-channel grayscale.
img = cv2.imread('sunset3.bmp', 0)
# 2-D FFT, shifted so the DC term sits at the center; log-magnitude for display.
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
mag = 20 * np.log(np.abs(fshift))
# Sum of all gray levels.
soma_cinza = np.sum(img)
print('Soma: ', soma_cinza)
# Bug fix: the pixel count was len(img) * len(img), i.e. rows squared, which
# is only correct for square images.  img.size == rows * cols always holds.
numero_pixels = img.size
print('Numero de Pixel: ', numero_pixels)
# Mean gray level; F(0, 0) / N should equal this mean (DC component).
divisao = soma_cinza / numero_pixels
primeiro_valor_fft = f[0, 0] / numero_pixels
print('FFT(0,0)/divisao: ', primeiro_valor_fft)
plt.subplot(131)
plt.imshow(img, cmap='gray')
plt.subplot(132)
plt.imshow(mag, cmap='gray')
plt.show()
| true |
d4c3d9a4c63232a94a071374b6b71d96ca8b9f92 | Python | Devesh-Maheshwari/nlp-python-deeplearning | /Part-08 Web Deployments/api.py | UTF-8 | 1,912 | 2.5625 | 3 | [
"MIT"
] | permissive | import logging
import flask
import os
import numpy as np
from flask import Flask, jsonify, render_template, request
from scipy import misc
from sklearn.externals import joblib

app = Flask(__name__)

# create logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(str(__name__) + ".log")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
@app.route("/")
@app.route("/index")
def index():
    """Render the landing page with no prediction label yet."""
    return flask.render_template("index.html", label=False)
@app.route("/status", methods=["GET"])
def get_status():
    """Health-check endpoint: report the API version and liveness."""
    return jsonify({"version": "0.0.1", "status": True})
@app.route("/predict", methods=["POST"])
def make_prediction():
    """Classify an uploaded text file and render the result.

    Expects a multipart form upload under the key "file". The file is
    written to disk, read back, classified by the global `model`, and the
    temporary copy is always removed afterwards.
    """
    if request.method == "POST":
        # get uploaded file if it exists
        logger.debug(request.files)
        f = request.files["file"]
        # FIX: basename() strips any directory components a client could
        # smuggle into the filename (path traversal), keeping writes in CWD.
        filename = os.path.basename(f.filename)
        f.save(filename)  # save file to disk
        logger.info(f"{filename} saved to disk")
        try:
            # read file from disk
            with open(filename, "r", encoding="utf-8") as infile:
                text_content = infile.read()
            logger.info("Text Content from file read")
            prediction = model.predict([text_content])
            logger.info(f"prediction: {prediction}")
            prediction = "pos" if prediction[0] == 1 else "neg"
        finally:
            # FIX: remove the temporary copy even if reading or predicting
            # fails (the original leaked the file on any exception).
            os.remove(filename)
        return flask.render_template("index.html", label=prediction)
if __name__ == "__main__":
    # load ml model from disk
    # NOTE(review): `model` is a module-level global read by
    # make_prediction; it is only bound when this script is run directly.
    model = joblib.load("model.pkl")
    # start api
    app.run(host="0.0.0.0", port=8000, debug=True)
| true |
a2a982c5a896e5b1958d5f6d521ed10b551a6ab4 | Python | leticiaglass/projetos-educacionais-python | /práticaE_planoinclinado.py | UTF-8 | 2,067 | 4.03125 | 4 | [] | no_license | # programa principal
from praticaE2_functions import *  # import all helpers from the functions module (a_static, acceleration, a_kinetic, radians)

assunto = "Plano inclinado 3."  # exercise title and context shown to the user
print(assunto)
print("O tema deste exercício será o seguinte sistema: dois blocos conectados por um fio que passa por uma polia, um pendurado e um sobre um plano inclinado com atrito. Os dois blocos possuem a mesma massa. O fio é rígido e de massa desprezível. Não há atrito no eixo da polia e a massa da polia também é desprezível. Há atrito entre o plano inclinado e o bloco que desliza sobre ele. Neste o caso o sistema está inicialmente em repouso. \nImportante: os coeficientes de atrito cinético e estático são sempre valores entre zero e um, e o cinético é sempre menor ou igual ao estático, nunca maior. E a aceleração da gravidade vale 9.8 m/s**2. " )
# Ask the user for the incline angle (degrees) and validate it.
tet0 = float(input("Por favor digite um valor (entre 0 e 90) para o ângulo de inclinação do plano (em graus): "))
if tet0 > 90 or tet0 < 0:
    print("Desculpe, o valor do ângulo que você digitou não é compatível com o problema. Precisamos de um ângulo entre 0 e 90 graus. O programa será encerrado.")
    exit()
else:  # valid angle: compute cae_max and a_max and print them with 3 decimal places
    cae_max = a_static(radians(tet0))
    print("O atrito estático máximo (antes do sistema sair do repouso) é: {:.3f}".format(cae_max))
    a_max = acceleration(tet0)
    print("E a aceleração máxima do bloco 2 (evidentemente desprezando o atrito) vale: {:.3f} m/s**2.".format(a_max))
# Ask for an acceleration value strictly below the maximum.
a2 = float(input("Agora por favor digite um valor (MENOR QUE a_max) para a aceleração do bloco 2 (em m/s²): "))
while a2 >= a_max:
    print(a_max, a2)  # NOTE(review): looks like leftover debug output
    a2 = float(input("Lembre-se: o valor deve ser MENOR que aceleração máxima! Tente novamente: "))
cac = a_kinetic(tet0, a2)
print("O valor do atrito cinético vale: {:.3f}".format(cac))
print(cac)  # NOTE(review): duplicates the formatted line just above
| true |
38f5192faf88c1327912b230c247e46dcc3d2f26 | Python | liaowucisheng/self-study-Python | /01周/python程序/用for循环实现1~100求和.py | UTF-8 | 472 | 4.34375 | 4 | [] | no_license | sum = 0
# NOTE(review): the accumulator `sum` (initialized just above) shadows the
# built-in sum(); consider renaming it to `total`.
for x in range(101):  # range(101) yields 0..100 inclusive
    sum += x
print(sum)
"""
range(101):可以用来产生0到100范围的整数,需要注意的是取不到101。
range(1, 101):可以用来产生1到100范围的整数,相当于前面是闭区间后面是开区间。
range(1, 101, 2):可以用来产生1到100的奇数,其中2是步长,即每次数值递增的值。
range(100, 0, -2):可以用来产生100到1的偶数,其中-2是步长,即每次数字递减的值。
"""
38174cf96db3ae42d585a716c09e6f585cdbaa76 | Python | nevin-watkins/dog_breed_app | /models/run_model.py | UTF-8 | 2,772 | 2.765625 | 3 | [
"MIT"
] | permissive | # This is where I'm keeping the Restnet Algorithm
import numpy as np
from keras.models import Sequential
from keras.layers import GlobalAveragePooling2D, Conv2D, Dropout, GlobalAveragePooling2D
from keras.layers import Dense
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
import sys
# Supported feature-extraction backbones and their bottleneck files.
_BOTTLENECK_FILES = {
    'Resnet50': '../data/bottleneck_features/DogResnet50Data.npz',
    'VGG19': '../data/bottleneck_features/DogVGG19Data.npz',
}


def load_model(algo_type):
    '''
    Load pre-computed bottleneck features for a supported backbone.

    Input:
    1. algo_type: 'VGG19' or 'Resnet50' are the only options for now
    Output:
    (train, valid, test, data_shape) where data_shape is the per-sample
    feature shape (train.shape[1:]).
    Raises:
    ValueError for an unsupported algo_type. (FIX: the original printed a
    message and implicitly returned None, which made callers crash later
    on tuple-unpacking; the duplicated Resnet50/VGG19 branches are also
    collapsed into a single lookup.)
    '''
    try:
        path = _BOTTLENECK_FILES[algo_type]
    except KeyError:
        print('Algorithm type not found')
        raise ValueError('Unsupported algo_type: {!r}'.format(algo_type))
    bottleneck_features = np.load(path)
    train = bottleneck_features['train']
    valid = bottleneck_features['valid']
    test = bottleneck_features['test']
    data_shape = train.shape[1:]
    return train, valid, test, data_shape
def build_model(data_shape):
    '''
    Assemble the small classification head: global average pooling over the
    bottleneck features, light dropout, then a 133-way softmax (one unit
    per dog breed).
    Input: shape of the last 3 values of the input vector data
    Output: an (uncompiled) Keras Sequential model
    '''
    layers = [
        GlobalAveragePooling2D(input_shape=data_shape),
        Dropout(0.2),
        Dense(133, activation='softmax'),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    return net
def compile_model(model):
    '''
    Compile `model` in place for multi-class classification
    (categorical cross-entropy, RMSprop, accuracy metric).
    Input: model
    Output: the same model, compiled
    '''
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'],
    )
    return model
def train_model(model, algo_type, train, valid, train_targets=None, valid_targets=None):
    '''
    Fit `model` on the bottleneck features, checkpointing the best weights.

    Input:
    1. model: a compiled Keras model
    2. algo_type: currently 'Resnet50' or 'VGG19' (names the checkpoint file)
    3. train: training features
    4. valid: validation features
    5. train_targets / valid_targets: one-hot target arrays.
       FIX: the original read module-level globals `train_targets` and
       `valid_targets` that are never defined in this file, so every call
       died with NameError; they are now explicit (optional) parameters.
    Output:
    the fitted model
    Raises:
    ValueError when the target arrays are not supplied.
    '''
    if train_targets is None or valid_targets is None:
        raise ValueError('train_targets and valid_targets must be provided')
    checkpointer = ModelCheckpoint(
        filepath='../models/saved_models/weights.best.{}.hdf5'.format(algo_type),
        verbose=1, save_best_only=True)
    model.fit(train, train_targets,
              validation_data=(valid, valid_targets),
              epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
    return model
def get_model(algo_type="Resnet50", retrain=False):
    '''
    Build (and optionally retrain) the breed classifier, then load the best
    saved weights from disk. This essentially acts like a main for this
    python file.
    Notes: retraining is off by default because loading saved weights is
    much faster.
    '''
    train, valid, test, data_shape = load_model(algo_type)
    model = build_model(data_shape)
    model = compile_model(model)
    # FIX: idiomatic truthiness test instead of `retrain==True`.
    if retrain:
        model = train_model(model, algo_type, train, valid)
    saved_model = '../models/saved_models/weights.best.{}.hdf5'.format(algo_type)
    model.load_weights(saved_model)
    return model
| true |
4d11edf48b9b12e91dd317bcae09e214ba5b7c1e | Python | ynbella/draco | /draco/triangle.py | UTF-8 | 3,826 | 3.296875 | 3 | [] | no_license | from math import acos, pow, sqrt, degrees, isclose
from itertools import combinations
from star import Star
class Triangle:
    """A triangle defined by three Star vertices.

    Precomputes side lengths, interior angles (degrees), and
    min-side-normalized side lengths, plus sorted copies of each for
    order-insensitive similarity comparisons.
    """

    def __init__(self, a: "Star", b: "Star", c: "Star"):
        self.stars = [a, b, c]
        self.sides = self._calculate_sides(self.stars)
        self.sorted_sides = sorted(self.sides)
        self.angles = self._calculate_angles(self.sides)
        self.sorted_angles = sorted(self.angles)
        self.normalized_sides = self._normalize_sides(self.sides)
        self.sorted_normalized_sides = sorted(self.normalized_sides)

    @staticmethod
    def _calculate_sides(points):
        """Euclidean length of each edge between the three vertices."""
        sides = []
        for p, q in combinations(points, 2):
            sides.append(sqrt(pow(q.x - p.x, 2) + pow(q.y - p.y, 2)))
        return sides

    @staticmethod
    def _calculate_angles(sides):
        """Interior angles in degrees via the law of cosines.

        A degenerate (zero-length side) denominator yields an angle of 0.
        """
        angles = []
        n = len(sides)
        for i in range(n):
            a = sides[i % n]
            b = sides[(i + 2) % n]
            c = sides[(i + 1) % n]
            numerator = pow(b, 2) + pow(c, 2) - pow(a, 2)
            denominator = 2 * b * c
            if isclose(denominator, 0):
                angles.append(0)
            else:
                # Clamp the cosine into [-1, 1] to guard against float
                # round-off before calling acos.
                angles.append(degrees(acos(max(-1.0, min(1.0, numerator / denominator)))))
        return angles

    @staticmethod
    def _normalize_sides(sides):
        """Scale sides so the shortest has length 1 (no-op if it is 0)."""
        minimum = min(sides)
        if isclose(minimum, 0):
            return sides
        return [side / minimum for side in sides]

    def __eq__(self, other):
        # FIX: return NotImplemented (not False) for non-Triangles so Python
        # can try the reflected comparison; `t == other_type` still
        # evaluates to False for callers.
        if not isinstance(other, Triangle):
            return NotImplemented
        return self.stars == other.stars

    def connected(self, other):
        """True if the two triangles share at least one vertex."""
        if isinstance(other, Triangle):
            for star in self.stars:
                if star in other.stars:
                    return True
        # FIX: always return an explicit bool (the original could fall off
        # the end and return None for non-Triangle arguments).
        return False

    # region Comparison
    def similar(self, other, method: int, side_tol: float = 0.1, ang_tol: float = 5.0):
        """Compare triangles for similarity.

        method: 0 = AAA (angles), 1 = SAS-style, 2 = SSS (normalized sides).
        Returns False for non-Triangle arguments or an unknown method.
        """
        if isinstance(other, Triangle):
            if method == 0:
                return self._similar_aaa(other, ang_tol)
            elif method == 1:
                return self._similar_sas(other, side_tol, ang_tol)
            elif method == 2:
                return self._similar_sss(other, side_tol)
            else:
                print("Invalid argument: Invalid comparison method", method)
        # FIX: explicit bool for every fall-through path.
        return False

    def _similar_aaa(self, other, tol):
        """All three sorted angles agree within tol degrees."""
        for i in range(3):
            if not isclose(self.sorted_angles[i], other.sorted_angles[i], abs_tol=tol):
                return False
        return True

    def _similar_sas(self, other, side_tol, agl_tol):
        """Two sorted-side ratios agree (abs tolerance) and the matching sorted angle agrees."""
        if isclose(self.sorted_sides[0] / other.sorted_sides[0], self.sorted_sides[1] / other.sorted_sides[1],
                   abs_tol=side_tol):
            if isclose(self.sorted_angles[2], other.sorted_angles[2], abs_tol=agl_tol):
                return True
        if isclose(self.sorted_sides[1] / other.sorted_sides[1], self.sorted_sides[2] / other.sorted_sides[2],
                   abs_tol=side_tol):
            if isclose(self.sorted_angles[0], other.sorted_angles[0], abs_tol=agl_tol):
                return True
        if isclose(self.sorted_sides[2] / other.sorted_sides[2], self.sorted_sides[0] / other.sorted_sides[0],
                   abs_tol=side_tol):
            if isclose(self.sorted_angles[1], other.sorted_angles[1], abs_tol=agl_tol):
                return True
        return False

    def _similar_sss(self, other, tol):
        """All three sorted min-normalized sides agree within tol."""
        for i in range(3):
            if not isclose(self.sorted_normalized_sides[i], other.sorted_normalized_sides[i], abs_tol=tol):
                return False
        return True
    # endregion
| true |
88c9f443f80ad6f7e8798912f28a4e63b253cd84 | Python | justinhsg/AdventOfCode2016 | /4/security.py | UTF-8 | 1,204 | 2.75 | 3 | [] | no_license | with open("input.txt", "r") as infile:
    raw = infile.read().split("\n")
# Parse each room line "name-parts-sector[checksum]" into
# [sorted letters, claimed checksum, sector id].
pretty = []
for i in raw:
    checksum = i[-6:-1]
    wordsval = i[:-7].split("-")
    letters = "".join(sorted("".join(wordsval[:-1])))
    value = int(wordsval[-1])
    pretty.append([letters, checksum, value])
# Part 1: sum sector ids of rooms whose checksum equals the five most common
# letters; ties break alphabetically because `letters` is pre-sorted and
# Python's sort is stable.
part1 = 0
for i in pretty:
    store = []
    p = 0
    # Count runs of identical characters in the sorted letter string.
    while(p<len(i[0])):
        val = i[0].count(i[0][p])
        store.append([val, i[0][p]])
        p+=val
    store = sorted(store, key = lambda k: -k[0])
    if("".join([store[j][1] for j in range(5)]) == i[1]):
        part1 += i[2]
# Re-parse for part 2: keep the hyphen-separated name words and reduce the
# sector id modulo 26 (the shift-cipher cycle length).
pretty = []
for i in raw:
    checksum = i[-6:-1]
    wordsval = i[:-7].split("-")
    letters = wordsval[:-1]
    value = int(wordsval[-1])%26
    pretty.append([letters, checksum, value])
def offset(c, v):
    """Caesar-shift lowercase letter `c` forward by `v` places, wrapping z -> a."""
    base = ord('a')
    return chr(base + (ord(c) - base + v) % 26)
part2 = 0
for i in pretty:
    llist = i[0]
    offsets = i[2]
    sentence = []
    # Decrypt the first three words of the room name with the shift cipher.
    for k in range(3):
        word = "".join([offset(c,offsets) for c in llist[k]])
        sentence.append(word)
    if("northpole" in sentence):
        # Recover this room's sector id from its raw input line.
        part2 = (raw[pretty.index(i)].split("-")[-1].split("[")[0])
        break
print("Part 1: {}\nPart 2: {}".format(part1, part2)) | true |
d1164c138d0d51dd74bf5454fbc80074768f9d2c | Python | junghankim-git/test_codes | /program/geopotential_height/center.py | UTF-8 | 562 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python
# Reference surface pressure and the exponent used by the Exner function.
p0 = 100000.0
kapa = 2.8571658640413355e-1


def center(p1, p2):
    """Return two mid-level pressures between p1 and p2.

    The first is the plain arithmetic mean of the pressures; the second is
    the pressure whose Exner value equals the mean of the two Exner values.
    """
    arithmetic_mean = (p1 + p2) / 2.
    exner_mean = (exner(p1) + exner(p2)) / 2.
    return arithmetic_mean, p0 * exner_mean ** (1. / kapa)


def exner(p):
    """Dimensionless Exner pressure (p / p0) ** kapa."""
    return (p / p0) ** kapa
# Quick numerical sanity checks (Python 2 print statements).
pi1 = 900000.
pi2 = 800000.
pim = (pi1+pi2)/2.0
ex1 = (pi1/p0)**kapa
ex2 = (pi2/p0)**kapa
exm = (pim/p0)**kapa
# Compare Exner of the mean pressure against the mean of the Exner values.
print pi1, pim, pi2
print ex1, exm, ex2
print (ex1+ex2)/2.0
print center(pi1,pi2)
# Exner value at a few reference pressure levels.
p = [100000.0,50000.0,30000.0,10000.0]
for i in range(len(p)):
    print p[i], exner(p[i])
| true |
f03bce739f3c0635d0838e7f184f4069c916f4e7 | Python | Eradch/3 | /4.py | UTF-8 | 146 | 3.15625 | 3 | [] | no_license | def m_pow_fun(x, y):
    try:
        res = x ** y
    except TypeError:
        # Non-numeric operand(s): return a sentinel string instead of raising.
        return "Error"
    return res


print(m_pow_fun(2, -3))  # 2 ** -3 == 0.125
| true |
08ba1abda0c7d56eda621347fe71b16ef45016c4 | Python | RadiObad/1MAC-Workshop | /16 - Stores/Forums/main.py | UTF-8 | 792 | 2.828125 | 3 | [
"MIT"
] | permissive | import models, stores
# Sample members and posts used to exercise the stores.
member1 = models.Member("Manar", 23)
member2 = models.Member("Nour", 21)

post1 = models.Post("First Post", "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")
post2 = models.Post("Second Post", "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.")
post3 = models.Post("Third Post", "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.")

# Fill the in-memory stores and dump their contents.
member_store = stores.MemberStore()
member_store.add(member1)
member_store.add(member2)

post_store = stores.PostStore()
post_store.add(post1)
post_store.add(post2)
post_store.add(post3)

print (member_store.get_all())
print (post_store.get_all())
5919b504db342e96ef94cbe3adcf2c562652d197 | Python | samuelgerber/OrthogonalAutoencoding | /run-aec-sine.py | UTF-8 | 7,864 | 2.5625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import argparse
import time
import ae
import data2d

#fix seed
tf.set_random_seed(10)
np.random.seed(2)

# CLI: data-set shape/noise, network architecture, regularization weights
# and the optimization schedule.
parser = argparse.ArgumentParser(description='Autoencoder for spiral data set.')
parser.add_argument('--npoints', metavar='N', type=int, nargs='?', default=300,
                    help='number of points in spiral data')
parser.add_argument('--noise', type=float, nargs='?', default=0.03,
                    help='amount of noise for spiral data')
parser.add_argument('--width', type=float, nargs='?', default=0.,
                    help='create snake data set with width width')
parser.add_argument('--dimension', type=int, nargs='+',
                    help='Layers of the autoencoder')
parser.add_argument('--weights', type=float, nargs='?', default=0.2,
                    help='Setup standard deviation for weight initalization')
parser.add_argument('--stochastic', metavar='N', type=int, nargs='?', default=0,
                    help='Number of points in stochastic optimization. On default(0) all points are used')
parser.add_argument('--sigma', type=float, nargs='?', default=0.,
                    help='Denoising autoencoder with Normal distribution noise with sdev sigma')
parser.add_argument('--alpha', type=float, nargs='?', default=0.,
                    help='Orthogonal penalty regularization')
parser.add_argument('--tortho', type=str, nargs='?', default="squared",
                    help='Orthogonal penalty type')
parser.add_argument('--beta', type=float, nargs='?', default=0.,
                    help='Jacobian penalty regularization')
parser.add_argument('--gamma', type=float, nargs='?', default=0.,
                    help='(gamma - sqrt(Jacobian))^2 penalty regularization')
parser.add_argument('--factor', type=float, nargs='?', default=1.,
                    help='global factor for all regulariztaion parameter')
parser.add_argument('--frate', type=float, nargs='?', default=0.,
                    help='Increase gobal factor after every iteration by frate')
parser.add_argument('--lrate', type=float, nargs='?', default=0.00001,
                    help='Learning rate for Adam optimizer')
parser.add_argument('--outer', type=int, nargs='?', default=40,
                    help='Number of outer optimization each of inner steps. Print after every outer iterations')
parser.add_argument('--inner', type=int, nargs='?', default=500,
                    help='Number of inner optimization.')
parser.add_argument('--file', type=str, nargs='?', default='./',
                    help='Store results in folder.')
args = parser.parse_args()

# Record the exact run configuration next to the results.
f = open(args.file + "/args.txt", 'w')
f.write( str(args) )
f.close()
# NOTE: this log handle stays open for the whole run; it is closed after
# the optimization loop further below.
f = open(args.file + "/log.txt", 'w')

# Sine data set: a "snake" band when --width > 0, otherwise a thin curve.
if args.width > 0.:
    data = data2d.SineSnake(npoints=args.npoints, width = args.width, sigma = args.noise)
    testdata = data2d.SineSnake(npoints=10000, width = args.width, sigma = args.noise)
else:
    data = data2d.Sine(npoints=args.npoints, sigma = args.noise)
    testdata = data2d.Sine(npoints=10000, sigma = args.noise)

#setup autoencoder
aec = ae.AutoEncoder()
aec.addDimension( data.getDimension() )
for d in args.dimension :
    aec.addDimension( d )
aec.stochastic = args.stochastic
aec.sigma = args.sigma
aec.alpha = args.alpha
aec.tortho = args.tortho
aec.beta = args.beta
aec.gamma = args.gamma
aec.rate = args.frate
aec.factor = args.factor
aec.setup( sdev=args.weights, lrate=args.lrate )
def sineline(npoints = 10000) :
    """Plot a normalized half-circle arc (gray) and its autoencoder image (dark)."""
    phi = np.linspace(0, np.pi, npoints)
    arc = np.vstack([ np.cos(phi), np.sin(phi) ] )
    # Same affine normalization used for the data set.
    arc[0,:] = arc[0,:] / 1.5
    arc[1,:] = (arc[1,:] -0.5) / 1.5
    arc = np.transpose(arc)
    # Evaluate the reconstruction (last layer output) with zero input noise.
    xr = aec.xr[-1].eval( feed_dict={ aec.x: arc,
                          aec.noise: np.zeros(arc.shape) })
    plt.plot(xr[:,0], xr[:,1], color="0.2", linewidth=2.5, zorder=2)
    plt.plot(arc[:, 0], arc[:,1], color="0.8", linewidth=2, zorder=1)
def polargrid(radii = np.linspace(0.05, 1.5, 20), phi = np.linspace(0, np.pi,
              101), every=5, xrVar = aec.xr[-1] ) :
    """Draw a polar half-grid (gray) and its image under the autoencoder (dark).

    NOTE(review): the defaults (np.linspace(...) and aec.xr[-1]) are
    evaluated once, when this module-level def runs, so `xrVar` is bound to
    the reconstruction tensor existing at that point.
    """
    arc = np.vstack([ np.cos(phi), np.sin(phi) ] )
    arcs = []  # mapped (reconstructed) arcs
    orcs = []  # original arcs
    for r in radii:
        a = arc * r
        # Same affine normalization used for the data set.
        a[0,:] = a[0,:]/1.5
        a[1,:] = (a[1,:]-0.5)/1.5
        orcs.append(a)
        arcs.append( xrVar.eval( feed_dict={ aec.x: np.transpose(a),
                                 aec.noise: np.transpose( np.zeros(arc.shape) ) }) )
        plt.plot(arcs[-1][:,0], arcs[-1][:,1], color="0.2", linewidth=1.5, zorder=2)
        plt.plot(a[0, :], a[1,:], color="0.8", linewidth=1, zorder=1)
    if every < len(phi):
        # Draw the radial grid lines at every `every`-th angle sample.
        for i in range( int( len(phi)/every)+1 ):
            l = np.zeros( [len(arcs), 2] )
            lo = np.zeros( [len(arcs), 2] )
            for j in range( len(arcs) ):
                l[j,:] = arcs[j][i*every, :]
                lo[j,:] = orcs[j][:, i*every]
            plt.plot(l[:,0], l[:,1], color="0.2", linewidth=1.5, zorder=2)
            plt.plot(lo[:,0], lo[:,1], color="0.8", linewidth=1, zorder=1)
x = data.getData()

with tf.Session() as session:
    aec.initalize(session)
    # Snapshot of the (untrained) mapping of the polar grid.
    polargrid()
    plt.axis('scaled')
    plt.tick_params(axis='both', which='major', labelsize=18)
    plt.tick_params(axis='both', which='minor', labelsize=12)
    plt.xlim(-1.05,1.05)
    plt.ylim(-0.5,0.7)
    plt.savefig( args.file + "/init.pdf" )
    plt.clf()
    #plt.show()
    # Per-outer-iteration metrics: [loss, rloss, oloss, Jloss, frob, ortho].
    trainRes = np.zeros((args.outer, 6))
    testRes = np.zeros((args.outer, 6))
    tStart = time.clock()
    for i in range(args.outer):
        l, rl, ol, jl, jf, o, tl, trl, tol, tjl, tjf, to = aec.optimize(session, data, args.inner, testdata)
        tCurrent = time.clock()
        f.write( "time elpased " + str(tCurrent - tStart) + "\n" )
        #f.write( "train lloss %s" % ll
        f.write( "train loss " + str(l) + "\n" )
        f.write( "train rloss " + str(rl) + "\n" )
        f.write( "train oloss " + str(ol) + "\n" )
        f.write( "train Jloss " + str(jl) + "\n" )
        f.write( "train regularization factor " + str( aec.factor ) + "\n" )
        f.write( "train factor * alpha * oloss " + str( aec.alpha * aec.factor * ol ) + "\n" )
        f.write( "train factor * beta * Jloss " + str( aec.beta * aec.factor * jl ) + "\n" )
        f.write( "frob norm " + str( np.mean(np.sqrt(jf)) ) + "\n" )
        f.write( "ortho " + str( np.sqrt(o) ) + "\n" )
        # Scatter the data (blue) against its reconstruction (orange).
        plt.scatter(x[:,0], x[:,1], c="#16AADB", marker=".", s=300, zorder=3, alpha=0.5)
        rx = aec.xr[-1].eval( feed_dict={ aec.x: x,
                              aec.noise: np.zeros(x.shape) })
        plt.scatter(rx[:,0], rx[:, 1], c="#F29E0C", marker=".", s=300, zorder=4, alpha=0.5)
        polargrid()
        #sineline()
        plt.axis('scaled')
        plt.tick_params( axis='both', which='major', labelsize=18 )
        plt.tick_params( axis='both', which='minor', labelsize=12 )
        plt.xlim(-1.05,1.05)
        plt.ylim(-0.6,0.7)
        plt.savefig( args.file + "/iteration-{:0>5d}.pdf".format( (i+1)*args.inner ) )
        plt.clf()
        #plt.show()
        tCurrent2 = time.clock()
        f.write("Plotting time " + str(tCurrent2 - tCurrent) + "\n\n")
        f.flush()
        trainRes[i, :] = np.array( [l, rl, ol, jl, np.mean(np.sqrt(jf)), o] )
        testRes[i, :] = np.array( [tl, trl, tol, tjl, np.mean(np.sqrt(tjf)), to] )
f.close()

# Final summary plot: train vs. test RMSE over the outer iterations.
plt.tick_params(axis='both', which='major', labelsize=24)
plt.tick_params(axis='both', which='minor', labelsize=17)
plt.plot( np.array(range(args.outer)) ,
          np.sqrt( trainRes[:, 1] ), c="#028C2F", linewidth=4)
plt.plot( np.array(range(args.outer)) ,
          np.sqrt( testRes[:, 1] ), c="#3C007E", linewidth=4, ls="--")
plt.xlabel('Iterations (in {0})'.format(args.inner), fontsize=30)
plt.ylabel('Root mean square error', fontsize=30)
plt.ylim( ymin = 0, ymax=0.35 )
plt.tight_layout()
plt.savefig( args.file + "/loss.pdf" )
plt.clf()
| true |
f21d1f04ba79d41656b1d7804e1fba99625434d4 | Python | xizhilang2/Python | /Learn PYTHON 3 the HARD WAY/ex20.py | UTF-8 | 607 | 3.6875 | 4 | [] | no_license | from sys import argv
script, inputFile = argv  # CLI: ex20.py <file-to-read>
def printAll(fileInput):
    """Print the entire remaining contents of an open file object."""
    contents = fileInput.read()
    print(contents)
def rewind(fileInput):
    """Seek back to the start of the file, like rewinding a tape."""
    fileInput.seek(0, 0)
def printALine(lineCount, f):
    """Print the next line of `f`, prefixed with its (1-based) line number."""
    prefix = "This's {} line:".format(lineCount)
    print(prefix, f.readline(), end="")
# Demo driver: print the whole file, rewind, then print the first three
# lines one at a time.
currentFile = open(inputFile)
print("First let's print the whole file:\n")
printAll(currentFile)
print("Now let's rewind, kind of like a tape.")
rewind(currentFile)
print("Let's prit three lines:")
currentLine = 1
printALine(currentLine, currentFile)
currentLine += 1
printALine(currentLine, currentFile)
currentLine += 1
printALine(currentLine, currentFile)
# NOTE(review): the file handle is never closed; a with-block would be safer.
| true |
2200a0ec533eed63764c3c0c5c5c41d2bf496ce0 | Python | Aasthaengg/IBMdataset | /Python_codes/p03005/s209409526.py | UTF-8 | 89 | 3.28125 | 3 | [] | no_license | balls, men = map(int,input().split())
# Edge case: with men == 1 the answer is defined as 0; otherwise print the
# difference between balls and men.
if men == 1:
    print(0)
else:
    print(balls - men)
4e10c52b4a56735ee519ed39938f9913db57c8c1 | Python | lizhaojiang/beautufulDay | /ajax_spider_demo/demo1.py | UTF-8 | 1,268 | 2.984375 | 3 | [] | no_license | from selenium import webdriver
import time

# Path to the ChromeDriver binary; the r-prefix keeps the backslashes literal.
driver_path = r"D:\chromedriver\chromedriver.exe"
# Create the Chrome webdriver, passing the driver path.
driver = webdriver.Chrome(executable_path=driver_path)
driver.get('https://www.baidu.com/')
# time.sleep(5)
# driver.close()  # close the current page
# driver.quit()   # quit the whole browser
# inputTage = driver.find_element_by_id('kw')              # locate the input box by id
# inputTage = driver.find_element_by_name('wd')            # locate the input box by name
# inputTage = driver.find_element_by_class_name('s_ipt')   # locate the input box by class name
# Locate by XPath:
# inputTage = driver.find_element_by_xpath('//input[@id="kw"]')
# Locate by CSS selector (direct child input of .quickdelete-wrap):
# inputTage = driver.find_element_by_css_selector(".quickdelete-wrap > input")
# The plural find_elements_* variant returns a list, so take element 0.
inputTage = driver.find_elements_by_css_selector(".quickdelete-wrap > input")[0]
inputTage.send_keys('python')  # type the string "python" into the input box
47982ef9cb7e965c663ccfd4187b306ae5f3ae7c | Python | rhyun9584/BOJ | /python/1182.py | UTF-8 | 252 | 2.859375 | 3 | [] | no_license | from itertools import combinations
# BOJ 1182: count the non-empty subsequences of `numbers` whose sum is S.
N, S = map(int, input().split())
numbers = list(map(int, input().split()))
result = 0
# Brute force over all subset sizes 1..N (O(2^N); fine for small N).
for i in range(1, N+1):
    for arr in combinations(numbers, i):
        if sum(arr) == S:
            result += 1
print(result)
e488c69066a736b687229ac82cbb4f2b4808ffa2 | Python | igarnett6/CS-1114 | /1114 hw/hw5/ig907_hw5_q1.py | UTF-8 | 306 | 3.9375 | 4 | [] | no_license | userInput = input("Enter an odd length string: ");
# Index arithmetic assumes the input has odd length, as prompted above.
# NOTE(review): the trailing semicolons are unnecessary in Python.
middleChar = userInput[int((len(userInput)/2))];
firstHalf = userInput[:(int(len(userInput)/2))];
secondHalf = userInput[int(len(userInput)/2):];
print("Middle charcter: ",middleChar);
print("First half: ",firstHalf);
print("Second half: ",secondHalf);
| true |
d71b7a788d29b9ea8316cc29e14f55a62d2a4e5e | Python | khibma/HomeTemp | /main.py | UTF-8 | 6,017 | 2.640625 | 3 | [
"BSD-3-Clause"
] | permissive | import RPi.GPIO as GPIO
import os
import sys
import time
import datetime
import subprocess
import re
from AdafruitLibs.Adafruit_I2C import Adafruit_I2C
from AdafruitLibs.Adafruit_7Segment import SevenSegment
from AdafruitLibs.Adafruit_BMP085 import BMP085
import outsideWeather as weather
class SensorValues(object):
    """Reads temperature/pressure from a BMP085 and humidity from a DHT22.

    Written for Python 2 (uses print statements). GPIO button helpers are
    included; the brightness sensor hardware is not implemented yet.
    """

    def __init__(self, DHTPIN, BRIGHTPIN, BUT1PIN, BUT2PIN):
        self.bmp = BMP085(0x77) #BMP-085 sensor
        # Take one reading of each quantity at construction time.
        self.temp = self.getTemp()
        self.humidity = self.getHum(DHTPIN)
        self.pressure = self.getPressure()
        self.brightness = self.checkBrightness(BRIGHTPIN)

    def getTemp(self):
        # Temperature from the BMP085, returned as a string.
        temp = str(self.bmp.readTemperature())
        print "temp: {}".format(temp)
        return temp

    def getHum(self, DHTPIN):
        # Shell out to the Adafruit DHT driver and parse "Hum = <float>".
        # Retries forever (sleeping 1s) until a reading parses.
        # NOTE(review): the bare except also swallows KeyboardInterrupt and
        # real driver failures — consider narrowing it.
        while True:
            try:
                output = subprocess.check_output(["./AdafruitLibs/Adafruit_DHT_Driver/Adafruit_DHT", "22", DHTPIN]);
                matches = re.search("Hum =\s+([0-9.]+)", output)
                humidity = str(float(matches.group(1)))
                print "humidity {}".format(humidity)
                return humidity
            except:
                time.sleep(1)

    def getPressure(self):
        # Pressure reading from the BMP085 (raw value, no unit conversion here).
        pressure = self.bmp.readPressure()
        print "pressure {}".format(pressure)
        return pressure

    def checkBrightness(self, BRIGHTPIN):
        ''' sensor hardware not implemented
        reading = 0
        GPIO.setup(BRIGHTPIN, GPIO.OUT)
        GPIO.output(BRIGHTPIN, GPIO.LOW)
        time.sleep(0.1)
        GPIO.setup(BRIGHTPIN, GPIO.IN)
        while (GPIO.input(BRIGHTPIN) == GPIO.LOW):
            reading += 1
        prin ("brightness {}").format(reading)
        return reading
        '''
        # Placeholder value until the light sensor is wired up.
        return 100

    def button_1(self, BUT1PIN):
        # Poll button 1: input reads False when pressed (pull-up wiring, see main).
        GPIO.setup(BUT1PIN, GPIO.IN)
        if (GPIO.input(BUT1PIN) == False):
            print("button 1 was pushed")
        else:
            print (GPIO.input(BUT1PIN))

    def button_2(self, BUT2PIN):
        # Poll button 2: input reads False when pressed.
        GPIO.setup(BUT2PIN, GPIO.IN)
        if (GPIO.input(BUT2PIN) == False):
            print("button 2 was pushed")
def display(txt, colon=False):
    """Render `txt` on the 4-digit 7-segment display.

    With colon=True, `txt` is a datetime and HH:MM is shown with the colon
    lit. Otherwise `txt` is a short digit string; a '.' lights the decimal
    point of the digit before it, and the letters 'c' and 'h' are drawn with
    raw segment patterns (temperature/humidity readouts).
    """
    segment.setColon(False)
    # If colon, displaying time..
    if colon == True:
        digits = []
        digits.append(int(txt.hour / 10))
        digits.append(txt.hour % 10)
        digits.append(int(txt.minute / 10))
        digits.append(txt.minute % 10)
        dotIdx = 0
        segment.setColon(True)
    else:
        # Find the decimal point (if any) and drop it from the digit string.
        dotIdx = txt.find('.')-1
        if dotIdx > 0:
            txt = txt.replace(".", "")
        digits = list(txt)
        segment.setColon(False)
    digits.insert(2,0) #colon is postion 2, so insert a dummy value
    if dotIdx >=2:
        dotIdx+=1
    # Write display positions 0,1,3,4 (2 is the colon slot).
    for i in range(0,5):
        if i == 2:
            continue
        dot = False
        if dotIdx >0:
            if i == dotIdx:
                dot = True
        if digits[i] == 'c':
            segment.writeDigitRaw(i, 99)
        elif digits[i] == 'h':
            segment.writeDigitRaw(i, 116)
        else:
            segment.writeDigit(i, int(digits[i]), dot)
def setBrightness(segment, currentBrightness):
    """Map a raw brightness reading to one of three display levels."""
    if currentBrightness < 50:
        level = 5
    elif currentBrightness < 125:
        level = 10
    else:
        level = 15
    segment.setBrightness(level)
def countdown(i):
    """Count down from i to 0 on the 7-segment display, one step per second.

    BUG FIX: the original passed `map(int, str(i).zfill(3))` — a map
    object — to display(), which works on strings (it calls .find/.replace
    and fills four digit positions), so every call crashed. Pass a
    zero-padded 4-character string instead.
    """
    while(i!=-1):
        display(str(i).zfill(4))
        time.sleep(1)
        i-=1
def but1_callback(channel):
    """GPIO edge-detect callback for button 1; just reports the press.

    FIX: use the print() call form, consistent with the print("...") style
    already used elsewhere in this file (and valid on both Python 2 and 3);
    the original used the bare Python 2 print statement.
    """
    print("button was pushed")
def LED(pin, status):
    # Drive the LED on `pin` high/low according to `status`.
    GPIO.output(pin, status)
if __name__ == '__main__':
    #PIN VALUES
    DHTPIN = "25"
    BUT1PIN = 17
    BUT2PIN = 999      # NOTE(review): 999 looks like a not-yet-wired placeholder
    BRIGHTPIN = 1
    OUTSIDEPIN = 24
    NEGATIVEPIN = 999  # NOTE(review): placeholder; the related code is commented out
    OFFPIN = 22
    #Init the display
    segment = SevenSegment(address=0x70)
    segment.setBrightness(15)
    GPIO.setmode(GPIO.BCM)
    #Init buttons and set the callback
    GPIO.setup(BUT1PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.add_event_detect(BUT1PIN, GPIO.FALLING, callback = but1_callback, bouncetime=300)
    #Init LEDs and shutdown switch
    GPIO.setup(OUTSIDEPIN, GPIO.OUT)
    #GPIO.setup(NEGATIVEPIN, GPIO.OUT)
    GPIO.setup(OFFPIN, GPIO.IN)
    #Ready the sensors
    sensor = SensorValues(DHTPIN, BRIGHTPIN, BUT1PIN, BUT2PIN)
    currentBrightness = 1
    wundergroundAPIKey = 'xxxxx'
    outsideWeather = weather.getWeather(wundergroundAPIKey)
    weatherTime = time.time()
    Loop = True
    # Main display cycle: inside temp, outside temp, inside/outside humidity,
    # pressure, then the time; the OUTSIDEPIN LED marks outside readings.
    while Loop:
        #Every 5 minutes update the outside weather conditions
        now = time.time()
        if (now - weatherTime) > 300:
            weatherTime = now
            outsideWeather = weather.getWeather(wundergroundAPIKey)
        insideTemp = str(sensor.getTemp())+'c'
        display(insideTemp , False)
        #if "-" in insideTemp :
        #    LED(NEGATIVEPIN, True)
        time.sleep(4)
        #LED(NEGATIVEPIN, False)
        outsideTemp = str(outsideWeather['tempC'])+'c'
        display(outsideTemp, False)
        LED(OUTSIDEPIN, True)
        #if "-" in outsideTemp:
        #    LED(NEGATIVEPIN, True)
        time.sleep(4)
        LED(OUTSIDEPIN, False)
        #LED(NEGATIVEPIN, False)
        display(str(sensor.getHum(DHTPIN))+'h', False)
        time.sleep(4)
        display(str(outsideWeather['humidity'])+'h', False)
        LED(OUTSIDEPIN, True)
        time.sleep(4)
        LED(OUTSIDEPIN, False)
        display(str(sensor.getPressure()), False)
        time.sleep(4)
        display(datetime.datetime.now(), True)
        time.sleep(6)
        #once a loop, check how bright it is, and adjust the display
        # THE HARDWARE IS NOT YET IMPLEMENTED
        currentBrightness = sensor.checkBrightness(BRIGHTPIN)
        setBrightness(segment, currentBrightness)
        #If switched is switched, shutdown the pi
        print GPIO.input(OFFPIN)
        if GPIO.input(OFFPIN):
            Loop = False
            segment.clear()
            GPIO.cleanup()
            #os.system("shutdown now")
| true |
91481083df38b01e8aa79fbfa9951b42a1a525f7 | Python | lfniederauer/pseudo | /pseudo/middlewares/aug_assignment_middleware.py | UTF-8 | 675 | 2.59375 | 3 | [
"MIT"
] | permissive | from pseudo.middlewares.middleware import Middleware
from pseudo.pseudo_tree import Node
class AugAssignmentMiddleware(Middleware):
    '''
    changes `%<x> = %<x> op %<value>` to `%<x> += %<value>` nodes
    '''

    @classmethod
    def process(cls, tree):
        return cls().transform(tree)

    def transform_assignment(self, node, in_block=False, assignment=None):
        # Only rewrite assignments whose value is a binary op with the
        # assignment target as its left operand.
        rhs = node.value
        if rhs.type != 'binary_op' or node.target != rhs.left:
            return node
        return Node('aug_assignment',
                    op=rhs.op,
                    target=node.target,
                    value=rhs.right)
| true |
dad51d220681a7eaa10adb6ef49bf4499b981e8d | Python | saivikasmeda/NLP-Assignment1 | /HW1_P4_StandfordPOS.py | UTF-8 | 779 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import re
import numpy as np
from nltk import pos_tag
def words_from_sentences(sentence):
    """Split a sentence into tokens on single spaces.

    Note: consecutive spaces yield empty tokens, matching str.split(' ').
    """
    tokens = sentence.split(' ')
    return tokens
def POScheck(sentence):
    """Tokenize `sentence` on spaces and return NLTK POS tags for the tokens."""
    return pos_tag(words_from_sentences(sentence))
# Tag two fixed example sentences, then optionally a user-supplied one.
S1 = 'The chairman of the board is completely bold .'
pos_s1 = POScheck(S1)
print("pos for: ",S1," => ",pos_s1,'\n\n')
S2 = 'A chair was found in the middle of the road .'
pos_s2 = POScheck(S2)
print("pos for: ",S2," => ",pos_s2,'\n\n')
want_to_try = input('if you want to try custom string press Y\y: \n')
if (want_to_try == 'Y' or want_to_try == 'y'):
    demo_sent = input("Sentence ")
    pos_s3 = POScheck(demo_sent)
    print("pos for: ",demo_sent," => ",pos_s3,'\n\n')
# In[ ]:
# In[ ]:
| true |
55459f2beb9f3ea16d00efa772f892ea89c5e566 | Python | atiger808/opencv-tutorial | /project-demo/Finger_detection.py | UTF-8 | 3,332 | 2.546875 | 3 | [] | no_license | # _*_ coding: utf-8 _*_
# @Time : 2019/9/17 18:09
# @Author : Ole211
# @Site :
# @File : Finger_detection.py
# @Software : PyCharm
import numpy as np
import cv2
import copy
import math
# variables
isBgCaptured = 0 # bool, whether the background captured
triggerSwitch = False  # NOTE(review): not referenced in the visible code
def nothing(x):
    """Trackbar callback: log the new threshold value."""
    print(f'threshold: {x}')
def calculateFingers(res, drawing):  # -> finished bool, cnt: finger count
    """Estimate the number of fingers from a hand contour.

    res     -- hand contour (as returned by cv2.findContours)
    drawing -- image the defect points are drawn onto (mutated in place)
    Returns (True, count) when convexity defects were computed, else (False, 0).
    """
    # convexity defect
    hull = cv2.convexHull(res, returnPoints=False)
    if len(hull) > 3:
        defects = cv2.convexityDefects(res, hull)
        if type(defects) != type(None):  # avoid crashing. (BUG not found)
            cnt = 0
            for i in range(defects.shape[0]):  # calculate the angle
                s, e, f, d = defects[i][0]
                start = tuple(res[s][0])
                end = tuple(res[e][0])
                far = tuple(res[f][0])
                # Side lengths of the triangle (start, end, far).
                a = math.sqrt((end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2)
                b = math.sqrt((far[0] - start[0]) ** 2 + (far[1] - start[1]) ** 2)
                c = math.sqrt((end[0] - far[0]) ** 2 + (end[1] - far[1]) ** 2)
                angle = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))  # cosine theorem
                if angle <= math.pi / 2:  # angle less than 90 degree, treat as fingers
                    cnt += 1
                    cv2.circle(drawing, far, 8, [211, 84, 0], -1)
            return True, cnt
    return False, 0
# Camera setup and threshold trackbar.
cap = cv2.VideoCapture(0)
cap.set(10, 200)
cv2.namedWindow('trackbar')
cv2.createTrackbar('trh1', 'trackbar', 60, 100, nothing)
# Background subtractor used to isolate the moving hand.
fgbg = cv2.createBackgroundSubtractorMOG2(0, 50)
while True:
    ret, frame = cap.read()
    threshold = cv2.getTrackbarPos('trh1', 'trackbar')
    frame = cv2.bilateralFilter(frame, 5, 50, 100)
    frame = cv2.flip(frame, 1)
    w, h = frame.shape[:2]
    # Region of interest rectangle (right part of the frame).
    cv2.rectangle(frame, (int(0.5 * h), 0), (h, int(0.8 * w)), (255, 0, 0), 2)
    cv2.imshow('frame', frame)
    # Foreground mask, lightly eroded to drop speckle noise.
    fgmask = fgbg.apply(frame)
    kernel = np.ones((3, 3), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    img = cv2.bitwise_and(frame, frame, mask=fgmask)
    img = img[0:int(0.8 * w), int(0.5 * h):h]
    cv2.imshow('mask', img)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (41, 41), 0)
    cv2.imshow('blur', blur)
    ret, thresh = cv2.threshold(blur, threshold, 255, cv2.THRESH_BINARY)
    cv2.imshow('thresh', thresh)
    # NOTE(review): contours are taken from the foreground mask (thresh1),
    # not from the thresholded blur computed above — confirm intended.
    thresh1 = copy.deepcopy(fgmask)
    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    length = len(contours)
    maxArea = -1
    if length > 0:
        # Largest contour is assumed to be the hand.
        contours = sorted(contours, key=cv2.contourArea, reverse=True)
        res = contours[0]
        hull = cv2.convexHull(res)
        drawing = np.zeros(img.shape, np.uint8)
        cv2.drawContours(drawing, [res], 0, (0, 255, 0), 2)
        cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
        isFinishCal, cnt = calculateFingers(res, drawing)
        if triggerSwitch is True:
            if isFinishCal is True and cnt <= 2:
                print(cnt)
                # app('System Events').keystroke(' ') # simulate pressing blank space
        cv2.imshow('ouput', drawing)
    # ESC quits.
    k = cv2.waitKey(1) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
| true |
922f5fdd857219214201d2fcea5d6be694c18378 | Python | anushreesrinivas/Data-Science | /inferential_statistics_exercise_2anushreesrinivas.py | UTF-8 | 4,411 | 3.640625 | 4 | [] | no_license |
# coding: utf-8
# # Examining Racial Discrimination in the US Job Market
#
# ### Background
# Racial discrimination continues to be pervasive in cultures throughout the world. Researchers examined the level of racial discrimination in the United States labor market by randomly assigning identical résumés to black-sounding or white-sounding names and observing the impact on requests for interviews from employers.
#
# ### Data
# In the dataset provided, each row represents a resume. The 'race' column has two values, 'b' and 'w', indicating black-sounding and white-sounding. The column 'call' has two values, 1 and 0, indicating whether the resume received a call from employers or not.
#
# Note that the 'b' and 'w' values in race are assigned randomly to the resumes when presented to the employer.
# <div class="span5 alert alert-info">
# ### Exercises
# You will perform a statistical analysis to establish whether race has a significant impact on the rate of callbacks for resumes.
#
# Answer the following questions **in this notebook below and submit to your Github account**.
#
# 1. What test is appropriate for this problem? Does CLT apply?
# 2. What are the null and alternate hypotheses?
# 3. Compute margin of error, confidence interval, and p-value.
# 4. Write a story describing the statistical significance in the context or the original problem.
# 5. Does your analysis mean that race/name is the most important factor in callback success? Why or why not? If not, how would you amend your analysis?
#
# You can include written notes in notebook cells using Markdown:
# - In the control panel at the top, choose Cell > Cell Type > Markdown
# - Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet
#
#
# #### Resources
# + Experiment information and data source: http://www.povertyactionlab.org/evaluation/discrimination-job-market-united-states
# + Scipy statistical methods: http://docs.scipy.org/doc/scipy/reference/stats.html
# + Markdown syntax: http://nestacms.com/docs/creating-content/markdown-cheat-sheet
# </div>
# ****
# In[14]:
import pandas as pd
import numpy as np
from scipy import stats
import math
# In[6]:
# Load the resume/callback dataset (Stata format).
data = pd.io.stata.read_stata('data/us_job_market_discrimination.dta')
# In[3]:
# In[4]:
# Notebook cell output: preview the first rows.
data.head()
# Question: What are the null and alternate hypotheses?
# Answer: Our null hypothesis Ho is that the proportion of callbacks for black-sounding names is equal to the proportion of callbacks for white-sounding names.
#
# Alternate hypothesis Ha: The proportion of callbacks for black-sounding names is not equal to the proportion of callbacks for white-sounding names.
# In[7]:
# Sample sizes for each group.
n_black= len(data[data.race == 'b'])
n_white= len(data[data.race == 'w'])
# In[8]:
# number of callbacks for black-sounding names (and white-sounding names)
sum_black=sum(data[data.race=='b'].call)
sum_white=sum(data[data.race=='w'].call)
# In[10]:
# Callback proportions per group (bare names are notebook display cells).
prop_black= sum_black/n_black
prop_black
# In[11]:
prop_white= sum_white/n_white
prop_white
# In[12]:
# Observed difference in callback proportions.
diff_prop=prop_white-prop_black
diff_prop
# In[15]:
# Standard error of the difference of two proportions: sqrt(p1(1-p1)/n1 + p2(1-p2)/n2)
SE_black = (prop_black * (1 - prop_black)) / n_black
SE_white = (prop_white * (1 - prop_white)) / n_white
SE = math.sqrt(SE_black + SE_white)
SE
# Compute margin of error, confidence interval, and p-value.
#
# At 95% confidence interval we take the critical z-value to be 1.96 to calculate the margin error (ME). Confidence interval would range from diff_prop- ME to diff_prop+ME.
# In[16]:
ME=1.96*SE
ME
# In[17]:
CI=[diff_prop-ME,diff_prop+ME]
CI
# We can now calculate the z-score as the difference of the difference between the two proportions (diff_prop) and our assumed value of the mean i.e. 0 (since u_black=u_white=0) divided by the Standard error.
# In[18]:
z=(diff_prop-0)/SE
z
# Question: Discuss statistical significance.
#
# Answer: Since the value of z is greater than the critical values of 1.96 at 5% significance level, we can say that this value of 4.115 is even less probable than 1.96. Therefore the p-value is even less than 5%. As p-value is less than 0.05 , we reject our null hypothesis that the proportions of callbacks for black-sounding names is equal to the proportion of callbacks for white-sounding names.
#
# Therefore we can go for our alternate hypothesis that people with white-sounding names are more likely to have callbacks than people with black-sounding names.
#
| true |
cc6c5eda714446eee04981caa58d75c22b2493bd | Python | anthonix-hub/bulk-printing-software | /bulk_py-print-V3.0.0.py | UTF-8 | 14,998 | 2.609375 | 3 | [] | no_license | import os
import time
import tkinter as tk
from datetime import date
from tkinter import *
from tkinter import filedialog, ttk
from tkinter.messagebox import *
from tkinter.ttk import Frame, LabelFrame, OptionMenu
import win32com
from PIL import Image, ImageTk
from win32com import client
import win32print
root = Tk()
#**************************************** splash screen ********************************
class SplashScreen(Frame):
    """Borderless splash frame, centered on the screen.

    width/height are fractions of the screen size when useFactor is True,
    otherwise absolute pixel sizes.
    """

    def __init__(self, master=None, width=0.6, height=0.4, useFactor=True):
        Frame.__init__(self, master)
        self.pack(side=TOP, fill=BOTH, expand=YES)

        # get screen width and height
        ws = self.master.winfo_screenwidth()
        hs = self.master.winfo_screenheight()
        w = ws * width if useFactor else width
        # BUG FIX: the window height must scale with the screen *height*
        # (hs); the original used ws*height, sizing it off the width.
        h = hs * height if useFactor else height

        # calculate position x, y so the window is centered
        x = (ws/2) - (w/2)
        y = (hs/2) - (h/2)
        self.master.geometry('%dx%d+%d+%d' % (w, h, x, y))
        self.master.overrideredirect(True)  # no title bar / borders
        self.lift()
def splash():
    """Show the centered "NABTEB" splash window briefly, then destroy it."""
    if __name__ == '__main__':
        origin = Tk()
        sp = SplashScreen(origin)
        # sp.config(bg="red")
        m = Label(sp, text="NABTEB")
        m.pack(side=TOP, expand=YES)
        m.config(bg="#3366ff", justify=CENTER, font=("calibri", 95))
        # p = ImageTk.PhotoImage(Image.open('about.png'))
        # ph = Button(sp,image=p,compound='top',text='about',height=45,width=50,bg='#fff').pack(side=TOP, expand=YES)
        # Button(sp, text="Press this button to kill the program", bg='red', command=origin.destroy).pack(side=BOTTOM, fill=X)
        # Auto-dismiss after 150 ms.
        sp.after(150,origin.destroy)
        # MSG = Label(origin,text='message after splash screen').pack()
        origin.mainloop()
# splash()
# root.iconbitmap(r'about - Copy.ico')
# root.iconphoto(default='True')
root.title('Bulk printer V3.0.0-py')
root.configure(background='#dcebf1')
root.minsize(width=1040,height=700)
root.maxsize(width=1020,height=600)
# root.configure(bg='#dfd')
#*************** Variables used **********************
dir_path = os.getcwd()  # launch directory; the cwd is restored after log setup
print(dir_path)
# Folder (under the launch directory) that collects the print-job logs.
nam = 'Bulk-printed_files'
curr_date = date.today()
dty = str(curr_date)  # per-day sub folder name in ISO form, e.g. '2019-09-17'
folder_path = os.path.join(str(dir_path),str(nam))
#*************** functions **************
def check_folder_exist():
    """Ensure <dir_path>/Bulk-printed_files/<today> exists and publish it.

    Creates the base record folder and the per-day sub folder when they
    are missing, then stores the sub folder's path in ``root.nw_path``
    so later print jobs can log the printed file names inside it.
    """
    if os.path.exists(folder_path):
        print('folder exists')
    else:
        print('does not exit')
        os.mkdir(folder_path)
    os.chdir(folder_path)
    sub_dir = os.path.join(folder_path, dty)
    if os.path.exists(sub_dir):
        print('sub folder exists')
    else:
        os.mkdir(dty)
    # BUG FIX: os.mkdir() returns None, so the original recorded a path
    # ending in ".../None" (and, when only the sub folder was missing,
    # never set root.nw_path at all).  Always record the real path.
    root.nw_path = sub_dir
    print(os.getcwd())
os.chdir(str(dir_path))
check_folder_exist() #***calls function to create a folder for recording names of the printed files
def folder_opener():
    """Ask the user for a folder to print from; remember it on root.filename.

    Also shows the chosen path inside the printer frame.
    """
    root.filename = filedialog.askdirectory(title='folder Finder',initialdir=os.path.dirname('desktop'))
    info_label = Label(printer_frame,text=root.filename,bg='#dda')
    info_label.place(x=2,y=30)
    info_label.configure(text=root.filename)
    # Label.configure(JOB_frame,text=root.filename)
    return root.filename
def file_create():
    """Open (append mode) today's log file, named after the selected folder.

    The open handle is published as the global ``tk.f``; Dir_scan() writes
    the name of every printed file into it.
    NOTE(review): the handle is never explicitly closed — potential
    resource leak; confirm interpreter-exit cleanup is acceptable.
    """
    file_path = os.path.join(str(root.filename)+'.txt')
    txt_var = os.path.basename(str(file_path))
    try:
        nw_file_path = os.path.join(str(root.nw_path),str(txt_var))
        # Stash the file object on the tkinter module so other callbacks can reach it.
        tk.f = open(nw_file_path,'a')
    except:
        showinfo(detail='Sorry something went wrong :(\n\npleace exit and re-enter program !!!')
        root.title('bulk printer')
    # return tk.f
def Dir_scan():
    """Print every file in the selected folder via Word COM automation.

    For each regular file under root.filename: log its name (on screen and
    into the tk.f log file), then open/print/close it through Microsoft
    Word once per requested copy (taken from the 'copies' spinbox).
    """
    f = file_create()
    var_choice_mnu = choice_mnu.get()
    # NOTE(review): slicing from '/Users' assumes the chosen path contains
    # that segment — confirm this holds for the deployment machines.
    dir_name = root.filename[root.filename.find('/Users'):]
    with os.scandir(dir_name) as it:
        for entry in it:
            if entry.is_file():
                files = entry.name
                file_arr = [files]
                for x in file_arr:
                    print(x)
                    df = 'printed -- '+ x +'\n'
                    txt_area.insert(0.0,df)
                    # Record the printed file name in the daily log file.
                    print(x,file=tk.f)
                    #********************* doc & printer control********************************
                    for copies in range(int(var_choice_mnu)):
                        word = win32com.client.Dispatch('Word.Application')
                        time.sleep(0.1)
                        dir_name = root.filename[root.filename.find('/Users'):]
                        word.Documents.Open(os.path.join( str(dir_name),x))
                        word.ActiveDocument.PrintOut()
                        time.sleep(0.1)
                        word.visible = 0
                        word.ActiveDocument.Close()
                    #********************************************************************************
                    root.update() #returns control to the program while doing job
                    disp = 'Bulk printing :'+ x
                    root.title(disp)
    showinfo(detail=' Printing Completed!!!')
    root.title('bulk printer')
    print(os.getcwd())
def file_select():
    """Ask the user for individual files and print each via Word automation.

    Each selected file is echoed into the text area and printed once per
    requested copy (taken from the 'copies' spinbox).
    """
    var_choice_mnu = choice_mnu.get()
    root.select = filedialog.askopenfilenames()
    root.in_select = list(root.select)
    print(root.in_select)
    for x in root.in_select:
        print(str(x))
        txt_area.insert(0.0,x +'\n')
        # NOTE(review): slicing from '/Users' assumes the path contains that
        # segment — confirm for the deployment machines.
        filr_name = x[x.find('/Users'):]
        #******************* files & printer control ************************************
        for copies in range(int(var_choice_mnu)):
            word = win32com.client.Dispatch('Word.Application')
            time.sleep(0.1)
            word.Documents.Open(filr_name)
            word.ActiveDocument.PrintOut()
            time.sleep(0.3)
            word.visible = 0
            word.ActiveDocument.Close()
        #*********************************************************************************
        # txt_area.insert(0.0,filr_name +'\n')
        root.update()
    showinfo(detail=' Printing Completed!!!')
    return filr_name
def Dir_scan2(e):
    """Keyboard-binding wrapper (Ctrl+P): ignore the event, run Dir_scan()."""
    Dir_scan()
def clear():
    """Erase the printed-files list shown in the text area."""
    txt_area.delete(1.0,END)
def exit():
    """Confirm with the user, then destroy the main window.

    NOTE: shadows the builtin exit() within this module.
    """
    ask = askquestion(title='Quit',message='Do you want to close program?')
    if ask == 'yes':
        root.destroy()
        print('application exited')
def exit2(e):
    """Keyboard-binding wrapper (Ctrl+Q): ignore the event, run exit()."""
    exit()
#********************** menus ************************
def help_menu():
    """Show the help dialog describing the program's buttons and shortcuts."""
    showinfo(title='help for bulk printer ',type='ok',message='HELP',detail="*** OPEN FOLDER *** Use the Open folder Button to navigate and select a folder,"
    "which is container for the the job you which to print.\n\n"
    "*** PRINT *** After using the open folder,click on the print button.The print button will take the desired job to the printer for printing.\n\n "
    "*** PRINTER INFO *** this program works with the system's default printer,when a printer is changed, do well to set at as the default printer in the system's settings.\n\n"
    """*** clear field *** this can be used to clear the printed files displayed on the text area. \n\n"""
    """*** copies *** you can select the desired number of copies you wish to print, this can be done by clicking on the up or down arrow on the spinbox on the copies option menu or the up or down key on the keyboard,you can also enter the number from the keyboard. \n\n"""
    "**** QUIT *** Contrl + q can be used to terminate the program, or go to the the exit menu above the program, and click 'YES' on the dialog section.\n\n"
    "*** PROGRAM error *** If the program encounter any error or malfunctions,please quit and restart the program. \n\n"
    """When the program is running, any miscrosoft word document opened will be forced to close. So when using this program do not use miscrosoft word, because the word doc will close""")
def about():
    """Show the about dialog with the program description and credits."""
    showinfo(title='About bulk printer \n version 3.0.0 ',type='ok',message='About',detail="This program is designed for the purpose of helping in the printing of parking Lists,center statistics and other "
    "form of operations required in the smooth running of the examinations. It's major function is to speed up the process of printing any required documents of very "
    "large quantities of files of '.doc','.pdf','.xmls' exetensions in leser time and less efforts and even reduse the number of staffs it may take to do such work. \n\n"
    "This is a ground breaking version and hope to improve on it's fuctionalities, as staffs get to use the program and suggestions may arise as to what might be desired to be included."
    " That will lead to factory recall for improvement and adjustments of feastures thank you.\n\n "
    "********************************************************************************************************************************************************"
    "\t\t created by --ANTHONY EKOH-- \n\nfor the National Business and Technical Examinations Board (NABTEB)\n"
    "******************************************************************************************************************************************************\n\n"
    "Thanks to the staffs and management of ICT department(NABTEB)\n\n my fellow IT guys :\n JUDE ONOHWOSAFA, AUSTINE OGBEIDE ,OSAKUE GODSWILL, WISDOM ADAMS and OSADEBAWMEN OKOYO. \n\n"
    "I dedicate my success to my lovely mum ***** Mrs JUSTINA EKOH ***** \n\n"
    "\t\t\t\t\tAlrights reserved " )
# menu = Menu(root)
# root.config(menu = menu)
# subMenu = Menu(menu,tearoff=False)
# menu.add_cascade(label="File",menu=subMenu )
# subMenu.add_command(label="new Ctr+N")
# subMenu.add_separator()
# subMenu.add_command(label="Exit Ctr+Q",command=exit)
# editMenu = Menu(menu,tearoff=False)
# menu.add_cascade(label="Edit",menu=editMenu)
# editMenu.add_command(label="redo Ctr+Z")
# optionsMenu = Menu(menu,tearoff=False)
# menu.add_cascade(label='options',menu=optionsMenu)
# optionsMenu.add_command(label='print',command='crt+p',underline=0)
# helpmenu = Menu(menu,tearoff=False)
# menu.add_cascade(label='Help',menu=helpmenu)
# helpmenu.add_command(label="About",underline=0,command=about)
# helpmenu.add_separator()
# helpmenu.add_command(label='Help',command=help_menu)
#********************** Frames ***************************
# Style used by the labelled frames.
s = ttk.Style()
s.configure('blue.TLabelframe.Label',text='erer',background='#5cd9e2')
# Frame hierarchy: main_frame holds the display area and printer banner.
main_frame = Frame(root,relief=RIDGE,borderwidth=5)
main_frame.grid(padx=30,pady=66,ipady=0)
display_frame = Frame(main_frame,borderwidth=30,relief=RAISED,width=250)
display_frame.grid(pady=25,padx=33)
printer_frame = ttk.LabelFrame(main_frame, borderwidth=20,style='blue.TLabelframe.Label')
printer_frame.place(x=43,y=0)
JOB_frame = LabelFrame(display_frame,text="",borderwidth=30,style="blue.TLabelframe.Label")
JOB_frame.grid(column=0,padx=2,pady=15)
file_JOB_frame = LabelFrame(display_frame,text="",borderwidth=25,style="blue.TLabelframe.Label")
file_JOB_frame.grid(row=0,column=2,padx=1,pady=0)
buttom_frame = Frame(main_frame)
buttom_frame.grid(sticky='e',pady=1)
#****************** options menu section *******************************
help_photo =ImageTk.PhotoImage(Image.open('help.png'))
option_menu = Button(root,image= help_photo,cursor='hand2',overrelief=GROOVE,text='help',width=50,height=45,compound='top',command=help_menu,bg='#fff')
option_menu.place(x=45,y=10)
about_photo =ImageTk.PhotoImage(Image.open('about.png'))
about_menu = Button(root,image=about_photo,cursor='hand2',overrelief=GROOVE,compound='top',text='about',height=45,width=50,command=about,bg='#fff')
about_menu.place(x=110,y=10)
close_butt = Button(root,height=2,width=7,cursor='hand2',overrelief=GROOVE,font='bold 12',bg='#ec365d',text='exit',command=exit)
close_butt.place(x=340,y=10)
clear_butt = Button(root,height=2,cursor='hand2',overrelief=GROOVE,width=9,bd=2,text='clear field',command=clear,relief=RAISED)
clear_butt.place(x=176,y=10)
# Copies selector (1..50).
choice_mnu = Spinbox(root,from_=1,to=50,width=7,insertbackground="red",wrap='0')
choice_mnu.place(x=270,y=40)
choice_label = Label(root,text='copies',height=1,width=6,font='italic 10',fg='purple',compound='top')
choice_label.place(x=270,y=10)
#********************* printer section *************************
# Show the system's current default printer; printing always targets it.
vok = win32print.GetDefaultPrinterW()
vok2 = win32print.OpenPrinter(vok)
default_ptr = "Detected printer: " + vok
prter_dlog = Label(printer_frame,fg='#afd',bg='grey',width=53,text=default_ptr,borderwidth=7,font='varient 10')
prter_dlog.grid(row=0,column=0,padx=1)
#****************** job printer section *********************
folder_photo =ImageTk.PhotoImage(Image.open('folder.png'))
dir_butt = Button(JOB_frame,image=folder_photo,cursor='hand2',overrelief=GROOVE,height=85,width=90,fg='#000',font='10',compound='top',text='open folder',command=folder_opener,relief=RAISED)
dir_butt.grid(row=2,column=0,rowspan=2,pady=13,padx=0)
file_photo =ImageTk.PhotoImage(Image.open('file.png'))
file_butt = Button(file_JOB_frame,image=file_photo,cursor='hand2',overrelief=GROOVE,height=90,width=80,fg='#000',font='7',compound='top',text='select file(s)',command=file_select,relief=RAISED)
file_butt.grid(row=4,column=0,pady=10)
photo = ImageTk.PhotoImage(file="printer.png")
print_butt = Button(JOB_frame,image=photo,height=75,cursor='hand2',overrelief=GROOVE,width=80,fg='red',bd=4,compound='top',text='print',command=Dir_scan,relief=RAISED)
print_butt.grid(row=3,column=1,columnspan=2,padx=12,pady=15,sticky='e')
# Text area listing the files that have been printed.
txt_area = Text(main_frame,undo=True,bg='#fff',fg="#000",height="30",width='43',bd=1,wrap=WORD)
txt_area.grid(sticky='e',column=2,row=0,columnspan=2,padx=30,ipady=25)
#********************* event Binbings **************
root.bind("<Control-q>",exit2)
root.bind("<Control-p>",Dir_scan2)
info = Label(root,text='(C) by Anthony Ekoh 2018',font='variant 10')
info.grid(sticky='e')
root.mainloop()
| true |
2d00671224c188c3a663f4084168fcdde9f08038 | Python | dalab/matrix-manifolds | /experiments/diff_inconsistency.py | UTF-8 | 700 | 3.125 | 3 | [] | no_license | import torch
# Build a symmetric positive-definite matrix: (A A^T)/2 + I.
mat = torch.randn(4, 4, dtype=torch.float64)
mat = (mat @ mat.transpose(-1, -2)).div_(2).add_(torch.eye(4, dtype=torch.float64))
# Two independent leaves holding the same values, so each backward pass
# accumulates into its own .grad.
mat = mat.detach().clone().requires_grad_(True)
mat_clone = mat.detach().clone().requires_grad_(True)
# Way 1: log|A| = 2 * sum(log(diag(chol(A)))).
chol_mat = mat.cholesky()
logdet1 = 2 * chol_mat.diagonal().log().sum()
# Way 2: log|A| = sum(log(eigenvalues(A))).
w, _ = mat_clone.symeig(eigenvectors=True)
logdet2 = w.log().sum()
print('Are these both log(det(A))?', bool(logdet1 - logdet2 < 1e-8))
logdet1.backward()
logdet2.backward()
# Analytically, d(log|A|)/dA = A^{-1}; compare each gradient against it.
inv_mat = mat.inverse()
print('Does Way 1 yield A^{-1}?', bool(torch.norm(mat.grad - inv_mat) < 1e-8))
print('Does Way 2 yield A^{-1}?', bool(torch.norm(mat_clone.grad - inv_mat) < 1e-8))
| true |
7b4e3bfe057a5abfab253d80c7287f7b31c54bcc | Python | AlertBear/oc-work-tools | /interact/Ldom.py | UTF-8 | 13,341 | 2.734375 | 3 | [] | no_license | #!/usr/bin/python
#
# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
#
import time
import re
import pexpect
import os
from basic import *
class Ldom(object):
    def __init__(self, name, password, port, record=False):
        """
        Arguments:
            name - Logical domain name
            password - Telnet root password
            port - Console (telnet) port number
            record - True: append the pexpect interaction log to the
                file named by the INT_LOG environment variable
        """
        self.name = name
        self.password = password  # Telnet root password
        self.port = port  # Console port
        self.record = record
    def login(self):
        """
        Purpose:
            Telnet to the domain console and log in as root
        Return:
            child - pexpect session sitting at the root shell prompt
        """
        cmd_telnet = 'telnet 0 ' + str(self.port)
        # NOTE(review): a throwaway session (cld) is opened alongside the
        # real one (child); presumably so write access can be seized with
        # '~wy' below — confirm.
        cld = pexpect.spawn(cmd_telnet)
        cld.send('\r')
        child = pexpect.spawn(cmd_telnet)
        # Save the interaction log, test user could review to check the whole
        # process.
        if self.record:
            interact_log = os.getenv("INT_LOG")
            child.logfile = open(interact_log, 'a+')
        child.send('\r')
        prompts = [
            'console login:',
            'Password:',
            '~#',
            pexpect.TIMEOUT,
            pexpect.EOF,
            'You do not have write access']
        # Drive the login dialog until the shell prompt ('~#') is reached.
        while True:
            try:
                i = child.expect(prompts, timeout=300)
            except Exception:
                raise LoginException(
                    "Failed to login %s due to null expect reason" %
                    self.name)
            if i == 0:
                child.sendline('root')
            elif i == 1:
                child.sendline(self.password)
            elif i == 2:
                cld.close()
                return child
            elif i == 3:
                raise LoginException(
                    "Failed to login %s due to incorrect password or TIMEOUT" %
                    self.name)
            elif i == 4:
                raise LoginException("Failed to login %s due to EOF" % self.name)
            elif i == 5:
                # Force write access on the console.
                child.send('~wy\r')
    def sendcmd(self, cmd, expectation='~#', timeout=60, check=True):
        """
        Purpose:
            Execute the command in this domain without any output
        Arguments:
            cmd - Command to be executed
            expectation - Expect the display after the execution
            timeout - Exceed the timeout during the execution will
                raise the timeout exception
            check - True: Check whether the execution be successful or not
                False: No check after the execution
        Return:
            None
        """
        cldconsole = self.login()
        cldconsole.sendline(cmd)
        try:
            cldconsole.expect(expectation, timeout)
        except Exception as e:
            raise ExecuteException(
                "Execution of [{0}] in {1} failed due to:\n{2}".format(
                    cmd,
                    self.name,
                    e))
        if check:
            # Check to ensure the command has been successfully executed
            cldconsole.sendline('echo $?')
            i = cldconsole.expect(
                ['0', '1', pexpect.TIMEOUT, pexpect.EOF], timeout)
            if i != 0:
                raise ExecuteException(
                    "Execution of [{0}] failed in {1}".format(
                        cmd,
                        self.name))
        cldconsole.close()
        # Brief settle delay before the console is used again.
        time.sleep(0.2)
    def retsend_one_line(self, cmd, expectation='~#', timeout=60):
        """
        Purpose:
            Get the execution output of a command in domain,
            ensure there is only one line of the output
        Arguments:
            cmd - Command to be executed
            expectation - Expect the display after the execution
            timeout - Exceed the timeout during the execution will
                raise the timeout exception
        Return:
            output - The output of the execution in domain
        """
        cldconsole = self.login()
        if expectation == '~#':
            expectation = 'root@.*:~#'
        cldconsole.sendline(cmd)
        try:
            cldconsole.expect(expectation, timeout)
        except Exception as e:
            raise Exception(
                "Execution of [%s] failed in %s due to:\n %s" %
                (cmd, self.name, e))
        cldconsole.sendline('echo $?')
        i = cldconsole.expect(
            ['0', '1', pexpect.TIMEOUT, pexpect.EOF], timeout)
        if i != 0:
            raise ReturnException(
                "Execution of [%s] failed in %s" % (cmd, self.name))
        else:
            # Re-send the command; the first two readline() calls skip the
            # echoed command (and, presumably, the prompt line), so the
            # third line read is the single line of output.
            cldconsole.sendline(cmd)
            cldconsole.readline()
            cldconsole.readline()
            output = cldconsole.readline()
        cldconsole.close()
        return output
    def retsend(self, cmd, expectation='~#', timeout=60, check=True):
        """
        Purpose:
            Get the execution output of a command in domain
        Arguments:
            cmd - Command to be executed
            expectation - Expect the display after the execution
            timeout - Exceed the timeout during the execution will
                raise the timeout exception
        Return:
            output - The output of the execution in domain
        """
        cldconsole = self.login()
        if expectation == '~#':
            expectation = 'root@.*:~#'
        cldconsole.sendline(cmd)
        cmd_clear = cmd
        # Clear the echo of the command once send
        cldconsole.expect(cmd_clear)
        try:
            cldconsole.expect(expectation, timeout)
        except Exception as e:
            raise Exception(
                "Failed to execute [%s] in domain due to:\n %s" % (cmd, e))
        output = cldconsole.before
        # NOTE(review): str.strip(cmd_clear) strips any *characters* of cmd
        # from both ends, not the cmd prefix itself — confirm intended.
        output = output.strip(cmd_clear).strip('\r\n')
        if check:
            cldconsole.sendline('echo $?')
            i = cldconsole.expect(
                ['0', '1', pexpect.TIMEOUT, pexpect.EOF], timeout)
            if i != 0:
                raise ReturnException(
                    "Execution of [%s] failed in %s:\n%s" %
                    (cmd, self.name, output))
        cldconsole.close()
        time.sleep(0.2)
        return output
    def reboot(self, count=1, timeout=600):
        """
        Purpose:
            Reboot the domain
        Arguments:
            count - Reboot times
            timeout - If domain doesn't reboot to normal status
                in timeout seconds, will trigger a Exception
        Return:
            None
        """
        i = 0
        cmd = 'reboot'
        while i < count:
            # Wait for the login banner to confirm the domain came back up.
            self.sendcmd(cmd, 'console login:', timeout, check=False)
            i += 1
    def panic(self, count=1, timeout=600):
        """
        Purpose:
            Panic the domain
        Arguments:
            count - Panic times
            timeout - If domain doesn't boot to normal status in timeout seconds,
                will trigger a Exception
        Return:
            None
        """
        i = 0
        # Clobbering rootdir via mdb forces a kernel panic.
        cmd_panic = 'echo "rootdir/W 0" | mdb -kw'
        # Get debug version by check the printf number in mdb,
        # if num == 2 ,debug =False, else num = 3, debug =True
        cmd_get_debug_version = 'echo "log_init::dis" | mdb -k |grep printf |wc -l'
        printf_num_string = self.retsend_one_line(cmd_get_debug_version)
        printf_num = int(printf_num_string.strip())
        if printf_num == 2:
            debug = False
        else:
            debug = True
        # Test system is a debug one
        if debug:
            # "eset?" may appear
            cmd_telnet = 'telnet 0 ' + str(self.port)
            # NOTE(review): `i` is both the panic-iteration counter and the
            # child.expect() match index below — the expect result clobbers
            # the counter; confirm intended.
            while i < count:
                self.sendcmd(cmd_panic, 'rootdir:')
                cld = pexpect.spawn(cmd_telnet)
                cld.send('\r')
                child = pexpect.spawn(cmd_telnet)
                child.send('\r')
                prompts = [
                    'eset?',
                    pexpect.TIMEOUT,
                    pexpect.EOF,
                    'You do not have write access']
                while True:
                    try:
                        i = child.expect(prompts, 60)
                    except Exception:
                        raise LoginException(
                            "Failed to login %s due to null expect reason" %
                            self.name)
                    if i == 0:
                        # Answer the debug-kernel "eset?" question with 'r'
                        # and wait for the login banner.
                        child.sendline('r')
                        try:
                            child.expect(['console login:'], timeout)
                        except Exception as e:
                            raise LoginException(e)
                        else:
                            break
                        finally:
                            cld.close()
                    elif i == 1:
                        raise LoginException(
                            "Failed to login %s due to incorrect password or TIMEOUT" %
                            self.name)
                    elif i == 2:
                        raise LoginException(
                            "Failed to login %s due to EOF" % self.name)
                    elif i == 3:
                        child.send('~wy\r')
                        prompts.pop(i)
                cld.close()
                i += 1
        # Test system is not a debug one
        else:
            # Continue panic will reduce the disk space, need delete the newly
            # generated core dump file
            while i < count:
                # Delete the old crash list file
                prev_crash_list = '/var/tmp/fcior/tmp/prev_crash_list'
                post_crash_list = '/var/tmp/fcior/tmp/post_crash_list'
                cmd_delete_compare_file = "rm -f %s %s" % (
                    prev_crash_list, post_crash_list)
                execute(cmd_delete_compare_file)
                # Create the new crash list file before panic
                cmd_touch_prev_crash_list = "touch %s" % prev_crash_list
                execute(cmd_touch_prev_crash_list)
                # Get all the file under /var/crash/ in domain before panic
                cmd_list_prev_crash_dump = "ls /var/crash/"
                try:
                    output_list_prev_crash_dump = self.retsend(
                        cmd_list_prev_crash_dump)
                except ReturnException:
                    output_list_prev_crash_dump = None
                if output_list_prev_crash_dump is None:
                    has_prev_crash = False
                else:
                    if re.search(r'.', output_list_prev_crash_dump):
                        has_prev_crash = True
                        with open(prev_crash_list, 'r+') as fo:
                            fo.write(output_list_prev_crash_dump)
                    else:
                        has_prev_crash = False
                # Panic the system
                self.sendcmd(cmd_panic, 'console login:', timeout, check=False)
                # If no crash dump before panic
                if not has_prev_crash:
                    cmd = "rm -rf /var/crash/*"
                    # Python 2 print statement (this module targets Python 2).
                    print cmd
                    self.sendcmd(cmd, check=False)
                else:
                    # Create the new crash list after panic
                    cmd_touch_post_crash_list = "touch %s" % post_crash_list
                    execute(cmd_touch_post_crash_list)
                    # Get the file under /var/crash/ after panic
                    cmd_list_post_crash_dump = "ls /var/crash/"
                    try:
                        output_list_post_crash_dump = self.retsend(
                            cmd_list_post_crash_dump)
                    except ReturnException:
                        output_list_post_crash_dump = None
                    if output_list_post_crash_dump is None:
                        pass
                    else:
                        with open(post_crash_list, 'r+') as fo:
                            fo.write(output_list_post_crash_dump)
                    # Get the newly generated coredump file according to diff two
                    # files above
                    output_diff_two_file = None
                    with open(prev_crash_list, 'r') as prev:
                        with open(post_crash_list, 'r') as post:
                            for fprev in prev.readlines():
                                for fpost in post.readlines():
                                    for file in fpost.split():
                                        file = str(file)
                                        if file not in fprev.split():
                                            output_diff_two_file = file
                                            break
                    # Delete the newly generated coredump file
                    if output_diff_two_file is not None:
                        cmd_get_crashdata = "ls -l /var/crash/{0}".format(
                            output_diff_two_file)
                        output_crashdata = self.retsend(cmd_get_crashdata)
                        crash_data = output_crashdata.split()[-1]
                        cmd_rmdata = "rm -rf /var/crash/{0}".format(crash_data)
                        self.sendcmd(cmd_rmdata, check=False)
                        cmd_clear_coredump = "rm -rf /var/crash/{0}".format(
                            output_diff_two_file)
                        self.sendcmd(cmd_clear_coredump, check=False)
                    i += 1
| true |
def232e861f46b9019bd6716f7c4a086c9e858a0 | Python | Trakton/comunicacoes-moveis | /src/main.py | UTF-8 | 1,356 | 2.75 | 3 | [] | no_license | import pandas as pd
import numpy as np
import grid
import models
import fingerprint
import locate
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsRegressor
def main():
    """Train per-BTS path-loss models, build fingerprint grids, and predict
    the locations of the held-out test set."""
    train_data = pd.read_csv('data/train.csv')
    bst_data = pd.read_csv('data/bts.csv')
    train_data = shuffle(train_data)
    # NOTE(review): the split's `test` is immediately overwritten by
    # LocTest.csv below, so the 10% holdout is discarded — confirm intended.
    train, test = train_test_split(train_data, test_size=0.1)
    train = train.dropna()
    test = pd.read_csv('data/LocTest.csv')
    test = test.dropna()
    # Columns 1:3 are coordinates; columns 3:9 are the six path-loss values.
    train_points = train.iloc[:, 1:3].values
    train_path_loss = train.iloc[:, 3:9].values
    bst_points = bst_data.iloc[:, 1:3].values
    location_points = np.concatenate((train_points, bst_points), axis=0)
    latitudes, longitudes = grid.build_location_grid(location_points)
    bts_coordinates = grid.find_bts_coordinates(bst_points, latitudes, longitudes)
    print("location grid with size [{:d}, {:d}] calculated.".format(latitudes.shape[0], longitudes.shape[0]))
    knn = KNeighborsRegressor(n_neighbors=5)
    trained_models = models.train(train_points, train_path_loss, knn)
    fingerprints = fingerprint.get_grids(trained_models, latitudes, longitudes, bts_coordinates)
    locate.predict_test_locations(fingerprints, latitudes, longitudes, test, bst_points)
if __name__ == '__main__':
    main()
fdabdea092454e64ef3c7d48b167fe10bc84bd5c | Python | HermanYang/SDKDocs | /lt_sdk/proto/configs/param_sweep.py | UTF-8 | 882 | 2.671875 | 3 | [] | no_license | import copy
class ParamSweep(object):
    """Enumerates objects built from every combination of swept parameters.

    Each kwargs entry maps a parameter name to the list of values to sweep;
    args are fixed positional arguments passed to base_fn on every call.
    """

    def __init__(self, name, base_fn, *args, **kwargs):
        self.name = name
        self.base_fn = base_fn
        self.args = args  # default args for base_fn, not swept over
        self.kwargs = kwargs  # string -> list

    def generate(self):
        """Yield (object, config_name) for every parameter combination."""
        # Build the cartesian product of all swept parameter values.
        combos = [{}]
        for key, values in self.kwargs.items():
            expanded = []
            for value in values:
                for combo in combos:
                    extended = copy.deepcopy(combo)
                    extended[key] = value
                    expanded.append(extended)
            combos = expanded
        for combo in combos:
            obj = self.base_fn(*self.args, **combo)
            parts = ["{0}^^{1}".format(key, str(value))
                     for key, value in combo.items()]
            yield obj, "~~".join(parts)
| true |
65a26ae44a92b88b86b505c3877f835ca738f6b2 | Python | LiuZechu/CS4246-mini-project | /source_code/agent/models.py | UTF-8 | 2,426 | 2.890625 | 3 | [] | no_license | import torch
import torch.autograd as autograd
import torch.nn as nn
class Base(nn.Module):
    """Common scaffolding for the Q-network variants below.

    Subclasses implement `construct()` to build `self.layers` (and
    optionally `self.features` for a convolutional front end).
    """

    def __init__(self, input_shape, num_actions):
        super().__init__()
        self.input_shape = input_shape
        self.num_actions = num_actions
        self.construct()

    def construct(self):
        """Build the network modules; must be overridden."""
        raise NotImplementedError

    def forward(self, x):
        # Optional conv front end, then flatten per-sample and apply the head.
        if hasattr(self, 'features'):
            x = self.features(x)
        flattened = x.view(x.size(0), -1)
        return self.layers(flattened)

    def feature_size(self):
        """Number of flattened features a single zero input produces."""
        probe = autograd.Variable(torch.zeros(1, *self.input_shape))
        if hasattr(self, 'features'):
            probe = self.features(probe)
        return probe.view(1, -1).size(1)
class DQN(Base):
    """Plain MLP Q-head: feature_size -> 256 -> num_actions."""

    def construct(self):
        hidden = 256
        self.layers = nn.Sequential(
            nn.Linear(self.feature_size(), hidden),
            nn.ReLU(),
            nn.Linear(hidden, self.num_actions),
        )
class AtariDQN(DQN):
    """Convolutional DQN variant: three conv+ReLU stages feeding a 512-unit head."""

    def construct(self):
        conv_stages = [
            nn.Conv2d(self.input_shape[0], 32, kernel_size=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3),
            nn.ReLU(),
        ]
        self.features = nn.Sequential(*conv_stages)
        self.layers = nn.Sequential(
            nn.Linear(self.feature_size(), 512),
            nn.ReLU(),
            nn.Linear(512, self.num_actions),
        )
# Newly added
class BaseAgent(AtariDQN):
    """Epsilon-greedy agent on top of the AtariDQN Q-network."""

    def __init__(self, input_shape, num_actions):
        super().__init__(input_shape, num_actions)

    def act(self, state, epsilon=0.0):
        """Return an epsilon-greedy action.

        Args:
            state: torch tensor [batch, channel, height, width], or a numpy
                array that will be converted and given a batch dimension.
            epsilon (float): probability of taking a uniform random action.

        Returns:
            int: the chosen action index (< self.num_actions).
        """
        if not isinstance(state, torch.FloatTensor):
            # Bug fix: `device` was an undefined global (NameError); move the
            # input onto whatever device the network's parameters live on.
            device = next(self.parameters()).device
            state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        if random.random() > epsilon:
            # Exploit: greedy action under the current Q-value estimates.
            # (Also a fix: `random` was never imported at module level.)
            with torch.no_grad():
                q_values = super().forward(state)
            return int(torch.argmax(q_values))
        # Explore: uniform random action.
        return random.randrange(self.num_actions)
| true |
a44b49709172ea86298d461d90ef365452212639 | Python | DeepakSunwal/Daily-Interview-Pro | /solutions/3x3Sudoku.py | UTF-8 | 1,473 | 3.109375 | 3 | [] | no_license | from functools import reduce
from random import choice
def solver(board):
    """Complete `board` in place by randomized backtracking over blank cells."""
    if isComplete(board):
        return board
    # All (x, y) coordinates that still hold a blank (0).
    blanks = [(x, y) for x in range(3) for y in range(3) if board[y][x] == 0]
    col, row = choice(blanks)
    for candidate in range(1, 10):
        board[row][col] = candidate
        if isValid(board):
            attempt = solver(board)
            if isComplete(attempt):
                return attempt
        # Undo this candidate before trying the next one.
        board[row][col] = 0
    # No candidate worked from this state; hand the partial board back up.
    return board
def validrow(board):
    """Each row may contain a non-zero value at most once (zeros are blanks)."""
    for row in board:
        seen = set()
        for entry in row:
            if entry != 0 and entry in seen:
                return False
            seen.add(entry)
    return True
def validcol(board):
    """Each column may contain a non-zero value at most once (zeros are blanks)."""
    for column in zip(*board):
        seen = set()
        for entry in column:
            if entry != 0 and entry in seen:
                return False
            seen.add(entry)
    return True
def validgrid(board):
    """Return True when the whole 3x3 block has no duplicate non-zero value.

    Bug fix: the original re-created `values` inside the inner loop, so every
    cell was compared against an empty list and the function always returned
    True. The accumulator must persist across all nine cells.
    """
    values = list()
    for i in range(3):
        for j in range(3):
            if board[i][j] in values and board[i][j] != 0:
                return False
            values.append(board[i][j])
    return True
def isValid(board):
    """Valid = no row, column, or whole-grid conflicts (checked in that order)."""
    return all(check(board) for check in (validrow, validcol, validgrid))
def isComplete(board):
    """Return True when every cell is filled (non-zero).

    Rewritten without functools.reduce: the original concatenated all rows
    just to scan them (quadratic) and raised TypeError on an empty board;
    an empty board is now trivially complete.
    """
    return all(cell != 0 for row in board for cell in row)
def main():
    """Solve an empty 3x3 board and print the completed grid."""
    blank_board = [[0] * 3 for _ in range(3)]
    print(solver(blank_board))


if __name__ == '__main__':
    main()
| true |
c3180813f993cc381b26284bde414e1c4d7507f0 | Python | bitwalk123/PyGObject_samples | /gtk_spinbutton.py | UTF-8 | 735 | 3.15625 | 3 | [] | no_license | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class MyWindow(Gtk.Window):
    """Minimal window holding a single right-aligned spin button (0-100)."""

    def __init__(self):
        # Window title is Japanese for "button".
        Gtk.Window.__init__(self, title="ボタン")
        self.set_default_size(0, 0)
        sb = Gtk.SpinButton()
        # Range 0..100; arrow step 1, page step 10; page_size must be 0
        # for spin buttons per GTK convention.
        adjustment = Gtk.Adjustment(value=0, lower=0, upper=100, step_increment=1, page_increment=10, page_size=0)
        sb.set_adjustment(adjustment)
        sb.set_alignment(xalign=1.0)
        sb.connect("value-changed", self.on_value_changed)
        self.add(sb)

    def on_value_changed(self, button):
        # Message is Japanese: "The value changed to {0}."
        print("値が {0} に変わりました。".format(str(button.get_value_as_int())))


# Build the window and enter the GTK main loop.
win = MyWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
| true |
2e726dce34d8ca1356a29428916ef1dc458e9936 | Python | kinegratii/django-echarts | /django_echarts/entities/layouts.py | UTF-8 | 1,790 | 2.703125 | 3 | [
"MIT"
] | permissive | import re
from functools import singledispatch
from typing import List, Union
__all__ = ['LayoutOpts', 'TYPE_LAYOUT_OPTS', 'any2layout']

# Default span (12-column grid units) per position code, and the pattern
# "<position letter><optional span 1-12>" used to parse string layouts.
_defaults = {'l': 8, 'r': 8, 's': 8, 't': 6, 'b': 6, 'f': 12}
_rm = re.compile(r'([lrtbfsa])(([1-9]|(1[12]))?)')


class LayoutOpts:
    """Layout options chosen by the user: a position code plus column spans."""
    __slots__ = ['pos', 'spans', 'start']

    # l=left,r=right,s=stripped,t=top,b=bottom,f=full
    _defaults = {'l': 8, 'r': 8, 's': 8, 't': 6, 'b': 6, 'f': 12}
    _rm = re.compile(r'([lrtbfsa])(([1-9]|(1[12]))?)')

    def __init__(self, pos: str = 'r', spans: List[int] = None):
        self.pos = pos
        self.spans = spans or []
        # Right/bottom positions count as the "start" side.
        self.start = pos in 'rb'

    def stripped_layout(self) -> 'LayoutOpts':
        """Return the horizontally mirrored layout (l<->r); others unchanged."""
        mirror = {'l': 'r', 'r': 'l'}
        if self.pos in mirror:
            return LayoutOpts(pos=mirror[self.pos], spans=self.spans)
        return self

    def __str__(self):
        return f'<LOptions:{self.pos},{self.spans}>'


TYPE_LAYOUT_OPTS = Union[int, List[int], str]


@singledispatch
def any2layout(obj) -> LayoutOpts:
    """Coerce a LayoutOpts / int / list / str into a LayoutOpts instance."""
    raise TypeError('Can not parse LayOpts.')


@any2layout.register(LayoutOpts)
def _(obj) -> LayoutOpts:
    # Already parsed; pass through untouched.
    return obj


@any2layout.register(int)
def _(obj) -> LayoutOpts:
    # A bare span count with the default position.
    return LayoutOpts(spans=[obj])


@any2layout.register(list)
def _(obj) -> LayoutOpts:
    # An explicit list of span counts with the default position.
    return LayoutOpts(spans=obj)


@any2layout.register(str)
def _(obj) -> LayoutOpts:
    # "<pos><span?>" such as "l4"; a missing span uses the position default.
    matched = _rm.match(obj)
    if not matched:
        raise ValueError(f'This layout can not be parsed: {obj}')
    pos = matched.group(1)
    span_text = matched.group(2)
    span = int(span_text) if span_text else _defaults.get(pos, 8)
    return LayoutOpts(pos, [span])
| true |
4c53054a0352d305f066a46c289b78c57564fa29 | Python | dials/dials | /tests/util/test_exclude_images.py | UTF-8 | 6,498 | 2.546875 | 3 | [
"BSD-3-Clause"
] | permissive | """
tests for functions in dials.util.exclude_images.py
"""
from __future__ import annotations
import copy
from unittest.mock import Mock
import pytest
from dxtbx.model import Experiment, ExperimentList, Scan
from dials.array_family import flex
from dials.util.exclude_images import (
_parse_exclude_images_commands,
exclude_image_ranges_for_scaling,
exclude_image_ranges_from_scans,
get_selection_for_valid_image_ranges,
get_valid_image_ranges,
set_initial_valid_image_ranges,
)
def make_scan_experiment(image_range=(1, 100), expid="0"):
    """Make an experiment with a scan covering `image_range` (oscillation 0.0, 1.0)."""
    return Experiment(scan=Scan(image_range, (0.0, 1.0)), identifier=expid)
def make_scanless_experiment(expid="1"):
    """Make an experiment without a scan i.e. single-image dataset"""
    return Experiment(identifier=expid)
def test_parse_exclude_images_commands():
    """Test for namesake function.

    The exclude_images option accepts several equivalent command formats; all
    should parse to the same (experiment-identifier, (first, last)) ranges.
    """
    formats = (
        [["1:101:200"], ["0:201:300"]],  # if given as separate exclude_images=
        [["1:101:200,0:201:300"]],  # if given as exclude_images="1:101:200,0:201:300"
        [
            ["1:101:200", "0:201:300"]
        ],  # if given as exclude_images="1:101:200 0:201:300"
    )
    for command in formats:
        r1 = flex.reflection_table()
        r1.experiment_identifiers()[1] = "1"
        r0 = flex.reflection_table()
        r0.experiment_identifiers()[0] = "0"
        tables = [r0, r1]
        ranges = _parse_exclude_images_commands(command, [], tables)
        assert ranges == [("1", (101, 200)), ("0", (201, 300))]
    experiments = ["1", "2"]
    short_command = [["101:200"]]
    # Without an experiment prefix the command is ambiguous for >1 experiment.
    with pytest.raises(ValueError):
        _ = _parse_exclude_images_commands(short_command, experiments, tables)
    mock_exp = Mock()
    mock_exp.identifier = "1"
    # ...but it is accepted when exactly one experiment is supplied.
    ranges = _parse_exclude_images_commands(short_command, [mock_exp], tables)
    assert ranges == [("1", (101, 200))]
    # Malformed separators / non-numeric bounds raise.
    with pytest.raises(ValueError):
        _ = _parse_exclude_images_commands([["1:101-200"]], [mock_exp], tables)
    with pytest.raises(ValueError):
        _ = _parse_exclude_images_commands([["1:101:a"]], [], tables)
def test_set_get_initial_valid_image_ranges():
    """Test for get/set valid_image_ranges functions"""
    explist = ExperimentList([make_scan_experiment(), make_scanless_experiment()])
    explist = set_initial_valid_image_ranges(explist)
    # The scan experiment gets its full image range; the scanless one gets None.
    assert list(explist[0].scan.get_valid_image_ranges("0")) == [(1, 100)]
    ranges = get_valid_image_ranges(explist)
    assert len(ranges) == 2
    assert list(ranges[0]) == [(1, 100)]
    assert ranges[1] is None
def test_exclude_image_ranges_from_scans():
    """Test for namesake function"""
    explist = ExperimentList(
        [make_scan_experiment(expid="0"), make_scan_experiment(expid="1")]
    )
    exclude_images = [["0:81:100"], ["1:61:80"]]
    r1 = flex.reflection_table()
    r1.experiment_identifiers()[1] = "1"
    r0 = flex.reflection_table()
    r0.experiment_identifiers()[0] = "0"
    tables = [r0, r1]
    explist = exclude_image_ranges_from_scans(tables, explist, exclude_images)
    # A tail exclusion truncates; an interior exclusion splits the range in two.
    assert list(explist[0].scan.get_valid_image_ranges("0")) == [(1, 80)]
    assert list(explist[1].scan.get_valid_image_ranges("1")) == [(1, 60), (81, 100)]
    # Try excluding a range that already has been excluded
    explist = exclude_image_ranges_from_scans(tables, explist, [["1:70:80"]])
    assert list(explist[0].scan.get_valid_image_ranges("0")) == [(1, 80)]
    assert list(explist[1].scan.get_valid_image_ranges("1")) == [(1, 60), (81, 100)]
    # Excluding images is meaningless for a scanless experiment.
    scanlessexplist = ExperimentList([make_scanless_experiment()])
    with pytest.raises(ValueError):
        _ = exclude_image_ranges_from_scans(tables, scanlessexplist, [["0:1:100"]])
    # Now try excluding everything, should set an empty array
    explist = exclude_image_ranges_from_scans(tables, explist, [["1:1:100"]])
    assert list(explist[0].scan.get_valid_image_ranges("0")) == [(1, 80)]
    assert list(explist[1].scan.get_valid_image_ranges("1")) == []
    ## test what happens if a single image is left within the scan
    explist = ExperimentList(
        [make_scan_experiment(expid="0"), make_scan_experiment(expid="1")]
    )
    exclude_images = [["0:81:100"], ["1:76:79"], ["1:81:99"]]
    r1 = flex.reflection_table()
    r1.experiment_identifiers()[1] = "1"
    r0 = flex.reflection_table()
    r0.experiment_identifiers()[0] = "0"
    tables = [r0, r1]
    explist = exclude_image_ranges_from_scans(tables, explist, exclude_images)
    assert list(explist[0].scan.get_valid_image_ranges("0")) == [(1, 80)]
    assert list(explist[1].scan.get_valid_image_ranges("1")) == [
        (1, 75),
        (80, 80),
        (100, 100),
    ]
def test_get_selection_for_valid_image_ranges():
    """Test for namesake function"""
    exp = make_scan_experiment()
    exp.scan.set_valid_image_ranges("0", [(2, 10)])
    refl = flex.reflection_table()
    # z (frame) centroids at 0.5, 1.5, 5.5, 9.5 and 10.5.
    refl["xyzobs.px.value"] = flex.vec3_double(
        [(0, 0, 0.5), (0, 0, 1.5), (0, 0, 5.5), (0, 0, 9.5), (0, 0, 10.5)]
    )
    sel = get_selection_for_valid_image_ranges(refl, exp)
    assert list(sel) == [False, True, True, True, False]
    # Without a scan, everything is selected.
    exp = make_scanless_experiment()
    assert list(get_selection_for_valid_image_ranges(refl, exp)) == [True] * 5
def test_exclude_image_ranges_for_scaling():
    """Test for namesake function."""
    refl1 = flex.reflection_table()
    refl1["xyzobs.px.value"] = flex.vec3_double(
        [(0, 0, 0.5), (0, 0, 1.5), (0, 0, 5.5), (0, 0, 9.5), (0, 0, 10.5)]
    )
    refl1.set_flags(flex.bool(5, False), refl1.flags.user_excluded_in_scaling)
    refl2 = copy.deepcopy(refl1)
    refl1.experiment_identifiers()[0] = "0"
    refl2.experiment_identifiers()[1] = "1"
    explist = ExperimentList(
        [
            make_scan_experiment(image_range=(2, 20), expid="0"),
            make_scan_experiment(image_range=(2, 20), expid="1"),
        ]
    )
    refls, explist = exclude_image_ranges_for_scaling(
        [refl1, refl2], explist, [["1:11:20"]]
    )
    # Only experiment "1" had images excluded; its scan range is truncated.
    assert list(explist[0].scan.get_valid_image_ranges("0")) == [(2, 20)]
    assert list(explist[1].scan.get_valid_image_ranges("1")) == [(2, 10)]
    # The first reflection (z=0.5, before image 2) is flagged in both tables;
    # dataset "1" additionally flags the reflection on its excluded tail.
    assert list(refls[0].get_flags(refls[0].flags.user_excluded_in_scaling)) == [
        True,
        False,
        False,
        False,
        False,
    ]
    assert list(refls[1].get_flags(refls[0].flags.user_excluded_in_scaling)) == [
        True,
        False,
        False,
        False,
        True,
    ]
| true |
1902ee3475aceba8808f41923de97279d7e555d8 | Python | jstac/cycles_moral_hazard | /code/simulate_world_econ_ts.py | UTF-8 | 1,254 | 2.828125 | 3 | [
"BSD-3-Clause"
] | permissive | """
Functions for simulated two country time series
"""
import numpy as np
from integrated_econ import *
def simulate_world_econ(n,
                        country_x,
                        country_y,
                        x0=None,
                        y0=None,
                        stochastic=False):
    """Simulate `n` periods of the integrated two-country economy.

    Args:
        n: number of periods.
        country_x, country_y: country objects exposing .w, .z,
            .current_account(r) and .update(r) (supplied by integrated_econ).
        x0, y0: optional initial wealth overrides.
        stochastic: if True, redraw each country's z ~ U(15, 25) every period.
            NOTE(review): uses `random`, presumably provided by the star
            import of integrated_econ -- confirm.

    Returns:
        (x, y, world_r, ca_x, ca_y). world_r has length n+1 and world_r[0]
        is left uninitialized (np.empty garbage) -- NOTE(review).
    """
    # == Initialize arrays == #
    x = np.empty(n)
    y = np.empty(n)
    ca_x = np.empty(n)
    ca_y = np.empty(n)
    world_r = np.empty(n+1)
    if x0 is not None:
        country_x.w = x0
    if y0 is not None:
        country_y.w = y0
    for t in range(n):
        if stochastic:
            country_x.z = random.uniform(15, 25)
            country_y.z = random.uniform(15, 25)
        # Clear the world deposit market, record states, then advance both
        # countries with the same rate.
        r = global_deposit_rate(country_x, country_y)
        world_r[t+1] = r
        x[t], y[t] = country_x.w, country_y.w
        ca_x[t] = country_x.current_account(r)
        ca_y[t] = country_y.current_account(r)
        country_x.update(r)
        country_y.update(r)
    return x, y, world_r, ca_x, ca_y
def simulate_autarky(n, country_x):
    """Simulate `n` periods of a closed (autarkic) economy.

    Returns (x, autarky_r): x[t] is wealth at t; autarky_r[t+1] is the rate
    applied between t and t+1 (index 0 is left uninitialized by np.empty).
    """
    # == Initialize arrays == #
    wealth_path = np.empty(n)
    rate_path = np.empty(n + 1)
    for period in range(n):
        rate = country_x.autarky_r()
        rate_path[period + 1] = rate
        wealth_path[period] = country_x.w
        country_x.update(rate)
    return wealth_path, rate_path
| true |
86b2660fe22300d7b430e88a7face8ef12242eeb | Python | SunnySingh00/Decision-Tree | /utils.py | UTF-8 | 2,559 | 2.921875 | 3 | [] | no_license | import numpy as np
# Information gain of a split: parent entropy minus weighted child entropy.
def Information_Gain(S, branches):
    """Compute the information gain of a candidate split.

    Args:
        S (float): entropy of the parent node.
        branches (List[List[int]]): per-branch class counts
            (num_branches x num_cls).

    Returns:
        float: S minus the size-weighted entropy of the branches.
    """
    grand_total = sum(sum(branch) for branch in branches)
    weighted_entropy = 0.0
    for branch in branches:
        branch_total = sum(branch)
        if branch_total == 0:
            continue  # an empty branch contributes nothing
        entropy = 0.0
        for count in branch:
            if count != 0:
                p = count / branch_total
                entropy -= p * np.log2(p)
        weighted_entropy += (branch_total / grand_total) * entropy
    return S - weighted_entropy
# TODO: implement reduced error prunning function, pruning your tree on this function
def reduced_error_prunning(decisionTree, X_test, y_test):
    """Greedy reduced-error pruning: visit nodes deepest-first and keep a node
    pruned (splittable=False) whenever validation accuracy does not drop.

    NOTE(review): the two get_accuracy calls pass arguments in opposite
    orders; harmless only because get_accuracy is symmetric.
    """
    ypred = decisionTree.predict(X_test)
    accuracy = get_accuracy(y_test, ypred)
    # Reversed pre-order traversal = children are tried before their parents.
    stack = traverse(decisionTree.root_node, [decisionTree.root_node])[::-1]
    print(stack)
    for node in stack:
        node.splittable = False
        curr_accuracy = get_accuracy(decisionTree.predict(X_test), y_test)
        if curr_accuracy > accuracy:
            accuracy = curr_accuracy
        elif curr_accuracy == accuracy:
            # On a tie, keep the prune only for non-root nodes.
            if node!=decisionTree.root_node:
                accuracy = curr_accuracy
        else:
            # Accuracy dropped: undo the prune.
            node.splittable = True
def traverse(node, array):
    """Append `node`'s subtree (pre-order, excluding `node` itself) to `array`
    and return it; leaves (splittable=False) are not descended into."""
    if not node.splittable:
        return array
    for child in node.children:
        array.append(child)
        traverse(child, array)
    return array
def get_accuracy(ytest, ypred):
    """Fraction of positions where the prediction matches the label."""
    correct = sum(1 for i, actual in enumerate(ytest) if actual == ypred[i])
    return float(correct / len(ytest))
# print current tree
def print_tree(decisionTree, node=None, name='branch 0', indent='', deep=0):
    """Pretty-print the tree rooted at `node` (defaults to the tree root),
    recursing with one extra tab per level; shows per-class sample counts,
    the split dimension for internal nodes and the class for leaves."""
    if node is None:
        node = decisionTree.root_node
    print(name + '{')
    print(indent + '\tdeep: ' + str(deep))
    # Build "count : count : ..." over the distinct labels at this node.
    string = ''
    label_uniq = np.unique(node.labels).tolist()
    for label in label_uniq:
        string += str(node.labels.count(label)) + ' : '
    # [:-2] trims the trailing " : " separator.
    print(indent + '\tnum of samples for each class: ' + string[:-2])
    if node.splittable:
        print(indent + '\tsplit by dim {:d}'.format(node.dim_split))
        for idx_child, child in enumerate(node.children):
            print_tree(decisionTree, node=child, name='\t' + name + '->' + str(idx_child), indent=indent + '\t', deep=deep+1)
    else:
        print(indent + '\tclass:', node.cls_max)
    print(indent + '}')
| true |
836fe01726bc8a94a1067154afbff5e6afed2f06 | Python | lewy95/DebutPython | /basic/variable/variable.py | UTF-8 | 1,395 | 4 | 4 | [] | no_license | import math
import operator
import random
# money per kg
price = 8.5
# kg
weight = 7.5
# total money
money = price * weight
money -= 5
print(money)
# vac = "i am a variable"
# vac = 10086
# print(vac) # 10086
x = y = z = 10099
a, b, c = 1, 2, "haha"
print(type(c)) # <class 'str'>
# id() returns the variable's memory address (CPython identity)
print(id(c)) # 2832763712432
c = 123
print(id(c)) # 140709509824576
# about num
i = 9
f1 = 9.15
f2 = -9.15
f3 = 2.5e2
print(f3 * 102) # exercising scientific-notation floats
print(max(2, 3, 1, 6))
print(pow(2, 3)) # 8
print(round(9.82, 1))
print(math.ceil(f1)) # 10, rounds up
print(math.exp(2)) # 7.38905609893065, e squared
print(math.log2(8)) # 3.0, log base 2 of 8
print(math.log(math.exp(2))) # 2.0
print(math.modf(9.15)[1])
print(operator.abs(f2)) # 9.15, absolute value
print(operator.eq(3, 4)) # False
print(operator.gt(3, 4)) # False
print(random.choice(range(10)))
# about str
greeting = "hello, i am lewy."
print(greeting[3:10]) # lo, i a
print("f" in greeting[3:10]) # False
print("a" in greeting[3:10]) # True
print("lewy \n anna") # escape sequence is interpreted
print(R"lewy \n anna") # raw string, backslash kept literally
print(r"lewy \n anna") # raw string, backslash kept literally
# My name is krala and weight is 21 kg!
print("My name is %s and weight is %d kg!" % ('krala', 21))
rawRange = range(10)
print(type(rawRange))
myTuple = tuple(rawRange)
print(myTuple)
# prints (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
| true |
7f2f7cb3531e7a4a36a2efb726ad9f14db91c4a9 | Python | lynnbaratella/pynteractive-fiction | /FN_strFun.py | UTF-8 | 2,113 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# File: "strFun.py"
Gathers useful string functions, also from "cryptFunctions.py"
"""
def inputError(type):
    """Print a standardized error naming the expected input kind.

    NOTE(review): the parameter shadows the builtin `type`; kept to preserve
    the public signature.
    """
    message = 'ERROR: the input must be ' + type + '.'
    print(message)
# Asks the user to input a string
def promptString(message):
    """Prompt with `message` and return the user's input.

    NOTE(review): input() always returns str on Python 3, so the validation
    loop below can never execute; kept as-is.
    """
    userInput = input(message)
    while type(userInput) != str:
        inputError('string')
        userInput = input(message)
    return userInput
# outputs a boolean: whether the string represents a float or not
def isFloat(val):
    """True when `val` parses as a number AND contains a decimal point
    (so "3" and "1e5" are not considered floats here)."""
    try:
        float(val)
    except ValueError:
        return False
    return '.' in val
def str2num(string):
    """Convert `string` to float (if it looks like one) or to int.

    Prints an error and falls through to None on unparsable strings; returns
    None when the conversion raises TypeError (non-string/number input).
    """
    try:
        asFloat = float(string)
        asInt = int(asFloat)
    except ValueError:
        inputError('string representing an integer value')
    except TypeError:
        return None
    else:
        return asFloat if isFloat(string) else asInt
# Converts a string into the integer number it represents
# if float, truncates with warning
def str2int(string):
    """Truncate `string`'s numeric value to int, warning when precision is lost."""
    try:
        asFloat = float(string)
        asInt = int(asFloat)
        if isFloat(string):
            print('WARNING: your input may have been truncated to an integer value')
        return asInt
    except ValueError:
        inputError('a string representing an integer value')
    except TypeError:
        return None
def find(string, stringList, *args):
    """Return indices of lines in `stringList` containing `string`.

    An optional integer in `args` caps how many matches are collected;
    non-int extras are ignored and all matches are returned.
    """
    limit = None
    # Note: a strict type check (bool does not count) preserved from the original.
    if args and type(args[0]) == int:
        limit = args[0]
    indices = []
    for idx, line in enumerate(stringList):
        if limit is not None and len(indices) >= limit:
            break
        if string in line:
            indices.append(idx)
    return indices
| true |
606ffdd7eb3d8cee53b72db851ce495ccdedc233 | Python | Willyou2/2018Hack112 | /TestController.py | UTF-8 | 7,243 | 2.921875 | 3 | [] | no_license | '''from inputs import devices
from inputs import get_gamepad
while True:
events = get_gamepad()
for event in events:
print(event.ev_type, event.code, event.state)'''
from tkinter import *
from inputs import get_gamepad
from inputs import get_key
from inputs import get_mouse
import msvcrt
import time
def raw_input_with_timeout(prompt, timeout=3):
    """Poll the Windows console for a line of input, giving up after `timeout` seconds.

    Returns the text typed before Enter (without the terminator), or None on
    timeout. `prompt` is currently unused (kept for interface compatibility).
    """
    finishat = time.time() + timeout
    chars = []
    while True:
        if msvcrt.kbhit():
            ch = msvcrt.getche()
            # Bug fix: getche() returns bytes on Python 3; the original
            # compared against the str '\r', so Enter was never detected and
            # ''.join() would have failed on bytes elements.
            if ch in (b'\r', b'\n'):
                return b''.join(chars).decode(errors='replace')
            chars.append(ch)
        else:
            if time.time() > finishat:
                return None
            # Bug fix: the original slept only after a keypress and
            # busy-waited while idle; yield to other threads while waiting.
            time.sleep(0.1)
raw_input_with_timeout(get_gamepad())
'''
while 1:
events = get_gamepad()
for event in events:
print(event.ev_type, event.code, event.state)
from inputs import get_key
while 1:
events = get_key()
for event in events:
print(event.ev_type, event.code, event.state)
from inputs import get_mouse
while 1:
events = get_mouse()
for event in events:
print(event.ev_type, event.code, event.state)
'''
def init(data):
    """Initialize app state: click flag, cursor position/velocity, cursor image.

    NOTE(review): PhotoImage(file="cursor.gif") needs the file next to the
    script and an existing Tk root; confirm load order against run().
    """
    data.clicked = False
    data.pointer = [data.width//2, data.height//2]
    data.velocity = [0,0]
    filename = "cursor.gif"
    data.photo = PhotoImage(file=filename)
    data.Xmoving = False
    data.Ymoving = False
def mousePressed(event,data):
    # Debug stub; the binding that would call this is commented out in run().
    print("blah blah blah")
def keyPressed(event, data):
    """Tk key handler that polls the `inputs` key-event queue once."""
    print("hello1!")
    events = get_key()
    for a in events:
        # NOTE(review): mixes `a.code` with `event.state` -- `event` is the
        # Tk event here; this probably meant `a.state == 1`.
        if a.code == "BTN_SOUTH" and event.state == 1:
            data.clicked = True
            print("hello!")
def timerFired(data):
    # No time-based updates; gamepadWrapper drives the animation instead.
    pass
def redrawAll(canvas, data):
    """Draw the frame: black fill while the south button is held, then the cursor image."""
    if data.clicked:
        canvas.create_rectangle(0,0,data.width,data.height, fill = "black")
    canvas.create_image(data.pointer[0], data.pointer[1], anchor = NW, image = data.photo)
def cancer(data):
    """Block on gamepad events, recursing once per event received.

    NOTE(review): `a in events` is always True for elements of `events`, so
    every event triggers a recursive call; sustained input can exhaust the
    recursion limit. Apparently unused by run().
    """
    events = get_gamepad()
    for a in events:
        if a in events:
            cancer(data)
    return None
def gamepad(data):
    """Poll gamepad events once and update app state.

    BTN_SOUTH press/release toggles data.clicked, BTN_SELECT exits the
    program, and the ABS_X/ABS_Y stick axes set data.velocity (raw state
    scaled by 1/10000). Afterwards the pointer is advanced by the velocity
    (screen y is inverted).
    """
    events = get_gamepad()
    for a in events:
        print(a.code)
        if a.code == "BTN_SOUTH" and a.state == 1:
            data.clicked = True
            print("hello!")
        elif a.code == "BTN_SOUTH" and a.state == 0:
            data.clicked = False
        elif a.code == "BTN_SELECT" and a.state == 1:
            exit()
        print(a.code, a.state)
        if a.code == "ABS_X":
            data.velocity[0] = a.state/10000
            #data.pointer[0] += a.state/10000
            #data.Xmoving = True
        #elif a.code == "ABS_X" and a.state == 0:
            #data.Xmoving = False
        if a.code == "ABS_Y":
            #data.pointer[1] -= a.state/10000
            data.velocity[1] = a.state/10000
        #elif a.code == "ABS_Y" and a.state == 0:
            #data.Ymoving = False
    print(data.velocity)
    #if data.Xmoving:
    '''events = get_mouse()
    for event in events:
        print(event.ev_type, event.code, event.state)'''
    data.pointer[0] += data.velocity[0]
    data.pointer[1] -= data.velocity[1]
def run(width=300, height=300):
    """Tk animation scaffold: builds the window, then polls the gamepad and
    redraws via canvas.after(1, ...) until the window closes."""
    def redrawAllWrapper(canvas, data):
        canvas.delete(ALL)
        canvas.create_rectangle(0, 0, data.width, data.height,
                                fill='white', width=0)
        redrawAll(canvas, data)
        canvas.update()
    def mousePressedWrapper(event, canvas, data):
        mousePressed(event, data)
        redrawAllWrapper(canvas, data)
    def keyPressedWrapper(event, canvas, data):
        keyPressed(event, data)
        redrawAllWrapper(canvas, data)
    def gamepadWrapper(canvas, data):
        # Poll the gamepad and reschedule itself every ~1 ms.
        gamepad(data)
        redrawAllWrapper(canvas,data)
        canvas.after(1, gamepadWrapper, canvas, data)
    def timerFiredWrapper(canvas, data):
        timerFired(data)
        redrawAllWrapper(canvas, data)
        # pause, then call timerFired again
        canvas.after(1, timerFiredWrapper, canvas, data)
    # Set up data and call init
    class Struct(object): pass
    data = Struct()
    data.width = width
    data.height = height
    data.timerDelay = 1000 # milliseconds
    root = Tk()
    init(data)
    # create the root and the canvas
    canvas = Canvas(root, width=data.width, height=data.height)
    canvas.pack()
    # set up events
    #root.bind("<Button-1>", lambda event:
    #          mousePressedWrapper(event, canvas, data))
    #root.bind("<Key>", lambda event:
    #          keyPressedWrapper(event, canvas, data))
    timerFiredWrapper(canvas, data)
    # and launch the app
    #root.bind("BTN_SOUTH", lambda event: gamepadWrapper(event, canvas, data))
    gamepadWrapper(canvas, data)
    root.mainloop()  # blocks until window is closed
    print("bye!")
# Launch the 600x300 app (blocks until the window closes or BTN_SELECT exits).
run(600, 300)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file presents an interface for interacting with the Playstation 4 Controller
# in Python. Simply plug your PS4 controller into your computer using USB and run this
# script!
#
# NOTE: I assume in this script that the only joystick plugged in is the PS4 controller.
# if this is not the case, you will need to change the class accordingly.
#
# Copyright © 2015 Clay L. McLeod <clay.l.mcleod@gmail.com>
#
# Distributed under terms of the MIT license.
'''
import os
import pprint
import pygame
class PS4Controller(object):
"""Class representing the PS4 controller. Pretty straightforward functionality."""
controller = None
axis_data = None
button_data = None
hat_data = None
def init(self):
"""Initialize the joystick components"""
pygame.init()
pygame.joystick.init()
self.controller = pygame.joystick.Joystick(0)
self.controller.init()
def listen(self):
"""Listen for events to happen"""
if not self.axis_data:
self.axis_data = {}
if not self.button_data:
self.button_data = {}
for i in range(self.controller.get_numbuttons()):
self.button_data[i] = False
if not self.hat_data:
self.hat_data = {}
for i in range(self.controller.get_numhats()):
self.hat_data[i] = (0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
self.axis_data[event.axis] = round(event.value,2)
elif event.type == pygame.JOYBUTTONDOWN:
self.button_data[event.button] = True
elif event.type == pygame.JOYBUTTONUP:
self.button_data[event.button] = False
elif event.type == pygame.JOYHATMOTION:
self.hat_data[event.hat] = event.value
# Insert your code on what you would like to happen for each event here!
# In the current setup, I have the state simply printing out to the screen.
os.system('clear')
pprint.pprint(self.button_data)
pprint.pprint(self.axis_data)
pprint.pprint(self.hat_data)
if __name__ == "__main__":
ps4 = PS4Controller()
ps4.init()
ps4.listen()''' | true |
6784ce7a1eeb8d91bef1c8f979e5438d139961c8 | Python | lingochamp/gym | /gym/envs/engzo/models.py | UTF-8 | 5,205 | 2.859375 | 3 | [
"MIT"
] | permissive | import random
import pickle
import os
import numpy as np
from gym import Space
from gym.spaces import Discrete
class BaseModel(object):
    """
    Base class for engzo Adaptive Learning Env entities; stores an optional id.
    """

    def __init__(self, _id=None):
        # `_id` is typically assigned later as a positional index.
        self._id = _id
class KnowledgeGroup(BaseModel):
    """A bundle of Knowledge items sharing one difficulty level.

    NOTE(review): BaseModel.__init__ is never invoked, so instances carry no
    `_id` attribute.
    """

    def __init__(self, level, kg_num, preliminaries=None):
        # Spawn kg_num knowledges that point back at this group.
        self.knowledges = [Knowledge(self) for _ in range(kg_num)]
        self.preliminaries = preliminaries
        self.level = level
class Knowledge(BaseModel):
    """
    Knowledge is an unique and specific entry to learn
    """

    def __init__(self, group=None):
        # The single KnowledgeGroup this knowledge belongs to (or None).
        self.group = group

    def add_to(self, group):
        # NOTE(review): `self.groups` is never initialized anywhere in this
        # file (only `self.group` exists), so calling add_to raises
        # AttributeError; left as-is pending a design decision.
        if group not in self.groups:
            self.groups.append(group)

    def level(self):
        """Difficulty level inherited from the owning group."""
        return self.group.level

    def sibling(self):
        """Other knowledges in the same group (excluding self)."""
        return [x for x in self.group.knowledges if x is not self]

    def preliminaries(self):
        """All knowledges of the group's preliminary groups, flattened."""
        if self.group.preliminaries:
            # Bug fix: the original flattened with `reduce(list.__add__, ...)`
            # but `reduce` is never imported, raising NameError on Python 3.
            return [k for grp in self.group.preliminaries for k in grp.knowledges]
        else:
            return []
class Activity(BaseModel):
    """
    Activity is a wrapper class for action vector
    """
    def __init__(self, psi, ks):
        """
        Args:
            psi (:obj:`ndarray`): Requirement of each knowledge.
            ks (list of :obj:`Knowledge`): All knowledge.
        """
        self.psi = psi
        # Indices where psi is non-zero (tuple of index arrays from np.nonzero).
        self.knowledge_indexes = np.nonzero(psi)
        # The Knowledge objects this activity actually exercises.
        self.knowledges = [ks[i] for i in self.knowledge_indexes[0]]
        self.related_knowledge_indexes = self.__related_knowledge_indexes()
        self.preliminary_knowledge_indexes = self.__preliminary_knowledge_indexes()
    def __related_knowledge_indexes(self):
        """
        Returns: Index of self + knowledges in same group

        NOTE(review): built from a set, so the resulting ndarray order is
        arbitrary (not sorted).
        """
        ks = set(self.knowledges)
        for k in self.knowledges:
            ks.update(k.sibling())
        return np.array([k._id for k in ks])
    def __preliminary_knowledge_indexes(self):
        # Ids of all prerequisite knowledges, deduplicated; returned as a
        # plain list (unlike the ndarray above).
        ks = set()
        for k in self.knowledges:
            ks.update(k.preliminaries())
        return [k._id for k in ks]
class ActivitySpaceWrapper(Space):
    """
    A ActivitySpaceWrapper for ITS,
    It's a 2-D array of knowledges
    """

    def __init__(self, spaces):
        # NOTE(review): Space.__init__ is intentionally not called here.
        self.spaces = spaces

    def sample(self):
        """Draw one activity uniformly at random (numpy RNG)."""
        index = np.random.randint(len(self.spaces))
        return self.spaces[index]

    def contains(self, x):
        """Membership test against the explicit activity list."""
        return x in self.spaces
##-------------------- Help Methods --------------------
def _generate_groups(level, kg_max=200, group_kg_max=5):
    """
    Generate Knowledge Groups
    Args:
        level (Int): The difficulty of knowledge group
        kg_max (Int): The maximum of total knowledges
        group_kg_max (Int): The maximum knowledge number in a knowledge group
    """
    remain = kg_max
    groups = []
    for i in range(kg_max):
        if remain > 0:
            # Each group holds 1..group_kg_max knowledges (uniform draw); the
            # last group may push the total slightly past kg_max.
            n = Discrete(group_kg_max).sample() + 1
            groups.append(KnowledgeGroup(level, n))
            remain -= n
    return groups
def _generate_knowledges(kg_max=200, group_kg_max=5.):
    """Build the full knowledge pool and wire up group prerequisites.

    Returns the flat list of Knowledge objects with sequential `_id`s.
    """
    # Split the knowledge budget across difficulty levels 1..3 (50%/35%/15%).
    d = np.random.multinomial(kg_max, [5 / 10., 3.5 / 10., 1.5 / 10.])
    # Bug fix: the original flattened with `reduce(list.__add__, ...)` but
    # `reduce` is never imported, raising NameError on Python 3.
    groups = [g for i, x in enumerate(d)
              for g in _generate_groups(i + 1, x, group_kg_max)]
    for group in groups:
        if group.level > 1:
            # Prerequisites are a random subset of the next-easier level.
            gs = [g for g in groups if g.level == group.level - 1]
            group.preliminaries = random.sample(gs, Discrete(len(gs)).sample())
    ks = [k for grp in groups for k in grp.knowledges]
    for i, k in enumerate(ks):
        k._id = i
    return ks
def _generate_activities(ks, num_activity_per_knowledge):
activities = []
K = len(ks)
# Each activity attaches exactly 1 main knowledge and 0 to 2 minor knowledges
for main_knowledge in ks:
psi_bin_size = 1. / num_activity_per_knowledge
for i in range(num_activity_per_knowledge):
act_psi = np.zeros(K)
# Set main knowledge
psi_low, psi_high = max(.1, i * psi_bin_size), min(1., (i + 1) * psi_bin_size)
act_psi[main_knowledge._id] += np.random.uniform(psi_low, psi_high)
# Set minor knowledge
# Choose minor knowledge from knowledge in same level or preliminary level group
# Random from 0. to main knowledge psi
knowledge_candidates = [k for k in ks if
k.group.level <= main_knowledge.group.level and k != main_knowledge]
num_minor_knowledge = random.randint(0, 2)
assert len(knowledge_candidates) >= num_minor_knowledge
for k in random.sample(knowledge_candidates, num_minor_knowledge):
act_psi[k._id] += np.random.uniform(0., act_psi[main_knowledge._id])
activities.append(act_psi)
return np.asarray(activities)
def main():
    """Regenerate the pickled knowledge pool + activity matrix used by the env."""
    data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets/activities.pkl')
    output = open(data_file, 'wb')
    ks = _generate_knowledges()
    acts = _generate_activities(ks, 5)
    # Two sequential pickle records: the knowledges, then the psi matrix.
    pickle.dump(ks, output)
    pickle.dump(acts, output)
    output.close()


if __name__ == '__main__':
    main()
| true |