blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5741706ca30f39ec558cfca8281448e8a632b827 | Python | Mouse321/python | /Lesson_2/lesson2.5.py | UTF-8 | 180 | 2.8125 | 3 | [] | no_license | from pycat.window import Window
# Create a window with a single bat sprite and let the user choose its x position.
window = Window()
s = window.create_sprite()
s.image = "bat-b.png"
# Read the horizontal position from the user (string -> int below).
x = input("Enter bat position: ")
s.x = int(x)
s.y = 300
window.run() | true |
ead587e3fb66a1d8ec174a24afbe5957783337f0 | Python | BaoBao0406/Machine-Learning | /Python Basic for ML and DL Book3/Ensemble method Majority Vote Classifier.py | UTF-8 | 2,306 | 2.9375 | 3 | [] | no_license | from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
import warnings
warnings.filterwarnings("ignore")
# Use only classes 1 and 2 of iris (rows 50:) and two features (sepal width, petal length).
iris = datasets.load_iris()
X, y = iris.data[50:, [1, 2]], iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=1, stratify=y)
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
import numpy as np
# Three base classifiers; LR and KNN are scale-sensitive so they get a StandardScaler pipeline.
clf1 = LogisticRegression(penalty='l2', C=0.001, random_state=1)
clf2 = DecisionTreeClassifier(max_depth=1, criterion='entropy', random_state=0)
clf3 = KNeighborsClassifier(n_neighbors=1, p=2, metric='minkowski')
pipe1 = Pipeline([['sc', StandardScaler()], ['clf', clf1]])
pipe3 = Pipeline([['sc', StandardScaler()], ['clf', clf3]])
clf_labels = ['Logistic regression', 'Decision tree', 'KNN']
print('10-fold cross validation:\n')
# Baseline: ROC AUC of each individual classifier.
for clf, label in zip([pipe1, clf2, pipe3], clf_labels):
    scores = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring='roc_auc')
    print('ROC AUC: %0.2f (+/- %0.2f) [%s]' % (scores.mean(), scores.std(), label))
from MajorityVoteClassifier import MajorityVoteClassifier
# Use Majority Vote Classifier
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
clf_labels += ['Majority voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
# Compare plain accuracy of the three base models and the ensemble.
for clf, label in zip(all_clf, clf_labels):
    scores = cross_val_score(estimator=clf, X=X_train, y=y_train, cv=10, scoring='accuracy')
    print('Accuracy: %0.2f (+/- %0.2f) [%s]' % (scores.mean(), scores.std(), label))
# Get parameter from VotingClassifier
print(mv_clf.get_params())
# Use GridSearch to find the best parameters
from sklearn.model_selection import GridSearchCV
# Parameter names follow the nested-estimator naming shown by get_params() above.
params = {'decisiontreeclassifier__max_depth': [1, 2],
          'pipeline-1__clf__C': [0.001, 0.1, 100.0]}
grid = GridSearchCV(estimator=mv_clf, param_grid=params, cv=10, scoring='roc_auc')
grid.fit(X_train, y_train)
print('Best parameters: %s' % grid.best_params_)
print('Accuracy: %.2f' % grid.best_score_)
| true |
4d68b18633bd4669612a063b88c6bed47a5b645a | Python | samodle/VAE-Job-Analysis | /get_model.py | UTF-8 | 1,215 | 2.703125 | 3 | [] | no_license | from keras.models import Model
from keras.layers import *
import keras.backend as K
def get_simple_model(timesteps = 2500, input_dim = 100, latent_dim = 25):
    """Build an LSTM variational autoencoder.

    Returns (LSTM_VAE, encoder): the full VAE model with the KL loss
    attached via add_loss, and an encoder mapping inputs to sampled
    latent vectors z.
    """
    inputs = Input(shape=(timesteps, input_dim))
    # Timesteps filled with -1 are treated as padding and masked out.
    masked = Masking(mask_value=-1)(inputs)
    encoded = LSTM(2*latent_dim)(masked)
    # Parameters of the latent Gaussian.
    z_mean = Dense(latent_dim)(encoded)
    z_log_sigma = Dense(latent_dim)(encoded)
    # Reparameterized sample z = mean + exp(log_sigma) * eps.
    z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_sigma])
    decoded = Dense(2*latent_dim)(z)
    decoded = RepeatVector(timesteps)(decoded)
    decoded = LSTM(input_dim, activation = 'sigmoid', return_sequences=True)(decoded)
    LSTM_VAE = Model(inputs, decoded)
    # Only the KL term is attached here.  NOTE(review): a reconstruction
    # loss must be supplied at compile time elsewhere -- confirm.
    vae_loss_ = vae_loss(z_mean, z_log_sigma)
    LSTM_VAE.add_loss(vae_loss_)
    LSTM_VAE.add_metric(vae_loss_, name = 'vae_loss')
    encoder = Model(inputs, z)
    return LSTM_VAE, encoder
def sampling(args):
    """Reparameterization trick: draw z = mean + exp(log_sigma) * eps, eps ~ N(0, I)."""
    mean, log_sigma = args
    noise = K.random_normal(shape=K.shape(mean))
    return mean + K.exp(log_sigma) * noise
def vae_loss(z_mean, z_log_sigma, factor = 1e-3):
    """Scaled KL divergence between N(z_mean, exp(z_log_sigma)) and N(0, I)."""
    kl_terms = 1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma)
    kl_loss = -0.5 * K.mean(kl_terms, axis=-1)
    return factor * kl_loss
if __name__ == '__main__':
    # Smoke test: build the VAE (encoder is discarded).
    model,_ = get_simple_model()
print(model.summary()) | true |
8b475e1a8b36bb546a6c50440aa337a49489347f | Python | Alexbelobr/online-shop | /re_num1.py | UTF-8 | 89 | 3.0625 | 3 | [] | no_license | import re
# Greedy match: 'a', then any run of letters b-f, ending with 'f' -> 'abcdf'.
text = 'abcdfghjk'
pattern = re.compile('a[b-f]*f')
parser = pattern.search(text)
print(parser.group())
9a4d08910cb05246cac680c41297921dadae45e9 | Python | shraddha1261/DS | /prac6.py | UTF-8 | 1,272 | 3.90625 | 4 | [] | no_license | list_student_rolls = [1,90,51,12,48,6,34]
# Selection sort: each pass moves the smallest remaining roll number to the front.
print("Selection sort")
for i in range(len(list_student_rolls)):
    best = i
    # Scan the unsorted tail for a smaller value.
    # (loop variable j intentionally kept -- later code in this file reads it)
    for j in range(i + 1, len(list_student_rolls)):
        if list_student_rolls[j] < list_student_rolls[best]:
            best = j
    list_student_rolls[i], list_student_rolls[best] = list_student_rolls[best], list_student_rolls[i]
print(list_student_rolls)
print("\n\nInsertion sort")
# Insertion sort: grow a sorted prefix, inserting each element into place.
# BUG FIX: the original iterated range(j, ...), reusing a stale loop
# variable left over from earlier code; insertion sort must start at index 1.
for i in range(1, len(list_student_rolls)):
    value = list_student_rolls[i]
    j = i - 1
    # Shift larger elements right until the insertion point is found.
    while j >= 0 and value < list_student_rolls[j]:
        list_student_rolls[j+1] = list_student_rolls[j]
        j -= 1
    list_student_rolls[j+1] = value
print(list_student_rolls)
# Bubble sort: repeatedly swap adjacent out-of-order neighbours.
print("\n\nBubble sort")
list_of_number = [1,90,51,12,48,6,34]
def bubbleSort(list_of_number):
    """Sort list_of_number in place, ascending."""
    n = len(list_of_number)
    for done in range(n - 1):
        # After each pass the last `done + 1` slots hold their final values.
        for pos in range(n - 1 - done):
            if list_of_number[pos] > list_of_number[pos + 1]:
                list_of_number[pos], list_of_number[pos + 1] = (
                    list_of_number[pos + 1], list_of_number[pos])
bubbleSort(list_of_number)
print(list_of_number)
| true |
2fd5f3ccd5ec025d5d22e177847e2d1c1695b0e9 | Python | nikita1998ivanov/Lab-rabota | /5_2.py | UTF-8 | 300 | 3.09375 | 3 | [] | no_license | import csv
# Read students.csv (semicolon-separated), skip the header, and sort the rows.
# NOTE(review): the csv module is imported above but the file is parsed by hand;
# csv.reader(f, delimiter=';') would handle quoting correctly.
my_list = []
with open("students.csv") as f:
    next(f)  # skip the header line
    for line in f:
        temp = []
        # Expect exactly four ;-separated fields per row.
        h, nm, a, db = line.split(";")
        temp.append(h)
        temp.append(nm)
        temp.append(a)
        temp.append(db)
        my_list.append(temp)
# Lexicographic sort by the first field.
my_list.sort()
print(my_list)
| true |
40b35283ff02d271e561cc7633239617a28f3630 | Python | zxjzel/spider | /code/proxies_spider.py | UTF-8 | 3,882 | 2.578125 | 3 | [] | no_license | from lxml import etree
import requests
class ProxiesSpider():
    """Scrape free proxies from kuaidaili.com and filter out the dead ones."""
    def __init__(self):
        self.url = 'https://www.kuaidaili.com/free/'
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"}
        self.proxies = []  # list of {"http": "http://ip:port"} dicts
        self.pages = []
    def get_response(self,url):
        """GET *url* and return the decoded response body."""
        r = requests.get(url,headers=self.headers)
        return r.content.decode()
    def get_proxies(self,r):
        """Parse one listing page *r* and append found proxies to self.proxies."""
        html = etree.HTML(r)
        tr_list = html.xpath("//tr")
        for tr in tr_list:
            proxie = {}
            # Header rows yield no IP/PORT cells and are skipped via the None default.
            ip = tr.xpath("./td[@data-title='IP']/text()")[0] if tr.xpath("./td[@data-title='IP']/text()") else None
            port = tr.xpath("./td[@data-title='PORT']/text()")[0] if tr.xpath("./td[@data-title='PORT']/text()") else None
            if ip and port:
                proxie["http"] = 'http://'+ip+':'+port
                self.proxies.append(proxie)
    def check_proxies(self):
        """Probe each proxy against douban and drop proxies that fail.

        NOTE(review): removing items from self.proxies while iterating it can
        skip elements; iterating a copy would be safer.
        """
        for proxie in self.proxies:
            try:
                response = requests.get('https://book.douban.com/tag/?view=type&icn=index-sorttags-hot',headers = self.headers,proxies=proxie,timeout = 3)
                print(response.status_code)
                assert response.status_code == 200
            except:
                print("{}此代理不可用".format(proxie["http"]))
                self.proxies.remove(proxie)
    def run(self):
        # 1. build the page URL
        # 2. send the request and collect the response
        # 3. store the parsed proxies
        for i in range(25):
            r = self.get_response("https://www.kuaidaili.com/free/inha/{}/".format(i+1))
            self.get_proxies(r)
        print(self.proxies)
        self.check_proxies()
        print(self.proxies)
# Check that the collected proxies are actually usable
class YunProxie():
    """Scrape proxies from ip3366.net, following pagination, and verify them."""
    def __init__(self):
        self.start_url = 'http://www.ip3366.net/'
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36"}
        self.proxies = []  # list of {"http"/"https": "scheme://ip:port"} dicts
    def get_response(self,url):
        """GET *url* and decode the body (site is served as gb2312)."""
        r = requests.get(url,headers=self.headers)
        return r.content.decode("gb2312")
    def get_data(self,r):
        """Parse page *r*: collect proxies and return the next page URL (or None)."""
        html=etree.HTML(r)
        ip_list = html.xpath("//tr/td[position()=1]/text()")
        port_list = html.xpath("//tr/td[position()=2]/text()")
        kind_list = html.xpath("//tr/td[position()=4]/text()")
        next_url = html.xpath("//div[@id='listnav']/ul/a[last()-1]/@href")
        next_url = "http://www.ip3366.net/"+next_url[0] if len(next_url)>0 else None
        for ip in ip_list:
            proxie = {}
            # NOTE(review): ip_list.index(ip) finds the FIRST occurrence, so
            # duplicate IPs would pair with the wrong port/kind.
            if kind_list[ip_list.index(ip)] == 'HTTP':
                proxie["http"] = "http://"+ip+':'+port_list[ip_list.index(ip)]
            else:
                proxie["https"] = "https://"+ip+':'+port_list[ip_list.index(ip)]
            self.proxies.append(proxie)
        return next_url
    def check_proxies(self):
        """Probe each proxy against douban and drop proxies that fail.

        NOTE(review): mutating self.proxies while iterating it can skip items.
        """
        for proxie in self.proxies:
            try:
                response = requests.get('https://book.douban.com/tag/?view=type&icn=index-sorttags-hot',headers = self.headers,proxies=proxie,timeout = 3)
                print(response.status_code)
                assert response.status_code == 200
            except:
                print("此代理不可用")
                self.proxies.remove(proxie)
    def run(self):
        # 1. send the first request and get the response
        r = self.get_response(self.start_url)
        # 2. parse it, collecting proxies and the next page link
        next_url = self.get_data(r)
        while next_url:
            r = self.get_response(next_url)
            # keep paginating until no "next" link is found
            next_url = self.get_data(r)
        print(self.proxies)
        self.check_proxies()
        print(self.proxies)
if __name__ == '__main__':
    # ProxiesSpider (kuaidaili.com) is kept here as an alternative source.
    # a = ProxiesSpider()
    # a.run()
    a = YunProxie()
    a.run()
| true |
6eb30fd837578c22b9e211ebdf62663c32263dd6 | Python | Eddie02582/Leetcode | /Python/137_Single Number II.py | UTF-8 | 2,054 | 3.78125 | 4 | [] | no_license | '''
Given a non-empty array of integers, every element appears three times except for one, which appears exactly once. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
Example 1:
Input: [2,2,3,2]
Output: 3
Example 2:
Input: [0,1,0,1,0,1,99]
Output: 99
'''
class Solution(object):
    """LeetCode 137: every element appears three times except one; find it."""

    def singleNumber_counter(self, nums):
        """Frequency-count approach. O(n) time, O(n) extra space.

        BUG FIX: the original filtered for keys with count == 3, which
        returns one of the *tripled* values instead of the single one.
        """
        from collections import Counter
        counter = Counter(nums)
        return [key for key in counter.keys() if counter[key] == 1][-1]

    def singleNumber_sum(self, nums):
        """Set-sum trick: 3*sum(set) counts every value three times, so the
        difference from sum(nums) is exactly twice the single number.

        Uses // so an int is returned (the original / produced a float).
        """
        return (3 * sum(set(nums)) - sum(nums)) // 2

    def singleNumber(self, nums):
        """Bitwise approach with O(1) space.

        Generalizes the XOR trick of problem 136: each bit needs a
        two-bit counter (m1, m2) cycling through three states, so that
        bits seen a multiple of three times cancel out.  For an input
        bit of 1 the state moves m1: 0->0->1->0, m2: 0->1->0->0; an
        input bit of 0 leaves the state unchanged.
        """
        m1, m2 = 0, 0
        for n in nums:
            tmp = m1
            m1 = m1 ^ n & (m1 ^ m2)
            m2 = m2 ^ n & (~tmp)
        # After the loop m2 holds the bits seen exactly once.
        return m2

    def singleNumber_compare(self, nums):
        """Sort, then scan triples; the first mismatching triple (or a
        trailing lone element) starts with the answer.  Mutates nums.
        """
        nums.sort()
        for i in range(0, len(nums), 3):
            if len(nums) <= i + 1:
                # Lone trailing element -- it is the single number.
                return nums[i]
            elif nums[i] != nums[i+1] or nums[i] != nums[i+2]:
                return nums[i]
# Sanity checks for the sort-and-scan variant (middle, last and first positions).
sol = Solution()
assert sol.singleNumber_compare([2,2,3,2])==3
assert sol.singleNumber_compare([0,1,0,1,0,1,99])==99
assert sol.singleNumber_compare([1,2,5,2,5,5,2])==1
| true |
33c986cdc8fe8831afc7cc9b88fc2aa3d3093b19 | Python | idiom/pywinrm | /winrm/__init__.py | UTF-8 | 1,127 | 2.5625 | 3 | [
"MIT"
] | permissive | from winrm.protocol import Protocol
class Response(object):
    """Response from a remote command execution"""

    def __init__(self, args):
        # args is the (stdout, stderr, exit status) triple from the protocol.
        out, err, code = args
        self.std_out = out
        self.std_err = err
        self.status_code = code

    def __repr__(self):
        # TODO put tree dots at the end if out/err was truncated
        template = '<Response code {}, out "{}", err "{}">'
        return template.format(
            self.status_code, self.std_out[:20], self.std_err[:20])
class Session(object):
#TODO implement context manager methods
    def __init__(self, url, auth):
        """Create a WinRM session against *url*; *auth* is (username, password)."""
        #TODO convert short urls into well-formed endpoint
        username, password = auth
        self.protocol = Protocol(url, username=username, password=password)
    def run_cmd(self, command, args=()):
        """Run *command* with *args* remotely and wrap its output in a Response.

        Opens a fresh shell per call and tears it down afterwards.
        """
        #TODO optimize perf. Do not call open/close shell every time
        shell_id = self.protocol.open_shell()
        command_id = self.protocol.run_command(shell_id, command, args)
        rs = Response(self.protocol.get_command_output(shell_id, command_id))
        self.protocol.cleanup_command(shell_id, command_id)
        self.protocol.close_shell(shell_id)
return rs | true |
d6e544eeb9905128d4d8e9b2b35ba74b3254bdb5 | Python | OscarCruz65/Oscar-Cruz | /trimmed mean 08-11-19.py | UTF-8 | 2,068 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Interactive trimmed-mean calculator: keeps reading numbers, and on request
# removes a percentage of extreme values (half from each end) before averaging.
n=1
suma=0       # running sum of all entered values
lista=[]     # sorted list of entered values
n5=1         # counter reused by every trimming loop below
n6=1
# NOTE(review): n never changes, so this loop runs until interrupted.
while n<2:
    n3=int(input('dame un valor'))
    lista.append(n3)
    suma=suma+n3
    cantidad=len(lista)
    lista.sort()
    print(lista)
    n4=int(input('Calculas porcentaje? 1=si, cualquier otro numero no'))
    if n4==1:
        porcentaje=int(input('porcentaje:'))
        porcentaje2=porcentaje/100
        # Number of values to trim in total (may be fractional).
        trimmed=porcentaje2*cantidad
        nfloat=int(trimmed)
        if trimmed%2==0:
            print(trimmed)
            # Trim trimmed/2 values from each end.
            trimmed=trimmed/2
            print(lista)
            while n5<=trimmed:
                suma=suma-lista[0]
                lista.pop(0)
                suma=suma-lista[-1]
                lista.pop(-1)
                n5=n5+1
        elif trimmed%2!=0:
            # Odd count: drop one so an equal number comes off each end.
            trimmed=trimmed-1
            print(trimmed)
            trimmed=trimmed/2
            print(lista)
            while n5<=trimmed:
                suma=suma-lista[0]
                lista.pop(0)
                suma=suma-lista[-1]
                lista.pop(-1)
                n5=n5+1
        elif trimmed!=nfloat:
            # NOTE(review): unreachable -- the two conditions above cover all
            # values, so this rounding branch can never run.
            nfloat=nfloat+0.5
            if trimmed<=nfloat:
                trimmed=int(trimmed)
                print(trimmed)
                trimmed=trimmed/2
                print(lista)
                while n5<=trimmed:
                    suma=suma-lista[0]
                    lista.pop(0)
                    suma=suma-lista[-1]
                    lista.pop(-1)
                    n5=n5+1
            elif trimmed>nfloat:
                trimmed=trimmed+1
                trimmed=int(trimmed)
                print(trimmed)
                trimmed=trimmed/2
                print(lista)
                while n5<=trimmed:
                    suma=suma-lista[0]
                    lista.pop(0)
                    suma=suma-lista[-1]
                    lista.pop(-1)
                    n5=n5+1
        print(lista)
        # Mean of the remaining (trimmed) values.
        resultado=suma/len(lista)
        print(f'x{porcentaje}%=:{resultado}')
# In[ ]:
| true |
31200863db490d3f411c27842a825c2de4563db7 | Python | taro-masuda/leetcode | /0681_NextClosestTime.py | UTF-8 | 1,757 | 3.171875 | 3 | [
"MIT"
] | permissive | class Solution:
def nextClosestTime(self, time: str) -> str:
l = []
l.append(int(time[0]))
l.append(int(time[1]))
l.append(int(time[3]))
l.append(int(time[4]))
if l[0] == l[1] == l[2] == l[3]:
return time
min_positive_dist = 24*60+7
min_negative_dist = 0
positive_candidate = ''
negative_candidate = ''
current_time = (l[0]*10 + l[1])*60 + l[2]*10 + l[3]
for first in l:
if first > 2: # invalid hour(HH) representation
continue
for second in l:
if first*10 + second > 23: # invalid hour(HH) representation
continue
for third in l:
if third > 5: # invalid minute(MM) representation
continue
for fourth in l:
current_str = str(first) + str(second) + ':' + str(third) + str(fourth)
if current_str == time:
continue
candidate_value = (first*10 + second)*60 + third*10 + fourth - current_time
if candidate_value > 0 and candidate_value < min_positive_dist:
min_positive_dist = candidate_value
positive_candidate = current_str
if candidate_value < 0 and candidate_value < min_negative_dist:
min_negative_dist = candidate_value
negative_candidate = current_str
if 24*60 + min_negative_dist < min_positive_dist:
return negative_candidate
else:
return positive_candidate
| true |
82f492fcbbecc1da77e41b7962608c41650f09c3 | Python | krenevych/algo | /labs/L06/task2/user.py | UTF-8 | 1,394 | 3.4375 | 3 | [] | no_license | """
Implement an associative-array interface whose keys are integers
and whose values are strings.
Implement it as a hash table, resolving collisions by chaining.
"""
def init():
    """Called once at program start-up; prepares the hash table."""
    pass
# NOTE: this name shadows the builtin set(); kept for interface compatibility.
def set(key: int, value: str) -> None:
    """Store *value* under *key*.

    If the key is absent, the pair is added; otherwise the value for
    that key is replaced.
    :param key: Key
    :param value: Value
    """
    pass
def get(key: int):
    """Return the value stored for *key*.

    :param key: Key
    :return: The value for the given key, or None if the key is absent.
    """
    return None
def delete(key: int) -> None:
    """Remove the key-value pair for *key*.

    Does nothing if the key is absent.
    :param key: Key
    """
    pass
| true |
b97e448da17ae8b33d24c370731c663ef19bd102 | Python | arinaafan/Tutorial_answers | /python_mod8_answers.py | UTF-8 | 635 | 3.578125 | 4 | [] | no_license | # Module 8
#
# 1. Create two lists, one for each species, containing the first gene identifiers of each line (the so called seed orthologs)
import re
specie1 = []
specie2 = []
my_file = open("Inparanoid_table.txt", 'r')
header_line = my_file.readline()  # consume (skip) the header row
for line in my_file.readlines():
    # Take the first matching Ensembl identifier on each line for each species.
    specie1.append(re.findall(r'ENSTGUP\w+', line)[0])
    specie2.append(re.findall(r'ENSGALP\w+', line)[0])
# 2. Create a dictionary “homologs” with the keys of the seed ortholog of species 1. Each entry should contain the seed ortholog of species 2 such as
homologs = {}
for i in range(len(specie1)):
    homologs[specie1[i]] = specie2[i]
| true |
52c32437d5b842fa433580a07897149034c0e478 | Python | strymsg/01-p4 | /solid-01/invoices.py | UTF-8 | 2,528 | 2.53125 | 3 | [] | no_license | from abc import ABC, abstractmethod
from abstract_invoice import AbstractInvoice
class PhysicialInvoice(AbstractInvoice):
    """Invoice issued at a physical branch.

    Adds a physical receipt id and branch location on top of the
    common invoice fields.
    """

    def __init__(self, seller_id, seller_name, date, number,
                 physical_id, branch_location, products=None, buyer=None):
        self.physical_id = physical_id
        self.branch_location = branch_location
        # overriding __init__
        self.seller_id = seller_id
        self.seller_name = seller_name
        self.date = date
        # Avoid the shared-mutable-default pitfall: each instance gets
        # its own list when no products are supplied.
        self.products = [] if products is None else products
        self.buyer = buyer
        self.number = number

    def _as_dict(self):
        # Single source of truth for the invoice fields; generate_invoice
        # and print_copy previously duplicated this literal.
        return {
            'seller_id': self.seller_id,
            'seller_name': self.seller_name,
            'products': self.products,
            'buyer': self.buyer,
            'date': self.date,
            'number': self.number,
            'physical_id': self.physical_id,
            'branch_location': self.branch_location,
        }

    def generate_invoice(self):
        """Print and return the invoice data as a dict."""
        print('PHYSICAL INVOICE')
        d = self._as_dict()
        print(d)
        return d

    def print_copy(self):
        """Print a human-readable, one-field-per-line copy of the invoice."""
        print('Physical Invoice')
        d = self._as_dict()
        for k,v in d.items():
            print(f' {k}: {v}')
class DigitalInvoice(AbstractInvoice):
    def __init__(self, seller_id, seller_name, date, number,
                 online_id, verification_url, products=[], buyer=None, ):
        """Store the digital-specific fields (online id, verification URL)
        plus the common invoice fields.
        """
        self.online_id = online_id
        self.verification_url = verification_url
        # overriding __init__
        self.seller_id = seller_id
        self.seller_name = seller_name
        self.date = date
        # NOTE(review): mutable default products=[] is shared across calls.
        self.products = products
        self.buyer = buyer
        self.number = number
def generate_invoice(self):
print('DIGITAL INVOICE')
d = {
'seller_id': self.seller_id,
'seller_name': self.seller_name,
'products': self.products,
'buyer': self.buyer,
'date': self.date,
'number': self.number,
'online_id': self.online_id,
'verification_url': self.verification_url
}
print(d)
return d
def verify(self):
pass | true |
7d3a10af6c05f2d7dc6d2c0d7505fee832a61ac5 | Python | bhardwajat/PythonProjects | /gasoline.py | UTF-8 | 631 | 3.984375 | 4 | [] | no_license | gallons = float(input('Please enter the number of gallons of gasoline: '))
# Conversion factors: 3.7854 litres per gallon; presumably 19.5 usable gallons
# of gasoline per barrel of oil, ~20 lb CO2 per gallon burned, and ~1.519
# gallons of ethanol per gallon of gasoline (energy equivalence) -- verify.
liters = 3.7854 *gallons
print (gallons, ' gallons is the equivalent of ',liters,' liters ')
barrels = float(gallons/19.5)
print (gallons, ' gallons of gasoline requires ',barrels,' barrels of oil ')
pounds = float(gallons*20)
print (gallons, ' gallons of gasoline produces ',pounds,' of CO2 ')
ethanol = float(gallons*1.51915456)
print (gallons, ' gallons of gasoline is energy equivalent to ',ethanol,' gallons of ethanol ')
# Assumes a fixed price of 4 USD per gallon.
dollars = float(gallons*4)
print (gallons, ' gallons of gasoline requires ',dollars,' US dollars ')
print ('Thanks for playing')
| true |
f5a9413e44d08a806094f84432197e1c5d1b0e32 | Python | priyanshthakore/deep_learning | /neural_network_basics/loss_optimiser/fuel_example.py | UTF-8 | 1,737 | 2.8125 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_transformer, make_column_selector
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
# Load the fuel-economy data and split features from the 'FE' target.
fuel = pd.read_csv('./fuel.csv')
X = fuel.copy()
# Remove target
y = X.pop('FE')
# Scale numeric columns, one-hot encode object (categorical) columns.
preprocessor = make_column_transformer(
    (StandardScaler(),
     make_column_selector(dtype_include=np.number)),
    (OneHotEncoder(sparse=False),
     make_column_selector(dtype_include=object)),
)
X = preprocessor.fit_transform(X)
y = np.log(y)  # log transform target instead of standardizing
input_shape = [X.shape[1]]
print("Input shape: {}".format(input_shape))
# Uncomment to see original data
fuel.head()
# Uncomment to see processed features
pd.DataFrame(X[:10, :]).head()
# Small fully-connected regression network (single linear output).
model = keras.Sequential([
    layers.Dense(128, activation='relu', input_shape=input_shape),
    layers.Dense(128, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(1),
])
model.compile(
    optimizer='adam',
    loss='mae',
)
history = model.fit(
    X, y,
    # validation_data=(X_valid, y_valid),
    batch_size=128,
    epochs=200,
)
history_df = pd.DataFrame(history.history)
# Start the plot at epoch 5. You can change this to get a different view.
history_df.loc[1:, ['loss']].plot()
learning_rate = 0.1
batch_size = 32
num_examples = 256
# NOTE(review): animate_sgd is not defined in this file -- presumably a helper
# from the originating notebook/course environment; this call fails standalone.
animate_sgd(
    learning_rate=learning_rate,
    batch_size=batch_size,
    num_examples=num_examples,
    # You can also change these, if you like
    steps=50, # total training steps (batches seen)
    true_w=3.0, # the slope of the data
    true_b=2.0, # the bias of the data
)
| true |
cb2c06a06a4956c0ea610ac91ef5ce783dc8848d | Python | NeoBro/ncaa-predict | /predict_score.py | UTF-8 | 3,218 | 2.875 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/env python3
import argparse
import numpy as np
from ncaa_predict.data_loader import load_ncaa_players, load_ncaa_schools, \
load_ncaa_games, get_players_for_team
from ncaa_predict.estimator import *
from ncaa_predict.util import list_arg, team_name_to_id
def get_historical_score(team_id, all_games):
    """Return (team's mean score, mean drop opponents suffer against it).

    The second value averages, over every other school that played this
    team, the difference between that school's overall mean score and
    its mean score in games against team_id.
    """
    own_rows = all_games[all_games["school_id"] == team_id]
    own_mean = own_rows["score"].mean()
    drops = []
    for other_id in all_games["school_id"].unique():
        if other_id == team_id:
            continue
        other_rows = all_games[all_games["school_id"] == other_id]
        overall_mean = other_rows["score"].mean()
        vs_team = other_rows[other_rows["opponent_id"] == team_id]
        vs_team_mean = vs_team["score"].mean()
        # mean() of an empty selection is NaN: the two teams never met.
        if not np.isnan(vs_team_mean):
            drops.append(overall_mean - vs_team_mean)
    return own_mean, np.mean(drops)
if __name__ == "__main__":
    # Command-line interface: two team names plus model/tuning options.
    parser = argparse.ArgumentParser()
    parser.add_argument("team_a")
    parser.add_argument("team_b")
    parser.add_argument(
        "--hidden-units", "-u", default=DEFAULT_HIDDEN_UNITS,
        type=list_arg(type=int),
        help="A comma seperated list of hidden units in each DNN layer.")
    parser.add_argument("--model-in", "-m")
    parser.add_argument(
        "--model-type", "-t", default=ModelType.dnn_classifier,
        type=ModelType, choices=list(ModelType))
    parser.add_argument(
        "--n-threads", "-j", default=DEFAULT_N_THREADS, type=int,
        help="Number of threads to use for some Pandas data-loading "
        "processes. (default: %(default)s)")
    parser.add_argument("--year", "-y", default=2017, type=int)
    args = parser.parse_args()
    tf.logging.set_verbosity(tf.logging.ERROR)
    # Resolve both team names to school ids and collect their rosters.
    players = load_ncaa_players(args.year)
    all_teams = load_ncaa_schools()
    team_a_id = team_name_to_id(args.team_a, all_teams)
    team_b_id = team_name_to_id(args.team_b, all_teams)
    players_a = get_players_for_team(players, team_a_id)
    players_b = get_players_for_team(players, team_b_id)
    if args.model_in:
        # Neural-network prediction from a previously trained model.
        features = np.array([np.stack([players_a, players_b])])
        estimator = Estimator(
            args.model_type, hidden_units=args.hidden_units,
            model_in=args.model_in, n_threads=args.n_threads,
            feature_year=args.year)
        score = next(estimator.predict(x=features))
        print(
            "NN Prediction: %s vs. %s final score: %s"
            % (args.team_a, args.team_b, score))
    else:
        # Use each team's games against other teams to figure out how much
        # worse an average team does when playing against them (vs. against
        # other teams).
        # Use that to adjust each team's historical mean score to predict how
        # well they'll do against each other.
        games = load_ncaa_games(args.year - 1)
        a_score, a_diff = get_historical_score(team_a_id, games)
        b_score, b_diff = get_historical_score(team_b_id, games)
        print(
            "Historical prediction: %s %.1f to %s %.1f (total: %.1f)"
            % (args.team_a, a_score - b_diff, args.team_b, b_score - a_diff,
               a_score + b_score - a_diff - b_diff))
| true |
d0f2cbcd1d7674438d3ba5cab8b5489599b1b31b | Python | aayushsin/AdvancedTopicForSignalProcessing | /symbol_status_updater.py | UTF-8 | 2,898 | 3.046875 | 3 | [] | no_license | #! /usr/bin/env python
# encoding: utf-8
# Copyright Steinwurf ApS 2016.
# Distributed under the "STEINWURF EVALUATION LICENSE 1.0".
# See accompanying file LICENSE.rst or
# http://www.steinwurf.com/licensing
import os
import sys
import copy
import kodo
def main():
    """Example showing the result of enabling the symbol status updater."""
    # Set the number of symbols (i.e. the generation size in RLNC
    # terminology) and the size of a symbol in bytes
    symbols = 50
    symbol_size = 160
    # In the following we will make an encoder/decoder factory.
    # The factories are used to build actual encoders/decoders
    # To show the effect of the symbol status updater we need to use a lower
    # sized field - the lower the better.
    encoder_factory = kodo.FullVectorEncoderFactoryBinary(symbols, symbol_size)
    encoder = encoder_factory.build()
    decoder_factory = kodo.FullVectorDecoderFactoryBinary(symbols, symbol_size)
    # Create two decoders, one which has the status updater turned on, and one
    # which has it off.
    decoder1 = decoder_factory.build()
    decoder2 = decoder_factory.build()
    decoder2.set_status_updater_on()
    print("decoder 1 status updater: {}".format(
        decoder1.is_status_updater_enabled()))
    print("decoder 2 status updater: {}".format(
        decoder2.is_status_updater_enabled()))
    # Create some data to encode. In this case we make a buffer
    # with the same size as the encoder's block size (the max.
    # amount a single encoder can encode)
    # Just for fun - fill the input data with random data
    data_in = os.urandom(encoder.block_size())
    # Assign the data buffer to the encoder so that we can
    # produce encoded symbols
    encoder.set_const_symbols(data_in)
    # Skip the systematic phase as the effect of the symbol status decoder is
    # only visible when reading coded packets.
    encoder.set_systematic_off()
    print("Processing")
    while not decoder1.is_complete():
        # Generate an encoded packet
        payload = encoder.write_payload()
        # Copy first: read_payload presumably consumes/modifies the buffer,
        # so each decoder needs its own copy -- confirm against the kodo API.
        payload_copy = copy.copy(payload)
        # Pass that packet to the decoder
        decoder1.read_payload(payload)
        decoder2.read_payload(payload_copy)
        # Compare how many symbols each decoder has fully decoded so far.
        print("decoder 1: {}".format(decoder1.symbols_uncoded()))
        print("decoder 2: {}".format(decoder2.symbols_uncoded()))
        print("-----------------")
    print("Processing finished")
    # The decoder is complete, now copy the symbols from the decoder
    data_out1 = decoder1.copy_from_symbols()
    data_out2 = decoder2.copy_from_symbols()
    # Check if we properly decoded the data
    print("Checking results")
    if data_out1 == data_in and data_out2 == data_in:
        print("Data decoded correctly")
    else:
        print("Unable to decode please file a bug report :)")
        sys.exit(1)
main()
| true |
8b2ad64c105059b3f2631a221af25be627ebf560 | Python | Aasthaengg/IBMdataset | /Python_codes/p03214/s931279357.py | UTF-8 | 171 | 2.734375 | 3 | [] | no_license | n=int(input())
# Read the values and report the index of the element closest to the mean
# (n was read earlier; ties keep the earliest index because of the strict <).
A=list(map(int,input().split()))
mu=sum(A)/n
# Sentinel distance; assumes every value is within 1000 of the mean -- holds
# for this problem's constraints.
diff=1000
ans=0
for i,a in enumerate(A):
    if abs(a-mu)<diff:
        diff=abs(a-mu)
        ans=i
print(ans) | true |
e2e123c3665664a8e798d4e19d47942f46fd9914 | Python | RybaPila-IT/Random-Forest | /RandomForest.py | UTF-8 | 2,584 | 3.59375 | 4 | [] | no_license | # Author: Julia Skoneczna
from pandas import DataFrame
import numpy as np
import math
from DecisionTreeClassifier import DecisionTree
class RandomForest:
    """A bagging ensemble of decision trees built over random rows and columns."""

    def __init__(self, number_of_trees: int, data: DataFrame, target_column_name: str, tree_size: int):
        # Configuration only; trees are built lazily by create_forest().
        self.number_of_trees = number_of_trees
        self.data = data
        self.target_column_name = target_column_name
        self.forest = []
        self.tree_size = tree_size

    def create_forest(self):
        """Train number_of_trees trees, each on a random row/column sample."""
        for _ in range(self.number_of_trees):
            rows = self.pick_random_subset([])
            columns = self.pick_random_attributes()
            sample = self.data.iloc[rows, columns]
            self.forest.append(DecisionTree(sample, self.target_column_name))

    def pick_random_subset(self, random_subset: list) -> list:
        """Append tree_size random row indices (with replacement) and return the list."""
        for _ in range(self.tree_size):
            random_subset.append(np.random.randint(0, len(self.data)))
        return random_subset

    def pick_random_attributes(self) -> list:
        """Randomly keep about sqrt(n_columns) feature columns plus the target.

        Returns the positional indices of the surviving columns.
        """
        candidates = [col for col in self.data.columns if col != self.target_column_name]
        total = len(self.data.columns)
        # Remove columns until roughly sqrt(total) names remain in `candidates`;
        # the columns left in `candidates` are then DROPPED, keeping the target
        # plus the removed ones.
        for _ in range(total - math.floor(math.sqrt(total))):
            candidates.pop(np.random.randint(0, len(candidates)))
        kept = self.data.drop(candidates, axis=1)
        return [self.data.columns.get_loc(col) for col in kept.columns]

    def predict(self, data_to_classify):
        """
        Classifies data by checking which answer is the most common in the entire forest.
        :param data_to_classify - data to be classified.
        :return final_answer - most common result in the entire forest.
        """
        votes = {}
        top_count = -1
        winner = 0
        for tree in self.forest:
            answer = tree.predict(data_to_classify)
            votes[answer] = votes.get(answer, 0) + 1
            # Strictly-greater keeps the earliest answer to reach the top count.
            if votes[answer] > top_count:
                top_count = votes[answer]
                winner = answer
        return winner
| true |
cd981dbf2f41beddb4365450623734aea1af4084 | Python | NataliaZar/iu5_web | /lab3/get_vkfriends.py | UTF-8 | 601 | 2.90625 | 3 | [] | no_license | from vk_client import *
# Resolve the VK user by screen name, then fetch their friends list.
get_user = GetVkID('g_s_f_o')
user = get_user.execute()
user_id = user.get('id')
get_friends = GetVkFriends(user_id)
friends = get_friends.execute()
now = datetime.now()
# Histogram of friend ages; index = age in whole years.
ages = [0] * 1000
for fr in friends:
    try:
        date_str = fr.get('bdate')
        date = datetime.strptime(date_str, '%d.%m.%Y')
        days = (now - date).days
        age = days // 365
        ages[age] += 1
    except:
        # Skip friends whose birth date is hidden or lacks the year.
        # NOTE(review): bare except also hides real errors; prefer
        # (TypeError, ValueError).
        pass
print(user.get("first_name"), user.get("last_name"))
for age in range(1000):
if ages[age] != 0:
print(age, " ", "#" * ages[age]) | true |
d203a5b3e607651bf24b54f49688d7f8c5775d54 | Python | akshittyagi/TakBot | /Tak-sim/client.py | UTF-8 | 10,579 | 2.96875 | 3 | [
"MIT"
] | permissive | from Communicator import Communicator
import socket,sys,json,os,time,pdb
import math
from Game import Game
from Board import Board
import argparse
class Client(Communicator):
    def __init__(self):
        """Set default game/network timers and initialise the Communicator base."""
        self.GAME_TIMER = 100000 # in Milli Seconds
        self.NETWORK_TIMER = 500
        super(Client,self).__init__()
        pass  # no-op
    def setNetworkTimer(self,Time_in_Seconds):
        # NOTE(review): stored without conversion, unlike setGameTimer below,
        # despite the parameter name -- confirm the intended unit.
        self.NETWORK_TIMER = Time_in_Seconds
    def getNetworkTimer(self):
        """Return the raw network timer value."""
        return self.NETWORK_TIMER
    def getGameTimer(self):
        # GAME_TIMER is kept in milliseconds; expose it in whole seconds.
        return self.GAME_TIMER // 1000
    def setGameTimer(self,Time_in_Seconds):
        """Set the game timer from a value given in seconds (stored in ms)."""
        self.GAME_TIMER = Time_in_Seconds * 1000
def CheckExeFile(self,Execution_Command,Executable_File):
""" Checks the Existance of the Executable File and
if the extension of the file matches the command used to run it
Args:
Execution_Command : Command used to execute the Executable File (sh, python ./ etc)
Executable_File : The Executable File
Returns:
None
"""
Extension = Executable_File.split('.')
if(len(Extension) == 1):
return False
Extension = Extension[-1]
if(os.path.isfile(Executable_File)):
if(Execution_Command == './' or Execution_Command == 'sh'):
if(Extension == 'sh' or Extension == 'o'):
return True
else:
return False
elif(Execution_Command == 'java'):
if(Extension == 'java'):
return True
else:
return False
elif(Execution_Command == 'python'):
if(Extension == 'py'):
return True
else:
return False
else:
return False
def CreateChildProcess(self,Execution_Command,Executable_File):
""" Creates a Process, with which the client communicates.
Checks the existance of the Executable_File and some basic
checks for whether the Execution_Command used to run the code
matches the extension of the Executable File
Prints if error is found
Args:
Execution_Command : Command used to execute the Executable File (sh, python ./ etc)
Executable_File : The Executable File
Returns:
None
"""
if(self.CheckExeFile(Execution_Command,Executable_File)):
super(Client,self).CreateChildProcess(Execution_Command,Executable_File)
else:
print 'ERROR : EITHER FILE ', Executable_File,' DOES NOT EXIST',
print 'OR THE EXECUTION COMMAND TO RUN THE FILE ',Execution_Command,' IS INCORRECT'
def Connect2Server(self,server_address,port_no):
"""Connects to server with given IP Address and Port No.
Args:
server_address : IP Address
Port No : Port Number
Returns:
None
"""
self.clientSocket = socket.socket()
self.clientSocket.connect((server_address,port_no))
super(Client,self).setSocket(self.clientSocket,self.NETWORK_TIMER)
def SendData2Server(self,data):
""" Sends data (a dictionary) to the Server as a json object
In case action == 'FINISH', closes the pipe on this end
Args:
data : a dictionary of the following format:
{
meta : The meta data in case of an error ( UNEXPECTED STOP, WRONG MOVE etc.), otherwise ''
action : The action to be taken (KILLPROC, NORMAL, FINISH, INIT)
data : Move String or '' in case of an Error
}
Returns:
success_flag : True if successful in sending, False otherwise
"""
if((data['action'] == 'KILLPROC') or (data['action'] == 'FINISH')):
super(Client,self).closeChildProcess()
sendData = json.dumps(data)
success_flag = super(Client,self).SendDataOnSocket(sendData)
if(not success_flag):
print 'ERROR : FAILED TO SEND DATA TO SERVER'
super(Client,self).closeSocket()
elif((data['action'] == 'KILLPROC') or (data['action'] == 'FINISH')):
super(Client,self).closeSocket()
return success_flag
def RecvDataFromServer(self):
""" Receives data from the Server as a string, and Returns the Move.
Uses self.NETWORK_TIMER to decide how long to wait for input from Server
In case of an error, prints the error, and closes the pipe process
In case the last move is made by other client, closes the pipe process and
returns the data
Args:
None
Returns:
retData : String (Move) in case there are no errors, otherwise None
"""
data = super(Client,self).RecvDataOnSocket()
retData = None
if(data == None):
print 'ERROR : TIMEOUT ON SERVER END'
super(Client,self).closeChildProcess()
super(Client,self).closeSocket()
else:
data = json.loads(data)
if(data['action'] == 'NORMAL' or data['action'] == 'INIT'):
retData = data['data']
elif(data['action'] == 'KILLPROC'):
print 'ERROR : ' + data['meta'] + ' ON OTHER CLIENT'
super(Client,self).closeChildProcess()
super(Client,self).closeSocket()
elif(data['action'] == 'FINISH'):
super(Client,self).closeChildProcess()
super(Client,self).closeSocket()
retData = data['data']
return retData
def RecvDataFromProcess(self):
"""Receives Data from the process. This does not implement checks
on the validity of game moves. Hence, the retData from here is not final
, i.e, it may be different than what is sent to the server.
Note: The Action 'FINISH' is set internally by game, not by the network
Handles Errors like Exceptions thrown by process.
However, In case of a timeout, 'FINISH' may be thrown
Uses self.GAME_TIMER to decide how long to wait for a timeout.
For both the above cases, prints the error msg and closes the connection to
the process.
Args:
None
Returns:
retData : dictionary of the nature :
{
meta : '' / MetaData in case of an Error
action : 'NORMAL' / 'KILLPROC' in case of an Error
data : 'DATA' / '' in case of an Error
}
None in case of an error
"""
start_time = time.time()
BUFFER_TIMER = int(math.ceil(self.GAME_TIMER / 1000.0))
print 'Time remaining is: ' + str(BUFFER_TIMER) + 's'
data = super(Client,self).RecvDataOnPipe(BUFFER_TIMER)
end_time = time.time()
retData = None
if(data == None):
print 'ERROR : THIS CLIENT STOPPED UNEXPECTEDLY OR TIMED OUT'
super(Client,self).closeChildProcess()
retData = {'meta':'UNEXPECTED STOP','action':'KILLPROC','data':''}
else:
# 1 Milli Second Default
time_delta = max(1,int((end_time - start_time) * 1000))
self.GAME_TIMER -= time_delta
if(self.GAME_TIMER > 0):
retData = {'meta':'','action':'NORMAL','data':data}
else:
retData = {'meta':'TIMEOUT','action':'KILLPROC','data':''}
return retData
def SendData2Process(self,data):
""" Sends Data (Move) to the process. Handles the case if the process being communicated with has closed.
Args:
data : string data, to send the process (a game move)
Returns:
success_flag : A boolean flag to denote the data transfer to the process was successful or not.
"""
if(data[-1] != '\n'):
data = data + '\n'
success_flag = super(Client, self).SendDataOnPipe(data)
if(success_flag == False):
print 'ERROR : FAILED TO SEND DATA TO PROCESS'
super(Client,self).closeChildProcess()
return success_flag
def game_loop(game, args):
    """Run the full client-side game protocol.

    Spawns the bot (args.exe), connects to the server (args.ip, args.port),
    performs the handshake (player id, board size, clock), then alternates:
    bot move -> local validation -> server, followed by the opponent's move
    from the server back to the bot, until a win/draw/invalid-move/timeout.
    execute_move return codes: 0 invalid, 1 normal, 2 player-1 win,
    3 player-2 win, 4 draw.
    """
    client = Client()
    # Pick the interpreter from the extension; anything unknown is assumed
    # to be runnable with 'sh'.
    if args.exe.endswith('.py'):
        client.CreateChildProcess('python', args.exe)
    elif args.exe.endswith('.sh'):
        client.CreateChildProcess('sh', args.exe)
    else:
        client.CreateChildProcess('sh', args.exe)
    client.Connect2Server(args.ip, args.port)
    # Handshake: "<player_id> <board_size> <game_timer>"
    server_string = client.RecvDataFromServer()
    if(server_string is None):
        print 'ERROR IN SETTING UP CONNECTIONS. SORRY'
        sys.exit(0)
    server_string_list = server_string.strip().split()
    player_id = server_string_list[0]
    board_size = int(server_string_list[1])
    game_timer = int(server_string_list[2])
    client.setGameTimer(game_timer)
    print 'You are player ' + str(player_id)
    print 'You are alloted a time of ' + str(game_timer) + 's\n'
    # Forward the handshake line to the bot so it knows its colour/clock.
    client.SendData2Process(server_string)
    if args.mode == 'GUI':
        game.render_board.render(game)
    elif args.mode == 'CUI':
        game.render()
    elif args.mode == 'TRAIN':
        game.renderer()
    if player_id == '2':
        # Player 2 first receives and applies player 1's opening move.
        move = client.RecvDataFromServer()
        if move:
            move = move.strip()
            print "The other player played " + move
            success = game.execute_move(move)
            client.SendData2Process(move)
        else:
            sys.exit(0)
    while(True):
        # --- our turn: get the bot's move ---
        move = client.RecvDataFromProcess()
        if move['action'] == 'KILLPROC':
            # Bot crashed or timed out; report and stop.
            client.SendData2Server(move)
            break
        move['data'] = move['data'].strip()
        print "You played " + move['data']
        success = game.execute_move(move['data'])
        message = {}
        if success == 0:
            message['data'] = ''
            message['action'] = 'KILLPROC'
            message['meta'] = 'INVALID MOVE'
            print 'INVALID MOVE ON THIS CLIENT'
        elif success == 2 or success == 3 or success == 4:
            # 2 : Player 1 wins
            # 3 : Player 2 wins
            # 4 : Game Drawn
            message['action'] = 'FINISH'
            message['data'] = move['data']
            if success == 2:
                message['meta'] = '1 wins'
                if(player_id == '1'):
                    print 'YOU WIN!'
                else:
                    print 'YOU LOSE :('
            elif success == 3:
                message['meta'] = '2 wins'
                if(player_id == '2'):
                    print 'YOU WIN!'
                else:
                    print 'YOU LOSE :('
            else:
                message['meta'] = 'Game Drawn'
                print 'GAME DRAWN'
        elif success == 1:
            # Regular move: forward the bot's message unchanged.
            message = move
        client.SendData2Server(message)
        if message['action'] == 'FINISH' or message['action'] == 'KILLPROC':
            break
        # --- opponent's turn: relay their move to board and bot ---
        move = client.RecvDataFromServer()
        if move:
            move = move.strip()
            print "The other player played " + move
            success = game.execute_move(move)
            if success == 2 or success == 3 or success == 4:
                # 2 : Player 1 wins
                # 3 : Player 2 wins
                # 4 : Game Drawn
                if success == 2:
                    if(player_id == '1'):
                        print 'YOU WIN!'
                    else:
                        print 'YOU LOSE :('
                elif success == 3:
                    if(player_id == '2'):
                        print 'YOU WIN!'
                    else:
                        print 'YOU LOSE :('
                else :
                    print 'GAME DRAWN'
                break
            else:
                client.SendData2Process(move)
        else:
            # Server side errored/closed; RecvDataFromServer already cleaned up.
            break
    client.closeChildProcess()
    client.closeSocket()
if __name__ == '__main__':
    # Command-line interface for the Tak client.
    arg_parser = argparse.ArgumentParser(description = 'Tak client')
    arg_parser.add_argument('ip', metavar = '0.0.0.0', type = str, help = 'Server IP')
    arg_parser.add_argument('port', metavar = '10000', type = int, help = 'Server port')
    arg_parser.add_argument('exe', metavar = 'run.sh', type = str, help = 'Your executable')
    arg_parser.add_argument('-n', dest = 'n', metavar = 'N', type = int, default = 5, help = 'Tak board size')
    arg_parser.add_argument('-mode', dest = 'mode', type = str, default = 'GUI', help = 'How to render')
    args = arg_parser.parse_args()
    game = Game(args.n, args.mode)
    if args.mode == 'GUI':
        # Tk must own the main thread, so the protocol loop runs in a worker.
        from threading import Thread
        def _run_loop():
            game_loop(game, args)
        worker = Thread(target = _run_loop)
        worker.start()
        game.init_display()
        game.display.mainloop()
    else:
        game_loop(game, args)
| true |
b9f3acd2683090c899fd8d24ace7d2ebd173f5ef | Python | ashwani8958/Python | /PyQT and SQLite/M6 - Developing a GUI with PyQT/assignment/M6_Assignment.py | UTF-8 | 10,914 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'M6_Assignment.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import re
import sqlite3
#from findprice import * #Import module from another file
class Ui_Library(object):
    """Qt form for a tiny book-shop lookup tool backed by SQLite.

    The user types a book title, clicks Price to look it up in Library.db,
    enters a quantity, and clicks Find Total to get price * quantity.
    """

    def setupUi(self, Library):
        """Build all widgets and wire the two push buttons to their slots."""
        Library.setObjectName("Library")
        Library.resize(521, 371)
        self.verticalLayout = QtWidgets.QVBoxLayout(Library)
        self.verticalLayout.setObjectName("verticalLayout")
        # Row 1: "Book Title" label, title input, Price button.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.lb_book = QtWidgets.QLabel(Library)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setBold(True)
        font.setWeight(75)
        self.lb_book.setFont(font)
        self.lb_book.setAlignment(QtCore.Qt.AlignCenter)
        self.lb_book.setObjectName("lb_book")
        self.horizontalLayout.addWidget(self.lb_book)
        self.t_bookname = QtWidgets.QLineEdit(Library)
        self.t_bookname.setObjectName("t_bookname")
        # Restrict the title field to letters and whitespace only.
        regexp = QtCore.QRegExp('([a-zA-Z\s]*)*')
        validator = QtGui.QRegExpValidator(regexp)
        self.t_bookname.setValidator(validator)
        self.horizontalLayout.addWidget(self.t_bookname)
        self.pb_price = QtWidgets.QPushButton(Library)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setBold(True)
        font.setWeight(75)
        self.pb_price.setFont(font)
        self.pb_price.setObjectName("pb_price")
        self.pb_price.clicked.connect(self.findprice)
        self.horizontalLayout.addWidget(self.pb_price)
        self.verticalLayout.addLayout(self.horizontalLayout)
        # Row 2: "Price" label and read-only price display.
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.lb_price = QtWidgets.QLabel(Library)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setBold(True)
        font.setWeight(75)
        self.lb_price.setFont(font)
        self.lb_price.setAlignment(QtCore.Qt.AlignCenter)
        self.lb_price.setObjectName("lb_price")
        self.horizontalLayout_2.addWidget(self.lb_price)
        self.t_price = QtWidgets.QLineEdit(Library)
        self.t_price.setObjectName("t_price")
        self.t_price.setReadOnly(True)
        self.horizontalLayout_2.addWidget(self.t_price)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.verticalLayout.addLayout(self.horizontalLayout_2)
        # Row 3: quantity input and Find Total button.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.lb_quantity = QtWidgets.QLabel(Library)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setBold(True)
        font.setWeight(75)
        self.lb_quantity.setFont(font)
        self.lb_quantity.setObjectName("lb_quantity")
        self.horizontalLayout_3.addWidget(self.lb_quantity)
        self.t_quantity = QtWidgets.QLineEdit(Library)
        self.t_quantity.setObjectName("t_quantity")
        # Quantity accepts integers only.
        self.t_quantity.setValidator(QtGui.QIntValidator())
        self.horizontalLayout_3.addWidget(self.t_quantity)
        self.pb_total = QtWidgets.QPushButton(Library)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setBold(True)
        font.setWeight(75)
        self.pb_total.setFont(font)
        self.pb_total.setObjectName("pb_total")
        self.pb_total.clicked.connect(self.cal_total)
        self.horizontalLayout_3.addWidget(self.pb_total)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        # Row 4: total price display.
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.lb_total = QtWidgets.QLabel(Library)
        font = QtGui.QFont()
        font.setFamily("Book Antiqua")
        font.setBold(True)
        font.setWeight(75)
        self.lb_total.setFont(font)
        self.lb_total.setObjectName("lb_total")
        self.horizontalLayout_4.addWidget(self.lb_total)
        self.t_totalprice = QtWidgets.QLineEdit(Library)
        self.t_totalprice.setObjectName("t_totalprice")
        self.horizontalLayout_4.addWidget(self.t_totalprice)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout_4)

        self.retranslateUi(Library)
        QtCore.QMetaObject.connectSlotsByName(Library)

    def retranslateUi(self, Library):
        """Set the translated display text of all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Library.setWindowTitle(_translate("Library", "Form"))
        self.lb_book.setText(_translate("Library", "Book Title"))
        self.pb_price.setText(_translate("Library", "Price"))
        self.lb_price.setText(_translate("Library", "Price"))
        self.lb_quantity.setText(_translate("Library", "Quantity"))
        self.pb_total.setText(_translate("Library", "Find Total"))
        self.lb_total.setText(_translate("Library", "Total"))

    def create(self):
        """Create Library.db and seed the Bookdetails table exactly once.

        Uses CREATE TABLE IF NOT EXISTS and only inserts the seed rows when
        the table is empty. (The previous try/except-pass version re-inserted
        every book on each Price click, silently duplicating the data.)
        """
        connection = sqlite3.connect('Library.db')
        cursor = connection.cursor()
        # Seed data: one price per title, index-aligned.
        book_list = ["The C Programming Language", "Digital Circuit", "Basic Electronic", "Electronic Devices and Circuits"]
        price_list = [322, 564, 565, 234]
        cursor.execute('CREATE TABLE IF NOT EXISTS Bookdetails(Title TEXT(25) NOT NULL, Price REAL NOT NULL);')
        cursor.execute('SELECT COUNT(*) FROM Bookdetails;')
        if cursor.fetchone()[0] == 0:
            cursor.executemany('INSERT INTO Bookdetails(Title, Price) VALUES (?, ?);',
                               zip(book_list, price_list))
            connection.commit()
        connection.close()

    def findprice(self):
        """Slot for the Price button: look up the typed title in the DB and
        show its price in the read-only price field."""
        self.create()
        connection = sqlite3.connect('Library.db')
        cursor = connection.cursor()
        title = self.t_bookname.text()
        # Parameterized query -- never build SQL by string concatenation
        # (the original concatenated user input and was injectable).
        cursor.execute('SELECT * FROM Bookdetails WHERE Title = ?;', (title,))
        record = cursor.fetchone()
        connection.close()
        if record is not None:
            print("\nBook is available")
            print(record)
            # record is (Title, Price); show the price as text.
            self.t_price.setText(str(record[1]))
        else:
            print("\nBook that you are searching is not available")

    def cal_total(self):
        """Slot for the Find Total button: price * quantity -> total field."""
        try:
            price = float(self.t_price.text())
            quantity = int(self.t_quantity.text())
        except ValueError:
            # A field is empty or not yet filled in; nothing to compute.
            return
        self.t_totalprice.setText(str(price * quantity))
if __name__ == "__main__":
    # Launch the form as a standalone Qt application.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    form = QtWidgets.QWidget()
    ui = Ui_Library()
    ui.setupUi(form)
    form.show()
    sys.exit(app.exec_())
| true |
e447025f4cb17955e9a65729fcc0e0acc7e7200f | Python | 1274085042/Algorithm | /Offer/HUAWEI/明明的随机数.py | UTF-8 | 1,394 | 3.921875 | 4 | [] | no_license | #coding=utf-8
'''
题目描述
明明想在学校中请一些同学一起做一项问卷调查,为了实验的客观性,他先用计算机生成了N个1到1000之间的随机整数(N≤1000),
对于其中重复的数字,只保留一个,把其余相同的数去掉,不同的数对应着不同的学生的学号。然后再把这些数从小到大排序,
按照排好的顺序去找同学做调查。请你协助明明完成“去重”与“排序”的工作
(同一个测试用例里可能会有多组数据,希望大家能正确处理)。
Input Param
n 输入随机数的个数
inputArray n个随机整数组成的数组
Return Value
OutputArray 输出处理后的随机整数
注:测试用例保证输入参数的正确性,答题者无需验证。测试用例不止一组。
输入描述:
输入多行,先输入随机整数的个数,再输入相应个数的整数
输出描述:
返回多行,处理后的结果
'''
# Read test cases until EOF: first the count n, then n integers (one per
# line); print the distinct values in ascending order.  Only EOF ends the
# loop -- the original bare `except: exit()` also swallowed bad input and
# genuine bugs.
while True:
    try:
        n = int(input())
    except EOFError:
        break  # no more test cases
    values = set()
    for _ in range(n):
        values.add(int(input()))
    for value in sorted(values):
        print(value)
236afe64e9fd10484715e4644b405b71231add42 | Python | open-mmlab/mmpretrain | /tools/dataset_converters/convert_imagenet_subsets.py | UTF-8 | 1,524 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) OpenMMLab. All rights reserved.
"""SimCLR provides list files for semi-supervised benchmarks
https://github.com/google-research/simclr/tree/master/imagenet_subsets/"""
import argparse
def parse_args():
    """Parse the command-line arguments (input and output list files)."""
    arg_parser = argparse.ArgumentParser(
        description='Convert ImageNet subset lists provided by SimCLR into '
        'the required format in MMPretrain.')
    arg_parser.add_argument(
        'input', help='Input list file, downloaded from SimCLR github repo.')
    arg_parser.add_argument(
        'output', help='Output list file with the required format.')
    return arg_parser.parse_args()
def main():
    """Convert a SimCLR subset list into MMPretrain's 'path label' format."""
    args = parse_args()

    # Build a wnid -> class-label mapping from the full ImageNet train list;
    # each line looks like "n01440764/n01440764_10026.JPEG 0".  The assert
    # guards against inconsistent labels for the same wnid.
    mapping = {}
    with open('data/imagenet/meta/train.txt', 'r') as f:
        for line in f.readlines():
            wnid = line.split('/')[0]
            label = line.strip().split()[1]
            if wnid in mapping:
                assert mapping[wnid] == label
            else:
                mapping[wnid] = label

    # Convert: every subset filename starts with its wnid, so
    # "n01440764_10026.JPEG" becomes "n01440764/n01440764_10026.JPEG <label>".
    with open(args.input, 'r') as f:
        subset_lines = f.readlines()
    converted = []
    for line in subset_lines:
        fn = line.strip()
        wnid = line.split('_')[0]
        converted.append(f'{wnid}/{fn} {mapping[wnid]}\n')
    with open(args.output, 'w+') as f:
        f.writelines(converted)
# Script entry point: run the conversion when executed directly.
if __name__ == '__main__':
    main()
| true |
3ef3b154fad1e0a1c1d0bb38f2f44298cffdb298 | Python | nikitaborisov/maxmin_flowsim | /flow_sim/max_min.py | UTF-8 | 2,556 | 2.890625 | 3 | [] | no_license | from typing import Set, List, Sequence, Collection, Union
import heapq
from math import inf

from sortedcontainers import SortedList
def max_min_bw(circ_list: Sequence[Collection[int]], bw: List[float]) -> List[float]:
    """
    Calculates the bandwidth allocated to each circuit in `circ_list` using the max-min
    bandwidth allocation.

    :param circ_list: A list of circuits. Each circuit is a list (or set) of relay indices.
        There are no constraints on the lengths of the circuits, but each relay index
        should appear at most once in a circuit
    :param bw: A list of bandwidth values for each relay
    :return: Returns a list of bandwidth values for each circuit, ordered the same as
        `circ_list` (a circuit that traverses no relay keeps the value None)
    """
    num_relays = len(bw)
    # relay_to_circ maps a relay index to the set of circuit indices through it
    relay_to_circ: List[Set[int]] = [set() for _ in range(num_relays)]
    for circ_id, circ in enumerate(circ_list):
        for relay_id in circ:
            relay_to_circ[relay_id].add(circ_id)
    # allocation is unknown until a circuit's bottleneck relay is processed
    bw_alloc: List[Union[float, None]] = [None for _ in range(len(circ_list))]
    remaining_bw = bw.copy()

    # Min-heap of (fair share, relay id, version) using stdlib heapq instead of
    # the third-party SortedList.  Entries are invalidated lazily: whenever a
    # relay's share changes we bump its version and push a fresh entry; stale
    # entries are simply skipped when popped.  Ties still break by relay index.
    version = [0] * num_relays
    heap = [(remaining_bw[r] / len(relay_to_circ[r]), r, 0)
            for r in range(num_relays) if relay_to_circ[r]]
    heapq.heapify(heap)

    while heap:
        share, bn_relay, ver = heapq.heappop(heap)
        if ver != version[bn_relay] or not relay_to_circ[bn_relay]:
            continue  # stale entry: relay already updated or drained
        update_relays = set()
        for circ_id in relay_to_circ[bn_relay]:
            # bn_relay is the bottleneck for these circuits
            bw_alloc[circ_id] = share
            for other_relay in circ_list[circ_id]:
                if other_relay != bn_relay:
                    # remove the circuit and its bandwidth from the other relays
                    update_relays.add(other_relay)
                    remaining_bw[other_relay] -= share
                    relay_to_circ[other_relay].remove(circ_id)
        relay_to_circ[bn_relay] = set()
        remaining_bw[bn_relay] = 0
        for relay in update_relays:
            version[relay] += 1
            if relay_to_circ[relay]:
                heapq.heappush(heap, (remaining_bw[relay] / len(relay_to_circ[relay]),
                                      relay, version[relay]))
    return bw_alloc
| true |
966a053235cb661c74426023b6ca61bc756f5a4a | Python | Silentsoul04/FTSP_2020 | /Summer Training ML & DL/Sentiment_Analysis_18.py | UTF-8 | 3,535 | 3.4375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 14:42:25 2020

@author: Rajesh

Restaurant-review sentiment analysis: clean the text, build a bag-of-words
representation, then compare KNN and Gaussian Naive Bayes classifiers.
"""
import pandas as pd
import numpy as np

# Load the reviews dataset (tab separated: Review text + binary Liked label).
# Raw string: the Windows path contains backslashes.
dataset = pd.read_csv(r'E:\ML Code Challenges\ML CSV Files\Restaurant_Reviews.tsv', delimiter='\t')
print(dataset.head())

# NLTK resources: stop-word list and Porter stemmer.
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import re

# --- Step-by-step demo of the cleaning pipeline on the first review ---
print(dataset['Review'][0])              # Wow... Loved this place.
review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][0])
print(review)                            # non-letters replaced by spaces
review = review.lower()
print(review)                            # lower-cased
review = review.split()
print(review)                            # tokenised: ['wow', 'loved', 'this', 'place']
review = [word for word in review
          if not word in set(stopwords.words('english'))]
print(review)                            # stop word 'this' removed
ps = PorterStemmer()
review = [ps.stem(word) for word in review]
print(review)                            # stemmed: loved -> love
review = ' '.join(review)
print(review)                            # 'wow love place'

# --- Apply the same cleaning to the entire dataset ---
corpus = []
for i in range(0, 1000):
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
    review = review.lower()
    review = review.split()
    review = [word for word in review if not word in set(stopwords.words('english'))]
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review]
    review = ' '.join(review)
    corpus.append(review)
print(corpus)
print(len(corpus))                       # 1000 cleaned reviews

# --- Bag-of-words feature extraction (sparse matrix -> dense ndarray) ---
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=1500)
features = cv.fit_transform(corpus).toarray()
labels = dataset.iloc[:, 1].values

from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = \
    train_test_split(features, labels, test_size=0.20, random_state=41)

# --- KNN classifier ---
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
classifier.fit(features_train, labels_train)
labels_pred = classifier.predict(features_test)

from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test, labels_pred)
print(cm)
# Accuracy computed from the confusion matrix instead of hard-coded counts.
print((cm[0, 0] + cm[1, 1]) / cm.sum())

# --- Gaussian Naive Bayes classifier ---
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(features_train, labels_train)
labels_pred = classifier.predict(features_test)
cm = confusion_matrix(labels_test, labels_pred)
print(cm)
print((cm[0, 0] + cm[1, 1]) / cm.sum())
3800f6b7387fccf93db4ac1c126afdf1bedb2432 | Python | DaniCnTs7/InteligentesPracticas | /SimpleRandomSearch.py | UTF-8 | 4,496 | 3.375 | 3 | [] | no_license | #import java.util.ArrayList;
#import java.util.Hashtable;
#import java.util.Random;
# this class implements a simple search method which explores a single sequence of actions.
# The process is quite simple. At each state we look for the agent possible actions and choose one at random.
# The action is then applied and if the new state is final, the method stops returning the list of applied actions.
# On the other hand, we iterate.
import sys
import random
import Utils
from Position import Position
from Action import Action
from State import State
from Piece import Piece
from Rook import Rook
from Pawn import Pawn
from Bishop import Bishop
from Knight import Knight
from Queen import Queen
from King import King
class SimpleRandomSearch:
    """Explores a single random sequence of actions from an initial state.

    At each state the agent's possible actions are generated and one is
    chosen uniformly at random; the chosen action is applied until a final
    state is reached (success) or no action is available (dead end).
    """
    # member variables (class-level defaults; overwritten per instance)
    m_initialState = None
    m_seedRS = -1
    m_solution = None
    m_cost = 0.0
    m_piece = Piece()
    m_finalState = None

    def __init__(self, s0, seed):
        """Store the initial state, seed the RNG and build the agent piece.

        Exits the program when the agent code in *s0* is not implemented.
        """
        self.m_initialState = s0
        self.m_seedRS = seed
        self.m_cost = 0.0
        random.seed(seed)
        # Map the agent code in the state to its piece factory (0 = white,
        # 1 = black) -- replaces the long if/elif chain.
        agent_pieces = {
            Utils.wPawn: lambda: Pawn(0), Utils.bPawn: lambda: Pawn(1),
            Utils.wRook: lambda: Rook(0), Utils.bRook: lambda: Rook(1),
            Utils.wBishop: lambda: Bishop(0), Utils.bBishop: lambda: Bishop(1),
            Utils.wKnight: lambda: Knight(0), Utils.bKnight: lambda: Knight(1),
            Utils.wQueen: lambda: Queen(0), Utils.bQueen: lambda: Queen(1),
            Utils.wKing: lambda: King(0), Utils.bKing: lambda: King(1),
        }
        try:
            self.m_piece = agent_pieces[s0.m_agent]()
        except KeyError:
            print("Chess piece not implemented")
            sys.exit()

    # search method
    def doSearch(self):
        """Run the random walk; return the last state reached.

        On success m_finalState holds the final state and m_solution the
        applied actions (with m_cost their total cost); on a dead end
        m_finalState stays None.  The previous version drew a random index
        *before* checking for an empty action list, which raised ValueError
        on dead ends; it also left debug prints and an unused draw behind.
        """
        self.m_solution = []
        solutionFound = False
        current = self.m_initialState.copy()
        while not solutionFound:
            if current.isFinal():  # first we check if the state is final
                solutionFound = True
                self.m_finalState = current
            else:
                possibleActions = self.m_piece.getPossibleActions(current)
                if len(possibleActions) == 0:
                    break  # dead end: no legal action from this state
                action = possibleActions[random.randint(0, len(possibleActions) - 1)]
                self.m_solution.append(action)
                self.m_cost += action.getCost()
                current = current.applyAction(action)
        return current
# main method
if __name__ == '__main__':
    # Expect exactly 5 user parameters (plus the script name).
    if (len(sys.argv) != 6):
        print("\n**Sorry, correct usage require 5 params:")
        print("Board size: int.")
        print("Density: (0.1,1]. Probability for each piece to be included.")
        print("Seed1: int. To initialize the problem instance random number generator (for reproducibility)")
        print("Agent: {0,1,2,3,4,5} standing for white pawn, rook, bishop, knight, queen or king.")
        print("Seed2: int. To initialize the Random Search instance random number generator (for reproducibility)")
        sys.exit()
    else:
        size = int(sys.argv[1])
        density = float(sys.argv[2])
        seed1 = int(sys.argv[3])
        agent = int(sys.argv[4])
        seed2 = int(sys.argv[5])
        # Clamp obviously invalid parameters instead of aborting.
        if size < 4:
            print("\nSorry: board to small, modified to 4")
            size = 4
        if density < 0.1 or density > 1.0:
            print("\nSorry: bad density value, modified to 0.25")
            density = 0.25
        if density * 32 > size * size:
            print("\nSorry: too much pieces for the board size, modifying density to 0.25")
            density = 0.25
        if agent < 0 or agent > 5:
            print("\nSorry: bad selected agent, modified to 1 (white rook)")
            agent = Utils.wRook
        # Build the initial state and run the random search.
        state = Utils.getProblemInstance(size, density, seed1, agent)
        Utils.printBoard(state)
        srs = SimpleRandomSearch(state, seed2)
        finalState = srs.doSearch()
        if srs.m_finalState is None:
            print("\nSorry, no solution found ....")
        else:
            print("Solution length: %d" % len(srs.m_solution))
            print("Solution cost: %f" % srs.m_cost)
            print("Solution:\n")
            for i in range(len(srs.m_solution)):
                print("%d : " % (i + 1), end="")
                print(srs.m_solution[i])
            Utils.printBoard(finalState)
            print()
| true |
d3f7c137eaadf9ad9be5fd4d86573b10e79bb550 | Python | xypan1232/treeCl | /treeCl/utils/phymlIO.py | UTF-8 | 2,095 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
from __future__ import print_function
import re
"""
TO DO: Add PAML class to estimate these kinds of parameters better then
can dispense with this
"""
def extract_gamma_parameter(tree):
    """Pull the Gamma shape (alpha) parameter out of PhyML output attached
    to *tree*; fall back to 1.0 when the line is absent."""
    # PhyML writes e.g. "Gamma shape parameter: \t\t0.52"
    match = re.search(r'(?<=Gamma shape parameter: \t\t)[.\d+]+', tree.output)
    if match is None:
        print('Couldn\'t extract alpha parameter')
        return 1.0
    return float(match.group())
def extract_GTR_parameters(tree):
    """Extract GTR base frequencies and exchange rates from phyml output.

    BUG FIX: the C<->G, C<->T and G<->T patterns were copy-pasted from the
    A<->G / A<->T lines, so those three rates silently duplicated the A rates.
    Falls back to uniform frequencies (0.25) and unit rates when parsing fails.
    """
    Afreq_regex = re.compile(r'(?<=f\(A\)= )[.\d+]+')
    Cfreq_regex = re.compile(r'(?<=f\(C\)= )[.\d+]+')
    Gfreq_regex = re.compile(r'(?<=f\(G\)= )[.\d+]+')
    Tfreq_regex = re.compile(r'(?<=f\(T\)= )[.\d+]+')
    AtoC_regex = re.compile(r'(?<=A <-> C )[.\d+]+')
    AtoG_regex = re.compile(r'(?<=A <-> G )[.\d+]+')
    AtoT_regex = re.compile(r'(?<=A <-> T )[.\d+]+')
    CtoG_regex = re.compile(r'(?<=C <-> G )[.\d+]+')
    CtoT_regex = re.compile(r'(?<=C <-> T )[.\d+]+')
    GtoT_regex = re.compile(r'(?<=G <-> T )[.\d+]+')
    try:
        Afreq = float(Afreq_regex.search(tree.output).group())
        Cfreq = float(Cfreq_regex.search(tree.output).group())
        Gfreq = float(Gfreq_regex.search(tree.output).group())
        Tfreq = float(Tfreq_regex.search(tree.output).group())
        AtoC = float(AtoC_regex.search(tree.output).group())
        AtoG = float(AtoG_regex.search(tree.output).group())
        AtoT = float(AtoT_regex.search(tree.output).group())
        CtoG = float(CtoG_regex.search(tree.output).group())
        CtoT = float(CtoT_regex.search(tree.output).group())
        GtoT = float(GtoT_regex.search(tree.output).group())
    except AttributeError:
        # .search() returned None for at least one pattern
        print('Couldn\'t extract GTR parameters')
        Afreq = Cfreq = Gfreq = Tfreq = 0.25
        AtoC = AtoG = AtoT = CtoG = CtoT = GtoT = 1.0
    d = dict(
        Afreq=Afreq,
        Cfreq=Cfreq,
        Gfreq=Gfreq,
        Tfreq=Tfreq,
        AtoC=AtoC,
        AtoG=AtoG,
        AtoT=AtoT,
        CtoG=CtoG,
        CtoT=CtoT,
        GtoT=GtoT,
    )
    return d
| true |
d4d7730e4880cd0b4eb5fb9c880c177c38d45483 | Python | BogdanIancu/PythonExamples | /day_3/functions.py | UTF-8 | 3,105 | 4.3125 | 4 | [] | no_license | def func1(salary):
print(salary+300)
# storing reference to func1 in bonus
bonus = func1
bonus(3000)
bonus(5000)
bonus(1000)
def func2(value):
    """Demonstrate a closure: the nested function reads *value* from this scope."""
    print('Starting execution of func2')
    def show_value():
        print(value)
    show_value()
    print('Finished execution of func2')
# Store a second reference to func2 and call it through the new name;
# each call builds a fresh inner closure over that call's `value`.
func = func2
func(2000)
func('Something')
func('Something else')
func(5000)
def func3(operation):
    """Run the *operation* callable between two status messages (also used as a decorator below)."""
    print('Executing important ops')
    operation()
    print('Finished important ops')
# NOTE: func3 has no return statement, so after these decoration-time runs
# (func3(add) / func3(remove)) the names `add` and `remove` are bound to None.
@func3
def add():
    print(5+7)
@func3
def remove():
    print(12-10)
""" ops = func3
ops(add)
ops(remove) """
def func4():
    """Return a closure that prints the locally captured value (5)."""
    print('Executing again')
    captured = 5
    def func5():
        print(captured)
    return func5
var()
var()
def func6(function):
    """Call *function* once, wrap it in func7 (which calls it again), run the wrapper once, return it."""
    print('Starting...')
    function()
    def func7():
        function()
        print('Finished func7')
    func7()
    return func7
def tmpFunc():
    # Sample callable handed to func6 below.
    print('I am an important function')
# func6 runs tmpFunc once, runs its wrapper once, and returns the wrapper
# for the two extra invocations.
tmp = func6(tmpFunc)
tmp()
tmp()
""" def add(x,y):
return 'Suma elementelor {} si {} este: {}'.format(x,y,x + y)
def substract(x,y):
print('Diferenta dintre {} si {} este: {}'.format(x,y,x-y))
sum = add # stored reference of function add to variable sum
print(sum(5,9))
print(sum(10,19))
print(sum(15,20))
dif = substract # stored reference of function substract to variable dif
dif(10,7)
dif(5,2)
dif(15,12) """
""" def dummyFunc():
functions = []
for i in range(0,3):
print('Added Element: {}'.format(i))
def random():
print('Some important operation')
functions.append(random)
return functions
values = dummyFunc()
values[0]()
values[1]()
values[2]() """
""" def func1():
print('Started execution of func1')
def func2():
print('I am an inner function')
func2()
func2()
print('Finished execution of func1')
inner = func1
inner() """
""" def operations(func):
print('Started execution of {}'.format(func.__name__))
func()
print('Finished execution of {}'.format(func.__name__))
def add():
print('Suma dintre {} si {} este: {}'.format(5,7,5+7))
def substract():
print('Diferenta dintre {} si {} este: {}'.format(12,5,12-5))
def multiply():
print('Produsul dintre {} si {} este: {}'.format(2,3,2*3))
def divide():
print('Catul impartirii dintre {} si {} este: {}'.format(10, 5, 10/5))
ops = operations
ops(add)
ops(substract)
ops(multiply)
ops(divide) """
def combinator(function):
    """Run *function* once directly and once through innerCombinator; return the wrapper.

    BUG FIX: one wrapper message misspelled 'execution' as 'exeuction';
    the output string is corrected.
    """
    print('Started execution of combinator function')
    def innerCombinator():
        print('Started execution of innerCombinator function')
        print('Started execution of {}'.format(function.__name__))
        function()
        print('Finished execution of {}'.format(function.__name__))
        print('Finished execution of innerCombinator function')
    function()
    innerCombinator()
    print('Finished execution of combinator function')
    return innerCombinator
def important_op():
    # Sample callable handed to combinator below.
    print('Some important operation')
# combinator invokes important_op directly and via innerCombinator, then
# returns innerCombinator for the two extra runs below.
innerComb = combinator(important_op)
innerComb()
innerComb()
f4ab1bcd04b184cadebe34a388824baeebd8ac09 | Python | ashwani-bhat/doc-sumsim | /main.py | UTF-8 | 1,405 | 2.828125 | 3 | [] | no_license | from doc import DocumentFeature
import argparse
from pdftotext import PdfConverter
if __name__ == '__main__':
    # CLI entry point: convert PDFs, build document features, then report
    # pairwise similarity (all documents, or one roll number with --one).
    parser = argparse.ArgumentParser()
    requiredNamed = parser.add_argument_group('required named arguments')
    requiredNamed.add_argument('--threshold', help=" should be between 0 (same) and 2 (totally different)", required=True)
    requiredNamed.add_argument('--pdf-dir', required=True)
    optionalNamed = parser.add_argument_group('optional arguments')
    optionalNamed.add_argument('--clean', action='store_true', default=False, help='clean the dataset')
    # NOTE(review): the --verbose and --one help strings look copy-pasted
    # from --clean — confirm intended help text.
    optionalNamed.add_argument('--verbose', action='store_true', default=False, help='clean the dataset')
    optionalNamed.add_argument('--one', action='store_true', default=False, help='clean the dataset')
    args = parser.parse_args()
    threshold = float(args.threshold) # should be between 0 (same) and 2 (totally different)
    clean = args.clean
    pdf_dir = args.pdf_dir
    verbose = args.verbose
    one = args.one
    # Convert all pdfs and store it in json
    pdfConverter = PdfConverter(pdf_dir)
    pdfConverter.convertall()
    # Build per-document features before comparison.
    doc = DocumentFeature(pdf_dir, clean)
    doc.create_features()
    if one:
        # Compare a single submission (by roll number) against the rest.
        roll_no = str(input("Enter the roll number: \t"))
        print(doc.compare_one(roll_no, threshold, verbose))
    else:
        print(doc.compare_all(threshold, verbose))
| true |
a331962f5e5077e4c57a1ab807ff22eeaee5a0e4 | Python | justinpolackal/text-clean | /datakettle/csv_reader.py | UTF-8 | 5,999 | 2.78125 | 3 | [] | no_license | import os
from datakettle.cleantext.textcleaner import TextCleaner
from datakettle.cleantext.filereader import TextFileReader
import datakettle.cleantext.utils as utils
import logging
class CSVReader (object):
    """Reads text rows (and optional labels) from delimited files, cleaning each
    document according to the 'clean' steps in the source configuration.

    BUG FIXES vs. the original:
    - read_s3_files required a `config` argument but read_csv_data calls it with
      none (TypeError); the parameter now defaults to None.
    - read_csv_data left `data_list` unbound (NameError) when neither
      endpoint/filesystem branch matched; it now defaults to [].
    - removed unused locals (`tc`, `label_value_list`) in read_local_files.
    """

    def __init__(self, source_config):
        # source_config: feed-config dict with 'access' and 'clean' sections.
        self.source_config = source_config
        self.logger = logging.getLogger(__name__)
        # Default special-character tags understood by TextCleaner.
        self.def_special_chars = ['MINUS', 'COMMA', 'DQUOTE', 'SQUOTE', 'FSLASH', 'BSLASH', 'HASH', 'AT', 'EXCL', 'CARAT',
                     'AMP', 'PCT', 'DOLLAR', 'TILDA', 'APOS', 'COLN', 'SCOLN', 'QMARK', 'LT', 'GT', 'EQ', 'PIPE', 'CBRACE',
                     'SBRKT','BRKT', 'USCORE', 'ASTRSK', 'DOT', 'PLUS']
        self.def_white_space_chars = ["NEWLINE", 'CR', 'FF', 'TAB']

    def read_local_files(self):
        """Read every matching local CSV file and return a list of
        {"content": cleaned_text, "label": label} dicts."""
        tfr = TextFileReader()
        access = self.source_config["access"]
        file_filter = ""
        if access["file_filter"]:
            file_filter = access["file_filter"]
        # Read column delimiter
        delimiter = ","
        if "delimiter" in access:
            delimiter = utils.if_null(access["delimiter"], ",")
        header_row = None
        if "header_row" in access:
            header_row = utils.if_null(access["header_row"], None)
        data_column = 0
        if "data_column" in access:
            data_column = utils.if_null(access["data_column"], 0)
        label_column = None
        if "label_column" in access:
            label_column = utils.if_null(access["label_column"], None)
        usecols = [data_column]
        if label_column is not None:
            usecols.append(label_column)
        # If a label is provided globally, read it from config.
        # Label will be the class/prediction used for training purposes.
        global_label_value = None
        if "label_value_override" in access:
            global_label_value = utils.if_null(access["label_value_override"], None)
        # Read file names from given path
        files_list = utils.get_files_in_path(access["path"], file_filter)
        file_data_list = []
        for file in files_list:
            # read csv file as a pandas dataframe
            data_df = tfr.read_csv_file(file_path=file, separator=delimiter, header_row=header_row, select_cols=usecols)
            self.logger.info("Found {} documents ".format(len(data_df)))
            if utils.df_size(data_df) < 1:
                continue
            # Convert data column (text rows) into a list
            text_list = list(data_df.iloc[:, 0].values)
            # If label column is specified, convert label column into list
            label_list = None
            if label_column is not None:
                label_list = list(data_df.iloc[:, 1].values)
            # Clean each text row and pair it with its label (per-row label
            # wins over the global override when a label column exists).
            for idx, textdoc in enumerate(text_list):
                clean_data = self.cleanup_data(textdoc)
                label_value = global_label_value
                if label_column is not None:
                    label_value = label_list[idx]
                file_data_list.append({"content": clean_data, "label": label_value})
        return file_data_list

    def read_s3_files(self, config=None):
        """S3 reader placeholder; always returns empty results.

        `config` defaults to None so the no-argument call in read_csv_data works;
        callers that do pass a config remain compatible.
        """
        self.logger.error("S3 file reader: Not yet implemented")
        return [], []

    def cleanup_data(self, clean_data):
        """Apply the configured 'clean' steps, in order, to one document.

        Returns "" for None/empty input.
        """
        if clean_data is None or len(clean_data) < 1:
            return ""
        tc = TextCleaner()
        clean_steps = self.source_config["clean"]
        # Iterate through each step and perform the specified cleaning action.
        for cstep in clean_steps:
            stepname = cstep["step"]
            if stepname == "remove_all_markup":
                clean_data = tc.remove_all_markup(doc=clean_data, valid_markup=False)
            if stepname == "remove_html_encoded_chars":
                clean_data = tc.remove_html_encoded_chars(clean_data, replace_char=' ')
            if stepname == "remove_special_chars":
                if "special_chars" in cstep:
                    special_chars = cstep["special_chars"]
                else:
                    special_chars = self.def_special_chars
                # A falsy replace_char (missing, None, '') collapses to ''.
                replace_char = cstep["replace_char"] if cstep.get("replace_char") else ''
                clean_data = tc.remove_special_chars(special_chars, clean_data, replace_char=replace_char)
            if stepname == "remove_white_spaces":
                if "white_space_chars" in cstep:
                    white_space_chars = cstep["white_space_chars"]
                else:
                    white_space_chars = self.def_white_space_chars
                clean_data = tc.remove_white_spaces(white_space_chars=white_space_chars, doc=clean_data)
        return clean_data

    def read_csv_data(self):
        """Dispatch to the reader matching the configured endpoint/filesystem.

        Returns the reader's result, or [] when no branch matches.
        """
        access = self.source_config["access"]
        data_list = []
        if (access["endpoint"] == "csv") and (access["filesystem"] == "local"):
            self.logger.info("Reading local text files from {}".format(access["path"]))
            data_list = self.read_local_files()
        if (access["endpoint"] == "csv") and (access["filesystem"] == "s3"):
            self.logger.info("Reading s3 text files from {}".format(access["path"]))
            data_list = self.read_s3_files()
        return data_list
7a5ffb97c10b862f2e1f541bb6be472f8a74da2b | Python | geetha60/PythonDemosApril29_2014 | /04_Functions/program.py | UTF-8 | 1,295 | 3.921875 | 4 | [] | no_license | import math
def find_cool_numbers(test=None):
    """Numbers in [0, 25) that satisfy *test*; all of them when test is None."""
    if test is None:
        return list(range(25))
    return [candidate for candidate in range(25) if test(candidate)]
def even_nums(n):
    """Predicate: True when *n* is even."""
    return not n % 2
def thirds_nums(n):
    """Predicate: True when *n* is a multiple of three."""
    return not n % 3
def filter_nums():
    """Print the multiples of three below 25, comma separated."""
    print("Looking for some cool numbers")
    for value in find_cool_numbers(thirds_nums):
        print(value, end=',')
    print()
    print()
def sort_by_abs(n):
    """Sort key: the magnitude of *n*, as a float."""
    magnitude = math.fabs(n)
    return magnitude
#sort_by_abs.nickname="Chuck"
def sorting():
    """Sort sample data by magnitude ascending, then by magnitude descending."""
    values = [1, 3, -1, 7, 11, -2]
    values.sort(key=sort_by_abs)
    print(values)
    values.sort(key=lambda n: -math.fabs(n))
    print(values)
    # print("Nickname: " + sort_by_abs.nickname)
def use_closures():
    """Show that each counter closure keeps its own independent state."""
    seven_counter = create_counter(7, "The_seven")
    big_counter = create_counter(100, "Biggy")
    print(seven_counter())
    print(seven_counter())
    print(seven_counter())
    print(big_counter())
def create_counter(starting_val, name):
    """Return a closure that logs, increments, and returns its private total."""
    def counter():
        nonlocal starting_val
        # Message shows the pre-increment value; the call returns the new one.
        print("Incrementing {0} to {1}".format(name, starting_val))
        starting_val += 1
        return starting_val
    return counter
def main():
    """Run the three demos in sequence."""
    filter_nums()
    sorting()
    use_closures()
# Script entry point guard.
if __name__ == '__main__':
    main()
2804be5c444cf0e76f6f5e5130d7179149203476 | Python | hymanc/purpleproject1 | /model/planarTagTest.py | UTF-8 | 3,185 | 2.53125 | 3 | [] | no_license | from socket import socket, AF_INET, SOCK_DGRAM, error as SocketError
from numpy import asarray,zeros_like,kron,concatenate,newaxis
from numpy.linalg import lstsq, svd, inv
from json import loads as json_loads, dumps as json_dumps
from sys import stdout
# Refuse to be imported as a module; this file is meant to run as a script
# (presumably in an interactive pylab session — see the plotting calls below).
if __name__ != "__main__":
    raise RuntimeError("Run this as a script")
# Close any socket left over from a previous interactive run; NameError on a
# fresh run (no `s` yet) is swallowed along with everything else.
try:
    s.close()
except Exception:
    pass
# Non-blocking UDP listener on port 0xB00 (2816) for incoming tag packets.
s = socket(AF_INET, SOCK_DGRAM )
s.bind(("",0xB00))
s.setblocking(0)
def skew( v ):
"""
Convert a 3-vector to a skew matrix such that
dot(skew(x),y) = cross(x,y)
The function is vectorized, such that:
INPUT:
v -- N... x 3 -- input vectors
OUTPUT:
N... x 3 x 3
For example:
>>> skew([[1,2,3],[0,0,1]])
array([[[ 0, 3, -2],
[-3, 0, 1],
[ 2, -1, 0]],
<BLANKLINE>
[[ 0, 1, 0],
[-1, 0, 0],
[ 0, 0, 0]]])
"""
v = asarray(v).T
z = zeros_like(v[0,...])
return array([
[ z, -v[2,...], v[1,...]],
[v[2,...], z, -v[0,...] ],
[-v[1,...], v[0,...], z ] ]).T
def fitHomography( x, y ):
    """Fit a homography mapping points x to points y"""
    # x, y: N x 3 arrays of homogeneous 2D points (DLT-style formulation).
    x = asarray(x)
    assert x.shape == (len(x),3)
    y = asarray(y)
    assert y.shape == (len(y),3)
    S = skew(y)
    # Each correspondence contributes the rows kron(skew(y_i), x_i); the
    # homography is the null vector of the stacked system A.
    plan = [ kron(s,xi) for s,xi in zip(S,x) ]
    #plan.append([[0]*8+[1]])
    A = concatenate( plan, axis=0 )
    U,s,V = svd(A)
    # Singular vector for the smallest singular value (last row of V),
    # reshaped into the 3x3 homography.
    res = V[-1,:].reshape(3,3)
    return res.T
# Main acquisition/visualization loop.
# NOTE(review): this section is Python 2 (`except E, e:` and print statements)
# and uses names (array, mean, dot, c_, angle, plot, clf, text, axis, draw, pi)
# that are not imported here — presumably supplied by a pylab environment;
# confirm the intended runtime.
lst = []
msg = None
rh={}
i=0
# Tag ids that mark the board corners, and their reference positions
# (homogeneous coordinates, one row per corner).
corners = [4,2,7,5,9]
ref = array([[-3,0,3,3,-3],[2,2,2,-2,-2.0],[1,1,1,1,1]]).T
# Plot axis limits: reference bounding box with 20% margin.
ax = array([
    min(ref[:,0]),max(ref[:,0]),
    min(ref[:,1]),max(ref[:,1])
    ])*1.2
allow = set(corners + [12,14,13,15])
while True: #number of samples
    try:
        while True:
            # read data as fast as possible
            msg = s.recv(1<<16)
    except SocketError, se:
        # until we've run out; last message remains in m
        pass
    # make sure we got something
    if not msg:
        continue
    # Parse tag information from UDP packet
    dat = json_loads(msg)
    # Make sure there are enough tags to make sense
    if len(dat)<5:
        continue
    # Collect allowed tags
    c = {}
    h = {}
    for d in dat:
        nm = d['i']
        if not nm in allow:
            continue
        # Tag corner coordinates, scaled from centimeters-like units.
        p = asarray(d['p'])/100
        c[nm] = mean(p,0)
        h[nm] = p
        print nm,
    # Collect the corner tags
    try:
        roi = array( [ [c[nm][0], c[nm][1], 1] for nm in corners ] )
    except KeyError, ck:
        print "-- missing corner",ck
        continue
    # Homography mapping roi to ref
    prj = fitHomography( roi, ref )
    mrk = dot(roi,prj)
    # Normalize homogeneous coordinates.
    mrk = mrk[:,:2] / mrk[:,[-1]]
    print
    # display it
    clf()
    # Mark the corner tags
    plot( mrk[:,0],mrk[:,1],'sc',ms=15)
    # Unit-square corner directions used for the orientation estimate below.
    ang0 = [-1+1j,1+1j,1-1j,-1-1j]
    # Loop on all tags
    for nm,p in h.iteritems():
        # Project back
        a = dot(c_[p,[1]*len(p)],prj)
        a = a[:,:2]/a[:,[-1]]
        # Compute position
        z = a[:,0]+1j*a[:,1]
        mz = mean(z)
        # Compute angle
        ang = angle(mean((z-mz) / ang0))
        plot( z[[0,1,2,3,0]].real, z[[0,1,2,3,0]].imag, '.-b' )
        plot( [z[0].real], [z[0].imag], 'og' )
        text( mean(a[:,0]), mean(a[:,1]),
            "[%d] (%.2g,%.2g) %.0f" % (nm,mz.real,mz.imag,180/pi*ang),
            ha='center',va='center' )
    axis(ax)
    #
    draw()
    stdout.flush()
a0247ccc56379ff4c576ed220f03e1a461c5af1a | Python | vt0311/python | /FaceBook/matplotlibTest1.py | UTF-8 | 97 | 2.53125 | 3 | [] | no_license | import matplotlib.pyplot as plt
# Single series: y values 1..4; x defaults to the indices 0..3.
plt.plot([1,2,3,4])
plt.xlabel('x축 한글 표시')  # Korean-language axis label
plt.show()
6fedc2b8dab451e31e3ffee5370ee505f8158be9 | Python | Jmaihuire/wqu | /MScFE650/.ipynb_checkpoints/Kmean-checkpoint.py | UTF-8 | 591 | 2.75 | 3 | [] | no_license | # %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
# %%
# Generate four synthetic Gaussian clusters (200 points) and plot the raw data.
X, y = make_blobs(n_samples=200, centers=4, cluster_std=0.5, random_state=0)
plt.scatter(X[:, 0], X[:, 1], s=50)
# %%
# Fit k-means with the known number of clusters.
k_means = KMeans(n_clusters=4)
k_means.fit(X)
# %%
# Cluster assignments and learned centroids; one marker color per cluster.
y_pred = k_means.predict(X)
centers = k_means.cluster_centers_
colors = ['r','g','y','b']
# %%
# Scatter colored by predicted cluster, centroids drawn as large squares.
plt.scatter(X[:, 0], X[:, 1], c = y_pred, s=50)
for i, j in zip(centers, colors):
    plt.scatter(i[0], i[1], s=200, c=j, marker='s')
plt.show()
# %%
# %%
# %%
# %%
# %%
| true |
541f3420290a01f48671c8a25aeac09dd71a0c1a | Python | patrickgtafoya/autonomous_vehicle_design | /MPU6050 Bias Offset and Calibration/estimatingOrientation.py | UTF-8 | 6,368 | 2.78125 | 3 | [] | no_license | import biasOffsetCalibration as b
from copy import deepcopy
def state_two(port, dict_1, dict_2, dict_3, k, sig):
    """Stream 7-byte gyro packets from *port* into the rate dictionaries
    until look_for_stop signals the stop condition."""
    done = False
    sample_num = 1  # iteration counter (kept for parity with the table file)
    while not done:
        # A full packet is 7 bytes: id byte plus three int16 gyro words.
        if port.inWaiting() > 6:
            packet = receive_data(port)
            done = b.look_for_stop(packet[4], port, sig)
            write_data(packet, dict_1, dict_2, dict_3, k)
            sample_num += 1
def state_three(dict_1, dict_2, dict_3, keys):
    """Integrate each rate dictionary over 1/10 s, 1/2 s and full-second windows.

    Returns [drift_1000hz, drift_200hz, drift_100hz]; each entry is a list of
    the three (x, y, z) orientation tuples, in that window order.
    """
    window_divisors = (10, 2, 1)
    return [[calculate_orientation(rate_dict, keys, div=d) for d in window_divisors]
            for rate_dict in (dict_1, dict_2, dict_3)]
def receive_data(port):
    """Read one 7-byte packet: id byte plus three big-endian int16 gyro words.

    Returns (identifier, gx, gy, gz, stop_check); gyro words are scaled by
    1/10000, and stop_check is the whole packet as one unsigned integer.
    """
    raw = port.read(7)
    identifier = int.from_bytes(raw[0:1], byteorder='big', signed=True)
    gx, gy, gz = (float(int.from_bytes(raw[i:i + 2], byteorder='big', signed=True)) / 10000.0
                  for i in (1, 3, 5))
    stop_check = int.from_bytes(raw, byteorder='big', signed=False)
    return identifier, gx, gy, gz, stop_check
def write_data(data, dict_1000, dict_200, dict_100, keys):
    """Append the packet's gyro words to every rate dictionary flagged in the id byte.

    Bit 0x01 selects the 1 kHz dict, 0x02 the 200 Hz dict, 0x04 the 100 Hz dict.
    """
    flags, gx, gy, gz = data[0], data[1], data[2], data[3]
    for mask, target in ((0x01, dict_1000), (0x02, dict_200), (0x04, dict_100)):
        if flags & mask:
            target[keys[0]].append(gx)
            target[keys[1]].append(gy)
            target[keys[2]].append(gz)
def calculate_orientation(hz_dict, keys, div=1):
    """Trapezoidal integration of gyro rates into per-axis orientation deltas.

    hz_dict : {axis_key: [rate samples]} — assumed to span one second, so the
              sample spacing is 1/len(samples). (TODO confirm with caller.)
    keys    : the three axis keys, in x/y/z order.
    div     : integrate only the first 1/div of the samples.
    Returns (x, y, z) orientation deltas; (0.0, 0.0, 0.0) for empty input
    (the original raised ZeroDivisionError computing 1/len on an empty list).
    """
    total = len(hz_dict[keys[0]])
    if total == 0:
        # Guard: an empty capture would otherwise divide by zero below.
        return 0.0, 0.0, 0.0
    # Slice index for the requested window.
    sl = int(total / div)
    x_data = hz_dict[keys[0]][0:sl]
    y_data = hz_dict[keys[1]][0:sl]
    z_data = hz_dict[keys[2]][0:sl]
    # Sample spacing (seconds) under the one-second-of-data assumption.
    delta_t = 1.0 / total
    x_orientation = 0
    y_orientation = 0
    z_orientation = 0
    # Trapezoid rule: average of adjacent samples times the spacing.
    for index in range(1, len(x_data)):
        x_orientation += (x_data[index] + x_data[index - 1]) * (delta_t / 2.0)
        y_orientation += (y_data[index] + y_data[index - 1]) * (delta_t / 2.0)
        z_orientation += (z_data[index] + z_data[index - 1]) * (delta_t / 2.0)
    return x_orientation, y_orientation, z_orientation
if __name__ == "__main__":
    # Capture gyro data over serial, integrate it per rate/window, and write
    # the drift table. The report section below replaces ~40 duplicated
    # f.write(...) calls with nested loops producing byte-identical output,
    # and drops the redundant f.close() that followed the `with` block.
    portName = "COM4"
    file = 'measuredError.txt'
    title = 'Measured Error After Calibration'
    stop = 0
    start = 0xFF
    baud_rate = 115200
    # One sample dictionary per capture rate, keyed by gyro axis.
    key_list = ['Gyro X', 'Gyro Y', 'Gyro Z']
    hz_100 = {key_list[0]: [], key_list[1]: [], key_list[2]: []}
    hz_200 = deepcopy(hz_100)
    hz_1000 = deepcopy(hz_100)
    ser = b.initialize_serial(portName, baud_rate)
    b.state_one(ser, file, title, start)
    state_two(ser, hz_1000, hz_200, hz_100, key_list, stop)
    drift = state_three(hz_1000, hz_200, hz_100, key_list)
    # Write drift calculations to file: one section per rate, one sub-section
    # per integration window, one line per axis.
    with open('driftCalculations.txt', 'w') as f:
        for i, rate_label in enumerate(('1 kHz', '200 Hz', '100 Hz')):
            f.write('Drift Calculations\n')
            f.write('\n' + rate_label + ':\n')
            for j, window_label in enumerate(('1/10 sec', '1/2 sec', '1 sec')):
                f.write('\t' + window_label + ':\n')
                for axis_idx, axis in enumerate(('X', 'Y', 'Z')):
                    f.write('\t\t' + axis + ':\t' + str(drift[i][j][axis_idx]) + '\n')
| true |
f9078a6b1366c61cbe3fb56e9f1945ca7aa5cb03 | Python | yuminc03/1320_python_AfterSchool | /hello.py | UTF-8 | 66 | 2.765625 | 3 | [] | no_license | #hello.py
print("hello world")
# Demo conditional: a is 3, so the guarded print always fires.
a=3
if a>1:
    print("a is big")
| true |
3aa8aa83ddcb552e81fc27f77706cc2e20b0dbb1 | Python | weffschneider/asl_cheesebot | /scripts/astar.py | UTF-8 | 7,938 | 3.65625 | 4 | [] | no_license | import numpy as np
class AStar(object):
    """A* search on a uniformly discretized 2D state space (8-connected grid).

    BUG FIXES vs. the original:
    - __init__ seeded open_set/g_score/f_score with the raw (unsnapped) x_init,
      so an off-grid start state could never match self.x_init in
      reconstruct_path; the snapped self.x_init is used now.
    - get_neighbors offset the diagonals by resolution*sqrt(2) per axis and
      relied on snap_to_grid to round them back to one cell; the offsets are
      now exactly +/- resolution.
    The commented-out fix_animal_position draft was removed as dead code.
    """

    def __init__(self, statespace_lo, statespace_hi, x_init, x_goal, occupancy, resolution):
        self.statespace_lo = statespace_lo    # state space lower bound (e.g., (-5, -5))
        self.statespace_hi = statespace_hi    # state space upper bound (e.g., (5, 5))
        self.occupancy = occupancy            # occupancy grid; must expose is_free(x)
        self.resolution = resolution          # discretization step of the state space
        self.x_init = self.snap_to_grid(x_init)    # initial state, snapped on-grid
        self.x_goal = self.snap_to_grid(x_goal)    # goal state, snapped on-grid
        self.closed_set = []    # states already expanded
        self.open_set = []      # candidate states for future expansion
        self.f_score = {}       # estimated total cost from start to goal through state
        self.g_score = {}       # cost-to-come for each state
        self.came_from = {}     # parent pointers for path reconstruction
        self.open_set.append(self.x_init)
        self.g_score[self.x_init] = 0
        self.f_score[self.x_init] = self.distance(self.x_init, self.x_goal)
        self.path = None            # final path as a list of states
        self.dist_from_wall = 1.0   # clearance to keep from walls

    def is_free(self, x):
        """True when x is inside the state-space bounds and not in an obstacle.

        The start and goal states are always considered free.
        """
        if x == self.x_init or x == self.x_goal:
            return True
        for dim in range(len(x)):
            if x[dim] < self.statespace_lo[dim]:
                return False
            if x[dim] >= self.statespace_hi[dim]:
                return False
        if not self.occupancy.is_free(x):
            return False
        return True

    def distance(self, x1, x2):
        """Euclidean distance between two states."""
        return np.linalg.norm(np.array(x1) - np.array(x2))

    def snap_to_grid(self, x):
        """Closest point to x on the discrete state grid."""
        return (self.resolution * round(x[0] / self.resolution),
                self.resolution * round(x[1] / self.resolution))

    def get_neighbors(self, x):
        """FREE neighbor states of x: the 4 axis moves plus the 4 diagonals,
        each one resolution step away and snapped back onto the grid."""
        xpos, ypos = x
        step = self.resolution
        moves = [(xpos - step, ypos),
                 (xpos + step, ypos),
                 (xpos, ypos + step),
                 (xpos, ypos - step),
                 (xpos - step, ypos + step),
                 (xpos - step, ypos - step),
                 (xpos + step, ypos + step),
                 (xpos + step, ypos - step)]
        # Snap before the free-space test so accumulated float error cannot
        # produce off-grid states.
        snapped = (self.snap_to_grid(m) for m in moves)
        return [m for m in snapped if self.is_free(m)]

    def find_best_f_score(self):
        """State in open_set with the lowest f_score."""
        return min(self.open_set, key=lambda x: self.f_score[x])

    def reconstruct_path(self):
        """Follow came_from pointers from the goal back to the start."""
        path = [self.x_goal]
        current = path[-1]
        while current != self.x_init:
            path.append(self.came_from[current])
            current = path[-1]
        return list(reversed(path))

    def solve(self):
        """Run A*; on success store the start-to-goal path in self.path
        and return True, otherwise return False."""
        while len(self.open_set) > 0:
            x_curr = self.find_best_f_score()
            if x_curr == self.x_goal:
                self.path = self.reconstruct_path()
                return True
            self.open_set.remove(x_curr)
            self.closed_set.append(x_curr)
            for x_neigh in self.get_neighbors(x_curr):
                if x_neigh in self.closed_set:
                    continue
                tentative_g_score = self.g_score[x_curr] + self.distance(x_curr, x_neigh)
                if x_neigh not in self.open_set:
                    self.open_set.append(x_neigh)
                elif tentative_g_score > self.g_score[x_neigh]:
                    # Worse than the best known route to this neighbor.
                    continue
                self.came_from[x_neigh] = x_curr
                self.g_score[x_neigh] = tentative_g_score
                self.f_score[x_neigh] = tentative_g_score + self.distance(x_neigh, self.x_goal)
        return False
class DetOccupancyGrid2D(object):
    """Deterministic 2D occupancy grid made of axis-aligned rectangular obstacles.

    Each obstacle is a (lower_corner, upper_corner) pair; points on an
    obstacle boundary count as occupied.
    """

    def __init__(self, width, height, obstacles):
        self.width = width
        self.height = height
        self.obstacles = obstacles

    def is_free(self, x):
        """Return True when point *x* lies inside no obstacle rectangle."""
        for lo, hi in self.obstacles:
            if all(lo[d] <= x[d] <= hi[d] for d in range(len(x))):
                return False
        return True
| true |
f4a25394577050ac2791285b896dc37ddcdb0a58 | Python | ritua2/gib | /middle-layer/greyfish_storage/base_functions.py | UTF-8 | 2,541 | 2.953125 | 3 | [
"MIT"
] | permissive | """
BASICS
Contains a set of functions that are called accross the other APIs
"""
import os
import datetime, time
from pathlib import Path
import redis
import mysql.connector as mysql_con
def valid_key(ukey, username):
    """Validate a single-use user token (or the master greyfish key).

    Returns True for the master key, or when *ukey* is a token issued to
    *username*; a matching token is deleted, so it is single use.
    FIX: the cursor and connection are now closed via try/finally, so they
    no longer leak when a query raises.
    """
    if ukey == os.environ['greyfish_key']:
        return True

    ipt_db = mysql_con.connect(host=os.environ["URL_BASE"], port=6603, user=os.environ["MYSQL_USER"],
                               password=os.environ["MYSQL_PASSWORD"], database=os.environ["MYSQL_DATABASE"])
    try:
        cursor = ipt_db.cursor(buffered=True)
        try:
            cursor.execute("select username from greykeys where token=%s", (ukey,))
            user = None
            for row in cursor:
                user = row[0]
            if user is None:
                return False
            if user == username:
                # Deletes the token since it is single use
                cursor.execute("delete from greykeys where token=%s", (ukey,))
                ipt_db.commit()
                return True
            return False
        finally:
            cursor.close()
    finally:
        ipt_db.close()
def create_new_dirtag(fpl, exdic):
    """Insert an empty directory entry at path *fpl* inside the tree *exdic*.

    fpl   : list of path components; all but the last must already exist.
    exdic : nested dict of {dirname: {..., "files": [...]}}.
    Mutates and returns *exdic*.
    """
    parent = exdic
    for component in fpl[:-1]:
        parent = parent[component]
    parent[fpl[-1]] = {"files": []}
    return exdic
# Returns a dictionary showing all the files in a directory (defaults to working directory)
def structure_in_json(PATH = '.'):
    """Walk PATH recursively and return {dirname: {"files": [...], subdir: {...}}}.

    NOTE(review): path handling assumes '/' separators (POSIX) and relativizes
    via string replace; it also appears to rely on Path.glob yielding parent
    directories before their contents — confirm on the target platform.
    """
    FSJ = {PATH.split('/')[-1]:{"files":[]}}
    # Includes the current directory
    # Replaces everything before the user
    unpart = '/'.join(PATH.split('/')[:-1])+'/'
    # Each ff is the path relative to PATH's parent, split into components.
    for ff in [str(x).replace(unpart, '').split('/') for x in Path(PATH).glob('**/*')]:
        if os.path.isdir(unpart+'/'.join(ff)):
            create_new_dirtag(ff, FSJ)
            continue
        # Files get added to the list, files
        # Loops through the dict
        nwd = FSJ
        for hh in range(0, len(ff)-1):
            nwd = nwd[ff[hh]]
        nwd["files"].append(ff[-1])
    return FSJ
def l2_contains_l1(l1, l2):
    """Return the elements of *l1* missing from *l2* (order and duplicates kept);
    empty when l2 covers every element of l1."""
    missing = []
    for item in l1:
        if item not in l2:
            missing.append(item)
    return missing
def timformat():
    """UTC timestamp string: YYYY-MM-DD HH:MM:SS.XXXXXX (microsecond precision)."""
    return "{:%Y-%m-%d %H:%M:%S.%f}".format(datetime.datetime.utcnow())
| true |
1ab782cba214630c6b075db90da5b237d2e83332 | Python | akulakov/explore | /explore/avkutil.py | UTF-8 | 7,840 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""Miscellaneous small utility functions.
vol(vol=None) - Get or set volume using aumix.
progress(ratio, length=40, col=1, cols=("yellow", None, "cyan"),
nocol="=.")
Text mode progress bar.
yes(question, [default answer])
i.e. if yes("erase file?", 'n'): erase_file()
color(text, fg, [bg])
Colorize text using terminal color codes.
beep(times) - Beep terminal bell number of times.
ftime(seconds) - returns h:m:s or m:s if there's no hours.
Term() - terminal stuff
term.size() => height, width
term.clear() => clear terminal
term.getch() => get one char at a time
Andrei Kulakov <ak@silmarill.org>
"""
import os
# import commands
import time
# from string import join
try:
from termios import *
except ImportError:
from TERMIOS import *
from types import *
from sys import stdin, stdout
dbg = 1  # module-wide debug flag read by debug()
enable_color = 0  # per the module docstring, 0 should disable color() output
hotkeycol = "red"
# Terminal color-code table, e.g. {"black": "30", "red": "31", ...}; built by
# splitting the comma-separated "name code" pairs below.
colors = dict([c.split() for c in ( # make dict {"red": "31", ...}
    "black 30, red 31, green 32, brown 33, blue 34, purple 35, cyan 36, lgray 37, gray 1;30, "
    "lred 1;31, lgreen 1;32, yellow 1;33, lblue 1;34, pink 1;35, lcyan 1;36, white 1;37"
    ).split(', ')])
class AvkError(Exception):
    """Generic error for this module; str() shows the repr of the wrapped value."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
def debug(*msgs):
    """Print each message prefixed with '### ' when the module-level `dbg` flag is set."""
    if not dbg:
        return
    for msg in msgs:
        print ('### ', msg)
def replace(val, lst):
    """Apply each (pattern, replacement) pair in `lst` to `val`, in order."""
    result = val
    for pattern, replacement in lst:
        result = result.replace(pattern, replacement)
    return result
def split(fname):
    """Split a filename into a (name, extension) tuple.

    Returns (fname, None) when the name has no dot; otherwise the name is
    everything before the last dot and the extension excludes the dot.
    """
    parts = fname.split('.')
    if len(parts) == 1:
        return fname, None
    # The original `str.join(lst[:-1], '.')` raises TypeError on Python 3
    # (str.join takes the separator as `self`); use the instance method.
    return '.'.join(parts[:-1]), parts[-1]
def vol(vol=None):
    """ Set or show audio volume.

        Uses the external mixer `aumix`. One optional argument, vol, may
        be an int or a string. If a string, it can be of the form "+10".
        When `vol` is omitted, the current volume is queried and returned
        as a string (parsed from `aumix -vq` output).
    """
    if vol:
        os.system("aumix -v%s" % vol)
    else:
        # The Python 2 `commands` module was removed in Python 3 (and its
        # import is commented out above); subprocess.getoutput is the
        # direct replacement for commands.getoutput.
        import subprocess
        return subprocess.getoutput("aumix -vq").split()[1][:-1]
def progress(ratio, length=40, col=1, cols=("yellow", None, "cyan"), nocol="=."):
    """ Text mode progress bar.

        ratio    - current position / total (e.g. 0.6 is 60%)
        length   - bar size
        col      - when true, draw the bar with terminal colors via color()
        cols     - tuple: (elapsed, left, percentage num)
        nocol    - string of (elapsed, left) fill chars; "=." gives "====...."
    """
    # Clamp over-complete ratios so the bar never overflows its length.
    if ratio > 1:
        ratio = 1
    elchar, leftchar = nocol
    elapsed = int(round(ratio*length))
    left = length - elapsed
    # (An unused local `bar` string was built here originally; removed.)
    if col:
        return color(' '*elapsed, "gray", cols[0]) + color(' '*left, "gray", cols[1])
    else:
        return elchar*elapsed + leftchar*left
def yes(question, default=None):
    """ Get an answer for the question.

        Return 1 on 'yes' and 0 on 'no'; default may be set to 'y' or 'n';
        asks "Question? [Y/n]" (default is capitalized). Yy and Nn are
        acceptable. Question is asked until a valid answer is given.
    """
    y, n = "yn"
    if default:
        if default in "Yy":
            y = 'Y'
        elif default in "Nn":
            n = 'N'
        else:
            raise AvkError("Error: default must be 'y' or 'n'.")
    while 1:
        # raw_input() does not exist on Python 3; input() is the equivalent.
        answer = input("%s [%s/%s] " % (question, y, n))
        if default and not answer:
            # Empty answer: fall back to the default choice.
            return (1 if default in "Yy" else 0)
        else:
            if not answer:
                continue
            elif answer in "Yy":
                return 1
            elif answer in "Nn":
                return 0
def no(question, default=None):
    # Inverse of yes(): truthy when the user's answer is "no".
    return not yes(question, default)
def color(text, fg, bg=None, raw=0):
    """ Return colored text.

        Uses terminal color codes; set avk_util.enable_color to 0 to return plain un-colored text.
        If fg is a tuple/list, it's assumed to be (fg, bg). Both colors may be 'None'.
        Raw means return string in raw form - for writing to a file instead of printing to screen.
        Leave default if not sure.
    """
    # init vars
    xterm, bgcode = 0, ''
    if not enable_color or not fg:
        return text
    # types.TupleType / types.ListType no longer exist on Python 3 (the
    # `from types import *` above does not define them); use isinstance.
    if isinstance(fg, (tuple, list)):
        fg, bg = fg
    opencol, closecol = "\033[", "m"
    if raw:
        opencol, closecol = r"\[\033[", r"m\]"
    clear = opencol + '0' + closecol
    # .get() avoids a KeyError when TERM is not set in the environment.
    if os.environ.get("TERM") == "xterm":
        xterm = 1
    # create color codes
    if xterm and fg == "yellow":  # In xterm, brown comes out as yellow..
        fg = "brown"
    fgcode = opencol + colors[fg] + closecol
    if bg:
        if bg == "yellow" and xterm:
            bg = "brown"
        # Foreground codes 3x become the matching background codes 4x.
        try:
            bgcode = opencol + colors[bg].replace('3', '4', 1) + closecol
        except KeyError:
            pass
    return "%s%s%s%s" % (bgcode, fgcode, text, clear)
def beep(times, interval=1):
    """Sound the terminal bell `times` times, sleeping `interval` seconds (int or float) after each."""
    for _ in range(times):
        print ('\a')
        time.sleep(interval)
def ftime(seconds, suffixes=['y','w','d','','',''], separator=':', nosec=False):
""" Takes an amount of seconds and turns it into a human-readable amount of time.
ftime(953995) => 1w:04d:00:59:55
if `nosec` is True, seconds will be omitted from output.
adapted from code by: http://snipplr.com/users/wbowers/
"""
t = []
parts = [ (suffixes[0], 60 * 60 * 24 * 7 * 52),
(suffixes[1], 60 * 60 * 24 * 7),
(suffixes[2], 60 * 60 * 24),
(suffixes[3], 60 * 60),
(suffixes[4], 60),
(suffixes[5], 1)]
# for each time piece, grab the value and remaining seconds, and add it to the time string
if nosec:
del parts[-1]
for n, (suffix, length) in enumerate(parts):
value = int(seconds) / length
if value > 0 or t: # skip parts until we get first non-zero
seconds = seconds % length
fmt = "%02d%s"
if not t and n+1 < len(parts):
fmt = "%d%s" # don't pad the first part with zeroes
t.append(fmt % (value, suffix))
if not t: t = ['0s']
elif len(t) == 1 and not nosec: t[0] += 's'
return str.join(t, separator)
# print ftime(105, nosec=True)
class Term:
    """ Linux terminal management.

        clear    - calls os.system("clear")
        getch    - get one char at a time
        size     - return height, width of the terminal
    """
    def __init__(self):
        # Keep two copies of the tty attributes: `new_term` is mutated into
        # a non-canonical, no-echo ("curses") mode; `old_term` restores the
        # original settings.
        self.fd = stdin.fileno()
        self.new_term, self.old_term = tcgetattr(self.fd), tcgetattr(self.fd)
        # Index 3 is the lflag word: clear ICANON (line buffering) and ECHO.
        self.new_term[3] = (self.new_term[3] & ~ICANON & ~ECHO)

    def normal(self):
        """Set 'normal' terminal settings."""
        tcsetattr(self.fd, TCSAFLUSH, self.old_term)

    def clear(self):
        """Clear screen."""
        os.system("clear")

    def cline(self):
        """Clear line."""
        # Overwrite the current line with spaces (carriage return, no newline).
        stdout.write('\r' + ' '*self.size()[1])
        stdout.flush()

    def curses(self):
        """Set 'curses' terminal settings. (noecho, something else?)"""
        tcsetattr(self.fd, TCSAFLUSH, self.new_term)

    def getch(self, prompt=None):
        """ Get one character at a time.

            NOTE: if the user suspends (^Z) running program, then brings it back to foreground,
            you have to instantiate Term class again. Otherwise getch() won't work. Even after
            that, the user has to hit 'enter' once before he can enter commands.
        """
        if prompt:
            stdout.write(prompt)
            stdout.flush()
        # Switch to raw-ish mode only for the single read, then restore.
        self.curses()
        c = os.read(self.fd, 1)
        self.normal()
        return c

    def size(self):
        """Return terminal size as tuple (height, width)."""
        import struct, fcntl
        # TIOCGWINSZ fills a struct winsize (4 unsigned shorts); only the
        # first two fields (rows, cols) are needed.
        h, w = struct.unpack("hhhh", fcntl.ioctl(0, TIOCGWINSZ, "\000"*8))[0:2]
        if not h:
            # Fall back to a conventional 24x80 terminal if ioctl reports 0.
            h, w = 24, 80
        return h, w
| true |
58f576c5b45b4443e3b49bee925dfdfcaebd4c69 | Python | mirjanic/RLudo | /src/players/NeuralNets/reinforce.py | UTF-8 | 2,451 | 2.96875 | 3 | [] | no_license | import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# Constants
GAMMA = 0.9
LR = 0.003
DROPOUT = 0.6
class ReinforceNet(nn.Module):
    """Two-hidden-layer MLP policy network mapping states to action probabilities."""

    def __init__(self, state_size, action_size):
        super(ReinforceNet, self).__init__()
        # DROPOUT is the module-level dropout probability defined above.
        self.neuralnet = nn.Sequential(
            nn.Linear(state_size, 200),
            nn.Dropout(p=DROPOUT),
            nn.ReLU(),
            nn.Linear(200, 30),
            nn.Dropout(p=DROPOUT),
            nn.ReLU(),
            nn.Linear(30, action_size))

    def forward(self, x):
        # Pass `dim` explicitly: calling F.softmax without it is deprecated
        # and relies on implicit dimension inference. dim=-1 normalizes
        # over the action logits for the 1-D/2-D inputs used here.
        return F.softmax(self.neuralnet(x), dim=-1)
class ReinforceAgent(object):
    """REINFORCE (policy-gradient) agent wrapping a ReinforceNet policy."""

    def __init__(self, state_size, action_size):
        self.model = ReinforceNet(state_size, action_size)
        self.num_actions = action_size
        # LR is the module-level learning rate.
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=LR)

    def forward(self, x):
        """Return the action-probability tensor for state tensor `x`."""
        return self.model(x)

    def backward(self, rewards, log_probs):
        """
        Implements the REINFORCE algorithm for policy gradient.

        :param rewards: Reward history
        :param log_probs: Log-prob history
        :return: None
        """
        # Compute discounted returns G_t = sum_k GAMMA**k * r_{t+k} in a
        # single reverse pass (O(n)); the original nested loop was O(n^2).
        discounted_rewards = []
        running = 0.0
        for r in reversed(rewards):
            running = r + GAMMA * running
            discounted_rewards.append(running)
        discounted_rewards.reverse()

        discounted_rewards = torch.tensor(discounted_rewards)
        # Normalize returns to reduce gradient variance.
        discounted_rewards = (discounted_rewards - discounted_rewards.mean()) / (
            discounted_rewards.std() + 1e-9)

        policy_gradient = []
        for log_prob, gt in zip(log_probs, discounted_rewards):
            policy_gradient.append(-log_prob * gt)

        self.optimizer.zero_grad()
        policy_gradient = torch.stack(policy_gradient).sum()
        policy_gradient.backward()
        self.optimizer.step()

    def get_action(self, state):
        """
        Runs a state through NN to get action probabilities
        :param state: Current state
        :return: Sampled action index and its log probability
        """
        # Variable() is a deprecated no-op on modern PyTorch; pass the
        # tensor directly.
        probs = self.forward(state)
        # Sample an action from the predicted distribution (not argmax).
        highest_prob_action = np.random.choice(
            self.num_actions, p=np.squeeze(probs.detach().numpy()))
        log_prob = torch.log(probs.squeeze(0)[highest_prob_action])
        return highest_prob_action, log_prob
| true |
4d65919c77322a4fd20067f8154b5aabfd6395ac | Python | mgrbic12/i210 | /mgrbic_HW4_416.py | UTF-8 | 260 | 3.953125 | 4 | [] | no_license | #Getting the user input
first = input("Enter first word: ")
print()
second = input("Enter second word: ")
print()
third = input("Enter third word: ")
print()
#Checking to see if first < second < third
if first <= second <= third:
print(True)
| true |
1acc3bb840602700735e353b084b8238c553e716 | Python | AllenHW/nengo-extras | /nengo_extras/dists.py | UTF-8 | 6,665 | 2.890625 | 3 | [] | no_license | from __future__ import absolute_import
import numpy as np
from nengo.dists import Distribution
from nengo.params import NdarrayParam, NumberParam, TupleParam
def gaussian_icdf(mean, std):
    """Return the inverse CDF (percent-point function) of a Gaussian N(mean, std**2)."""
    import scipy.stats as sps
    # ppf maps a probability in [0, 1] to the corresponding quantile.
    return lambda p: sps.norm.ppf(p, scale=std, loc=mean)
def loggaussian_icdf(log_mean, log_std, base=np.e):
    """Return the inverse CDF of a log-normal whose log (in `base`) has the given mean/std."""
    import scipy.stats as sps
    # Convert from `base` to natural log: scipy parameterizes the
    # lognormal by the std of ln(x), with scale = exp(mean of ln(x)).
    scale = base**log_mean
    sigma = log_std * np.log(base)
    return lambda p: sps.lognorm.ppf(p, sigma, scale=scale)
def uniform_icdf(low, high):
    """Return the inverse CDF of the uniform distribution on [low, high]."""
    def icdf(p):
        # Linear interpolation: p=0 -> low, p=1 -> high.
        return low + p * (high - low)
    return icdf
class Concatenate(Distribution):
    """Concatenate distributions to form an independent multivariate"""

    # Tuple of the component distributions, one block of columns each.
    distributions = TupleParam('distributions', readonly=True)
    # Total output dimensionality (sum of component dimensionalities).
    d = NumberParam('d', low=1, readonly=True)

    def __init__(self, distributions):
        super(Concatenate, self).__init__()
        self.distributions = distributions

        # --- determine dimensionality
        # Draw one seeded throwaway sample from each component and count
        # the resulting columns to find the concatenated dimensionality.
        rng = np.random.RandomState(0)
        s = np.column_stack([d.sample(1, rng=rng) for d in self.distributions])
        self.d = s.shape[1]

    def sample(self, n, d=None, rng=np.random):
        # `d` is accepted for Distribution API compatibility but must
        # match the dimensionality computed at construction time.
        assert d is None or d == self.d
        return np.column_stack(
            [dist.sample(n, rng=rng) for dist in self.distributions])
class MultivariateCopula(Distribution):
"""Generalized multivariate distribution.
Uses the copula method to sample from a general multivariate distribution,
given marginal distributions and copula covariances [1]_.
Parameters
----------
marginal_icdfs : iterable
List of functions, each one being the inverse CDF of the marginal
distribution across that dimension.
rho : array_like (optional)
Array of copula covariances [1]_ between parameters. Defaults to
the identity matrix (independent parameters).
See also
--------
gaussian_icdf, loggaussian_icdf, uniform_icdf
References
----------
.. [1] Copula (probability theory). Wikipedia.
https://en.wikipedia.org/wiki/Copula_(probability_theory)
"""
marginal_icdfs = TupleParam('marginal_icdfs', readonly=True)
rho = NdarrayParam('rho', shape=('*', '*'), optional=True, readonly=True)
def __init__(self, marginal_icdfs, rho=None):
import scipy.stats # we need this for sampling
assert scipy.stats
super(MultivariateCopula, self).__init__()
self.marginal_icdfs = marginal_icdfs
self.rho = rho
d = len(self.marginal_icdfs)
if not all(callable(f) for f in self.marginal_icdfs):
raise ValueError("`marginal_icdfs` must be a list of callables")
if self.rho is not None:
if self.rho.shape != (d, d):
raise ValueError("`rho` must be a %d x %d array" % (d, d))
if not np.array_equal(self.rho, self.rho.T):
raise ValueError(
"`rho` must be a symmetrical positive-definite array")
def sample(self, n, d=None, rng=np.random):
import scipy.stats as sps
assert d is None or d == len(self.marginal_icdfs)
d = len(self.marginal_icdfs)
# normalize rho
rho = np.eye(d) if self.rho is None else self.rho
stds = np.sqrt(np.diag(rho))
rho = rho / np.outer(stds, stds)
# sample from copula
x = sps.norm.cdf(sps.multivariate_normal.rvs(cov=rho, size=n))
# apply marginal inverse CDFs
for i in range(d):
x[:, i] = self.marginal_icdfs[i](x[:, i])
return x
class MultivariateGaussian(Distribution):
    """Multivariate Gaussian distribution with the given mean and covariance."""

    mean = NdarrayParam('mean', shape='d')
    cov = NdarrayParam('cov', shape=('d', 'd'))

    def __init__(self, mean, cov):
        super(MultivariateGaussian, self).__init__()
        self.d = len(mean)
        self.mean = mean

        # Accept a scalar (isotropic), vector (diagonal), or full matrix
        # covariance, normalizing all three forms to a d x d matrix.
        cov = np.asarray(cov)
        self.cov = (cov*np.eye(self.d) if cov.size == 1 else
                    np.diag(cov) if cov.ndim == 1 else cov)

    def sample(self, n, d=None, rng=np.random):
        assert d is None or d == self.d
        return rng.multivariate_normal(self.mean, self.cov, size=n)
class Mixture(Distribution):
distributions = TupleParam('distributions')
p = NdarrayParam('p', shape='*', optional=True)
def __init__(self, distributions, p=None):
super(Mixture, self).__init__()
self.distributions = distributions
if not all(isinstance(d, Distribution) for d in self.distributions):
raise ValueError(
"All elements in `distributions` must be Distributions")
if p is not None:
p = np.array(p)
if p.ndim != 1 or p.size != len(self.distributions):
raise ValueError(
"`p` must be a vector with one element per distribution")
if (p < 0).any():
raise ValueError("`p` must be all non-negative")
p /= p.sum()
self.p = p
def sample(self, n, d=None, rng=np.random):
dd = 1 if d is None else d
samples = np.zeros((n, dd))
nd = len(self.distributions)
i = (rng.randint(nd, size=n) if self.p is None else
rng.choice(nd, p=self.p, size=n))
c = np.bincount(i, minlength=nd)
for k in c.nonzero()[0]:
samples[i == k] = self.distributions[k].sample(c[k], d=dd, rng=rng)
return samples[:, 0] if d is None else samples
class Tile(Distribution):
"""Choose values in order from an array
This distribution is not random, but rather tiles an array to be a
particular size. This is useful for example if you want to pass an array
for a neuron parameter, but are not sure how many neurons there will be.
Parameters
----------
values : array_like
The values to tile.
"""
values = NdarrayParam('values', shape=('*', '*'))
def __init__(self, values):
super(Tile, self).__init__()
values = np.asarray(values)
self.values = values.reshape(-1, 1) if values.ndim < 2 else values
def __repr__(self):
return "Tile(values=%s)" % (self.values)
def sample(self, n, d=None, rng=np.random):
out1 = d is None
d = 1 if d is None else d
nv, dv = self.values.shape
if n > nv or d > dv:
values = np.tile(self.values, (int(np.ceil(float(n) / nv)),
int(np.ceil(float(d) / dv))))
else:
values = self.values
values = values[:n, :d]
return values[:, 0] if out1 else values
| true |
f9dabe3523e53a0ea03e7c7237348c7d7a579a1e | Python | jaebradley/leetcode.py | /test_k_closest_points_to_origin.py | UTF-8 | 2,264 | 3.25 | 3 | [
"MIT"
] | permissive | from unittest import TestCase
from k_closest_points_to_origin import Solution
class TestEmptyPoints(TestCase):
def setUp(self) -> None:
self.points = []
def test_returns_empty_array_when_k_is_0(self):
self.assertListEqual(Solution().kClosest(self.points, 0), [])
def test_returns_empty_array_when_k_is_1(self):
self.assertListEqual(Solution().kClosest(self.points, 1), [])
class TestSinglePoint(TestCase):
def setUp(self) -> None:
self.points = [[1, 1]]
def test_returns_empty_array_when_k_is_0(self):
self.assertListEqual(Solution().kClosest(self.points, 0), [])
def test_returns_point_when_k_is_1(self):
self.assertListEqual(Solution().kClosest(self.points, 1), [[1, 1]])
class TestTwoPoints(TestCase):
def setUp(self) -> None:
self.points = [[1, 1], [2, 1]]
def test_returns_empty_array_when_k_is_0(self):
self.assertListEqual(Solution().kClosest(self.points, 0), [])
def test_returns_closest_point_when_k_is_1(self):
self.assertListEqual(Solution().kClosest(self.points, 1), [[1, 1]])
def test_returns_points_when_k_is_2(self):
points = Solution().kClosest(self.points, 2)
self.assertTrue(len(points), 2)
self.assertTrue([1, 1] in points)
self.assertTrue([2, 1] in points)
class TestThreePoints(TestCase):
def setUp(self) -> None:
self.points = [
[1, 1],
[1, 2],
[1, 3]
]
def test_returns_empty_array_when_k_is_0(self):
self.assertListEqual(Solution().kClosest(self.points, 0), [])
def test_returns_closest_point_when_k_is_1(self):
self.assertListEqual(Solution().kClosest(self.points, 1), [[1, 1]])
def test_returns_two_closest_points_when_k_is_2(self):
points = Solution().kClosest(self.points, 2)
self.assertTrue(len(points), 2)
self.assertTrue([1, 1] in points)
self.assertTrue([1, 2] in points)
def test_returns_three_closest_points_when_k_is_3(self):
points = Solution().kClosest(self.points, 3)
self.assertTrue(len(points), 3)
self.assertTrue([1, 1] in points)
self.assertTrue([1, 2] in points)
self.assertTrue([1, 3] in points)
| true |
0fcac16e80cdf05cce0db062fb782331a6f2bc2b | Python | NOAA-PMEL/EcoFOCI_Jupyter_Notebooks | /2020/KWood/ICOADS/icoaads_antarctica/download_ds548.0.py | UTF-8 | 2,376 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
#################################################################
# Python Script to retrieve 5 online Data files of 'ds548.0',
# total 29.17M. This script uses 'requests' to download data.
#
# Highlight this script by Select All, Copy and Paste it into a file;
# make the file executable and run it on command line.
#
# You need pass in your password as a parameter to execute
# this script; or you can set an environment variable RDAPSWD
# if your Operating System supports it.
#
# Contact zji@ucar.edu (Zaihua Ji) for further assistance.
#################################################################
import sys, os
import requests
def check_file_status(filepath, filesize):
    # Overwrite the current terminal line with the download progress of
    # `filepath`, expressed as a percentage of the expected `filesize`.
    sys.stdout.write('\r')
    sys.stdout.flush()
    size = int(os.stat(filepath).st_size)
    percent_complete = (size/filesize)*100
    sys.stdout.write('%.3f %s' % (percent_complete, '% Completed'))
    sys.stdout.flush()
# Try to get password
if len(sys.argv) < 2 and not 'RDAPSWD' in os.environ:
try:
import getpass
input = getpass.getpass
except:
try:
input = raw_input
except:
pass
pswd = input('Password: ')
else:
try:
pswd = sys.argv[1]
except:
pswd = os.environ['RDAPSWD']
url = 'https://rda.ucar.edu/cgi-bin/login'
values = {'email' : 'shaun.bell@noaa.gov', 'passwd' : pswd, 'action' : 'login'}
# Authenticate
ret = requests.post(url,data=values)
if ret.status_code != 200:
print('Bad Authentication')
print(ret.text)
exit(1)
dspath = 'https://rda.ucar.edu/dsrqst/BELL493088/'
filelist = [
'ICOADS_R3.0_Rqst493088_17900104-20010102.csv.gz',
'ICOADS_R3.0_Rqst493088_20010103-20130407.csv.gz',
'ICOADS_R3.0_Rqst493088_20130408-20141231.csv.gz',
'readme_imma1.bell493088.pdf',
'rdimma1_csv.f']
for file in filelist:
filename=dspath+file
file_base = os.path.basename(file)
print('Downloading',file_base)
req = requests.get(filename, cookies = ret.cookies, allow_redirects=True, stream=True)
filesize = int(req.headers['Content-length'])
with open(file_base, 'wb') as outfile:
chunk_size=1048576
for chunk in req.iter_content(chunk_size=chunk_size):
outfile.write(chunk)
if chunk_size < filesize:
check_file_status(file_base, filesize)
check_file_status(file_base, filesize)
print()
| true |
08c2946898f68db410d4c07f0cfc89bf2e165531 | Python | RosePasta/BugTypeBasedIRBL | /bench4bl/spring/shdp/sources/SHDP_2_3_0/spring-hadoop-build-tests/src/test/resources/org/springframework/data/hadoop/scripting/basic-script.py | UTF-8 | 616 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | from java.util import UUID
from org.apache.hadoop.fs import Path
print "Home dir is " + str(fs.homeDirectory)
print "Work dir is " + str(fs.workingDirectory)
print "/user exists " + str(fs.exists("/user"))
name = UUID.randomUUID().toString()
scriptName = "src/test/resources/test.properties"
fs.copyFromLocalFile(scriptName, name)
print Path(name).makeQualified(fs)
# use the shell
dir = "script-dir/"
if not fsh.test(dir):
fsh.mkdir(dir)
fsh.cp(name, dir)
fsh.chmodr(700, dir)
print "File content is " + str(fsh.cat(dir + name))
print str(fsh.ls(dir))
fsh.rmr(dir)
fs.getLength(name) | true |
0118117046f56bda1e82ca0d681073a961d80823 | Python | edervishaj/spotify-recsys-challenge | /utils/bot.py | UTF-8 | 4,449 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import time
import logging
from telegram.bot import Bot
from functools import wraps
import threading
token = '512720388:AAHjYnJvvNld3rb70J1vp40gDEiRdcPHxsE'
chat_id = "-262107883"
# chat2 = "-314364535"
chat2 = "-1001356251815"
def bot_wrap(f):
    """Decorator that reports start/end (with elapsed time) of `f` to the Telegram chat."""
    import functools

    direct = Bot(token=token)

    # functools.wraps preserves f's name/docstring on the wrapper; note the
    # messages below use f.__name__, so wrapping also keeps them accurate.
    @functools.wraps(f)
    def wrap(*args):
        direct.send_message(chat_id=chat_id, text="STARTED : "+f.__name__+" ")
        time1 = time.time()
        ret = f(*args)
        elapsed_time = time.strftime("%H:%M:%S", time.gmtime(time.time()-time1))
        direct.send_message(chat_id=chat_id, text="ENDED : "+f.__name__+" in: "+str(elapsed_time)+
                            "\nturn off the server please <3")
        return ret
    return wrap
class Bot_v1(object):
    """
    only start and end of the program. just writes in telegram when the algorithm is lauchend and when is finished

    # EXAMPLE BOT V1
    bot = bot_v1("slim")
    bot.start()
    ====_ do things ====
    bot.end()
    """
    # Class-level defaults; the constructor also accepts overrides.
    token = '512720388:AAHjYnJvvNld3rb70J1vp40gDEiRdcPHxsE'
    chat_id = "-262107883"

    def __init__(self, name, token="512720388:AAHjYnJvvNld3rb70J1vp40gDEiRdcPHxsE", chat_id="-262107883"):
        # `name` labels every message; start_time anchors elapsed-time reports.
        self.direct = Bot(token=token)
        self.chat_id = chat_id
        self.name = name
        self.start_time = time.time()

    def start(self):
        # Announce that the named run has started.
        self.direct.send_message(chat_id=self.chat_id, text="STARTED : "+self.name+" ")

    def end(self):
        # Announce completion with elapsed time since construction.
        elapsed_time = time.strftime("%H:%M:%S", time.gmtime(time.time()-self.start_time))
        self.direct.send_message(chat_id=self.chat_id, text="ENDED : "+self.name+" in: "+str(elapsed_time)+
                                 "\nturn off the server please <3")

    def send_message(self, text):
        # Send an arbitrary message prefixed with this bot's name.
        self.direct.send_message(chat_id=self.chat_id, text=self.name+":\n"+text)

    def error(self, error_message):
        # Report a failure of the named run.
        self.direct.send_message(chat_id=self.chat_id, text="ERROR : "+self.name+" blocked itself:\n"+error_message)
if __name__ == '__main__':
# EXAMPLE BOT V1
bot = Bot_v1("boh")
bot.end()
# class bot_v2(object):
#
# token = '512720388:AAHjYnJvvNld3rb70J1vp40gDEiRdcPHxsE'
# chat_id = "-262107883"
#
# def __init__(self, name):
# self.direct = Bot(token=bot.token)
# # self.updater = Updater(token=self.token)
# self.name = name
# self.logger = logging.getLogger(__name__)
# self.updater = Updater(self.token)
#
#
# def main(self):
# """Start the bot."""
# # Create the EventHandler and pass it your bot's token.
#
# logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# level=logging.INFO)
#
# dp = self.updater.dispatcher
#
# dp.add_handler(CommandHandler("start", bot.start))
# dp.add_handler(CommandHandler("help", bot.__help))
# dp.add_handler(CommandHandler("status", bot.__status))
#
# dp.add_error_handler(bot.__error)
#
#
# print("0")
# self.updater.start_polling()
#
# print("1")
# self.updater.idle()
# print("2")
#
#
# def end(self):
# elapsed_time = time.strftime("%H:%M:%S", time.gmtime(time.time()-self.start_time))
# self.updater.bot.send_message(chat_id=bot.chat_id, text="ENDED :"+self.name+" "+str(elapsed_time))
# self.updater.bot.send_message(chat_id=bot.chat_id, text="turn off the server please <3")
#
# def update_status(self, message):
# self.status_message = "LULLULASUDALSUDASLDUA LUL"
#
#
# def __error(self, update, error):
# self.logger.warning('Update "%s" caused error "%s"', update, error)
#
# def __start(self, update,asdasdasd):
# update.message.reply_text("STARTED :"+bot.name+" ")
# bot.start_time = time.time()
#
# def start(self, update):
# """Send a message when the command /start is issued."""
# update.message.reply_text('Hi!')
#
# def __help(self, update):
# """Send a message when the command /help is issued."""
# update.message.reply_text(chat_id=self.chat_id, text='comandi: \help \status \n quando finisce te lo dice lui '
# 'se hai messo la bot.end() ')
#
# def __status(self,update):
# update.message.reply_text("STATUS :"+self.name+"\n"+self.status_message)
#
# if __name__ == '__main__':
#
# bot1 = bot("slim?? sebaculo")
# bot1.main()
# print("1")
# bot1.update_status("epoca sarcazzo")
# bot1.end()
| true |
7fc15b0a2e3885031ef86c1d78971001d47b9c5a | Python | bahkobg/CodeWars | /SumOfDigits.py | UTF-8 | 179 | 3.21875 | 3 | [] | no_license | def digital_root(n: int) -> int:
root = sum([int(x) for x in str(n)])
if len(str(root)) == 1:
return root
return digital_root(root)
print(digital_root(16))
| true |
2f272d0f64de25bb6a7b8711c29d04181f29e59c | Python | juan518munoz/CS50x | /pset7/houses/roster.py | UTF-8 | 601 | 3.453125 | 3 | [] | no_license | import sys
import csv
from cs50 import SQL
# Check correct user input
if len(sys.argv) != 2:
print("Please specify a house")
exit()
# Query database for students in house
db = SQL("sqlite:///students.db")
# Print student's name, birth
text = db.execute("SELECT * FROM students WHERE house = ? ORDER BY last, first", sys.argv[1])
for student in text:
# First name
print(student["first"], end =" ")
# Middle name
if student["middle"] != None:
print(student["middle"], end =" ")
# Last
print(student["last"], end =", ")
# Birth
print(student["birth"]) | true |
b5ebdedda47f22bf97849e4aff1776cb2e83595f | Python | EliseCheng/Daily-Coding | /python/fileworks/test_filecontent.py | UTF-8 | 1,190 | 3.015625 | 3 | [] | no_license | # coding: utf-8
import struct
# 支持文件类型
# 用16进制字符串的目的是可以知道文件头是多少字节
# 各种文件头的长度不一样,少半2字符,长则8字符
def typeList():
    """Return a map of known file-header hex signatures to file-type names.

    Hex strings (rather than raw bytes) are used so the signature length in
    bytes can be derived from the string length.
    """
    signatures = {}
    signatures["514649FB"] = 'QCOW2'
    return signatures
# 字节码转16进制字符串
def bytes2hex(bytes):
    """Render a sequence of integer byte values as an upper-case hex string.

    Values whose hex form has an odd number of digits are zero-padded on
    the left, matching the signature strings returned by typeList().
    """
    chunks = []
    for value in bytes:
        piece = "%x" % value
        if len(piece) % 2:
            piece = "0" + piece
        chunks.append(piece)
    return "".join(chunks).upper()
# 获取文件类型
def filetype(filename):
    """Return the file type name for `filename` by matching known header signatures.

    Compares the file's leading bytes against every signature in
    typeList(); returns 'unknown' when nothing matches.
    """
    tl = typeList()
    ftype = 'unknown'
    # Binary mode is required: we compare raw header bytes. `with` ensures
    # the handle is closed even if an error occurs mid-loop.
    with open(filename, 'rb') as binfile:
        for hcode in tl.keys():
            # Two hex characters describe one byte. Integer division is
            # required: on Python 3, `len(hcode) / 2` is a float and breaks
            # both the struct format string and read() below.
            numOfBytes = len(hcode) // 2
            # Rewind for every candidate so each signature is read from
            # the start of the file.
            binfile.seek(0)
            hbytes = struct.unpack_from("B" * numOfBytes, binfile.read(numOfBytes))
            f_hcode = bytes2hex(hbytes)
            if f_hcode == hcode:
                ftype = tl[hcode]
                break
    return ftype
if __name__ == '__main__':
print filetype('/home/chengwen/Desktop/python-gflags_1.3-1.debian.tar.gz')
| true |
afef9a38b8acc1936a0926dbed9e7801d59841cc | Python | marshellhe/FreshmanTodo | /PythonLearning/SeleniumWebTest/easonhan007/button_dropdown.py | UTF-8 | 924 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from time import sleep
import os
if 'HTTP_PROXY' in os.environ: del os.environ['HTTP_PROXY']
dr = webdriver.Chrome()
#file_path = 'file:////' + os.path.abspath('button_dropdown.html')
file_path = 'file:///C:/Users/Joyce/PycharmProjects/eason/modal.html'
dr.get(file_path)
sleep(1)
#点击下拉菜单
dr.find_element_by_link_text('Info').click()
#找到dropdown-menu父元素
WebDriverWait(dr,15).until(lambda the_driver:the_driver.find_element_by_class_name('dropdown-menu').is_displayed())
#找到better than
menu = dr.find_element_by_class_name('dropdown-menu').find_element_by_link_text('better than')
menu.click()
sleep(3)
dr.quit()
"""
先找到button group的父div,class为btn-group的div,然后再找到下面所有的div(也就是button),返回text是second的div
""" | true |
0b28cb821fd1cad2c643224ecc21bbcb69e0a039 | Python | Nur99/yandex_lyceum | /2nd_year/WEB9.flask-sqlalchemy/home_work/query_6.py | UTF-8 | 920 | 2.546875 | 3 | [] | no_license | from flask import Flask
from data.db_session import global_init, create_session
from data.users import User
from data.jobs import Jobs
app = Flask(__name__)
app.config['SECRET_KEY'] = 'yandexlyceum_secret_key'
def main():
    """Print name and surname of the team leader(s) of the job(s) with the most collaborators."""
    # Database path/connection string is read from stdin.
    global_init(input())
    session = create_session()
    # dictionary of job_id and len of collaboration
    collaborations = {x.id: len(x.collaborators.split())
                      for x in session.query(Jobs).all()}
    max_size = max(collaborations.values())
    # job_id if len collaboration is max
    ids = [x for x in collaborations if collaborations[x] == max_size]
    # teamleader id if job id in ids
    temp = [x.team_leader for x in session.query(Jobs).all() if x.id in ids]
    # result
    teamleaders = session.query(User).filter(User.id.in_(temp)).all()
    for member in teamleaders:
        print(member.name, member.surname)
if __name__ == '__main__':
main()
| true |
09561e7252efaa96f45b17fde0b47078425cf60b | Python | faisaldialpad/hellouniverse | /Python/tests/trees/common.py | UTF-8 | 783 | 3.390625 | 3 | [
"MIT"
] | permissive | class Common:
@staticmethod
def serialize(root):
"""
:type root: TreeNode
:rtype: string
"""
pre_order = Common.__pre_order(root)
pre_order.append('#') # separator
pre_order.extend(Common.__in_order(root))
return ",".join(pre_order)
@staticmethod
def __in_order(root):
if not root:
return []
left = Common.__in_order(root.left)
left.append(str(root.val))
left.extend(Common.__in_order(root.right))
return left
@staticmethod
def __pre_order(root):
if not root:
return []
ret = [str(root.val)]
ret.extend(Common.__pre_order(root.left))
ret.extend(Common.__pre_order(root.right))
return ret
| true |
f6d748a722e7f2853c8eedce1772f904481e87f4 | Python | malanb5/m5_forecasting | /yj/Plotter.py | UTF-8 | 1,041 | 3.1875 | 3 | [] | no_license | import matplotlib.pyplot as plt
class Plotter:
    """Static plotting helpers built on matplotlib.pyplot (`plt`)."""

    @staticmethod
    def scatter(x, y, alpha):
        # Show a simple scatter plot interactively.
        plt.scatter(x, y, alpha=alpha)
        plt.show()

    @staticmethod
    def plotDf(df, fig_name):
        """Plot each row of `df` as a line, save to figures/<fig_name>, then show.

        Mutates `df` in place by dropping its 'index' column; the legend
        uses the remaining row labels.
        """
        labels = []
        df.drop(columns=["index"], inplace=True)
        for i, (name, row) in enumerate(df.iterrows()):
            # NOTE(review): this guards against a *row* labelled "index",
            # distinct from the "index" column dropped above -- confirm
            # both checks are really needed.
            if name != "index":
                plt.plot(row)
                labels.append(name)
        plt.legend(labels)
        plt.savefig("figures/%s"%(fig_name))
        plt.show()

    @staticmethod
    def plot_history(plot_dict):
        # Assumes `plot_dict` is a Keras-style History object exposing a
        # `.history` dict with accuracy/loss series -- TODO confirm.
        # list all data in history
        print(plot_dict.history.keys())
        # summarize history for accuracy
        plt.plot(plot_dict.history['accuracy'])
        plt.plot(plot_dict.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
        # summarize history for loss
        plt.plot(plot_dict.history['loss'])
        plt.plot(plot_dict.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'test'], loc='upper left')
        plt.show()
36958af1149320ac9284b65479d6f708f9d54790 | Python | neutron-L/PycharmProjects | /IntroductionToPragrammingUsingPython/ch01/Ex20.py | UTF-8 | 656 | 3.328125 | 3 | [] | no_license | import turtle
# 下底
turtle.forward(200)
turtle.left(45)
turtle.forward(80)
turtle.left(135)
turtle.forward(200)
turtle.left(45)
turtle.forward(80)
# 上底
turtle.right(135)
turtle.forward(80)
turtle.right(45)
turtle.forward(80)
turtle.right(45)
turtle.forward(200)
turtle.right(90)
turtle.forward(80)
turtle.right(45)
turtle.forward(80)
turtle.right(135)
turtle.forward(80)
turtle.left(90)
turtle.forward(200)
turtle.right(135)
turtle.forward(80)
turtle.right(45)
turtle.forward(200)
turtle.right(135)
turtle.forward(80)
turtle.right(45)
turtle.forward(200)
turtle.right(135)
turtle.forward(80)
turtle.right(135)
turtle.forward(80)
turtle.done() | true |
918bef2dee4254dc954ea9f530701dd3eb074c65 | Python | foleymd/boring-stuff | /regex/dot_star_caret_dollar.py | UTF-8 | 2,426 | 3.90625 | 4 | [] | no_license | # . * ^ $ characters
# ^ match the start and $ match the end
import re
# caret for beginning
begins_with_hello_regex = re.compile(r'^hello')
mo = begins_with_hello_regex.search('hello') #match
print(mo.group())
mo = begins_with_hello_regex.search('yo hello') #no match
print(mo)
# dollar for ending
ends_with_world_regex = re.compile(r'world$')
mo = ends_with_world_regex.search('yo world') #match
print(mo.group())
mo = ends_with_world_regex.search('yo world hello') # no match
print(mo)
# matching with pattern at start and at end with ^ and $
all_digits_regex = re.compile(r'^\d+$')
mo = all_digits_regex.search('12981298713987129387') #match
print(mo.group())
# with character in middle, doesn't match exactly
mo = all_digits_regex.search('129x1234') # no match
print(mo)
# wildcard . character
#single character wildcard
at_regex = re.compile(r'.at')
mo = at_regex.findall('cat hat mat flat') #doesn't return flat because it's only a single character before the 'at'
print(mo) # list object does not have a group
#multiple character wildcard
at_regex = re.compile(r'.{1,2}at') # one or two characters (this includes whitespace characters now)
mo = at_regex.findall('cat hat mat flat') #doesn't return flat because it's only a single character before the 'at'
print(mo) # list object does not have a group
# regex that gets a variable name of characters
name_regex = re.compile(r'First Name: (.*) Last Name: (.*)')
mo = name_regex.findall('First Name: Marjorie Last Name: Foley')
print(mo)
# .* uses greedy mode; .*? is non-greedy mode
serve = '<To serve humans> for dinner.>'
#nongreedy
nongreedy = re.compile(r'<(.*?)>')
mo = nongreedy.findall(serve)
print(mo)
#greedy
greedy = re.compile(r'<(.*)>')
mo = greedy.findall(serve)
print(mo)
# note that * is for any character for new line, so the example below will
# only print the first line
prime = 'Serve the public trust. \n Protect the innocent. \n Uphold the law.'
print(prime)
dot_star = re.compile(r'.*')
mo = dot_star.search(prime)
print(mo.group())
# how to get the dot to mean everything including new lines
dot_star = re.compile(r'.*', re.DOTALL) #second argument says it includes EVERYTHING
mo = dot_star.search(prime)
print(mo.group()) #prints all lines of the prime directive
# IGNORECASE will allow you to do just that
vowel_regex =re.compile(r'[aeiou]', re.IGNORECASE)
mo = vowel_regex.findall('aeio fdhse AIE dhd')
print(mo)
| true |
6dda35b641dfba4c8486168736fc96543a6911f0 | Python | antske/coref_draft | /multisieve_coreference/mention_data.py | UTF-8 | 11,094 | 2.515625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """
This module parses the term layer of a KAF/NAF object
"""
from __future__ import print_function
import os
from .offset_info import (
convert_term_ids_to_offsets,
get_offset,
get_offsets_from_span,
get_pos_of_term,
)
stop_words = []
def initiate_stopword_list(lang='nl'):
    """Populate the module-level ``stop_words`` list from the packaged
    resources/<lang>/stop_words.txt file (one stop word per line).

    NOTE(review): entries are appended, so calling this twice duplicates
    the list — presumably it is only called once at startup; confirm.
    """
    global stop_words
    resources = os.path.abspath(os.path.join(
        os.path.dirname(__file__),
        "resources"
    ))
    # ``with`` guarantees the file is closed even if reading raises,
    # unlike the previous explicit open()/close() pair.
    with open(os.path.join(resources, lang, 'stop_words.txt'), 'r') as stopfile:
        for line in stopfile:
            stop_words.append(line.rstrip())
class Cmention:
'''
This class covers information about mentions that is relevant for
coreference resolution.
`span` and other things store _offsets_.
'''
def __init__(
self,
id,
span,
head_offset=None,
head_pos=None,
number='',
gender='',
person='',
full_head=None,
relaxed_span=None,
entity_type=None,
in_quotation=False,
is_relative_pronoun=False,
is_reflective_pronoun=False,
coreference_prohibited=None,
modifiers=None,
appositives=None,
predicatives=None,
no_stop_words=None,
main_modifiers=None,
sentence_number='',
):
'''
Constructor of the mention
#TODO: revise so that provides information needed for some sieve;
#STEP 1: seive 3 needs option to remove post-head modifiers
:type span: list
:type head_offset: int
:type head_pos: str
:type number: str
:type gender: str
:type person: str
:type full_head: list
:type relaxed_span: list
:type entity_type: str
:type in_quotation: bool
:type is_relative_pronoun: bool
:type is_reflective_pronoun: bool
:type coreference_prohibited: list
:type begin_offset: str
:type end_offset: str
:type modifiers: list
:type appositives: list
:type predicatives: list
:type no_stop_words: list
:type main_modifiers: list
:type sentence_number: str
'''
self.id = id # confirmed
self.span = span
self.head_offset = head_offset
self.head_pos = head_pos
self.full_head = [] if full_head is None else full_head
self.begin_offset = self.span[0]
self.end_offset = self.span[-1]
self.sentence_number = sentence_number
self.relaxed_span = [] if relaxed_span is None else relaxed_span
self.no_stop_words = [] if no_stop_words is None else no_stop_words
self.coreference_prohibited = [] if coreference_prohibited is None \
else coreference_prohibited
self.modifiers = [] if modifiers is None else modifiers
self.main_modifiers = [] if main_modifiers is None else main_modifiers
self.appositives = [] if appositives is None else appositives
self.predicatives = [] if predicatives is None else predicatives
self.number = number
self.gender = gender
self.person = person
self.entity_type = entity_type
self.in_quotation = in_quotation
self.is_relative_pronoun = is_relative_pronoun
self.is_reflective_pronoun = is_reflective_pronoun
def __repr__(self):
return self.__class__.__name__ + '(' + \
'id={self.id!r}, ' \
'span={self.span!r}, ' \
'number={self.number!r}, ' \
'gender={self.gender!r}, ' \
'person={self.person!r}, ' \
'head_offset={self.head_offset!r}, ' \
'full_head={self.full_head!r}, ' \
'head_pos={self.head_pos!r}, ' \
'relaxed_span={self.relaxed_span!r}, ' \
'entity_type={self.entity_type!r}, ' \
'in_quotation={self.in_quotation!r}, ' \
'is_relative_pronoun={self.is_relative_pronoun!r}, ' \
'is_reflective_pronoun={self.is_reflective_pronoun!r}, ' \
'coreference_prohibited={self.coreference_prohibited!r}, ' \
'modifiers={self.modifiers!r}, ' \
'appositives={self.appositives!r}, ' \
'predicatives={self.predicatives!r}, ' \
'no_stop_words={self.no_stop_words!r}, ' \
'main_modifiers={self.main_modifiers!r}, ' \
'sentence_number={self.sentence_number!r}, ' \
')'.format(self=self)
def add_relaxed_span_offset(self, offset):
self.relaxed_span.append(offset)
def add_modifier(self, mod):
self.modifiers.append(mod)
def add_appositive(self, app):
self.appositives.append(app)
def add_predicative(self, pred):
self.predicatives.append(pred)
def add_no_stop_word(self, nsw):
self.no_stop_words.append(nsw)
def add_main_modifier(self, mmod):
self.main_modifiers.append(mmod)
def fill_gaps(self, full_content, allow_adding=lambda _: True):
"""
Find and fill gaps in the span of this mention.
:param full_content: list of things in spans for the whole document
:param allow_adding: (offset) -> bool function deciding whether a
missing term may be added or the gap should be
left as is.
"""
if len(self.span) >= 2:
start = full_content.index(self.span[0])
end = full_content.index(self.span[-1], start)
self.span = full_content[start:end + 1]
def create_mention(nafobj, constituentInfo, head, mid):
'''
Function that creates mention object from naf information
:param nafobj: the input naffile
:param constituentInfo: information about the constituent
:param head: the id of the constituent's head
:param mid: the mid (for creating a unique mention id
:return:
'''
head_offset = None if head is None else get_offset(nafobj, head)
span = constituentInfo.span
offset_ids_span = convert_term_ids_to_offsets(nafobj, span)
mention = Cmention(mid, span=offset_ids_span, head_offset=head_offset)
mention.sentence_number = get_sentence_number(nafobj, head)
# add no stop words and main modifiers
add_non_stopwords(nafobj, span, mention)
add_main_modifiers(nafobj, span, mention)
# mwe info
full_head_tids = constituentInfo.multiword
mention.full_head = convert_term_ids_to_offsets(nafobj, full_head_tids)
# modifers and appositives:
relaxed_span = offset_ids_span
for mod_in_tids in constituentInfo.modifiers:
mod_span = convert_term_ids_to_offsets(nafobj, mod_in_tids)
mention.add_modifier(mod_span)
for mid in mod_span:
if mid > head_offset and mid in relaxed_span:
relaxed_span.remove(mid)
for app_in_tids in constituentInfo.appositives:
app_span = convert_term_ids_to_offsets(nafobj, app_in_tids)
mention.add_appositive(app_span)
for mid in app_span:
if mid > head_offset and mid in relaxed_span:
relaxed_span.remove(mid)
mention.relaxed_span = relaxed_span
for pred_in_tids in constituentInfo.predicatives:
pred_span = convert_term_ids_to_offsets(nafobj, pred_in_tids)
mention.add_predicative(pred_span)
# set sequence of pos FIXME: if not needed till end; remove
# os_seq = get_pos_of_span(nafobj, span)
# mention.set_pos_seq(pos_seq)
# set pos of head
if head is not None:
head_pos = get_pos_of_term(nafobj, head)
mention.head_pos = head_pos
if head_pos in ['pron', 'noun', 'name']:
analyze_nominal_information(nafobj, head, mention)
begin_offset, end_offset = get_offsets_from_span(nafobj, span)
mention.begin_offset = begin_offset
mention.end_offset = end_offset
return mention
def add_main_modifiers(nafobj, span, mention):
'''
Function that creates list of all modifiers that are noun or adjective (possibly including head itself)
:param nafobj: input naf
:param span: list of term ids
:param mention: mention object
:return:
'''
main_mods = []
for tid in span:
term = nafobj.get_term(tid)
if term.get_pos() in ['adj','noun']:
main_mods.append(tid)
main_mods_offset = convert_term_ids_to_offsets(nafobj, main_mods)
mention.main_modifiers = main_mods_offset
def add_non_stopwords(nafobj, span, mention):
'''
Function that verifies which terms in span are not stopwords and adds these to non-stop-word list
:param nafobj: input naf (for linguistic information)
:param span: list of term ids
:param mention: mention object
:return:
'''
non_stop_terms = []
for tid in span:
my_term = nafobj.get_term(tid)
if not my_term.get_type() == 'closed' and not my_term.get_lemma().lower() in stop_words:
non_stop_terms.append(tid)
non_stop_span = convert_term_ids_to_offsets(nafobj, non_stop_terms)
mention.no_stop_words = non_stop_span
def analyze_nominal_information(nafobj, term_id, mention):
myterm = nafobj.get_term(term_id)
morphofeat = myterm.get_morphofeat()
identify_and_set_person(morphofeat, mention)
identify_and_set_gender(morphofeat, mention)
identify_and_set_number(morphofeat, myterm, mention)
set_is_relative_pronoun(morphofeat, mention)
def get_sentence_number(nafobj, head):
myterm = nafobj.get_term(head)
tokid = myterm.get_span().get_span_ids()[0]
mytoken = nafobj.get_token(tokid)
sent_nr = int(mytoken.get_sent())
return sent_nr
def identify_and_set_person(morphofeat, mention):
    """Set mention.person to '1', '2' or '3' when the morphofeat string
    contains that digit; lower person numbers win, nothing is set otherwise."""
    for person in ('1', '2', '3'):
        if person in morphofeat:
            mention.person = person
            return
def identify_and_set_number(morphofeat, myterm, mention):
    """Derive grammatical number ('ev' singular / 'mv' plural) from the
    morphological features and store it on the mention."""
    if 'ev' in morphofeat:
        mention.number = 'ev'
    elif 'mv' in morphofeat:
        mention.number = 'mv'
    elif 'getal' in morphofeat:
        # Number is underspecified ('getal'): fall back on the lemma of
        # the possessive pronoun to decide singular vs. plural.
        singular = ('haar', 'zijn', 'mijn', 'jouw', 'je')
        plural = ('ons', 'jullie', 'hun')
        lemma = myterm.get_lemma()
        if lemma in singular:
            mention.number = 'ev'
        elif lemma in plural:
            mention.number = 'mv'
def identify_and_set_gender(morphofeat, mention):
    """Store the grammatical gender on the mention when the morphofeat
    string names one; first matching marker wins, otherwise unchanged."""
    # NOTE(review): 'onz,' keeps its trailing comma as in the original
    # feature-string format — presumably to avoid substring collisions;
    # confirm against the NAF morphofeat spec.
    for marker, gender in (('fem', 'fem'), ('masc', 'masc'), ('onz,', 'neut')):
        if marker in morphofeat:
            mention.gender = gender
            return
def set_is_relative_pronoun(morphofeat, mention):
    """Flag relative ('betr,') and reflexive ('refl,') pronoun features
    on the mention; both checks are independent and only ever set True."""
    markers = (
        ('betr,', 'is_relative_pronoun'),
        ('refl,', 'is_reflective_pronoun'),
    )
    for marker, attr in markers:
        if marker in morphofeat:
            setattr(mention, attr, True)
| true |
d3981f175f526c39200c8e4ffda2b3353a2c4598 | Python | mcclure/bitbucket-backup | /repos/64bot/contents/mario/run/float.py | UTF-8 | 331 | 3.28125 | 3 | [] | no_license | # Force mario continually in a particular direction
load("mario/basics")
class Float(Runnable):
def __init__(self, x, y, z):
self.set(x,y,z)
def set(self, x, y, z):
self.x = x
self.y = y
self.z = z
# Continually move.
def onBlank(self):
move_mario(self.x, self.y, self.z)
result(Float(0,0,40))
| true |
545b28738a1593a4ed35bd4ae33fa5d55a352075 | Python | srikanthpragada/PYTHON_19_MAR_2021 | /demo/funs/return_value.py | UTF-8 | 202 | 3.484375 | 3 | [] | no_license | def next_even(n):
if n % 2 == 0:
return n + 2
else:
return n + 1
def next_odd(n):
return n + 2 if n % 2 == 1 else n + 1
v = next_even(11)
# v = next_even('abc')
print(v)
| true |
96855badf23205b8d07541d7d05d5e8aae479407 | Python | thethomasmorris/CSC310 | /ThomasMorrisAssign3/ThomasMorrisAssign3Q3.py | UTF-8 | 2,259 | 4.46875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Thomas Morris
Assignment 3
October 17, 2019
Implement a queue using linked lists. You should use your own class with the methods
(enqueue(object), dequeue(), first(), len(), is_empty(), search()) and include a testing.
Note that search(object) returns True (or False) to check if an object is in the Queue
Note:
Pages 264-266 in Data Structures and Algorithms were referenced
"""
class _Node:
    """Singly linked node: a payload plus a reference to the next node."""

    def __init__(self, element, enext):
        self._element = element
        self._next = enext


class LinkedQueue:
    """FIFO queue backed by a singly linked list.

    The head is the front of the queue (dequeue side) and the tail is the
    back (enqueue side), so both operations run in O(1).
    """

    def __init__(self):
        self._head = None
        self._tail = None
        self._size = 0

    def len(self):
        """Number of elements currently stored."""
        return self._size

    def is_empty(self):
        """True when the queue holds no elements."""
        return self._size == 0

    def first(self):
        """Return (without removing) the front element; raise if empty."""
        if self.is_empty():
            raise Exception('Queue is empty')
        return self._head._element

    def dequeue(self):
        """Remove and return the front element; raise if empty."""
        if self.is_empty():
            raise Exception('Queue is empty')
        front = self._head
        self._head = front._next
        self._size -= 1
        if self.is_empty():
            # Queue drained: drop the stale tail reference too.
            self._tail = None
        return front._element

    def enqueue(self, e):
        """Append element *e* at the back of the queue."""
        node = _Node(e, None)
        if self.is_empty():
            self._head = node
        else:
            self._tail._next = node
        self._tail = node
        self._size += 1

    def search(self, key):
        """Linear scan: True if *key* equals any stored element."""
        cursor = self._head
        while cursor is not None:
            if cursor._element == key:
                return True
            cursor = cursor._next
        return False
if __name__ == '__main__':
    # Ad-hoc smoke test exercising every public LinkedQueue operation.
    l1 = LinkedQueue()
    l1.enqueue(5)
    l1.enqueue(73)
    l1.enqueue(9)
    l1.enqueue(8)
    l1.enqueue(16)
    l1.enqueue(57)
    l1.enqueue(95)
    print(l1.search(5))    # True: 5 is at the front
    l1.dequeue()           # removes 5
    print(l1.search(5))    # False: 5 was just dequeued
    print(l1.first())      # 73, the new front element
    print(l1.len())        # 6 elements remain
    print(l1.is_empty())   # False
    print(l1.search(73))   # True
| true |
12af014c58b2d131c5ac5d4a465838905cc68624 | Python | aleynakof/coinmarketcap | /para_formatla.py | UTF-8 | 838 | 3.390625 | 3 | [] | no_license | import locale
def standart_formatla(para):
    """Format *para* as a US-style dollar amount,
    e.g. 12345678.91 -> '$12,345,678.91'."""
    return "${:,.2f}".format(para)
def tl_formatla(para):
    """Format *para* Turkish-style: '.' as the thousands separator,
    ',' as the decimal mark, '$' prefix and 'TL' suffix."""
    # Separator choice kept as locals so they could be made parameters
    # later; with binlik == "." the first branch is always taken.
    binlik = "."
    ondalik = ","
    currency = "{:,.2f}".format(para)
    if binlik == ".":
        # US formatting yields e.g. "12,345,678.91"; swap the separators.
        main, fractional = currency.split('.')
        currency = '$' + main.replace(',', '.') + ondalik + fractional + "TL"
    else:
        currency = '$' + currency
    return currency
def locale_turkish_formatla(para):
    """Format *para* as Turkish currency via the system locale.

    Requires the "tr_TR.utf8" locale to be installed on the host;
    otherwise locale.setlocale raises locale.Error.
    NOTE: setlocale mutates process-wide state (LC_ALL) as a side effect.
    """
    locale.setlocale(locale.LC_ALL, "tr_TR.utf8")
    return locale.currency(para, grouping=True)
print(standart_formatla(12345678.91))
print(tl_formatla(12345678.91))
print(locale_turkish_formatla(12345678.91))
| true |
0666b1bceefb9d9c9bab9c7289659915467b92c9 | Python | shehbajbajwa/session3 | /venv/session2F.py | UTF-8 | 152 | 2.546875 | 3 | [] | no_license | #conditional constructs
total = 500
if total >= 500:
print("flat 40% off") #PEP , 4 spaces auto leave hundia
else:
print("sorry no discount")
| true |
253c1f7bc0bf57ed20050b50f676ca5b930fc5a1 | Python | NatsukiPrayer/TestTask | /main.py | UTF-8 | 941 | 2.890625 | 3 | [] | no_license | import Visualization
import Crawler
import Writer
import DBConnection
import time
if __name__ == '__main__':
t_start = time.time()
new = Crawler.Crawler('http://crawler-test.com/') #Здесь создаётся объект-паучок, в аргументах ссылка на сайт
new.main_crawl(2)
print(time.time() - t_start)
links = new.get_links()
print(len(links))
to_file = Writer.Writer('crawler_test', links) #Здесь создаётся сам .xml файл, в аргументах название и ссылки
to_file.write()
to_DB = DBConnection.DBConnect('sitemaps') #Здесь задаётся имя базы данных для записи
to_DB.to_table('crawler_test', links)
to_DB.commit()
'''pic = Visualization.Visual(links) #Эту часть расскомментировать при желании получить рисунок
pic.draw()'''
| true |
1f2154eea4478ce3ced1d6fd49b5f40032a4e0d9 | Python | aniozen/AppleMusicToMP3 | /main.py | UTF-8 | 3,688 | 2.90625 | 3 | [
"MIT"
] | permissive | import os
import os.path
import eyed3
import urllib.request
import urllib
import re
from datetime import date
import youtube_dl
import concurrent.futures
def metadata(path, title, artist, album):
audiofile = eyed3.load(str(path))
audiofile.tag.artist = artist
audiofile.tag.title = title
audiofile.tag.album = album
audiofile.tag.description = "from youtube"
audiofile.tag.save()
def shellquote(s):
    """Quote *s* so it is safe to use as a single POSIX shell word:
    wrap in single quotes, escaping any embedded single quote."""
    escaped = s.replace("'", "'\\''")
    return "'" + escaped + "'"
def parser(path):
names = [] # list of song names
with open(path, 'r') as f:
song = ''
artist = ''
playlist = ''
playlist_flag = 0
for line in f:
if '<key>Name</key><string>' in line and not playlist_flag:
song = line.strip().replace('<key>Name</key><string>', '').replace('</string>', '').replace('&',
'&')
elif '<key>Artist</key><string>' in line:
artist = line.strip().replace('<key>Artist</key><string>', '').replace('</string>', '').replace('&',
'&')
names.append((song, artist)) # artist always comes after name so if artist is found append to list
elif '<key>Playlists</key>' in line:
playlist_flag = 1
elif '<key>Name</key><string>' in line and playlist_flag:
playlist = line.strip().replace('<key>Name</key><string>', '').replace('</string>', '').replace('&',
'&')
return names, playlist
def download_audio(url, playlist, name, max_retries=3):
    """Download the audio track of *url* as an mp3 into the playlist folder.

    url: YouTube watch URL.
    playlist: shell-quoted playlist folder name (surrounding quotes are
        stripped when building the output path).
    name: (title, artist) tuple; the title (with '/' sanitized) becomes
        the output file name.
    max_retries: additional attempts after the first failure. The previous
        implementation recursed on *any* error with no limit, which could
        overflow the stack on a persistently failing URL.
    """
    ydl_opts = {
        'format': 'bestaudio/best',
        'extractaudio': True,
        'audioformat': "mp3",
        'outtmpl': f'{playlist[1:-1]}/{name[0].replace("/","_")}.',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192',
        }]
    }
    # Bounded retry loop instead of unbounded recursion.
    for attempt in range(max_retries + 1):
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            try:
                ydl.download([url])
                return
            except Exception:  # youtube_dl raises DownloadError and friends
                if attempt == max_retries:
                    raise
def search(song):
query = song[0] + " - " + song[1] + " official audio"
query = query.replace(" ", "+").encode()
html = urllib.request.urlopen("https://www.youtube.com/results?search_query=" + str(query))
video_ids = re.findall(r"watch\?v=(\S{11})", html.read().decode())
return "https://www.youtube.com/watch?v=" + video_ids[0]
def init(playlist):
try:
playlist = playlist[1:-1]
os.mkdir(playlist)
except FileExistsError:
playlist = str(input("A folder with that playlist name already exists\n please enter a new name:"))
init(shellquote(playlist))
return str(playlist)
def main(PATH):
names, playlist = parser(f'{PATH}')
playlist = shellquote(playlist)
playlist = init(playlist)
print(names)
links = []
with concurrent.futures.ThreadPoolExecutor(max_workers=len(names)) as executor:
for song in names:
link = search(song)
links.append(link)
executor.submit(download_audio, str(link), shellquote(playlist), song)
print(links)
albumname = f"{playlist} - {date.today()}"
for name in names:
metadata(rf"{playlist}/{name[0].replace('/','_')}.mp3", f"{name[0]}", name[1], albumname)
if __name__ == '__main__':
main(r'your_playlist.xml') # your playlist's xml file
| true |
b76c407d76a93d15e5ff99056be3f3b603cc5770 | Python | odidev/numcodecs | /numcodecs/base64.py | UTF-8 | 784 | 2.890625 | 3 | [
"MIT"
] | permissive | import base64 as _base64
from .abc import Codec
from .compat import ensure_contiguous_ndarray, ndarray_copy
class Base64(Codec):
    """Codec that encodes/decodes buffers as standard base64 bytes."""

    codec_id = "base64"

    def encode(self, buf):
        """Return *buf* (normalised to a contiguous ndarray) encoded
        as standard base64 bytes."""
        data = ensure_contiguous_ndarray(buf)
        return _base64.standard_b64encode(data)

    def decode(self, buf, out=None):
        """Decode base64 *buf*; copy the result into *out* when given,
        otherwise return the decoded bytes."""
        data = ensure_contiguous_ndarray(buf)
        if out is not None:
            out = ensure_contiguous_ndarray(out)
        decoded = _base64.standard_b64decode(data)
        return ndarray_copy(decoded, out)
| true |
a82077cf8ad4446a2f829cd19be6a7106cf1b602 | Python | alexandraback/datacollection | /solutions_2692487_0/Python/applepie/osmos.py | UTF-8 | 924 | 3.109375 | 3 | [] | no_license | from math import ceil, log
f = open('osmos.in');
out = open('osmos.out', 'w')
cases = int(f.readline());
def diff(start, end):
if start == end:
return 1
return ceil(log((end-1)/(start-1), 2))
def add(start, num):
return (start - 1) * (2** num) + 1
for CASE in range(1, cases+1):
def calc(size, start_index):
for i in range(start_index, num_motes):
mote = motes[i]
if size > mote:
size += mote
else:
# two cases, remove all, or add
num_to_add = diff(size, mote+1)
new_size = add(size, num_to_add) + mote
print(num_to_add, new_size)
return min(num_motes - i, num_to_add + calc(new_size, i + 1))
return 0
line = f.readline().split()
size = int(line[0])
num_motes = int(line[1])
motes = list(map(int, f.readline().split()))
motes.sort()
if size == 1:
ans = num_motes
else:
ans = calc(size, 0)
print(size, motes, ans)
out.write("Case #{0}: {1}\n".format(CASE, ans))
| true |
5d16dd1708155b81cb68aa5fc1880e30154cae9f | Python | sherlockwu/838_BigData | /stage2/toSubmit/test.py | UTF-8 | 1,353 | 3.015625 | 3 | [] | no_license | import os
import re
import sys
from nltk.corpus import wordnet as wn
def contains_The_Prefix(value,text,category):
if(value == 'Earth'):
print 'Test'
if(category == 1):
value = '<p>' + str(value) + '</p>'
else:
value = '<n>' + str(value) + '</n>'
prefix = '((\w+ ){2})' + str(value)
prefix = re.findall(prefix,text)
if len(prefix) == 0:
prefix = '((\w+ ){1})' + str(value)
prefix = re.findall(prefix,text)
for word1, word2 in prefix:
if word1 in ['The', 'the', 'The ','the ']:
return True
return False
#print contains_The_Prefix('Earth','the <p>Earth</p>',1)
#print wn.synsets('sit')[0].pos()
def contains_The_Prefix(value,text,category,skipAppending):
if(category == 1 and skipAppending == False):
value = '<p>' + str(value) + '</p>'
elif(skipAppending == False):
value = '<n>' + str(value) + '</n>'
prefix = '((\w+ ){2})' + str(value)
prefix = re.findall(prefix,text)
if len(prefix) == 0:
prefix = '((\w+ ){1})' + str(value)
prefix = re.findall(prefix,text)
for word1, word2 in prefix:
if word1 in ['The', 'the', 'The ','the '] or word2 in ['The', 'the', 'The ','the ']:
return True
return False
print contains_The_Prefix("Piece.","treasure in the world, One Piece.",0,True) | true |
8c4627d09013e8b30b103d45964ac51a419afb95 | Python | SophiaMeGooda/Happy | /My1stcode.py | UTF-8 | 96 | 2.796875 | 3 | [] | no_license | print("I love Pizza!")
print("pizza " * 33)
print("yum " * 66)
print("I'm full.")
print('yeah')
| true |
b57c5ce55c60506bfe7a0c2947b7790210d3582c | Python | newmansw93/intro_to_python_solns | /week3/day6-functions_practice/beginning_functions_practice/part2.py | UTF-8 | 4,795 | 4.28125 | 4 | [] | no_license | # The solution for question 1.
def get_month_season(month, unk_month):
    '''
    Input: Str - Abbreviation of month, Str - fallback for unknown months
    Output: Str - Season of inputted month, or unk_month if unrecognized
    Bug fix: the original duplicated the Summer branch and never handled
    Fall, so 'sep'/'oct'/'nov' fell through to the unknown-month fallback
    (month_info's docstring promises month_info('oct', 'season') -> 'Fall').
    '''
    if month in ('dec', 'jan', 'feb'):
        return 'Winter'
    elif month in ('mar', 'apr', 'may'):
        return 'Spring'
    elif month in ('jun', 'jul', 'aug'):
        return 'Summer'
    elif month in ('sep', 'oct', 'nov'):
        return 'Fall'
    return unk_month
def month_info(month, category):
    '''
    Input: Str - Abbreviation of month, Str - information category to get for month
    Output: Str - category information for the specified month
    Categories supported: 'full_name'   ex: month_info('jan', 'full_name') -> 'January'
                          'num_month'   ex: month_info('may', 'num_month') -> 5
                          'birth_stone' ex: month_info('jul', 'birth_stone') -> 'Ruby'
                          'season'      ex: month_info('oct', 'season') -> 'Fall'
    Unknown months yield 'Unknown month'; unknown categories yield
    'Unknown category'.
    '''
    unk_month = 'Unknown month'
    # Table-driven dispatch: each dict-backed category maps straight to
    # its lookup table; seasons need conditional logic, so they are
    # delegated to the helper.
    tables = {
        'full_name': {'jan': 'January', 'feb': 'February', 'mar': 'March', 'apr': 'April',
                      'may': 'May', 'jun': 'June', 'jul': 'July', 'aug': 'August',
                      'sep': 'September', 'oct': 'October', 'nov': 'November', 'dec': 'December'},
        'num_month': {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
                      'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12},
        'birth_stone': {'jan': 'Garnet', 'feb': 'Amethyst', 'mar': 'Aquamarine', 'apr': 'Diamond',
                        'may': 'Emerald', 'jun': 'Pearl', 'jul': 'Ruby', 'aug': 'Peridot',
                        'sep': 'Sapphire', 'oct': 'Opal', 'nov': 'Topaz', 'dec': 'Turquoise'},
    }
    if category == 'season':
        return get_month_season(month, unk_month)
    table = tables.get(category)
    if table is None:
        return 'Unknown category'
    return table.get(month, unk_month)
# This is the solution for question 2.
def perfect_square(num):
    '''
    Input: Int
    Output: Bool
    True when num is the square of an integer (e.g. 9 = 3 x 3),
    False otherwise (e.g. 8).
    '''
    root = num ** 0.5
    return int(root) == root
def next_perfect_square(num):
    '''
    Input: Int
    Output: Int
    Ex: next_perfect_square(10) --> -1
        next_perfect_square(9) ---> 16
        next_perfect_square(25) --> 36
        next_perfect_square(37) --> -1
    Returns the next perfect square greater than the input, or -1 when the
    input itself is not a perfect square.
    Improvement: computed in O(1) — if num == k*k the next perfect square
    is (k + 1) ** 2 — replacing the original's O(sqrt(n)) upward scan.
    '''
    if num < 0:
        # Negative numbers are never perfect squares.
        return -1
    root = num ** 0.5
    if root != int(root):
        return -1
    return (int(root) + 1) ** 2
# This is the solution for question 3.
import random
def flip_coin():
    '''
    Input: None
    Output: Str - 'H' for heads or 'T' for tails
    Draw one random.random() sample; heads when it lands above 0.5.
    '''
    if random.random() > 0.5:
        return 'H'
    return 'T'
def roll_die():
    '''
    Input: None
    Output: Int between 1 and 6 (inclusive)
    Simulate one roll of a fair six-sided die via random.randint.
    '''
    return random.randint(1, 6)
def flip_coin_roll_die(n_times):
    '''
    Input: Int - number of flip-and-roll trials to run
    Output: List - one (coin, die) outcome tuple per trial
    '''
    outcomes = []
    for _ in range(n_times):
        outcomes.append((flip_coin(), roll_die()))
    return outcomes
| true |
9dd161ce41b136e05e37be5d12accdee63cb647f | Python | joaogehlen91/calculo_numerico | /Trabalho2/questao1/questao_1_a.py | UTF-8 | 258 | 2.84375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import numpy as np
from math import *
h = 0.125
a = -3.0
b = 3.0
n = int(((b-a)/h)+1)
x = np.linspace(-3.0, 3.0, n)
y = 2.71828182846**-(x**2)
for i in range(1, n-1):
y[i] = 2*y[i]
AT = (h/2)*sum(y)
AT = AT*(2/sqrt(pi))
print AT | true |
760d099db311547e1bf29788c76c18e98c898769 | Python | Tobias-GH-Schulz/H.G.I.2 | /hand_gesture.py | UTF-8 | 5,210 | 2.671875 | 3 | [] | no_license | import cv2
import mediapipe as mp
import time
import HandTrackingModule as htm
import math
import numpy as np
from pymemcache.client import base
def hand_gesture_run():
# initialize the base client to store information used in different scripts
hand_client = base.Client(("localhost", 11211))
hand_client.set("rotation_mode", "initialize")
hand_client.set("zoom_mode", "initialize")
hand_client.set("update_info", {"initialize":1.25})
# initialize video
video = cv2.VideoCapture(0)
# initialize hand detector
detector = htm.handDetector(min_detection_confidence=0.85)
while True:
ret, frame = video.read()
if ret is not None:
frame = cv2.flip(frame, 1)
frame = detector.findHands(frame)
left_lmList, right_lmList = detector.findPosition(frame, draw=False)
tipIds = [4, 8, 12, 16, 20]
lenght = None
# check if there are two hands
if len(left_lmList) != 0 and len(right_lmList) != 0:
# count the number of raised fingers
fingers = []
for id in range(1, 5):
if left_lmList[tipIds[id]][2] < left_lmList[tipIds[id]-2][2]:
fingers.append(1)
if right_lmList[tipIds[id]][2] < right_lmList[tipIds[id]-2][2]:
fingers.append(1)
num_fingers = fingers.count(1)
# check if the second finger of both hands is up and all other fingers are down
for i in range(2,5):
if left_lmList[8][2] < left_lmList[6][2] and right_lmList[8][2] < right_lmList[6][2] and left_lmList[tipIds[id]][2] > left_lmList[tipIds[id]-2][2] and right_lmList[tipIds[id]][2] > right_lmList[tipIds[id]-2][2]:
# get the x,y coordinates for the tip of the second finger for the right hand
right_x, right_y = right_lmList[8][1], right_lmList[8][2]
# get the x,y coordinates for the tip of the second finger for the left hand
left_x, left_y = left_lmList[8][1], left_lmList[8][2]
# get center of distance between the two tips
cx, cy = (left_x+right_x)//2, (left_y+left_y)//2
# draw bigger circles on the two tips
cv2.circle(frame, (right_x, right_y), 15, (255,0,255), cv2.FILLED)
cv2.circle(frame, (left_x,left_y), 15, (255,0,255), cv2.FILLED)
# draw line between the two tips
cv2.line(frame, (left_x,left_y), (right_x,right_y), (255,0, 255), 3)
# use hypothenuse function to get distance between the two tips
lenght = math.hypot(abs(left_x-right_x), abs(left_y-right_y))
# transform distance into zoom scale of plotly
zoom = np.interp(lenght, [15, 700], [4.0, 0.000001])
# as soon as 3 or more fingers ar up, write state "zoom off" in a txt file
if num_fingers >= 3:
hand_client.set("zoom_mode", "zoom off")
# if only 2 fingers are up write the zoom value in a txt file
else:
hand_client.set("update_info", {"zoom":zoom})
# check if there is only one hand
if len(left_lmList) != 0 and len(right_lmList) <= 10:
# count the number of raised fingers
fingers = []
for id in range(1, 5):
if left_lmList[tipIds[id]][2] < left_lmList[tipIds[id]-2][2]:
fingers.append(1)
num_fingers = fingers.count(1)
# check if the second finger is up
if left_lmList[8][2] < left_lmList[6][2]:
# get the x,y coordinates for the tip of the second finger
x1, y1 = left_lmList[8][1], left_lmList[8][2]
# draw bigger circles on the two tips
cv2.circle(frame, (x1,y1), 15, (255,0,255), cv2.FILLED)
# set an area in which the rotation value is written
if 500 < x1 < 1450:
theta = np.interp(x1, [500, 1450], [0.000001, 10.0])
# as soon as 3 or more fingers ar up, write state "rotation off" in a txt file
if num_fingers >= 3:
hand_client.set("rotation_mode", "rotation off")
# if only 2 fingers are up write the rotation value in a txt file
else:
hand_client.set("update_info", {"rotate_xy":theta})
cv2.imshow("frame", frame)
cv2.waitKey(1)
if cv2.waitKey(30) & 0xFF == ord('q'):
break
else:
break
video.release()
cv2.destroyAllWindows()
cv2.waitKey(1)
if __name__ == "__main__":
hand_gesture_run() | true |
36f9e0a075433c3bd1a2434c126b531b07d026ff | Python | gatneil/azurerm | /examples/deploytemplate-cli.py | UTF-8 | 1,928 | 2.625 | 3 | [
"MIT"
] | permissive | # deploytemplate.py
# authenticates using CLI e.g. run this in the Azure Cloud Shell
# takes a deployment template URI and a local parameters file and deploys it
# Arguments: -u templateUri
# -p parameters JSON file
# -l location
# -g existing resource group
# -s subscription
import argparse
import azurerm
from haikunator import Haikunator
import json
import sys
# validate command line arguments
argParser = argparse.ArgumentParser()
argParser.add_argument('--uri', '-u', required=True,
action='store', help='Template URI')
argParser.add_argument('--params', '-p', required=True,
action='store', help='Parameters json file')
argParser.add_argument('--location', '-l', required=True,
action='store', help='Location, e.g. eastus')
argParser.add_argument('--rg', '-g', required=True,
action='store', help='Resource Group name')
argParser.add_argument('--sub', '-s', required=False,
action='store', help='subscription id (optional)')
args = argParser.parse_args()
template_uri = args.uri
params = args.params
rgname = args.rg
location = args.location
subscription_id = args.sub
# load parameters file
try:
with open(params) as params_file:
param_data = json.load(params_file)
except FileNotFoundError:
print('Error: Expecting ' + params + ' in current folder')
sys.exit()
access_token = azurerm.get_access_token_from_cli()
if subscription_id is None:
subscription_id = azurerm.get_subscription_from_cli()
deployment_name = Haikunator().haikunate()
print('Deployment name:' + deployment_name)
deploy_return = azurerm.deploy_template_uri(
access_token, subscription_id, rgname, deployment_name, template_uri, param_data)
print(json.dumps(deploy_return.json(), sort_keys=False, indent=2, separators=(',', ': ')))
| true |
c5912bb51e47d1de197af7ba98c4c40cee479ef1 | Python | jayceazua/wallbreakers_work | /data_structures/bst.py | UTF-8 | 6,450 | 3.90625 | 4 | [] | no_license | from bst_node import Node
class BST:
    """Binary search tree over comparable values; duplicates go to the right.

    Node construction and the per-node ``insert``/``search`` operations are
    delegated to the ``Node`` class imported from ``bst_node``.
    """

    def __init__(self):
        # An empty tree has no root node.
        self.root = None

    def insert(self, data):
        """Insert ``data`` into the tree.

        Best case (balanced tree): O(log n); worst case O(n).
        """
        if self.root:
            self.root.insert(data)
        else:
            self.root = Node(data)

    def find(self, data):
        """Return the stored value via Node.search, or False if absent/empty.

        O(log n) on a balanced tree.
        """
        if self.root:
            return self.root.search(data)
        return False

    def delete(self, data):
        """Remove one occurrence of ``data`` from the tree.

        Handles all three classic cases:
        Case 1: the node is a leaf.
        Case 2: the node has one child.
        Case 3: the node has two children -- reduced to case 1/2 by copying
        the in-order successor's value down, then deleting the successor.

        Returns a message string if ``data`` is not present.
        """
        if not self.find(data):
            return f"{data} is not in the binary search tree."
        # Locate the node to delete and remember its parent.
        parent = None
        current = self.root
        is_left_child = False
        while current and current.data != data:
            parent = current
            if data < current.data:
                current = current.left
                is_left_child = True
            else:
                current = current.right
                is_left_child = False
        if not current:
            return
        # Case 3: two children. Copy the in-order successor's value (the
        # leftmost node of the right subtree) into this node, then fall
        # through to delete the successor, which has no left child.
        if current.left and current.right:
            succ_parent = current
            succ = current.right
            while succ.left:
                succ_parent = succ
                succ = succ.left
            current.data = succ.data
            current = succ
            parent = succ_parent
            is_left_child = parent.left is succ
        # Cases 1 and 2: the node has at most one child; splice it out.
        child = current.left if current.left else current.right
        if parent is None:
            # Fixed: the original had `self.root == None`, a no-op comparison.
            self.root = child
        elif is_left_child:
            parent.left = child
        else:
            parent.right = child

    def in_order(self):
        """Depth-first: left subtree, node, right subtree (prints sorted order).

        Fixed: the original returned early from the recursion, so it printed
        at most one node and never visited right subtrees.
        """
        def order(node):
            if not node:
                return
            order(node.left)
            print(node.data, end=" ")
            order(node.right)
        order(self.root)

    def post_order(self):
        """Depth-first: left subtree, right subtree, then the node."""
        def order(node):
            if not node:
                return
            order(node.left)
            order(node.right)
            print(node.data, end=" ")
        order(self.root)

    def pre_order(self):
        """Depth-first: node, left subtree, then right subtree."""
        def order(node):
            if not node:
                return
            print(node.data, end=" ")
            order(node.left)
            order(node.right)
        order(self.root)

    def level_order(self):
        """Breadth-first traversal, printing nodes level by level."""
        if not self.root:
            return
        queue = [self.root]
        while queue:
            node = queue.pop(0)  # list-as-queue is O(n) per pop; fine for small trees
            print(node.data, end=" ")
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)

    def height(self):
        """Return the edge count of the longest root-to-leaf path (-1 if empty)."""
        def depth(node):
            if not node:
                return -1
            return 1 + max(depth(node.left), depth(node.right))
        return depth(self.root)

    def smallest_value(self):
        """Return the minimum value in the tree, or None if it is empty."""
        current = self.root
        if not current:
            return None
        while current.left:
            current = current.left
        return current.data

    def largest_value(self):
        """Return the maximum value in the tree, or None if it is empty."""
        current = self.root
        if not current:
            return None
        while current.right:
            current = current.right
        return current.data
| true |
a8797e746cc05bf87101e41032101dcf9a44fe63 | Python | Gateway2745/Web-Scraper | /index.py | UTF-8 | 1,133 | 2.734375 | 3 | [] | no_license | from selenium import webdriver
import bs4,re,string
# Scrape the Codeforces ratings page for the top rated users, then visit
# each profile and record the rating range shown on the profile chart.
browser=webdriver.Firefox()
url="https://codeforces.com/ratings"
browser.get(url)
html=browser.page_source
soup=bs4.BeautifulSoup(html,"html.parser")
ratings=soup.select(".ratingsDatatable .rated-user")
# Write one username per line.
file1=open("top200names.txt",'w')
for name in ratings:
    file1.write(name.get_text()+'\n')
file1.close()
file2=open('top200names.txt','r')
lines=file2.readlines()
file2.close()
file3=open("average.txt",'w')
count=0
totalsum=0
for i in range(200):
    # NOTE(review): lines[i] still ends with '\n', so the newline is embedded
    # in the URL -- confirm the request still resolves as intended.
    url="https://codeforces.com/profile/"+lines[i]
    browser.get(url)
    html=browser.page_source
    soup=bs4.BeautifulSoup(html,"html.parser")
    # Chart axis tick labels; first/last carry the min/max rating numbers.
    names=soup.find_all('div',class_='tickLabel',style=re.compile('left:'))
    try:
        str1=names[-1].get_text()
        str2=names[0].get_text()
        num1=re.search('[0-9]+',str1)
        num2=re.search('[0-9]+',str2)
        if(num1 is not None and num2 is not None):
            num1=int(num1.group(0))
            num2=int(num2.group(0))
            file3.write(lines[i].rstrip('\n')+":"+str(num1-num2+1)+'\n')
            if(num1-num2>=0):
                totalsum+=num1-num2+1
                count+=1
    except:
        # NOTE(review): bare except silently skips profiles without a chart;
        # it also hides any unexpected error.
        pass
# NOTE(review): raises ZeroDivisionError if no profile yielded numbers (count == 0).
file3.write('AVERAGE: '+str(totalsum/count))
file3.close()
bbe3c2fff2446461bc5de2759661e8dca11c412e | Python | vadosdubr/basic-python-selenium-test | /features/test.py | UTF-8 | 406 | 2.625 | 3 | [] | no_license | from selenium import webdriver
# Open Wikipedia, search for a phrase and check the result page title echoes it.
driver = webdriver.Chrome("D:\Install programs\Python and Selenium\chromedriver")
# NOTE(review): hard-coded Windows driver path; a raw string (r"...") would
# avoid accidental escape sequences.
driver.get("https://wikipedia.org")
search_field = driver.find_element_by_id("searchInput")
search_field.send_keys("test text")
search_button = driver.find_element_by_xpath("//*[@id='search-form']/fieldset/button")
search_button.click()
# The search-results page title should contain the query.
assert "test text" in driver.title
driver.quit()
d6a8409069e90c84ff413b95f5d11c1a1fd19ef1 | Python | Jeckjun/MyPythonLearnWay | /oneDay.py | UTF-8 | 428 | 3.671875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Demo of printf-style and str.format formatting (name is Chinese for "Zhang San").
name = '张三'
age = 20
# %5d pads the int to width 5; %8.2f pads the float to width 8 with 2 decimals.
print('Art %5d, piece per Unit %8.2f' %(453, 59.058))
print(complex(1, 2))
# Positional format fields; {0} is reused twice.
print('{0:s},,,,{1:d},,,,,,,{0:s}'.format(name, age))
# NOTE: the block below is a bare triple-quoted string used as commented-out
# code (an interactive loop converting input numbers to hex, with 'break' to
# exit). It is evaluated as a no-op string expression and discarded.
'''while 1:
    print('提示:break为退出指令')
    num = input('输入数字或指令:')
    print(type(num))
    if num=='break':
        break
    else:
        print('转化为十六进制为:%x' % int(num))
print('退出成功')'''
f01ec1f9404f8977ce867700527ade6a52e8e0ce | Python | rahulkrishnan98/med_ner | /pytorch/vocab.py | UTF-8 | 3,459 | 3.078125 | 3 | [] | no_license | from itertools import chain
from collections import Counter
import json
import os
import utils
class BuildVocab:
    """Bidirectional vocabulary mapping between tokens and integer ids.

    ``word2id`` maps each token to its id; ``id2word`` is the inverse,
    stored as a plain list indexed by id. Unknown tokens fall back to the
    id of '<UNK>' when that symbol is present.
    """

    def __init__(self, word2id, id2word):
        # Special symbols; <PAD>/<UNK> are appended by build().
        self.UNK = '<UNK>'
        self.PAD = '<PAD>'
        self.START = '<START>'
        self.END = '<END>'
        self.__word2id = word2id
        self.__id2word = id2word

    # -- accessors ---------------------------------------------------------
    def get_word2id(self):
        """Return the token -> id mapping."""
        return self.__word2id

    def get_id2word(self):
        """Return the id -> token list."""
        return self.__id2word

    def __getitem__(self, item):
        """Look up a token's id, falling back to <UNK> when available."""
        mapping = self.__word2id
        if self.UNK in mapping:
            return mapping.get(item, mapping[self.UNK])
        return mapping[item]

    def __len__(self):
        return len(self.__word2id)

    def id2word(self, idx):
        """Return the token stored at id ``idx``."""
        return self.__id2word[idx]

    # -- construction ------------------------------------------------------
    @staticmethod
    def build(data, max_vocab_size, frequency_if_exceeds):
        """Build a vocabulary from tokenized sentences.

        Parameters
        ----------
        data : list of list of str
            Tokenized sentences.
        max_vocab_size : int
            Maximum number of distinct tokens to keep.
        frequency_if_exceeds : int
            When the number of unique tokens exceeds ``max_vocab_size``,
            only tokens occurring at least this often are kept (most
            frequent first, truncated to ``max_vocab_size``).
        """
        counts = Counter(chain(*data))
        if len(counts) <= max_vocab_size:
            kept = list(counts)
        else:
            frequent = (w for w, c in counts.items()
                        if c >= frequency_if_exceeds)
            kept = sorted(frequent, key=counts.__getitem__,
                          reverse=True)[:max_vocab_size]
        # Special symbols always come last.
        kept = kept + ['<PAD>', '<UNK>']
        return BuildVocab(
            word2id={w: i for i, w in enumerate(kept)},
            id2word=kept,
        )

    # -- persistence -------------------------------------------------------
    def save(self, file_path):
        """Serialize both mappings to a JSON file at ``file_path``."""
        payload = {'word2id': self.__word2id, 'id2word': self.__id2word}
        with open(file_path, 'w', encoding='utf8') as f:
            json.dump(payload, f, ensure_ascii=False)

    @staticmethod
    def load(file_path):
        """Reconstruct a BuildVocab previously written by save()."""
        with open(file_path, 'r', encoding='utf8') as f:
            entry = json.load(f)
        return BuildVocab(word2id=entry['word2id'], id2word=entry['id2word'])
if __name__ == "__main__":
    # Build and persist sentence and tag vocabularies from the training file.
    # Paths and column names come from config.json in the working directory.
    with open("config.json", "r") as fp:
        config = json.load(fp)
    # utils.getData returns (sentences, tags), each a list of token lists.
    sentences, tags = utils.getData(
        file_path= config['train_file'],
        sent_ind= config['sentence_id_col'],
        tokens= config['tokens_col'],
        tags= config['tags_col']
    )
    sent_vocab = BuildVocab.build(sentences, int(config['max_size']), int(config['freq_cutoff']))
    tag_vocab = BuildVocab.build(tags, int(config['max_size']), int(config['freq_cutoff']))
    # Write both vocabularies as JSON next to their configured directories.
    sent_vocab.save(
        os.path.join(
            config['SENT_VOCAB_PATH'],
            "sent_vocab.json"
        )
    )
    tag_vocab.save(
        os.path.join(
            config['TAG_VOCAB_PATH'],
            "tag_vocab.json"
        )
    )
5e2b8b8443e4ecf9d7e193b65487531aaf1c4e43 | Python | Neriitox/Portfolio | /Fungal Friends.py | UTF-8 | 343 | 4.3125 | 4 | [] | no_license | # (yeast 1 hr later) = (yeast now) + 0.6× (yeast now)
def hours_to_rise(start_g, finish_g, rate=0.6):
    """Return the whole hours needed for a loaf to grow from start_g to
    finish_g grams, gaining `rate` (default 60%) of its mass each hour.

    Returns 0 when start_g already meets finish_g.
    """
    hours = 0
    mass = start_g
    while mass < finish_g:
        hours += 1
        mass += rate * mass  # same step as the original n = n + 0.6 * n
    return hours


# Prompt only when run directly, so the function stays importable/testable.
if __name__ == "__main__":
    s = float(input("Start (g): "))
    e = float(input("Finish (g): "))
    hrs = hours_to_rise(s, e)
    print(f"The loaf would need to rise for {hrs} hours.")
# Tells you how long it would take for a loaf of bread to rise to a certain weight
540894ad48c4fe123bc66b66fe81ae5d38cd162d | Python | menasheep/CodingDojo | /Python/BookReviews/apps/first_app/models.py | UTF-8 | 2,312 | 2.609375 | 3 | [] | no_license | from __future__ import unicode_literals
from django.db import models
import os, binascii, bcrypt
class UserManager(models.Manager):
    """Custom manager holding registration/login validation for User.

    Both methods return a dict for the views layer:
    {'status': bool, 'errorStr': [...]} on failure or
    {'status': True, 'userobj': User} on success.
    """
    def validateUser(self, postData):
        """Validate registration form data and create the User on success."""
        errorStr = []
        if len(postData['name']) < 3:
            errorStr.append("First name can't be less than 3 characters")
        if len(postData['username']) < 3:
            errorStr.append("Username can't be less than 3 characters")
        # `User.object` is the manager attribute defined on the model below
        # (conventionally named `objects` in Django projects).
        if User.object.filter(username=postData['username']):
            errorStr.append("Username is already registered")
        if len(postData["password"]) < 8:
            errorStr.append("Password must be at least 8 characters")
        if postData["password"] != postData["pw_confirm"]:
            errorStr.append("Password didn't match confirmation.")
        # create hashing
        # NOTE(review): the (expensive) bcrypt hash is computed even when
        # validation already failed; it is only used in the success branch.
        encrypted_pw = bcrypt.hashpw(postData["password"].encode(), bcrypt.gensalt())
        response_to_views = {}
        if errorStr:
            response_to_views['status'] = False
            response_to_views['errorStr'] = errorStr
        else:
            user = self.create(name = postData["name"], username = postData["username"], password = encrypted_pw)
            response_to_views['status'] = True
            response_to_views['userobj'] = user
        return response_to_views
    def loginUser(self, postData):
        """Check a username/password pair against the stored bcrypt hash."""
        errorStr = []
        user = User.object.filter(username=postData['username'])
        if not user:
            errorStr.append("Invalid username")
        else:
            # Re-hash the candidate password with the stored hash as salt;
            # a match means the password is correct.
            if bcrypt.hashpw(postData['password'].encode(), user[0].password.encode()) != user[0].password:
                errorStr.append("Password is incorrect.")
        response_to_views = {}
        if errorStr:
            response_to_views['status'] = False
            response_to_views['errorStr'] = errorStr
        else:
            response_to_views['status'] = True
            response_to_views['userobj'] = user[0]
        return response_to_views
class User(models.Model):
    """Registered user with a bcrypt-hashed password.

    NOTE(review): max_length on TextField is not enforced at the database
    level in Django (CharField would enforce it) -- confirm intent.
    """
    name = models.TextField(max_length=100)
    username = models.TextField(max_length=100)
    # Stores the bcrypt hash produced in UserManager, not the raw password.
    password = models.TextField(max_length=100)
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
    # Custom manager (conventionally named `objects` in Django).
    object = UserManager()
    def __str__(self):
        return self.name
| true |
3bd4d49b14a9fc8d1005889330097045966a192d | Python | JergeRG/SDEBARR | /Source/clean.py | UTF-8 | 495 | 2.578125 | 3 | [] | no_license | import csv
import re
from os import path
# Matches any character NOT in the allowed set (ASCII letters, Spanish
# accented vowels and n-tilde, digits, comma, period, space, newline);
# such characters are stripped. Compiled once instead of per document.
_KEEP_RE = re.compile('[^.,a-zA-ZñÑáÁéÉíÍóÓúÚ0-9. \n\.]')


def cleanData(col):
    """Add 'CleanReview'/'CleanSentences' fields to every document in *col*.

    Parameters
    ----------
    col : pymongo collection (or compatible object exposing find/update_one)
        Documents must carry 'ID', 'Review' and 'Sentences' fields.

    Side effects: issues one update_one per document, keyed by its 'ID'.
    """
    for doc in col.find({}):
        clean_review = _KEEP_RE.sub('', doc['Review'])
        clean_sentences = [_KEEP_RE.sub('', sentence)
                           for sentence in doc['Sentences']]
        col.update_one(
            {'ID': doc['ID']},
            {'$set': {'CleanReview': clean_review,
                      'CleanSentences': clean_sentences}},
        )
| true |
b7de1fa1bc38ce4a19afce4f2e0ed229ddd4415f | Python | SamanthaCorner/100daysPython-DAY-8 | /prime_number.py | UTF-8 | 631 | 4.59375 | 5 | [] | no_license | """
100 days of Python course
DAY 8
"""
# user defined function using the modulo for comparison
def prime_checker(number):
"""
Parameters
----------
number : TYPE
DESCRIPTION.
Returns
-------
None.
"""
is_prime = True
for i in range(2, number - 1):
if number % i == 0:
is_prime = False
if is_prime:
print("It's a prime number.")
else:
print("It's not a prime number.")
# this input gets passed into the function where the
# number is checked
n = int(input("Check this number: "))
prime_checker(number=n)
| true |
b11a47f2cf5525f863e8cb22b33c4d419e271fd6 | Python | fans656-deprecated/clrs | /11 max subarray.py | UTF-8 | 2,400 | 2.8125 | 3 | [] | no_license | from clrs import *
import random
def std(a):
    """Brute-force O(n^2) maximum subarray sum; reference oracle for tests.

    Assumes a is non-empty. Uses range (not Python 2's xrange) so the file
    also runs on Python 3, and accumulates each window sum directly instead
    of tracking beg/end indices and re-summing the slice.
    """
    best = a[0]
    n = len(a)
    for start in range(n):
        running = 0
        for end in range(start, n):
            running += a[end]
            if running > best:
                best = running
    return best
@check
def _(f):
    """Property test: a candidate answer f must match the brute-force oracle std
    on a random 100-element array (range replaces Python 2's xrange)."""
    a = [random.randint(-50, 50) for _ in range(100)]
    oa = list(a)  # defensive copy in case f mutates its input
    yield f(a) == std(oa)
@answer
def f(a):
    """Divide-and-conquer maximum subarray sum, O(n log n).

    Splits [beg, end) at mid; the answer is the max of the best subarray in
    each half and the best subarray crossing the midpoint.
    """
    def max_sub(a, beg, end):
        def max_crossing_sub(a, beg, mid, end):
            # Best sum of a window ending at mid-1, extending left.
            # Fixed bound: the original scanned down to index 0 instead of
            # stopping at beg, leaking outside the subrange (the top-level
            # result stayed correct, but subproblem values and the
            # O(n log n) bound did not).
            cur = a[mid - 1]
            ma_left = cur
            for i in range(mid - 2, beg - 1, -1):
                cur += a[i]
                if cur > ma_left:
                    ma_left = cur
            # Best sum of a window starting at mid, extending right.
            cur = a[mid]
            ma_right = cur
            for i in range(mid + 1, end):
                cur += a[i]
                if cur > ma_right:
                    ma_right = cur
            return ma_left + ma_right
        if end - beg <= 1:
            return a[beg]
        else:
            mid = (beg + end) // 2
            return max(max_sub(a, beg, mid),
                       max_sub(a, mid, end),
                       max_crossing_sub(a, beg, mid, end))
    return max_sub(a, 0, len(a))
@answer
def g(a):
    """Hybrid maximum subarray: brute force (std) below a cutoff of 60
    elements, divide and conquer above it."""
    def max_sub(a, beg, end):
        def max_crossing_sub(a, beg, mid, end):
            # Fixed bound: stop the leftward scan at beg (the original went
            # to index 0, outside the subrange); see f for details.
            cur = a[mid - 1]
            ma_left = cur
            for i in range(mid - 2, beg - 1, -1):
                cur += a[i]
                if cur > ma_left:
                    ma_left = cur
            cur = a[mid]
            ma_right = cur
            for i in range(mid + 1, end):
                cur += a[i]
                if cur > ma_right:
                    ma_right = cur
            return ma_left + ma_right
        if end - beg < 60:
            # Small subproblems: the O(n^2) oracle is faster in practice.
            return std(a[beg:end])
        else:
            mid = (beg + end) // 2
            return max(max_sub(a, beg, mid),
                       max_sub(a, mid, end),
                       max_crossing_sub(a, beg, mid, end))
    return max_sub(a, 0, len(a))
# O(n) solution
@answer
def h(a):
    """Kadane's algorithm: linear-time maximum subarray sum."""
    ma = ma_p = a[0]
    for i in range(1, len(a)):  # range replaces Python 2's xrange
        # ma_p = best sum of a subarray ending exactly at index i.
        ma_p = ma_p + a[i] if ma_p > 0 else a[i]
        ma = max(ma, ma_p)
    return ma
#a = [random.randint(-50,50) for _ in xrange(int(1000))]
#from f6 import timeit
#with timeit():
# g(a)
#with timeit():
# h(a)
| true |
933c728881ddf78d6b72d7d9dad08d5a49ce1766 | Python | Natanev92/Python | /Flask/fundamentals/html table/hello.py | UTF-8 | 1,383 | 3.515625 | 4 | [] | no_license | from flask import Flask, render_template
# Import Flask to allow us to create our app
app = Flask(__name__)
# Create a new instance of the Flask class called "app"
@app.route('/')
# The "@" decorator associates this route with the function immediately following
def hello_world():
return 'Hello There. General Kanobi!'
# Return the string '' as a response
if __name__=="__main__":
# Ensure this file is being run directly and not from a different module
@app.route('/hello/<name>')
def hello(name):
print(name)
return "Hello there, " + name
@app.route('/users/<username>/<id>')
def show_user_profile(username, id):
print(username)
print(id)
return "username: " + username + ", id: " + id
@app.route('/lists')
def render_lists():
user_info = [
{'name' : 'Michael', 'last_name' : 'Choi', 'full_name': 'Michael Choi'},
{'name' : 'John', 'last_name' : 'Legened', 'full_name':'John Legened' },
{'name' : 'Marc', 'last_name' : 'Reyes', 'full_name':'Marc Reyes'},
{'name' : 'Natan', 'last_name' : 'Villaseñor', 'full_name':'Natan Villaseñor'}
]
return render_template ("lists.html", random_numbers = [3,1,5], user = user_info)
# app.run(debug=True) should be the very last statement!
app.run(debug=True)
# Run the app in debug mode.
| true |
226f1b33e6f0a11203748538bf55ad64410979af | Python | its-Kumar/Python.py | /5_Functions/goldbach's_conjecture.py | UTF-8 | 692 | 3.59375 | 4 | [] | no_license | import random
import sys
def isprime(num):
    """Return True if num is prime; anything below 2 is not prime.

    Trial division by 2 and by odd numbers up to sqrt(num).
    """
    if num < 2:
        # Fixes the original, which reported 1 (and 0, negatives) as prime
        # because its loop range was empty for them.
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    for i in range(3, int(num ** 0.5) + 1, 2):
        if num % i == 0:
            return False
    return True
def goldbach(num):
    """Return a pair of primes (a, b) with a + b == num, or None.

    Deterministically scans a from 2 up to num // 2, replacing the
    original unbounded random search, which could spin forever (and for
    num == 2 only "succeeded" via the isprime(1) bug). Returns None when
    no decomposition exists (e.g. odd or < 4 input) instead of never
    terminating.
    """
    for a in range(2, num // 2 + 1):
        b = num - a
        if isprime(a) and isprime(b):
            return a, b
    return None
if __name__ == "__main__":
    # Goldbach's conjecture concerns even numbers, so odd input is rejected.
    num = int(input("Enter any even number : "))
    if num % 2 != 0:
        print("No is not even..!!")
        sys.exit()
    result = goldbach(num)
    # Prints the (a, b) prime pair returned by goldbach.
    print(result)
| true |
5b78560bb0d318b51fd003a6c20b453eafdf5d53 | Python | vigneshmoha/python-100daysofcode | /day008_caeser_cipher/prime_number.py | UTF-8 | 264 | 4.40625 | 4 | [] | no_license | def isPrimeNumber(num):
for i in range(2, num):
if num % i == 0:
return False
return True
num = int(input("Enter a number: "))
if isPrimeNumber(num):
print(f"{num} is a prime number")
else:
print(f"{num} is not a prime number") | true |
d395bf1760ada43313834ca58c31cdf5f8404523 | Python | sorgerlab/famplex | /famplex/api.py | UTF-8 | 16,829 | 3.234375 | 3 | [
"CC0-1.0"
] | permissive | """Provides utilities for working with FampPlex entities and relations
FamPlex is an ontology of protein families and complexes. Individual terms
are genes/proteins. There are higher level terms for families and
complexes. Terms can be connected by isa or partof relationships.
X isa Y expressing that X is a member of the Y family; Z partof W
expressing that Z is a constituent of the W complex.
Each term in the FamPlex ontology exists within a namespace and has an
identifier which is unique within that namespace. Individual genes and
proteins have either HGNC or Uniprot as a namespace. FamPlex has its own
namespace for families and complexes and the unique identifiers are
designed to be human readable. Identifiers for Uniprot are simply Uniprot
IDs. For HGNC the HGNC Symbol is used instead of the HGNC unique ID.
If X isa Y or X partof Y we say that X is a child of Y and Y is a parent of
X. We say Y is above X in the FamPlex ontology if there is a path of isa and
partof edges from X to Y. We also say that Y is an ancestor of X.
X is then below Y in the FamPlex ontology and we also say X is a descendant
of Y.
"""
import warnings
from typing import Container, Dict, List, Optional, Tuple
from famplex.graph import FamplexGraph
__all__ = ['in_famplex', 'parent_terms', 'child_terms', 'root_terms',
'ancestral_terms', 'descendant_terms', 'individual_members', 'isa',
'partof', 'refinement_of', 'dict_representation', 'equivalences',
'reverse_equivalences', 'all_root_terms']
try:
_famplex_graph = FamplexGraph()
except FileNotFoundError:
warnings.warn(
"Resource files are unavailable. If you've cloned this repository, "
"run the script \"update_resources.py\" at the top level to move the "
"resources into the package. See the README for more info.",
Warning)
def in_famplex(namespace: str, id_: str) -> bool:
    """Returns True if input term is a member of the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id_ : str
        Identifier for a term within namespace.

    Returns
    -------
    bool
        True if (namespace, id_) is a term in the FamPlex ontology;
        False otherwise (no exception is raised for unknown terms).
    """
    # Membership check delegated to the module-level ontology graph.
    return _famplex_graph.in_famplex(namespace, id_)
def parent_terms(namespace: str, id_: str,
                 relation_types: Optional[Container[str]] = None) \
        -> List[Tuple[str, str]]:
    """Return the terms immediately above a term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' (FamPlex) or
        'UP' (Uniprot).
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[Container[str]]
        Relation types ('isa' and/or 'partof') the input term may have
        with the returned parents. None imposes no restriction.
        Default: None

    Returns
    -------
    list
        (namespace, id) tuples for the immediate parents, sorted case
        insensitively first by namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    allowed = ('isa', 'partof') if relation_types is None else relation_types
    return [(parent_ns, parent_id)
            for parent_ns, parent_id, rel
            in _famplex_graph.parent_edges(namespace, id_)
            if rel in allowed]
def child_terms(namespace: str, id_: str,
                relation_types:
                Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return the terms immediately below a term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' (FamPlex) or
        'UP' (Uniprot).
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[Container[str]]
        Restrict edges to these relation types ('isa' and/or 'partof').
        None means both are included. Default: None

    Returns
    -------
    list
        (namespace, id) tuples for the immediate children, sorted case
        insensitively first by namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    allowed = ('isa', 'partof') if relation_types is None else relation_types
    return [(child_ns, child_id)
            for child_ns, child_id, rel
            in _famplex_graph.child_edges(namespace, id_)
            if rel in allowed]
def root_terms(namespace: str, id_: str) -> List[Tuple[str, str]]:
    """Returns top level terms above the input term

    Parameters
    ----------
    namespace : str
        Namespace for a term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id_ : str
        Identifier for a term within namespace. See the FamplexGraph
        class Docstring for more info.

    Returns
    -------
    list
        List of terms above the input that are top level families and/or
        complexes within the FamPlex ontology. Values are sorted in case
        insensitive alphabetical order, first by namespace and then by id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    # Delegates directly to the preloaded ontology graph.
    return _famplex_graph.root_terms(namespace, id_)
def ancestral_terms(namespace: str, id_: str,
                    relation_types:
                    Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return every term above a given term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' (FamPlex) or
        'UP' (Uniprot).
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[Container[str]]
        Restrict edges to these relation types ('isa' and/or 'partof').
        None means both are included. Default: None

    Returns
    -------
    list
        Terms in breadth first order, following relations upward from
        bottom to top. Edges from the same node are traversed in case
        insensitive alphabetical order (namespace, then id of target).

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    _famplex_graph.raise_value_error_if_not_in_famplex(namespace, id_)
    rels = ['isa', 'partof'] if relation_types is None else relation_types
    visited = list(_famplex_graph.traverse((namespace, id_), rels, 'up'))
    # The traversal yields the starting term first; exclude it.
    return visited[1:]
def descendant_terms(namespace: str, id_: str,
                     relation_types:
                     Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return every term below a given term in the FamPlex ontology.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' (FamPlex) or
        'UP' (Uniprot).
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[Container[str]]
        Restrict edges to these relation types ('isa' and/or 'partof').
        None means both are included. Default: None

    Returns
    -------
    list
        Terms in breadth first order, following relations backwards from
        top to bottom. Edges from the same node are traversed in case
        insensitive alphabetical order (namespace, then id of target).

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    _famplex_graph.raise_value_error_if_not_in_famplex(namespace, id_)
    rels = ['isa', 'partof'] if relation_types is None else relation_types
    visited = list(_famplex_graph.traverse((namespace, id_), rels, 'down'))
    # The traversal yields the starting term first; exclude it.
    return visited[1:]
def individual_members(namespace: str, id_: str,
                       relation_types:
                       Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return the terms beneath a term that are not families or complexes.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' (FamPlex) or
        'UP' (Uniprot).
    id_ : str
        Identifier for a term within namespace.
    relation_types : Optional[Container[str]]
        Restrict edges to these relation types ('isa' and/or 'partof').
        None means both are included. Default: None

    Returns
    -------
    list
        Terms beneath the input term that have no children themselves
        under the chosen relation types: with only 'isa' these are the
        individual genes of a family; with only 'partof', the members of
        a complex. For families of complexes (which carry both relation
        kinds), excluding one relation type can leave families or
        complexes in the result. Sorted case insensitively by namespace
        and then id.

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    rels = ['isa', 'partof'] if relation_types is None else relation_types
    leaves = [term for term in descendant_terms(namespace, id_, rels)
              if not child_terms(term[0], term[1], relation_types=rels)]
    leaves.sort(key=lambda term: (term[0].lower(), term[1].lower()))
    return leaves
def isa(namespace1: str, id1: str, namespace2: str, id2: str) -> bool:
    """Return true if one term has an isa relationship with another

    Parameters
    ----------
    namespace1 : str
        Namespace of first term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id1 : str
        Identifier of first term.
    namespace2 : str
        Namespace of second term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id2 : str
        Identifier of second term.

    Returns
    -------
    bool
        True if the term given by (namespace1, id1) has an isa relationship
        with the term given by (namespace2, id2). Will return False if
        either of (namespace1, id1) or (namespace2, id2) is not in the
        FamPlex ontology.
    """
    # Delegates to the graph's relation check restricted to 'isa' edges.
    return _famplex_graph.relation(namespace1, id1, namespace2, id2, ['isa'])
def partof(namespace1: str, id1: str, namespace2: str, id2: str) -> bool:
    """Return true if one term has a partof relationship with another

    Parameters
    ----------
    namespace1 : str
        Namespace of first term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id1 : str
        Identifier of first term.
    namespace2 : str
        Namespace of second term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id2 : str
        Identifier of second term.

    Returns
    -------
    bool
        True if the term given by (namespace1, id1) has a partof
        relationship with the term given by (namespace2, id2). Will return
        False if either of (namespace1, id1) or (namespace2, id2) is not in
        the FamPlex ontology.
    """
    # Delegates to the graph's relation check restricted to 'partof' edges.
    return _famplex_graph.relation(namespace1, id1,
                                   namespace2, id2, ['partof'])
def refinement_of(namespace: str, id1: str, namespace2: str, id2: str) -> bool:
    """Return true if either an isa or a partof relationship holds

    Parameters
    ----------
    namespace : str
        Namespace of first term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
        NOTE(review): this parameter is named ``namespace`` here, unlike
        ``namespace1`` in the sibling functions ``isa`` and ``partof``
        (the original docstring documented a non-existent ``namespace1``).
    id1 : str
        Identifier of first term.
    namespace2 : str
        Namespace of second term. This should be one of 'HGNC', 'FPLX' for
        FamPlex, or 'UP' for Uniprot.
    id2 : str
        Identifier of second term.

    Returns
    -------
    bool
        True if the term given by (namespace, id1) has either an isa or
        partof relationship with the term given by (namespace2, id2). Will
        return False if either of (namespace, id1) or (namespace2, id2) is
        not in the FamPlex ontology.
    """
    return _famplex_graph.relation(namespace, id1,
                                   namespace2, id2, ['isa', 'partof'])
def dict_representation(namespace: str,
                        id_: str) -> Dict[Tuple[str, str],
                                          List[Tuple[dict, str]]]:
    """Return a nested dictionary representation of a FamPlex term.

    Parameters
    ----------
    namespace : str
        Namespace for a term: one of 'HGNC', 'FPLX' (FamPlex) or
        'UP' (Uniprot).
    id_ : str
        Identifier for a term within namespace.

    Returns
    -------
    dict
        A single-key dict mapping the (namespace, id) tuple to a list of
        (nested representation, relation) pairs, one per child edge, in
        case insensitive alphabetical order of the target (namespace,
        then id). Leaf terms map to an empty list. Example:

        {('FPLX', 'ESR'): [({('HGNC', 'ESR1'): []}, 'isa'),
                           ({('HGNC', 'ESR2'): []}, 'isa')]}

    Raises
    ------
    ValueError
        If (namespace, id_) does not correspond to a term in FamPlex.
    """
    # Recurse over the child edges; an empty edge list naturally yields [].
    children = [(dict_representation(child_ns, child_id), relation)
                for child_ns, child_id, relation
                in _famplex_graph.child_edges(namespace, id_)]
    return {(namespace, id_): children}
def equivalences(fplx_id: str,
                 namespaces: Optional[Container[str]] = None) -> \
        List[Tuple[str, str]]:
    """Return equivalent terms from other namespaces for a FamPlex ID.

    Parameters
    ----------
    fplx_id : str
        A valid FamPlex ID.
    namespaces : Optional[Container[str]]
        When given, only equivalences whose namespace is in this
        container are returned. Default: None (no filtering).

    Returns
    -------
    list
        (namespace, id) tuples of equivalent terms from other namespaces.

    Raises
    ------
    ValueError
        If fplx_id is not an ID in the FamPlex ontology.
    """
    result = _famplex_graph.equivalences(fplx_id)
    if namespaces is None:
        return result
    return [(ns, xref_id) for ns, xref_id in result if ns in namespaces]
def reverse_equivalences(namespace: str, id_: str) -> List[str]:
    """Get equivalent FamPlex terms to a given term from another namespace

    Parameters
    ----------
    namespace : str
        Namespace of a term
    id_ : str
        id_ of a term

    Returns
    -------
    list
        List of FamPlex IDs for families or complexes equivalent to the
        term given by (namespace, id_). Presumably empty when there are
        none -- behavior is delegated to FamplexGraph; confirm there.
    """
    return _famplex_graph.reverse_equivalences(namespace, id_)
def all_root_terms() -> List[Tuple[str, str]]:
    """Returns all top level families and complexes in FamPlex

    Returns
    -------
    list
        List of tuples of the form ('FPLX', id) where id runs over all
        top level families and complexes in FamPlex. List is in alphabetical
        order by id.
    """
    # Exposes the graph's root_classes attribute directly.
    return _famplex_graph.root_classes
| true |
0463255976de81512791e385674d0a230e0abeae | Python | coderhh/go_30_minutes_a_day | /GoByExample/Pointers/swap.py | UTF-8 | 37 | 2.96875 | 3 | [
"MIT"
] | permissive | a = 3
b = 4
# NOTE(review): despite the filename (swap.py), this is not a plain swap:
# it performs a, b = b, a + b (a Fibonacci-style step), printing "4 7".
a,b = b,a+b
print(a,b)
| true |
4690f4bbc7128e1a98aa2d01234f8382410f2b0d | Python | pikuch/AOC19 | /intcode.py | UTF-8 | 5,306 | 2.953125 | 3 | [] | no_license | from collections import deque
class Intcode:
    """Advent-of-Code 2019 "Intcode" virtual machine.

    The program is a flat list of integers (``self.code``) interpreted at
    the program counter ``self.pc``.  Opcodes are dispatched through
    ``self.inst`` (keyed by two-digit opcode strings); ``self.rb`` is the
    relative base used by addressing mode 2.  Input and output go through
    FIFO deques so the machine can be paused and resumed.
    """
    def __init__(self):
        self.pc = 0            # program counter
        self.code = []         # program memory, grown lazily on demand
        self.rb = 0            # relative base for mode-2 addressing
        self.inputs = deque()  # pending input values (FIFO)
        self.outputs = deque() # produced output values (FIFO)
        # Opcode -> handler dispatch table (keys are two-digit strings).
        self.inst = {"01": self.add,
                     "02": self.mul,
                     "03": self.inp,
                     "04": self.outp,
                     "05": self.jit,
                     "06": self.jif,
                     "07": self.lt,
                     "08": self.eq,
                     "09": self.arb,
                     "99": self.halt}
        # Total instruction length (opcode + parameters) per opcode.
        self.inst_length = {"01": 4, "02": 4, "03": 2, "04": 2, "05": 3, "06": 3, "07": 4, "08": 4, "09": 2, "99": 1}
        self.state = "fresh"
        self.exit_on_output = False  # when True, pause after each output
        self.memory_allocations = 0      # stats: number of lazy memory growths
        self.instructions_executed = 0   # stats: dispatch-loop iterations
    def get_asm(self):
        """Return a crude disassembly listing of the loaded program.

        NOTE(review): after load() ``self.code`` holds ints, but
        ``self.inst`` / ``self.inst_length`` are keyed by two-digit
        *strings*, so the lookups below appear to raise KeyError —
        verify before relying on this disassembler.
        """
        lines = []
        i = 0
        while i < len(self.code):
            current_inst = self.code[i]
            line = f"{i:3d}-{i + self.inst_length[self.code[i]] - 1:3d} {self.inst[self.code[i]].__name__}\t" + \
                   " ".join(map(str, self.code[i+1:i+self.inst_length[self.code[i]]]))
            i += self.inst_length[self.code[i]]
            lines.append(line)
            if current_inst == 99:
                # Everything after the halt opcode is treated as raw data.
                lines.append(f"{i:3d}-{len(self.code)-1:3d} " + " ".join(map(str, self.code[i:])))
                break
        return "\n".join(lines)
    def load(self, code):
        """Load a comma-separated program string and reset the machine."""
        self.code = list(map(int, code.split(",")))
        self.inputs.clear()
        self.outputs.clear()
        self.rb = 0
        self.pc = 0
        self.state = "loaded"
    def set_input(self, value):
        # Replace any queued inputs with this single value.
        self.inputs.clear()
        self.inputs.append(value)
    def add_input(self, value):
        # Queue an additional input value.
        self.inputs.append(value)
    def get_output(self):
        # Oldest unread output first (FIFO).
        return self.outputs.popleft()
    def get_all_outputs(self):
        """Drain the output queue into a single space-separated string."""
        outp = " ".join(map(str, self.outputs))
        self.outputs.clear()
        return outp
    def decode(self, code):
        """Split an instruction integer into (opcode, modes).

        Zero-pads to five digits: the last two characters are the opcode;
        the leading three, reversed, are the parameter modes in argument
        order (mode of parameter k at modes[k]).
        """
        s = f"{code:05d}"
        return s[-2:], s[2::-1]
    def get_addr(self, address, mode):
        """Resolve the effective address of the parameter stored at
        ``address`` according to its addressing mode."""
        if mode == "0": # position mode
            return self.code[address]
        elif mode == "1": # immediate mode
            return address
        else: # relative mode
            return self.rb + self.code[address]
    def run(self):
        """Execute until halt, missing input, or (if enabled) an output.

        An IndexError from an out-of-range memory access grows memory by
        1000 zero cells; pc is unchanged, so the instruction is retried
        on the next loop iteration.
        """
        self.state = "running"
        while self.state == "running":
            try:
                instruction, modes = self.decode(self.code[self.pc])
                self.inst[instruction](modes)
            except IndexError:
                self.code.extend([0]*1000)
                self.memory_allocations += 1
            self.instructions_executed += 1
    def step(self):
        """Execute a single instruction (same lazy memory growth as run)."""
        self.state = "stepping"
        try:
            instruction, modes = self.decode(self.code[self.pc])
            self.inst[instruction](modes)
        except IndexError:
            self.code.extend([0] * 1000)
            self.memory_allocations += 1
        self.instructions_executed += 1
    def halt(self, modes):
        # Opcode 99: stop the machine.
        self.state = "halted"
    def inp(self, modes):
        """Opcode 03: pop one queued input into the target address."""
        if len(self.inputs):
            # Trigger memory growth *before* consuming the input value.
            if self.get_addr(self.pc + 1, modes[0]) >= len(self.code):
                raise IndexError # so we don't throw away input!
            self.code[self.get_addr(self.pc + 1, modes[0])] = self.inputs.popleft()
            self.pc += 2
        else:
            # No input available: pause; caller refills inputs and resumes.
            self.state = "input_wait"
    def outp(self, modes):
        """Opcode 04: append the parameter's value to the output queue."""
        self.outputs.append(self.code[self.get_addr(self.pc + 1, modes[0])])
        self.pc += 2
        if self.exit_on_output:
            self.state = "output"
    def add(self, modes):
        # Opcode 01: param3 <- param1 + param2.
        self.code[self.get_addr(self.pc + 3, modes[2])] = self.code[self.get_addr(self.pc + 1, modes[0])] +\
                                                          self.code[self.get_addr(self.pc + 2, modes[1])]
        self.pc += 4
    def mul(self, modes):
        # Opcode 02: param3 <- param1 * param2.
        self.code[self.get_addr(self.pc + 3, modes[2])] = self.code[self.get_addr(self.pc + 1, modes[0])] *\
                                                          self.code[self.get_addr(self.pc + 2, modes[1])]
        self.pc += 4
    def jit(self, modes):
        # Opcode 05: jump-if-true (param1 != 0 -> pc = param2).
        if self.code[self.get_addr(self.pc + 1, modes[0])] != 0:
            self.pc = self.code[self.get_addr(self.pc + 2, modes[1])]
        else:
            self.pc += 3
    def jif(self, modes):
        # Opcode 06: jump-if-false (param1 == 0 -> pc = param2).
        if self.code[self.get_addr(self.pc + 1, modes[0])] == 0:
            self.pc = self.code[self.get_addr(self.pc + 2, modes[1])]
        else:
            self.pc += 3
    def lt(self, modes):
        # Opcode 07: param3 <- 1 if param1 < param2 else 0.
        if self.code[self.get_addr(self.pc + 1, modes[0])] < self.code[self.get_addr(self.pc + 2, modes[1])]:
            self.code[self.get_addr(self.pc + 3, modes[2])] = 1
        else:
            self.code[self.get_addr(self.pc + 3, modes[2])] = 0
        self.pc += 4
    def eq(self, modes):
        # Opcode 08: param3 <- 1 if param1 == param2 else 0.
        if self.code[self.get_addr(self.pc + 1, modes[0])] == self.code[self.get_addr(self.pc + 2, modes[1])]:
            self.code[self.get_addr(self.pc + 3, modes[2])] = 1
        else:
            self.code[self.get_addr(self.pc + 3, modes[2])] = 0
        self.pc += 4
    def arb(self, modes):
        # Opcode 09: adjust the relative base by param1.
        self.rb += self.code[self.get_addr(self.pc + 1, modes[0])]
        self.pc += 2
| true |
335fab4b2a95d1430884d452bd40b952196e7c93 | Python | chris-code/rnn | /src/pg_predict.py | UTF-8 | 2,060 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python3
import argparse
import csv
import keras
import numpy as np
from pg_train import load_data
import signal
import sys
def parse_args():
    """Parse the command-line arguments for the prediction script."""
    cli = argparse.ArgumentParser(description="Train recurrent network")
    cli.add_argument("-l", "--limit", type=int, help="Maximum number of data points to load from dataset")
    cli.add_argument("-t", "--num-targets", type=int, dest="num_targets", default=1, help="Length of the target sequence")
    cli.add_argument("-o", "--output_length", type=int, dest="output_length", default=30, help="How many sequence items to predict")
    cli.add_argument("data", help="Test data")
    cli.add_argument("model", help="Where the model is located")
    return cli.parse_args()
def load_model(path):
    """Load a previously saved Keras model from `path`."""
    return keras.models.load_model(path)
def predict(model, X, num_steps):
    """Autoregressively extend every sequence in X by num_steps steps.

    Each model prediction is appended to the rolling input window (the
    oldest element is dropped) and collected into the returned array of
    shape (num_sequences, num_steps, num_features).
    """
    predictions = np.zeros((X.shape[0], num_steps, X.shape[2]))
    for seq_idx, sequence in enumerate(X):
        window = np.expand_dims(sequence, axis=0)
        for step in range(num_steps):
            step_pred = model.predict(window)
            predictions[seq_idx, step] = step_pred[0]
            # Slide the window: append the prediction, drop the oldest item.
            window = np.concatenate((window, np.expand_dims(step_pred, axis=1)),
                                    axis=1)[:, 1:, :]
    return predictions
def devectorize(preds, idx_to_char):
    """Map per-step probability/one-hot vectors back to strings.

    Takes the argmax along the last axis and looks each index up in
    idx_to_char; returns one string per sequence.
    """
    best_indices = np.argmax(preds, axis=2)
    return ["".join(idx_to_char[idx] for idx in sequence)
            for sequence in best_indices]
def print_predictions(X_text, preds_text):
    """Write (input, output) pairs to stdout as a tab-separated table."""
    # The str(...) wrappers look like a Python-2 compatibility shim (its
    # csv module rejects unicode delimiter arguments) — confirm intent.
    writer = csv.writer(sys.stdout, delimiter=str("\t"), lineterminator=str("\n"))
    writer.writerow("input output".split())
    writer.writerows(zip(X_text, preds_text))
if __name__ == "__main__":
    # Terminate quietly on broken pipes (e.g. when piping output into `head`).
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    args = parse_args()
    model = load_model(args.model)
    X, y, idx_to_char = load_data(args.data, args.num_targets, limit=args.limit)
    # Predict continuations, then map both inputs and outputs back to text.
    preds = predict(model, X, args.output_length)
    preds_text = devectorize(preds, idx_to_char)
    X_text = devectorize(X, idx_to_char)
    print_predictions(X_text, preds_text)
815a8049821405c73e5a2ebcb1a81ebfcce936e3 | Python | thangcest2/DataStructureAndAlgorithm | /python/codility/1_iterations/star.py | UTF-8 | 1,689 | 3.484375 | 3 | [] | no_license | # n = 10
# for i in range(n):
# for j in range(n - i):
# print(' ', end='')
# for j in range(2 * i - 1):
# print('*', end='')
# print()
# you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
# def solution(N):
# # write your code in Python 3.6
# a = list("{0:b}".format(N))
# gaps = []
# for i, v in enumerate(a):
# if v == '0':
# for j in range(i+1, len(a)):
# if a[j] == '1':
# gaps.append(j - i)
# break
# if not gaps:
# return 0
# return max(gaps)
def solution(N):
    """Return the longest "binary gap" of N.

    A binary gap is a maximal run of consecutive 0 bits that is enclosed
    by 1 bits at both ends in N's binary representation.  Returns 0 when
    no such gap exists (e.g. 15 -> 1111, 32 -> 100000).

    Replaces the original hand-rolled index scan with the idiomatic
    rstrip/split formulation.
    """
    # Trailing zeros are not enclosed by a 1, so they never form a gap.
    bits = format(N, "b").rstrip("0")
    # Splitting on '1' leaves exactly the zero-runs between one bits.
    zero_runs = [len(run) for run in bits.split("1") if run]
    return max(zero_runs, default=0)
def test():
    # Regression table: decimal value (as a string) -> expected longest gap.
    cases = {
        '15': 0,
        '20': 1,
        '1041': 5,
        '32': 0,
        '328': 2,
        '6': 0,
        '19': 2,
        '5': 1,
        '1024': 0,
        '42': 1,
        '1162': 3,
        '561892': 3,
        '66561': 9,
        '6291457': 20,
        '74901729': 4,
        '805306373': 25,
        '1610612737': 28,
    }
    for value, expected in cases.items():
        print(value, expected, 'checking ..', end='')
        assert solution(int(value)) == expected
        print('passed')

test()
| true |
4340d0c9827a4e436f870bdfb33d7c87d9e7008c | Python | binchen15/leet-python | /greedy/prob1338.py | UTF-8 | 456 | 3.1875 | 3 | [] | no_license | # Reduce Array size to half
from typing import List  # fixes the previously-undefined List annotation
from collections import Counter


class Solution:
    def minSetSize(self, arr: List[int]) -> int:
        """Return the minimum number of distinct values whose removal
        deletes at least half of arr's elements (LeetCode 1338).

        Greedy: always remove the most frequent remaining value.
        """
        half = len(arr) // 2
        removed = 0
        # most_common() yields (value, count) pairs sorted by count, descending.
        for num_sets, (_, count) in enumerate(Counter(arr).most_common(), start=1):
            removed += count
            if removed >= half:
                return num_sets
| true |
59f2adc2d3a354b9e1fe311a107d65caa928f584 | Python | WenXiaowei/branching_bound_with_simplex | /branching_bound/Tree.py | UTF-8 | 2,909 | 3.078125 | 3 | [] | no_license | class Tree:
# UNSATISFIABLE = "Constraints are not satisfiable in this node."
UNSATISFIABLE = -1
# BEST_SOL = "The current node contains the optimal solution."
ADMISSIBLE_SOL = 1
# TO_BE_PROCESSED = "The current node needs to be processed."
TO_BE_PROCESSED = 0
PROCESSED = -3
ROOT_NODE = -2
    def __init__(self, decision_variables, idx, simp, reduced_cost=None, lf=None, rt=None):
        """
        :param decision_variables: decision variables of this node's subproblem
        :param idx: numeric index identifying the node
        :param simp: simplex object associated with this node (structure
            not visible here — see the simplex module)
        :param reduced_cost: optional reduced-cost information
        :param lf: optional left child
        :param rt: optional right child
        """
        self.decision_variables = decision_variables
        self.reduce_cost = reduced_cost
        self.acceptable = True  # flipped to False when the node is closed
        self.left = lf
        self.right = rt
        self.simplex = simp
        # -1 means "no admissible solution recorded yet" (see get_ad_sol).
        self.ad_sol = -1
        self.index = idx
        self.node_state = self.TO_BE_PROCESSED
    def close(self, reason):
        """Mark the node closed; `reason` is one of the state codes above."""
        self.acceptable = False
        self.node_state = reason
    def is_closed(self):
        # Any state other than TO_BE_PROCESSED counts as closed.
        return self.node_state != Tree.TO_BE_PROCESSED
    def set_ad_sol(self, ad_sol):
        """Record an admissible solution and update the node state."""
        self.ad_sol = ad_sol
        self.node_state = self.ADMISSIBLE_SOL
    def get_ad_sol(self):
        """Return the recorded admissible solution.

        Raises ValueError if none was recorded (ad_sol is still -1).
        """
        if self.ad_sol == -1:
            raise ValueError("The node does not contain a valid solution!")
        return self.ad_sol
def __list_nodes__(tree):
if tree is None:
return []
toRet = [tree]
if tree.left is not None:
toRet += tree.left.__list_nodes__()
if tree.right is not None:
toRet += tree.right.__list_nodes__()
return toRet
    def get_all_nodes(self):
        """Return every node of the subtree rooted at self (pre-order)."""
        return self.__list_nodes__()
    def depth(self):
        """Return the number of levels of the subtree rooted at self."""
        return self.__get_depth__()
def __list_leaves__(tree):
if tree is None:
return []
if tree.left is None and tree.right is None:
return [tree]
toRet = []
if tree.left is not None:
toRet += tree.left.__list_nodes__()
if tree.right is not None:
toRet += tree.right.__list_nodes__()
return toRet
    def get_leaves(self):
        """Return the leaf nodes of the subtree rooted at self."""
        return self.__list_leaves__()
    def get_n_level_nodes(self, n):
        """
        Return a list of the nodes at depth n (0 == this node).  If n
        exceeds the depth of the tree, an empty list is returned.
        :param n: depth at which to collect nodes
        :return: list of nodes at that depth
        """
        # Delegates to the module-level recursive helper below.
        return __get_n_level_nodes__(self, n, 0)
def __get_depth__(tree):
if tree is None:
return 0
return 1 + max(tree.left.__get_depth__(), tree.right.__get_depth__())
    def __str__(self):
        # Human-readable summary used for printing/debugging.
        return f"decision variable {self.decision_variables}, ad_sol: {self.ad_sol}"
def __get_n_level_nodes__(tree, n, depth):
    """Recursive helper for Tree.get_n_level_nodes: collect the nodes
    lying exactly `n - depth` levels below `tree` (left-to-right)."""
    if tree is None:
        return []
    if n == depth:
        return [tree]
    return __get_n_level_nodes__(tree.left, n, depth + 1) + __get_n_level_nodes__(tree.right, n, depth + 1)
| true |
520082a795199dd694e83b76a8a406e30be0cf30 | Python | hantong91/python_work | /Hello/test/Step07_set.py | UTF-8 | 1,881 | 4.15625 | 4 | [] | no_license | #-*- coding: utf-8 -*-
'''
- set type
1. Unordered.
2. Does not allow duplicate elements.
3. Think of it as a (mathematical) set / collection of items.
'''
# Creating a set-type value.
set1 = {10,20,30,40,50}
print set1
print "len(set1)", len(set1)
# Adding data to the set (duplicates are silently ignored).
set1.add(60)
set1.add(70)
set1.add(70)
set1.add(70)
set1.add(10)
print "set1:", set1
# Create another set ...
set2={60,70,80,90,100}
# ... then take the union of the two sets,
unionResult = set1.union(set2)
print "set union set2 : ", unionResult
# the intersection of the two sets,
intersectionResult = set1.intersection(set2)
print "set1 intersection set2:", intersectionResult
# and the difference of the two sets.
minusResult = set1 -set2
print "set1 - set2 :", minusResult
set3={"kim","lee"}
list1 = ["park","cho","lee"]
tuple1 = ("one", "two")
# Merging the elements of a list or a tuple into a set.
set3.update(list1)
print "list1 병합후 set3:", set3
set3.update(tuple1)
print "tuple1 병합후 set3:", set3
# Removing a value with discard().
set3.discard("park")
print "set3 에서 park discard 이후:",set3
# If the value to delete does not exist ...
set3.discard("김구라") # ... discard() silently ignores it.
# Removing a value with remove() instead:
# if the value does not exist ...
#set3.remove("해골") # ... remove() raises an exception!
# Removing every value.
set3.clear()
print "set3 clear() 이후 : ",set3
# Loop over every item stored in set1.
for item in set1:
    print item
# To remove duplicates from data stored in a list ...
list3 = [10,20,30,10,10,30,40,50,50]
# ... use a set,
set4 = set(list3)
print "set4 : ",set4
# then convert back to a list after deduplication.
list4 = list(set4)
print "list4 : ",list4
2ffe49f275aa2b383e0320af7d536ec917f8d19f | Python | takutyan318/master | /output2.py | UTF-8 | 4,994 | 2.765625 | 3 | [] | no_license | #! /usr/bin/env python
# coding: utf-8
import Tkinter as Tk
from PIL import Image, ImageTk
from ttk import *
import sys
class App(Tk.Frame):
    """Main window of the hair-design candidate evaluation GUI."""
    EXIST_OR_NOT = False # guards against opening the selection sub-window twice
    #bestok = False # whether a "best" candidate has been chosen (unused)
    # Overall window setup.
    def __init__(self,smpimg,master=None):
        Tk.Frame.__init__(self, master)
        self.master.title(u'ヘアデザインシステム')
        self.master.geometry("1440x900")
        self.sampimg = smpimg
        self.display()
    def display(self):
        """Build the notebook of candidate images plus the rating widgets."""
        w1 = 288 # display width of the front image
        h1 = 288 # display height of the front image
        w2 = 144 # display width of the side/back images
        h2 = 144 # display height of the side/back images
        self.img_f = [0,0,0,0,0,0,0,0,0,0] # front-view PhotoImage per candidate
        self.img_s = [0,0,0,0,0,0,0,0,0,0] # side-view PhotoImage per candidate
        self.img_b = [0,0,0,0,0,0,0,0,0,0] # back-view PhotoImage per candidate
        fn = 0 # frame (candidate) index
        f = [0,0,0,0,0,0,0,0,0,0] # frames that hold the image labels
        note = Notebook(self)
        tag1 = Tk.Frame(note)
        tag2 = Tk.Frame(note)
        note.add(tag1, text=u"ページ1")
        note.add(tag2, text=u"ページ2")
        # Image display section.
        for imgnum in self.sampimg:
            # Load the three views of this candidate and resize them.
            imgname1 = "sample" + str(imgnum) + "_front.jpeg"
            imgname2 = "sample" + str(imgnum) + "_side.jpeg"
            imgname3 = "sample" + str(imgnum) + "_back.jpeg"
            image1 = Image.open(imgname1)
            image2 = Image.open(imgname2)
            image3 = Image.open(imgname3)
            image1 = image1.resize((w1,h1))
            image2 = image2.resize((w2,h2))
            image3 = image3.resize((w2,h2))
            # Place the images on the GUI: candidates 1-5 on page 1
            # (3 + 2 grid), candidates 6-10 on page 2.
            if fn < 5:
                f[fn] = Tk.LabelFrame(tag1, text=u"候補"+str(fn+1))
                if fn < 3:
                    f[fn].grid(row=0,column=fn, padx=5, pady=10)
                else:
                    f[fn].grid(row=1,column=fn%3, padx=5, pady=10)
            else:
                f[fn] = Tk.LabelFrame(tag2, text=u"候補"+str(fn+1))
                if fn < 8:
                    f[fn].grid(row=0,column=fn%5, padx=5, pady=10)
                else:
                    f[fn].grid(row=1,column=fn%8, padx=5, pady=10)
            self.img_f[fn] = ImageTk.PhotoImage(image1)
            self.img_s[fn] = ImageTk.PhotoImage(image2)
            self.img_b[fn] = ImageTk.PhotoImage(image3)
            il1 = Tk.Label(f[fn], image=self.img_f[fn])
            il2 = Tk.Label(f[fn], image=self.img_s[fn])
            il3 = Tk.Label(f[fn], image=self.img_b[fn])
            il1.grid(row=0, column=0, rowspan=2)
            il2.grid(row=0, column=1)
            il3.grid(row=1, column=1)
            fn = fn + 1
        note.pack()
        # "Best" selection and evaluation section.
        bestevaluate = Tk.Frame(self) # frame holding the best-selection widgets
        bestbtn = Tk.Button(bestevaluate, text=u"最も良い候補を選択する", command=self.bestselect)
        bestbtn.pack(side=Tk.LEFT)
        bestevaluate.pack(side=Tk.LEFT)
        # Rating of how well the chosen candidate reflects the impression.
        bestevaluate2 = Tk.Frame(self)
        bestimpression = Tk.Label(bestevaluate2, text=u"最もいいと思った候補がどの程度印象を反映しているか評価してください")
        bestimpression.pack()
        self.abhyouka = Tk.IntVar()
        self.abhyouka.set(0)
        num = -2
        # Five radio buttons with values -2 .. +2.
        for i in ["self.abRadio1", "self.abRadio2", "self.abRadio3", "self.abRadio4", "self.abRadio5"]:
            i = Tk.Radiobutton(bestevaluate2, text=str(num), variable=self.abhyouka, value=num)
            i.pack(side=Tk.LEFT)
            num = num + 1
        bestevaluate2.pack()
        # Confirm button.
        decide = Tk.Button(self, text=u"修正実行", command=self.sendvalue)
        decide.pack(anchor=Tk.SE)
    def bestselect(self):
        """Open the sub-window that lets the user pick the best candidate."""
        listname = [u"候補1", u"候補2", u"候補3", u"候補4", u"候補5", u"候補6", u"候補7", u"候補8", u"候補9", u"候補10"]
        if not self.EXIST_OR_NOT:
            self.bestslct = Tk.Toplevel()
            self.bestslct.title('best決定')
            bestlabel = Tk.Label(self.bestslct, text=u'最もあなたの考える印象を反映している候補を選んでください', bg='white')
            bestlabel.pack()
            self.listbx = Tk.Listbox(self.bestslct)
            self.listbx.insert(Tk.END, *listname)
            self.listbx.pack(pady=10)
            self.EXIST_OR_NOT = self.bestslct.winfo_exists() # True while the sub-window is open
            # Finish-up handling: read the value and close the window.
            bestdecide = Tk.Button(self.bestslct, text=u'決定', command=self.changeFlag)
            bestdecide.pack()
            self.bestslct.protocol('WM_DELETE_WINDOW', self.changeFlag)
    # Stores the array index of the chosen best candidate.
    def changeFlag(self):
        # Extract the current selection from the listbox.
        self.bestnum = self.listbx.curselection() # returned as a tuple
        self.bestnum = self.bestnum[0] # array index of the best candidate
        # Teardown: allow the sub-window to be reopened, then destroy it.
        self.EXIST_OR_NOT = False
        self.bestslct.destroy()
    def sendvalue(self):
        """Print the chosen candidate and its rating, then close the app."""
        # NOTE(review): assumes bestselect/changeFlag ran first; otherwise
        # self.bestnum does not exist yet.
        self.abhyouka_kakutei = self.abhyouka.get() # impression-reflection rating of "best"
        print "bestの候補番号"
        print self.sampimg[self.bestnum]
        print "印象反映度"
        print self.abhyouka_kakutei
        self.master.destroy()
if __name__ == '__main__':
    # Demo run with candidate image ids 1..10.
    sample = [1,2,3,4,5,6,7,8,9,10]
    app = App(sample)
    app.pack()
    app.mainloop()
0dd7d5634ce38f2531062f53a8b0b6411b5daf6b | Python | wbornus/Technologia-Mowy---Rozpoznawanie-Cyfr | /mfcc_loader/mfcc_loader.py | UTF-8 | 368 | 3.078125 | 3 | [] | no_license | import pickle
# Load the mfcc_dict dictionary from its pickle file.
with open('mfcc_dict.pickle', 'rb') as pickle_file:
    mfcc_dict = pickle.load(pickle_file)
# First-level keys: speaker ids.
print(mfcc_dict.keys())
# Second-level keys: the spoken digit.
print(mfcc_dict[0].keys())
# Example: MFCC features for speaker id 2 saying digit 3.
print(mfcc_dict[2][3])
| true |
e9211498b54cfae0d3d1f71d0e2a5c1aa2a63f82 | Python | tsujio/ml-play | /neural_network/neural_network.py | UTF-8 | 3,372 | 3.125 | 3 | [] | no_license | from matplotlib import pyplot as plt
import numpy as np
from sklearn import datasets, model_selection
def plot_decision_boundary(data, predict):
    """ref: http://scikit-learn.org/stable/auto_examples/svm/plot_iris.html

    Shade the plane with the classifier's decision, evaluated on a dense
    grid spanning the first two columns of `data` (plus a 0.5 margin).
    `predict` must map an (N, 2) array to N class labels.
    """
    x_min, x_max = data[:, 0].min() - .5, data[:, 0].max() + .5
    y_min, y_max = data[:, 1].min() - .5, data[:, 1].max() + .5
    h = .02  # grid step
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
def sigmoid(x):
    """Logistic activation: squashes each element of x into (0, 1)."""
    negative_exp = np.exp(-x)
    return 1 / (1 + negative_exp)
class NeuralNetwork:
    """Fully-connected feed-forward network with sigmoid activations,
    trained by per-sample gradient descent (online backpropagation)."""
    # Number of full passes over the training set.
    EPOCH = 3000
    def __init__(self, layers, rho=1e-1):
        # One weight matrix per layer transition; the extra +1 column of
        # each matrix holds that layer's bias weights.
        self.W_list = [np.random.randn(layers[i], layers[i-1] + 1)
                       for i in range(1, len(layers))]
        # Learning rate.
        self.rho = rho
    def train(self, data, label):
        """Fit the network; `label` is expected one-hot encoded
        (one column per output unit — see the output-layer error term)."""
        for i in range(self.__class__.EPOCH):
            # shuffle the samples each epoch
            perm = np.random.permutation(len(data))
            data, label = data[perm], label[perm]
            for x, y in zip(list(data), list(label)):
                g_list = self._predict(x)
                # back propagation (NOTE(review): this inner `i` shadows
                # the epoch counter above — harmless but confusing)
                new_W_list = [np.zeros(W.shape) for W in self.W_list]
                for i in reversed(range(len(self.W_list))):
                    _g = g_list[i + 1]
                    g = g_list[i]
                    W = self.W_list[i]
                    if i == len(self.W_list) - 1:
                        # output layer: delta = (out - target) * sigmoid'
                        e = (_g - y) * _g * (1 - _g)
                    else:
                        # hidden layer: propagate delta through the next
                        # layer's weights, bias column stripped
                        _W = self.W_list[i + 1][:, :-1]
                        e = np.dot(_W.T, e) * _g * (1 - _g)
                    # Outer-product-style gradient: E * G == e ⊗ [g, 1],
                    # so every weight (incl. bias) gets its own update.
                    G = np.repeat(np.concatenate([g, [1]])[np.newaxis, :],
                                  W.shape[0],
                                  axis=0)
                    E = np.repeat(e[:, np.newaxis],
                                  W.shape[1],
                                  axis=1)
                    new_W_list[i] = W - self.rho * E * G
                # All layers are updated from the pre-update weights.
                self.W_list = new_W_list
    def _predict(self, x):
        """Forward pass; returns the activations of every layer, with
        g_list[0] being the raw (un-squashed) input vector."""
        g_list = [x]
        for W in self.W_list:
            # Append the constant 1 so W's last column acts as the bias.
            x = np.concatenate([g_list[-1], [1]])
            g_list.append(sigmoid(np.dot(W, x)))
        return g_list
    def predict(self, x):
        # Index of the most activated output unit.
        return np.argmax(self._predict(x)[-1])
    def predict_array(self, X):
        """Vectorised predict: one class index per row of X."""
        return np.array([self.predict(x) for x in list(X)])
if __name__ == '__main__':
    # prepare dataset: two interleaved half-moons, labels one-hot encoded
    moons = datasets.make_moons(n_samples=500, noise=0.1)
    x_train, x_test, y_train, y_test = model_selection.train_test_split(
        moons[0], np.eye(2)[moons[1]], test_size=0.5, shuffle=True
    )
    # train a 2-3-2 network on one half of the data
    nn = NeuralNetwork([2, 3, 2])
    nn.train(x_train, y_train)
    # test: a sample is an error when the predicted class's one-hot
    # entry in the true label is not 1
    errors = [y[nn.predict(x)] != 1
              for x, y in zip(list(x_test), list(y_test))]
    error_rate = len(x_test[errors]) / len(x_test)
    print(f"error rate={error_rate}")
    # display the decision boundary and both classes of the test set
    plot_decision_boundary(x_test, lambda x: nn.predict_array(x))
    X_c0 = x_test[np.argmax(y_test, axis=1) == 0]
    X_c1 = x_test[np.argmax(y_test, axis=1) == 1]
    plt.scatter(X_c0[:, 0], X_c0[:, 1])
    plt.scatter(X_c1[:, 0], X_c1[:, 1])
    plt.show()
| true |
9013fb244f054b7aa90dd477e0d821891f174ff9 | Python | macbymac/myfirst | /testfile.py | UTF-8 | 184 | 2.6875 | 3 | [] | no_license | import json
import requests
# REST endpoint listing ticker data for the top cryptocurrencies.
url = "https://api.coinmarketcap.com/v2/ticker"
response = requests.get(url)
payload = json.loads(response.text)
# Id '1' corresponds to Bitcoin (hence the variable name).
btc = payload['data']['1']
print(btc['quotes']['USD'])
686d8d8ace005704dbfeb9108e0422ae46bf012b | Python | wangyum/Anaconda | /pkgs/fastcache-1.0.2-py27_0/lib/python2.7/site-packages/fastcache/tests/test_clrucache.py | UTF-8 | 4,436 | 2.96875 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | import pytest
import fastcache
import itertools
import warnings
try:
    # Probe whether itertools.count accepts start/step keywords.
    itertools.count(start=0, step=-1)
    count = itertools.count
except TypeError:
    # Fallback for old interpreters whose count() lacks start/step:
    # produce start + j*step by correcting plain count(start) values.
    def count(start=0, step=1):
        i = step-1
        for j, c in enumerate(itertools.count(start)):
            yield c + i*j
def arg_gen(min=1, max=100, repeat=3):
    """Yield (j, k) argument pairs: for every i in [min, max), `repeat`
    times, j counts up from 0 while k counts down from i.

    (The min/max parameter names shadow the builtins but are part of the
    existing keyword interface.)
    """
    for i in range(min, max):
        for _ in range(repeat):
            yield from zip(range(i), count(i, -1))
@pytest.fixture(scope='module', params=[fastcache.clru_cache,
                                        fastcache.lru_cache])
def cache(request):
    """Parametrized fixture: every test runs against both the C
    implementation (clru_cache) and the Python one (lru_cache)."""
    param = request.param
    return param
def test_function_attributes(cache):
    """ Simple tests for attribute preservation. """
    def tfunc(a, b):
        """test function docstring."""
        return a + b
    cfunc = cache()(tfunc)
    # functools.wraps-style metadata must survive decoration.
    assert cfunc.__doc__ == tfunc.__doc__
    assert hasattr(cfunc, 'cache_info')
    assert hasattr(cfunc, 'cache_clear')
    assert hasattr(cfunc, '__wrapped__')
def test_function_cache(cache):
    """ Test that cache returns appropriate values. """
    # Mutable external state the cached function depends on; passing it
    # via `state` makes it part of the cache key.
    cat_tuples = [True]
    def tfunc(a, b, c=None):
        if (cat_tuples[0] == True):
            return (a, b, c) + (c, a)
        else:
            return 2*a-10*b
    cfunc = cache(maxsize=100, state=cat_tuples)(tfunc)
    for i, j in arg_gen(max=75, repeat=5):
        assert cfunc(i, j) == tfunc(i, j)
    # change extra state
    cat_tuples[0] = False
    for i, j in arg_gen(max=75, repeat=5):
        assert cfunc(i, j) == tfunc(i, j)
    # test dict state
    d = {}
    cfunc = cache(maxsize=100, state=d)(tfunc)
    cfunc(1, 2)
    assert cfunc.cache_info().misses == 1
    # Mutating the dict invalidates the previous entry -> new miss.
    d['a'] = 42
    cfunc(1, 2)
    assert cfunc.cache_info().misses == 2
    cfunc(1, 2)
    assert cfunc.cache_info().misses == 2
    assert cfunc.cache_info().hits == 1
    # Clearing restores the original (empty) state -> hits the first entry.
    d.clear()
    cfunc(1, 2)
    assert cfunc.cache_info().misses == 2
    assert cfunc.cache_info().hits == 2
    d['a'] = 44
    cfunc(1, 2)
    assert cfunc.cache_info().misses == 3
def test_memory_leaks(cache):
    """ Longer running test to check for memory leaks. """
    def tfunc(a, b, c):
        return (a-1, 2*c) + (10*b-1, a*b, a*b+c)
    # maxsize is far below the number of distinct calls generated by
    # arg_gen, so cache eviction is exercised heavily.
    cfunc = cache(maxsize=2000)(tfunc)
    for i, j in arg_gen(max=1500, repeat=5):
        assert cfunc(i, j, c=i-j) == tfunc(i, j, c=i-j)
def test_warn_unhashable_args(cache, recwarn):
    """ Function arguments must be hashable. """
    @cache(unhashable='warning')
    def f(a, b):
        return (a, ) + (b, )
    with warnings.catch_warnings() :
        warnings.simplefilter("always")
        # Unhashable args bypass the cache but still compute the result.
        assert f([1], 2) == f.__wrapped__([1], 2)
        # The pytest recwarn fixture captures the emitted UserWarning.
        w = recwarn.pop(UserWarning)
        assert issubclass(w.category, UserWarning)
        assert "Unhashable arguments cannot be cached" in str(w.message)
        assert w.filename
        assert w.lineno
def test_ignore_unhashable_args(cache):
    """ Function arguments must be hashable. """
    @cache(unhashable='ignore')
    def f(a, b):
        return (a, ) + (b, )
    # 'ignore': unhashable args silently skip the cache, no warning.
    assert f([1], 2) == f.__wrapped__([1], 2)
def test_default_unhashable_args(cache):
    """By default unhashable arguments raise TypeError."""
    @cache()
    def f(a, b):
        return (a, ) + (b, )
    with pytest.raises(TypeError):
        f([1], 2)
    # unhashable='error' behaves like the default.
    @cache(unhashable='error')
    def f(a, b):
        pass
    with pytest.raises(TypeError):
        f([1], 2)
def test_state_type(cache):
    """ State must be a list or dict. """
    f = lambda x : x
    # Tuples and plain ints are rejected as cache state.
    with pytest.raises(TypeError):
        cache(state=(1, ))(f)
    with pytest.raises(TypeError):
        cache(state=-1)(f)
def test_typed_False(cache):
    """ Verify typed==False. """
    @cache(typed=False)
    def cfunc(a, b):
        return a+b
    # initialize cache with integer args
    cfunc(1, 2)
    # typed=False treats 1 and 1.0 as the same key, so the identical
    # cached object is returned (hence `is`, not `==`).
    assert cfunc(1, 2) is cfunc(1.0, 2)
    assert cfunc(1, 2) is cfunc(1, 2.0)
    # test keywords
    cfunc(1, b=2)
    assert cfunc(1,b=2) is cfunc(1.0,b=2)
    assert cfunc(1,b=2) is cfunc(1,b=2.0)
def test_typed_True(cache):
    """ Verify typed==True. """
    @cache(typed=True)
    def cfunc(a, b):
        return a+b
    # typed=True keys on argument type as well, so int and float calls
    # produce distinct cache entries.
    assert cfunc(1, 2) is not cfunc(1.0, 2)
    assert cfunc(1, 2) is not cfunc(1, 2.0)
    # test keywords
    assert cfunc(1,b=2) is not cfunc(1.0,b=2)
    assert cfunc(1,b=2) is not cfunc(1,b=2.0)
def test_dynamic_attribute(cache):
    """New attributes can be attached to the cache wrapper."""
    f = lambda x : x
    cfunc = cache()(f)
    cfunc.new_attr = 5
    assert cfunc.new_attr == 5
| true |
ec19fda5a5903be169fc0d326311ee31e4c9178d | Python | AayushK47/twitter-sentiment-analysis | /script.py | UTF-8 | 2,936 | 3.359375 | 3 | [] | no_license | """
Project name: twitter sentiment analysis
Author: Aayush Kurup
Libraries used: tweepy, nltk, pandas, flask, pickle, sklearn and os
Start Date: 22-12-2018
End Date: 01-02-2019
"""
# Imports
import re
import tweepy
import pickle
import pandas as pd
# nltk.download("stopwords") # Uncomment this line if you do not have stopwords
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# A class with all the required functionalities
class tweets_sentiment_analyzer:
    """Bundles Twitter search, tweet cleaning, sentiment classification
    and TSV export behind one object."""

    # Class constructor: configure Twitter auth and load the models.
    def __init__(self, consumerKey, consumerSecret, accessKey, accessSecret):
        """Authenticate with Twitter and load the pre-trained models
        (CountVectorizer + logistic-regression classifier) from models/."""
        auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
        auth.set_access_token(accessKey, accessSecret)
        # Use context managers so the pickle file handles are closed
        # (the original left them dangling).
        with open('models/cv_obj.pkl', 'rb') as fh:
            self.__count_vectorizer = pickle.load(fh)
        with open('models/logistic_reg_model.pkl', 'rb') as fh:
            self.__classifier = pickle.load(fh)
        self.__api = tweepy.API(auth)
        self.__tweets = None
        self.__sentiments = None

    # Searches tweets on Twitter based on the provided keyword.
    def search(self, searchKeyword):
        """Search Twitter for `searchKeyword`; returns the tweepy results
        (also cached on the instance) or None for an empty keyword."""
        print(searchKeyword)
        # If we get nothing, we return None.
        if searchKeyword is None or searchKeyword == '':
            return None
        # Perform the API call once and return the cached result — the
        # original issued the identical request twice.
        self.__tweets = self.__api.search(searchKeyword)
        return self.__tweets

    def clean_tweet(self, t):
        """Normalise one raw tweet: strip mentions/URLs/punctuation,
        lowercase, drop English stopwords and stem every word."""
        # NOTE(review): consider a raw string r"..." here to silence
        # invalid-escape warnings; the pattern itself is kept verbatim.
        text = re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", t)
        words = text.lower().split()
        stemmer = PorterStemmer()
        # Build the stopword set once per call — the original rebuilt it
        # inside the comprehension for every single word.
        stop_words = set(stopwords.words("english"))
        stemmed = [stemmer.stem(word) for word in words if word not in stop_words]
        return ' '.join(stemmed)

    # Analyzes the cached tweets and returns their sentiments.
    def get_sentiments(self):
        """Classify the cached tweets; returns a list of predicted
        sentiment labels, or None if no search has been run yet."""
        if self.__tweets is None:
            return None
        # (The original also built an unused `tweets` text list.)
        corpus = [self.clean_tweet(tweet.text) for tweet in self.__tweets]
        X = self.__count_vectorizer.transform(corpus)
        sentiments = self.__classifier.predict(X)
        self.__sentiments = sentiments
        return list(sentiments)

    # Generates a .tsv file for the tweets searched most recently.
    def convert_to_tsv(self):
        """Dump the cached tweets and their sentiments to tweets.tsv."""
        if self.__tweets is None:
            return None
        data_dict = {'Tweet': [], 'sentiment': []}
        for tweet, sentiment in zip(self.__tweets, self.__sentiments):
            data_dict['Tweet'].append(tweet.text)
            data_dict['sentiment'].append(sentiment)
        df = pd.DataFrame(data_dict)
        df.to_csv('tweets.tsv', sep='\t', encoding='utf-8', index=False)
        return None
| true |