blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ef07d38b6af8572d1de6308c7ccb21c936ff10d1 | Python | jobejen/Viscid | /viscid/calculator/topology.py | UTF-8 | 2,412 | 2.734375 | 3 | [
"MIT"
] | permissive | """I don't know if this is worth keeping as its own module,
TOPOLOGY_* is copied here so that one can import this module
without needing to have built the cython module streamline.pyx
"""
import numpy as np
TOPOLOGY_MS_NONE = 0 # no translation needed
TOPOLOGY_MS_CLOSED = 1 # translated from 5, 6, 7(4|5|6)
TOPOLOGY_MS_OPEN_NORTH = 2 # translated from 13 (8|5)
TOPOLOGY_MS_OPEN_SOUTH = 4 # translated from 14 (8|6)
TOPOLOGY_MS_SW = 8 # no translation needed
# TOPOLOGY_MS_CYCLIC = 16 # no translation needed
TOPOLOGY_MS_INVALID = [3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15]
# TOPOLOGY_MS_OTHER = list(range(32, 512)) # >= 16
TOPOLOGY_G_NONE = 0
color_map_msphere = {TOPOLOGY_MS_CLOSED: (0.0, 0.8, 0.0),
TOPOLOGY_MS_OPEN_NORTH: (0.0, 0.0, 0.7),
TOPOLOGY_MS_OPEN_SOUTH: (0.7, 0.0, 0.0),
TOPOLOGY_MS_SW: (0.7, 0.7, 0.7)
}
color_map_generic = {}
# for legacy reasons, make some aliases
TOPOLOGY_NONE = TOPOLOGY_MS_NONE
TOPOLOGY_CLOSED = TOPOLOGY_MS_CLOSED
TOPOLOGY_OPEN_NORTH = TOPOLOGY_MS_OPEN_NORTH
TOPOLOGY_OPEN_SOUTH = TOPOLOGY_MS_OPEN_SOUTH
TOPOLOGY_SW = TOPOLOGY_MS_SW
# TOPOLOGY_CYCLIC = TOPOLOGY_MS_CYCLIC
TOPOLOGY_INVALID = TOPOLOGY_MS_INVALID
# TOPOLOGY_OTHER = TOPOLOGY_MS_OTHER
color_map = color_map_msphere
def topology2color(topology, topo_style="msphere", bad_color=None):
    """Map topology value(s) to RGB colors.

    Parameters:
        topology (int, list, ndarray): value(s) from
            ``calculator.streamline.TOPOLOGY_*``
        topo_style (string or dict): "msphere", or a dict supplying a
            custom topology -> rgb mapping
        bad_color (tuple): rgb color used for topologies missing from
            the mapping (defaults to black)

    Returns:
        Nx3 ndarray of rgb rows, or a single (R, G, B) tuple when
        ``topology`` is a scalar value
    """
    if isinstance(topo_style, dict):
        mapping = topo_style
    elif topo_style == "msphere":
        mapping = color_map_msphere
    else:
        mapping = color_map_generic

    if bad_color is None:
        bad_color = (0.0, 0.0, 0.0)

    try:
        n_values = len(topology)
    except TypeError:
        # Scalar input: return a single rgb tuple.
        return mapping.get(int(topology), bad_color)

    # Sequence input: one rgb row per topology value.
    colors = np.empty((n_values, 3))
    for row, topo in enumerate(topology):
        colors[row] = mapping.get(topo, bad_color)
    return colors
| true |
3886b3f2c43e06bc6fa9497cc0dc376b540073da | Python | YllkaGojani/GreatNumberGameFlask | /server.py | UTF-8 | 864 | 2.8125 | 3 | [] | no_license | from flask import Flask,render_template,request,redirect,session,flash
import random
app = Flask(__name__)
app.secret_key = 'ThiIsSecret'
@app.route('/')
def index():
    """Render the number-guessing game page."""
    return render_template("index.html")
@app.route('/num', methods=['POST'])
def guess():
    """Compare the submitted guess against the session's target number.

    Flashes feedback ("Too low!" / "Too High!" / success message) and stores
    a display color in the session, then redirects back to the index page.

    NOTE(review): the target is reset to the hard-coded value 55 on every
    guess, overriding whatever /reset picked (the random line is commented
    out below) -- presumably leftover debug code; confirm before shipping.
    """
    guess = int(request.form['guess'])
    session['number'] = 55
    #session['number'] = random.randrange(0, 101)
    print guess,"random "+ str(session['number'])  # Python 2 print statement (debug output)
    if guess < session['number']:
        flash("Too low!")
        session['color'] = 'red'
    elif guess > session['number']:
        flash("Too High!")
        session['color'] = 'red'
    else:
        flash(str(guess)+" was the number!")
        session['color'] = 'green'
    return redirect('/')
@app.route('/reset')
def reset():
    """Pick a new random target in [0, 100] and send the player back to the game."""
    session['number'] = random.randrange(0, 101)
    return redirect('/')
app.run(debug=True) | true |
79380418e9b2f96b77f94bd5480c14de6377e81e | Python | ansnoussi/NCSC_CTF1 | /brute.py | UTF-8 | 580 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
import hashlib
import itertools

# Candidate characters for the unknown 4-character suffix.
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

# Known plaintext prefix and the MD5 digest we are trying to match.
PREFIX = "securinets"
TARGET_DIGEST = "55f4b1867f59b3d928893496bb9ce320"

# Exhaustively try every 4-character suffix (36**4 = 1,679,616 candidates).
# itertools.product replaces the original four nested loops, and the
# candidate is encoded because hashlib requires bytes on Python 3 (the
# original Python 2 code hashed a str and crashes under Python 3).
for combo in itertools.product(ALPHABET, repeat=4):
    candidate = PREFIX + "".join(combo)
    if hashlib.md5(candidate.encode("ascii")).hexdigest() == TARGET_DIGEST:
        print(candidate)
        # SystemExit instead of exit(): works even when site.py is skipped.
        raise SystemExit(0)
| true |
7e60563514a5f6113af52d1fb4d085ba4b46828b | Python | tridungduong16/research-phd-uts | /CFFair_Emulate/CounterFair_Emulate.py | UTF-8 | 10,015 | 2.875 | 3 | [] | no_license | """
Counterfactual Fairness (Kusner et al. 2017) Replication in Python 3 by Philip Ball
NB: Stan files courtesy of Matt Kusner
Options
-do_l2: Performs the replication of the L2 (Fair K) model, which can take a while depending on computing power
-save_l2: Saves the resultant models (or not) for the L2 (Fair K) model, which produces large-ish files (100s MBs)
Dependencies (shouldn't really matter as long as it's up-to-date Python >3.5):
Python 3.5.5
NumPy 1.14.3
Pandas 0.23.0
Scikit-learn 0.19.1
PyStan 2.17.1.0
StatsModels 0.9.0
"""
import pystan
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error
from argparse import ArgumentParser, ArgumentTypeError
from pathlib import Path
def str2bool(v):
    """Parse a human-friendly boolean string into a bool.

    Accepts common truthy spellings (yes/true/t/y/1) and falsy spellings
    (no/false/f/n/0), case-insensitively. Raises ArgumentTypeError for
    anything else, which argparse reports as a usage error.
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ArgumentTypeError('Boolean value expected.')
parser = ArgumentParser()
parser.add_argument('-do_l2', type=str2bool, nargs='?', const=True, default=True, help="Perform L2 Train/Test (Warning: TAKES TIME)")
parser.add_argument('-save_l2', type=str2bool, nargs='?', const=True, default=False, help="Save L2 Train/Test Models (Warning: LARGE FILES)")
args = parser.parse_args()
# wrapper class for statsmodels linear regression (more stable than SKLearn)
class SM_LinearRegression():
    """Thin wrapper giving statsmodels OLS an sklearn-like fit/predict API.

    A column of ones is appended to X internally because ``sm.OLS`` does
    not add an intercept term by itself.
    """

    def __init__(self):
        pass

    @staticmethod
    def _with_intercept(X):
        # Append a constant column so the fit includes a bias term.
        n_rows = X.shape[0]
        return np.hstack([X, np.ones(n_rows).reshape(-1, 1)])

    def fit(self, X, y):
        """Fit OLS of y on X (plus intercept); stores the result on self.LRFit."""
        self.LRFit = sm.OLS(y, self._with_intercept(X), hasconst=True).fit()

    def predict(self, X):
        """Predict targets for X using the previously fitted OLS model."""
        return self.LRFit.predict(self._with_intercept(X))
# function to convert to a dictionary for use with STAN train-time model
def get_pystan_train_dic(pandas_df, sense_cols):
    """Pack a law-school dataframe into the dict layout the Stan train model expects.

    Parameters:
        pandas_df: dataframe with UGPA, LSAT, ZFYA and the sensitive columns.
        sense_cols: list of sensitive-attribute column names (the 'a' matrix).

    Returns:
        dict with counts (N, K), the sensitive matrix 'a', and the ugpa /
        lsat / zfya observation lists (LSAT cast to int).
    """
    return {
        'N': len(pandas_df),
        'K': len(sense_cols),
        'a': np.array(pandas_df[sense_cols]),
        'ugpa': list(pandas_df['UGPA']),
        'lsat': list(pandas_df['LSAT'].astype(int)),
        'zfya': list(pandas_df['ZFYA']),
    }
# function to convert to a dictionary for use with STAN test-time model
def get_pystan_test_dic(fit_extract, test_dic):
    """Build the dict for the Stan test-time model from posterior samples.

    Each retained posterior sample array is collapsed to its mean over the
    sample axis; per-observation quantities (and lp__) are excluded. The
    data fields N, K, a, ugpa, lsat are carried over from `test_dic`.
    """
    excluded = ('sigma_g_Sq', 'u', 'eta_a_zfya', 'eta_u_zfya', 'lp__')
    dic_out = {key: np.mean(samples, axis=0)
               for key, samples in fit_extract.items()
               if key not in excluded}
    for field in ('N', 'K', 'a', 'ugpa', 'lsat'):
        dic_out[field] = test_dic[field]
    return dic_out
# Preprocess data for all subsequent experiments
def get_data_preprocess():
    """Load the law-school CSV and return (train_dic, test_dic) Stan dicts.

    Reads ./law_data.csv, one-hot encodes race, converts sex into 0/1
    male/female indicator columns, rounds LSAT to int, then splits 80/20
    with a fixed random seed for reproducibility.
    """
    df = pd.read_csv('./law_data.csv', index_col=0)
    # One-hot encode race so each category becomes its own 0/1 column.
    df = pd.get_dummies(df, columns=['race'], prefix='', prefix_sep='')
    # sex is coded 2 = male, 1 = female in the raw data (per the lambdas below).
    df['male'] = df['sex'].map(lambda z: 1 if z == 2 else 0)
    df['female'] = df['sex'].map(lambda z: 1 if z == 1 else 0)
    df['LSAT'] = df['LSAT'].apply(lambda x: int(np.round(x)))
    df = df.drop(axis=1, columns=['sex'])

    sense_cols = ['Amerindian', 'Asian', 'Black', 'Hispanic', 'Mexican',
                  'Other', 'Puertorican', 'White', 'male', 'female']

    train_df, test_df = train_test_split(df, random_state=1234, test_size=0.2)
    return (get_pystan_train_dic(train_df, sense_cols),
            get_pystan_train_dic(test_df, sense_cols))
# Get the Unfair Model predictions
def Unfair_Model_Replication(law_train_dic, law_test_dic):
    """"Unfair" baseline: regress ZFYA on sensitive attributes + UGPA + LSAT.

    Returns predictions for the test set.
    """
    def design(dic):
        # Full feature matrix: sensitive columns plus both observed scores.
        return np.hstack((dic['a'],
                          np.array(dic['ugpa']).reshape(-1, 1),
                          np.array(dic['lsat']).reshape(-1, 1)))

    model = SM_LinearRegression()
    model.fit(design(law_train_dic), law_train_dic['zfya'])
    return model.predict(design(law_test_dic))
# Get the FTU Model predictions
def FTU_Model_Replication(law_train_dic, law_test_dic):
    """Fairness-through-unawareness: regress ZFYA on UGPA and LSAT only.

    The sensitive attributes are simply dropped from the feature matrix.
    Returns predictions for the test set.
    """
    def design(dic):
        # Only the non-sensitive observed covariates.
        return np.hstack((np.array(dic['ugpa']).reshape(-1, 1),
                          np.array(dic['lsat']).reshape(-1, 1)))

    model = SM_LinearRegression()
    model.fit(design(law_train_dic), law_train_dic['zfya'])
    return model.predict(design(law_test_dic))
# Get the Fair K/L2 Model predictions
def L2_Model_Replication(law_train_dic, law_test_dic, save_models = False):
    """Fair-K (Level 2) model: infer the latent variable u with Stan, then
    regress ZFYA on its posterior mean.

    Stan posterior samples are cached as pickles in the working directory
    (model_fit.pkl / model_fit_train.pkl / model_fit_test.pkl) and reused
    when present. `save_models` controls whether freshly fitted samples are
    written out; it has no effect when a cached file is found.

    Returns predictions for the test set.
    """
    # --- Stage 1: fit (or load) the training model to get parameter posteriors.
    check_fit = Path("./model_fit.pkl")
    if check_fit.is_file():
        print('File Found: Loading Fitted Training Model Samples...')
        if save_models:
            print('No models will be trained or saved')
        with open("model_fit.pkl", "rb") as f:
            post_samps = pickle.load(f)
    else:
        print('File Not Found: Fitting Training Model...\n')
        # Compile Model
        model = pystan.StanModel(file = './law_school_train.stan')
        print('Finished compiling model!')
        # Commence the training of the model to infer weights (500 warmup, 500 actual)
        fit = model.sampling(data = law_train_dic, iter=1000, chains = 1)
        post_samps = fit.extract()
        # Save parameter posterior samples if specified
        if save_models:
            with open("model_fit.pkl", "wb") as f:
                pickle.dump(post_samps, f, protocol=-1)
            print('Saved fitted model!')
    # Retrieve posterior weight samples, take their means, and attach the data
    # fields needed by the test-time (u-only) model.
    law_train_dic_final = get_pystan_test_dic(post_samps, law_train_dic)
    law_test_dic_final = get_pystan_test_dic(post_samps, law_test_dic)
    # --- Stage 2: infer u for the training rows (cached as model_fit_train.pkl).
    check_train = Path("./model_fit_train.pkl")
    if check_train.is_file():
        # load posterior training samples from file
        print('File Found: Loading Test Model with Train Data...')
        if save_models:
            print('No models will be trained or saved')
        with open("model_fit_train.pkl", "rb") as f:
            fit_train_samps = pickle.load(f)
    else:
        # Obtain posterior training samples from scratch
        print('File Not Found: Fitting Test Model with Train Data...\n')
        model_train = pystan.StanModel(file = './law_school_only_u.stan')
        fit_train = model_train.sampling(data = law_train_dic_final, iter=2000, chains = 1)
        fit_train_samps = fit_train.extract()
        if save_models:
            with open("model_fit_train.pkl", "wb") as f:
                pickle.dump(fit_train_samps, f, protocol=-1)
            print('Saved train samples!')
    # Posterior mean of u per training row, as an Nx1 design matrix.
    train_K = np.mean(fit_train_samps['u'],axis=0).reshape(-1,1)
    # --- Stage 3: infer u for the test rows (cached as model_fit_test.pkl).
    check_test = Path("./model_fit_test.pkl")
    if check_test.is_file():
        # load posterior test samples from file
        print('File Found: Loading Test Model with Test Data...')
        if save_models:
            print('No models will be trained or saved')
        with open("model_fit_test.pkl", "rb") as f:
            fit_test_samps = pickle.load(f)
    else:
        # Obtain posterior test samples from scratch
        print('File Not Found: Fitting Test Model with Test Data...\n')
        model_test = pystan.StanModel(file = './law_school_only_u.stan')
        fit_test = model_test.sampling(data = law_test_dic_final, iter=2000, chains = 1)
        fit_test_samps = fit_test.extract()
        if save_models:
            with open("model_fit_test.pkl", "wb") as f:
                pickle.dump(fit_test_samps, f, protocol=-1)
            print('Saved test samples!')
    # Posterior mean of u per test row, as an Nx1 design matrix.
    test_K = np.mean(fit_test_samps['u'],axis=0).reshape(-1,1)
    # Train L2 Regression
    smlr_L2 = SM_LinearRegression()
    smlr_L2.fit(train_K,law_train_dic['zfya'])
    # Predict on test
    preds = smlr_L2.predict(test_K)
    # Return Results:
    return preds
# Get the Fair All/L3 Model Predictions
def L3_Model_Replication(law_train_dic, law_test_dic):
    """Fair-Add (Level 3) model: regress ZFYA on the residuals of UGPA and
    LSAT after removing the part explained by the 'a' (sensitive) columns.

    For each of UGPA and LSAT, a linear model is fit on the combined
    train+test sensitive matrix; the abducted epsilons are the observed
    values minus that model's predictions. Returns test-set predictions.
    """
    stacked_a = np.vstack((law_train_dic['a'], law_test_dic['a']))

    def abduct_residuals(field):
        # Fit field ~ a on all rows, then return (train, test) residuals.
        reg = SM_LinearRegression()
        reg.fit(stacked_a, law_train_dic[field] + law_test_dic[field])
        return (law_train_dic[field] - reg.predict(law_train_dic['a']),
                law_test_dic[field] - reg.predict(law_test_dic['a']))

    eps_g_train, eps_g_test = abduct_residuals('ugpa')
    eps_l_train, eps_l_test = abduct_residuals('lsat')

    # Predict the target from the abducted latent residuals.
    final_model = SM_LinearRegression()
    final_model.fit(np.hstack((eps_g_train.reshape(-1, 1),
                               eps_l_train.reshape(-1, 1))),
                    law_train_dic['zfya'])
    return final_model.predict(np.hstack((eps_g_test.reshape(-1, 1),
                                          eps_l_test.reshape(-1, 1))))
def main():
    """Run all replication models on the law-school data and print RMSEs.

    The L2 (Fair K) model is skipped unless -do_l2 was passed (module-level
    `args`), since it requires slow Stan sampling.
    """
    # Get the data, split train/test
    law_train_dic, law_test_dic = get_data_preprocess()
    # Get the predictions
    unfair_preds = Unfair_Model_Replication(law_train_dic, law_test_dic)
    ftu_preds = FTU_Model_Replication(law_train_dic, law_test_dic)
    if args.do_l2:
        l2_preds = L2_Model_Replication(law_train_dic, law_test_dic, args.save_l2)
    l3_preds = L3_Model_Replication(law_train_dic, law_test_dic)
    # Print the predictions (RMSE against the true test-set ZFYA)
    print('Unfair RMSE: \t\t\t%.3f' % np.sqrt(mean_squared_error(unfair_preds,law_test_dic['zfya'])))
    print('FTU RMSE: \t\t\t%.3f' % np.sqrt(mean_squared_error(ftu_preds,law_test_dic['zfya'])))
    if args.do_l2:
        print('Level 2 (Fair K) RMSE: \t\t%.3f' % np.sqrt(mean_squared_error(l2_preds,law_test_dic['zfya'])))
    print('Level 3 (Fair Add) RMSE: \t%.3f' % np.sqrt(mean_squared_error(l3_preds,law_test_dic['zfya'])))
if __name__ == '__main__':
main() | true |
06a1f2ed305a7de4bf795dc3fd7a070be39b21f0 | Python | jisoo-ho/Python_R_study | /20200427/ml_python_20200427_3.py | UTF-8 | 10,739 | 3.78125 | 4 | [] | no_license | ## 1. ๋ถ๊ฝ์ ํ์ข
๋ถ๋ฅ
### (1) ๋ฐ์ดํฐ ์ ์ฌ
# -scikit-learn ์ ๋ฐ์ดํฐ์
๋ชจ๋์ ํฌํจ๋์ด์๋ค.
import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
iris_dataset = load_iris()
print("iris_dataset์ ํค : \n{}".format(iris_dataset.keys()))
#iris_dataset์ ํค :
#dict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names', 'filename'])
print(iris_dataset.DESCR)
print(iris_dataset['DESCR'][:200]+"\n...") #๋ฐ์ดํฐ์
์ค๋ช
์๋ถ๋ถ๋ง(200๊ธ์)
# - ์์ธกํ๋ ค๋ ๋ถ๊ฝ ํ์ข
์ ์ด๋ฆ์ ๊ฐ์ง๊ณ ์๋ key : target_names
format("ํ๊ฒ์ ์ด๋ฆ: {}".format(iris_dataset['target_names']))
#"ํ๊ฒ์ ์ด๋ฆ: ['setosa' 'versicolor' 'virginica']"
# - ํน์ฑ์ ์ค๋ช
ํ๋ ๋ฌธ์์ด ๋ฆฌ์คํธ : feature_names
format("ํน์ฑ์ ์ด๋ฆ : {}".format(iris_dataset['feature_names']))
#"ํน์ฑ์ ์ด๋ฆ : ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']"
# - ์ค์ ๋ฐ์ดํฐ(target, data)์ค data๋
# ๊ฝ์์ ๊ธธ์ด์ ํญ, ๊ฝ๋ฐ์นจ์ ๊ธธ์ด์ ํญ์ ์์น ๊ฐ์ผ๋ก ๊ฐ์ง๊ณ ์๋ Numpy ๋ฐฐ์ด
print("data์ ํ์
: {}".format(type(iris_dataset['data'])))
#data์ ํ์
: <class 'numpy.ndarray'>
print("data์ ํฌ๊ธฐ : {}".format(iris_dataset['data'].shape))
# data์ ํฌ๊ธฐ : (150, 4)
# ๋ฐฐ์ด์ ํ์ ๊ฐ๊ฐ์ ๊ฝ, ์ด์ ๊ฐ ๊ฝ์ ์ธก์ ์น
# - ์ด ๋ฐฐ์ด์ 150๊ฐ์ ๋ถ๊ฝ ๋ฐ์ดํฐ๋ฅผ ๊ฐ์ง๊ณ ์์ผ๋ฉฐ,
# ๊ฐ ๋ถ๊ฝ๋ง๋ค 4๊ฐ์ ์ธก์ ์น๋ฅผ ๊ฐ์ง๊ณ ์์.
# - ๋จธ์ ๋ฌ๋์์ ๊ฐ ์์ดํ
์ ์ํ์ด๋ผ ํ๊ณ ์์ฑ์ ํน์ฑ์ด๋ผ๊ณ ๋ถ๋ฆ.
# - ๊ทธ๋ฌ๋ฏ๋ก data๋ฐฐ์ด์ ํฌ๊ธฐ๋ 150x4๊ฐ ๋จ
# - ์ด๋ scikit-learn์ ์คํ์ผ์ด๋ฉฐ ํญ์ ๋ฐ์ดํฐ๊ฐ ์ด๋ฐ ๊ตฌ์กฐ์ผ๊ฑฐ๋ผ ๊ฐ์ ํ๊ณ
print("data์ ์ฒ์ ๋ค์ฏ ํ : \n{}".format(iris_dataset.data[:5]))
#data์ ์ฒ์ ๋ค์ฏ ํ :
#[[5.1 3.5 1.4 0.2]
# [4.9 3. 1.4 0.2]
# [4.7 3.2 1.3 0.2]
# [4.6 3.1 1.5 0.2]
# [5. 3.6 1.4 0.2]]
# - 1์ด : ๊ฝ๋ฐ์นจ์ ๊ธธ์ด
# - 2์ด : ๊ฝ๋ฐ์นจ์ ํญ
# - 3์ด : ๊ฝ์์ ๊ธธ์ด
# - 4์ด : ๊ฝ์์ ํญ
# - target ๋ฐฐ์ด : ์ํ ๋ถ๊ฝ์ ํ์ข
์ ๋ด์ Numpy ๋ฐฐ์ด
print("data์ ํ์
: {}".format(type(iris_dataset.target)))
#data์ ํ์
: <class 'numpy.ndarray'>
print("data์ ํ์
: {}".format(iris_dataset.target.shape))
# 1์ฐจ์ ๋ฐฐ์ด (150,)
print("ํ๊ฒ : \n{}".format(iris_dataset.target))
# 0:setosa, 1:versicolor, 2:virginica
#ํ๊ฒ :
#[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
# 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2
# 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
# 2 2]
'''์ฌ๊ธฐ๊น์ง๊ฐ ๋ถ๊ฝ ๋ฐ์ดํฐ ๋ก๋ ํ ๋ฐ์ดํฐ์ ๋ํ ๊ธฐ๋ณธ ๋ถ์ ๋ถ๋ถ'''
### (2) ํ๋ จ๋ฐ์ดํฐ์ ํ
์คํธ ๋ฐ์ดํฐ
# - ๋จธ์ ๋ฌ๋ ๋ชจ๋ธ์ ๋ง๋ค ๋ ์ฌ์ฉํ๋ ํ๋ จ๋ฐ์ดํฐ์ ๋ชจ๋ธ์ด ์ผ๋ง๋ ์
# ์๋ํ๋์ง ์ธก์ ํ๋ ํ
์คํธ๋ฐ์ดํฐ๋ก ๋๋๋ค.
# -scikit-learn ์ ๋ฐ์ดํฐ์
์ ์์ด์ ๋๋ ์ฃผ๋ train_test_split ํจ์ ์ ๊ณต
# ํ๋ จ ์ธํธ : 75%, ํ
์คํธ์ธํธ : 25%
# - scikit-learn ์์ ๋ฐ์ดํฐ๋ ๋๋ฌธ์ X๋ก ํ์ํ๋ ๋ ์ด๋ธ์ ์๋ฌธ์ y๋ก ํ๊ธฐํ๋ค.
# - ์ด๋ ์ํ์์ ํจ์์ ์
๋ ฅ์ x, ์ถ๋ ฅ์ y๋ก ๋ํ๋ด๋ ํ์ค๊ณต์ f(x) = y์์ ์ ๋๋ ๊ฒ์ด๋ค.
# - ์ํ์ ํ๊ธฐ ๋ฐฉ์์ ๋ฐ๋ฅด๋ ๋ฐ์ดํฐ๋ 2์ฐจ์ ๋ฐฐ์ด(ํ๋ ฌ)์ด๋ฏ๋ก ๋๋ฌธ์X๋ฅผ,
# ํ๊น์ 1์ฐจ์ ๋ฐฐ์ด(๋ฒกํฐ)์ด๋ฏ๋ก ์๋ฌธ์y๋ฅผ ์ฌ์ฉ
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris_dataset.data,
iris_dataset.target,
random_state=0)
# - train ๋ฐ์ดํฐ์, test ๋ฐ์ดํฐ๋ก ๋๋๊ธฐ ์ ์ ๋ฌด์์๋ก ์์ด์ฃผ์ง ์์ผ๋ฉด
# ์์๋๋ก ๋๋์ด ์ง๊ธฐ ๋๋ฌธ์ y_test(ํ
์คํธ๋ ์ด๋ธ) ๊ฐ์ด ๋ชจ๋ 2๊ฐ ๋์ค๊ฒ ๋๋ค.
# - ์ธ ํด๋์ค(ํ์ข
) ์ค ํ๋๋ง ํฌํจํ ํ
์คํธ ์ธํธ๋ฅผ ์ฌ์ฉํ๋ฉด
# ๋ชจ๋ธ์ด ์ผ๋ง๋ ์ ์ผ๋ฐํ ๋์๋์ง ์ ์ ์๋ค.
# - ํ
์คํธ ์ธํธ๋ ๋ชจ๋ ํด๋์ค์ ๋ฐ์ดํฐ๋ฅผ ํฌํจํ๋๋ก ์ ์์ด์ผ ํ๋ค.
# - random_state =0์ ์ด ํจ์๋ฅผ ์ฌ๋ฌ๋ฒ ์คํํด๋ ๊ฐ์ ๋๋ค๊ฐ์ด ๋ฆฌํด๋๋ค.
print("X_train ํฌ๊ธฐ : {}".format(X_train.shape)) # (112, 4)
print("y_train ํฌ๊ธฐ : {}".format(y_train.shape)) # (112,)
print("X_test ํฌ๊ธฐ : {}".format(X_test.shape)) # (38, 4)
print("y_test ํฌ๊ธฐ : {}".format(y_test.shape)) # (38,)
# (3) ๋ฐ์ดํฐ ์ดํด๋ณด๊ธฐ
# - ๋จธ์ ๋ฌ๋ ๋ชจ๋ธ์ ๋ง๋ค๊ธฐ ์ ์ ๋จธ์ ๋ฌ๋ ์์ด๋ ํ ์ ์๋ ๋ฌธ์ ๊ฐ ์๋์ง,
# ํน์ ํ์ํ ์ ๋ณด๊ฐ ๋๋ฝ๋์ด ์๋์ง ๋ฐ์ดํฐ๋ฅผ ์กฐ์ฌํด ๋ณด๋๊ฒ์ด ์ข๋ค.
# - ์ค์ ๋ฐ์ดํฐ์๋ ์ผ๊ด์ฑ์ด ์๊ฑฐ๋ ์ด์ํ ๊ฐ์ด ๋ค์ด๊ฐ ์๋ ๊ฒฝ์ฐ๊ฐ ์ข
์ข
์๋ค.
# ** ์ฐ์ ๋ ํ๋ ฌ์ ํตํด ๋ฐ์ดํฐ์ ํน์ฑ์ ์ฐพ์๋ณด์ **
# - ์ฐ์ ๋ : ์ฌ๋ฌ ๋ณ์๋ก ์ด๋ฃจ์ด์ง ์๋ฃ์์ ๋ ๋ณ์๋ผ๋ฆฌ ์ง์ ์ง์ด ์์ฑ๋ ์ฐ์ ๋๋ฅผ ํ๋ ฌ ํํ๋ก ๋ฐฐ์ด
# X_train ๋ฐ์ดํฐ๋ฅผ ์ฌ์ฉํด์ ๋ฐ์ดํฐ ํ๋ ์์ ๋ง๋ ๋ค.
iris_dataframe = pd.DataFrame(X_train,
columns = iris_dataset.feature_names)
iris_dataframe.head()
pd.plotting.scatter_matrix(iris_dataframe, c=y_train,
figsize=(15,15),
marker='o', hist_kwds={'bins':20},
s=60,alpha=.8)
# - ์ธ ํด๋์ค๊ฐ ๊ฝ์๊ณผ ๊ฝ๋ฐ์นจ์ ์ธก์ ๊ฐ์ ๋ฐ๋ผ
# ๋น๊ต์ ์ ๊ตฌ๋ถ๋์ด ์๋ ๊ฒ์ ๋ณผ ์ ์๋ค.
# - ํด๋์ค ๊ตฌ๋ถ์ ์ํ ๋จธ์ ๋ฌ๋ ๊ธฐ๋ฒ์ ์ฌ์ฉํ๋ฉด ์ ๊ตฌ๋ถ ๋ ๊ฒ์ด๋ค.
# (4) K- ์ต๊ทผ์ ์ด์(k_nearest neighbors, k-nn) ์๊ณ ๋ฆฌ์ฆ์ ์ด์ฉํ ๋จธ์ ๋ฌ๋
# - ํ๋ จ ๋ฐ์ดํฐ๋ฅผ ํตํด ๋ชจ๋ธ์ด ๋ง๋ค์ด์ง๊ณ
# ์๋ก์ด ๋ฐ์ดํฐ๊ฐ ๋ค์ด์ค๋ฉด ๊ฐ๊น์ด ํ๋ จ ๋ฐ์ดํฐ ํฌ์ธํธ๋ฅผ ์ฐพ์ ๋ถ๋ฅํ๋ค.
# -scikit-learn์ ๋ชจ๋ ๋จธ์ ๋ฌ๋ ๋ชจ๋ธ์
# Estimator๋ผ๋ ํ์ด์ฌ ํด๋์ค๋ก ๊ฐ๊ฐ ๊ตฌํ๋์ด ์๋ค.
# - k-์ต๊ทผ์ ์ด์ ๋ถ๋ฅ ์๊ณ ๋ฆฌ์ฆ์
# neighbors ๋ชจ๋ ์๋ KNeighborsClassifier ํด๋์ค์ ๊ตฌํ๋์ด ์๋ค.
# - ๋ชจ๋ธ์ ์ฌ์ฉํ๊ธฐ ์ํด ํด๋์ค๋ก๋ถํฐ ๊ฐ์ฒด๋ฅผ ๋ง๋ค๊ณ parameter๋ฅผ ์ค์ ํ๋ค.
# - ๊ฐ์ฅ ์ค์ํ ์ด์์ ๊ฐ์๋ฅผ 1๋ก ์ง์ ํ๊ณ ๋ชจ๋ธ์ ๋ง๋ค์ด๋ณด์.
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 1)
# - ํ๋ จ ๋ฐ์ดํฐ ์
์ผ๋ก๋ถํฐ ๋ชจ๋ธ์ ๋ง๋ค๊ธฐ ์ํด fit ๋ฉ์๋ ์ฌ์ฉ
knn.fit(X_train, y_train)
# - fit ๋ฉ์๋๋ knn ๊ฐ์ฒด ์์ฒด๋ฅผ ๋ณํ์ํค๋ฉด์ ๋ฐํ ์ํจ๋ค.
# KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
# metric_params=None, n_jobs=None, n_neighbors=1, p=2,
# weights='uniform')
# (5) ์์ธกํ๊ธฐ
# - ์์์ ๋ง๋ ๋ชจ๋ธ์ ์ฌ์ฉํด์ ์ ๋ฐ์ดํฐ์ ๋ํ ์์ธก์ ๋ง๋ค ์ ์๋ค.
# - ์ผ์์์ ๊ฝ๋ฐ์นจ์ ๊ธธ์ด๋ 3cm, ํญ์ 4.2cm,
# ๊ฝ์์ ๊ธธ์ด๋ 0.8cm, ํญ์ 0.4cm์ธ ๋ถ๊ฝ์ ์ฐพ์๋ค๊ณ ๊ฐ์ ํ๊ณ
# ์ด ๋ถ๊ฝ์ ํ์ข
์ ์ฐพ์๋ณด์
# - ์ธก์ ๊ฐ์ numpy๋ฐฐ์ด๋ก ๋ง๋๋๋ฐ,
# ํ๋์ ๋ถ๊ฝ ์ํ(1) ์ 4๊ฐ์ง ํน์ฑ(4)์ด ์์ผ๋ฏ๋ก 1 by 4 ๋ฐฐ์ด์ ๋ง๋ค์ด์ผ ํ๋ค.
#(๋ถ๊ฝ ๋ฐ์ดํฐ๊ฐ ์ด๋ ๊ฒ ๋์ด์๊ธฐ ๋๋ฌธ์ ์ด๋ ๊ฒ ๋ง๋ ๋ค.)
# - ๋ถ๊ฝ ํ๋์ ์ธก์ ๊ฐ์ 2์ฐจ์ numpy ๋ฐฐ์ด์ ํ์ผ๋ก ๋ค์ด๊ฐ๋ฏ๋ก,
# scikit-learn์ ํญ์ ๋ฐ์ดํฐ๊ฐ 2์ฐจ์ ๋ฐฐ์ด์ผ ๊ฒ์ผ๋ก ์์
X_new = np.array([[3, 4.2, 0.8, 0.4]]) # 1์ฐจ์ ๋ฐฐ์ด๋ก ๋ง๋ค๋ฉด ์ค๋ฅ๊ฐ ๋๋ค.
X_new
print("X_new.shape : {}".format(X_new.shape))
# X_new.shape : (1, 4)
prediction = knn.predict(X_new)
print("์์ธก : {}".format(prediction)) # [0] --> 0์ ์ธํ ์ฌ
print("์์ธกํ ๋ถ๊ฝ์ ์ด๋ฆ : {}".format(iris_dataset['target_names'][prediction]))
# ์์ธกํ ๋ถ๊ฝ์ ์ด๋ฆ : ['setosa']
# - ํ๋์ ์
๋ ฅ, ํน์ฑ์ ๊ฐ์ง ๊ฐ์ด ์๋๊ธฐ ๋๋ฌธ์
# ์๋์ ๊ฐ์ด ๋ฒกํฐํํ๋ก ๋ํ๋ด๋ฉด ์๋ฌ๊ฐ ๋๋ค.
X_new2 = np.array([3, 4.2, 0.8, 0.4])
X_new2prediction = knn.predict(X_new2)
#ValueError: Expected 2D array, got 1D array instead:
#array=[3. 4.2 0.8 0.4].
#Reshape your data either using array.reshape(-1, 1)
#if your data has a single feature or array.reshape(1, -1) if it contains a single sample.
# --> X_new = np.array([[3, 4.2, 0.8, 0.4]]) ์ด์ ๊ฐ์ ํํ์ ๋ฐฐ์ด์ด์ด์ผ ํ๋ค.
# (6) ๋ชจ๋ธ ํ๊ฐ
# - ์์์ ๋ง๋ ํ
์คํธ ์
์ ๊ฐ์ง๊ณ
# ํ์ฌ ๋ง๋ ํ์ต ๋ชจ๋ธ์ด ์ ๋ง๋ค์ด ์ก๋์ง ํ์ธํด๋ณด์
y_pred = knn.predict(X_test)
# ๋ง๋ค์ด์ง ํ์ต ๋ชจ๋ธ์ ๊ฐ์ง๊ณ ํ
์คํธ ๋ฐ์ดํฐ์ ๋ถ๊ฝํ์ข
์ ์์ธกํ๋ค.
y_pred
# ํ
์คํธ ๋ฐ์ดํฐ์ ์์ธก ๊ฐ
#array([2, 1, 0, 2, 0, 2, 0, 1, 1, 1, 2, 1, 1, 1, 1, 0, 1, 1, 0, 0, 2, 1,
# 0, 0, 2, 0, 0, 1, 1, 0, 2, 1, 0, 2, 2, 1, 0, 2])
y_pred == y_test
# ์์ธก ํ์ข
๊ณผ ์ค์ ํ์ข
์ด ๊ฐ์ผ๋ฉด true
#array([ True, True, True, True, True, True, True, True, True,
# True, True, True, True, True, True, True, True, True,
# True, True, True, True, True, True, True, True, True,
# True, True, True, True, True, True, True, True, True,
# True, False])
# ํ
์คํธ ์ธํธ์ ์ ํ๋
# y_pred = knn.predict(X_test)
print("ํ
์คํธ ์ธํธ์ ์ ํ๋ : {:.4f}% ".format(np.mean(y_pred==y_test)*100))
#ํ
์คํธ ์ธํธ์ ์ ํ๋ : 97.3684%
# knn ๊ฐ์ฒด์ score ๋ฉ์๋ ์ฌ์ฉ
print("ํ
์คํธ ์ธํธ์ ์ ํ๋ : {:.4f} %".format(knn.score(X_test, y_test)*100))
#ํ
์คํธ ์ธํธ์ ์ ํ๋ : 97.3684 %
# sklearn.metrics ์ accuracy_score ์ฌ์ฉ
from sklearn import metrics
# y_pred = knn.predict(X_test)
print("ํ
์คํธ ์ธํธ์ ์ ํ๋ : {:.4f} %".format(metrics.accuracy_score(y_test, y_pred)*100))
#ํ
์คํธ ์ธํธ์ ์ ํ๋ : 97.3684 %
# (7) k๊ฐ ๋ณ๊ฒฝ
accuracy_set = []
k_set = [1,3,5,7,9,11]
for k in k_set:
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_pred)
accuracy_set.append(accuracy)
from pprint import pprint
pprint(accuracy_set)
#[0.9736842105263158,
# 0.9736842105263158,
# 0.9736842105263158,
# 0.9736842105263158,
# 0.9736842105263158,
# 0.9736842105263158]
max(accuracy_set)
#0.9736842105263158 | true |
539dbca959e9d1d649bf51df2c6b49b59152b5ae | Python | rahulkmr/Key-Value-Polyglot | /memg_epoll.py | UTF-8 | 1,960 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
import select
import socket
from collections import defaultdict
cache = {}
writes = defaultdict(list)
fd_to_file = {}
def _server_socket():
    """Create and return a TCP socket listening on 127.0.0.1:11211."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(("127.0.0.1", 11211))
    listener.listen(5)
    return listener
def main():
    """Single-threaded memcached-style server loop built on epoll.

    Accepts connections on the listening socket and services get/set lines;
    pending responses are buffered in the module-level `writes` dict and
    flushed when the client's socket reports writability.
    """
    sock = _server_socket()
    epoll = select.epoll()
    epoll.register(sock, select.EPOLLIN)
    sock_fd = sock.fileno()
    while True:
        # Block until at least one registered fd is ready.
        ready = epoll.poll()
        for fd, event in ready:
            if fd == sock_fd:
                # New client: wrap the socket in a file object for line I/O.
                conn, _ = sock.accept()
                fd_to_file[conn.fileno()] = conn.makefile()
                epoll.register(conn, select.EPOLLIN | select.EPOLLOUT)
            else:
                sockfile = fd_to_file[fd]
                if event & select.EPOLLOUT:
                    # Flush any queued response fragments for this client.
                    if writes[fd]:
                        sockfile.write(''.join(writes[fd]))
                        sockfile.flush()
                        writes[fd] = []
                if event & select.EPOLLIN:
                    line = sockfile.readline()
                    if line == 'quit\r\n':
                        # Client asked to close its connection.
                        epoll.unregister(fd)
                        sockfile.close()
                        continue
                    handle_read(fd, line, sockfile)
def handle_read(conn, line, sockfile):
    """Handle one memcached-protocol command line for client fd `conn`.

    Responses are appended to the module-level `writes[conn]` buffer and
    sent later by the event loop when the socket is writable.
    """
    tokens = line.split()
    command = tokens[0]
    out = writes[conn]
    if command == "get":
        key = tokens[1]
        if key in cache:
            value = cache[key]
            # memcached reply: VALUE <key> <flags> <bytes>\r\n<data>\r\n
            out.append("VALUE %s 0 %d\r\n" % (key, len(value)))
            out.append(value + "\r\n")
        out.append("END\r\n")
    elif command == "set":
        key = tokens[1]
        nbytes = int(tokens[4])
        # Read the payload plus trailing \r\n, keeping only the payload.
        cache[key] = sockfile.read(nbytes + 2)[:nbytes]
        out.append("STORED\r\n")
if __name__ == "__main__":
main()
| true |
ec219db1f31549acfba9ed5373d2a98dc26650ee | Python | hashem78/LectureRusher | /lambda_script.py | UTF-8 | 3,256 | 2.640625 | 3 | [
"MIT"
] | permissive | '''
This is the api script used for the text analysis feature in lecture rusher
'''
import boto3
import json
from pprint import pprint
def lambda_handler(event, context):
    """API Gateway handler: run AWS Comprehend analysis on a text string.

    Expects the text in the ``text`` query-string parameter. Returns an API
    Gateway proxy response whose JSON body contains the overall sentiment,
    the detected entities grouped by entity type, and tokens grouped by
    part of speech.
    """
    text = event['queryStringParameters']['text']
    comprehend = boto3.client("comprehend")

    # Overall sentiment label for the whole text.
    sentiment = comprehend.detect_sentiment(Text=text, LanguageCode="en")['Sentiment']

    # Bucket detected entities by their Comprehend entity type.
    entity_buckets = {
        'COMMERCIAL_ITEM': [], 'DATE': [], 'EVENT': [], 'LOCATION': [],
        'ORGANIZATION': [], 'OTHER': [], 'PERSON': [], 'QUANTITY': [],
        'TITLE': [],
    }
    entities = comprehend.detect_entities(Text=text, LanguageCode="en")
    for entity in entities['Entities']:
        bucket = entity_buckets.get(entity['Type'])
        if bucket is not None:
            bucket.append(entity['Text'])

    # Bucket tokens by part-of-speech tag. BUG FIX: the original declared an
    # adjectives list and returned it, but never collected 'ADJ' tokens.
    syntax_buckets = {
        'VERB': [], 'ADJ': [], 'NOUN': [], 'PRON': [], 'INTJ': [], 'ADV': [],
    }
    syntax = comprehend.detect_syntax(Text=text, LanguageCode="en")
    for token in syntax['SyntaxTokens']:
        bucket = syntax_buckets.get(token['PartOfSpeech']['Tag'])
        if bucket is not None:
            bucket.append(token['Text'])

    # Build response body. NOTE: 'comercial_items' (sic) is kept as-is
    # because it is part of the public response schema.
    body = {
        'text': text,
        'sentiment': sentiment,
        'entities': {
            'comercial_items': entity_buckets['COMMERCIAL_ITEM'],
            'dates': entity_buckets['DATE'],
            'events': entity_buckets['EVENT'],
            'locations': entity_buckets['LOCATION'],
            'organizations': entity_buckets['ORGANIZATION'],
            'others': entity_buckets['OTHER'],
            'persons': entity_buckets['PERSON'],
            'quantities': entity_buckets['QUANTITY'],
            'titles': entity_buckets['TITLE'],
        },
        'syntax': {
            'verbs': syntax_buckets['VERB'],
            'adjs': syntax_buckets['ADJ'],
            'nouns': syntax_buckets['NOUN'],
            'pronouns': syntax_buckets['PRON'],
            'interjections': syntax_buckets['INTJ'],
            'adverbs': syntax_buckets['ADV'],
        },
    }
    return {
        "isBase64Encoded": False,
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps(body)
    }
1dade8948fe6ec6f9b816a82401afa20e7b24434 | Python | Google1234/Big-Data-Machine-Learning | /NaiveBayes_MLE.py | UTF-8 | 4,359 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | '''
method: ๆด็ด ่ดๅถๆฏ็ฝ็ป ๆๅคงไผผ็ถไผฐ่ฎก็ฎๆณ
่พๅ
ฅ๏ผ
file_train/file_test txtๆไปถๅ๏ผ่ฆๆฑๆไปถๆๅไธๅไธบlabel๏ผๅ
ถๅฎๅไธบfeature
feature ็นๅพๆฐ็ฎ
label_numbers ๆ ็ญพ็ฑปๅซๆฐ๏ผไบๅ็ฑปไธบ้ขๅณไธบ2
feature_numbers ๆฏไธช็นๅพ็ฑปๅซๆฐ
label ไธบ0/1๏ผfeature ไธบ0/1
'''
def bayes(file_train, file_test, feature_numbers, label_number, feature):
    """Train and evaluate a naive Bayes classifier via maximum likelihood.

    file_train / file_test: tab-separated files where the first `feature`
    columns are single-digit 0/1 features and the last column is the 0/1
    label. `feature_numbers` is the number of values each feature can take
    (2 here) and `label_number` the number of label classes (2 here).
    Prints the estimated probabilities and the test-set correct/wrong counts.

    NOTE(review): the locals `list` and `sum` shadow builtins; left as-is
    since this is a documentation-only pass.
    """
    f=open(file_train)
    # Index encoding for binary features/labels: 0 -> feature=0,label=0;
    # 1 -> feature=0,label=1; 2 -> feature=1,label=0; 3 -> feature=1,label=1
    combination_numbers=feature_numbers*label_number
    # count[combo][i]: occurrences of (feature value, label) combo for feature i
    count=[[0 for i in range(feature)]for j in range(combination_numbers)]
    # y_count[c]: occurrences (later: prior probability) of label class c
    y_count=[0 for i in range(label_number)]
    while 1:
        line=f.readline()
        if not line:
            break
        list=line.split('\t')
        num=[]
        for word in list:
            # Convert each single-digit field to an int, stripping the trailing newline.
            if word[-1]=='\n':
                num.append(ord(word[:-1])-ord('0'))
            else:
                num.append(ord(word)-ord('0'))
        for i in range(feature):
            # num[feature] is the label column.
            index=num[i]*feature_numbers+num[feature]
            count[index][i]+=1
        ###########################
        # (translated) This label tally assumes binary classification;
        # change it for more than two classes.
        if num[feature]==0:
            y_count[0]+=1
        else:
            y_count[1]+=1
        ###########################
    f.close()
    # Normalize label counts into prior probabilities P(y).
    sum=0
    for i in range(label_number):
        sum+=y_count[i]
    for i in range(label_number):
        y_count[i]=y_count[i]*1.0/sum
    for i in range(feature):
        #################################
        # (translated) Normalize the per-label feature counts into conditional
        # probabilities P(x_i | y); hard-coded for feature_numbers == 2 and
        # label_number == 2 -- change if either differs.
        sum=(count[0][i]+count[1][i])
        count[0][i]=count[0][i]*1.0/sum
        count[1][i]=count[1][i]*1.0/sum
        sum=(count[2][i]+count[3][i])
        count[2][i]=count[2][i]*1.0/sum
        count[3][i]=count[3][i]*1.0/sum
        #################################################
    print(y_count)
    for i in range(combination_numbers):
        print(count[i])
    # Evaluate on the test file by comparing the two class posteriors.
    f=open(file_test)
    wrong=0
    correct=0
    while 1:
        line=f.readline()
        if not line:
            break
        list=line.split('\t')
        num=[]
        for word in list:
            if word[-1]=='\n':
                num.append(ord(word[:-1])-ord('0'))
            else:
                num.append(ord(word)-ord('0'))
        #######################
        # (translated) Posterior computation assumes binary classification;
        # change below for more classes.
        pro_0=1.0
        pro_1=1.0
        for i in range(feature):
            pro_0=pro_0*count[num[i]*feature_numbers][i]
            pro_1=pro_1*count[num[i]*feature_numbers+1][i]
        pro_0=pro_0*y_count[0]
        pro_1=pro_1*y_count[1]
        # Prediction is correct when the larger posterior matches the label.
        if (pro_0>pro_1 and num[feature]==0) or(pro_0<=pro_1 and num[feature]==1):
            correct+=1
        else:
            wrong+=1
        ##########################
    f.close()
    print('ๅ็ฑปๆญฃ็กฎ๏ผ',correct)  # "classification correct" count
    print('ๅ็ฑป้่ฏฏ๏ผ',wrong)  # "classification wrong" count
'''
DEMO
'''
def count():
    """DEMO: lookup-table classifier over 5 binary features.

    Tallies (feature-pattern, label) counts from trainingData.txt, then
    classifies each row of testingData.txt by majority label for its
    pattern, printing the tables and the wrong/total counts.

    NOTE(review): the local `count` (row counter) and `list` shadow the
    function's own name and a builtin; left unchanged in this doc-only pass.
    """
    file="trainingData.txt"
    f=open(file)
    count=0
    # label[c][pattern]: how often the 5-bit feature pattern occurred with label c.
    label=[[0 for i in range(2**5)] for j in range(2)]
    while 1:
        line=f.readline()
        if not line:
            break
        count+=1
        list=line.split('\t')
        num=[]
        for word in list:
            # Single-digit fields -> ints, stripping a trailing newline if present.
            if word[-1]=='\n':
                num.append(ord(word[:-1])-ord('0'))
            else:
                num.append(ord(word)-ord('0'))
        # Pack the 5 binary features into one index in [0, 31].
        index=num[0]+num[1]*2+num[2]*4+num[3]*8+num[4]*16
        if num[len(num)-1]==0:
            label[0][index]+=1
        else :
            label[1][index]+=1
    f.close()
    print(label[0])
    print(label[1])
    # Evaluate: predict the majority label seen in training for each pattern.
    f=open("testingData.txt")
    count=0
    wrong=0
    while 1:
        line=f.readline()
        if not line:
            break
        count+=1
        list=line.split('\t')
        num=[]
        for word in list:
            if word[-1]=='\n':
                num.append(ord(word[:-1])-ord('0'))
            else:
                num.append(ord(word)-ord('0'))
        index=num[0]+num[1]*2+num[2]*4+num[3]*8+num[4]*16
        # Ties (including unseen patterns, 0 vs 0) predict label 1.
        if label[0][index]>label[1][index]:
            pre=0
        else:
            pre=1
        if pre!=num[len(num)-1]:
            wrong+=1
    f.close()
    print('wrong',wrong,'total',count)
count()
bayes('trainingData.txt','testingData.txt',2,2,5) | true |
a4cedbf9b3d5be54bb07a047eb1e5830758d93da | Python | pseudoBit/FinanceNewsSpider | /FinanceNewsSpider.py | UTF-8 | 1,611 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from urllib2 import urlopen
import re
news_arr = []
bot_api = '' # bot api
user_api = [''] # channel link : @...
# This program works only for python 2.7
# if has Chinese, apply decode()
import threading
def printit():
    """Poll the finance-news JSON feed and push new items to Telegram.

    Reschedules itself every 15 seconds via threading.Timer, scrapes news
    IDs/times/contents out of the feed with regexes, and sends any unseen
    items to each channel in the module-level `user_api` list.

    NOTE(review): the Timer is started *before* the fetch, so a slow fetch
    (>15s) can overlap with the next run. Also, `'last_news' in locals()`
    is always False here because last_news is declared global -- only the
    globals() check ever matters. Python 2 only (urllib2, byte-str concat).
    """
    global last_news
    # Schedule the next poll immediately, regardless of how long this one takes.
    threading.Timer(15.0, printit).start()
    html = urlopen("http://119.29.63.230/24h/news_fbe.json?newsid=0").read().decode('utf-8')
    var_exists = 'last_news' in locals() or 'last_news' in globals()
    if var_exists:
        # Number of items published since the last seen news ID.
        update_index = int(str(re.findall(r'"newsID":"(.+?)",',html)[0])) - last_news
    else:
        # First run: treat everything as new (feed page holds ~20 items).
        update_index = 21
    if update_index > 0:
        rev_list = range(min(update_index+5,20))
        news_id = re.findall(r'"newsID":"(.+?)",',html)
        # Split "date time" stamps into alternating day/moment halves.
        news_time = re.split(r'\s',str(re.findall(r'"time":"(.+?)",',html)))
        news_moment = news_time[1::2]
        news_day = news_time[0::2]
        # Drop the leading "[" left over from stringifying the findall list.
        news_day[0] = news_day[0][1:]
        news_content = re.findall(r'"content":"(.+?)",',html)
        # Oldest first, so messages arrive in chronological order.
        for ix in rev_list[::-1]:
            if str(news_id[ix]) not in news_arr:
                news_arr.append(str(news_id[ix]))
                news_temp = news_day[ix][7:12].encode('utf-8') + str(' ') + news_moment[ix][0:5].encode('utf-8') + str(' ') + news_content[ix].encode('utf-8')
                for user_num in range(len(user_api)):
                    # Fire-and-forget send via the Telegram bot HTTP API.
                    urlopen("https://api.telegram.org/bot"+bot_api+"/sendMessage?chat_id="+user_api[user_num]+"&text="+news_temp).close()
        last_news = int(str(news_id[0]))
        # Keep the seen-IDs list bounded.
        if len(news_arr)>50:
            del news_arr[0:20]
printit()
| true |
a40b4b5e5058ffd4611c49e196020d4751a80a4e | Python | shadyskies/django-projects | /improve_english_app/dict_words/retrieve_random_words.py | UTF-8 | 1,609 | 2.71875 | 3 | [] | no_license | import os
from bs4 import BeautifulSoup
import mysql.connector
from dotenv import load_dotenv
HEADERS = ({'User-Agent':
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5'})
def get_words(count=2):
    """Return `count` random (word, definition) rows from the entries table.

    Parameters:
        count (int): number of random rows to fetch (default 2).

    Returns:
        list of (word, definition) tuples.
    """
    load_dotenv()
    mydb = mysql.connector.connect(
        host="localhost",
        user="django_projects",
        password=os.getenv('DB_PWD'),
        database="entries"
    )
    try:
        mycursor = mydb.cursor()
        # Parameterized LIMIT instead of string concatenation; int() rejects
        # non-numeric values early instead of corrupting the SQL.
        mycursor.execute(
            "SELECT word, definition FROM entries ORDER BY RAND() LIMIT %s",
            (int(count),)
        )
        return mycursor.fetchall()
    finally:
        # The connection was previously leaked on every call.
        mydb.close()
# TODO: implementing sentence for particular word
def get_sentence(result):
    """Fetch example-sentence markup from yourdictionary.com for each row.

    `result` is a list of (word, definition) tuples as returned by
    get_words()/search(); for each word the sentence page is scraped and
    the sentence divs are printed.

    NOTE(review): `requests` is never imported in this module, so
    requests.get always raises NameError, which the bare `except` below
    silently swallows -- every word currently prints 'word not found'.
    Add `import requests` at the top of the file to make this work.
    """
    for i in result:
        try:
            print(i[0].lower())
            url = 'https://sentence.yourdictionary.com/' + i[0].lower()
            page = requests.get(url, headers=HEADERS)
            soup = BeautifulSoup(page.content, features='html.parser')
            sentences = soup.find_all('div', {'class': 'sentence-item'})
            print(sentences)
        except:
            # Bare except also hides network/parse errors, not just misses.
            print('word not found')
def search(word):
    """Look up a word in the entries table.

    Parameters:
        word (str): exact word to look up (user-supplied).

    Returns:
        list of matching (word, definition) tuples (empty when not found).
    """
    load_dotenv()
    mydb = mysql.connector.connect(
        host="localhost",
        user="django_projects",
        password=os.getenv('DB_PWD'),
        database="entries"
    )
    try:
        mycursor = mydb.cursor()
        # Parameterized query: the previous f-string interpolation of the
        # user-supplied word was an SQL injection vector.
        mycursor.execute(
            "SELECT word, definition FROM entries WHERE word = %s",
            (word,)
        )
        result = mycursor.fetchall()
        print(result)
        return result
    finally:
        # The connection was previously leaked on every call.
        mydb.close()
| true |
3de108ccd552a38eef1e37c6b1e01e963d9c69e6 | Python | S1car1o/opencv-cascade-make | /face-align/facealign_imutils.py | UTF-8 | 2,913 | 2.546875 | 3 | [] | no_license | import os, glob
import cv2
import dlib
import numpy
from imutils.face_utils import FaceAligner
from imutils.face_utils import rect_to_bb
import imutils
# Width (px) passed to FaceAligner as desiredFaceWidth for the aligned crop.
faceWidth = 120
# Only files with this extension are processed.
imgFileType = "jpg"
# Input tree: one sub-folder per person under this directory.
peopleFolder = "/home/chtseng/works/face-align/peoples"
# Output tree: mirrors the per-person layout of peopleFolder.
outputFaceFolder = "/home/chtseng/works/face-align/faces"
# dlib 68-point facial landmark model used for alignment.
faceLandmarkModel = "shape_predictor_68_face_landmarks.dat"
#detector = dlib.get_frontal_face_detector()
#predictor = dlib.shape_predictor(faceLandmarkModel)
#fa = FaceAligner(predictor, desiredFaceWidth=faceWidth)
def load_images_from_folder(folder, outputFolder):
    """Detect, align and crop faces for every person folder under `folder`.

    For each <folder>/<label>/*.jpg, face crops are aligned with dlib
    landmarks and written to <outputFolder>/<label>/<name>-<i>.jpg.
    """
    global faceLandmarkModel, faceWidth
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(faceLandmarkModel)
    fa = FaceAligner(predictor, desiredFaceWidth=faceWidth)
    for person_dir in glob.glob(folder + "/*"):
        label = os.path.basename(person_dir)
        print("Load {} ...".format(label))
        out_dir = os.path.join(outputFolder, label)
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        for filename in os.listdir(person_dir):
            jpgname, file_extension = os.path.splitext(filename)
            # Skip non-jpg files early (the `label is not None` check that
            # used to wrap this was always true and has been removed).
            if file_extension.lower() != "." + imgFileType:
                continue
            # The original joined os.path.join(folder, folders, filename),
            # which only worked because `folder` is absolute; join the
            # per-person directory directly instead.
            img_path = os.path.join(person_dir, filename)
            print("read file: ", img_path)
            img = cv2.imread(img_path)
            if img is None:
                continue
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            rects = detector(gray, 2)
            print("find {} faces".format(len(rects)))
            i = 0  # counts crops actually written for this image
            for rect in rects:
                # Align the face using facial landmarks, then re-detect on
                # the aligned image to get a tight crop.
                faceAligned = fa.align(img, gray, rect)
                gray2 = cv2.cvtColor(faceAligned, cv2.COLOR_BGR2GRAY)
                for rectFinal in detector(gray2, 2):
                    (x2, y2, w2, h2) = rect_to_bb(rectFinal)
                    face2 = faceAligned[y2:y2 + h2, x2:x2 + w2]
                    out_path = os.path.join(
                        out_dir, "{}-{}.jpg".format(jpgname, i))
                    print("write face to ", out_path)
                    cv2.imwrite(out_path, face2)
                    i += 1
load_images_from_folder(peopleFolder, outputFaceFolder)
| true |
80d657fda98437ea9c270d0d1cc3d8f0723ed772 | Python | harryturr/harryturr_garmin_dashboard | /env/lib/python3.6/site-packages/dash_core_components/ConfirmDialogProvider.py | UTF-8 | 3,107 | 2.609375 | 3 | [
"MIT"
] | permissive | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class ConfirmDialogProvider(Component):
    """A ConfirmDialogProvider component.
    A wrapper component that will display a confirmation dialog
    when its child component has been clicked on.

    For example:
    ```
    dcc.ConfirmDialogProvider(
        html.Button('click me', id='btn'),
        message='Danger - Are you sure you want to continue.'
        id='confirm')
    ```

    Keyword arguments:
    - children (boolean | number | string | dict | list; optional): The children to hijack clicks from and display the popup.
    - id (string; optional): The ID of this component, used to identify dash components
    in callbacks. The ID needs to be unique across all of the
    components in an app.
    - message (string; optional): Message to show in the popup.
    - submit_n_clicks (number; default 0): Number of times the submit was clicked
    - submit_n_clicks_timestamp (number; default -1): Last time the submit button was clicked.
    - cancel_n_clicks (number; default 0): Number of times the popup was canceled.
    - cancel_n_clicks_timestamp (number; default -1): Last time the cancel button was clicked.
    - displayed (boolean; optional): Is the modal currently displayed.
    - loading_state (dict; optional): Object that holds the loading state object coming from dash-renderer. loading_state has the following type: dict containing keys 'is_loading', 'prop_name', 'component_name'.
    Those keys have the following types:
    - is_loading (boolean; optional): Determines if the component is loading or not
    - prop_name (string; optional): Holds which property is loading
    - component_name (string; optional): Holds the name of the component that is loading"""
    @_explicitize_args
    def __init__(self, children=None, id=Component.UNDEFINED, message=Component.UNDEFINED, submit_n_clicks=Component.UNDEFINED, submit_n_clicks_timestamp=Component.UNDEFINED, cancel_n_clicks=Component.UNDEFINED, cancel_n_clicks_timestamp=Component.UNDEFINED, displayed=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
        # Metadata consumed by the Dash renderer; auto-generated, do not edit.
        self._prop_names = ['children', 'id', 'message', 'submit_n_clicks', 'submit_n_clicks_timestamp', 'cancel_n_clicks', 'cancel_n_clicks_timestamp', 'displayed', 'loading_state']
        self._type = 'ConfirmDialogProvider'
        self._namespace = 'dash_core_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['children', 'id', 'message', 'submit_n_clicks', 'submit_n_clicks_timestamp', 'cancel_n_clicks', 'cancel_n_clicks_timestamp', 'displayed', 'loading_state']
        self.available_wildcard_properties = []
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # The empty list below means no props are strictly required.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(ConfirmDialogProvider, self).__init__(children=children, **args)
| true |
def data_reverse(data):
    """Return `data` with its 8-element chunks in reverse order.

    Element order inside each chunk is preserved; `data` itself is not
    modified. Assumes len(data) is a multiple of 8.
    """
    return [data[j]
            for start in range(len(data) - 8, -8, -8)
            for j in range(start, start + 8)]
# data2 is data1 with its 8-bit chunks reversed, i.e. the expected output of
# data_reverse(data1); both are printed for side-by-side visual comparison.
data1 = [1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,1,0,1,0,1,0]
data2 = [1,0,1,0,1,0,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
print(data_reverse(data1))
print(data2)
a020e4d7f6942ff9a5a31a3284c59697f6fd3fb6 | Python | IllinoisSocialMediaMacroscope/smm-analytics | /batch/covid19_crimson_sentiment/plot.py | UTF-8 | 978 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.offline import plot
def plot_multiple_pie_chart(labels, values, title):
    """Render a grid of pie charts and return it as an embeddable HTML div.

    Args:
        labels: 2-D grid (rows x cols) of per-pie label lists.
        values: 2-D grid of per-pie value lists, same shape as `labels`.
        title: figure title.

    Returns:
        Plotly HTML <div> string (not auto-opened).
    """
    fig = make_subplots(rows=len(values), cols=len(values[0]),
                        specs=[[{"type": "pie"} for j in range(len(values[0]))]
                               for i in range(len(values))])
    # BUG FIX: the column counter was initialized but never incremented, so
    # every pie in a row was added at col 1; enumerate advances both indices.
    for i, (label_row, value_row) in enumerate(zip(labels, values), start=1):
        for j, (label_col, value_col) in enumerate(zip(label_row, value_row),
                                                   start=1):
            fig.add_trace(go.Pie(labels=label_col, values=value_col,
                                 hoverinfo='label+percent+value',
                                 textinfo='label'), row=i, col=j)
    fig.update_layout(
        title_text=title,
        font=dict(family='Arial', size=12),
        margin=dict(
            l=70,
            r=70,
            t=70,
            b=70,
        ))
    div = plot(fig, output_type='div', auto_open=False,
               image_filename='plot_img')
    return div
| true |
f1371c1462db0b0e987d4b189da1c1ce5df337f7 | Python | dhirajberi/flask-training-logs-manager | /app.py | UTF-8 | 3,599 | 2.5625 | 3 | [] | no_license | from flask import Flask, render_template, request, session, redirect, url_for, g, flash
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask_mail import Mail, Message
class User:
    """Minimal in-memory user account for the demo login flow.

    NOTE(review): passwords are stored and compared in plaintext -- fine for
    a demo, never for production.
    """

    def __init__(self, id, username, password):
        self.id = id
        self.username = username
        self.password = password

    def __repr__(self):
        # Deliberately omits the password so it cannot leak into logs.
        return "User(id={!r}, username={!r})".format(self.id, self.username)
# NOTE(review): hard-coded demo accounts with plaintext passwords --
# replace with a real user store before any production use.
users = []
users.append(User(id=1, username='Dhiraj', password='dhiraj'))
users.append(User(id=2, username='Smit', password='smit'))
app = Flask(__name__)
# SQLite file-backed database used by SQLAlchemy.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Todo(db.Model):
    """One training-log task row, owned by the user named in `post_by`."""
    # Primary key.
    id = db.Column(db.Integer, primary_key = True)
    # Username of the creator (matches User.username; see dashboard()).
    post_by = db.Column(db.String(200), nullable = False)
    # Free-text body of the task.
    content = db.Column(db.String(200), nullable = False)
    # Creation timestamp (UTC).
    date_created = db.Column(db.DateTime, default = datetime.utcnow)
app.secret_key = 'dhirajapp'
@app.before_request
def before_request():
    """Resolve the logged-in User (if any) into flask.g before each request."""
    g.user = None
    user_id = session.get('user_id')
    if user_id is not None:
        # next(..., None) instead of [...][0]: a stale session id no longer
        # raises IndexError (HTTP 500) on every request.
        g.user = next((u for u in users if u.id == user_id), None)
@app.route("/", methods=['GET', 'POST'])
@app.route("/login", methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('user_id', None)
username = request.form['username']
password = request.form['password']
user = [x for x in users if x.username == username][0]
if user and user.password == password:
session['user_id'] = user.id
return redirect(url_for('dashboard'))
flash("Wrong Password...")
return redirect(url_for('login'))
return render_template('login.html')
@app.route("/dashboard", methods=['GET', 'POST'])
def dashboard():
if not g.user:
return redirect(url_for('login'))
if request.method == 'POST':
task_content = request.form['content']
task_post_by = g.user.username
new_task = Todo(content=task_content, post_by=task_post_by)
try:
db.session.add(new_task)
db.session.commit()
return redirect(url_for('dashboard'))
except:
return 'There was in issue in adding task'
else:
tasks = Todo.query.order_by(Todo.date_created).filter(Todo.post_by == g.user.username)
return render_template('dashboard.html', tasks = tasks)
#return render_template('dashboard.html')
@app.route("/delete/<int:id>")
def delete(id):
task_to_delete = Todo.query.get_or_404(id)
try:
db.session.delete(task_to_delete)
db.session.commit()
return redirect(url_for('dashboard'))
except:
return "Problem"
# Flask-Mail SMTP configuration for Gmail (implicit SSL on port 465).
# NOTE(review): placeholder credentials committed in source -- use
# environment variables / an app password instead.
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'youremail'
app.config['MAIL_PASSWORD'] = 'yourpass'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
@app.route("/send_mail/<int:id>", methods=['GET', 'POST'])
def send_mail(id):
task = Todo.query.get_or_404(id)
if request.method == 'POST':
email = request.form['email']
msg = Message('Daily Task Report', sender = 'youremail', recipients = [email])
msg.body = f'Task: {task.content} \nDate: {task.date_created.date()} \n\nSend By {g.user.username}'
mail.send(msg)
flash(f"Mail sent successfully to {email}")
return redirect(url_for('dashboard'))
@app.route("/logout")
def logout():
session.pop('user_id', None)
flash("Logout successfully...")
return redirect(url_for('login'))
if __name__ == '__main__':
    # NOTE(review): debug=True enables the interactive Werkzeug debugger --
    # never run with it in production.
    app.run(debug=True)
| true |
552e51838a7c29aa5013544f67d5c4aeea7e981d | Python | IvayloValkov/Python-the-beginning | /Nested_Loops/demo.py | UTF-8 | 454 | 3.65625 | 4 | [] | no_license | n = int(input())
l = int(input())
for first_symbol in range(1, n + 1):
for second_symbol in range(1, n + 1):
for third_symbol in range(ord("a"), (96 + l + 1)):
for fourth_symbol in range(ord("a"), (96 + l + 1)):
for fifth_symbol in range((max(first_symbol, second_symbol) + 1), n + 1):
print(f"{first_symbol}{second_symbol}{chr(third_symbol)}{chr(fourth_symbol)}{fifth_symbol}", end=' ') | true |
00eceeaaed2649288abf557d9e64ad291dd65d05 | Python | minhthe/practice-algorithms-and-data-structures | /pramp-condility-3month/thousand-str.py | UTF-8 | 211 | 3.453125 | 3 | [] | no_license | '''https://leetcode.com/problems/thousand-separator/'''
class Solution:
    def thousandSeparator(self, n: int) -> str:
        """Format non-negative integer n with '.' between 3-digit groups."""
        digits = str(n)
        groups = []
        end = len(digits)
        # Walk from the right, peeling off up to three digits at a time.
        while end > 0:
            start = max(0, end - 3)
            groups.append(digits[start:end])
            end = start
        return '.'.join(reversed(groups))
437e5f35da1da80a118162b7bb4c14d2ca2ffab6 | Python | JNazare/inagural_speech_analysis | /rerun_experiments.py | UTF-8 | 2,756 | 2.734375 | 3 | [] | no_license | from nltk.probability import FreqDist
from nltk.corpus import inaugural, stopwords
import string
import json
from pprint import pprint
import math
import networkx as nx
filenames = inaugural.fileids()
def dump_content(filename, content):
    """Serialize `content` as pretty-printed JSON to <filename>.json."""
    j = json.dumps(content, indent=4)
    # `with` + write replaces the Python-2-only `print >> f` statement and
    # guarantees the handle is closed even on error.
    with open(filename + '.json', 'w') as f:
        f.write(j + '\n')
def read_content(filename):
    """Load and return the JSON document stored at <filename>.json."""
    # `with` guarantees the handle is closed even if json.load raises
    # (the original leaked the handle on a parse error).
    with open(filename + '.json') as json_data:
        return json.load(json_data)
def remove_punctuation(text):
    """Strip leading/trailing ASCII punctuation from each token."""
    stripped = []
    for token in text:
        stripped.append(token.strip(string.punctuation))
    return stripped
def remove_stopwords(text):
    """Drop English stopwords (case-insensitive) from the token list."""
    # Build the stopword set once: the original called stopwords.words()
    # (which constructs a fresh list) and did a linear scan per token.
    english = set(stopwords.words('english'))
    return [w for w in text if w.lower() not in english]
def clean(text):
    """Lower-case tokens, drop empties, and force a clean unicode decode."""
    content = [w.lower() for w in text if w != '']
    content = ' '.join(content)
    # NOTE(review): `unicode` exists only in Python 2 -- confirm this module
    # is still run under Python 2 before porting.
    content = unicode(content, errors='replace')
    content = content.split()
    return content
def process_speech(filename):
    """Tokenize one inaugural address and normalize its tokens."""
    tokens = inaugural.words(filename)
    tokens = remove_punctuation(tokens)
    tokens = remove_stopwords(tokens)
    return clean(tokens)
def process_speeches(filenames):
    """Process every speech, cache the result to initial_processing.json."""
    texts = {name: process_speech(name) for name in filenames}
    dump_content('initial_processing', texts)
    return texts
def get_buzzwords(docs):
    """Collect each document's top-50 words, then tally them everywhere.

    Args:
        docs: dict mapping document name -> token list.

    Returns:
        dict mapping buzzword -> list of (doc, count) tuples; also dumped
        to freqs.json.
    """
    # Build each document's frequency distribution ONCE -- the original
    # rebuilt a FreqDist for every (buzzword, document) pair.
    freqdists = {}
    buzzwords = set()
    for doc in docs:
        fd = FreqDist(docs[doc])
        freqdists[doc] = fd
        # NOTE(review): relies on nltk-2.x FreqDist.keys() returning words
        # sorted by descending frequency -- confirm before upgrading nltk.
        buzzwords.update(fd.keys()[:50])
    freq_counts = {}
    for buzzword in buzzwords:
        print(buzzword)
        freq_counts[buzzword] = [(doc, freqdists[doc][buzzword])
                                 for doc in docs]
    dump_content('freqs', freq_counts)
    return freq_counts
# docs = read_content('initial_processing')
docs = read_content('freqs')

# remove some random ascii chars that stuck around
del docs[u'\ufffd\ufffd']
del docs[u'\ufffd']

# get the top two speeches per buzzword
# After this loop docs[buzzword] is {speech_name_without_.txt: count, ...}
# holding the two speeches with the highest counts.
for doc in docs:
    docs[doc] = sorted(docs[doc],key=lambda x: x[1], reverse=True)[:2]
    docs[doc] = {docs[doc][0][0][:-4] : docs[doc][0][1], docs[doc][1][0][:-4] : docs[doc][1][1]}
dump_content("top_buzzwords", docs)

# Make dictionary for edge processing
# NOTE(review): `name` iterates the dict KEYS (speech names), so name[0]
# is the first CHARACTER of the name, not the name itself -- looks like a
# bug; confirm the intended graph nodes.
for_edges = {}
for doc in docs:
    l = []
    for name in docs[doc]:
        l.append(name[0])
    for_edges[doc]=l

# make dictionary of edges between top two speeches for each buzzword
# Builds ordered pairs, then drops one direction of each symmetric pair.
edge_dict = {}
for doc in for_edges:
    tuples = [(x,y) for x in for_edges[doc] for y in for_edges[doc] if x != y]
    for entry in tuples:
        if (entry[1], entry[0]) in tuples:
            tuples.remove((entry[1],entry[0]))
    edge_dict[doc] = tuples

# Make the graph
G=nx.Graph()
for doc in edge_dict:
    G.add_edges_from(edge_dict[doc])

# export graph to gexf so it can be read into Gephi
nx.write_gexf(G, 'speeches.gexf')
| true |
609b75d6dd0928374888f6e903e14c1b938154aa | Python | vivion-git/nymph | /method/add.py | UTF-8 | 1,454 | 4 | 4 | [] | no_license | the notice in using class
there are three ideas about python's implementation of
OOP:inheritance,polymorpyism and encapsulation.
1)inheritance:
the example is related to the built-in method "add",replace argument
in-place,if we don't want to replace in-place,we should use two argument ,just
like the under example:
class adder:
def __init__(self,data=[]):
self.data=data
def add(self,x,y):
print"Not Implemented"
def __add__(self,other):
return self.add(self,other)
class listadder(adder):
def add(self,x,y):
return x+y
class dictadder(adder):
def add(self,x,y):
new={}
for k in x.keys():
new[k]=x[k]
for k in y.keys():
new[k]=y[k]
return new
Note: in the code above, __init__ fixes the expected argument type via
"data=[]", so if x or y is not of that type an error is displayed (e.g. if we
pass y=({q:1},{w:2}) we get an error).
Another way to complete this task is to write the code as below; this version
stores a value on the instance, so each call takes only one argument.
class adder:
def __init__(self,start=[]):
self.data=start
def __add__(self,other):
return self.add(other)
def add(self,y):
print 'not implemented!'
class listadder(adder):
def add(self,y):
return self.data+y
class dictadder(adder):
def add(self,y):
pass
Running this version in Python gives the same result as the first one.
10c6f3800dbacc455788a99e1e8d68c4ee8d038f | Python | Hongze-Wang/LeetCode_Python | /100. Same Tree.py | UTF-8 | 865 | 3.671875 | 4 | [] | no_license | # 100. Same Tree
# 100% faster 100% less ๆ็ด่ง็้ๅฝๆนๆณ่งJava่งฃๆณ
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = Non
class Solution:
    def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        """Return True iff p and q have identical structure and node values."""
        # Both empty subtrees are equal; exactly one empty means unequal.
        if p is None and q is None:
            return True
        if p is None or q is None:
            return False
        if p.val != q.val:
            return False
        # Values match here: both subtrees must also match.
        return (self.isSameTree(p.left, q.left)
                and self.isSameTree(p.right, q.right))
| true |
77982677efefad15a2b3977cb3dd034e9b478da6 | Python | mainak0001/Hangman-project | /hangman final.py | UTF-8 | 4,092 | 3.984375 | 4 | [] | no_license | import random
from names import word_list
def get_word():
    """Pick a random secret word from word_list and return it uppercased."""
    return random.choice(word_list).upper()
def play(word):
    """Run one interactive round of hangman for the given (uppercase) word.

    Accepts either single-letter guesses or whole-word guesses; the player
    loses after six wrong tries.
    """
    word_completion="-"*len(word)  # dashes mask unguessed letters
    guessed= False
    guessed_letters=[]
    guessed_words=[]
    word_as_list=[]
    tries=6
    print("let's play hangman!")
    # NOTE(review): displayhangman() prints and returns None, so this line
    # also prints a stray "None".
    print(displayhangman(tries))
    print(word_completion)
    print("\n")
    while not guessed and tries>0:
        guess=input("enter your guess: ")
        guess=guess.upper()
        if len(guess)==1 and guess.isalpha():
            # Single-letter guess.
            if guess in guessed_letters:
                print ("you have already guessed the letter ",guess)
            elif guess not in word:
                print(guess," is not in the word.")
                tries-=1
                guessed_letters.append(guess)
            else:
                print("Good job. ",guess ,"is in the word!")
                guessed_letters.append(guess)
                # Reveal every occurrence of the guessed letter.
                word_as_list=list(word_completion)
                indices=[i for i,letter in enumerate(word) if letter==guess]
                for index in indices:
                    word_as_list[index]=guess
                word_completion="".join(word_as_list)
                if "-" not in word_completion:
                    guessed=True
        elif (len(guess)==len(word) and guess.isalpha()):
            # Whole-word guess (only accepted if the length matches).
            if guess in guessed_words:
                print("you already guessed the word", guess)
            elif guess!=word:
                print(guess," is not the word.")
                tries-=1
                guessed_words.append(guess)
            else:
                guessed=True
                word_completion=word
        else:
            print("not a valid guess")
        displayhangman(tries)
        print(word_completion)
    if guessed:
        print("congrats, you guessed the word! you win!")
    else:
        print("Sorry you ran out of tries. the word was ",word)
def displayhangman(tries):
    """Print the gallows ASCII art for the given number of remaining tries.

    Prints as a side effect and returns None; one body part is added for
    each try lost (6 = empty gallows with head, 0 = fully drawn figure).
    """
    if tries==6:
        print("""
               __________
               |        |
               |        |
               |        O
               |
               |
               |
             __|__ """)
    elif tries==5:
        print("""
               __________
               |        |
               |        |
               |        O
               |        |
               |
               |
             __|__ """)
    elif tries==4:
        print("""
               __________
               |        |
               |        |
               |        O
               |        |/
               |
               |
             __|__ """)
    elif tries==3:
        print("""
               __________
               |        |
               |        |
               |        O
               |        \\|/
               |
               |
             __|__ """)
    elif tries==2:
        print("""
               __________
               |        |
               |        |
               |        O
               |        \\|/
               |        |
               |
             __|__ """)
    elif tries==1:
        print("""
               __________
               |        |
               |        |
               |        O
               |        \\|/
               |        |
               |        /
             __|__ """)
    elif tries==0:
        print("""
               __________
               |        |
               |        |
               |        O
               |        \\|/
               |        |
               |        / \\
             __|__ """)
def main():
    """Play hangman rounds until the player declines another game."""
    play(get_word())
    while input("want to play more?(y/n):").upper() == 'Y':
        play(get_word())
main()
| true |
76b805a1262d5701539eb939cafc152257dd3be4 | Python | michaeljboyle/mlfun | /mlfun/clustering/metrics/hopkins_statistic.py | UTF-8 | 1,965 | 3.03125 | 3 | [] | no_license | from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import train_test_split
from ...utils.data_utils import bounding_box, make_uniform_distribution
import numpy as np
def hopkins(X, random_state=None):
    """ Calculates the Hopkins statistic for the data distribution.

    Values > 0.5 suggest that the data is not uniformly distributed
    and could be clusterable.

    Args:
        X: A np.array of data
        random_state: int or None, for reproducibility

    Returns:
        A float 0 <= x <= 1 indicating whether data is potentially clusterable.
    """
    # Hold out a 5% sample of X whose nearest-neighbor distances are
    # compared against those of a matched uniform reference sample.
    _, Xn = train_test_split(
        X, test_size=0.05, random_state=random_state)
    n = Xn.shape[0]

    # Uniform reference sample with n points in the bounding box of X.
    mins, maxs = bounding_box(X)
    R = make_uniform_distribution(n, mins, maxs, random_state=random_state)

    nbrs = NearestNeighbors(n_neighbors=2).fit(X)
    # For points drawn from X the nearest neighbor is the point itself
    # (distance 0), so take the second column; the uniform points are not
    # in X, so their first column is already the true nearest neighbor.
    Ws = nbrs.kneighbors(Xn)[0][:, 1]
    Us = nbrs.kneighbors(R, n_neighbors=1)[0][:, 0]

    # BUG FIX: numpy scalar division by zero yields nan/inf with a warning
    # rather than raising ZeroDivisionError, so the original try/except
    # guard could never fire; test the denominator explicitly instead.
    sum_us = float(np.sum(Us))
    denom = sum_us + float(np.sum(Ws))
    if denom == 0.0:
        return 0
    return sum_us / denom
# d = X.shape[1]
# n = len(X) # rows
# m = int(0.1 * n) # heuristic from article [1]
# nbrs = NearestNeighbors(n_neighbors=1).fit(X.values)
# rand_X = sample(range(0, n, 1), m)
# ujd = []
# wjd = []
# for j in range(0, m):
# u_dist, _ = nbrs.kneighbors(uniform(np.amin(X,axis=0),np.amax(X,axis=0),d).reshape(1, -1), 2, return_distance=True)
# ujd.append(u_dist[0][1])
# w_dist, _ = nbrs.kneighbors(X.iloc[rand_X[j]].values.reshape(1, -1), 2, return_distance=True)
# wjd.append(w_dist[0][1])
# H = sum(ujd) / (sum(ujd) + sum(wjd))
# if isnan(H):
# print ujd, wjd
# H = 0
# return H | true |
a856c6e18e1d1e4cea08304b514c6b00dcc537e2 | Python | hiropppe/ksj-sample-app | /data/kcloud/image.py | UTF-8 | 2,018 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
#! -*- coding:utf-8 -*-
import pymongo, gridfs, json
import requests, urllib
import ssl
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
class TLSv1Adapter(HTTPAdapter):
    """requests transport adapter that pins HTTPS connections to TLSv1."""
    def init_poolmanager(self, connections, maxsize, block=False):
        # Same as HTTPAdapter, but forces ssl_version=PROTOCOL_TLSv1.
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       ssl_version=ssl.PROTOCOL_TLSv1)
url_template = u'https://www.chiikinogennki.soumu.go.jp/k-cloud-api/v001/kanko/view/{refbase}/{fid}'
def crawl(host='192.168.1.10', port=27017,
          data_db='test', data_collection='geo',
          image_db='test'):
    """Fetch the image behind every view of each geo document and store it
    in GridFS, tagged with the owning document id and optional caption.
    (Python 2 module: uses print statements.)
    """
    client = pymongo.MongoClient(host=host, port=port)
    geo = client[data_db][data_collection]
    gfs = gridfs.GridFS(client[image_db])
    data_count = 0
    image_count = 0
    put_count = 0
    q = {
        'views': {'$exists': 1}
    }
    for data in geo.find(q, no_cursor_timeout=True):
        data_count += 1
        refbase = data['mng']['refbase']
        for i, view in enumerate(data['views']):
            image_count += 1
            fid = view['fid']
            caption = None
            if 'name' in view:
                caption = view['name']['written']
            url = url_template.format(refbase=refbase, fid=fid)
            # NOTE(review): the TLSv1 session `s` is created and mounted but
            # never used -- requests.get() below bypasses it entirely.
            s = requests.Session()
            s.mount('https://', TLSv1Adapter())
            print 'Fetch:', url
            r = requests.get(url)
            if r.status_code == 200:
                gfs.put(r.content,
                        filename=fid,
                        data_id=data['_id'],
                        caption=caption,
                        content_type='image/jpeg')
            else:
                print r.status_code
            # NOTE(review): put_count increments even when the fetch failed,
            # so the summary line can over-count stored images.
            put_count += 1
    print 'Total', data_count, 'data', image_count, 'image', put_count, 'put'
| true |
0d0ecd3c9545220613a5033e0531c2ac11c36259 | Python | annis/synmag | /python/sysResponse.py | UTF-8 | 9,642 | 2.765625 | 3 | [] | no_license | import numpy as np
import scipy as sp
import scipy.interpolate
"""Classes for describing a system response.
*** Initializing a sysResp runs code to generate systerm response curves
"""
__author__ = ("Jim Annis <annis@fnal.gov> ")
class sysResp(object):
    """system response
    aerosolData can be 1,2,3, and chooses which of the aerosol data fits
    of Gutierrez-Moreno et al 196 to use.
    A choice of 0 gives no change over the palomar atmopshere.
    aerosols are currently disabled.
    Run sysResp.go() to generate five filter system response files
    DES-g.txt, DES-r.txt, DES-i.txt, DES-z.txt, DES-y.txt
    """
    def __init__(self,
            ccdpos = "ccdPos-sky-v6.par",
            insrespDir = "filter_curves/",
            airmass = 1.3,
            aerosolData = 0
            ):
        """Create a sysResponse"""
        # All input/output paths are rooted here.
        masterDir = "/home/s1/annis/daedalean/synmag/sysresp/"
        self.masterDir = masterDir
        self.ccdPosFile = masterDir + ccdpos
        self.insresponse = masterDir + insrespDir
        self.outDir = masterDir
        self.airmass = airmass
        self.altitude = 2200.0
        # ctio aerosol coefficients from Gutierrez-Moreno et al 1986
        # (index 0 disables aerosols; currently unused -- see make_atmosphere)
        A_h = [0.0, 0.05, 0.018, 0.013, 0.017]
        alpha = [0.0, 1.0, 1.1, 1.2, 1.8]
        self.A_h = A_h[aerosolData]
        self.alpha = alpha[aerosolData]
        # Filter numbers 1..5 map to g,r,i,z,y (see nameFromFilterNumber).
        self.filterList = [1,2,3,4,5]
        self.ccdData =self.getCCDPositions()

    #=================================================================================
    #
    # Main Routines
    #
    #=================================================================================
    def go (self) :
        """
        for the five des filters, construct and save the system response
        """
        filterList = self.filterList
        atmoData = self.make_atmosphere()
        outDir = self.outDir
        # Maps ccd number -> transmission array for the current filter.
        trans = dict()
        for filter in filterList :
            fil = self.nameFromFilterNumber(filter)
            filename = outDir + "DES-{}.txt".format(fil)
            self.sysRespHeader(filename, fil)
            for ccd in range(1,63) :
                wave, transSpline = self.get_instrument_response(filter, ccd)
                wave, trans[ccd] = self.make_sys_ccd(atmoData[0],atmoData[1],wave,transSpline)
            print "\t writing {}".format(filename)
            self.sysRespWrite (filename, wave, trans)

    def make_sys_ccd(self, atWaves, atTransSpline, ccdWaves, ccdTransSpline) :
        """
        Combine the atmospheric and instrumental response
        on a 1nm grid from 300 to 1100 nm range
        This assumes that both atmo and inst response are splines
        and their respective waves only cover the range of the spline
        """
        waves = np.arange(300,1101)
        # Transmission is zero outside the overlap of the two spline ranges.
        trans = np.zeros(waves.size)
        ix = np.nonzero( (waves >= atWaves[0]) & (waves <= atWaves[-1]) )
        atmo = atTransSpline(waves[ix])
        trans[ix] = atmo
        ix = np.nonzero( (waves >= ccdWaves[0]) & (waves <= ccdWaves[-1]) )
        ccd = ccdTransSpline(waves[ix])
        trans[ix] = trans[ix]*ccd
        return waves, trans

    #=================================================================================
    #
    # Support Routines
    #
    #=================================================================================
    def get_instrument_response(self, filter, ccd) :
        """
        A routine to deal with the way William gave us instrument
        response in 2013
        """
        maxRadius = 1.1 ;# degees
        ccdData = self.ccdData
        ccdnum, x,y = ccdData[0], ccdData[1], ccdData[2]
        insDir = self.insresponse
        ins = dict()
        ins["g"] = insDir + "g_003.dat"
        ins["r"] = insDir + "r_005.dat"
        ins["i"] = insDir + "i_003.dat"
        ins["z"] = insDir + "z_003.dat"
        ins["y"] = insDir + "y_003.dat"
        fil = self.nameFromFilterNumber(filter)
        data = self.get_ins_filter(ins[fil])
        # t = average over all non-exluded amplifiers
        # tr1 = averaged only for the inner two CCDs (<10% RMax)
        # tr2 = for 10<Rmax<30%
        # tr3 = for 30<Rmax<60%
        # tr4 = for Rmax>60%
        # NOTE(review): assumes exactly one ccdPos row per ccd number, so
        # `radius` is a length-1 array and the scalar comparisons below work.
        ix = np.nonzero(ccdnum == ccd)
        radius = np.sqrt( x[ix]**2 + y[ix]**2 )/maxRadius
        wave = data[0]
        if radius <= 0.1 : trans = data[2]
        elif (radius > 0.1) & (radius <= 0.3) : trans = data[3]
        elif (radius > 0.3) & (radius <= 0.6) : trans = data[4]
        elif (radius > 0.6) : trans = data[5]
        return wave, trans

    def get_ins_filter(self, file) :
        """Read one instrument-response table; returns the wavelength grid
        and interpolating splines for the mean and four radial annuli
        (transmissions are stored x1000 in the file, hence the /1000)."""
        wave, t, tr1, tr2, tr3, tr4 = np.genfromtxt(file, unpack=True)
        t = t/1000.
        tr1 = tr1/1000.
        tr2 = tr2/1000.
        tr3 = tr3/1000.
        tr4 = tr4/1000.
        st = sp.interpolate.InterpolatedUnivariateSpline(wave, t)
        str1 = sp.interpolate.InterpolatedUnivariateSpline(wave, tr1)
        str2 = sp.interpolate.InterpolatedUnivariateSpline(wave, tr2)
        str3 = sp.interpolate.InterpolatedUnivariateSpline(wave, tr3)
        str4 = sp.interpolate.InterpolatedUnivariateSpline(wave, tr4)
        return wave, st, str1, str2, str3, str4

    def make_atmosphere(self) :
        """
        Ting Li's atmosphere
        """
        file = self.masterDir + "atmo/ctio.txt"
        airmass = self.airmass
        altitude = self.altitude
        A_h = self.A_h
        alpha = self.alpha
        # ctio.txt gives wave (nm) and T
        waves = []
        trans = []
        waves, trans = np.genfromtxt(file,unpack=True,comments="#")
        # as per Gunn sdss mailing list:
        # deals with Rayleigh. And Ozone, sort of.
        # transMag = transMag*airmass*np.e**((1700-altitude)/7000)
        # as per Gutierrez-Moreno et al 1982
        # with coefficients from Gutierrez-Moreno et al 1986
        # wave_pivot = waves[29] ;# pivot around 400 nm
        # aerosols = (1.086 *A_h * (waves/wave_pivot)**-alpha) * airmass
        # transMag = transMag + aerosols
        # trans = 10**(-0.4 * transMag)
        atmosphere = sp.interpolate.InterpolatedUnivariateSpline(waves, trans)
        return waves, atmosphere

    def nameFromFilterNumber (self, filter) :
        """Map filter number 0..5 to its single-letter band name.

        NOTE(review): an out-of-range filter number falls through every
        branch and raises UnboundLocalError on `fil`.
        """
        if filter == 0 : fil = "u"
        elif filter == 1 : fil = "g"
        elif filter == 2 : fil = "r"
        elif filter == 3 : fil = "i"
        elif filter == 4 : fil = "z"
        elif filter == 5 : fil = "y"
        return fil

    #=================================================================================
    #
    # Read data
    #
    #=================================================================================
    def getCCDPositions(self) :
        """ return positions in cm
            ccdno,x,y
        """
        ccdpos = self.ccdPosFile
        ccdno = []
        x = []
        y = []
        file = open(ccdpos,"r")
        # Only "CCD" rows of the .par file carry positions (cols 4,5,8).
        for line in file:
            if (not line.split()) : continue
            if (line.split()[0] != "CCD") : continue
            ccdno.append(int( line.split()[8] ))
            x.append(float( line.split()[4] ))
            y.append(float( line.split()[5] ))
        ccdno = np.array(ccdno)
        # x,y in arcminutes
        x = np.array(x)/60.
        y = np.array(y)/60.
        # if x,y wanted in centimeters:
        # x = x*18.181818
        # y = y*18.181818
        return [ccdno, x,y]

    #=================================================================================
    #
    # Write data
    #
    #=================================================================================
    def sysRespHeader (self, filename, filterName) :
        """Write (truncating) the commented provenance header for one
        DES-<band>.txt system-response file."""
        import datetime
        airmass = self.airmass
        altitude = self.altitude
        A_h = self.A_h
        alpha = self.alpha
        insfile = "William Wester's 2013 DECam System (instrument) response curves"
        insfile2 = "http://home.fnal.gov/~wester/work/"
        atfile = "Ting Li's airmass=1.3 CTIO atmosphere"
        atfile2 = "uvspec_afglus_pressure780_airmass1.3_asea1_avul1_pw03_tau0.03.out.txt"
        now = datetime.datetime.now()
        f = open(filename,"w")
        f.write("# \n")
        f.write("# DES System response, filter {}\n".format(filterName))
        f.write("# \n")
        f.write("# \tJim Annis here and now {}\n".format(
            now.strftime("%Y-%m-%d %H:%M")))
        f.write("# \n")
        f.write("# Wester's file has responses in 4 annuli.\n")
        f.write("# The instrument response in the annuli in which the ccd center lies\n")
        f.write("# was taken as the instrument response for that ccd.\n")
        f.write("# \n")
        f.write("# instrument transmission: \n")
        f.write("#     {}\n".format(insfile))
        f.write("#     {}\n".format(insfile2))
        f.write("# atmosphere transmission: \n")
        f.write("#     {}\n".format(atfile))
        f.write("#     {}\n".format(atfile2))
        f.write("# \n")
        f.write("# airmass {} \n".format(airmass))
        f.write("# aerosols, A_h, alpha {} {}  (but Ting's atm includes aerosols)\n".format(A_h, alpha))
        f.write("# altitude {}\n".format(altitude))
        f.write("# \n")
        f.write("# wavelength(nm) Trans(ccd 1) Trans(ccd 2)... Trans(ccd 62)\n")
        f.write("# \n")
        f.close()

    def sysRespWrite (self, filename, waves, trans) :
        """Append one row per wavelength: wave then the 62 ccd transmissions."""
        fd = open(filename,"a")
        for i in range(0,waves.size) :
            fd.write("{:4.0f} ".format(waves[i]))
            for ccd in range(1,63) :
                fd.write("{:7.4f} ".format(trans[ccd][i]))
            fd.write("\n")
        fd.close()
# end sysresponse namespace
| true |
1de73e12fc2c3b79f4348ea2f47dfe204bbfe138 | Python | zjx-ERROR/juecexitong | /algorithm/evaluate/Jaccard_evaluate.py | UTF-8 | 297 | 2.5625 | 3 | [] | no_license | #!/usr/bin/python
__author__ = 'zJx'
from sklearn.metrics import jaccard_similarity_score
"""
jacard_evaluate
"""
def evaluate(testable, pretable):
    """Return the Jaccard similarity between true and predicted labels."""
    score = jaccard_similarity_score(testable, pretable)
    return score
if __name__ == "__main__":
a = [1,2,3,4]
b = [2,2,3,4]
print(evaluate(a,b)) | true |
6ff0784ea6d85cd7249c6a54e80ccabaa86f070e | Python | relsqui/archivebot | /archivebot.py | UTF-8 | 4,233 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
"""Skeleton bot using KitnIRC. Just connects to a server."""
import argparse
import logging
import os
import kitnirc.client
import kitnirc.modular
# Command-line arguments
parser = argparse.ArgumentParser(description="Example IRC client.")
parser.add_argument("host", nargs="?",
help="Address of an IRC server, if not specified in the config.")
parser.add_argument("nick", nargs="?",
help="Nickname to use when connecting, if not specified in the config.")
parser.add_argument("-c", "--config", default="bot.cfg",
help="Path from which to load configuration data.")
parser.add_argument("-p", "--port", type=int, default=None,
help="Port to use when connecting")
parser.add_argument("--username",
help="Username to use. If not set, defaults to nickname.")
parser.add_argument("--realname",
help="Real name to use. If not set, defaults to username.")
parser.add_argument("--password", default=None,
help="IRC server password, if any (and if not using config file).")
parser.add_argument("--loglevel", default="INFO",
help="Logging level for the root logger.",
choices=["FATAL","ERROR","WARNING","INFO","DEBUG"])
# Note: this basic skeleton doesn't verify SSL certificates. See
# http://docs.python.org/2/library/ssl.html#ssl.wrap_socket and
# https://github.com/ayust/kitnirc/wiki/SSL-Connections for details.
parser.add_argument("--ssl", action="store_true",
help="Use SSL to connect to the IRC server.")
def initialize_logging(args):
    """Configure the root logger with some sensible defaults."""
    formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s")
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.addHandler(handler)
    # args.loglevel is constrained by argparse to valid level names.
    root.setLevel(getattr(logging, args.loglevel))
def main():
    """Parse CLI arguments, merge them with the config file, and run the bot.

    Precedence for every connection setting is: command line, then config
    file, then a hard-coded default (where one exists).
    """
    args = parser.parse_args()
    initialize_logging(args)

    # Allow expansion of paths even if the shell doesn't do it
    config_path = os.path.abspath(os.path.expanduser(args.config))

    client = kitnirc.client.Client()
    controller = kitnirc.modular.Controller(client, config_path)

    # Make sure the configuration file is loaded so we can check for
    # connection information.
    controller.load_config()

    def config_or_none(section, value, integer=False, boolean=False):
        """Helper function to get config values that might not be set."""
        if controller.config.has_option(section, value):
            if integer:
                return controller.config.getint(section, value)
            elif boolean:
                return controller.config.getboolean(section, value)
            return controller.config.get(section, value)
        return None

    # If host isn't specified on the command line, try from config file
    host = args.host or config_or_none("server", "host")
    if not host:
        # BUG FIX: the original called argparse.ArgumentParser.error() on the
        # *class* (no instance), which raises TypeError instead of printing a
        # usage message; report through the module-level parser instead.
        parser.error("IRC host must be specified if not in config file.")

    # If nick isn't specified on the command line, try from config file
    nick = args.nick or config_or_none("server", "nick")
    if not nick:
        parser.error("Nick must be specified if not in config file.")

    # KitnIRC's default client will use port 6667 if nothing else is specified,
    # but since we want to potentially specify something else, we add that
    # fallback here ourselves.
    port = args.port or config_or_none("server", "port", integer=True) or 6667

    ssl = args.ssl or config_or_none("server", "ssl", boolean=True)
    password = args.password or config_or_none("server", "password")
    username = args.username or config_or_none("server", "username") or nick
    realname = args.realname or config_or_none("server", "realname") or username

    controller.start()
    client.connect(
        nick,
        host=host,
        port=port,
        username=username,
        realname=realname,
        password=password,
        ssl=ssl,
    )
    try:
        client.run()
    except KeyboardInterrupt:
        # Clean shutdown on Ctrl-C.
        client.disconnect()
if __name__ == "__main__":
main()
# vim: set ts=4 sts=4 sw=4 et:
| true |
c7220becc69ef4d40e1f0b145e6ea7b7e4e7bb6a | Python | izumism/algorithms | /strings/sedgewick/lec17_test.py | UTF-8 | 1,553 | 3.203125 | 3 | [] | no_license | import unittest
from lcp import lcp
from key_indexed_counting import key_indexed_counting
from lsd_radix_sort import (
lsd_radix_sort, counting_sort, counting_sort_alphabets
)
class Leccture17(unittest.TestCase):
    """Unit tests for the lecture-17 string-sorting routines."""

    def test_lcp(self):
        # Longest common prefix of "prefetch"/"prefix" is "pref" (4 chars).
        self.assertEqual(lcp('prefetch', 'prefix'), 4, 'lcp')

    def test_key_indexed_counting(self):
        sorted_chars = ''.join(key_indexed_counting('dacffbdbfbea'))
        self.assertEqual(sorted_chars, 'aabbbcddefff', 'key indexed counting')

    def test_lsd_radix_sort(self):
        words = [
            'dab', 'cab', 'fad', 'bad', 'dad', 'ebb', 'ace', 'add',
            'fed', 'bed', 'fee', 'bee'
        ]
        self.assertListEqual(
            lsd_radix_sort(words, 3),
            [
                'ace', 'add', 'bad', 'bed', 'bee', 'cab', 'dab', 'dad',
                'ebb', 'fad', 'fed', 'fee'
            ],
            'lsd radix sort')

    def test_counting_sort(self):
        numbers = [2, 5, 3, 0, 2, 3, 0, 3]
        self.assertEqual(counting_sort(numbers, 6),
                         [0, 0, 2, 2, 3, 3, 3, 5], 'counting sort')

    def test_alphabets_counting_sort(self):
        self.assertEqual(counting_sort_alphabets('ababacddaccba'),
                         'aaaaabbbcccdd', 'alphabets counting sort')
if __name__ == "__main__":
unittest.main()
| true |
9f5da2ea37971d1bb83ce92006d07cf8cb3032fd | Python | Carouge/TextSummarization | /src/models/seq2seqWithAttention/summarization_model.py | UTF-8 | 26,212 | 2.859375 | 3 | [
"MIT"
] | permissive | """
This model on based on the work of Jishnu Ray Chowdhury
Source: https://github.com/JRC1995/Abstractive-Summarization
"""
from __future__ import division
import numpy as np
filename = 'glove.6B.50d.txt'
def loadGloVe(filename):
    """Load a GloVe embedding text file.

    Each line has the form "<word> <c1> <c2> ...".  Returns (vocab, embd)
    where vocab is the list of words and embd the matching list of
    coefficient strings (conversion to float happens at the call site).
    """
    vocab = []
    embd = []
    # Use a context manager so the handle is closed even if a line is
    # malformed; the original left the file open on any exception.
    with open(filename, 'r') as file:
        for line in file:
            row = line.strip().split(' ')
            vocab.append(row[0])
            embd.append(row[1:])
    print('Loaded GloVe!')
    return vocab, embd
vocab,embd = loadGloVe(filename)
embedding = np.asarray(embd)
embedding = embedding.astype(np.float32)
word_vec_dim = len(embedding[0])
#Pre-trained GloVe embedding
# In[ ]:
def np_nearest_neighbour(x):
    """Return the row of the module-level `embedding` matrix that has the
    highest cosine similarity with vector `x`."""
    # Dot product of x with every embedding row.
    dots = np.sum(np.multiply(embedding, x), 1)
    # Euclidean norm of x and of each embedding row.
    x_norm = np.sqrt(np.sum(np.square(x), 0))
    row_norms = np.sqrt(np.sum(np.square(embedding), 1))
    # cosine(x, row) = (x . row) / (|x| * |row|) for every row at once.
    cosine_similarities = np.divide(dots, np.multiply(x_norm, row_norms))
    return embedding[np.argmax(cosine_similarities)]
def word2vec(word):
    """Return the embedding vector for `word`, falling back to the 'unk'
    vector for out-of-vocabulary words (uses module globals vocab/embedding)."""
    lookup = word if word in vocab else 'unk'
    return embedding[vocab.index(lookup)]
def vec2word(vec):
    """Return the vocabulary word whose embedding exactly equals `vec`.

    If there is no exact match, snap `vec` to the nearest embedding by
    cosine similarity and recurse (the second call always hits exactly).
    """
    target = np.asarray(vec)
    for idx, row in enumerate(embedding):
        if np.array_equal(row, target):
            return vocab[idx]
    # No exact match: retry with the closest known embedding.
    return vec2word(np_nearest_neighbour(target))
# In[ ]:
import pickle
with open ('vec_summaries', 'rb') as fp:
vec_summaries = pickle.load(fp)
with open ('vec_texts', 'rb') as fp:
vec_texts = pickle.load(fp)
# In[ ]:
with open ('vocab_limit', 'rb') as fp:
vocab_limit = pickle.load(fp)
with open ('embd_limit', 'rb') as fp:
embd_limit = pickle.load(fp)
# In[ ]:
vocab_limit.append('<SOS>')
embd_limit.append(np.zeros((word_vec_dim),dtype=np.float32))
SOS = embd_limit[vocab_limit.index('<SOS>')]
np_embd_limit = np.asarray(embd_limit,dtype=np.float32)
# In[ ]:
#DIAGNOSIS
count = 0
LEN = 15
for summary in vec_summaries:
if len(summary)-1>LEN:
count = count + 1
print "Percentage of dataset with summary length beyond "+str(LEN)+": "+str((count/len(vec_summaries))*100)+"% "
count = 0
D = 10
window_size = 2*D+1
for text in vec_texts:
if len(text)<window_size+1:
count = count + 1
print "Percentage of dataset with text length less that window size: "+str((count/len(vec_texts))*100)+"% "
count = 0
LEN = 300
for text in vec_texts:
if len(text)>LEN:
count = count + 1
print "Percentage of dataset with text length more than "+str(LEN)+": "+str((count/len(vec_texts))*100)+"% "
# In[ ]:
MAX_SUMMARY_LEN = 30
MAX_TEXT_LEN = 600
#D is a major hyperparameters. Windows size for local attention will be 2*D+1
D = 10
window_size = 2*D+1
#REMOVE DATA WHOSE SUMMARIES ARE TOO BIG
#OR WHOSE TEXT LENGTH IS TOO BIG
#OR WHOSE TEXT LENGTH IS SMALLED THAN WINDOW SIZE
vec_summaries_reduced = []
vec_texts_reduced = []
i = 0
for summary in vec_summaries:
if len(summary)-1<=MAX_SUMMARY_LEN and len(vec_texts[i])>=window_size and len(vec_texts[i])<=MAX_TEXT_LEN:
vec_summaries_reduced.append(summary)
vec_texts_reduced.append(vec_texts[i])
i=i+1
# In[ ]:
train_len = int((.7)*len(vec_summaries_reduced))
train_texts = vec_texts_reduced[0:train_len]
train_summaries = vec_summaries_reduced[0:train_len]
val_len = int((.15)*len(vec_summaries_reduced))
val_texts = vec_texts_reduced[train_len:train_len+val_len]
val_summaries = vec_summaries_reduced[train_len:train_len+val_len]
test_texts = vec_texts_reduced[train_len+val_len:len(vec_summaries_reduced)]
test_summaries = vec_summaries_reduced[train_len+val_len:len(vec_summaries_reduced)]
# In[ ]:
print train_len
# In[ ]:
def transform_out(output_text):
    """Map each word vector in `output_text` to its index in the module-level
    vocab_limit list, returning an int32 numpy array of the same length."""
    indices = np.zeros([len(output_text)], dtype=np.int32)
    for pos, vec in enumerate(output_text):
        indices[pos] = vocab_limit.index(vec2word(vec))
    return indices
# In[ ]:
#Some MORE hyperparameters and other stuffs
hidden_size = 500
learning_rate = 0.003
K = 5
vocab_len = len(vocab_limit)
training_iters = 9999
# In[ ]:
import tensorflow as tf
#placeholders
tf_text = tf.placeholder(tf.float32, [None,word_vec_dim])
tf_seq_len = tf.placeholder(tf.int32)
tf_summary = tf.placeholder(tf.int32,[None])
tf_output_len = tf.placeholder(tf.int32)
# In[ ]:
def forward_encoder(inp,hidden,cell,
wf,uf,bf,
wi,ui,bi,
wo,uo,bo,
wc,uc,bc,
Wattention,seq_len,inp_dim):
Wattention = tf.nn.softmax(Wattention,0)
hidden_forward = tf.TensorArray(size=seq_len,dtype=tf.float32)
hidden_residuals = tf.TensorArray(size=K,dynamic_size=True,dtype=tf.float32,clear_after_read=False)
hidden_residuals = hidden_residuals.unstack(tf.zeros([K,hidden_size],dtype=tf.float32))
i=0
j=K
def cond(i,j,hidden,cell,hidden_forward,hidden_residuals):
return i < seq_len
def body(i,j,hidden,cell,hidden_forward,hidden_residuals):
x = tf.reshape(inp[i],[1,inp_dim])
hidden_residuals_stack = hidden_residuals.stack()
RRA = tf.reduce_sum(tf.multiply(hidden_residuals_stack[j-K:j],Wattention),0)
RRA = tf.reshape(RRA,[1,hidden_size])
# LSTM with RRA
fg = tf.sigmoid( tf.matmul(x,wf) + tf.matmul(hidden,uf) + bf)
ig = tf.sigmoid( tf.matmul(x,wi) + tf.matmul(hidden,ui) + bi)
og = tf.sigmoid( tf.matmul(x,wo) + tf.matmul(hidden,uo) + bo)
cell = tf.multiply(fg,cell) + tf.multiply(ig,tf.sigmoid( tf.matmul(x,wc) + tf.matmul(hidden,uc) + bc))
hidden = tf.multiply(og,tf.tanh(cell+RRA))
hidden_residuals = tf.cond(tf.equal(j,seq_len-1+K),
lambda: hidden_residuals,
lambda: hidden_residuals.write(j,tf.reshape(hidden,[hidden_size])))
hidden_forward = hidden_forward.write(i,tf.reshape(hidden,[hidden_size]))
return i+1,j+1,hidden,cell,hidden_forward,hidden_residuals
_,_,_,_,hidden_forward,hidden_residuals = tf.while_loop(cond,body,[i,j,hidden,cell,hidden_forward,hidden_residuals])
hidden_residuals.close().mark_used()
return hidden_forward.stack()
# In[ ]:
def backward_encoder(inp,hidden,cell,
wf,uf,bf,
wi,ui,bi,
wo,uo,bo,
wc,uc,bc,
Wattention,seq_len,inp_dim):
Wattention = tf.nn.softmax(Wattention,0)
hidden_backward = tf.TensorArray(size=seq_len,dtype=tf.float32)
hidden_residuals = tf.TensorArray(size=K,dynamic_size=True,dtype=tf.float32,clear_after_read=False)
hidden_residuals = hidden_residuals.unstack(tf.zeros([K,hidden_size],dtype=tf.float32))
i=seq_len-1
j=K
def cond(i,j,hidden,cell,hidden_backward,hidden_residuals):
return i > -1
def body(i,j,hidden,cell,hidden_backward,hidden_residuals):
x = tf.reshape(inp[i],[1,inp_dim])
hidden_residuals_stack = hidden_residuals.stack()
RRA = tf.reduce_sum(tf.multiply(hidden_residuals_stack[j-K:j],Wattention),0)
RRA = tf.reshape(RRA,[1,hidden_size])
# LSTM with RRA
fg = tf.sigmoid( tf.matmul(x,wf) + tf.matmul(hidden,uf) + bf)
ig = tf.sigmoid( tf.matmul(x,wi) + tf.matmul(hidden,ui) + bi)
og = tf.sigmoid( tf.matmul(x,wo) + tf.matmul(hidden,uo) + bo)
cell = tf.multiply(fg,cell) + tf.multiply(ig,tf.sigmoid( tf.matmul(x,wc) + tf.matmul(hidden,uc) + bc))
hidden = tf.multiply(og,tf.tanh(cell+RRA))
hidden_residuals = tf.cond(tf.equal(j,seq_len-1+K),
lambda: hidden_residuals,
lambda: hidden_residuals.write(j,tf.reshape(hidden,[hidden_size])))
hidden_backward = hidden_backward.write(i,tf.reshape(hidden,[hidden_size]))
return i-1,j+1,hidden,cell,hidden_backward,hidden_residuals
_,_,_,_,hidden_backward,hidden_residuals = tf.while_loop(cond,body,[i,j,hidden,cell,hidden_backward,hidden_residuals])
hidden_residuals.close().mark_used()
return hidden_backward.stack()
# In[ ]:
def decoder(x,hidden,cell,
wf,uf,bf,
wi,ui,bi,
wo,uo,bo,
wc,uc,bc,RRA):
"""One decoder LSTM step augmented with a Recurrent Residual Attention term.

x: current input vector [1, input_dim]; hidden/cell: previous LSTM state.
wf/uf/bf, wi/ui/bi, wo/uo/bo, wc/uc/bc: input weights, recurrent weights and
bias for the forget, input, output and candidate computations respectively.
RRA: weighted sum of earlier hidden states, added inside the output tanh.
Returns (hidden_next, cell_next).
"""
# LSTM with RRA
# Forget / input / output gates: sigmoid(x.W + hidden.U + b).
fg = tf.sigmoid( tf.matmul(x,wf) + tf.matmul(hidden,uf) + bf)
ig = tf.sigmoid( tf.matmul(x,wi) + tf.matmul(hidden,ui) + bi)
og = tf.sigmoid( tf.matmul(x,wo) + tf.matmul(hidden,uo) + bo)
# NOTE(review): the candidate term uses sigmoid rather than the conventional
# tanh -- confirm this is intentional (the encoders do the same).
cell_next = tf.multiply(fg,cell) + tf.multiply(ig,tf.sigmoid( tf.matmul(x,wc) + tf.matmul(hidden,uc) + bc))
# NOTE(review): the new hidden state is built from the *incoming* cell state
# plus RRA, not from cell_next; the encoder loops update `cell` in place
# first, so they effectively use the new cell -- confirm this asymmetry.
hidden_next = tf.multiply(og,tf.tanh(cell+RRA))
return hidden_next,cell_next
# In[ ]:
def score(hs,ht,Wa,seq_len):
"""Bilinear ("general") attention score: hs . Wa . ht^T, reshaped to [seq_len].

hs: source hidden states [seq_len, d]; ht: current decoder state [1, d];
Wa: bilinear weight matrix [d, d].
"""
return tf.reshape(tf.matmul(tf.matmul(hs,Wa),tf.transpose(ht)),[seq_len])
def align(hs,ht,Wp,Vp,Wa,tf_seq_len):
"""Local attention with predictive alignment.

Predicts a window centre pt from the decoder state ht, softmaxes the
bilinear scores of the 2*D+1 source states around pt, and damps them with
a Gaussian centred on pt.  Returns (G, pt): G has shape [2*D+1, 1]
(Gaussian-weighted attention weights) and pt is the window centre index
into the padded position range.
"""
pd = tf.TensorArray(size=(2*D+1),dtype=tf.float32)
# Predicted centre: pt = S * sigmoid(Vp . tanh(Wp . ht)), where S is the
# number of positions at which a full window fits.
positions = tf.cast(tf_seq_len-1-2*D,dtype=tf.float32)
sigmoid_multiplier = tf.nn.sigmoid(tf.matmul(tf.tanh(tf.matmul(ht,Wp)),Vp))
sigmoid_multiplier = tf.reshape(sigmoid_multiplier,[])
pt_float = positions*sigmoid_multiplier
pt = tf.cast(pt_float,tf.int32)
pt = pt+D #center to window
# Gaussian standard deviation, conventionally D/2.
# NOTE(review): this file runs under Python 2 (print statements elsewhere),
# so D/2 is *integer* division -- confirm that is intended.
sigma = tf.constant(D/2,dtype=tf.float32)
i = 0
pos = pt - D
def cond(i,pos,pd):
return i < (2*D+1)
def body(i,pos,pd):
# Gaussian damping exp(-(pos-pt)^2 / (2*sigma^2)) for each window slot.
comp_1 = tf.cast(tf.square(pos-pt),tf.float32)
comp_2 = tf.cast(2*tf.square(sigma),tf.float32)
pd = pd.write(i,tf.exp(-(comp_1/comp_2)))
return i+1,pos+1,pd
i,pos,pd = tf.while_loop(cond,body,[i,pos,pd])
# Softmax over the bilinear scores of the local window only.
local_hs = hs[(pt-D):(pt+D+1)]
normalized_scores = tf.nn.softmax(score(local_hs,ht,Wa,2*D+1))
pd=pd.stack()
G = tf.multiply(normalized_scores,pd)
G = tf.reshape(G,[2*D+1,1])
return G,pt
# In[ ]:
def model(tf_text,tf_seq_len,tf_output_len):
#PARAMETERS
#1.1 FORWARD ENCODER PARAMETERS
initial_hidden_f = tf.zeros([1,hidden_size],dtype=tf.float32)
cell_f = tf.zeros([1,hidden_size],dtype=tf.float32)
wf_f = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
uf_f = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bf_f = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
wi_f = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
ui_f = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bi_f = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
wo_f = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
uo_f = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bo_f = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
wc_f = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
uc_f = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bc_f = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
Wattention_f = tf.Variable(tf.zeros([K,1]),dtype=tf.float32)
#1.2 BACKWARD ENCODER PARAMETERS
initial_hidden_b = tf.zeros([1,hidden_size],dtype=tf.float32)
cell_b = tf.zeros([1,hidden_size],dtype=tf.float32)
wf_b = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
uf_b = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bf_b = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
wi_b = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
ui_b = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bi_b = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
wo_b = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
uo_b = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bo_b = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
wc_b = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,hidden_size],stddev=0.01))
uc_b = tf.Variable(np.eye(hidden_size),dtype=tf.float32)
bc_b = tf.Variable(tf.zeros([1,hidden_size]),dtype=tf.float32)
Wattention_b = tf.Variable(tf.zeros([K,1]),dtype=tf.float32)
#2 ATTENTION PARAMETERS
Wp = tf.Variable(tf.truncated_normal(shape=[2*hidden_size,50],stddev=0.01))
Vp = tf.Variable(tf.truncated_normal(shape=[50,1],stddev=0.01))
Wa = tf.Variable(tf.truncated_normal(shape=[2*hidden_size,2*hidden_size],stddev=0.01))
Wc = tf.Variable(tf.truncated_normal(shape=[4*hidden_size,2*hidden_size],stddev=0.01))
#3 DECODER PARAMETERS
Ws = tf.Variable(tf.truncated_normal(shape=[2*hidden_size,vocab_len],stddev=0.01))
cell_d = tf.zeros([1,2*hidden_size],dtype=tf.float32)
wf_d = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,2*hidden_size],stddev=0.01))
uf_d = tf.Variable(np.eye(2*hidden_size),dtype=tf.float32)
bf_d = tf.Variable(tf.zeros([1,2*hidden_size]),dtype=tf.float32)
wi_d = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,2*hidden_size],stddev=0.01))
ui_d = tf.Variable(np.eye(2*hidden_size),dtype=tf.float32)
bi_d = tf.Variable(tf.zeros([1,2*hidden_size]),dtype=tf.float32)
wo_d = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,2*hidden_size],stddev=0.01))
uo_d = tf.Variable(np.eye(2*hidden_size),dtype=tf.float32)
bo_d = tf.Variable(tf.zeros([1,2*hidden_size]),dtype=tf.float32)
wc_d = tf.Variable(tf.truncated_normal(shape=[word_vec_dim,2*hidden_size],stddev=0.01))
uc_d = tf.Variable(np.eye(2*hidden_size),dtype=tf.float32)
bc_d = tf.Variable(tf.zeros([1,2*hidden_size]),dtype=tf.float32)
hidden_residuals_d = tf.TensorArray(size=K,dynamic_size=True,dtype=tf.float32,clear_after_read=False)
hidden_residuals_d = hidden_residuals_d.unstack(tf.zeros([K,2*hidden_size],dtype=tf.float32))
Wattention_d = tf.Variable(tf.zeros([K,1]),dtype=tf.float32)
output = tf.TensorArray(size=tf_output_len,dtype=tf.float32)
#BI-DIRECTIONAL LSTM
hidden_forward = forward_encoder(tf_text,
initial_hidden_f,cell_f,
wf_f,uf_f,bf_f,
wi_f,ui_f,bi_f,
wo_f,uo_f,bo_f,
wc_f,uc_f,bc_f,
Wattention_f,
tf_seq_len,
word_vec_dim)
hidden_backward = backward_encoder(tf_text,
initial_hidden_b,cell_b,
wf_b,uf_b,bf_b,
wi_b,ui_b,bi_b,
wo_b,uo_b,bo_b,
wc_b,uc_b,bc_b,
Wattention_b,
tf_seq_len,
word_vec_dim)
encoded_hidden = tf.concat([hidden_forward,hidden_backward],1)
#ATTENTION MECHANISM AND DECODER
decoded_hidden = encoded_hidden[0]
decoded_hidden = tf.reshape(decoded_hidden,[1,2*hidden_size])
Wattention_d_normalized = tf.nn.softmax(Wattention_d)
tf_embd_limit = tf.convert_to_tensor(np_embd_limit)
y = tf.convert_to_tensor(SOS) #inital decoder token <SOS> vector
y = tf.reshape(y,[1,word_vec_dim])
j=K
hidden_residuals_stack = hidden_residuals_d.stack()
RRA = tf.reduce_sum(tf.multiply(hidden_residuals_stack[j-K:j],Wattention_d_normalized),0)
RRA = tf.reshape(RRA,[1,2*hidden_size])
decoded_hidden_next,cell_d = decoder(y,decoded_hidden,cell_d,
wf_d,uf_d,bf_d,
wi_d,ui_d,bf_d,
wo_d,uo_d,bf_d,
wc_d,uc_d,bc_d,
RRA)
decoded_hidden = decoded_hidden_next
hidden_residuals_d = hidden_residuals_d.write(j,tf.reshape(decoded_hidden,[2*hidden_size]))
j=j+1
i=0
def attention_decoder_cond(i,j,decoded_hidden,cell_d,hidden_residuals_d,output):
return i < tf_output_len
def attention_decoder_body(i,j,decoded_hidden,cell_d,hidden_residuals_d,output):
#LOCAL ATTENTION
G,pt = align(encoded_hidden,decoded_hidden,Wp,Vp,Wa,tf_seq_len)
local_encoded_hidden = encoded_hidden[pt-D:pt+D+1]
weighted_encoded_hidden = tf.multiply(local_encoded_hidden,G)
context_vector = tf.reduce_sum(weighted_encoded_hidden,0)
context_vector = tf.reshape(context_vector,[1,2*hidden_size])
attended_hidden = tf.tanh(tf.matmul(tf.concat([context_vector,decoded_hidden],1),Wc))
#DECODER
y = tf.matmul(attended_hidden,Ws)
output = output.write(i,tf.reshape(y,[vocab_len]))
#Save probability distribution as output
y = tf.nn.softmax(y)
y_index = tf.cast(tf.argmax(tf.reshape(y,[vocab_len])),tf.int32)
y = tf_embd_limit[y_index]
y = tf.reshape(y,[1,word_vec_dim])
#setting next decoder input token as the word_vector of maximum probability
#as found from previous attention-decoder output.
hidden_residuals_stack = hidden_residuals_d.stack()
RRA = tf.reduce_sum(tf.multiply(hidden_residuals_stack[j-K:j],Wattention_d_normalized),0)
RRA = tf.reshape(RRA,[1,2*hidden_size])
decoded_hidden_next,cell_d = decoder(y,decoded_hidden,cell_d,
wf_d,uf_d,bf_d,
wi_d,ui_d,bf_d,
wo_d,uo_d,bf_d,
wc_d,uc_d,bc_d,
RRA)
decoded_hidden = decoded_hidden_next
hidden_residuals_d = tf.cond(tf.equal(j,tf_output_len-1+K+1), #(+1 for <SOS>)
lambda: hidden_residuals_d,
lambda: hidden_residuals_d.write(j,tf.reshape(decoded_hidden,[2*hidden_size])))
return i+1,j+1,decoded_hidden,cell_d,hidden_residuals_d,output
i,j,decoded_hidden,cell_d,hidden_residuals_d,output = tf.while_loop(attention_decoder_cond,
attention_decoder_body,
[i,j,decoded_hidden,cell_d,hidden_residuals_d,output])
hidden_residuals_d.close().mark_used()
output = output.stack()
return output
# In[ ]:
output = model(tf_text,tf_seq_len,tf_output_len)
#OPTIMIZER
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, labels=tf_summary))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
#PREDICTION
pred = tf.TensorArray(size=tf_output_len,dtype=tf.int32)
i=0
def cond_pred(i,pred):
return i<tf_output_len
def body_pred(i,pred):
pred = pred.write(i,tf.cast(tf.argmax(output[i]),tf.int32))
return i+1,pred
i,pred = tf.while_loop(cond_pred,body_pred,[i,pred])
prediction = pred.stack()
# In[ ]:
import string
init = tf.global_variables_initializer()
from nltk.translate.bleu_score import sentence_bleu
from rouge import Rouge
import pandas as pd
from nltk.tokenize import RegexpTokenizer
testrank_res = pd.DataFrame()
testrank_res['Summary'] = None
testrank_res['BLEU'], testrank_res['ROUGE2_f'], testrank_res['ROUGE1_f'], testrank_res['ROUGE1_p'], testrank_res['ROUGE2_p'] = None, None, None, None, None
row = 0
max_score = -1
saved_sum = ''
true_sum = ''
best_scores = []
with tf.Session() as sess: # Start Tensorflow Session
# Prepares variable for saving the model
sess.run(init) #initialize all variables
saver = tf.train.import_meta_graph('/home/yuriyp/projects/ucu/ml/ml_project/Abstractive-Summarization/saved_model-1000.meta')
saver.restore(sess,tf.train.latest_checkpoint('./'))
# saver = tf.train.Saver()
step = 0
loss_list=[]
acc_list=[]
val_loss_list=[]
val_acc_list=[]
best_val_acc=0
display_step = 3
while step < training_iters:
total_loss=0
total_acc=0
total_val_loss = 0
total_val_acc = 0
for i in xrange(0, train_len):
train_out = transform_out(train_summaries[i][0:len(train_summaries[i])-1])
if i%display_step==0:
print("\nIteration: "+str(i))
print("Training input sequence length: "+str(len(train_texts[i])))
print("Training target outputs sequence length: "+str(len(train_out)))
print("\nTEXT:")
flag = 0
"""
text = ''
for vec in train_texts[i]:
if vec2word(vec) in string.punctuation or flag==0:
pass
#print(str(vec2word(vec)), end='')
text += str(vec2word(vec))
else:
text += " "+str(vec2word(vec))
#print((" "+str(vec2word(vec))), end='')
flag=1
print(text)
print("\n")
"""
# Run optimization operation (backpropagation)
_,loss,pred = sess.run([optimizer,cost,prediction],feed_dict={tf_text: train_texts[i],
tf_seq_len: len(train_texts[i]),
tf_summary: train_out,
tf_output_len: len(train_out)})
if i%display_step==0:
predicted_sum = ''
print("\nPREDICTED SUMMARY:\n")
flag = 0
for index in pred:
#if int(index)!=vocab_limit.index('eos'):
if vocab_limit[int(index)] in string.punctuation or flag==0:
#print(str(vocab_limit[int(index)]),end='')
predicted_sum += vocab_limit[int(index)]
else:
#print(" "+str(vocab_limit[int(index)]),end='')
predicted_sum += " "+str(vocab_limit[int(index)])
flag=1
print(predicted_sum)
print("\n")
print("ACTUAL SUMMARY:\n")
acctual_sum = ''
flag = 0
for vec in train_summaries[i]:
if vec2word(vec)!='eos':
if vec2word(vec) in string.punctuation or flag==0:
#print(str(vec2word(vec)),end='')
acctual_sum += str(vec2word(vec))
else:
#print((" "+str(vec2word(vec))),end='')
acctual_sum += " "+str(vec2word(vec))
flag=1
print(acctual_sum)
print("\n")
print("loss="+str(loss))
actual_mean = np.sum(train_summaries[i])/len(train_summaries[i])
predicted_mean = np.sum(pred)/len(pred)
embed_score = np.linalg.norm(predicted_mean-actual_mean)
print ("embed_score = ", embed_score)
tokenizer = RegexpTokenizer(r'\w+')
tokenized_predicted_sum = tokenizer.tokenize(predicted_sum)
tokenized_acctual_sum= tokenizer.tokenize(acctual_sum)
bleu_score = 0
try:
bleu_score = sentence_bleu(tokenized_predicted_sum, tokenized_acctual_sum)
rouge = Rouge()
rouge_score = rouge.get_scores(' '.join(tokenized_predicted_sum), ' '.join(tokenized_acctual_sum))
testrank_res.loc[row] = [predicted_sum, bleu_score, rouge_score[0]['rouge-2']['f'], rouge_score[0]['rouge-1']['f'],
rouge_score[0]['rouge-2']['p'], rouge_score[0]['rouge-1']['p']]
row += 1
step_score = np.sum([bleu_score, rouge_score[0]['rouge-2']['f'], rouge_score[0]['rouge-1']['f'],
rouge_score[0]['rouge-2']['p'], rouge_score[0]['rouge-1']['p']])
if step_score > max_score:
best_scores = [bleu_score, rouge_score[0]['rouge-2']['f'], rouge_score[0]['rouge-1']['f'],
rouge_score[0]['rouge-2']['p'], rouge_score[0]['rouge-1']['p']]
max_score = step_score
saved_sum = predicted_sum
true_sum = acctual_sum
print("predicted_sum ", saved_sum)
print("true_sum ", true_sum)
print("best_scores ", best_scores)
print(testrank_res.tail(1))
except Exception as e:
pass
if i%100==0:
testrank_res.to_csv('textrank_scores.csv', index=False)
saver.save(sess, '/home/yuriyp/projects/ucu/ml/ml_project/Abstractive-Summarization/saved_model',global_step=1000)
step=step+1
| true |
897e2f3a2005b71dcf5d27c1d898459df1fbae9a | Python | albertotb/solar | /src/train_conv_choose_dir.py | UTF-8 | 4,422 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Build train and test matrices
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
import keras
from utils.build_matrix import df_shift, to_array
from utils.clr import CyclicLR
from utils.models import conv1D_lon, conv1D_lon_lat
df = pd.read_pickle('/home/SHARED/SOLAR/data/oahu_min_final.pkl')
df_roll = df_shift(df, periods=1)
# Split target (time t) and variables (times t-1 to t-width+1)
y = df_roll['t']
X = df_roll.drop(columns='t', level='time')
# Split train-test, approximately 12 and 4 months respectively
X_train, X_test = X[:'2011-07-31'], X['2011-08-01':]
y_train, y_test = y[:'2011-07-31'], y['2011-08-01':]
# We only use the previous timestep as features
X_tr1 = X_train['t-1']
y_tr1 = y_train
X_te1 = X_test['t-1']
y_te1 = y_test
# We load the info of the sensors to extract the longitude information
info = pd.read_pickle('/home/SHARED/SOLAR/data/info.pkl')
info = info.drop("AP3")
info = info[["Longitude", "Latitude"]]
info["MAE"] = 0 ## to store MAEs
##
u = np.array([-1,1]) # Direction to order sensors. (1,0) = Longitude, (0,1) = Latitude
norm_u = np.sqrt(np.sum(u**2))
u_n = u/norm_u
##
info['Order'] = np.dot(info[["Longitude", "Latitude"]].values, u_n)
order = info['Order'].sort_values(ascending=False)
##
path = 'results/conv1D_Long' + str(u[0]) + "_Lat" + str(u[1]) + ".csv" ## to save results
# Finally, we sort the data according to the defined order
X_tr_ord = X_tr1[order.index]
y_tr_ord = y_tr1[order.index]
X_te_ord = X_te1[order.index]
y_te_ord = y_te1[order.index]
lr = 0.0001
opt = keras.optimizers.Adam(lr=lr)
# We add a callback to log metrics and another one to schedule the learning rate
c1 = keras.callbacks.BaseLogger(stateful_metrics=None)
c2 = CyclicLR(step_size=250, base_lr=lr)
c3 = keras.callbacks.History()
batch_size = 1 << 11 # as big as possible so we can explore many models
epochs = 1 << 5
def train_and_test_sensor(idx_sensor, id_sensor, n_sensors, use_lat=False):
"""Cross-validate and test a conv1D forecasting model for one sensor.

idx_sensor/id_sensor: positional index and label of the target sensor in
the direction-ordered matrices; n_sensors: number of input sensors;
use_lat: when True, feed a second latitude-ordered input branch.
Relies on module-level globals: X_tr_ord/y_tr_ord/X_te_ord/y_te_ord,
opt, batch_size, epochs and the callbacks c2 (cyclic LR) and c3 (history).
Returns (test_loss, cv_loss): final test-set MAE and the list of per-fold
validation MAEs.

NOTE(review): X_tr_lat / y_tr_lat / X_te_lat / y_te_lat are not defined
anywhere in this file, so the use_lat=True path will raise NameError --
confirm where those are supposed to come from.
"""
X_tr1, y_tr1, X_te1, y_te1 = to_array(X_tr_ord, y_tr_ord, X_te_ord, y_te_ord, id_sensor=id_sensor)
if use_lat:
X_tr2, y_tr2, X_te2, y_te2 = to_array(X_tr_lat, y_tr_lat, X_te_lat, y_te_lat, id_sensor=id_sensor)
# Validation using TS split (just to obtain different MAE estimations, no hyperoptimization for the moment)
cv_loss = []
for tr_idx, va_idx in TimeSeriesSplit(n_splits=5).split(X_tr1):
if not use_lat:
train_data = np.atleast_3d(X_tr1[tr_idx])
validation_data = np.atleast_3d(X_tr1[va_idx])
model = conv1D_lon(idx_sensor, n_sensors=n_sensors)
else:
train_data = [np.atleast_3d(X_tr1[tr_idx]), np.atleast_3d(X_tr2[tr_idx])]
validation_data = [np.atleast_3d(X_tr1[va_idx]), np.atleast_3d(X_tr2[va_idx])]
model = conv1D_lon_lat(idx_sensor, n_sensors=n_sensors)
model.compile(opt, loss='mean_absolute_error')
model.fit(train_data, y_tr1[tr_idx],
batch_size=batch_size,
epochs=epochs,
validation_data=(validation_data, y_tr1[va_idx]),
callbacks=[c2, c3],
verbose=0)
# Last epoch's validation loss of this fold.
cv_loss.append(c3.history['val_loss'][-1])
# Testing
# Retrain from scratch on the full training set; the held-out test set is
# passed as validation_data so its MAE is logged each epoch by c3.
if not use_lat:
train_data = np.atleast_3d(X_tr1)
validation_data = np.atleast_3d(X_te1)
model = conv1D_lon(idx_sensor, n_sensors=n_sensors)
else:
train_data = [np.atleast_3d(X_tr1), np.atleast_3d(X_tr2)]
validation_data = [np.atleast_3d(X_te1), np.atleast_3d(X_te2)]
model = conv1D_lon_lat(idx_sensor, n_sensors=n_sensors)
model.compile(opt, loss='mean_absolute_error')
model.fit(train_data, y_tr1,
batch_size=batch_size,
epochs=epochs,
validation_data=(validation_data, y_te1),
callbacks=[c2, c3],
verbose=0)
# Test MAE = validation loss of the final epoch above.
test_loss = c3.history['val_loss'][-1]
#model.save('../models/conv1D_{}_{:1d}.h5'.format(id_sensor, use_lat))
print('MAE_val ', cv_loss)
print('MAE_test ', test_loss)
return test_loss, cv_loss
maes1 = {}
maes2 = {}
for idx_sensor, id_sensor in enumerate(order.index.values):
print(idx_sensor, id_sensor)
test_loss, _ = train_and_test_sensor(idx_sensor, id_sensor, n_sensors=16)
info.MAE[info.index == id_sensor] = test_loss
info.to_csv(path)
| true |
ccdb988c633824a606ecc490e689d4ac210a03e2 | Python | AjxGnx/python-funcional | /ejercicio7.py | UTF-8 | 176 | 3.34375 | 3 | [] | no_license | def calculate_p_escalar(t1, t2):
def calculate_p_escalar(t1, t2):
    """Return the scalar (dot) product of two equal-length vectors.

    Iterates the two sequences pairwise and sums the element products;
    returns 0 for empty inputs.
    """
    return sum(a * b for a, b in zip(t1, t2))
print(calculate_p_escalar((3, 5), (2, 3)))
| true |
14e8f39d176577ad04150b734708d3cc6a1843ff | Python | vurokrazia/Curse-Python-101 | /Strings/reverse.py | UTF-8 | 568 | 4.0625 | 4 | [] | no_license | def tipo (adn):
def tipo(adn):
    """Print a confirmation message when `adn` is a string; otherwise do nothing."""
    # isinstance is the idiomatic type check (and also accepts str
    # subclasses), unlike the exact-type comparison `type(adn) is str`.
    if isinstance(adn, str):
        print('\n' + adn + ' is a string\n')
# Complementary base for each DNA nucleotide.
_COMPLEMENT = {"G": "C", "A": "T", "T": "A", "C": "G"}


def sus(w):
    """Return the complementary DNA base for `w`, or '' for any other value.

    A dict lookup replaces the original if/elif chain -- same mapping,
    single point of truth.
    """
    return _COMPLEMENT.get(w, "")
def palabra(siz, b):
    """Return the complements of the first `b` bases of `siz` as a list."""
    return [sus(siz[idx]) for idx in range(b)]
"""adn = "gattaca".upper()"""
adn = input ('The word ').upper()
tipo(adn)
word_two = palabra(adn,len(adn))
print ("The complement is \t" + "".join(word_two))
word_two = palabra(adn[::-1],len(adn))
print ("The reverse is \t\t" + "".join(word_two)) | true |
2b60d48cc0b4f00a00fbe70cdbd04daadeec88db | Python | Ligh7bringer/Chatbot | /chatbot/bot.py | UTF-8 | 4,108 | 2.765625 | 3 | [
"MIT"
] | permissive | import logging
import stat
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
import os
import chatbot.constants as const
from chatbot.crawler import Crawler
class Bot:
def __init__(self, db=None):
"""Create the underlying ChatBot and open (or create) its SQLite database.

db: optional database basename (without extension); defaults to
const.DB_FILE.  The extension const.DB_FILE_EXT is always appended.
"""
# custom database name in the event that
# multiple chatbots need to exist
if db is None:
self.db = const.DB_FILE + const.DB_FILE_EXT
else:
self.db = db + const.DB_FILE_EXT
# store path to the database
# NOTE(review): db_path is anchored at const.PROJECT_ROOT, but
# database_uri below uses the bare relative name self.db -- confirm both
# resolve to the same file when the CWD is not PROJECT_ROOT.
self.db_path = os.path.join(const.PROJECT_ROOT, self.db)
# initialise chatbot
self.chatbot = ChatBot(
# name
"C++ bot",
read_only=True,
storage_adapter="chatbot.storage.SQLStorageAdapter",
preprocessors=[
'chatterbot.preprocessors.unescape_html'
],
# Best-match responder with a fixed fallback, plus a canned reply
# for the literal input "Help".
logic_adapters=[
{
'import_path': 'chatbot.logic.BestMatch',
'default_response': const.BOT_NOT_UNDERSTAND,
'maximum_similarity_threshold': 0.90
},
{
'import_path': 'chatbot.logic.SpecificResponseAdapter',
'input_text': 'Help',
'output_text': const.BOT_HELP_MSG
}
],
database_uri='sqlite:///' + self.db
)
# Reuse the ChatBot's logger for all of this wrapper's own messages.
self.logger = self.chatbot.logger
logging.basicConfig(level=logging.INFO)
def get_files(self, loc):
    """Return the entry names in directory `loc`, or [] if it is unreadable."""
    try:
        return os.listdir(loc)
    except (FileNotFoundError, FileExistsError, OSError):
        self.logger.warning(f"{loc} does not exist or is empty. Skipping...")
        return []
# trains the chatbot
def train(self):
"""Train on the English greetings corpus plus every crawler-generated file.

Missing chatterbot-corpus is logged as an error but does not abort the
crawler-file training below.
"""
# initialise trainer
trainer = ChatterBotCorpusTrainer(self.chatbot)
# make sure chatterbot-corpus is installed
try:
trainer.train("chatterbot.corpus.english.greetings")
# show an error message if it's not
except (OSError, FileExistsError, FileNotFoundError):
self.logger.error("Couldn't find chatterbot-corpus! Are you sure it's installed?\n"
"(try pip install chatterbot-corpus)")
# get the file names of files made by the crawler
files = self.get_files(const.DATA_DIR_PATH)
# iterate over them
for file in files:
# train the chatbot with each file
trainer.train(os.path.join(const.DATA_DIR_PATH, file))
# returns the bot's response to the input 'question'
def get_response(self, question):
"""Delegate to the underlying ChatBot and return its reply to `question`."""
return self.chatbot.get_response(question)
# updates the rating for answer 'answer'
def update_rating(self, answer, rating):
"""Forward the (answer, rating) pair to the storage adapter for persistence."""
self.chatbot.storage.update_rating(answer, rating)
# initialises and runs a crawler to collect data
def collect_data(self, threads, pages, verbose):
"""Run a Crawler with the given thread count, page limit and verbosity."""
crawler = Crawler(threads, pages, verbose)
crawler.crawl()
def del_db(self):
    """Delete the chatbot's database file, forcing write access if needed."""
    try:
        os.remove(self.db_path)
    except FileNotFoundError:
        self.logger.warning(f"{self.db_path} does not exist.")
    except PermissionError:
        # The file may be locked read-only by another process; make it
        # writable and retry the delete once.
        self.logger.warning(f"{self.db_path} is open in another program and cannot be deleted.")
        os.chmod(self.db_path, stat.S_IWRITE)
        os.remove(self.db_path)
def clean(self):
    """Remove every crawler-generated training file and its directory."""
    files = self.get_files(const.DATA_DIR_PATH)
    for name in files:
        os.remove(os.path.join(const.DATA_DIR_PATH, name))
    self.logger.info(f"Deleted {len(files)} files from {const.DATA_DIR_PATH}")
    try:
        os.rmdir(const.DATA_DIR_PATH)
    except (FileNotFoundError, OSError):
        self.logger.info(f"{const.DATA_DIR_PATH} does not exist. Skipping.")
| true |
b132ce8a261e53e4a3b139aead02c90c7ff8b81b | Python | hung0422/practice_test | /t01.py | UTF-8 | 5,804 | 5.0625 | 5 | [] | no_license | #1-1้ก
'''
่ซๆฐๅฏซไธ็จๅผ๏ผ่ฎไฝฟ็จ่
่ผธๅ
ฅไบๅๆธๅญ๏ผ่จ็ฎไธฆ่ผธๅบ้ไบๅๆธๅญไนๆธๅผใ็ธฝๅๅๅนณๅๆธใ
ๆ็คบ๏ผ็ธฝๅ่ๅนณๅๆธ็่ผธๅบๅฐๅฐๆธ้ปๅพ็ฌฌ1ไฝใ
'''
'''
A = float(input('่ซ่ผธๅ
ฅๆธๅญ1'))
B = float(input('่ซ่ผธๅ
ฅๆธๅญ2'))
C = float(input('่ซ่ผธๅ
ฅๆธๅญ3'))
D = float(input('่ซ่ผธๅ
ฅๆธๅญ4'))
E = float(input('่ซ่ผธๅ
ฅๆธๅญ5'))
print('{:<2.1f} {:<2.1f} {:<2.1f} {:<2.1f} {:<2.1f} '.format(A,B,C,D,E))
print('็ธฝๅ' , '{:<2.1f}'.format(A+B+C+D+E))
print('ๅนณๅๆธ' , '{:<2.1f}'.format((A+B+C+D+E)/5))
'''
#1-2้ก
'''
ๅ่จญไธ่ณฝ่ท้ธๆๅจxๅy็ง็ๆ้่ทๅฎzๅ
ฌ้๏ผ่ซๆฐๅฏซไธ็จๅผ๏ผ่ผธๅ
ฅxใyใzๆธๅผ๏ผ
ๆๅพ้กฏ็คบๆญค้ธๆๆฏๅฐๆ็ๅนณๅ่ฑๅฉ้ๅบฆ๏ผ1่ฑๅฉ็ญๆผ1.6ๅ
ฌ้๏ผใ
ๆ็คบ๏ผ่ผธๅบๆตฎ้ปๆธๅฐๅฐๆธ้ปๅพ็ฌฌไธไฝใ
'''
'''
X =eval(input('ๅนพๅ'))
Y =eval(input('ๅนพ็ง'))
Z =eval(input('ๅนพๅ
ฌ้'))
Y =60 * X + Y
W = Z / 1.6
print('ๅนณๅ่ฑ้','{:.1f}'.format(W/Y*3600))
'''
#1-3้ก
'''
่ซๆฐๅฏซไธ็จๅผ๏ผ่ผธๅ
ฅๅ
ฉๅๆญฃๆธ๏ผไปฃ่กจไธ็ฉๅฝขไนๅฏฌๅ้ซ๏ผ่จ็ฎไธฆ่ผธๅบๆญค็ฉๅฝขไน้ซ๏ผHeight๏ผใๅฏฌ๏ผWidth๏ผใๅจ้ท๏ผPerimeter๏ผๅ้ข็ฉ๏ผArea๏ผใ
ๆ็คบ๏ผ่ผธๅบๆตฎ้ปๆธๅฐๅฐๆธ้ปๅพ็ฌฌไบไฝใ
'''
'''
A = eval(input('ๅฏฌ:'))
B = eval(input('้ซ:'))
print('Height' , '{:.2f}'.format(A))
print('Width' , '{:.2f}'.format(B))
print('Perimeter' ,'{:.2f}'.format(2*(A+B)))
print('Area','{:.2f}'.format(A*B))
'''
#1-4้ก
'''
่ซไฝฟ็จ้ธๆๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผ่ฎไฝฟ็จ่
่ผธๅ
ฅไธๅ้้ท๏ผๆชขๆฅ้ไธๅ้้ทๆฏๅฆๅฏไปฅ็ตๆไธๅไธ่งๅฝขใ
่ฅๅฏไปฅ๏ผๅ่ผธๅบ่ฉฒไธ่งๅฝขไนๅจ้ท๏ผๅฆๅ้กฏ็คบใInvalidใใ
ๆ็คบ๏ผๆชขๆฅๆนๆณ = ไปปๆๅ
ฉๅ้้ทไน็ธฝๅๅคงๆผ็ฌฌไธ้้ทใ
'''
'''
A =eval(input('ๅจ้ท1'))
B =eval(input('ๅจ้ท2'))
C =eval(input('ๅจ้ท3'))
if A + B > C and A + C > B and B + C > A:
print('ๅจ้ท','{}'.format(A+B+C))
else:
print('{}'.format('Invalid'))
'''
#1-5้ก
'''
่ซไฝฟ็จ้ธๆๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผ่ฎไฝฟ็จ่
่ผธๅ
ฅไธๅๅ้ฒไฝๆดๆธnum(0 โค num โค 15)๏ผๅฐnum่ฝๆๆๅๅ
ญ้ฒไฝๅผใ
ๆ็คบ๏ผ่ฝๆ่ฆๅ = ๅ้ฒไฝ0~9็ๅๅ
ญ้ฒไฝๅผ็บๅ
ถๆฌ่บซ๏ผๅ้ฒไฝ10~15็ๅๅ
ญ้ฒไฝๅผ็บA~Fใ
'''
'''
A = eval(input('่ผธๅ
ฅ0~15'))
if A >= 0 and A <= 15:
print('{:X}'.format(A))
'''
#1-6้ก
'''
่ซไฝฟ็จ้ธๆๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผ่ฆๆฑไฝฟ็จ่
่ผธๅ
ฅ่ณผ็ฉ้้ก๏ผ่ณผ็ฉ้้ก้ๅคงๆผ8,000๏ผๅซ๏ผไปฅไธ๏ผ
ไธฆ้กฏ็คบๆๆฃๅชๆ ๅพ็ๅฏฆไป้้กใ่ณผ็ฉ้้กๆๆฃๆนๆกๅฆไธ่กจๆ็คบ๏ผ
้้ก ๆๆฃ
8,000๏ผๅซ๏ผไปฅไธ 9.5ๆ
18,000๏ผๅซ๏ผไปฅไธ 9ๆ
28,000๏ผๅซ๏ผไปฅไธ 8ๆ
38,000๏ผๅซ๏ผไปฅไธ 7ๆ
'''
'''
A = eval(input('่ณผ็ฉ้้ก'))
if A >= 38000:
print('ๆๆฃๅพ้้ก','{}'.format(A*0.7))
elif A >= 28000:
print('ๆๆฃๅพ้้ก', '{}'.format(A * 0.8))
elif A >= 18000:
print('ๆๆฃๅพ้้ก', '{}'.format(A * 0.9))
elif A >= 8000:
print('ๆๆฃๅพ้้ก', '{}'.format(A * 0.95))
'''
#1-7้ก
'''
่ซไฝฟ็จ้ธๆๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผๆ นๆไฝฟ็จ่
่ผธๅ
ฅ็ๅๆธ้กฏ็คบๅฐๆ็็ญ็ดใๆจๆบๅฆไธ่กจๆ็คบ๏ผ
ๅๆธ ็ญ็ด
80 ~ 100 A
70 ~ 79 B
60 ~ 69 C
<= 59 F
'''
'''
A = eval(input('ๆ็ธพ'))
if A >=80 and A <=100:
print('A')
elif A >= 70 and A <80:
print('B')
elif A >=60 and A <70:
print('C')
elif A <= 59:
print('F')
'''
#1-8้ก
'''
่ซไฝฟ็จ้ธๆๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผ่ฎไฝฟ็จ่
่ผธๅ
ฅไธๅๆญฃๆดๆธ๏ผ็ถๅพๅคๆทๅฎๆฏ3ๆ5็ๅๆธ๏ผ
้กฏ็คบใx is a multiple of 3.ใๆใx is a multiple of 5.ใ๏ผ
่ฅๆญคๆธๅผๅๆ็บ3่5็ๅๆธ๏ผ้กฏ็คบใx is a multiple of 3 and 5.ใ๏ผ
ๅฆๆญคๆธๅผ็ไธๅฑฌๆผ3ๆ5็ๅๆธ๏ผ้กฏ็คบใx is not a multiple of 3 or 5.ใ๏ผ
ๅฐไฝฟ็จ่
่ผธๅ
ฅ็ๆธๅผไปฃๅ
ฅxใ
'''
'''
X =eval(input('ๆญฃๆดๆธ'))
if X % 3 == 0 and X % 5 == 0:
print('x is a multiple of 3 and 5.')
elif X % 3 == 0:
print('x is a multiple of 3.')
elif X % 5 == 0:
print('x is a multiple of 5.')
else:
print('x is not a multiple of 3 or 5.')
'''
#1-9้ก
'''
่ซไฝฟ็จ่ฟดๅๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผๆ็คบไฝฟ็จ่
่ผธๅ
ฅ้้ก๏ผๅฆ10,000๏ผใๅนดๆถ็็๏ผๅฆ5.75๏ผ๏ผ
ไปฅๅ็ถ้็ๆไปฝๆธ๏ผๅฆ5๏ผ๏ผๆฅ่้กฏ็คบๆฏๅๆ็ๅญๆฌพ็ธฝ้กใ
ๆ็คบ๏ผๅๆจไบๅ
ฅ๏ผ่ผธๅบๆตฎ้ปๆธๅฐๅฐๆธ้ปๅพ็ฌฌไบไฝใ
่ไพ๏ผ
ๅ่จญๆจๅญๆฌพ$10,000๏ผๅนดๆถ็็บ5.75%ใ
้ไบไธๅๆ๏ผๅญๆฌพๆๆฏ๏ผ10000 + 10000 * 5.75 / 1200 = 10047.92
้ไบๅ
ฉๅๆ๏ผๅญๆฌพๆๆฏ๏ผ10047.92 + 10047.92 * 5.75 / 1200 = 10096.06
้ไบไธๅๆ๏ผๅญๆฌพๅฐๆฏ๏ผ10096.06 + 10096.06 * 5.75 / 1200 = 10144.44
ไปฅๆญค้กๆจใ
'''
'''
A = eval(input('้้ก'))
B = eval(input('ๅนดๆถ็็'))
C = eval(input('ๆไปฝ'))
I = 1
print('{} {}'.format('ๆไปฝ','ๅญๆฌพ็ธฝ้ก'))
while I <= C:
#X =A + A * ((B * I) / 1200)
A += (A * B /1200)
#print('{:.2f}'.format(X))
#print('{:.2f}'.format(A))
print('{:>2} {:>3.2f}'.format(I,A))
I += 1
print()
'''
#1-10้ก
'''
่ซไฝฟ็จ่ฟดๅๆ่ฟฐๆฐๅฏซไธ็จๅผ๏ผ่ฎไฝฟ็จ่
่ผธๅ
ฅไธๅๆญฃๆดๆธa๏ผ
ๅฉ็จ่ฟดๅ่จ็ฎๅพ1ๅฐaไน้๏ผๆๆ5ไนๅๆธๆธๅญ็ธฝๅใ
'''
'''
A =eval(input('ๆญฃๆดๆธ'))
C = 0
for B in range(1,A+1):
if B % 5 == 0:
C += B
print(C)
'''
#1-11้ก
'''
่ซๆฐๅฏซไธ็จๅผ๏ผ่ผธๅ
ฅX็ตๅY็ตๅ่ช็็ง็ฎ่ณ้ๅไธญ๏ผ
ไปฅๅญไธฒ"end"ไฝ็บ็ตๆ้ป๏ผ้ๅไธญไธๅ
ๅซๅญไธฒ"end"๏ผใ
่ซไพๅบๅ่ก้กฏ็คบ
(1) X็ตๅY็ต็ๆๆ็ง็ฎใ
(2)X็ตๅY็ต็ๅ
ฑๅ็ง็ฎใ
(3)Y็ตๆไฝX็ตๆฒๆ็็ง็ฎ๏ผ
ไปฅๅ(4) X็ตๅY็ตๅฝผๆญคๆฒๆ็็ง็ฎ๏ผไธๅ
ๅซ็ธๅ็ง็ฎ๏ผ
'''
'''
a = str(input('ๅญๅ
'))
x = eval(input('ๅๆธ'))
y = eval(input('ๅๆธ'))
def compute(a,x,y):
for i in range(y):
for p in range(x):
print('{} '.format(a),end='')
print()
def main():
compute(a,x,y)
if __name__ == '__main__':
main()
''' | true |
a2e91d080a8744df0c409d8941ad5428e5ce4156 | Python | jfangah/Sound_Classification | /scripts/train.py | UTF-8 | 3,123 | 2.828125 | 3 | [] | no_license | import keras
from keras.layers import Activation, Dense, Dropout, Conv2D, Flatten, MaxPooling2D
from keras.models import Sequential
from tqdm import tnrange, tqdm
import numpy as np
import random
import h5py
DIST_TRAIN = '../data/extracted_data.hdf5'
EPOCH = 30
def load_data(path):
    """Read the ten per-fold datasets from an HDF5 file.

    Returns (data, labels): two parallel lists of h5py datasets, one pair
    per fold ('fold1' .. 'fold10').
    """
    folds = []
    fold_labels = []
    h5_in = h5py.File(path, 'r')
    for fold_idx in range(1, 11):
        key = 'fold' + str(fold_idx)
        fold_data = h5_in[key + '_data']
        fold_label = h5_in[key + '_label']
        print(key, 'shape:', fold_data.shape, fold_label.shape)
        folds.append(fold_data)
        fold_labels.append(fold_label)
    return folds, fold_labels
def split_train_test(data, labels, test_id):
    """Split per-fold data into training and validation (sample, label) pairs.

    Fold *test_id* becomes the validation set; every other fold goes into
    the training set.  Generalized: iterates over however many folds are
    supplied instead of the previous hard-coded 10 (identical behaviour for
    ten folds, the case used by main()).

    Args:
        data: list of per-fold 2-D arrays/datasets (rows are samples)
        labels: list of per-fold 1-D arrays/datasets (one label per sample)
        test_id: index of the validation fold

    Returns:
        (train, val) -- lists of (sample, label) tuples
    """
    train = []
    val = []
    for i in range(len(data)):
        # route the whole fold to val or train depending on the fold index
        dest = val if i == test_id else train
        for j in range(labels[i].shape[0]):
            dest.append((data[i][j], labels[i][j]))
    return train, val
def create_model():
    """Build and compile the CNN: three conv blocks on 128x128x1 input,
    ending in a 10-way softmax (Adam, categorical cross-entropy)."""
    model = Sequential()
    input_shape=(128, 128, 1)
    # conv block 1: 24 filters of 5x5, then 4x2 max-pooling
    model.add(Conv2D(24, (5, 5), strides = (1, 1), input_shape = input_shape))
    model.add(MaxPooling2D((4, 2), strides = (4, 2)))
    model.add(Activation('relu'))
    # conv block 2: 48 filters, same pooling
    model.add(Conv2D(48, (5, 5), padding = 'valid'))
    model.add(MaxPooling2D((4, 2), strides = (4, 2)))
    model.add(Activation('relu'))
    # conv block 3: 48 filters, no pooling
    model.add(Conv2D(48, (5, 5), padding = 'valid'))
    model.add(Activation('relu'))
    # classifier head: dropout -> dense 64 -> dropout -> dense 10 softmax
    model.add(Flatten())
    model.add(Dropout(rate = 0.5))
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(rate = 0.5))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(
        optimizer = 'Adam',
        loss = 'categorical_crossentropy',
        metrics = ['accuracy']
    )
    return model
def main():
    """Run 10-fold cross-validation: train a fresh CNN per fold, save each
    model, and report per-fold and average loss/accuracy."""
    data, labels = load_data(DIST_TRAIN)
    cumulate_loss = 0
    cumulate_accuracy = 0
    for i in range(10):
        # fold i is the validation set; the other nine folds train the model
        train, val = split_train_test(data, labels, i)
        X_train, Y_train = zip(*train)
        X_val, Y_val = zip(*val)
        # reshape flat samples into single-channel 128x128 images
        X_train = np.array([x.reshape((128, 128, 1)) for x in X_train])
        X_val = np.array([x.reshape((128, 128, 1)) for x in X_val])
        # one-hot encode the 10 class labels
        Y_train = np.array(keras.utils.to_categorical(Y_train, 10))
        Y_val = np.array(keras.utils.to_categorical(Y_val, 10))
        model = create_model()
        model.fit(
            x = X_train,
            y = Y_train,
            epochs = EPOCH,
            batch_size = 128,
            validation_data = (X_val, Y_val)
        )
        score = model.evaluate(
            x = X_val,
            y = Y_val
        )
        cumulate_loss += score[0]
        cumulate_accuracy += score[1]
        model.save('../models/Sample_CNN_Epoch_' + str(EPOCH) +'_Fold_' + str(i + 1) + '.h5')
        print('Cross Validation Fold', i + 1)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])
    print('Summary:')
    print('Average loss:', cumulate_loss / 10)
    print('Average accuracy:', cumulate_accuracy / 10)
if __name__ == '__main__':
    # run the full 10-fold cross-validation training pipeline
    main()
0a73b8b5973c375e4c439ae346bd63aac96a1c14 | Python | phoro3/atcoder | /ABC/ABC_114/C_problem.py | UTF-8 | 269 | 2.96875 | 3 | [] | no_license | def solve(s):
if int(s) > N:
return 0
ret = 0
if all(s.count(c) >= 1 for c in '753'):
ret = 1
for c in '753':
ret += solve(s + c)
return ret
if __name__ == "__main__":
    # read the upper bound N from stdin and count Shichi-Go-San numbers up to it
    N = int(input())
    print(solve('0'))
a1bd6e6c6e0476ae4ab7a00747644b73676f44a3 | Python | ylfingr/certsrv | /certsrv.py | UTF-8 | 12,401 | 2.703125 | 3 | [
"MIT"
] | permissive | """
A Python client for the Microsoft AD Certificate Services web page.
https://github.com/magnuswatn/certsrv
"""
import re
import urllib
import urllib.request
import requests
import base64
__version__ = '1.7.0'
class RequestDeniedException(Exception):
    """Signifies that the request was denied by the ADCS server."""
    def __init__(self, message, response):
        # keep the raw HTML response around for debugging/inspection
        super().__init__(message)
        self.response = response
class CouldNotRetrieveCertificateException(Exception):
    """Signifies that the certificate could not be retrieved."""
    def __init__(self, message, response):
        # keep the raw server response around for debugging/inspection
        super().__init__(message)
        self.response = response
class CertificatePendingException(Exception):
    """Signifies that the request needs to be approved by a CA admin."""
    def __init__(self, req_id):
        # Fix: the adjacent string literals previously joined without a space,
        # producing "...issue thecertificate you requested...".
        Exception.__init__(self, 'Your certificate request has been received. '
                                 'However, you must wait for an administrator to issue '
                                 'the certificate you requested. Your Request Id is %s.' % req_id)
        # keep the request id so callers can poll for the cert later
        self.req_id = req_id
def _get_response(session, username, password, url, data, **kwargs):
    """
    Helper function to execute an HTTP request against the given url.

    Args:
        session: The requests Session to send the request with
        username: The username for authentication
        password: The password for authentication
        url: URL for the request
        data: The data to send (POST when truthy, otherwise GET)
        auth_method: The authentication method to use ('basic' or 'ntlm')
        cafile: A PEM file containing the CA certificates that should be trusted
                (only works with basic auth)

    Returns:
        The HTTP response (status 200)

    Raises:
        requests.HTTPError: for any non-200 response (the response object is
            attached to the exception)
    """
    cafile = kwargs.pop('cafile', None)
    auth_method = kwargs.pop('auth_method', 'basic')
    if kwargs:
        raise TypeError('Unexpected argument: %r' % kwargs)
    # We need certsrv to think we are a browser, or otherwise the Content-Type of the
    # retrieved certificate will be wrong (for some reason)
    headers = {'User-agent': 'Mozilla/5.0 certsrv (https://github.com/magnuswatn/certsrv)'}
    if data:
        req = requests.Request('POST', url, data=data, headers=headers)
    else:
        req = requests.Request('GET', url, headers=headers)
    if auth_method == "ntlm":
        # We use the HTTPNtlmAuthHandler from python-ntlm for NTLM auth
        # NOTE(review): this installs a urllib opener, but the request below is
        # sent via requests' session.send(), which does not use urllib openers --
        # confirm NTLM auth actually takes effect.
        from ntlm import HTTPNtlmAuthHandler
        passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        passman.add_password(None, url, username, password)
        auth_handler = HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(passman)
        opener = urllib.request.build_opener(auth_handler)
        urllib.request.install_opener(opener)
    else:
        # We don't bother with HTTPBasicAuthHandler for basic auth, since
        # it doesn't add the credentials before receiving an 401 challange
        # as thus doubless the requests unnecessary.
        # Plus, it's easier just to add the header ourselves
        authinfo = "{}:{}".format(username, password)
        req.headers.update({'Authorization': 'Basic {}'.format(
            base64.b64encode(authinfo.encode()).decode())})
    prepreq = session.prepare_request(req)
    if cafile:
        # Bug fix: cafile is a CA bundle, so it belongs in 'verify' (server
        # certificate validation).  It was previously passed as 'cert', which
        # requests interprets as a *client* certificate.
        response = session.send(prepreq, stream=False, verify=cafile,
                                proxies=None, timeout=10)
    else:
        response = session.send(prepreq, stream=False, verify=True,
                                proxies=None, timeout=10)
    # The response code is not validated when using the HTTPNtlmAuthHandler
    # so we have to check it ourselves
    if response.status_code == 200:
        return response
    else:
        # attach the response so callers can inspect the status code
        raise requests.HTTPError(response.text, response=response)
def get_cert(server, csr, template, username, password, encoding='b64', **kwargs):
    """
    Gets a certificate from a Microsoft AD Certificate Services web page.

    Args:
        server: The FQDN to a server running the Certification Authority
            Web Enrollment role (must be listening on https)
        csr: The certificate request to submit
        template: The certificate template the cert should be issued from
        username: The username for authentication
        pasword: The password for authentication
        encoding: The desired encoding for the returned certificate.
                  Possible values are "bin" for binary and "b64" for Base64 (PEM)
        auth_method: The chosen authentication method. Either 'basic' (the default) or 'ntlm'
        cafile: A PEM file containing the CA certificates that should be trusted

    Returns:
        The issued certificate

    Raises:
        RequestDeniedException: If the request was denied by the ADCS server
        CertificatePendingException: If the request needs to be approved by a CA admin
        CouldNotRetrieveCertificateException: If something went wrong while fetching the cert

    .. note:: The cafile parameter does not work with NTLM authentication.
    """
    # form fields expected by certsrv's certfnsh.asp submission page
    data = {
        'Mode': 'newreq',
        'CertRequest': csr,
        'CertAttrib': 'CertificateTemplate:{}'.format(template),
        'UserAgent': 'certsrv (https://github.com/magnuswatn/certsrv)',
        'FriendlyType': 'Saved-Request Certificate',
        'TargetStoreFlags': '0',
        'SaveCert': 'yes'
    }
    session = requests.Session()
    url = "https://{}/certsrv/certfnsh.asp".format(server)
    #data_encoded = urllib.urlencode(data)
    response = _get_response(session, username, password, url, data, **kwargs)
    response_page = response.text
    # We need to parse the Request ID from the returning HTML page
    try:
        req_id = re.search(r'certnew.cer\?ReqID=(\d+)&', response_page).group(1)
    except AttributeError:
        # We didn't find any request ID in the response. It may need approval.
        if re.search(r'Certificate Pending', response_page):
            req_id = re.search(r'Your Request Id is (\d+).', response_page).group(1)
            raise CertificatePendingException(req_id)
        else:
            # Must have failed. Lets find the error message and raise a RequestDeniedException
            try:
                error = re.search(r'The disposition message is "([^"]+)', response_page).group(1)
            except AttributeError:
                error = 'An unknown error occured'
            raise RequestDeniedException(error, response_page)
    # the cert was issued immediately -- fetch it by its request id
    return get_existing_cert(session, server, req_id, username, password, encoding, **kwargs)
def get_existing_cert(session, server, req_id, username, password, encoding='b64', **kwargs):
    """
    Fetch a certificate that the CA has already issued.

    Args:
        session: The requests Session to send the requests with
        server: The FQDN to a server running the Certification Authority
            Web Enrollment role (must be listening on https)
        req_id: The request ID to retrieve
        username: The username for authentication
        password: The password for authentication
        encoding: The desired encoding for the returned certificate.
                  Possible values are "bin" for binary and "b64" for Base64 (PEM)
        auth_method: The chosen authentication method. Either 'basic' (the default) or 'ntlm'
        cafile: A PEM file containing the CA certificates that should be trusted

    Returns:
        The issued certificate

    Raises:
        CouldNotRetrieveCertificateException: If something went wrong while fetching the cert

    .. note:: The cafile parameter does not work with NTLM authentication.
    """
    cert_url = 'https://{}/certsrv/certnew.cer?ReqID={}&Enc={}'.format(server, req_id, encoding)
    response = _get_response(session, username, password, cert_url, None, **kwargs)
    body = response.text
    # a real certificate comes back with this exact content type
    if response.headers['Content-Type'] == 'application/pkix-cert':
        return body
    # Not a certificate -- extract the disposition message from the HTML, if any
    match = re.search('Disposition message:[^\t]+\t\t([^\r\n]+)', body)
    error = match.group(1) if match else 'An unknown error occured'
    raise CouldNotRetrieveCertificateException(error, body)
def get_ca_cert(session, server, username, password, encoding='b64', **kwargs):
    """
    Gets the (newest) CA certificate from a Microsoft AD Certificate Services web page.

    Args:
        session: The requests Session to send the requests with
        server: The FQDN to a server running the Certification Authority
                Web Enrollment role (must be listening on https)
        username: The username for authentication
        pasword: The password for authentication
        encoding: The desired encoding for the returned certificate.
                  Possible values are "bin" for binary and "b64" for Base64 (PEM)
        auth_method: The chosen authentication method. Either 'basic' (the default) or 'ntlm'
        cafile: A PEM file containing the CA certificates that should be trusted

    Returns:
        The newest CA certificate from the server

    .. note:: The cafile parameter does not work with NTLM authentication.
    """
    # Bug fix: the body used *server* but the parameter was missing from the
    # signature, so every call raised NameError.  It is added after *session*
    # to match the sibling functions get_chain()/check_credentials().
    url = 'https://{}/certsrv/certcarc.asp'.format(server)
    response = _get_response(session, username, password, url, None, **kwargs)
    response_page = response.text
    # We have to check how many renewals this server has had, so that we get the newest CA cert
    renewals = re.search(r'var nRenewals=(\d+);', response_page).group(1)
    cert_url = 'https://{}/certsrv/certnew.cer?ReqID=CACert&Renewal={}&Enc={}'.format(
        server, renewals, encoding)
    response = _get_response(session, username, password, cert_url, None, **kwargs)
    cert = response.text
    return cert
def get_chain(session, server, username, password, encoding='bin', **kwargs):
    """
    Gets the chain from a Microsoft AD Certificate Services web page.

    Args:
        session: The requests Session to send the requests with
        server: The FQDN to a server running the Certification Authority
                Web Enrollment role (must be listening on https)
        username: The username for authentication
        pasword: The password for authentication
        encoding: The desired encoding for the returned certificates.
                  Possible values are "bin" for binary and "b64" for Base64 (PEM)
        auth_method: The chosen authentication method. Either 'basic' (the default) or 'ntlm'
        cafile: A PEM file containing the CA certificates that should be trusted

    Returns:
        The CA chain from the server, in PKCS#7 format

    .. note:: The cafile parameter does not work with NTLM authentication.
    """
    url = 'https://{}/certsrv/certcarc.asp'.format(server)
    response = _get_response(session, username, password, url, None, **kwargs)
    response_page = response.text
    # We have to check how many renewals this server has had, so that we get the newest chain
    renewals = re.search(r'var nRenewals=(\d+);', response_page).group(1)
    chain_url = 'https://{}/certsrv/certnew.p7b?ReqID=CACert&Renewal={}&Enc={}'.format(
        server, renewals, encoding)
    # Bug fix: requests Response objects have no .read(); use .content to get
    # the raw PKCS#7 bytes (the default encoding here is binary).
    chain = _get_response(session, username, password, chain_url, None, **kwargs).content
    return chain
def check_credentials(session, server, username, password, **kwargs):
    """
    Checks the specified credentials against the specified ADCS server

    Args:
        session: The requests Session to send the requests with
        server: The FQDN to a server running the Certification Authority
                Web Enrollment role (must be listening on https)
        username: The username for authentication
        pasword: The password for authentication
        auth_method: The chosen authentication method. Either 'basic' (the default) or 'ntlm'
        cafile: A PEM file containing the CA certificates that should be trusted

    Returns:
        True if authentication succeeded, False if it failed.

    .. note:: The cafile parameter does not work with NTLM authentication.
    """
    url = 'https://{}/certsrv/'.format(server)
    try:
        _get_response(session, username, password, url, None, **kwargs)
    except requests.HTTPError:
        # Bug fix: _get_response raises requests.HTTPError for any non-200
        # reply; the previous `urllib.error.HTTPError` clause could never fire
        # (requests does not raise urllib exceptions), so auth failures
        # propagated instead of returning False.  The status code is not
        # exposed on the exception here, so any HTTP failure is treated as
        # bad credentials.
        return False
    else:
        return True
| true |
fcdc0f47329c01240b3e9b81a0340573bf7a5400 | Python | noveroa/DataBases | /practiceCodes/pythonpractice.py | UTF-8 | 6,155 | 3.984375 | 4 | [] | no_license | import sys
def yrto100():
    '''Interactively report how many years until the user turns 100, and in which year (Python 2: raw_input).'''
    ## Datetime! and user input. How long until you are 100? What year will it be?
    import datetime
    curyr = datetime.date.today().year
    name, year = raw_input('What is your name?'), 100 - int(raw_input('What is your current age?'))
    print ('Hello, %s. You will be 100 in %d years, the year %d' % (name, year, curyr + year))
def oddseven():
    '''Read two ints; report 4-/2-divisibility of the first and whether the second divides it (Python 2).'''
    ##Mod functions!
    # (removed an unused `import math` -- nothing in the body used it)
    number, divisor = int(raw_input('What number are you thinking of?')), int(raw_input('the second?'))
    #Check odd or even
    if number%4 == 0:
        print('even by 4!')
    elif number%2 == 0:
        print('only by 2')
    else:
        print('odd')
    #check divisibility
    if number%divisor == 0:
        print('%d is divisible by %d' %(number, divisor))
def shortlist():
    '''Check a guessed number against a random list; "within bounds" means it is also < 5 (Python 2).'''
    #random numbers, listcomprehension, if/else
    import random
    inlist = random.sample(xrange(100), 10)
    number = int(raw_input('what number are you thinking of?'))
    # newlist keeps only the sampled values below 5
    newlist = [x for x in inlist if x < 5]
    if number in inlist:
        print('%d is in the list' % (number), inlist)
        if number in newlist:
            print('%d is within bounds' % (number), newlist)
        else:
            print('%d is in the not within bounds' % (number), newlist)
    else:
        print('Your number is not in the list!', inlist)
def divisors():
    '''Read an int and print the list of all its divisors (Python 2: raw_input / print statement).'''
    #list comprehension and modulo
    number = int(raw_input('what number are you thinking of?'))
    divisors = [x for x in range(1,number+1) if number%x == 0]
    print divisors
def listoverlap():
    '''Print two random sets and their intersection (Python 2).
    NOTE: shadowed by the later non-set definition of the same name below.'''
    import random
    #given two lists what overlap? working with sets!
    list1, list2 = set(random.sample( xrange(50), 20)), set(random.sample( xrange(50), 15))
    print list1, list2
    print list1.intersection(list2)
def stringlist():
    '''Report whether the entered word is a palindrome (Python 2 print statements).'''
    word = raw_input('What word are you thinking of?')
    if word == word[::-1]:
        print'Palindrome!'
    else:
        print'no palindrome'
def evenlist():
    '''Print the even values from a random 10-number sample of 0..9999 (Python 2).'''
    import random
    print [x for x in random.sample(xrange(10000), 10) if x%2==0]
def rockpaperscissors():
import random
play = raw_input('What is your choice? Rock Paper or Scissors?')[0].lower()
comp = random.choice(['s','r','p'])
if play == comp:
print 'You tie'
elif play == 'p':
if comp == 's':
print('You lose')
else:
print ('You win!')
elif play == 's':
if comp == 'r':
print('You lose')
else:
print ('You win!')
elif play == 'r':
if comp == 'p':
print('You lose')
else:
print ('You win!')
print comp, play
def guessinggame():
    '''Higher/lower guessing game for a random target in 0..10 (Python 2).'''
    import random
    i = 1
    #num = [x for x in random.sample(xrange(10), 10)]
    target = random.randint(0, 10)
    guess = int(raw_input('Guess a number: '))
    play = raw_input('Want to play?')
    while guess != target and play != 'quit':
        if guess > target:
            print('Lower')
        else:
            print('Higher')
        guess = int(raw_input('Guess a number: '))
        i += 1
    # NOTE(review): this summary also prints when the loop exits via 'quit'
    # (claiming the number was guessed) -- confirm intended behaviour.
    print "%d was it. You took %d tries to guess %d" %(guess, i, target)
def listoverlap():
    '''Print the overlap of two random lists without using sets (O(n*m) scan; Python 2).
    NOTE: redefines (shadows) the earlier set-based listoverlap().'''
    import random
    # nonset = harder, need to check length?>
    list1, list2 = random.sample( xrange(50), 20), random.sample( xrange(50), 20)
    print ([x for x in list1 if x in list2])
    print list1
    print list2
def isprime():
    '''Read an int and report whether it is prime (Python 2: raw_input).

    Bug fix: the `else` was attached to the inner `if`, so 'prime' was
    printed once for every candidate divisor that did not divide the number
    (e.g. five times for 7).  It now uses for/else so 'prime' prints exactly
    once, only when no divisor is found.
    '''
    num = int(raw_input('Number:'))
    #case1: 0 or 1
    if num == 0 or num == 1:
        print('Not prime')
    #case2: Two
    elif num == 2:
        print( "prime number")
    #Case: others
    elif num > 2:
        for x in range(2, num):
            if num%x == 0:
                print('composite')
                break
        else:
            # loop completed without finding a divisor
            print('prime')
def divisors2(number):
    '''Return True when *number* is composite, i.e. has a divisor other than 1 and itself.'''
    # returns True if not prime
    hits = 0
    for cand in range(1, number + 1):
        if number % cand == 0:
            hits += 1
    # primes (and 0, 1, 2) have at most two divisors
    return hits > 2
def isprime2():
    '''Read an int and report prime/composite using the divisors2() helper (Python 2).'''
    num = int(raw_input('Number, please:'))
    #case1: 0 or 1
    if num == 0 or num == 1:
        print('Not prime')
    #case2: Two
    elif num == 2:
        print( "prime number")
    #Case: otherwise:
    # NOTE(review): negative input matches none of the branches, so nothing
    # is printed -- confirm intended behaviour.
    elif num > 2:
        if divisors2(num):
            print('%d is a composite' %(num))
        else:
            print('%d is a prime' %(num))
def startend():
    '''Return a random 10-number list together with [first, last] of that list (Python 2).'''
    import random
    sample = random.sample(xrange(1, 30), 10)
    return sample, [sample[0], sample[-1]]
def fibonacci():
    '''Sum the even terms of a Fibonacci-like sequence (seeded with start/end)
    until the running even-sum reaches *number*, then print it (Python 2).'''
    #fb: sequence of numbers where next number in sequence is sum of previous two.
    #first 5: (1, 1, 2, 3, 4, 5 )
    number, start, end = ( int(raw_input('Number: ')),
                       int(raw_input('Start: ')),
                       int(raw_input('End: '))
                       )
    a = start
    b = end
    fib = []
    evens = []
    # NOTE(review): the loop condition is on the accumulated *even* sum, not a
    # term count; with both seeds odd the sum can stay 0 for a while -- and
    # with number <= 0 the loop never runs. Confirm intended semantics.
    while sum(evens) < number:
        fib.append(a)
        if a%2 == 0:
            evens.append(a)
        a, b = b, a+b
    #print fib, sum(fib)
    print sum(evens)
def dups():
    '''Draw 75 values (with replacement) from 0..49 and print the distinct ones (Python 2).'''
    import numpy as np
    mylist = np.random.choice(50, 75, replace=True)
    #print mylist, len(mylist)
    print set(mylist), len(set(mylist))
def mirror():
    '''Echo the user's sentence with its word order reversed (Python 2).'''
    words = raw_input('What do you have to say?').split(' ')
    words.reverse()
    print(' '.join(words))
def password():
import random, string
import numpy as np
level = int(raw_input('How long do you want the password? '))
pw = []
while len(pw) < level:
pw.append(random.choice(string.letters + string.digits + "@#$&*_-:;',.?/"))
print ''.join(pw)
def cowsandbulls():
    '''Cows-and-bulls: guess a random 4-digit string; score[0]=exact-position
    matches, score[1]=wrong-position matches (Python 2).'''
    import random, string
    numbers = ''.join([random.choice(string.digits)for x in range(4)])
    tries, score = 0, [0, 0]
    while score[0] != 4:
        score = [0,0]
        tries +=1
        guess = raw_input('What is your guess?')
        # NOTE(review): a guess longer than 4 characters raises IndexError on
        # numbers[i], and duplicate digits inflate score[1] -- confirm.
        for i,x in enumerate(guess):
            if x == numbers[i]:
                score[0] +=1
            elif x in numbers:
                score[1] += 1
        print score, guess
    print('You took %d tries to guess %s' % (tries, numbers))
def elementsearch():
print 'I hate my life currently'
if __name__ == '__main__':
#practice functions
#yrto100()
#oddseven()
#shortlist()
#divisors()
#listoverlap() #with set
#stringlist()
#evenlist()
#while raw_input('Do you want to play?') == 'Yes':
#rockpaperscissors()
#guessinggame()
#listoverlap() #non set
#isprime()
#isprime2()
#print(startend())
#fibonacci()
#dups()
#mirror()
#password()
#cowsandbulls()
elementsearch() | true |
ce6cf96e7f77a5cb48cfd66f9785e57175b8c875 | Python | sandeepr0y/python_learning | /daa/queue.py | UTF-8 | 1,211 | 4.03125 | 4 | [] | no_license | from doubly_linked_list import MyDoublyLinkedList
class MyQueue(object):
    """FIFO queue backed by the project's MyDoublyLinkedList."""
    def __init__(self, data_iter=None):
        """Create an empty queue, optionally preloaded from *data_iter*."""
        self.__db_ll = MyDoublyLinkedList()
        if data_iter:
            # NOTE(review): initial elements are prepend()ed while push() uses
            # append() -- presumably both yield FIFO order together with pop();
            # confirm against MyDoublyLinkedList's semantics.
            for elem in data_iter:
                self.__db_ll.prepend(elem)
    def push(self, data):
        """Enqueue *data* at the back of the queue."""
        self.__db_ll.append(data)
    def pop(self):
        """Dequeue and return the front element (delegates to the list's pop())."""
        return self.__db_ll.pop()
    def __iter__(self):
        """Yield element values front-to-back by walking the node chain."""
        n = self.__db_ll.first
        while n:
            yield n.data
            n = n.next
    def __len__(self):
        """Return the number of queued elements."""
        return len(self.__db_ll)
    def __repr__(self):
        """Render the queue as [A]-->[B]-->...  (trailing arrow stripped)."""
        s = ''
        for n in self.__db_ll:
            s += '[%s]-->' % n.data
        return s.rstrip('-->')
if __name__ == '__main__':
    # demo / smoke test -- expected output is shown in the trailing comments
    queue = MyQueue()
    queue.push('A')
    queue.push('B')
    queue.push('C')
    queue.push('D')
    for elem in queue:
        print(elem)
    # A
    # B
    # C
    # D
    print()
    print(queue)
    # [A]-->[B]-->[C]-->[D]
    print()
    print(f'Pop: {queue.pop()}')
    print(queue)
    # Pop: A
    # [B]-->[C]-->[D]
    print()
    print(f'Pop: {queue.pop()}')
    print(queue)
    # Pop: B
    # [C]-->[D]
    print()
    print(f'Length: {len(queue)}')
    # Length: 2
| true |
1e3a5c22df37b65c1a3abff413b59ff0e714d107 | Python | onukura/Racoon | /racoon_tests/lib_tests/eval_tests/test_regression.py | UTF-8 | 995 | 2.828125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from unittest import TestCase
import numpy as np
from racoon.lib.evals.regression import MetricRegression
class Test(TestCase):
    """Check MetricRegression scores against independently computed numpy values."""
    def setUp(self) -> None:
        # fixed answer/prediction vectors shared by every test
        self.answer = np.array([1, 2, 3, 4, 5])
        self.predict = np.array([2, 2, 2, 2, 2])
    def test_mae(self):
        # mean absolute error
        r = float(np.abs((self.answer - self.predict)).mean())
        self.assertEqual(r, MetricRegression.score_mae(self.answer, self.predict))
    def test_mse(self):
        # mean squared error
        r = float(np.mean((self.answer - self.predict) ** 2))
        self.assertEqual(r, MetricRegression.score_mse(self.answer, self.predict))
    def test_msle(self):
        # mean squared log error (log1p for numerical safety at 0)
        r = float(np.mean((np.log1p(self.answer) - np.log1p(self.predict)) ** 2))
        self.assertEqual(r, MetricRegression.score_msle(self.answer, self.predict))
    def test_rmse(self):
        # root mean squared error
        r = float(np.sqrt(np.mean((self.answer - self.predict) ** 2)))
        self.assertEqual(r, MetricRegression.score_rmse(self.answer, self.predict))
| true |
9598a48e79666f65916b1ed7a41ac253308a0eed | Python | lixiang2017/leetcode | /problems/1091.0_Shortest_Path_in_Binary_Matrix.py | UTF-8 | 2,103 | 3.703125 | 4 | [] | no_license | '''
BFS
Runtime: 630 ms, faster than 86.36% of Python3 online submissions for Shortest Path in Binary Matrix.
Memory Usage: 15.4 MB, less than 26.25% of Python3 online submissions for Shortest Path in Binary Matrix.
'''
class Solution:
def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
n = len(grid)
if grid[0][0] != 0:
return -1
if n == 1:
return 1 if grid[0][0] == 0 else -1
# (x, y, step)
q = deque([(0, 0, 1)])
seen = set([(0, 0)])
DIR = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
while q:
x, y, step = q.popleft()
for dx, dy in DIR:
nx, ny = x + dx, y + dy
if 0 <= nx < n and 0 <= ny < n and (nx, ny) not in seen and grid[nx][ny] == 0:
if nx == n - 1 and ny == n - 1:
return step + 1
q.append((nx, ny, step + 1))
seen.add((nx, ny))
return -1
'''
Runtime: 749 ms, faster than 27.16% of Python3 online submissions for Shortest Path in Binary Matrix.
Memory Usage: 17.9 MB, less than 12.33% of Python3 online submissions for Shortest Path in Binary Matrix.
'''
class Solution:
def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:
n = len(grid)
if grid[0][0] != 0:
return -1
elif grid == [[0]]:
return 1
seen = set([(0, 0)])
# (x, y, length)
q = deque([(0, 0, 1)])
while q:
x, y, _len = q.popleft()
for dx in range(-1, 2):
for dy in range(-1, 2):
nx, ny = x + dx, y + dy
if 0 <= nx < n and 0 <= ny < n and grid[nx][ny] == 0 and not (x == nx and y == ny) and (nx, ny) not in seen:
if nx == n - 1 and ny == n - 1:
return _len + 1
seen.add((nx, ny))
q.append((nx, ny, _len + 1))
return -1
| true |
8eeceba130a8f248a640dc25bb71040dee751003 | Python | jtraver/dev | /python/psutil/count1.py | UTF-8 | 210 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import time
import sys
def main():
    '''Print 0..9, one per second, flushing stdout after each line (Python 2).'''
    # for count in xrange(10000000):
    for count in xrange(10):
        print "%s" % str(count)
        sys.stdout.flush()
        time.sleep(1)
main()  # executed unconditionally (no __main__ guard)
| true |
82b05114259819629aed1bccd0495041bf6c1e23 | Python | rasoolims/ImageTranslate | /src/scripts/wiki/extract_clean_titles.py | UTF-8 | 486 | 3 | 3 | [] | no_license | import os
import sys
print("\nReading docs")
found = 0
# argv[1]: tab-separated "src<TAB>dst" title pairs; argv[2]: output file of
# "src ||| dst" lines, keeping only titles without parentheses
with open(os.path.abspath(sys.argv[1]), "r") as reader, open(os.path.abspath(sys.argv[2]), "w") as writer:
    for i, line in enumerate(reader):
        try:
            src, dst = line.strip().split("\t")
            if "(" not in src and "(" not in dst:
                writer.write(src + " ||| " + dst + "\n")
                found += 1
        # NOTE(review): bare except also swallows write errors, not just
        # malformed (non-two-field) lines -- confirm intended.
        except:
            pass
        # progress line, overwritten in place via carriage return
        print(found, "/", i, end="\r")
print("\nDone!")
| true |
2a679020b74deaa56c1718d2bbbe2c116ae454d9 | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/arc094/B/2420739.py | UTF-8 | 217 | 2.703125 | 3 | [] | no_license | from math import ceil
q = int(input())
for _ in range(q):
a, b = sorted(map(int, input().split()))
print(min(b-a, max((ceil((a*b)**0.5-a)-1)*2, (ceil((-(2*a-1)+(1+4*a*b)**0.5)/2)-1)*2-1, 0)) + 2*(a-1)) | true |
c8c26a4bdcd15006fa1b4f4e1609d08890972900 | Python | mvpeng/bpm-playlists | /bpm_playlists/utils.py | UTF-8 | 3,878 | 2.71875 | 3 | [
"MIT"
] | permissive | import requests
import math, random
import json
def createPlaylistWithBPM(playlist_info, access_token):
    """Build a playlist from the user's saved tracks filtered by BPM range.

    playlist_info is a dict with 'playlist_name', 'min_bpm' and 'max_bpm'.
    Returns {'uri': <new playlist uri>, 'tracks': <filtered track list>}.
    """
    tracks = getUsersMostRecentTracks(access_token)
    tracks = filterTracksByBPM(tracks, playlist_info['min_bpm'], playlist_info['max_bpm'], access_token)
    playlistURI = createAndPopulatePlaylist(playlist_info['playlist_name'], tracks, access_token)
    return {'uri': playlistURI, 'tracks': tracks}
def getUsersMostRecentTracks(access_token):
    """Fetch the user's 50 most recently saved tracks from the Spotify API.

    Returns the list of saved-track items, or implicitly None on a non-200
    response (NOTE(review): callers then iterate None -- confirm handling).
    """
    url = 'https://api.spotify.com/v1/me/tracks'
    headers = {'Authorization': 'Bearer ' + access_token}
    params = {'limit': 50, 'offset': 0 }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        return response.json()['items']
def filterTracksByBPM(tracks, minBPM, maxBPM, access_token):
    """Annotate each track with its tempo ('bpm') and keep those within [minBPM, maxBPM].

    minBPM/maxBPM arrive as strings; empty strings mean "no filtering" and
    the full list is returned unchanged.
    """
    url = 'https://api.spotify.com/v1/audio-features'
    headers = {'Authorization': 'Bearer ' + access_token}
    # comma-joined id list for the batch audio-features endpoint
    ids = ""
    for track in tracks:
        ids += str(track['track']['id']) + ","
    response = requests.get(url, headers=headers, params={'ids': ids})
    filtered = []
    if response.status_code == 200:
        results = response.json()['audio_features']
        for i in xrange(len(tracks)):
            tracks[i]['track']['bpm'] = int(results[i]['tempo'])
    # NOTE(review): if the request above failed, 'bpm' was never set and the
    # loop below raises KeyError -- confirm error handling.
    if minBPM != "" and maxBPM != "":
        for track in tracks:
            bpm = track['track']['bpm']
            if bpm >= int(minBPM) and bpm <= int(maxBPM):
                filtered.append(track)
    else:
        filtered = tracks
    return filtered
def createAndPopulatePlaylist(playlistName, tracks, access_token):
    """Create a private playlist for the current user, add *tracks*, return its URI."""
    userId = getUserId(access_token)
    playlist = createPlaylist(userId, playlistName, access_token)
    addTracksToPlaylist(userId, playlist['id'], tracks, access_token)
    return playlist['uri']
def getUserId(access_token):
    """Return the Spotify user id for the given OAuth token (None on a non-200 reply)."""
    headers = {'Authorization': 'Bearer ' + access_token}
    resp = requests.get('https://api.spotify.com/v1/me', headers=headers)
    if resp.status_code != 200:
        return None
    return resp.json()['id']
def createPlaylist(userId, playlistName, access_token):
    """Create a private playlist named *playlistName* for *userId*.

    Returns the playlist JSON on 200/201, implicitly None otherwise.
    """
    url = 'https://api.spotify.com/v1/users/' + userId + '/playlists'
    headers = { 'Authorization': 'Bearer ' + access_token,
                'Content-Type': 'application/json' }
    body = json.dumps({ 'name': playlistName , 'public': False })
    response = requests.post(url, headers=headers, data=body)
    if response.status_code == 200 or response.status_code == 201:
        return response.json()
def addTracksToPlaylist(userId, playlistId, tracks, access_token):
    """POST the URIs of the given tracks into the playlist.

    NOTE(review): the response is never checked, so failures are silent.
    """
    url = 'https://api.spotify.com/v1/users/' + userId + '/playlists/' + playlistId + '/tracks'
    headers = { 'Authorization': 'Bearer ' + access_token,
                'Content-Type': 'application/json' }
    uris = []
    for track in tracks:
        uris.append(track['track']['uri'])
    body = json.dumps({ 'uris': uris })
    response = requests.post(url, headers=headers, data=body)
def getPreviewTracks():
    """Return a fixed set of demo tracks with hard-coded BPM values.

    The mapping pairs Spotify track ids with known tempos; each track
    object from the API gets a 'bpm' key attached.  Returns None
    (implicitly) when the request is not answered with 200.
    NOTE(review): no Authorization header is sent here -- confirm this
    endpoint is reachable without a token in the deployed app.
    """
    url = 'https://api.spotify.com/v1/tracks'
    previewTracks = { '3RiPr603aXAoi4GHyXx0uy': 90,  # Hymn for the Weekend
                      '5yZvaUVyuXfSVUaMumFi6l': 140, # Runnin
                      '5hPWHC0L6z0KLET5rRkpTR': 175, # Better Now
                      '6MDijuuArPJv1vbp7K1x3f': 94 } # Genghis Khan
    response = requests.get(url, params={'ids': ','.join(previewTracks.keys())})
    if response.status_code == 200:
        tracks = response.json()['tracks']
        for t in tracks:
            t['bpm'] = previewTracks[t['id']]
        return tracks
def generateRandomString(length):
    """Return a random alphanumeric string of the given length.

    random.choice replaces the manual floor(random() * len) indexing, and
    range replaces the Python-2-only xrange (NameError on Python 3).
    """
    possible = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    return ''.join(random.choice(possible) for _ in range(length))
22cbc22c47b1aca65cc070efa07134efa358efa9 | Python | lasyakoneru/Lasya-Koneru | /proj04.py | UTF-8 | 7,172 | 4 | 4 | [] | no_license |
###########################################################
# Computer Project #4
#
# Algorithm
# prompt for a file
# open a file
# read file
# find average salary
# find median income
# find salary range
# find cumulative percentage
# loop and display data
###########################################################
import pylab
def do_plot(x_vals,y_vals,year):
    '''Plot x_vals vs. y_vals where each is a list of numbers of the same length.

    x_vals -- income values (x axis)
    y_vals -- cumulative percent values (y axis)
    year   -- year used in the plot title
    '''
    pylab.xlabel('Income')
    pylab.ylabel('Cumulative Percent')
    pylab.title("Cumulative Percent for Income in "+str(year))
    pylab.plot(x_vals,y_vals)
    # Display the figure.
    pylab.show()
def open_file():
    '''Prompt repeatedly for a year, open the matching 'year<YYYY>.txt'
    data file for reading, and return (file_object, year_string).'''
    while True:
        year_str = input("Enter a year where 1990 <= year <= 2015: ")
        file = 'year' + year_str + '.txt' # constructs file name
        try:
            fp = open(file, 'r')#opens file
            return fp, year_str
        except FileNotFoundError: # if file not found
            # NOTE(review): 1999 gets a special "file name" error message
            # even though the failure (missing file) is the same.
            if year_str == '1999':
                print('Error in file name: year' + year_str + '.txt \
Please try again.')
            else:
                print("Error in year. Please try again.")
def read_file(fp):
    '''Read the income data file and return a list of rows.

    Skips the first two header lines, then for each whitespace-split data
    line builds (commas stripped, numeric-converted):
        [col2 as float or None, col0 as float, col3 as int, col4 as int,
         col5 as float, col6 as float, col7 as float]
    i.e. the returned row order is NOT the file column order.
    '''
    L1 = []
    fp.readline()#skips lines
    fp.readline()
    reader = fp.readlines()
    for line in reader: #every individual line in the file
        line = line.split()
        l = []
        line_0 = line[0].replace(',','') #removes commas
        line_0 = float(line_0)
        try:
            line_2 = line[2].replace(',','')
            line_2 = float(line_2)
            l.append(line_2) #adds to the small list of the line
        except ValueError:
            # Non-numeric column 2 is stored as None.
            l.append(None)
        line_5 = float(line[5])
        line_6 = line[6].replace(',','')
        line_6 = float(line_6)
        line_7 = line[7].replace(',','')
        line_7 = float(line_7)
        line_3 = line[3].replace(',','')
        line_3 = int(line_3)
        line_4 = line[4].replace(',','')
        line_4 = int(line_4)
        l.append(line_0)
        l.append(line_3)
        l.append(line_4)
        l.append(line_5)
        l.append(line_6)
        l.append(line_7)
        L1.append(l) # adds small list to the big list of all the lines
    return L1 # returns big list
def find_average(data_lst):
    '''Return the mean income: aggregate income (index 5 summed over all
    rows) divided by the cumulative count stored at index 3 of the final
    row.'''
    total_income = sum(row[5] for row in data_lst)
    household_count = data_lst[-1][3]
    return total_income / household_count
def find_median(data_lst):
    ''' Return the median income: the income (index 6) of the row whose
    cumulative percent (index 4) is closest to 50, searching the nearest
    bracket from below (x) and from above (y).

    NOTE(review): if no row has 0 < |pct| <= 50 (or none has >= 50) then
    avg1 (resp. avg2) is never assigned and this raises
    UnboundLocalError -- confirm the input always spans 50%.
    '''
    x = 0
    y = 100
    for lst in data_lst:
        if x < abs(lst[4]) <= 50: #using abs and finding if less than 50
            x = abs(lst[4])
            avg1 = lst[6]
        elif y > abs(lst[4]) >= 50:#using abs and finding if greater than 50
            y = abs(lst[4])
            avg2 = lst[6]
    if (50 - x) > (y - 50): #finding which one is smaller
        avg = avg2#setting to avg
    else:
        avg = avg1
    return avg
def get_range(data_lst, percent):
    '''Return ((lower_income, upper_income), cumulative_percent, mean_income)
    taken from the first row whose cumulative percent (index 4) is at
    least the requested percent; an empty tuple when no row qualifies.'''
    threshold = float(percent)
    for row in data_lst:
        if row[4] >= threshold:
            return ((row[1], row[0]), row[4], row[6])
    return ()
def get_percent(data_lst, salary):
    '''Return ((lower_income, upper_income), cumulative_percent) from the
    LAST row whose income bracket [index 1, index 0] contains the given
    salary (the original loop keeps overwriting on every match); an empty
    tuple when no bracket matches.'''
    salary = float(salary)
    result = ()
    for row in data_lst:
        lower, upper = row[1], row[0]
        if lower <= salary <= upper:
            result = ((lower, upper), row[4])
    return result
def main():
    '''Interactive driver: load a year's income data, print mean/median,
    optionally plot the first 40 (income, cumulative percent) points, then
    answer (r)ange / (p)ercent queries until the user enters nothing.'''
    fp, year_str = open_file()#opens file
    data_lst = read_file(fp)#reads file
    avg = find_average(data_lst) #finds average
    median = find_median(data_lst) #finds median
    print('Year      Mean            Median   ')
    median = '{:,.2f}'.format(median) #formatting to 2 decimal places
    median = str(median)
    avg = '{:,.2f}'.format(avg)
    avg = str(avg)
    print(year_str+'  $'+avg+'    $'+ median) #concatination
    response = input("Do you want to plot values (yes/no)? ")
    if response.lower() == 'yes': # if yes
        x_vals = []
        y_vals = []
        for i in range(40): #through 40
            income = float(data_lst[i][0])
            cumulative_percent = float(data_lst[i][5])
            x_vals.append(income) # append to list
            y_vals.append(cumulative_percent) # append to list
        do_plot(x_vals,y_vals, year_str) #plot
    choice = input("Enter a choice to get (r)ange, (p)ercent,\
 or nothing to stop: ")
    while choice:
        choice = choice.lower() #lowercase
        if choice == '':
            break
        elif choice == 'r':
            try:
                percent = float(input("Enter a percent: "))
                if (percent >= 0) and (percent <= 100): #if valid print
                    range_num = get_range(data_lst,percent)#call function
                    print("{:>6.2f}% of incomes are below ${:<13,.2f}.".\
format(percent,range_num[0][0]))
                else:
                    print('Error in percent. Please try again') #error message
            except ValueError:
                print('Error in percent. Please try again')#error message
        elif choice.lower() == 'p':
            try:
                income = float(input("Enter an income: "))
                if income >= 0:#if valid print
                    percent_num = get_percent(data_lst,income)#call function
                    print("An income of ${:<13,.2f} is in the top {:>6.2f}% \
of incomes.".format(income,percent_num[1]))
                else:
                    print('Error: income must be positive')#error message
            except ValueError:
                print('Error: income must be positive')#error message
        elif choice == '':
            break
        else:
            print('Error in selection.')#error message
        choice = input("Enter a choice to get (r)ange, (p)ercent, or nothing\
 to stop: ")
if __name__ == "__main__":
main() | true |
768f34eb0d9631a2492adf6397863c87b622d767 | Python | COHRINT/robosub_controller | /estimator_wrapper.py | UTF-8 | 2,445 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
"""
ROS wrapper for RoboSub control system estimator. Queue measurement messages
from sensors and runs an instance of a UKF.
"""
import rospy
from .estimator import UKF
from .helpers import load_config
class EstimatorWrapper(object):
    """
    ROS wrapper for RoboSub control system estimator. Queues measurement messages
    from sensors and runs an instance of a UKF.

    NOTE(review): imu_topic, depth_topic, estimator_topic and the message
    classes ImuMsg/DepthMsg/StateMsg are referenced below but never
    imported or defined in this file -- presumably they should come from
    ros_cfg / message imports; as written __init__ raises NameError.
    """
    def __init__(self,filter_cfg,ros_cfg):
        """
        Inputs:
            filter_cfg -- dictionary of parameters for filter
            ros_cfg -- dictionary of parameters for ros node
        """
        # create filter
        self.filter = UKF(**filter_cfg)
        # initialize ros node
        rospy.init_node('estimator')
        # subscribe to sensors
        rospy.Subscriber(imu_topic, ImuMsg, self.imu_callback)
        rospy.Subscriber(depth_topic, DepthMsg, self.depth_callback)
        # create publisher for state estimate and covariance
        self.pub = rospy.Publisher(estimator_topic, StateMsg)
        # start filter update loop
        # NOTE(review): this loop never returns, so the constructor blocks
        # until ROS shutdown.
        while not rospy.is_shutdown():
            # update filter
            self.filter.update()
            # generate state msg
            msg = self.gen_state_msg(self.filter.x)
            self.pub.publish(msg)
    def gen_state_msg(self,x,P=None):
        """
        Generates state estimate message to be published.
        x -- state estimate vector; P -- optional covariance (unused).
        NOTE(review): the message is returned empty -- population is TODO.
        """
        msg = StateMsg()
        # code here to populate message
        return msg
    def imu_callback(self,msg):
        """
        IMU measurement callback function.
        Add measurement to measurement queue.
        Inputs:
            msg -- IMU measurement ROS message
        Returns:
            none
        """
        self.filter.measurement_queue.put(msg)
    def depth_callback(self,msg):
        """
        Depth sensor measurment callback function.
        Adds measurement to measurement queue.
        Inputs:
            msg -- depth sensor measurement ROS message
        Returns:
            none
        """
        self.filter.measurement_queue.put(msg)
if __name__ == "__main__":
    # load filter and ros configs
    filter_cfg_fn = '../config/config.yaml'
    filter_cfg = load_config(filter_cfg_fn)
    ros_cfg_fn = '../config/ros_config.yaml'
    # Fixed NameError: the original called load_config(cfg_fn) with the
    # undefined name 'cfg_fn' instead of ros_cfg_fn.
    ros_cfg = load_config(ros_cfg_fn)
    # instantiate wrapper
    EstimatorWrapper(filter_cfg=filter_cfg, **ros_cfg)
106f6987dbaec7ef18cab6fae683c51684cff0d5 | Python | yeomkyeorae/algorithm | /SWEA/D3/SWEA_5178_sum_of_nodes.py | UTF-8 | 593 | 2.890625 | 3 | [] | no_license | tries = int(input())
# SWEA 5178 ("sum of nodes"): the tree is stored heap-style, so node i's
# parent is i // 2.  Leaf/seed values propagate upward by summing children
# into parents; the answer printed is node l's accumulated value.
# ('tries' is read from stdin on this file's first line.)
for t in range(1, tries + 1):
    n, m, l = map(int, input().split())
    V = [0] * (n + 1)
    for _ in range(m):
        ix, value = map(int, input().split())
        V[ix] = value
    visited = [0] * (n + 1)
    # Sweep bottom-up until only slot 0 (unused) is still zero.
    # NOTE(review): a legitimate node value of 0 would make this loop spin
    # forever -- assumes all inputs are non-zero.
    while V.count(0) != 1:
        for i in range(len(V) - 1, 0, -1):
            if not visited[i]:
                visited[i] = 1
                if i // 2 > 0:
                    if V[i // 2] == 0:
                        V[i // 2] = V[i]
                    else:
                        V[i // 2] = V[i] + V[i // 2]
    print('#{} {}'.format(t, V[l]))
| true |
d0a0b11244be0655c27c4d0b9746bd0f8a91f80c | Python | t-lanigan/leet-code | /top-interview-questions/math/fizz-buzz.py | UTF-8 | 667 | 3.75 | 4 | [] | no_license | from typing import List
class Solution:
    """LeetCode FizzBuzz: map the numbers 1..n to their words."""

    def fizzBuzz(self, n: int) -> List[str]:
        """Return the FizzBuzz sequence for the numbers 1 through n."""
        sequence = []
        for value in range(1, n + 1):
            sequence.append(self.getFizzBuzz(value))
        return sequence

    def getFizzBuzz(self, n: int) -> str:
        """Return 'Fizz', 'Buzz', 'FizzBuzz', or str(n) for one number."""
        words = [word for divisor, word in ((3, "Fizz"), (5, "Buzz"))
                 if n % divisor == 0]
        return "".join(words) or str(n)
solution = Solution()
# Expected FizzBuzz output for n == 15, used as a smoke test below.
test1 = [
    "1",
    "2",
    "Fizz",
    "4",
    "Buzz",
    "Fizz",
    "7",
    "8",
    "Fizz",
    "Buzz",
    "11",
    "Fizz",
    "13",
    "14",
    "FizzBuzz"
]
assert solution.fizzBuzz(15) == test1
print("All tests passed!") | true |
ecbbb7a26ee234125c92f74785a41b8b387d8be5 | Python | jkobrin/pos1 | /format_time_diff.py | UTF-8 | 1,688 | 3.109375 | 3 | [] | no_license |
def format_time_from_now(now, other_time):
total_secs = int((now - other_time).total_seconds())
is_future = total_secs < 0
total_secs = abs(total_secs)
total_minutes, secs = divmod(total_secs, 60)
total_hours, minutes = divmod(total_minutes, 60)
total_days, hours = divmod(total_hours, 24)
if total_hours < 1:
result = '{minutes}m'.format(**locals())
elif total_hours < 4:
result = '{hours}h {minutes}m'.format(**locals())
elif now.date() == other_time.date():
result = other_time.strftime('%I:%M %p')
elif is_future and total_days < 6:
result = other_time.strftime('%a %I:%M %p')
else:
result = other_time.strftime('%a %b %d %I:%M %p')
if is_future: sign = '-'
else: sign = '+'
return sign + result;
if __name__ == '__main__':
datetimeFormat = '%Y-%m-%d %H:%M:%S'
now = datetime.datetime.strptime("2020-06-14 13:17:00", datetimeFormat)
time_strs = (
"2020-06-03 03:17:00",
"2020-06-13 13:17:00",
"2020-06-13 23:17:00",
"2020-06-14 19:17:00",
"2020-06-14 9:17:00",
"2020-06-14 11:17:00",
"2020-06-14 12:17:00",
"2020-06-14 13:15:54",
"2020-06-14 13:16:04",
"2020-06-14 13:17:00",
"2020-06-14 13:17:18",
"2020-06-14 13:17:38",
"2020-06-14 13:18:38",
"2020-06-14 14:17:00",
"2020-06-14 15:17:00",
"2020-06-17 15:17:00",
"2020-06-20 9:17:00",
"2020-06-20 20:17:00",
"2020-06-21 9:17:00",
"2020-06-21 19:17:00",
"2020-06-22 15:17:00",
"2020-06-27 15:17:00",
"2020-07-27 15:17:00",
)
for time_str in time_strs:
other_time = datetime.datetime.strptime(time_str, datetimeFormat)
print format_time_from_now(now, other_time)
| true |
53081ebeef1144b49dfeda1bdb78c6c02dfe0102 | Python | JuliaClaireLee/old-python-course-work | /untitled folder 2/lab6.py | UTF-8 | 246 | 3.125 | 3 | [] | no_license | mountains = {}
# Populate the lookup of famous peaks ('mountains' dict created above)
# and report each name together with its height.
mountains["Mount Everest"] = "29,029 feet"
mountains["k2"] = "28,251 feet"
mountains["Kangchenjunga"] = "28,169 feet"
for mountain_name, height in mountains.items():
    print("\nmountain: %s" % mountain_name)
    print("height: %s" % height)
| true |
a3c2815a25e497c15d55d7af267ffabf4fca7327 | Python | lobo-death/tccdatascience | /scripts/models/models.py | UTF-8 | 2,504 | 2.875 | 3 | [] | no_license | from peewee import *
from peewee import PostgresqlDatabase
from environs import Env
# Read database credentials from the environment (.env file via environs).
env = Env()
env.read_env()
postgres_database = env("POSTGRES_DATABASE")
postgres_user = env("POSTGRES_USER")
postgres_password = env("POSTGRES_PASSWORD")
postgres_host = env("POSTGRES_HOST")
# Shared Postgres connection used as Meta.database by every model below.
db = PostgresqlDatabase(
    postgres_database,
    user=postgres_user,
    password=postgres_password,
    host=postgres_host)
class User(Model):
    # Primary key is assigned by the application (not auto-incremented).
    id = IntegerField(primary_key=True)
    name = CharField(max_length=125)
    street = CharField(max_length=255)
    class Meta:
        database = db
        db_table = 'user'
class Product(Model):
    # Primary key is assigned by the application (see the seed data below).
    id = IntegerField(primary_key=True)
    name = CharField(max_length=75)
    # Unit price; DoubleField stores floats (not exact decimals).
    price = DoubleField()
    class Meta:
        database = db
        db_table = 'product'
class Purchase(Model):
    # Auto-incrementing surrogate key; one purchase belongs to one user.
    id = AutoField()
    user = ForeignKeyField(User)
    class Meta:
        database = db
        db_table = 'purchase'
class Items(Model):
    # Line item linking a purchase to a product with a quantity.
    id = AutoField()
    purchase = ForeignKeyField(Purchase)
    product = ForeignKeyField(Product)
    qt = DoubleField()
    class Meta:
        database = db
        db_table = 'items'
if __name__ == "__main__":
    # Create each table, seeding the product catalogue; an OperationalError
    # is taken to mean the table already exists.
    try:
        User.create_table()
    except OperationalError:
        # Fixed: the original had a bare 'print' on one line and the string
        # literal on the next -- on Python 3 both are silent no-ops, so the
        # message was never printed.
        print("User table already exists!")
    try:
        Product.create_table()
        data_source = [
            {'id': 1, 'name': 'Maminha Angus', 'price': 45.99},
            {'id': 2, 'name': 'Picanha Argentina Angus', 'price': 79.99},
            {'id': 3, 'name': 'Chorizo Angus', 'price': 52.99},
            {'id': 4, 'name': 'Entrecรดt Angus', 'price': 59.99},
            {'id': 5, 'name': 'Peito', 'price': 16.99},
            {'id': 6, 'name': 'Tulipinha', 'price': 22.99},
            {'id': 7, 'name': 'Coxa', 'price': 19.99},
            {'id': 8, 'name': 'Coraรงรฃo', 'price': 12.99},
            {'id': 9, 'name': 'Lombinho', 'price': 13.99},
            {'id': 10, 'name': 'Panceta', 'price': 10.99},
            {'id': 11, 'name': 'Linguiรงa Toscana', 'price': 9.99}
        ]
        for data_dict in data_source:
            Product.create(**data_dict)
        print("All tables created and initials data inserted!")
    except OperationalError:
        print("Product table already exists!")
    try:
        Purchase.create_table()
    except OperationalError:
        print("Purchase table already exists!")
    try:
        Items.create_table()
    except OperationalError:
        print("Items table already exists!")
d4b20707e6e1504af9a6a5a7844862d444e81f8c | Python | haonancool/OnlineJudge | /leetcode/python/majority_element_ii.py | UTF-8 | 922 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | class Solution(object):
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
n1 = n2 = None
c1 = c2 = 0
for num in nums:
if n1 == num:
c1 += 1
elif n2 == num:
c2 += 1
elif c1 == 0:
n1 = num
c1 = 1
elif c2 == 0:
n2 = num
c2 = 1
else:
c1 -= 1
c2 -= 1
c1 = c2 = 0
for num in nums:
if n1 == num:
c1 += 1
elif n2 == num:
c2 += 1
ret = []
n = len(nums)
if c1 > n/3:
ret.append(n1)
if c2 > n/3:
ret.append(n2)
return ret
if __name__ == '__main__':
    sol = Solution()
    # Smoke test: both 8 (x2) and 7 (x3) exceed 5/3 occurrences.
    # print() with one argument behaves identically on Python 2 and 3;
    # the original bare 'print expr' statement was Python-2-only.
    print(sol.majorityElement([8, 8, 7, 7, 7]))
| true |
9a85e268eaff98a756a533698eb713e45ce8c09e | Python | rollila/python-udp | /shared/server_to_client.py | UTF-8 | 322 | 3.046875 | 3 | [] | no_license | import struct
def serialize(id, location):
    """Pack an entity id and its (x, y) location into bytes as three
    native-order unsigned ints ('III')."""
    x, y = location
    return struct.pack('III', id, x, y)
def deserialize(packet):
    """Unpack a packet produced by serialize() into a dict carrying the
    entity 'id' and its 'location' as an (x, y) tuple."""
    entity_id, x, y = struct.unpack('III', packet)
    return {'id': entity_id, 'location': (x, y)}
def item_size():
    """Return the byte size of one serialized item (the 'III' struct)."""
    return struct.Struct('III').size
| true |
c3632dcf8362abdae43befb95c994ec3e41c0428 | Python | daniel-reich/ubiquitous-fiesta | /ZwmfET5azpvBTWoQT_12.py | UTF-8 | 172 | 2.578125 | 3 | [] | no_license |
import re
โ
def valid_word_nest(word, nest):
rgx = re.compile(word)
while nest and len(rgx.findall(nest)) == 1:
nest = nest.replace(word, '')
return not nest
| true |
e2111004f36226d628504b02066a2ddc3b13298e | Python | OZ-T/leetcode | /2/course_schedule2.py | UTF-8 | 947 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
graph = {i: set() for i in range(numCourses)}
in_degree = {i: 0 for i in range(numCourses)}
for (e, s) in prerequisites:
graph[s] |= {e}
in_degree[e] += 1
queue = [i for i in range(numCourses) if in_degree[i] == 0]
visited = set(queue)
for node in queue:
for neighbour in graph[node]:
if neighbour in visited:
return []
in_degree[neighbour] -= 1
if in_degree[neighbour] == 0:
visited.add(neighbour)
queue += [neighbour]
return queue if len(queue) == numCourses else []
# Smoke test: the prerequisites contain the cycle 1 -> 2 -> 3 -> 1,
# so an empty ordering ([]) is expected.
s = Solution()
print(s.findOrder(4, [[1, 0], [2, 1], [3, 2], [1, 3]]))
| true |
5e5098c4193a1f1290b9c7e79372c728f51e6639 | Python | Adriantega12/log-fibonacci | /fibonacci.py | UTF-8 | 484 | 3.8125 | 4 | [] | no_license | import numpy as np
def expBySquaring(x, n):
    """Raise the square matrix x to the positive integer power n using
    iterative binary exponentiation: O(log n) matrix products via .dot().
    Requires n >= 1 (n == 0 is not supported, as in the original)."""
    result = None
    square = x
    remaining = n
    while remaining:
        if remaining & 1:
            result = square if result is None else result.dot(square)
        square = square.dot(square)
        remaining >>= 1
    return result
# Driver: read n and report fib(n) via the matrix power [[1,1],[1,0]]^(n-1),
# whose top-left entry is the n-th Fibonacci number.
n = int(input('n = '))
# Plain 'object' dtype keeps arbitrary-precision Python ints; the old
# 'np.object' alias used here was removed in NumPy 1.24 (AttributeError).
m = np.array([[1, 1], [1, 0]], dtype=object)
if n > 1:
    fib_m = expBySquaring(m, n - 1)
elif n == 1:
    fib_m = np.array([[1]], dtype=object)
elif n == 0:
    fib_m = np.array([[0]], dtype=object)
# NOTE(review): n < 0 leaves fib_m undefined and the print raises NameError.
print(f'fibonacci({n}) = {fib_m[0, 0]}')
927983ea39a2c81e1b96dbbe621f9aedefc8ae94 | Python | A159951123/MIDTERN | /3.py | UTF-8 | 168 | 3.3125 | 3 | [] | no_license | animal=["rat","ox","tiger","rabbit","dragon","snake","horse","sheep","monkey","rooster","dog","pig"]
# Chinese zodiac lookup using the 'animal' list defined above: year 4 of
# the Gregorian calendar maps to index 0 (rat), hence the +8 offset.
Year = int(input("請輸入年份"))  # prompt "enter a year"; repaired from mojibake that broke the literal
print(animal[(Year + 8) % 12])
ae44e5b554b5bcb82f1f51c7b9a577a47e202f7f | Python | furlong-cmu/NTRTsim | /bin/python_scripts/src/utilities/file_utils.py | UTF-8 | 354 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | class FileUtils:
"""
Contains utilities related to file system operations.
"""
    @staticmethod
    def open(filePath, permArgs):
        """
        Wraps the open command. To ease testing, all open() calls should be done
        using this method, rather than invoking open() directly.

        :param filePath: path of the file, forwarded to builtin open().
        :param permArgs: mode string (e.g. 'r', 'wb'), forwarded as the
            second argument of builtin open().
        :returns: the file object returned by builtin open().
        """
        # Inside the method body 'open' resolves to the builtin, not this
        # static method (class attributes are not in the lookup chain), so
        # this is a plain delegation -- no recursion.
        return open(filePath, permArgs)
| true |
a70e7ac44908c0ef4a1498e3581a4775d0d3bb3a | Python | thestrawberryqueen/python | /2_intermediate/chapter13/solutions/polar_coordinates.py | UTF-8 | 769 | 4.46875 | 4 | [
"MIT"
] | permissive | """
Write a class called PolarCoordinates which will take a
value called radius and angle. When we print this class,
we want the coordinates in Cartesian coordinates, or we want
you to print two values: x and y. (If you don't know the
conversion formula, x = radius * cos(angle), y = radius * sin(angle).
Use Python's built-in math library for the cosine and sine operators)
"""
# write your code below
import math
class PolarCoordinates:
    """A point given by radius and angle (radians); printing it shows the
    Cartesian pair "x,y" via x = r*cos(a), y = r*sin(a)."""

    def __init__(self, radius, angle):
        self.radius = radius
        self.angle = angle

    def __str__(self):
        # The Cartesian components are (re)computed and cached on the
        # instance every time the object is printed, as in the original.
        self.x = self.radius * math.cos(self.angle)
        self.y = self.radius * math.sin(self.angle)
        return "{},{}".format(self.x, self.y)
# Demo: radius 2 at angle pi -> prints "-2.0,<tiny float>" (sin(pi) is not
# exactly zero in floating point).
group = PolarCoordinates(2, math.pi)
print(str(group))
| true |
8bac36549c1114d9321f1a5e6396d42ed3a61f7b | Python | jorgesanme/Python | /Modulo_POO/ConversorTemperatura.py | UTF-8 | 1,697 | 3.71875 | 4 | [] | no_license | # se define la classe
class Termometro():
    """Thermometer holding a temperature and its unit ('C' or 'F').

    Three bugs fixed from the original:
      * __init__ was misspelled '__init_', so it was never invoked and
        instances had no attributes at all;
      * mide() read 'self.unidadM', a non-existent attribute
        (AttributeError whenever a unit argument was passed);
      * mide() called 'self.__conversor', but the method's real (public)
        name is 'conversor'.
    (The degree glyph in the format strings is kept byte-for-byte.)
    """

    def __init__(self):
        # Defaults: Celsius at 0 degrees.
        self.__unidadM = 'C'
        self.__temperatura = 0

    def conversor(self, temperatura, unidad):
        """Convert a temperature FROM the given unit to the other one and
        return it formatted; unknown units yield an error string."""
        if unidad == 'C':
            return "{}ยบ F".format(temperatura * 9/5 +32)
        elif unidad == 'F':
            return "{}ยบ C".format((temperatura - 32) * 5/9)
        else:
            return "unidad incorrecta"

    def __str__(self):
        # Current reading in its stored unit.
        return "{}ยบ {}".format(self.__temperatura, self.__unidadM)

    def unidadMedida(self, unidM=None):
        """Getter/setter for the unit: no argument reads it; only 'C' or
        'F' are accepted when setting."""
        if unidM == None:
            return self.__unidadM
        else:
            if unidM == 'F' or unidM == 'C':
                self.__unidadM = unidM

    def temp(self, temperatura=None):
        """Getter/setter for the temperature value."""
        if temperatura == None:
            return self.__temperatura
        else:
            self.__temperatura = temperatura

    def mide(self, unidM=None):
        """Return the reading, converted when a different valid unit is
        requested; otherwise the reading in its stored unit."""
        if unidM == None or unidM == self.__unidadM:
            return self.__str__()
        else:
            if unidM == 'F' or unidM == 'C':
                return self.conversor(self.__temperatura, self.__unidadM)
            else:
                return self.__str__()
891458f4dd6c36d451efad8bd4e7eafe616fe530 | Python | mrheyday/Sovryn-smart-contracts | /tests/protocol/addingMargin/test_deposit_collareral_using_TestToken.py | UTF-8 | 1,804 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | '''
Test adding more margin to existing loans.
1. Deposit more collateral
2. Should fail to deposit collateral to an non-existent loan
3. Should fail to deposit 0 collateral
'''
import pytest
from brownie import Contract, Wei, reverts
from fixedint import *
import shared
def test_deposit_collateral(sovryn,set_demand_curve,lend_to_pool,open_margin_trade_position, RBTC):
    """Deposit extra collateral into an open margin trade and verify both
    the DepositCollateral event fields and the loan's increased balance."""
    #prepare the test
    set_demand_curve()
    (receiver, _) = lend_to_pool()
    (loan_id, trader, loan_token_sent, leverage_amount) = open_margin_trade_position()
    startCollateral = sovryn.getLoan(loan_id).dict()["collateral"]
    deposit_amount = startCollateral/2
    #deposit collateral to add margin to the loan created above
    RBTC.approve(sovryn, deposit_amount)
    tx = sovryn.depositCollateral(loan_id, deposit_amount)
    #verify the deposit collateral event
    print(tx.info())
    assert(tx.events['DepositCollateral']['loanId'] == loan_id)
    assert(tx.events['DepositCollateral']['depositAmount'] == deposit_amount)
    #make sure, collateral was increased
    endCollateral = sovryn.getLoan(loan_id).dict()["collateral"]
    assert(endCollateral-startCollateral == deposit_amount)
def test_deposit_collateral_to_non_existent_loan(sovryn, RBTC):
    """Depositing to the non-existent loan id 0 must revert with the
    'loan is closed' reason string."""
    #try to deposit collateral to a loan with id 0
    RBTC.approve(sovryn, 1e15)
    with reverts("loan is closed"):
        sovryn.depositCollateral("0", 1e15)
def test_deposit_collateral_0_value(sovryn,set_demand_curve,lend_to_pool,open_margin_trade_position, RBTC):
    """A zero-amount deposit on a valid loan must revert with the
    'depositAmount is 0' reason string."""
    #prepare the test
    set_demand_curve()
    (receiver, _) = lend_to_pool()
    (loan_id, trader, loan_token_sent, leverage_amount) = open_margin_trade_position()
    with reverts("depositAmount is 0"):
        sovryn.depositCollateral(loan_id, 0)
| true |
1f998c353b64a9a2a8cb33a80e8a83d0f3b88bbc | Python | aldebjer/pysim | /pysim/tests/simulation_test.py | UTF-8 | 4,046 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | ๏ปฟ"""Tests various aspects of the Sim object that is not tested in other
places
"""
import numpy as np
import pytest
from pysim.simulation import Sim
from pysim.systems import VanDerPol
from pysim.systems import MassSpringDamper
from pysim.systems import DiscretePID
from pysim.systems import RigidBody
from pysim.systems import LogisticMap
from pysim.systems.python_systems import VanDerPol as PyVanDerPol
__copyright__ = 'Copyright (c) 2014-2016 SSPA Sweden AB'
# Runs once with the C++-backed and once with the pure-Python Van der Pol
# system implementation.
@pytest.mark.parametrize("test_class",[VanDerPol,PyVanDerPol])
def test_gettime(test_class):
    """Test that the elapsed time is returned from the simulation"""
    sys = test_class()
    sim = Sim()
    sim.add_system(sys)
    integrationlength = 2.0
    assert sim.get_time() == 0.0
    sim.simulate(integrationlength, 0.1)
    assert sim.get_time() == integrationlength
def test_connected_system():
    """Check that the time for stored values in a discrete system is
    regularly spaced"""
    #Create Simulaton
    sim = Sim()
    #Create, setup and add system to simulation
    sys = MassSpringDamper()
    sys.store("x1")
    sys.inputs.b = 50
    sys.inputs.f = 0
    sim.add_system(sys)
    controlsys = DiscretePID()
    controlsys.inputs.refsig = 1.0
    controlsys.inputs.p = 1
    controlsys.inputs.plim = 400.0
    controlsys.inputs.i = 0
    controlsys.inputs.stepsize = 0.3
    controlsys.store("outsig")
    sim.add_system(controlsys)
    # Close the loop: plant states feed the PID, PID output drives force f.
    sys.connections.add_connection("x1", controlsys, "insig")
    sys.connections.add_connection("x2", controlsys, "dsig")
    controlsys.connections.add_connection("outsig", sys, "f")
    controlsys.inputs.d = 1
    sim.simulate(5, 0.1)
    # Consecutive stored timestamps must all differ by the 0.1 s interval.
    assert np.max(np.abs(np.diff(controlsys.res.time))-0.1) < 1e-14
    assert np.max(np.abs(np.diff(sys.res.time))-0.1) < 1e-14
def test_multiple_simulationobject():
    """Tests that it is possible to run multiple instances of the Sim object
    and that the results stay the same."""
    sim = Sim()
    sys = MassSpringDamper()
    sys.store("x1")
    sys.inputs.b = 50
    sys.inputs.f = 0
    sim.add_system(sys)
    sim.simulate(5, 0.1)
    # Reference trajectory against which 60 fresh runs are compared.
    xref = sys.res.x1
    for dummy in range(60):
        #Create Simulaton
        sim = Sim()
        sys = MassSpringDamper()
        sys.store("x1")
        sys.inputs.b = 50
        sys.inputs.f = 0
        sim.add_system(sys)
        sim.simulate(5, 0.1)
        x = sys.res.x1
        assert np.all(xref == x)
def test_state_break_larger():
    """Stop the simulation once the value of a state is
    larger than a preset value
    """
    sim = Sim()
    sys = VanDerPol()
    # Break as soon as state y exceeds 1.0 instead of running the full 20 s.
    sys.add_break_greater("y",1.0)
    sim.add_system(sys)
    sim.simulate(20,0.01)
    #If correct the simulation should break at time 0.79
    assert sys.res.time[-1] == 0.79
def test_state_break_smaller():
    """Stop the simulation once the value of a state is
    smaller than a preset value
    """
    sim = Sim()
    sys = VanDerPol()
    # Break as soon as state x drops below -1.0.
    sys.add_break_smaller("x",-1.0)
    sim.add_system(sys)
    sim.simulate(20,0.01)
    #If correct the simulation should break at time 2.52
    assert sys.res.time[-1] == 2.52
def test_boost_vector_states():
    """Perform a basic simulation of a system with boost vector states"""
    sim = Sim()
    sys = RigidBody()
    sys.store("position")
    # Constant 1 N force on a 1 kg body along x.
    sys.inputs.force = [1.0,0.0,0.0]
    sys.inputs.mass = 1.0
    sim.add_system(sys)
    sim.simulate(20,0.01)
    pos = sys.res.position
    # Expected displacement 0.5*(F/m)*t^2 = 0.5*1*20^2 = 200 m along x.
    diff = np.abs(pos[-1,:]-[200,0,0])
    assert np.max(diff) <= 1
def test_discrete_system():
    """Test a discrete system to make sure the results
    are correct. The system tested is a logistical map
    system and it is compared to a function created in
    this test which also gives the solution.
    """
    lm = LogisticMap()
    lm.inputs.r = 3.6
    lm.states.x = 0.5
    lm.store("x")
    sim = Sim()
    sim.add_system(lm)
    sim.simulate(10,0.1)
    # Reference: iterate x <- r*x*(1-x) by hand for the same 10 steps.
    x = [0.5]
    r = 3.6
    for dummy in range(9):
        x.append(r*x[-1]*(1-x[-1]))
    # The stored trace is sampled every 10th point to match the steps.
    assert np.all(np.abs(lm.res.x[1::10]-x)<1e-18)
| true |
0638ebd9b7b79bb811ceebc4f96c3790fb3da964 | Python | pombredanne/test-performance-run-in-a-loop-vs-run-standalone | /run-in-a-loop.py | UTF-8 | 221 | 2.84375 | 3 | [
"CC0-1.0"
] | permissive | from timeit import default_timer as timer
# Micro-benchmark: time 100k individual calls of sum() over a 500-element
# range and report the fastest and slowest single call in microseconds.
a = range(500)
times = []
for i in range(100000):
    st = timer()
    sum(a)
    times.append(timer() - st)
print("min=%.2f us, max=%.2f us" % (min(times)*1e6, max(times)*1e6))
| true |
596d88472805c3ae6119d2c888717837a640581e | Python | ramalho/python-para-desenvolvedores | /07-bib-padrรฃo/p80.py | UTF-8 | 474 | 4 | 4 | [] | no_license | import datetime
# datetime() takes as parameters:
# year, month, day, hour, minute, second
# and returns a datetime object
dt = datetime.datetime(2020, 12, 31, 23, 59, 59)
# date and time objects can be created
# from a datetime object
data = dt.date()
hora = dt.time()
# How much time remains until 2020-12-31
dd = dt - dt.today()
print('Data:', data)
print('Hora:', hora)
print('Quanto tempo falta para 31/12/2020:',
      str(dd).replace('days', 'dias'))
| true |
58e402f8485087d2cd4434e9b496bb3a45211146 | Python | jeffrimko/Verace | /tests/linefunc_test_1.py | UTF-8 | 1,609 | 2.5625 | 3 | [
"MIT"
] | permissive | """Tests the basic usage of VerChecker."""
##==============================================================#
## SECTION: Imports #
##==============================================================#
from testlib import *
from verace import VerChecker
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class TestCase(unittest.TestCase):
    """Exercises VerChecker.include() with per-line parsing callbacks.

    Note the unconventional 'test' in place of 'self', used throughout.
    (unittest is presumably re-exported by 'from testlib import *'.)
    """
    def setUp(test):
        # Fresh checker for each test method.
        test.verchk = VerChecker("Basic Version", __file__)
    def test_version(test):
        """A matching 'version =' line yields the quoted version string."""
        def getver(line):
            if "version =" in line:
                return line.split('"')[1]
        test.verchk.include("checkfiles/multi.txt", func=(getver, "line"))
        test.assertEqual(test.verchk.string(), "1.2.3")
    def test_different(test):
        """Only the part before the first comma of the quoted value counts."""
        def getdiff(line):
            if "different =" in line:
                return line.split('"')[1].split(",")[0]
        test.verchk.include("checkfiles/multi.txt", func=(getdiff, "line"))
        test.assertEqual(test.verchk.string(), "0.0.0")
    def test_none(test):
        """A callback that never matches leaves the version as None."""
        def getnone(line):
            return
        test.verchk.include("checkfiles/multi.txt", func=(getnone, "line"))
        test.assertEqual(test.verchk.string(), None)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
unittest.main()
| true |
3102dfa6235108b8fc5d8b0d657311504eadd7ad | Python | tanvee19/MachineLearning | /Day23/Day23_Code_Challenges.py | UTF-8 | 3,228 | 3.421875 | 3 | [] | no_license | """Code Challenge:
dataset: BreadBasket_DMS.csv
Q1. In this code challenge, you are given a dataset which has data and time
wise transaction on a bakery retail store.
1. Draw the pie chart of top 15 selling items.
2. Find the associations of items where min support should be 0.0025,
min_confidence=0.2, min_lift=3.
3. Out of given results sets, show only names of the associated item
from given result row wise.
"""
import matplotlib.pyplot as plt
import pandas as pd
from apyori import apriori
# Load the bakery transactions and pie-chart the 15 best-selling items.
dataset = pd.read_csv('BreadBasket_DMS.csv')
d = dataset["Item"].value_counts().head(15)
plt.pie(d.values,explode = None,labels = d.index,colors = ['red','green','aqua','red','blue','purple','orange','red','green','blue','purple','orange','black','white','green'] )
# Drop rows whose Item is the placeholder string "NONE".
dataset = dataset.mask(dataset.eq("NONE")).dropna()
def sort(values):
    """Join an iterable of item names into one comma-separated string.

    (Despite the name, no sorting happens -- this only concatenates; it is
    used as the aggregation function for the groupby below.)
    """
    return ','.join(values)
# NOTE(review): this produces a Series of comma-joined STRINGS, and apriori
# iterates each string character by character -- the commented-out split
# below suggests a list-of-item-lists was intended instead.
df = dataset.groupby("Transaction")["Item"].apply(sort)
"""
transactions = []
for j in range(len(df)):
    transactions.append(list(df.values[j].split(',')))
"""
rules = list(apriori(df, min_support = 0.0025, min_confidence = 0.2, min_lift = 3))
for item in rules:
    # first index of the inner list
    # Contains base item and add item
    pair = item[0]
    items = [x for x in pair]
    print("Rule: " + items[0] + " -> " + items[1])
    #second index of the inner list
    print("Support: " + str(item[1]))
    #third index of the list located at 0th
    #of the third index of the inner list
    print("Confidence: " + str(item[2][0][2]))
    print("Lift: " + str(item[2][0][3]))
    print("=====================================")
"""
Code Challenge:
Datset: Market_Basket_Optimization.csv
Q2. In today's demo sesssion, we did not handle the null values before
fitting the data to model, remove the null values from each row and
perform the associations once again.
Also draw the bar chart of top 10 edibles.
"""
import matplotlib.pyplot as plt
import pandas as pd
from apyori import apriori
# Data Preprocessing
dataset = pd.read_csv('Market_Basket_Optimisation.csv', header = None)
transactions = []
l = []
dataset = dataset.fillna("None")
for i in range(len(dataset)+1):
transactions.append([])
for j in range(len(list(dataset.columns))):
for i in range(0, len(dataset)):
if dataset[j][i] != "None":
transactions[i].append(dataset[j][i])
l.append(dataset[j][i])
else:
pass
# Training Apriori on the dataset
rules = apriori(transactions, min_support = 0.003, min_confidence = 0.25, min_lift = 4)
# Visualising the results
results = list(rules)
for item in results:
# first index of the inner list
# Contains base item and add item
pair = item[0]
items = [x for x in pair]
print("Rule: " + items[0] + " -> " + items[1])
#second index of the inner list
print("Support: " + str(item[1]))
#third index of the list located at 0th
#of the third index of the inner list
print("Confidence: " + str(item[2][0][2]))
print("Lift: " + str(item[2][0][3]))
print("=====================================")
d = pd.DataFrame(l,)
d1 = d[0].value_counts().head(10)
plt.bar(d,d1 )
| true |
85a1db0b75ef97f0580bc93df2744256e95468e6 | Python | JatinTiwaricodes/expmath | /plots/waermeleitung.py | UTF-8 | 7,222 | 3.3125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
from bokeh.layouts import Row, WidgetBox
from bokeh.io import curdoc
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Slider, RadioButtonGroup, Toggle
from bokeh.plotting import Figure
"""
This plot presents the transient behaviour of the analytical solution to a
simple heat transfer problem in 1D with isotropic and homogeneous temperature
conductivity properties of the underlying material.
The user can set the parameters as well as initial and boundary conditions. We
only consider the more easy Dirichlet boundary conditions, i.e., fixed
temperature (but possibly non-zero) at both ends.
A toggle allows to activate advanced options so that the first user is not
distracted by the functionality.
"""
# Define constants for the plot. These are in accordance with all the other
# plots
HEIGHT = 400       # figure height (bokeh screen units)
WIDTH_PLOT = 600   # width of the figure itself
WIDTH_TOTAL = 800  # total width of figure plus widget column
# The thickness of the line of the solution curve u(t,x)
LINE_WIDTH = 2
# The size of the dots marking the temperatures at the end of the bar (obviously
# has to correspond with the boundary conditions)
DOT_SIZE = 10
def update_data(length_factor, conductivity, first, second, third, left, right,
                time):
    """
    Evaluate the analytical solution u(time, x) of the 1D heat equation on a
    bar of length length_factor*pi with Dirichlet boundary conditions.

    The solution is the superposition of the first three eigenfunctions
    sin(k*x/length_factor), k = 1, 2, 3, each decaying exponentially with a
    rate proportional to k**2 and the conductivity. For non-homogeneous
    boundary values (left/right != 0) the steady-state ("trivial") linear
    solution is added on top.

    Parameters:
        length_factor: bar length in multiples of pi
        conductivity: temperature conductivity of the material
        first, second, third: initial amplitudes of eigenfunctions 1..3
        left, right: fixed boundary temperatures u(t, 0) and u(t, L)
        time: point in time at which to evaluate the solution

    Returns:
        (x, y, y_trivial_endpoints): sample points, temperatures, and the
        values of the trivial solution at both bar ends (for the dots).
    """
    x = np.linspace(0, length_factor*np.pi, 50)
    # Contribution of the first eigenfunction (decay rate ~ 1**2)
    y_first = first * np.exp(-conductivity * time/ length_factor**2) *\
            np.sin(x / length_factor)
    # Contribution of the second eigenfunction (decay rate ~ 2**2)
    y_second = second * np.exp(-4 * conductivity * time / length_factor**2) *\
            np.sin(2 * x / length_factor)
    # Contribution of the third eigenfunction (decay rate ~ 3**2)
    y_third = third * np.exp(-9 * conductivity * time / length_factor**2) *\
            np.sin(3 * x / length_factor)
    y = y_first + y_second + y_third
    # Superposition with the trivial solution (necessary for non-homogeneous
    # boundary conditions)
    y_trivial_endpoints = [0, 0]
    if (left != 0 or right != 0):
        y_trivial = (right - left)/(length_factor * np.pi) * x + left
        y += y_trivial
        # Fix: use the last element instead of the hard-coded index 49 so
        # the code stays correct if the sample count ever changes.
        y_trivial_endpoints = [y_trivial[0], y_trivial[-1]]
    return (x, y, y_trivial_endpoints)
# Data source for the analytical solution. Whenever its data is changed, it will
# send the new information to the client to display it
data_source = ColumnDataSource(data={'x': [], 'y': []})
# Line for the trivial solution, especially helpful if boundary conditions are
# not homogeneous
trivial_line_source = ColumnDataSource(data={'x': [], 'y': []})
# Figure sized for the longest selectable bar (length factor 2 -> x up to 2*pi)
plot = Figure(plot_height=HEIGHT, plot_width=WIDTH_PLOT, x_range=[-1, 2*np.pi+1],
              y_range=[-2, 2], tools="")
plot.xaxis.axis_label = "Ort x"
plot.yaxis.axis_label = "Temperatur u(t, x)"
# Analytical solution
plot.line(x="x", y="y", source=data_source, line_width=LINE_WIDTH, color="blue")
# Trivial solution
plot.line(x="x", y="y", source=trivial_line_source, color="black",
          line_dash="dashed")
# Dots marking the boundary conditions
plot.circle(x="x", y="y", source=trivial_line_source, color="black",
            size=DOT_SIZE)
# Line, indicating zero temperature, i.e., the x-axis
plot.line(x=[-5, 15], y=[0, 0], color="black")
# Define all widgets
# NOTE(review): several slider titles below look mojibake-encoded (e.g.
# "Lรคnge" for what is presumably "Laenge") -- likely a file-encoding issue;
# the literals are left untouched here, verify against the original file.
# The length of the bar
length = Slider(title="Lรคnge des Stabes (mal pi)", value=1, start=0.5, end=2,
                step=0.5)
# Temperature conductivity, how fast heat is transported through the material.
# Here, we assume a linear isotropic homogoneous material in 1 dimension
conductivity = Slider(title="Temperaturleifรคhigkeit", value=1, start=0.1, end=2,
                      step=0.1)
# The coefficient for the first Eigenfunction, determined by the initial
# condition
first = Slider(title="Auslenkung der ersten Eigenform", value=1, start=-2, end=2, step=0.1)
# Enables the visibility for advanced widgets that are collapsed for a better
# readability of the plot
advanced_toggle = Toggle(label="Mehr Optionen")
# The coefficient for the second Eigenfunction, determined by the initial
# condition
second = Slider(title="Auslenkung der zweiten Eigenform", value=0, start=-2, end=2, step=0.1,
                visible=False)
# The coefficient for the third Eigenfunction, determined by the initial
# condition
third = Slider(title="Auslenkung der dritten Eigenform", value=0, start=-2, end=2, step=0.1,
               visible=False)
# The temperature on the left and right vertex, determined by the boundary
# condition to the problem. If one of them is unequal to zero then the general
# solution is superposed with a linear function connecting both points
left = Slider(title="Temperatur am linken Rand u(t, x=0)", value=0, start=-2,
              end=2, step=0.1, visible=False)
right = Slider(title="Temperatur am rechten Rand u(t, x=L)", value=0, start=-2,
               end=2, step=0.1, visible=False)
# Toggle that adds a periodic callback function, so that the plot seems to be
# moving
animation_toggle = Toggle(label="Animieren")
# Lets the user adjust the time in the transient simulation on its own
time = Slider(title="Zeit", value=0, start=0, end=10, step=0.1)
def toggle_callback(source):
    """
    Reveal the advanced widgets once the user asks for them, and hide the
    "more options" toggle itself so it cannot be clicked again.
    """
    advanced_toggle.visible = False
    for widget in (second, third, left, right):
        widget.visible = True
def animate():
    """Advance the time slider by one step, wrapping back to the start
    once the end of the time range has been reached."""
    if time.value < time.end:
        time.value += time.step
    else:
        time.value = time.start
# Handle of the periodic callback registered with the bokeh document;
# stored so the animation can be unregistered again later.
callback_id = 0
def animation_callback(source):
    """
    Start or stop the animation by registering/removing a periodic
    callback on the bokeh document.
    """
    global callback_id
    if animation_toggle.active != 1:
        # Toggle switched off: stop advancing the time slider.
        curdoc().remove_periodic_callback(callback_id)
        return
    # Toggle switched on: advance the time slider every 100 ms.
    callback_id = curdoc().add_periodic_callback(animate, 100)
def slider_callback(attr, old, new):
    """
    Recompute the analytical solution from the current widget values and
    push the new value pairs into the ColumnDataSources, which forwards
    them to the client.
    """
    xs, ys, trivial_ends = update_data(
        length.value, conductivity.value, first.value, second.value,
        third.value, left.value, right.value, time.value)
    data_source.data = {'x': xs, 'y': ys}
    trivial_line_source.data = {'x': [0, length.value * np.pi],
                                'y': trivial_ends}
# Populate the plot by calling the callback manually
# (the attr/old/new arguments are unused by the callback, so zeros suffice)
slider_callback(0,0,0)
# Connect the widgets with their respective callbacks
advanced_toggle.on_click(toggle_callback)
animation_toggle.on_click(animation_callback)
for slider in (length, conductivity, first, second, third, left, right, time):
    slider.on_change("value", slider_callback)
# Assemble the plot: widget column next to the figure, added to the document
inputs = WidgetBox(length, conductivity, first, advanced_toggle, second, third,
                   left, right, animation_toggle, time)
curdoc().add_root(Row(plot, inputs, width=WIDTH_TOTAL))
| true |
59d1c62c8713b23d728578aa9237a123703a9a8e | Python | jaepyoung/algorithmstudygroup | /day3/lcsubstring.py | UTF-8 | 401 | 2.875 | 3 | [] | no_license | def getlongestsubstringnumb(a,b):
def getlongestsubstringnumb(a, b):
    """Return the length of the longest common substring of a and b.

    Fixes over the original: the DP table is sized (len(a)+1) x (len(b)+1)
    so the transposed indexing only worked for equal-length inputs; the
    unguarded second `if` read table[-1][-1] when i == 0 or j == 0; the
    maximum was never tracked or returned; and the Python 2 print is
    replaced by a function call.
    """
    # table[i][j] = length of the common suffix of a[:i] and b[:j]
    table = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best = 0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
                best = max(best, table[i][j])
    print(table)
    return best
print(getlongestsubstringnumb("adbcdefa", "thahdecd"))
49d21f5dcf42163678dc73589f486b3a30ce498c | Python | flaugusto/mc102 | /15/lab15.py | UTF-8 | 3,934 | 3.828125 | 4 | [] | no_license | #!/usr/bin/env python3
# Modulo de funcรตes, campeonato PES
# Nome: Flavio Augusto Pereira Cunha
# RA: 197083
#*******************************************************************************
# Funcao: atualizaTabela
#
# Parametros:
#   tabela: matrix with the championship table data
#   jogo: string describing one match, "<home> <goals> x <goals> <away>"
#
# Description:
#   Inserts the result of 'jogo' into the table (points, wins, goal
#   difference and goals scored). The table is NOT sorted here.
def atualizaTabela(tabela, jogo):
    # Parse the match string into team names and goal counts.
    campos = jogo.split()
    nome_casa, gols_casa = campos[0], int(campos[1])
    nome_fora, gols_fora = campos[4], int(campos[3])
    # Award match points: 3 for a win, 1 each for a draw.
    if gols_casa > gols_fora:
        pts_casa, pts_fora = 3, 0
    elif gols_casa < gols_fora:
        pts_casa, pts_fora = 0, 3
    else:
        pts_casa = pts_fora = 1
    # Locate both teams in the table and accumulate their statistics:
    # [1] points, [2] wins, [3] goal difference, [4] goals scored.
    for linha in tabela:
        if linha[0] == nome_casa:
            linha[1] += pts_casa
            if pts_casa == 3:
                linha[2] += 1
            linha[3] += gols_casa - gols_fora
            linha[4] += gols_casa
        if linha[0] == nome_fora:
            linha[1] += pts_fora
            if pts_fora == 3:
                linha[2] += 1
            linha[3] += gols_fora - gols_casa
            linha[4] += gols_fora
#*******************************************************************************
#*******************************************************************************
# Funcao: comparaTimes
#
# Parametros:
#   time1: one team's table row
#   time2: one team's table row
#
# Description:
#   Returns 1 if time1 should rank above time2, -1 if below, 0 if tied.
#   Rows are compared column by column (points, wins, goal difference,
#   goals scored); the first differing column decides.
def comparaTimes(time1, time2):
    for campo1, campo2 in zip(time1[1:], time2[1:]):
        if campo1 != campo2:
            return 1 if campo1 > campo2 else -1
    return 0
#*******************************************************************************
#*******************************************************************************
# Funcao: ordenaTabela
#
# Parametros:
#   tabela: matrix with the championship table data
#
# Description:
#   Sorts the table in place (bubble sort) so the best-ranked teams,
#   according to comparaTimes, come first.
def ordenaTabela(tabela):
    for fim in range(len(tabela) - 1, 0, -1):
        for pos in range(fim):
            # Swap neighbours whenever the lower row ranks above the upper.
            if comparaTimes(tabela[pos], tabela[pos + 1]) == -1:
                tabela[pos], tabela[pos + 1] = tabela[pos + 1], tabela[pos]
#*******************************************************************************
#*******************************************************************************
# Funcao: imprimeTabela
#
# Parametros:
#   tabela: matrix with the championship table data
#
# Description:
#   Prints each table row as comma-separated values, one row per line.
def imprimeTabela(tabela):
    for linha in tabela:
        print(', '.join(str(campo) for campo in linha))
#*******************************************************************************
155a6b1db1fa31c88cef004606dae9858ccd30ce | Python | zbut/euler | /21-30/21.py | UTF-8 | 528 | 3.453125 | 3 | [] | no_license | divisors_sum = [-i for i in range(10000)]
def amicable_sum_below(limit):
    """Return the sum of all amicable numbers strictly below *limit*
    (Project Euler problem 21), printing each pair member as it is found.

    Leftover debug prints from the original (the divisor trace for 220 and
    the final divisors_sum dumps) have been removed.
    """
    # divisors_sum[n] will hold the sum of proper divisors of n. Seed with
    # -n so that the sieve below, which also adds n to itself (via the
    # multiple j == n), nets out to the proper-divisor sum.
    divisors_sum = [-n for n in range(limit)]
    for d in range(1, limit):
        for multiple in range(d, limit, d):
            divisors_sum[multiple] += d
    total = 0
    for n, partner in enumerate(divisors_sum):
        # n is amicable iff d(n) = partner, d(partner) = n and n != partner.
        if 0 < partner < limit and partner != n and divisors_sum[partner] == n:
            print("Found {} and {}".format(n, partner))
            total += n
    return total
amicable_total = amicable_sum_below(10000)
print("amicable sum: {}".format(amicable_total))
1c7c19bd028ffa1f592fd3988ef57fade8dcb505 | Python | lambricm/ao3_database_storage | /collect_data.py | UTF-8 | 5,705 | 2.65625 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | from ao3.search import search
from pathlib import Path
import json
import datetime
from re import sub
"""
TODO:
- retrieve chapter data
- connect to db
- add data (choose sample fandom
- check data presence
- check data correctness
"""
# Fandom whose works are collected; the commented value is an alternative.
#fandom = "Ergo Proxy (Anime)"
fandom = "Crimson Cross"
# Database name; also used as the prefix of the timestamp file below.
db_name = "test_pub"
timestamp_file = db_name + "_timestamp.txt"
# srch -> the search whose works should be retrieved
# return -> list of all matching works
def get_worklist(srch):
    """Return every work updated on/after the previously stored timestamp."""
    # Restrict the search window to works changed since the last run, then
    # record the current date for the next run.
    srch.set_date_from(format_timestamp(load_timestamp()))
    save_timestamp()
    return srch.get_result().get_all_works()
# srch -> the search whose works should be retrieved
# return -> iterator over all matching works
def get_work_iterator(srch):
    """Return an iterator over works updated on/after the stored timestamp."""
    # Same windowing as get_worklist, but the result is consumed lazily.
    srch.set_date_from(format_timestamp(load_timestamp()))
    save_timestamp()
    return srch.get_result().get_work_iterator()
# prints all ids in worklist (used for testing)
def print_work_ids(wrklst):
    """Print the id of every work in *wrklst* (debugging helper).

    NOTE(review): next_work is read as an attribute here, while
    add_work_data() calls it as a method (next_work()) -- confirm which
    form the worklist API actually provides.
    NOTE(review): the bare except also swallows unrelated failures, not
    just an exhausted/empty worklist.
    """
    try:
        wrk = wrklst.next_work
        while (not (wrk is None)):
            print(wrk.id)
            wrk = wrklst.next_work
    except:
        print("Error: no works in worklist")
#CURRENTLY ONLY RETRIEVES DATA
# wrklst -> retrieved worklist from search
def add_work_data(wrkit):
    """Iterate over all works in *wrkit* and print their metadata.

    Database persistence is not implemented yet; the commented-out steps
    inside each loop mark where the table lookups/inserts should go.

    NOTE(review): 'authors' is printed as a single value while the
    commented-out loop suggests it was meant to be iterable -- confirm the
    type of wrk.author. The loop 'for categories in categories' shadows
    its own iterable; it works, but renaming the loop variable would be
    clearer.
    """
    print("Adding work data...")
    try:
        wrk = wrkit.next_work()
        while (not (wrk is None)):
            work_id = wrk.id
            print("work id: " + str(work_id))
            fandoms = wrk.fandoms
            for fandom in fandoms:
                #check if exists in fandom table
                #add if it doesn't
                #get fandom id
                #connect fandom id w/ work id
                print("fandom: " + fandom)
            authors = wrk.author
            #for author in authors:
                #check if author exists in author table
                #add if it doesn't
                #get author id
                #connect author id w/ work id
            print("author: " + authors)
            tags = wrk.additional_tags
            for tag in tags:
                # Each tag is resolved via a fresh search to find its
                # canonical parent tag.
                tag_search = search(tag).get_result()
                parent_tag = tag_search.main_tag
                #check if tag entry exists
                #add tag & parent tag if doesn't exists
                #update parent tag if needed
                #get tag id
                #connect tag & work
                print("tag: " + tag)
                print("parent tag: " + parent_tag)
            warnings = wrk.warnings
            for warning in warnings:
                #check exists
                #add if no
                #get id
                #connect w/ fic
                print("warning:" + warning)
            ratings = wrk.rating
            for rating in ratings:
                #check exists
                #add if no
                #get id
                #connect w/ fic
                print("rating:" + rating)
            relationships = wrk.relationship
            for relationship in relationships:
                #check exists
                #add if no
                #get id
                #connect w/ fic
                print("relationship:" + relationship)
            categories = wrk.category
            for categories in categories:
                #check exists
                #add if no
                #get id
                #connect w/ fic
                print("categories:" + categories)
            characters = wrk.characters
            for character in characters:
                #check exists
                #add if no
                #get id
                #connect w/ fic
                print("character:" + character)
            series = wrk.series
            if not (series is None):
                series_id = series["id"]
                series = series["title"]
                #check exists
                #add if no
                #connect w/ fic
                print("series id, name: " + series_id + ", " + series)
            pub_date = wrk.published
            print("publish date: " + str(pub_date))
            upd_date = wrk.updated
            print("update date: " + str(upd_date))
            words = wrk.words
            print("words: " + str(words))
            chapters = wrk.chapters
            total_chapters = chapters["total_chapters"]
            chapters = chapters["num_chapters"]
            print("chapters: " + chapters + "/" + total_chapters)
            comments = wrk.comments
            print("comments: " + str(comments))
            kudos = wrk.kudos
            print("kudos: " + str(kudos))
            bookmarks = wrk.bookmarks
            print("bookmarks: " + str(bookmarks))
            hits = wrk.hits
            print("hits: " + str(hits))
            title = wrk.title
            print("title: " + title)
            summary = wrk.summary
            # Strip the HTML paragraph markup from the summary: paragraph
            # breaks become newlines, remaining <p> tags are dropped.
            summary = sub(r"<\/p><p>","\n",summary)
            summary = sub(r"<\/p>|<p>","",summary)
            print("summary: " + summary)
            wrk = wrkit.next_work()
    except Exception as e:
        print("ERROR: " + str(e))
# pth -> path to check
def check_file(pth):
    """Return True when *pth* names an existing regular file."""
    candidate = Path(pth)
    return candidate.is_file()
#loads previous timestamp - don't want to get data that hasn't been updated since our last data retrieval
# return - timestamp dict {"year": .., "month": .., "day": ..} or None
def load_timestamp():
    """Return the previously stored timestamp dict, or None when the
    timestamp file is missing, unreadable JSON, or lacks the key."""
    ret = None
    pth = "./" + timestamp_file
    if check_file(pth):
        with open(pth, 'r') as inFile:
            try:
                json_data = json.loads(inFile.read())
                if "timestamp" in json_data:
                    ret = json_data["timestamp"]
            except ValueError:
                # Malformed JSON (json.JSONDecodeError is a ValueError);
                # treated the same as a missing timestamp. The original
                # bare `except:` also swallowed e.g. KeyboardInterrupt.
                ret = None
    return ret
#saves current date as timestamp
def save_timestamp():
    """Build today's date as the timestamp payload.

    NOTE(review): the actual file write below is commented out, so nothing
    is persisted -- every run currently behaves as a first run.
    """
    pth = "./" + timestamp_file
    curr_date = datetime.date.today()
    timestamp = {"timestamp":{"year":curr_date.year, "month":curr_date.month, "day":curr_date.day}}
    #with open(pth,'w') as outFile:
    #    outFile.write(json.dumps(timestamp))
#formats the timestamp for ao3 search compatability
# timestamp -> loaded timestamp dict (or None)
# return -> "YYYY-MM-DD" string ready for the ao3 search class, "" if unusable
def format_timestamp(timestamp):
    """Format a stored timestamp dict as 'YYYY-MM-DD'.

    Returns "" (no date filter) when *timestamp* is None, is not a dict,
    or is missing one of the year/month/day keys.
    """
    if timestamp is None:
        return ""
    try:
        year = str(timestamp["year"])
        # zfill replaces the original manual padding loops; months/days are
        # left-padded to two digits as ao3 expects.
        month = str(timestamp["month"]).zfill(2)
        day = str(timestamp["day"]).zfill(2)
    except (KeyError, TypeError):
        # Missing keys or a non-mapping value; the original bare `except:`
        # hid even unrelated failures.
        return ""
    return year + "-" + month + "-" + day
it = get_work_iterator(search(fandom))
add_work_data(it)
"""
wrk = it.next_work()
while not (wrk is None):
print(wrk.id)
wrk = it.next_work()
print(len(it.total_works))
"""
| true |
11a2efd25122603a993e27502eb4faaafa257038 | Python | Interiority/Byzantium | /GPS/GNS.py | UTF-8 | 2,466 | 2.578125 | 3 | [] | no_license | from NMEA.Utilities import quality_indicator, faa_mode_indicator, convert_dm_to_dd
class GNS_Talker:
    """Parses NMEA GNS (GNSS fix data) sentences for one talker and keeps
    the most recent position fix.

    Attributes:
        talker_id: identifier of the talker this instance listens to
        NewMeasurement: flag reserved for "a fresh fix arrived" signalling
        MeasurementValid: True while the last parsed sentence held data
        Latitude, Longitude: last decoded position in decimal degrees
    """
    def __init__(self, talker_id): # Init for instances
        self.talker_id = talker_id
        self.NewMeasurement = False
        self.MeasurementValid = False
        # Arbitrary start position until the first valid fix is parsed.
        self.Latitude = 55.1
        self.Longitude = 7.10
    def parse_gns(self, sentence):
        """Parse one comma-separated GNS sentence and update the fix.

        GNS - GNSS fix data, e.g.
        $GNGNS,150233.00,5510.00879,N,00726.10787,W,FF,19,0.66,111.2,53.9,1.0,0000*5A
        Fields: 1 UTC time, 2/3 latitude + NS hemisphere, 4/5 longitude +
        EW hemisphere, 6 position mode, 7 satellites tracked, 8 HDOP,
        9/10 altitude + unit, 11/12 geoid height + unit, 13 DGPS age,
        14 DGPS station id.
        """
        list_of_values = sentence.split(',')
        try:
            if list_of_values[1] == '':
                print('[Parse GNS] No data')
                # Bug fix: the original assigned a *local* measurement_valid
                # here, so a stale valid flag survived empty sentences and
                # the parser then tried to decode empty fields.
                self.MeasurementValid = False
            else:
                # Check the fix quality
                #fix_quality = list_of_values[6]
                #quality_indicator(fix_quality)
                self.MeasurementValid = True
            # If its a valid measurement, process
            if self.MeasurementValid:
                _GPSTime = list_of_values[1]
                _LatitudeDM = (list_of_values[2])
                _NSHemisphere = list_of_values[3]
                _LongitudeDM = (list_of_values[4])
                _EWHemisphere = list_of_values[5]
                _NumberOfSatellitesBeingTracked = list_of_values[7]
                _HDOP = list_of_values[8]
                _Altitude = list_of_values[9]
                _HeightOfGeoid = list_of_values[10]
                self.Latitude, self.Longitude = convert_dm_to_dd(_LatitudeDM, _LongitudeDM)
        except ValueError:
            # Bug fix: the original message referenced GLL, copied from
            # another parser.
            print('[GPS-parseGNS] Error parsing GNS')
edca2c19b0c3d484ed42af128a3098a66d90c04d | Python | zmyao88/nastyboys | /nasty.py | UTF-8 | 4,796 | 3.375 | 3 | [] | no_license | """
This runs the #NastyBoys trading algorithm from a command line interface
Use at your own risk!
"""
import bs4
import datetime
import urllib
import sys
from get_filings import get_latest_document
from trend import determine_trend
DEFAULT_TREND_LENGTH = 20
def get_extreme_performers (best=True):
    """Scrape Yahoo Finance for today's biggest movers.

    Returns the ticker symbols listed on the 'gainers' page when
    best=True, otherwise those on the 'losers' page.
    Implementation: Chris Tan
    """
    if best:
        url = 'http://finance.yahoo.com/gainers?e=us'
    else:
        url = 'http://finance.yahoo.com/losers?e=us'
    page = urllib.urlopen(url).read()
    # The movers table lives in the div with id 'yfitp'; the first cell of
    # each row holds the ticker symbol.
    table = bs4.BeautifulSoup(page).find('div', attrs={'id': 'yfitp'})
    return [cell.string for cell in table.find_all('td', class_='first')]
def get_latest_filing (symbol, filing_type='10-Q'):
    """Return a tuple: (the filing date as a string in
    'YYYY-MM-DD' format, text of the latest public filing)
    corresponding to the given symbol and filing type, or
    (None, None) if the symbol is invalid or no such filing
    is available, etc.
    """
    # Thin wrapper around get_filings.get_latest_document; the symbol is
    # normalised to upper case first.
    return get_latest_document (symbol.upper(), filing_type)
def get_sentiment (filing_text):
    """Run a sentiment analysis on the text of the
    filing document, returning a float in the range
    -1.0 (bad) to 1.0 (good)
    Implementation: Myf Ma & Zaiming Yao
    """
    # Stub: always neutral until the real analysis is implemented.
    return 0.0
def get_performance_trend (symbol, trade_date=None):
    """Determine the performance trend for the stock
    symbol up to trade_date, returning a float in the range
    -1.0 (perfect negative trend) to 1.0 (perfect positive trend).
    Implementation: Chris Natali

    trade_date defaults to the current time. Bug fix: the original used
    trade_date=datetime.datetime.now() in the signature, which Python
    evaluates once at import time, freezing the date for the life of the
    process; the None sentinel resolves it per call instead.
    """
    if trade_date is None:
        trade_date = datetime.datetime.now()
    return determine_trend(symbol, trade_date, DEFAULT_TREND_LENGTH,
                           trend_end_days_ago=1)
def matches_bounce_expectation (symbol, sentiment, trend, best=True):
    """Decision rule: should this symbol be sold short (best=True) or
    bought (best=False), given its sentiment and prior-trend scores?"""
    if best:
        # Big gainer whose prior trend was strongly negative and whose
        # filing sentiment is negative: bet that it falls back down.
        return trend < -0.9 and sentiment < 0.0
    # Big loser whose prior trend was strongly positive and whose filing
    # sentiment is positive: bet that it bounces back up.
    return trend > 0.9 and sentiment > 0.0
def test_candidate_symbols (best=True, use_sentiment=True):
    """Get a list of candidate symbols, based on their being
    either the best performers (best=True) or the worst (best=False),
    and decide whether or not to trade them, using their sentiment
    and trend scores."""
    trade_symbols = []
    for sym in get_extreme_performers(best):
        # Skip duplicates so each candidate is evaluated at most once.
        if sym not in trade_symbols:
            try:
                trend = get_performance_trend(sym)
                if use_sentiment:
                    filing_date, filing_text = get_latest_filing(sym)
                    sentiment = get_sentiment(filing_text)
                else:
                    # make sentiment match our criteria
                    # for the case we're interested in for now
                    # (so we don't have to rewrite the
                    # matches_bounce_expectation() fn)
                    sentiment = -1.0 if best else 1.0
                if matches_bounce_expectation(sym, sentiment, trend, best):
                    trade_symbols.append(sym)
            # Python 2 except syntax (this module is Python 2). Per-symbol
            # failures are logged to stderr and the scan continues.
            except Exception, e:
                sys.stderr.write("Exception processing symbol %s, Exception: %s\n" % (sym, e))
    return trade_symbols
def main():
    """Command-line entry point: expects exactly one boolean argument that
    decides whether the filing-sentiment analysis is used, then prints the
    symbols to go long or short on, one per line."""
    if len(sys.argv[1:]) != 1:
        print ' '.join(["\nUsage:\n\tpython",
                        sys.argv[0],
                        "[with-sentiment-analysis (boolean)\n\n"])
    else:
        # Run the full algorithm and produce two lists:
        # symbols to buy, in the expectation they will rise
        # symbols to sell short, in the expectation they will fall.
        # Any argument starting with 't'/'T' counts as True.
        use_sentiment = (sys.argv[1].upper()[0] == 'T')
        to_buy = test_candidate_symbols(best=False, use_sentiment=use_sentiment)
        to_sell = test_candidate_symbols(best=True, use_sentiment=use_sentiment)
        if len(to_buy) > 0:
            for sym in to_buy:
                print sym, 'long'
        if len(to_sell) > 0:
            for sym in to_sell:
                print sym, 'short'
if __name__ == "__main__":
    main()
| true |
c5e956396eeb0169aedfe07aea05abecb60f947f | Python | joshuasewhee/practice_python | /Divisors.py | UTF-8 | 490 | 4.78125 | 5 | [] | no_license | # Joshua Sew-Hee
# 6/14/18
# Divisors
# Create a program that asks the user for a number and then prints out a
# list of all the divisors of that number.
# (If you donโt know what a divisor is, it is a number that divides evenly
# into another number.
# For example, 13 is a divisor of 26 because 26 / 13 has no remainder.)

def divisors(number):
    """Return all positive divisors of *number* in ascending order."""
    return [d for d in range(1, number + 1) if number % d == 0]

# Guarded entry point so importing this module does not block on input().
# The debug print of the full 1..n range has been removed; the exercise
# only asks for the divisor list.
if __name__ == "__main__":
    number = int(input("Enter a number to divide: "))
    print(divisors(number))
| true |
b65de3532dbd61f4f584bb35758c181105077287 | Python | yang4978/Huawei-OJ | /Python/0070. ๅพช็ฏๅฐๆฐ.py | UTF-8 | 734 | 3.65625 | 4 | [] | no_license | # If you need to import additional packages or classes, please import here.
def gcd(a, b):
    """Greatest common divisor of two positive integers, computed by
    repeatedly replacing the larger argument with the remainder."""
    remainder = a % b
    while remainder:
        a = remainder
        if a < b:
            a, b = b, a
        remainder = a % b
    return b
def func():
    """Read one digit string per line until '0' or EOF; for each, print the
    reduced fraction digits / (10**len(digits) - 1), i.e. the rational whose
    decimal expansion repeats those digits."""
    while True:
        try:
            raw = input()
        except EOFError:
            break
        num_digits = len(raw)
        value = int(raw)
        if value == 0:
            break
        denominator = 10 ** num_digits - 1
        common = gcd(denominator, value)
        print(str(value // common) + '/' + str(denominator // common))
if __name__ == "__main__":
    func()
| true |
43169ba159b44fd4c3fca8fe246de4b697b5b46a | Python | jiabraham/Hacker-Rank | /interview_prep/strings/make_anagrams.py | UTF-8 | 3,999 | 3.28125 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the makeAnagram function below.
def makeAnagram(a, b):
    """Return the minimum number of character deletions needed to make
    strings a and b anagrams of each other.

    Only lowercase letters 'a'-'z' are counted, matching the original
    histogram implementation; any other character is ignored. Replaces 52
    copy-pasted if-chains with a single 26-bucket histogram pass and drops
    the unused deletions/additions counters.
    """
    # counts[k] = occurrences of chr(k + ord('a')) in a minus those in b.
    counts = [0] * 26
    for ch in a:
        idx = ord(ch) - ord('a')
        if 0 <= idx < 26:
            counts[idx] += 1
    for ch in b:
        idx = ord(ch) - ord('a')
        if 0 <= idx < 26:
            counts[idx] -= 1
    # Every surplus character on either side must be deleted.
    return sum(abs(diff) for diff in counts)
if __name__ == '__main__':
    # HackerRank harness: read the two strings from stdin and write the
    # answer to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    a = input()
    b = input()
    res = makeAnagram(a, b)
    fptr.write(str(res) + '\n')
    fptr.close()
| true |
f6e7cd7b4f826f8ef6810d5673c75228f6185cbf | Python | hhu-stups/pyB | /pyB/definition_handler.py | UTF-8 | 7,786 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from ast_nodes import *
from config import USE_RPYTHON_CODE
from external_functions import EXTERNAL_FUNCTIONS_DICT
from helpers import file_to_AST_str_no_print, print_ast
from pretty_printer import pretty_print
if USE_RPYTHON_CODE:
from rpython_b_objmodel import frozenset
# This class modifies an AST. It generates a "definition free" AST ahead of time. (after parsing, before interpretation)
class DefinitionHandler():
def __init__(self, env, parsing_method):
self.def_map = {} # string --> AST
self.external_functions_found = [] # string
self.external_functions_types_found = {} #
self.used_def_files = []
self.env = env # needed for search path of definition-files
# avoid cyclic import: parser needs to handele definitions inside the AST and
# definition handler needs to parse (definition-) files
self.str_ast_to_python_ast = parsing_method
def repl_defs(self, root):
for clause in root.children:
if isinstance(clause, ADefinitionsMachineClause):
self._save_definitions(clause)
self._replace_definitions(root)
self._replace_ext_funcs_in_solution_file(self.env.solution_root)
# fill def_map with "definition-definitions"
def _save_definitions(self, clause):
assert isinstance(clause, ADefinitionsMachineClause)
self._process_definition_files(clause)
for definition in clause.children:
if isinstance(definition, AFileDefinitionDefinition):
continue
assert isinstance(definition, AExpressionDefinitionDefinition) or isinstance(definition, APredicateDefinitionDefinition) or isinstance(definition, ASubstitutionDefinitionDefinition)
self.def_map[definition.idName] = definition
# make sure only ext. funs. are replaced if definition entry is presend
if definition.idName in EXTERNAL_FUNCTIONS_DICT.keys():
self.external_functions_found.append(definition.idName)
if definition.idName.startswith("EXTERNAL_FUNCTION_"):
self.external_functions_types_found[definition.idName[18:]] = definition.children[0]
# Defs can use definitions from these files.
# All of them musst be processed before any def in this file
def _process_definition_files(self, clause):
for definition in clause.children :
if isinstance(definition, AFileDefinitionDefinition): #TODO: implement me
if definition.idName in self.used_def_files: # avoid def-file loops
continue
self.used_def_files.append(definition.idName)
# get def-file ast
file_path_and_name = self.env._bmachine_search_dir + definition.idName
ast_string, error = file_to_AST_str_no_print(file_path_and_name)
root = self.str_ast_to_python_ast(ast_string)
assert isinstance(root, ADefinitionFileParseUnit)
assert isinstance(root.children[0], ADefinitionsMachineClause)
# used definitions
self._save_definitions(root.children[0])
# side-effect: change definitions to def free Asts
def _replace_definitions(self, root):
try:
for i in range(len(root.children)):
child = root.children[i]
if isinstance(child, ADefinitionExpression) or isinstance(child, ADefinitionPredicate) or isinstance(child, ADefinitionSubstitution):
# replace with ext. fun node if necessary
if child.idName in self.external_functions_found:
name = child.idName
type_ast = self.external_functions_types_found[name]
func = EXTERNAL_FUNCTIONS_DICT[name]
root.children[i] = AExternalFunctionExpression(name, type_ast, func)
root.children[i].children = child.children # args of the function
return
def_free_ast = self._gen_def_free_ast(child)
root.children[i] = def_free_ast
else:
self._replace_definitions(child)
except AttributeError as e: # leaf:no children
print "AttributeError while definition handling",e
return
# solution files dont know they use extern functions.
# comments like /*EXT:*/ are removed by the parser.
def _replace_ext_funcs_in_solution_file(self, root):
if root is None: # e.g. no solution file present
return
try:
for i in range(len(root.children)):
child = root.children[i]
if isinstance(child, AFunctionExpression) and isinstance(child.children[0], AIdentifierExpression):
try:
name = child.children[0].idName
type_ast = self.external_functions_types_found[name]
func = EXTERNAL_FUNCTIONS_DICT[name]
root.children[i] = AExternalFunctionExpression(name, type_ast, func)
root.children[i].children = child.children[1:] # args of the function, first id is function name
except KeyError:
continue
else:
self._replace_ext_funcs_in_solution_file(child)
except AttributeError as e:
print "AttributeError while definition handling", e
return
def _gen_def_free_ast(self, def_node):
ast = self.def_map[def_node.idName]
assert isinstance(ast, AExpressionDefinitionDefinition) or isinstance(ast, APredicateDefinitionDefinition) or isinstance(ast, ASubstitutionDefinitionDefinition)
replace_nodes = {}
# (1) find nodes to be replaced
for i in range(ast.paraNum):
if isinstance(ast.children[i], AIdentifierExpression):
replace_nodes[ast.children[i].idName] = def_node.children[i]
else:
raise Exception("Definition-Parametes must be IdentifierExpressions! %s" % ast.children[i])
# (2) replace nodes
# the ast instance is a reusable pattern found in the definition clause and used
# in some other clause (def_node). Def_node can be a INITIALISATION or the body
# of a operation. The copy is needed because the 'pattern' ast can be used on
# more than one location
#
# example:
# def_node: INITIALISATION Assign(z, 1+1) || Assign(b, TRUE)
# ast: DEFINITIONS Assign(VarName,Expr) == VarName := Expr;
# replace nodes: {(VarName,z),(Expr,1+1)}
ast_clone = self._clone_ast(ast)
self._replace_nodes(ast_clone, replace_nodes)
return ast_clone.children[-1]
# side-effect: change definition-nodes to def-free nodes
def _replace_nodes(self, ast, map):
try:
for i in range(len(ast.children)):
child = ast.children[i]
if isinstance(child, AIdentifierExpression) and child.idName in map:
ast.children[i] = map[child.idName]
else:
self._replace_nodes(child, map)
except AttributeError: # leaf:no children
return
def _clone_ast(self, ast):
# deepcopy is not Rpython
#import copy
#print "original"
#print_ast(ast)
#result = copy.deepcopy(ast)
result = ast.deepcopy()
#print "clone"
#print_ast(result)
return result
| true |
ad56079120d8455fe81d27cadc24aba61e143a0e | Python | Tofu-Gang/advent_of_code_2019 | /day_05/day_05.py | UTF-8 | 9,522 | 3.828125 | 4 | [] | no_license | __author__ = "Tofu Gang"
__email__ = "tofugangsw@gmail.com"
from intcode_computer.computer import IntcodeComputer
"""
--- Day 5: Sunny with a Chance of Asteroids ---
You're starting to sweat as the ship makes its way toward Mercury. The Elves
suggest that you get the air conditioner working by upgrading your ship computer
to support the Thermal Environment Supervision Terminal.
"""
################################################################################
def puzzle_1() -> None:
"""
--- Part One ---
The Thermal Environment Supervision Terminal (TEST) starts by running a
diagnostic program (your puzzle input). The TEST diagnostic program will run
on your existing Intcode computer after a few modifications:
First, you'll need to add two new instructions:
Opcode 3 takes a single integer as input and saves it to the position given
by its only parameter. For example, the instruction 3,50 would take an input
value and store it at address 50.
Opcode 4 outputs the value of its only parameter. For example, the
instruction 4,50 would output the value at address 50.
Programs that use these instructions will come with documentation that
explains what should be connected to the input and output. The program
3,0,4,0,99 outputs whatever it gets as input, then halts.
Second, you'll need to add support for parameter modes:
Each parameter of an instruction is handled based on its parameter mode.
Right now, your ship computer already understands parameter mode 0, position
mode, which causes the parameter to be interpreted as a position - if the
parameter is 50, its value is the value stored at address 50 in memory.
Until now, all parameters have been in position mode.
Now, your ship computer will also need to handle parameters in mode 1,
immediate mode. In immediate mode, a parameter is interpreted as a value -
- if the parameter is 50, its value is simply 50.
Parameter modes are stored in the same value as the instruction's opcode.
The opcode is a two-digit number based only on the ones and tens digit of
the value, that is, the opcode is the rightmost two digits of the first
value in an instruction. Parameter modes are single digits, one per
parameter, read right-to-left from the opcode: the first parameter's mode is
in the hundreds digit, the second parameter's mode is in the thousands
digit, the third parameter's mode is in the ten-thousands digit, and so on.
Any missing modes are 0.
For example, consider the program 1002,4,3,4,33.
The first instruction, 1002,4,3,4, is a multiply instruction - the rightmost
two digits of the first value, 02, indicate opcode 2, multiplication. Then,
going right to left, the parameter modes are 0 (hundreds digit), 1
(thousands digit), and 0 (ten-thousands digit, not present and therefore
zero):
ABCDE
1002
DE - two-digit opcode, 02 == opcode 2
C - mode of 1st parameter, 0 == position mode
B - mode of 2nd parameter, 1 == immediate mode
A - mode of 3rd parameter, 0 == position mode, omitted due to being a
leading zero
This instruction multiplies its first two parameters. The first parameter, 4
in position mode, works like it did before - its value is the value stored
at address 4 (33). The second parameter, 3 in immediate mode, simply has
value 3. The result of this operation, 33 * 3 = 99, is written according to
the third parameter, 4 in position mode, which also works like it did
before - 99 is written to address 4.
Parameters that an instruction writes to will never be in immediate mode.
Finally, some notes:
It is important to remember that the instruction pointer should increase by
the number of values in the instruction after the instruction finishes.
Because of the new instructions, this amount is no longer always 4.
Integers can be negative: 1101,100,-1,4,0 is a valid program (find 100 + -1,
store the result in position 4).
The TEST diagnostic program will start by requesting from the user the ID of
the system to test by running an input instruction - provide it 1, the ID
for the ship's air conditioner unit.
It will then perform a series of diagnostic tests confirming that various
parts of the Intcode computer, like parameter modes, function correctly. For
each test, it will run an output instruction indicating how far the result
of the test was from the expected value, where 0 means the test was
successful. Non-zero outputs mean that a function is not working correctly;
check the instructions that were run before the output instruction to see
which one failed.
Finally, the program will output a diagnostic code and immediately halt.
This final output isn't an error; an output followed immediately by a halt
means the program finished. If all outputs were zero except the diagnostic
code, the diagnostic program ran successfully.
After providing 1 to the only input instruction and passing all the tests,
what diagnostic code does the program produce?
The answer should be 14522484.
"""
with open("day_05/input.txt", 'r') as f:
program = tuple([int(data.strip())
for data in f.read().strip().split(',')])
computer = IntcodeComputer()
computer.load_program(program)
computer.load_input(1)
computer.start()
computer.join()
print(computer.get_output())
################################################################################
def puzzle_2() -> None:
"""
--- Part Two ---
The air conditioner comes online! Its cold air feels good for a while, but
then the TEST alarms start to go off. Since the air conditioner can't vent
its heat anywhere but back into the spacecraft, it's actually making the air
inside the ship warmer.
Instead, you'll need to use the TEST to extend the thermal radiators.
Fortunately, the diagnostic program (your puzzle input) is already equipped
for this. Unfortunately, your Intcode computer is not.
Your computer is only missing a few opcodes:
Opcode 5 is jump-if-true: if the first parameter is non-zero, it sets the
instruction pointer to the value from the second parameter. Otherwise, it
does nothing.
Opcode 6 is jump-if-false: if the first parameter is zero, it sets the
instruction pointer to the value from the second parameter. Otherwise, it
does nothing.
Opcode 7 is less than: if the first parameter is less than the second
parameter, it stores 1 in the position given by the third parameter.
Otherwise, it stores 0.
Opcode 8 is equals: if the first parameter is equal to the second parameter,
it stores 1 in the position given by the third parameter. Otherwise, it
stores 0.
Like all instructions, these instructions need to support parameter modes as
described above.
Normally, after an instruction is finished, the instruction pointer
increases by the number of values in that instruction. However, if the
instruction modifies the instruction pointer, that value is used and the
instruction pointer is not automatically increased.
For example, here are several programs that take one input, compare it to
the value 8, and then produce one output:
3,9,8,9,10,9,4,9,99,-1,8 - Using position mode, consider whether the input
is equal to 8; output 1 (if it is) or 0 (if it is not).
3,9,7,9,10,9,4,9,99,-1,8 - Using position mode, consider whether the input
is less than 8; output 1 (if it is) or 0 (if it is not).
3,3,1108,-1,8,3,4,3,99 - Using immediate mode, consider whether the input
is equal to 8; output 1 (if it is) or 0 (if it is not).
3,3,1107,-1,8,3,4,3,99 - Using immediate mode, consider whether the input
is less than 8; output 1 (if it is) or 0 (if it is not).
Here are some jump tests that take an input, then output 0 if the input was
zero or 1 if the input was non-zero:
3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9 (using position mode)
3,3,1105,-1,9,1101,0,0,12,4,12,99,1 (using immediate mode)
Here's a larger example:
3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,
1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,
999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99
The above example program uses an input instruction to ask for a single
number. The program will then output 999 if the input value is below 8,
output 1000 if the input value is equal to 8, or output 1001 if the input
value is greater than 8.
This time, when the TEST diagnostic program runs its input instruction to
get the ID of the system to test, provide it 5, the ID for the ship's
thermal radiator controller. This diagnostic test suite only outputs one
number, the diagnostic code.
What is the diagnostic code for system ID 5?
The answer should be 4655956.
"""
with open("day_05/input.txt", 'r') as f:
program = tuple([int(data.strip())
for data in f.read().strip().split(',')])
computer = IntcodeComputer()
computer.load_program(program)
computer.load_input(5)
computer.start()
computer.join()
print(computer.get_output())
################################################################################
| true |
10ef4029c5ef6e95790a9d8cb6005d7d2ee296fa | Python | liuyuan1002/shopping-website | /taobao/views_api.py | UTF-8 | 9,274 | 2.5625 | 3 | [] | no_license | #coding=utf-8
from taobao.models import goods
from django.http import HttpResponse
from django.http import JsonResponse
from django.core.paginator import Paginator ,PageNotAnInteger ,EmptyPage
from django.contrib import auth
from django.contrib.auth.models import User
from .forms import UserForm
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
import io
#ๆณจๅ
@csrf_exempt
def register_view(req):
'''
:param req:
:type POST: username,password,verifcode
:return: status = {200:ๆๅ
}
'''
context = {'inputFormat':True,'userExit':False,'verify' : True}
if req.method == 'POST':
form = UserForm(req.POST)
if form.is_valid():
#่ทๅพ่กจๅๆฐๆฎ
username = form.cleaned_data['username']
password = form.cleaned_data['password']
#้ช่ฏ็ ๆฃ้ช
verifycode = req.POST.get('verifycode')
verify = req.session.get('verifycode')
if verify:
verify = verify.strip().lower()
if verifycode:
verifycode = verifycode.strip().lower()
if verify != verifycode:
context['verify'] = False
return render(req,'register.html',context)
#ๆฐๆฎๆ ผๅผๆฏๅฆๆญฃ็กฎ
if len(username) < 6 or len(password) < 6:
context['inputFormat'] = False
return render(req,'register.html',context)
#ๅคๆญ็จๆทๆฏๅฆๅญๅจ
user = auth.authenticate(username = username,password = password)
if user:
context['userExit']=True
return render(req, 'register.html', context)
user = User.objects.create_user(username=username, password=password)
user.save()
User_cart.objects.create(username=username).save()
req.session['username'] = username
auth.login(req, user)
return redirect('/taobao/')
return render(req,'register.html',context)
#้ช่ฏ็ ๅถไฝ๏ผstrๅญsessionไธญ๏ผๅพ็่ฟๅใ
def verifyCode(req):
import random
from PIL import Image, ImageDraw, ImageFont, ImageFilter
_letter_cases = "abcdefghjkmnpqrstuvwxy" # ๅฐๅๅญๆฏ๏ผๅป้คๅฏ่ฝๅนฒๆฐ็i๏ผl๏ผo๏ผz
_upper_cases = _letter_cases.upper() # ๅคงๅๅญๆฏ
_numbers = ''.join(map(str, range(3, 10))) # ๆฐๅญ
init_chars = ''.join((_letter_cases, _upper_cases, _numbers))
def create_validate_code(size=(120, 30),
chars=init_chars,
img_type="GIF",
mode="RGB",
bg_color=(255, 255, 255),
fg_color=(0, 0, 255),
font_size=30,
font_type="Arial.ttf",
length=4,
draw_lines=True,
n_line=(1, 4),
draw_points=True,
point_chance=2):
"""
@todo: ็ๆ้ช่ฏ็ ๅพ็
@param size: ๅพ็็ๅคงๅฐ๏ผๆ ผๅผ๏ผๅฎฝ๏ผ้ซ๏ผ๏ผ้ป่ฎคไธบ(120, 30)
@param chars: ๅ
่ฎธ็ๅญ็ฌฆ้ๅ๏ผๆ ผๅผๅญ็ฌฆไธฒ
@param img_type: ๅพ็ไฟๅญ็ๆ ผๅผ๏ผ้ป่ฎคไธบGIF๏ผๅฏ้็ไธบGIF๏ผJPEG๏ผTIFF๏ผPNG
@param mode: ๅพ็ๆจกๅผ๏ผ้ป่ฎคไธบRGB
@param bg_color: ่ๆฏ้ข่ฒ๏ผ้ป่ฎคไธบ็ฝ่ฒ
@param fg_color: ๅๆฏ่ฒ๏ผ้ช่ฏ็ ๅญ็ฌฆ้ข่ฒ๏ผ้ป่ฎคไธบ่่ฒ#0000FF
@param font_size: ้ช่ฏ็ ๅญไฝๅคงๅฐ
@param font_type: ้ช่ฏ็ ๅญไฝ๏ผ้ป่ฎคไธบ ae_AlArabiya.ttf
@param length: ้ช่ฏ็ ๅญ็ฌฆไธชๆฐ
@param draw_lines: ๆฏๅฆๅๅนฒๆฐ็บฟ
@param n_lines: ๅนฒๆฐ็บฟ็ๆกๆฐ่ๅด๏ผๆ ผๅผๅ
็ป๏ผ้ป่ฎคไธบ(1, 2)๏ผๅชๆdraw_linesไธบTrueๆถๆๆ
@param draw_points: ๆฏๅฆ็ปๅนฒๆฐ็น
@param point_chance: ๅนฒๆฐ็นๅบ็ฐ็ๆฆ็๏ผๅคงๅฐ่ๅด[0, 100]
@return: [0]: PIL Imageๅฎไพ
@return: [1]: ้ช่ฏ็ ๅพ็ไธญ็ๅญ็ฌฆไธฒ
"""
width, height = size # ๅฎฝ้ซ
# ๅๅปบๅพๅฝข
img = Image.new(mode, size, bg_color)
draw = ImageDraw.Draw(img) # ๅๅปบ็ป็ฌ
def get_chars():
"""็ๆ็ปๅฎ้ฟๅบฆ็ๅญ็ฌฆไธฒ๏ผ่ฟๅๅ่กจๆ ผๅผ"""
return random.sample(chars, length)
def create_lines():
"""็ปๅถๅนฒๆฐ็บฟ"""
line_num = random.randint(*n_line) # ๅนฒๆฐ็บฟๆกๆฐ
for i in range(line_num):
# ่ตทๅง็น
begin = (random.randint(0, size[0]), random.randint(0, size[1]))
# ็ปๆ็น
end = (random.randint(0, size[0]), random.randint(0, size[1]))
draw.line([begin, end], fill=(0, 0, 0))
def create_points():
"""็ปๅถๅนฒๆฐ็น"""
chance = min(100, max(0, int(point_chance))) # ๅคงๅฐ้ๅถๅจ[0, 100]
for w in range(width):
for h in range(height):
tmp = random.randint(0, 100)
if tmp > 100 - chance:
draw.point((w, h), fill=(0, 0, 0))
def create_strs():
"""็ปๅถ้ช่ฏ็ ๅญ็ฌฆ"""
c_chars = get_chars()
strs = ' %s ' % ' '.join(c_chars) # ๆฏไธชๅญ็ฌฆๅๅไปฅ็ฉบๆ ผ้ๅผ
# font = ImageFont.truetype(font_type, font_size)
font = ImageFont.truetype('E:\workspace\PycharmProjects\shopping-website\Arial.ttf', random.randint(21, 25))
# font = ImageFont.truetype(r'/home/ubuntu/shopping-website/Arial.ttf', random.randint(21, 25))
draw.text((random.randint(0, 10), random.randint(0, 5)),
strs, font=font, fill=fg_color)
return ''.join(c_chars)
if draw_lines:
create_lines()
if draw_points:
create_points()
strs = create_strs()
del draw
# ๅพๅฝขๆญๆฒๅๆฐ
params = [1 - float(random.randint(1, 2)) / 100, 0, 0, 0, 1 - float(random.randint(1, 10)) / 100,
float(random.randint(1, 2)) / 500, 0.001, float(random.randint(1, 2)) / 500]
img = img.transform(size, Image.PERSPECTIVE, params) # ๅๅปบๆญๆฒ
img = img.filter(ImageFilter.EDGE_ENHANCE_MORE) # ๆปค้๏ผ่พน็ๅ ๅผบ๏ผ้ๅผๆดๅคง๏ผ
return img,strs
res = create_validate_code()
strs , img = res[1],res[0]
req.session['verifycode'] = strs
buf = io.BytesIO()
img.save(buf, 'png')
# img.save('code.jpg', 'jpeg')
# print(strs)
return HttpResponse(buf.getvalue(), 'image/png')
#ๅคๆญ็จๆทๆฏๅฆ็ปๅฝ
@csrf_exempt
def user_session(req):
'''
url = r'^api/user/'
:param req:
:return:
'''
context = {'status': 200,'msg':'ๅทฒ็ปๅฝ'}
if 'username' in req.session:
username = req.session['username']
context['isLogin'] = True
context['username'] = username
else:
context['isLogin'] = False
context['username'] = ''
context['msg'] = 'ๆช็ปๅฝ'
return JsonResponse(context)
#ๅ็ฑปๅฑ็คบ
@csrf_exempt
def classify(req):
context ={'status':200}
type = req.POST.get('type','')
page = req.POST.get('page','')
context['type'] , context['page'] = type ,page
if type == '0':
goods_list = goods.objects.order_by('sales_Volume').all()
else:
goods_list = goods.objects.all().filter(category = int(type)).order_by('sales_Volume').all()
if goods_list == None:
return JsonResponse({'status':10021,'message':'parameter error'})
paginator = Paginator(goods_list,8)
try:
goodss = paginator.page(int(page))
except PageNotAnInteger:
goodss = paginator.page(1)
except EmptyPage:
goodss = paginator.page(paginator.num_pages)
context['queryNum'],context['hasPrevios'],context['hasNext'] = len(goodss),goodss.has_previous(),goodss.has_next()
context['endIndex'] = paginator.num_pages
data = []
if goodss:
for i in goodss:
good = {}
good['goods_id'] = i.goods_id
good['goods_name'] = i.goods_name
good['goods_price'] = i.goods_price
good['goods_stock'] = i.goods_Stock
good['sales_volume'] = i.sales_Volume
good['goods_introduce'] = i.goods_introduce
data.append(good)
context.update({'data':data})
return JsonResponse(context)
else:
return JsonResponse({'status':10022,'message':'query goods isempty'})
# return render(req,'classify.html',context)
# category = models.IntegerField('ๅ็ฑป',default=0)
# goods_id = models.CharField('ๅๅID',max_length=10)
# goods_name = models.CharField('ๅๅๅ',max_length=100,default='')
# goods_price = models.DecimalField('ๅๅไปทๆ ผ',max_digits=10,decimal_places=2)
# goods_Stock = models.IntegerField('ๅๅๅบๅญ',default=100)
# sales_Volume = models.IntegerField('้้',default=0)
# goods_introduce = models.CharField('ๅๅ็ฎไป',max_length=250,default='') | true |
1e9a45aa699d5e7012360ba507b5a66c44fc3a05 | Python | sam1208318697/Leetcode | /Leetcode_env/2019/7_19/Min_Stack.py | UTF-8 | 1,659 | 4.90625 | 5 | [] | no_license | # 155. ๆๅฐๆ
# ่ฎพ่ฎกไธไธชๆฏๆ push๏ผpop๏ผtop ๆไฝ๏ผๅนถ่ฝๅจๅธธๆฐๆถ้ดๅ
ๆฃ็ดขๅฐๆๅฐๅ
็ด ็ๆ ใ
# push(x)ย -- ๅฐๅ
็ด x ๆจๅ
ฅๆ ไธญใ
# pop()ย -- ๅ ้คๆ ้กถ็ๅ
็ด ใ
# top()ย -- ่ทๅๆ ้กถๅ
็ด ใ
# getMin() -- ๆฃ็ดขๆ ไธญ็ๆๅฐๅ
็ด ใ
# ็คบไพ:
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.getMin(); --> ่ฟๅ -3.
# minStack.pop();
# minStack.top(); --> ่ฟๅ 0.
# minStack.getMin(); --> ่ฟๅ -2.
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.minstack = []
def push(self, x: int) -> None:
self.stack.append(x)
if len(self.minstack) == 0:
self.minstack.append(x)
elif self.minstack[-1]<x:
self.minstack.append(self.minstack[-1])
else:
self.minstack.append(x)
def pop(self) -> None:
if len(self.stack) == 0:
return False
self.stack.pop()
self.minstack.pop()
def top(self) -> int:
return self.stack[-1]
def getMin(self) -> int:
return self.minstack[-1]
def getStack(self):
return self.stack
def getMinstack(self):
return self.minstack
minstack = MinStack()
minstack.push(1)
print(minstack.getStack())
print(minstack.getMinstack())
minstack.push(-2)
print(minstack.getStack())
print(minstack.getMinstack())
minstack.push(3)
print(minstack.getStack())
print(minstack.getMinstack())
print(minstack.top())
minstack.pop()
print(minstack.top())
print(minstack.getMin()) | true |
66ca375e1f586164605cd4f5e1e223c8a893455f | Python | paultovt/mai_labs | /XOR/lab.py | UTF-8 | 2,152 | 3.171875 | 3 | [] | no_license | import sys
import operator
from math import floor
key = 'Alexandre Dumas'
if __name__ == '__main__':
if sys.argv[2:]:
action = sys.argv[1]
filename = sys.argv[2]
else:
print('\nUsage: python3 lab.py e/d <file>\n')
exit()
# encrypt file
if action == 'e':
outfile = open(filename.split('.')[0] + '.enc', 'wb')
with open(filename, 'r') as infile:
c = 0
while True:
if c >= len(key):
c = 0
data = infile.read(1)
if data == '':
break
bin_chars = bin(ord(data))[2:].zfill(16)
bin_keychars = bin(ord(key[c]))[2:].zfill(16)
enc_bin = ''
for ch, kch in zip(bin_chars, bin_keychars):
xor = operator.xor(int(ch), int(kch))
enc_bin += str(xor)
outfile.write((int(enc_bin,2)).to_bytes(4, byteorder='big', signed=True))
c += 1
outfile.close()
infile.close()
print('\nEncrypted data saved to', filename.split('.')[0] + '.enc\n')
# decrypt file
elif action == 'd':
outfile = open(filename.split('.')[0] + '.dec', 'w')
with open(filename, 'rb') as infile:
c = 0
while True:
if c >= len(key):
c = 0
data = infile.read(4)
if data == b'':
break
bin_chars = bin(int.from_bytes(data, byteorder='big'))[2:].zfill(16)
bin_keychars = bin(ord(key[c]))[2:].zfill(16)
dec_bin = ''
for ch, kch in zip(bin_chars, bin_keychars):
xor = operator.xor(int(ch), int(kch))
dec_bin += str(xor)
dec_char = chr(int(dec_bin,2))
outfile.write(dec_char)
c += 1
outfile.close()
infile.close()
print('\nDecrypted data saved to', filename.split('.')[0] + '.dec\n')
else:
print('\nUsage: python3 lab.py e/d <file>\n')
exit()
| true |
de874ec289387a99fa9532f9de41c31717f251dc | Python | Sammion/DAStudy | /src/NLTK/ch02/data_import.py | UTF-8 | 582 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on 2018/6/17
@author: Samuel
@Desc:
@dependence: Noting
"""
import csv
with open("../../data/test01.csv") as f:
reader = csv.reader(f, delimiter=';', quotechar='"')
for line in reader:
print(line)
import json
with open("../../data/test01.json") as f:
data = json.load(f)
print(data)
print(data["boolean"])
import nltk
nltk.download()
input_str = "Today is a good day. It is sunny. I want to go to study in the conpany."
from nltk.tokenize import sent_tokenize
all_sent = sent_tokenize(input_str)
print(all_sent)
| true |
96d1fb6cf0817ea400e4aef2947c06d08cd0d41e | Python | Aasthaengg/IBMdataset | /Python_codes/p03700/s643531555.py | UTF-8 | 515 | 2.78125 | 3 | [] | no_license | n, a, b = map(int,input().split())
h = [ int(input()) for i in range(n)]
maxim_h = max(h)
ok = (maxim_h + a - 1) // a * n
ng = 0
while abs(ok - ng) > 1:
X = (ok + ng) // 2
#Xๅ็็บใ่ตทใใๅฟ
่ฆใใใใจไปฎๅฎใใ
cnt = 0
flag = 1
for val in h:
if val <= b * X:continue
temp = (val - b * X + a - b - 1) // (a - b)
cnt += temp
if cnt > X:
flag = 0
break
#print(X, cnt, ok)
if flag:ok = X
else:ng = X
print(ok)
| true |
8301f122bbe16d72fb1ee521cf10d7b06b630d8c | Python | DarthRoco/AlloyML | /AlloyML/gui.py | UTF-8 | 6,016 | 2.703125 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
import pygad
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.ttk import *
import constants
from PIL import Image, ImageTk
import ga
import threading
import numpy as np
import time
from tkinter import messagebox
#Use these values while trying to get decent answer in decent runtime
# TARGET=347
# MODEL='ys'
# vars=['S','Cu','Nb','Exit temperature']
#The Parameters we wish to provide option to user for optimisation
PARAMETERS=['Furnace temperature', 'Exit temperature', 'Annealing temperature',
'Sulphur', 'Copper', 'Nickel', 'Chromium', 'Molybdenum', 'Niobium', 'Aluminium(Total)',
'Tin', 'Arsenic', 'Calcium', 'Lead', 'Carbon(Eq1)', 'Carbon(Eq2)', 'Vanadium', 'Titanium',
'Antimony', 'Zirconium', 'Nitrogen', 'Boron', 'Oxygen']
#loads corresponding model from memory
def load_models(opn):
if opn=='el':
model = tf.keras.models.load_model('models/el.h5')
elif opn=='ts':
model = tf.keras.models.load_model('models/ts.h5')
elif opn=='ys':
model = tf.keras.models.load_model('models/ys.h5')
return model
#Create Root Window
root=tk.Tk()
root.title('Alloy Compose')
#Create Title
welcometext=tk.Label(root,text="ISTE CLUTCH ALLOY RECOMMENDATION SYSTEM",padx=10,pady=10,fg="blue",font=25)
welcometext.pack()
#Create frame to insert options
frame=tk.Frame(root,borderwidth=5)
frame.pack(padx=50,pady=50)
#Create Dropdown for Required Property
options=[
'Yield Strength',
'Yield Strength',
'Tensile Strength',
'Elongation Limit']
dropdown=tk.StringVar()
drop=OptionMenu(frame,dropdown,*options)
drop.grid(row=1,column=0)
space=Label(frame,text=" ")
space.grid(row=1,column=1)
#Create Input Box to accept Target Value
tgr=tk.Label(frame,text="REQUIRED VALUE:")
tgr.grid(row=1,column=2)
e=tk.Entry(frame)
e.grid(row=1,column=3)
intvar_dict = {}
#Create an array of checkboxes for various compositions user may want to optimise
checkbutton_list = []
row,col=2,0
frame2=Frame(frame)
frame2.grid(row=2,column=0,columnspan=4)
for key in PARAMETERS:
intvar_dict[key]=IntVar()
c=Checkbutton(frame2,text=key,variable=intvar_dict[key])
c.grid(row=row,column=col)
col+=1
if (col+1)%4==0:
col=0
row+=1
checkbutton_list.append(c)
warn=tk.Label(frame,text='Note checking any of the below boxes will significantly increase runtime.It is recomended NOT to use them unless absolutely neccessary',fg="red")
warn.grid(row=3,column=0,columnspan=4)
#Extra Functionality in case of low convergence
converge=IntVar()
complex=Checkbutton(frame,text='Check this box if GA failed to converge',variable=converge)
complex.grid(row=4,column=0,columnspan=2)
#Extra Functionality in case one wants to get more accurate results
acc=IntVar()
accurate=Checkbutton(frame,text='Check this box for greater precision in answer',variable=acc)
accurate.grid(row=4,column=2,columnspan=2)
image1 = Image.open("media/iste.jpg")
image2 = image1.resize((100, 100), Image.ANTIALIAS)
test = ImageTk.PhotoImage(image2)
label1 = tk.Label(image=test)
label1.image = test
# Position image
label1.place(x=0, y=0)
#Primary Function,Called when optimize button is pressed.Processes user input and passes to GAsolver.
#Also displays results in new window
def test():
vararr=[]
for key, value in intvar_dict.items():
if value.get() > 0:
vararr.append(key)
for key, value in intvar_dict.items():
value.set(0)
try:
TARGET=float(e.get())
except:
TARGET=""
mode=dropdown.get()
if mode==options[1]:
MODEL='ys'
elif mode==options[2]:
MODEL='ts'
elif mode==options[3]:
MODEL='el'
e.delete(0,END)
accure=acc.get()
if accure==0:
ACC=100
elif accure>=1:
ACC=800
COMP=35
comp=converge.get()
if comp>=1:
COMP=50
elif comp==0:
COMP=20
if (TARGET=="" )or (MODEL==None):
messagebox.showerror("Error","Please select a target value")
elif (len(vararr)==0):
messagebox.showerror("Error", "Please select at least one parameter to optimize")
else:
top=Toplevel()
top.title('Result')
wait=Label(top,text="Please wait while computation is ongoing....")
wait.pack()
#Progress Bar
my_progress=ttk.Progressbar(top, orient="horizontal",length=300,mode='indeterminate')
my_progress.pack(pady=20)
my_progress.start(10)
btw=tk.Button(top,text="close",command=top.destroy,borderwidth=5).pack()
star=time.time()
ans=ga.ga_solver(TARGET,vararr,MODEL,COMP,ACC)
k = []
for key in ans[1]:
k.append(ans[1][key])
k=np.asarray(k)
k=np.reshape(k,(1,26))
model=load_models(MODEL)
an=model.predict(k)
my_progress.stop()
my_progress.destroy()
wait.config(text=f"The operation took roughly {int((time.time()-star)/60)+1} minutes.Here are the results")
ss=dict(ans[0])
table=ttk.Treeview(top,columns=('Property','Value'),show='headings',height=26)
for col in ('Property','Value'):
table.heading(col,text=col)
for i,(key,value) in enumerate(ss.items()):
table.insert("","end",values=(key,round(value,5)))
table.pack()
trg=Label(top,text=f"The Target was {TARGET}")
pred=Label(top,text=f"The GA acheived {an[0][0]}")
trg.pack()
pred.pack()
#Threading so as to prevent gui lockup
def step():
threading.Thread(target=test).start()
#Primary button,triggers GA
btn1 =tk.Button(root, text="Optimize", command=step,borderwidth= 5)
btn1.pack()
#Main loop
root.mainloop() | true |
16f1151f08051e1c6c3c172758b23cd764b27436 | Python | saviobobick/luminarsavio | /flowcontrols/pythoncollections/sumlist.py | UTF-8 | 264 | 3.3125 | 3 | [] | no_license | lst=[3,4,6,7,8]
# for i in lst:
# if(i>5):
#
# print(lst)
# elist=list()
# olist=[]
# for num in lst:
# if num%2==0:
# elist.append(num)
# else:
# olist.append(num)
# print(elist)
# print(olist)
sum=0
for i in lst:
sum+=i
print(sum) | true |
65c6400b040d9021b0f1b358b0b1c03f556bca80 | Python | michaeltrias/python-challenge | /Pybank/other_attempts/pydata_mct4.py | UTF-8 | 1,498 | 3.125 | 3 | [] | no_license | import os
import csv
csvpath = os.path.join('..','Pybank_Resources', 'PyBank__data.csv')
month = []
profit_loss=[]
greatest_increase =0
greatest_descrease =0
monthly_change = []
prev_value = 0
new_value =0
greatest_value = 0
i=0
with open(csvpath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
# Read the header row first (skip this step if there is now header)
csv_header = next(csvreader)
for row in csvreader:
# print(row)
month.append(row)
length_month = len(month)
#list comprehension to convert string into int
profit_loss = [int(i) for i in profit_loss]
total_profit = sum(profit_loss)
profit_loss.append(row[1])
current_value = int(row[1])
new_value = current_value - prev_value
monthly_change.append(new_value)
prev_value = current_value
# code includes the first value 867884, So had to omit that value and decrease length by 1 (85 instead of 86 months)
average_change = sum(monthly_change[1:])/((len(monthly_change)-1))
# total_profit = sum(profit_loss), need to create a new list, append new vvalues then get the average
print( "Financial Analysis")
print("--------------------------")
print(f'Total Months : {length_month}')
print(f'Total: ${total_profit}')
print(f'Average Change: {(str(round(average_change,2)))}')
print(monthly_change.index(max(monthly_change)))
| true |
301f090b8bc5145a9579282c7679821c91ca6a29 | Python | arorashu/mnist-digit | /tic-tac-toe.py | UTF-8 | 7,730 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# ## Train an agent to play tic tac toe
#
# ### Strategies
# 1. Play at random
# 2. Ideal Player
# 3. Imitation learning
#
# In[1]:
import itertools
import random
# In[2]:
# Turn flags stored in TicTacToe.turn: False means it is the computer's
# move, True means it is the human's move (see TicTacToe.__init__/play).
COMPUTER = False
HUMAN = True
class Player():
    """Integer identifiers for the available player strategies."""
    HUMAN, RANDOM, EXPERT, STUDENT = range(4)
STUDENT = 3
class Entry():
    """Symbols stored in board cells: empty, the human's X, the computer's O."""
    Empty, X, O = '-', 'X', 'O'
class TicTacToe():
"""
define the game class
COMPUTER always plays an 'O'
HUMAN always plays a 'X'
"""
MNMX_MOVES = 0
def __init__(self):
"""
turn = False -> computer's turn, True-> human turn
"""
self.state = ['-'] * 9
self.turn = random.choice([COMPUTER, HUMAN])
self.game_ended = False
self.winner = Entry.Empty
self.computer_player = Player.EXPERT
def __str__(self):
x = str(self.state[0:3]) + '\n' + str(self.state[3:6]) + '\n' \
+ str(self.state[6:9])
return (f'board state: \n{x}\n' +
f'player turn: {self.turn}\n' +
f'game ended: {self.game_ended}\n' +
f'winner: {self.winner}\n')
def pretty_state(self, state):
x = str(self.state[0:3]) + '\n' + str(self.state[3:6]) + '\n' \
+ str(self.state[6:9])
return x
def play(self):
print('play a turn')
if self.game_ended:
print('Game Over')
print(self)
return
avail_positions = []
for i, x in enumerate(self.state):
if x == Entry.Empty: avail_positions.append(i)
if len(avail_positions) == 0:
self.game_ended = True
self.winner = 'DRAW'
print('board is full')
return
if self.turn == COMPUTER:
print('COMPUTER to play')
print(f'available positions: {avail_positions}')
if self.computer_player == Player.RANDOM:
play_id = random.choice(avail_positions)
print(play_id)
self.state[play_id] = Entry.O
elif self.computer_player == Player.EXPERT:
play_id = self.play_pro()
self.state[play_id] = Entry.O
elif self.turn == HUMAN:
print('HUMAN to play')
self.user_input_prompt()
valid_input = False
while not valid_input:
inp = input('where do you wanna play [0-9]?')
if str.isdigit(inp): valid_input = True
if valid_input:
pos = int(inp)
if pos not in avail_positions:
valid_input = False
if not valid_input:
print('invalid input')
print(f'please enter a number from the list: {avail_positions}')
# got a valid position to play
self.state[pos] = Entry.X
self.evaluate()
self.turn = not self.turn
print(self)
def play_pro(self):
"""
play as an expert(pro)
using minimax
"""
state_copy = self.state.copy()
self.MNMX_MOVES = 0
best_move, best_score = self._minimax(state_copy, COMPUTER)
print(f'minimax moves taken: {self.MNMX_MOVES}')
return best_move
def _evaluate(self, state):
    """Return True when *state* (a flat 9-cell list) holds three-in-a-row.

    Bug fix: the original read ``self.state`` everywhere despite taking a
    ``state`` parameter, so it could never evaluate a hypothetical board.
    All current callers pass an alias of ``self.state``, so behaviour is
    unchanged for them, and minimax can now also evaluate true copies.
    """
    rows = [state[k:k + 3] for k in range(0, 9, 3)]
    cols = [[state[k], state[k + 3], state[k + 6]]
            for k in range(0, 3, 1)]
    diags = [[state[0], state[4], state[8]],
             [state[2], state[4], state[6]]]
    for line in itertools.chain(rows, cols, diags):
        # A line wins when all three cells hold the same non-empty mark.
        if (line[0] != Entry.Empty
                and line[0] == line[1]
                and line[0] == line[2]):
            return True
    return False
def _minimax(self, state, player):
    """Negamax search over the remaining moves; returns (best_move, best_score).

    Scores are from the mover's perspective: +10 for an immediate win,
    0 for a draw on the last cell, otherwise the negated best score of
    the opponent's reply.

    NOTE(review): despite taking *state*, the search mutates ``self.state``
    directly (``new_state = self.state``) and undoes each move afterwards.
    This only works because play_pro() passes a copy whose contents equal
    ``self.state`` -- confirm before calling with any other state.
    """
    self.MNMX_MOVES += 1
    # print(f'enter mnmx with state:\n{self.pretty_state(state)}')
    empty_pos = self.get_available_pos(state)
    if len(empty_pos) == 0:
        # NOTE(review): falls through and returns (-1, -100) in this case;
        # callers currently avoid it via the len(empty_pos) == 1 draw check.
        print('no moves available. exiting!')
        print(f'player: {player}')
    new_state = self.state
    best_score = -100
    best_move = -1
    for pos in empty_pos:
        # print(f'make move: {pos}')
        if player == COMPUTER: new_state[pos] = Entry.O
        else: new_state[pos] = Entry.X
        if self._evaluate(new_state): # played the winning move
            # print('winning minimax move')
            # print(f'player: {player}, state:\n{state}')
            # return pos, 10
            cur_score = 10
        else:
            cur_score = -100
            if len(empty_pos) == 1: # draw state, last move
                cur_score = 0
            else:
                # play more
                _, opp_score = self._minimax(new_state, not player)
                cur_score = -opp_score
        if cur_score > best_score:
            best_score = cur_score
            best_move = pos
        # reset state (undo the trial move)
        new_state[pos] = Entry.Empty
        # print(f'UNDO move: {pos}')
    # print(f'player: {player}, best_move = {pos}, best_score = {best_score}')
    # print(f'exit mnmx with state:\n{self.pretty_state(state)}')
    return best_move, best_score
def evaluate(self):
    """Scan the live board for three-in-a-row and record any winner.

    On a win, ``winner`` is derived from whose turn it currently is and
    ``game_ended`` becomes True; otherwise nothing changes.
    """
    rows = [self.state[k:k + 3] for k in range(0, 9, 3)]
    cols = [[self.state[k], self.state[k + 3], self.state[k + 6]]
            for k in range(0, 3, 1)]
    diags = [[self.state[0], self.state[4], self.state[8]],
             [self.state[2], self.state[4], self.state[6]]]
    win = False
    for arr in itertools.chain(rows, cols, diags):
        # A non-empty mark repeated across the whole line is a win.
        if arr[0] != Entry.Empty and arr[0] == arr[1] == arr[2]:
            win = True
            print(f'winning row: {arr}')
            break
    if win:
        print('we have a winner')
        # A truthy turn value maps to the HUMAN winner, falsy to COMPUTER.
        self.winner = "HUMAN" if self.turn else "COMPUTER"
        self.game_ended = True
def get_available_pos(self, state):
    """Return the indices of every empty cell in *state*."""
    return [idx for idx, cell in enumerate(state) if cell == Entry.Empty]
def get_state(self):
    """Pack the board into an int, two bits per cell (X=0b11, O=0b10, empty=0b00)."""
    packed = 0
    for idx, cell in enumerate(self.state):
        if cell == Entry.X:
            bits = 0x3
        elif cell == Entry.O:
            bits = 0x2
        else:
            bits = 0
        packed |= bits << (idx * 2)
    return packed
def user_input_prompt(self):
    """Print the board with each cell labelled by its index, three per row."""
    pieces = []
    for idx, mark in enumerate(self.state):
        pieces.append(f'[{idx}| {mark}]')
        if (idx + 1) % 3 == 0:
            pieces.append('\n')
    prompt = ''.join(pieces)
    print(f'board state: \n{prompt}\n')
def reset(self):
    """Start a fresh game: clear the board and re-pick who moves first.

    NOTE(review): unlike __init__, this keeps the previous
    ``computer_player`` strategy -- confirm that is intentional.
    """
    self.state = ['-'] * 9
    self.turn = random.choice([COMPUTER, HUMAN])
    self.game_ended = False
    self.winner = Entry.Empty
# In[3]:
# Module-level game instance reused across play_new_game() calls.
game = TicTacToe()
def play_new_game(game):
    """Reset *game* and keep playing turns until it ends."""
    print(f'old game state: {game}')
    game.reset()
    while not game.game_ended:
        game.play()
    print('done.')
# Kick off an interactive game immediately when the script runs.
play_new_game(game)
| true |
f68f62df848e4b6be24655911e943d89ed273deb | Python | RegiusQuant/nlp-practice | /nlp_pytorch/language_model_p1/data.py | UTF-8 | 2,779 | 3.109375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# @Time : 2020/3/11 ไธๅ8:03
# @Author : RegiusQuant <315135833@qq.com>
# @Project : nlp-practice
# @File : data.py
# @Desc : ่ฏญ่จๆจกๅๆ้็ๆฐๆฎๅฎไน
from pathlib import Path
import torch
class Vocab:
    """Vocabulary mapping between tokens and integer indices.

    Args:
        vocab_path (Path): path to the vocabulary file, one token per line.

    Attributes:
        stoi: dict mapping token -> index.
        itos: list mapping index -> token.
    """
    def __init__(self, vocab_path: Path):
        self.stoi = {}  # token -> index
        self.itos = []  # index -> token
        with open(vocab_path) as fp:
            for line in fp.readlines():
                token = line.strip()
                # Duplicate tokens keep their first-seen index.
                if token not in self.stoi:
                    self.stoi[token] = len(self.itos)
                    self.itos.append(token)
    def __len__(self):
        return len(self.itos)
class Corpus:
    """The BobSue corpus: a vocabulary plus tokenized train/valid/test splits.

    Args:
        data_path (Path): folder containing the corpus files.
        sort_by_len (bool): when True, sort each split by sentence length,
            longest first (reduces padding during batched training).

    Attributes:
        vocab (Vocab): vocabulary built from bobsue.voc.txt.
        train_data / valid_data / test_data: lists of token-index lists.
    """
    def __init__(self, data_path: Path, sort_by_len: bool = False):
        self.vocab = Vocab(data_path / 'bobsue.voc.txt')
        self.sort_by_len = sort_by_len
        self.train_data = self.tokenize(data_path / 'bobsue.lm.train.txt')
        self.valid_data = self.tokenize(data_path / 'bobsue.lm.dev.txt')
        self.test_data = self.tokenize(data_path / 'bobsue.lm.test.txt')
    def tokenize(self, text_path: Path):
        # One sample per line, converted to a list of token indices.
        with open(text_path) as fp:
            index_data = [self.sentence_to_index(line) for line in fp.readlines()]
        if self.sort_by_len:
            index_data.sort(key=len, reverse=True)
        return index_data
    def sentence_to_index(self, s):
        return [self.vocab.stoi[w] for w in s.split()]
    def index_to_sentence(self, x):
        return ' '.join(self.vocab.itos[i] for i in x)
class BobSueLMDataSet(torch.utils.data.Dataset):
    """Language-model dataset pairing each sentence prefix with its shifted target."""
    def __init__(self, index_data):
        # index_data: list of token-index lists, one per sentence.
        self.index_data = index_data
    def __getitem__(self, idx):
        # Predict token t+1 from tokens <= t: input drops the last token,
        # target drops the first.
        sample = self.index_data[idx]
        return sample[:-1], sample[1:]
    def __len__(self):
        return len(self.index_data)
| true |
8788df4be299ce6ae70570db28cef282607c2add | Python | athdsantos/Basic-Python | /CourseC&V/mundo-2/ex055.py | UTF-8 | 326 | 4.0625 | 4 | [] | no_license | maior = 0
menor = 0
# Read five weights; track the largest and smallest seen so far.
for leitura in range(1, 6):
    peso = float(input('Digite seu peso: '))
    if leitura == 1:
        # The first reading initialises both extremes.
        maior = menor = peso
    else:
        maior = max(maior, peso)
        menor = min(menor, peso)
print('Maior', maior)
print('Menor', menor)
print('FIM')
| true |
98b795759bdb33fcddbf405aff8c10ad5f3b854b | Python | astrofrog/dupeguru | /core_pe/tests/block_test.py | UTF-8 | 10,012 | 2.890625 | 3 | [] | no_license | # Created By: Virgil Dupras
# Created On: 2006/09/01
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# The commented out tests are tests for function that have been converted to pure C for speed
from pytest import raises, skip
from hscommon.testutil import eq_
try:
from ..block import *
except ImportError:
skip("Can't import the block module, probably hasn't been compiled.")
def my_avgdiff(first, second, limit=768, min_iter=3): # this is so I don't have to re-write every call
    """Call avgdiff with the default limit/min_iter these tests assume."""
    return avgdiff(first, second, limit, min_iter)
# RGB triples used as pixel fixtures throughout these tests.
BLACK = (0,0,0)
RED = (0xff,0,0)
GREEN = (0,0xff,0)
BLUE = (0,0,0xff)
class FakeImage:
    """Minimal stand-in for a PIL image: a size plus a flat pixel list."""
    def __init__(self, size, data):
        self.size = size  # (width, height)
        self.data = data  # row-major flat list of pixels
    def getdata(self):
        return self.data
    def crop(self, box):
        # box is (left, upper, right, lower), PIL-style half-open bounds.
        left, upper, right, lower = box
        width = self.size[0]
        pixels = [self.data[row * width + col]
                  for row in range(upper, lower)
                  for col in range(left, right)]
        return FakeImage((right - left, lower - upper), pixels)
def empty():
    """Return a 0x0 image with no pixel data."""
    return FakeImage((0,0), [])
def single_pixel(): #one red pixel
    """Return a 1x1 image whose only pixel is red.

    Consistency fix: uses the module-level RED constant (identical to the
    previous literal (0xff,0,0)) like the other fixtures do.
    """
    return FakeImage((1, 1), [RED])
def four_pixels():
    """Return a 2x2 image whose four pixels average to one known block value."""
    pixels = [RED,(0,0x80,0xff),(0x80,0,0),(0,0x40,0x80)]
    return FakeImage((2, 2), pixels)
class TestCasegetblock:
    """getblocks2 with blocksize 1: the whole image averages to a single block."""
    def test_single_pixel(self):
        im = single_pixel()
        [b] = getblocks2(im, 1)
        eq_(RED,b)
    def test_no_pixel(self):
        im = empty()
        eq_([], getblocks2(im, 1))
    def test_four_pixels(self):
        im = four_pixels()
        [b] = getblocks2(im, 1)
        # Expected block value is the integer (floor) mean of each channel.
        meanred = (0xff + 0x80) // 4
        meangreen = (0x80 + 0x40) // 4
        meanblue = (0xff + 0x80) // 4
        eq_((meanred,meangreen,meanblue),b)
# class TCdiff(unittest.TestCase):
# def test_diff(self):
# b1 = (10, 20, 30)
# b2 = (1, 2, 3)
# eq_(9 + 18 + 27,diff(b1,b2))
#
# def test_diff_negative(self):
# b1 = (10, 20, 30)
# b2 = (1, 2, 3)
# eq_(9 + 18 + 27,diff(b2,b1))
#
# def test_diff_mixed_positive_and_negative(self):
# b1 = (1, 5, 10)
# b2 = (10, 1, 15)
# eq_(9 + 4 + 5,diff(b1,b2))
#
# class TCgetblocks(unittest.TestCase):
# def test_empty_image(self):
# im = empty()
# blocks = getblocks(im,1)
# eq_(0,len(blocks))
#
# def test_one_block_image(self):
# im = four_pixels()
# blocks = getblocks2(im, 1)
# eq_(1,len(blocks))
# block = blocks[0]
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
#
# def test_not_enough_height_to_fit_a_block(self):
# im = FakeImage((2,1), [BLACK, BLACK])
# blocks = getblocks(im,2)
# eq_(0,len(blocks))
#
# def xtest_dont_include_leftovers(self):
# # this test is disabled because getblocks is not used and getblock in cdeffed
# pixels = [
# RED,(0,0x80,0xff),BLACK,
# (0x80,0,0),(0,0x40,0x80),BLACK,
# BLACK,BLACK,BLACK
# ]
# im = FakeImage((3,3), pixels)
# blocks = getblocks(im,2)
# block = blocks[0]
# #Because the block is smaller than the image, only blocksize must be considered.
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
#
# def xtest_two_blocks(self):
# # this test is disabled because getblocks is not used and getblock in cdeffed
# pixels = [BLACK for i in xrange(4 * 2)]
# pixels[0] = RED
# pixels[1] = (0,0x80,0xff)
# pixels[4] = (0x80,0,0)
# pixels[5] = (0,0x40,0x80)
# im = FakeImage((4, 2), pixels)
# blocks = getblocks(im,2)
# eq_(2,len(blocks))
# block = blocks[0]
# #Because the block is smaller than the image, only blocksize must be considered.
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
# eq_(BLACK,blocks[1])
#
# def test_four_blocks(self):
# pixels = [BLACK for i in xrange(4 * 4)]
# pixels[0] = RED
# pixels[1] = (0,0x80,0xff)
# pixels[4] = (0x80,0,0)
# pixels[5] = (0,0x40,0x80)
# im = FakeImage((4, 4), pixels)
# blocks = getblocks2(im, 2)
# eq_(4,len(blocks))
# block = blocks[0]
# #Because the block is smaller than the image, only blocksize must be considered.
# meanred = (0xff + 0x80) // 4
# meangreen = (0x80 + 0x40) // 4
# meanblue = (0xff + 0x80) // 4
# eq_((meanred,meangreen,meanblue),block)
# eq_(BLACK,blocks[1])
# eq_(BLACK,blocks[2])
# eq_(BLACK,blocks[3])
#
class TestCasegetblocks2:
    """getblocks2 behaviour for empty, averaged and multi-block images."""
    def test_empty_image(self):
        im = empty()
        blocks = getblocks2(im,1)
        eq_(0,len(blocks))
    def test_one_block_image(self):
        im = four_pixels()
        blocks = getblocks2(im,1)
        eq_(1,len(blocks))
        block = blocks[0]
        # Integer (floor) mean of each colour channel over the 4 pixels.
        meanred = (0xff + 0x80) // 4
        meangreen = (0x80 + 0x40) // 4
        meanblue = (0xff + 0x80) // 4
        eq_((meanred,meangreen,meanblue),block)
    def test_four_blocks_all_black(self):
        im = FakeImage((2, 2), [BLACK, BLACK, BLACK, BLACK])
        blocks = getblocks2(im,2)
        eq_(4,len(blocks))
        for block in blocks:
            eq_(BLACK,block)
    def test_two_pixels_image_horizontal(self):
        # A 2x1 image split into a 2x2 grid repeats its row.
        pixels = [RED,BLUE]
        im = FakeImage((2, 1), pixels)
        blocks = getblocks2(im,2)
        eq_(4,len(blocks))
        eq_(RED,blocks[0])
        eq_(BLUE,blocks[1])
        eq_(RED,blocks[2])
        eq_(BLUE,blocks[3])
    def test_two_pixels_image_vertical(self):
        # A 1x2 image split into a 2x2 grid repeats its column.
        pixels = [RED,BLUE]
        im = FakeImage((1, 2), pixels)
        blocks = getblocks2(im,2)
        eq_(4,len(blocks))
        eq_(RED,blocks[0])
        eq_(RED,blocks[1])
        eq_(BLUE,blocks[2])
        eq_(BLUE,blocks[3])
class TestCaseavgdiff:
    """avgdiff error cases, limit/min_iter behaviour and rounding rules."""
    def test_empty(self):
        with raises(NoBlocksError):
            my_avgdiff([], [])
    def test_two_blocks(self):
        im = empty()
        b1 = (5,10,15)
        b2 = (255,250,245)
        b3 = (0,0,0)
        b4 = (255,0,255)
        blocks1 = [b1,b2]
        blocks2 = [b3,b4]
        # Per-block diff is the sum of absolute channel differences;
        # the result is the integer mean over all block pairs.
        expected1 = 5 + 10 + 15
        expected2 = 0 + 250 + 10
        expected = (expected1 + expected2) // 2
        eq_(expected, my_avgdiff(blocks1, blocks2))
    def test_blocks_not_the_same_size(self):
        b = (0,0,0)
        with raises(DifferentBlockCountError):
            my_avgdiff([b,b],[b])
    def test_first_arg_is_empty_but_not_second(self):
        #Don't return 0 (as when the 2 lists are empty), raise!
        b = (0,0,0)
        with raises(DifferentBlockCountError):
            my_avgdiff([],[b])
    def test_limit(self):
        # Once the running average exceeds the limit, avgdiff may stop early.
        ref = (0,0,0)
        b1 = (10,10,10) #avg 30
        b2 = (20,20,20) #avg 45
        b3 = (30,30,30) #avg 60
        blocks1 = [ref,ref,ref]
        blocks2 = [b1,b2,b3]
        eq_(45,my_avgdiff(blocks1,blocks2,44))
    def test_min_iterations(self):
        # min_iter forces at least 3 comparisons even past the limit.
        ref = (0,0,0)
        b1 = (10,10,10) #avg 30
        b2 = (20,20,20) #avg 45
        b3 = (10,10,10) #avg 40
        blocks1 = [ref,ref,ref]
        blocks2 = [b1,b2,b3]
        eq_(40,my_avgdiff(blocks1,blocks2,45 - 1,3))
    # Bah, I don't know why this test fails, but I don't think it matters very much
    # def test_just_over_the_limit(self):
    #     #A score just over the limit might return exactly the limit due to truncating. We should
    #     #ceil() the result in this case.
    #     ref = (0,0,0)
    #     b1 = (10,0,0)
    #     b2 = (11,0,0)
    #     blocks1 = [ref,ref]
    #     blocks2 = [b1,b2]
    #     eq_(11,my_avgdiff(blocks1,blocks2,10))
    #
    def test_return_at_least_1_at_the_slightest_difference(self):
        # A tiny difference must not be rounded down to "identical".
        ref = (0,0,0)
        b1 = (1,0,0)
        blocks1 = [ref for i in range(250)]
        blocks2 = [ref for i in range(250)]
        blocks2[0] = b1
        eq_(1,my_avgdiff(blocks1,blocks2))
    def test_return_0_if_there_is_no_difference(self):
        ref = (0,0,0)
        blocks1 = [ref,ref]
        blocks2 = [ref,ref]
        eq_(0,my_avgdiff(blocks1,blocks2))
# class TCmaxdiff(unittest.TestCase):
# def test_empty(self):
# self.assertRaises(NoBlocksError,maxdiff,[],[])
#
# def test_two_blocks(self):
# b1 = (5,10,15)
# b2 = (255,250,245)
# b3 = (0,0,0)
# b4 = (255,0,255)
# blocks1 = [b1,b2]
# blocks2 = [b3,b4]
# expected1 = 5 + 10 + 15
# expected2 = 0 + 250 + 10
# expected = max(expected1,expected2)
# eq_(expected,maxdiff(blocks1,blocks2))
#
# def test_blocks_not_the_same_size(self):
# b = (0,0,0)
# self.assertRaises(DifferentBlockCountError,maxdiff,[b,b],[b])
#
# def test_first_arg_is_empty_but_not_second(self):
# #Don't return 0 (as when the 2 lists are empty), raise!
# b = (0,0,0)
# self.assertRaises(DifferentBlockCountError,maxdiff,[],[b])
#
# def test_limit(self):
# b1 = (5,10,15)
# b2 = (255,250,245)
# b3 = (0,0,0)
# b4 = (255,0,255)
# blocks1 = [b1,b2]
# blocks2 = [b3,b4]
# expected1 = 5 + 10 + 15
# expected2 = 0 + 250 + 10
# eq_(expected1,maxdiff(blocks1,blocks2,expected1 - 1))
# | true |
452a026b17f98a2d270b1a934c52565722dc5547 | Python | lawrann/AI-for-stock-market-trending-analysis | /fyp_/Stocks/stock_split_train_test.py | UTF-8 | 1,779 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import csv
import os
from datetime import datetime

from tqdm import tqdm
#%%
def split_train_test_csv(source_filepath, dest_folder, num_training_records, train_name, test_name):
    """Split a CSV file into a train CSV and a test CSV.

    The header row is copied into both outputs; the first
    ``num_training_records`` data rows go to ``<dest_folder>/<train_name>.csv``
    and every remaining row goes to ``<dest_folder>/<test_name>.csv``.

    Fixes over the original:
    - paths are built with os.path.join instead of a hard-coded '\\\\'
      separator, so the function also works on non-Windows systems
      (the destination folder is created when missing);
    - a source file with fewer data rows than ``num_training_records``
      no longer raises an uncaught StopIteration; the test file is
      simply left with only the header.
    """
    train_rows = []
    test_rows = []
    with open(source_filepath, 'r') as source:
        reader = csv.reader(source)
        header = next(reader)
        train_rows.append(header)
        test_rows.append(header)
        for row in reader:
            # train_rows holds the header plus up to num_training_records rows.
            if len(train_rows) <= num_training_records:
                train_rows.append(row)
            else:
                test_rows.append(row)
    os.makedirs(dest_folder, exist_ok=True)
    for name, rows in ((train_name, train_rows), (test_name, test_rows)):
        out_path = os.path.join(dest_folder, name + '.csv')
        # QUOTE_ALL matches the original output format.
        with open(out_path, 'w', newline='', encoding='utf-8') as out_file:
            writer = csv.writer(out_file, quoting=csv.QUOTE_ALL)
            writer.writerows(rows)
#%%
# Change num_training_records according to ur number of training records.
# The rest will be testing set
# source_filepath, dest_folder, num_training_records, train_name, test_name
# NOTE(review): the non-raw 'spy\s...' / 'citi\c...' literals rely on
# backslash sequences that Python happens not to interpret; prefer raw
# strings (as the atvi call does) for Windows paths.
#%%
#SPY
split_train_test_csv('spy\spy_Sentiment_Final.csv', 'spy', 1007, 'spy_train', 'spy_test')
#%%
#Citi
split_train_test_csv('citi\citi_Sentiment_Final.csv', 'citi', 1007, 'citi_train', 'citi_test')
#%%
#Atvi
split_train_test_csv(r'atvi\atvi_Sentiment_Final.csv', 'atvi', 1007, 'atvi_train', 'atvi_test')
#%%
| true |
26705a067aad85e9eae87225761038b67b19b32a | Python | piaoliangkb/python-socket | /socket-client_multiconn.py | UTF-8 | 1,111 | 2.875 | 3 | [] | no_license | from socket import *
import time
# Create a TCP client socket.
tcpClientSocket = socket(AF_INET, SOCK_STREAM)
# Client-side keepalive (disabled): keep a long-lived connection alive by
# sending probes while idle.  Per the original (mis-encoded) comments: if
# no data moves for 10 seconds, send a keepalive probe every 3 seconds,
# up to 5 times; if all 5 go unanswered the connection is considered broken.
# tcpClientSocket.setsockopt(SOL_SOCKET, SO_KEEPALIVE, 1)
# tcpClientSocket.setsockopt(IPPROTO_TCP, TCP_KEEPIDLE, 10)
# tcpClientSocket.setsockopt(IPPROTO_TCP, TCP_KEEPINTVL, 3)
# tcpClientSocket.setsockopt(IPPROTO_TCP, TCP_KEEPCNT, 5)
# Connect to the server.
serAddr = ('192.168.120.3', 9996)
tcpClientSocket.connect(serAddr)
while True:
    localtime = time.asctime(time.localtime(time.time()))  # NOTE(review): computed but never used
    data = input()
    data = (data + f" at time [{time.time()}]").encode("utf-8")
    tcpClientSocket.send(data)
    print(f"finish sending data at time [{time.time()}]")
    # recvdata = tcpClientSocket.recv(1024)
    # print("data received : {} at time {}".format(recvdata, time.time()))
# Close the socket.  NOTE(review): unreachable -- the loop above never exits.
tcpClientSocket.close()
| true |
0706633186fe68eb3597078c1c7af77bbe2ea0b4 | Python | Anaconda-Platform/anaconda-project | /anaconda_project/status.py | UTF-8 | 1,453 | 2.671875 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""The Status type."""
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from anaconda_project.internal.metaclass import with_metaclass
class Status(with_metaclass(ABCMeta)):
    """Class describing a failure or success status, with logs.
    Values of this class evaluate to True in a boolean context
    if the status is successful.
    Values of this class are immutable.
    """
    def __init__(self):
        """Construct an abstract Status."""
        # Intentionally empty: subclasses hold the actual status data.
    @property
    @abstractmethod
    def status_description(self):
        """Get a one-line-ish description of the status."""
        pass # pragma: no cover
    @property
    @abstractmethod
    def errors(self):
        """Get error logs relevant to the status.
        A rule of thumb for this field is that anything in here should also have been
        logged to a ``Frontend`` instance, so this field is kind of just a cache
        of error messages generated by a particular operation for the convenience
        of the caller.
        """
        pass # pragma: no cover
| true |
3f7dfb6ff5936e33c48f67ffe26cc7f3011d8914 | Python | PriyaSSinha/MachineLearning | /GenderClassifier.py | UTF-8 | 1,316 | 2.84375 | 3 | [] | no_license | from sklearn import tree
from sklearn.metrics import accuracy_score
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn import neighbors
#training data [height,weight,shoe size]
X=[[181,80,44],[177,70,43],[160,60,38],[154,54,37],[166,65,40],[190,90,47],[175,64,39],[177,70,40],[159,55,37],[171,75,42],[181,85,43]]
Y = ['male', 'female', 'female', 'female', 'male', 'male','male', 'female', 'male', 'female', 'male']
clf=tree.DecisionTreeClassifier()
clf1 = svm.SVC() #Support Vector classifier
clf2 = GaussianNB() #Naive Bayes
clf3 = neighbors.KNeighborsClassifier() #K neighbors classifier
clf=clf.fit(X,Y)
clf1 = clf1.fit(X,Y)
clf2 = clf2.fit(X,Y)
clf3 = clf3.fit(X,Y)
#test data
test=[[190,70,43],[170,43,38],[179,90,40]]
test1=['male','female','male']
predict=clf.predict(test)
prediction1 = clf1.predict(test)
prediction2 = clf2.predict(test)
prediction3 = clf3.predict(test)
print(predict)
print("Prediction for SVM : ",prediction1)
print("Accuracy for SVM : ",accuracy_score(test1,prediction1))
print("Prediction for Naive Bayes : ",prediction2)
print("Accuracy for Naive Bayes : ",accuracy_score(test1,prediction2))
print("Prediction for K neighbors : ",prediction3)
print("Accuracy for K neighbors : ",accuracy_score(test1,prediction3))
p=input("") | true |
c4af6ac4f3b8b7b14414c155611811aa2a2eea11 | Python | connoryang/1v1dec | /carbonui/util/sortUtil.py | UTF-8 | 702 | 2.609375 | 3 | [] | no_license | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\carbonui\util\sortUtil.py
def Sort(lst):
    """Sort *lst* in place, case-insensitively by string form, and return it.

    Fix: the original passed a cmp-style comparison function to
    ``list.sort``, which only works on Python 2 (the cmp parameter and the
    ``cmp`` builtin were removed in Python 3).  ``key=`` gives the same
    ordering on both versions and computes the key once per element
    instead of once per comparison.
    """
    lst.sort(key=lambda item: str(item).upper())
    return lst
def SortListOfTuples(lst, reverse = 0):
    """Sort (key, value) tuples by key and return just the values."""
    ordered = sorted(lst, key=lambda pair: pair[0], reverse=reverse)
    return [value for _key, value in ordered]
def SortByAttribute(lst, attrname = 'name', idx = None, reverse = 0):
    """Sort items by the named attribute (of the item, or of item[idx]).

    Missing attributes sort as None, mirroring getattr's default.
    """
    if idx is None:
        keyed = [(getattr(item, attrname, None), item) for item in lst]
    else:
        keyed = [(getattr(item[idx], attrname, None), item) for item in lst]
    return SortListOfTuples(keyed, reverse)
bc6a3a751cc3e2566e1c79d6bb86a9bf994b815b | Python | ciblois/data-prework | /1.-Python/3.-Bus/bus.py | UTF-8 | 1,203 | 3.390625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 18:05:29 2020
@author: Cinthya Blois
"""
#bus_stop = (in, out)
# Each stop is a (boarding, alighting) pair of passenger counts.
stops = [(10, 0), (4, 1), (3, 5), (3, 4), (5, 1), (1, 5), (5, 8), (4, 6), (2, 3)]
stops_list = []
print(len(stops))
# Split the tuples into parallel boarding / alighting lists.
passengers_in = [stop[0] for stop in stops]
passengers_out = [stop[1] for stop in stops]
# Net change in occupation at each stop.
passengers = [on - off for on, off in zip(passengers_in, passengers_out)]
# Running total: occupation after each stop.
passengers_stops = []
current = 0
for change in passengers:
    current += change
    passengers_stops.append(current)
print(passengers_stops)
print("The maximum occupation of the bus is", max(passengers_stops))
average = sum(passengers_stops) / len(passengers_stops)
print("The average occupation is", average)
import statistics
print("The standart deviation is", statistics.stdev(passengers_stops))
cd2cd03ac0d388e89ce9ce40245a7534036c0970 | Python | whileskies/data-mining-project | /decision_tree_classification/android_dt_classify.py | UTF-8 | 2,149 | 2.84375 | 3 | [] | no_license | import sys
sys.path.append("..")
from data_preprocessing import android_data_process as dp
from decision_tree_classification import id3
import os
import time
tree_file_dir = 'pickle_data/android_dt_tree.pickle'  # cache location for the built tree
def classify():
    """Build (or load from cache) an ID3 decision tree for the Android
    dataset and report its accuracy on the test split.

    NOTE(review): the log strings below appear to be mis-encoded (mojibake)
    Chinese messages; they are preserved byte-for-byte here.
    """
    # These stay 0 when the tree is loaded from cache, so the build-time
    # line at the bottom then reports 0 seconds.
    build_tree_start_time = 0
    build_tree_end_time = 0
    if os.path.exists(tree_file_dir):
        print('ๅณ็ญๆ ๅทฒไฟๅญ')
        d_tree, feature_labels, train_data_set, test_features_set, test_class_labels = id3.load_tree(tree_file_dir)
    else:
        print('ๅณ็ญๆ ๆชไฟๅญ๏ผ้ๆฐๅปบๆ ไธญ')
        # Preprocess and cache the dataset on first run.
        if not os.path.exists(dp.android_dataset_dir):
            print('ๆฐๆฎ้ขๅค็ๅนถไฟๅญไธญ')
            android_dataset = dp.load_file_save_dataset()
        else:
            android_dataset = dp.load_android_dataset(dp.android_dataset_dir)
        train_data_set = android_dataset.get_combined_train()
        feature_labels = android_dataset.feature_labels
        test_features_set = android_dataset.test_features
        test_class_labels = android_dataset.test_class_labels
        # print(feature_labels)
        build_tree_start_time = time.perf_counter()
        d_tree = id3.create_tree(train_data_set, feature_labels)
        build_tree_end_time = time.perf_counter()
        id3.store_tree(tree_file_dir, d_tree, feature_labels, train_data_set, test_features_set, test_class_labels)
    print(d_tree)
    # Classify every test sample and count the correct predictions.
    predict_start_time = time.perf_counter()
    acc = 0
    for i in range(len(test_features_set)):
        class_label = id3.classify(d_tree, feature_labels, test_features_set[i])
        if class_label == test_class_labels[i]:
            acc += 1
            print('ๆญฃ็กฎ', class_label)
        else:
            print('้่ฏฏ, ้ขๆต็ฑปๅซ๏ผ%d, ๆญฃ็กฎ็ฑปๅซ๏ผ%d' % (class_label, test_class_labels[i]))
    print('\nๆญฃ็กฎ็๏ผ%.2f%%' % (100.0 * acc / len(test_features_set)))
    predict_end_time = time.perf_counter()
    print('ๅณ็ญๆ -ๆๅปบๅณ็ญๆ ่ฟ่กๆถ้ด๏ผ%s็ง' % (build_tree_end_time - build_tree_start_time))
    print('ๅณ็ญๆ -้ขๆต้ถๆฎต่ฟ่กๆถ้ด๏ผ%s็ง' % (predict_end_time - predict_start_time))
if __name__ == '__main__':
    classify()
| true |
55b631ee541d140418886ab16b090e20f3e61cdc | Python | dhruv423/Software-Dev-Coursework | /A2/src/CompoundT.py | UTF-8 | 1,809 | 3.5625 | 4 | [] | no_license | ## @file CompoundT.py
# @author Dhruv Bhavsar
# @brief Class for holding a MolecSet
# @date Feb 3, 2020
from MoleculeT import *
from ChemEntity import *
from Equality import *
from ElmSet import *
from MolecSet import *
## @brief Class that represents a Compound, inherits ChemEntity and Equality
class CompoundT(ChemEntity, Equality):

    ## @brief Initialize the compound from a set of molecules
    #  @param molec_set - MolecSet held by this compound
    def __init__(self, molec_set):
        self.__C = molec_set

    ## @brief Accessor for the underlying molecule set
    #  @return the stored MolecSet
    def get_molec_set(self):
        return self.__C

    ## @brief Count the atoms of one element across every molecule
    #  @param element - ElementT to count
    #  @return total number of atoms of element in the compound
    def num_atoms(self, element):
        total = 0
        for molecule in self.__C.to_seq():
            total += molecule.num_atoms(element)
        return total

    ## @brief Collect the distinct elements present in the compound
    #  @return ElmSet built from each molecule's element
    def constit_elems(self):
        elements = [molecule.get_elm() for molecule in self.__C.to_seq()]
        return ElmSet(elements)

    ## @brief Compare two compounds by their molecule sets
    #  @param other_compound - compound to compare with
    #  @return true when the underlying MolecSets are equal
    def equals(self, other_compound):
        return self.__C.equals(other_compound.get_molec_set())

    def __eq__(self, other):
        return self.equals(other)
| true |
23dfa429d24ccebc3e83776cb1cc8eecf6bd07d1 | Python | Karthikzee/SENTINA | /analysis.py | UTF-8 | 464 | 3.125 | 3 | [] | no_license | from textblob import TextBlob
def get_tweet_sentiment(tweet):
    """Classify the sentiment of *tweet* via TextBlob's polarity score.

    Returns 'positive', 'neutral' or 'negative'.
    """
    polarity = TextBlob(tweet).sentiment.polarity
    if polarity > 0:
        return 'positive'
    if polarity == 0:
        return 'neutral'
    return 'negative'
| true |
044c7c2a83624b9a2a0f58660a10359f04e10993 | Python | alk051/chapter7 | /modules/environment.py | UTF-8 | 223 | 2.53125 | 3 | [] | no_license | print ("Este modulo obtem qualquer variavel de ambiente definida no computador remoto em que o cavalo de TRoia estivar executando ")
import os
def run(**args):
    """Return the remote host's environment variables as a string.

    ``**args`` is accepted (and ignored) to match the generic
    module-runner calling convention used by the surrounding tool.
    """
    # Fix: the original used the Python-2-only `print "..."` statement,
    # which is a SyntaxError on Python 3.  The parenthesized call works
    # identically on both versions.
    print("[*] In environment module.")
    return str(os.environ)
| true |