blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
63d627c88a9aa71a2769bf5b8910517e7fd68c8d | Python | MdAlSiam/My-Codeforces-Solutions | /1293B.py | UTF-8 | 88 | 3.3125 | 3 | [] | no_license | n = int(input())
# Accumulate the n-th harmonic number H(n) = 1 + 1/2 + ... + 1/n
# (answer to Codeforces 1293B) and print it.
total = 0.00
for k in range(1, n + 1):
    total += 1.00 / k
print(total)
| true |
edcf6b0dd77e51c4dd1d843da33877a96b236c2f | Python | isthegoal/Predicting_Air_Quality | /model/XGBOOST.py | UTF-8 | 14,153 | 2.53125 | 3 | [] | no_license | #-*-coding:utf-8-*-
import pandas as pd
import lightgbm as lgb
import numpy as np
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
import h5py
import pickle
from xgboost import XGBRegressor as XGBR
from pandas import DataFrame as DF
from sklearn import metrics
def smape_error(preds, train_data):
    '''LightGBM-style eval function.

    Returns the (metric_name, value, is_higher_better) triple expected by
    lightgbm's ``feval`` hook; the value is the SMAPE of ``preds`` against
    the dataset's labels.
    '''
    labels = train_data.get_label()
    smape = np.mean(np.fabs(preds - labels) / (preds + labels) * 2)
    return 'error', smape, False
# Metric 2: RMSE (root mean squared error).
def rmse(y_test, y):
    '''Root of the mean squared difference between the two arrays.'''
    squared_errors = (y_test - y) ** 2
    return np.sqrt(np.mean(squared_errors))
def R2(y_test, y_true):
    '''Coefficient of determination of predictions y_test against y_true.'''
    ss_residual = ((y_true - y_test) ** 2).sum()
    ss_total = ((y_true - y_true.mean()) ** 2).sum()
    return 1 - ss_residual / ss_total
# Hand-written Bias metric, following the textbook formula:
# the mean of the signed differences (y_true - y_test).
def Bias(y_true, y_test):
    '''Mean signed difference between the two arrays.'''
    signed_errors = y_true - y_test
    return np.mean(signed_errors)
def f1(x):
    '''Forward log transform: log(1 + x).'''
    return np.log(1 + x)
def rf1(x):
    '''Inverse of f1: maps log-space values back via exp(x) - 1.'''
    grown = np.exp(x)
    return grown - 1
def get_score(pred, valid_y_exp):
    '''SMAPE-style score between predictions and ground truth (lower is better).'''
    smape_terms = np.abs(pred - valid_y_exp) / (pred + valid_y_exp) * 2
    return np.mean(smape_terms)
#
def get_pic(model, feature_name):
    '''
    Rank features by the fitted model's importances.

    Returns a DataFrame with columns 'name' and 'score', sorted by
    descending score with a fresh index.
    '''
    ranking = DF()
    print('is is ', len(feature_name))
    print('is ', len(model.feature_importances_))
    ranking['name'] = feature_name
    ranking['score'] = model.feature_importances_
    print('获得最重要的名称')
    return ranking.sort_values(by=['score'], ascending=False).reset_index(drop=True)
def pre_model_xgb_train():
    """Feature-selection pass.

    Fits a default XGBRegressor on the first ~600k rows, keeps the top-300
    columns by feature importance, refits on them, then prints the SMAPE on
    each held-out 840-row slice (indices 750-1199) and the average.

    Side effects only (prints); relies on the HDF5 dump below existing.
    """
    f = h5py.File(
        './data/slice_eve_data/the_last_all__7_13_pca_com_800.h5',
        'r')
    # f['data'].value holds the whole feature matrix; the last column is
    # the regression target.
    data = f['data'].value
    weidu_size = data.shape[1]
    pm25_data = pd.DataFrame(data, columns=[str(i) for i in range(0, weidu_size)])
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- this needs
    # .loc/.iloc on a modern pandas. Confirm the pinned pandas version.
    pm25_data_60 = pm25_data.ix[1:600000, :]
    train = pm25_data_60[[str(i) for i in range(0, weidu_size - 1)]]
    y = pm25_data_60[str(weidu_size - 1)]
    print('小训练结束')
    # First fit with default hyper-parameters, only to obtain importances.
    reg = XGBR()
    reg.fit(train, y)
    feature_name1 = [str(i) for i in range(0, weidu_size - 1)]
    nums=300 #important feature number (top-N columns kept)
    important_column = list(set(get_pic(reg, feature_name1).head(nums)['name']))
    # Second fit restricted to the selected columns.
    reg1 = XGBR()
    reg1.fit(train[important_column], y)
    # Print the score on every held-out time slice of the dataset.
    print('重要特恒长度为:',len(important_column))
    the_sum_score = 0
    the_sum_num = 0
    for i in range(750, 1200):
        start_num = 840 * i
        end_num = (i + 1) * 840
        print('start_num:', start_num)
        print('end_num:', end_num)
        pm_data = pm25_data.ix[int(start_num):int(end_num), :]
        pred_PM25 = reg1.predict(pm_data[important_column])
        score = get_score(pred_PM25, pm_data[str(weidu_size - 1)])
        print(str(i) + '次,计算留出集合上损失得分:', score)
        the_sum_score = the_sum_score + score
        the_sum_num = the_sum_num + 1
    # Average SMAPE across all evaluated slices.
    print('探索特征数两 平均得分:', the_sum_score / the_sum_num)
    print('重要的列有:',important_column)
def model_xgb_train():
    """Train the final tuned XGBRegressor on the pre-selected columns.

    Loads the HDF5 dump, fits on the first ~600k rows using the feature
    subset found by pre_model_xgb_train(), prints the SMAPE per held-out
    840-row slice (750-1199) plus the average, and pickles the model.
    """
    f = h5py.File(
        './data/slice_eve_data/the_last_all__7_13_pca_com_800.h5',
        'r')
    # f['data'].value holds the whole feature matrix; the last column is
    # the regression target.
    data = f['data'].value
    weidu_size = data.shape[1]
    pm25_data = pd.DataFrame(data, columns=[str(i) for i in range(0, weidu_size)])
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- needs
    # .loc/.iloc on a modern pandas.
    pm25_data_60 = pm25_data.ix[1:600000, :]
    train = pm25_data_60[[str(i) for i in range(0, weidu_size - 1)]]
    y = pm25_data_60[str(weidu_size - 1)]
    # Columns selected offline (top-300 importances from pre_model_xgb_train).
    important_column = ['1322', '2184', '429', '1430', '2555', '768', '1473', '40', '336', '1739', '1734', '544', '24',
                        '1251', '1540', '1274', '194', '693', '1757', '705', '1173', '432', '793', '362', '195', '1394',
                        '279', '1284', '288', '458', '1429', '1549', '396', '669', '2542', '1505', '1772', '1642',
                        '753', '1076', '1281', '1106', '542', '1442', '2540', '1469', '1716', '681', '518', '1457',
                        '1269', '813', '1083', '1279', '158', '434', '1775', '871', '657', '1659', '1737', '691', '455',
                        '555', '1707', '1567', '264', '1684', '1552', '2559', '1414', '1651', '1203', '1464', '820',
                        '729', '1660', '1545', '99', '1438', '796', '1509', '2539', '384', '1730', '127', '1776',
                        '1701', '1729', '2528', '1626', '1493', '12', '650', '647', '482', '211', '1161', '1107',
                        '1675', '1311', '903', '314', '1736', '1623', '1197', '964', '645', '783', '494', '1299',
                        '1541', '537', '549', '2541', '782', '182', '916', '1725', '348', '1238', '2139', '381', '206',
                        '28', '146', '4', '444', '369', '2560', '2549', '1250', '1740', '1726', '890', '169', '1711',
                        '1752', '1722', '225', '1557', '554', '597', '1149', '614', '183', '1447', '1599', '697', '398',
                        '825', '177', '1342', '698', '734', '1239', '16', '1418', '372', '861', '877', '446', '147',
                        '204', '928', '1413', '2561', '506', '1488', '2530', '578', '674', '2041', '837', '2543',
                        '1320', '949', '468', '1395', '572', '1754', '302', '1226', '291', '1742', '148', '1131', '276',
                        '1717', '710', '1671', '70', '1735', '1354', '1779', '873', '1514', '945', '1440', '1310',
                        '626', '1435', '60', '1721', '2558', '2534', '590', '1663', '1491', '499', '1720', '1728',
                        '633', '360', '1767', '1763', '495', '1495', '1613', '110', '324', '1713', '207', '1081',
                        '1577', '218', '1648', '2531', '2546', '358', '2556', '746', '2', '988', '779', '2545', '1485',
                        '1209', '1478', '1176', '1468', '976', '638', '979', '1743', '2537', '170', '1647', '2552', '9',
                        '1718', '1012', '1334', '1738', '876', '662', '2535', '1683', '1687', '1060', '607', '1179',
                        '865', '1378', '2532', '2551', '108', '530', '1512', '770', '237', '1719', '849', '1759',
                        '1636', '721', '1477', '1831', '1589', '0', '2553', '2550', '2533', '1503', '386', '1125',
                        '1579', '1695', '453', '1543', '685', '1833', '1069', '351', '1741', '1611', '1591', '1497',
                        '1404', '819']
    # Hand-tuned hyper-parameters (presumably from an earlier grid search --
    # no tuning code is kept here; verify against experiment notes).
    reg1 = XGBR(learning_rate=0.05,
                n_estimators=600,
                max_depth=5,
                min_child_weight=1,
                subsample=0.8,
                colsample_bytree=0.8,
                gamma=0,
                reg_alpha=0,
                reg_lambda=1, scale_pos_weight=1, n_jobs=-1)
    # # cv_model = cv(lgb_model, train_data[feature_name], train_label, cv=10, scoring='f1')
    reg1.fit(train[important_column], y)
    print('重要特恒长度为:', len(important_column))
    the_sum_score = 0
    the_sum_num = 0
    # Evaluate on every held-out 840-row slice.
    for i in range(750, 1200):
        start_num = 840 * i
        end_num = (i + 1) * 840
        print('start_num:', start_num)
        print('end_num:', end_num)
        pm_data = pm25_data.ix[int(start_num):int(end_num), :]
        pred_PM25 = reg1.predict(pm_data[important_column])
        score = get_score(pred_PM25, pm_data[str(weidu_size - 1)])
        print(str(i) + '次,计算留出集合上损失得分:', score)
        the_sum_score = the_sum_score + score
        the_sum_num = the_sum_num + 1
    # Average SMAPE across all evaluated slices.
    print('探索特征数两 平均得分:', the_sum_score / the_sum_num)
    print('重要的列有:', important_column)
    # Persist the trained model with pickle for model_xgb_predict().
    model_file = './data/save_model/xgb_best_yishen_0.425.model'
    with open(model_file, 'wb') as fout:
        pickle.dump(reg1, fout)
#
def model_xgb_predict():
    """Evaluate the pickled XGB model on the held-out slices.

    Prints per-slice SMAPE plus averaged SMAPE/RMSE/R2/MAE/Bias over slices
    750-1199, then per-forecast-hour (0-23) SMAPE aggregated over the same
    range, grouped into the 1-3 / 4-9 / 9-16 / 17-24 hour buckets.
    """
    f = h5py.File(
        './data/slice_eve_data/the_last_all__7_13_pca_com_800.h5',
        'r')
    # f['data'].value holds the whole feature matrix; the last column is
    # the regression target.
    data = f['data'].value
    weidu_size = data.shape[1]
    pm25_data = pd.DataFrame(data, columns=[str(i) for i in range(0, weidu_size)])
    # Model previously saved by model_xgb_train().
    model_path = './data/save_model/xgb_best_yishen_0.425.model'
    xgb_model = pickle.load(open(model_path, 'rb'))
    # Must match the column list used at training time (model_xgb_train).
    important_column = ['1322', '2184', '429', '1430', '2555', '768', '1473', '40', '336', '1739', '1734', '544', '24',
                        '1251', '1540', '1274', '194', '693', '1757', '705', '1173', '432', '793', '362', '195', '1394',
                        '279', '1284', '288', '458', '1429', '1549', '396', '669', '2542', '1505', '1772', '1642',
                        '753', '1076', '1281', '1106', '542', '1442', '2540', '1469', '1716', '681', '518', '1457',
                        '1269', '813', '1083', '1279', '158', '434', '1775', '871', '657', '1659', '1737', '691', '455',
                        '555', '1707', '1567', '264', '1684', '1552', '2559', '1414', '1651', '1203', '1464', '820',
                        '729', '1660', '1545', '99', '1438', '796', '1509', '2539', '384', '1730', '127', '1776',
                        '1701', '1729', '2528', '1626', '1493', '12', '650', '647', '482', '211', '1161', '1107',
                        '1675', '1311', '903', '314', '1736', '1623', '1197', '964', '645', '783', '494', '1299',
                        '1541', '537', '549', '2541', '782', '182', '916', '1725', '348', '1238', '2139', '381', '206',
                        '28', '146', '4', '444', '369', '2560', '2549', '1250', '1740', '1726', '890', '169', '1711',
                        '1752', '1722', '225', '1557', '554', '597', '1149', '614', '183', '1447', '1599', '697', '398',
                        '825', '177', '1342', '698', '734', '1239', '16', '1418', '372', '861', '877', '446', '147',
                        '204', '928', '1413', '2561', '506', '1488', '2530', '578', '674', '2041', '837', '2543',
                        '1320', '949', '468', '1395', '572', '1754', '302', '1226', '291', '1742', '148', '1131', '276',
                        '1717', '710', '1671', '70', '1735', '1354', '1779', '873', '1514', '945', '1440', '1310',
                        '626', '1435', '60', '1721', '2558', '2534', '590', '1663', '1491', '499', '1720', '1728',
                        '633', '360', '1767', '1763', '495', '1495', '1613', '110', '324', '1713', '207', '1081',
                        '1577', '218', '1648', '2531', '2546', '358', '2556', '746', '2', '988', '779', '2545', '1485',
                        '1209', '1478', '1176', '1468', '976', '638', '979', '1743', '2537', '170', '1647', '2552', '9',
                        '1718', '1012', '1334', '1738', '876', '662', '2535', '1683', '1687', '1060', '607', '1179',
                        '865', '1378', '2532', '2551', '108', '530', '1512', '770', '237', '1719', '849', '1759',
                        '1636', '721', '1477', '1831', '1589', '0', '2553', '2550', '2533', '1503', '386', '1125',
                        '1579', '1695', '453', '1543', '685', '1833', '1069', '351', '1741', '1611', '1591', '1497',
                        '1404', '819']
    print('重要特恒长度为:', len(important_column))
    # Accumulators for the per-slice metric averages.
    the_sum_score = 0
    the_sum_num = 0
    the_rmse_score = 0
    the_r2_score = 0
    the_mae_score = 0
    the_bias_score = 0
    for i in range(750, 1200):
        start_num = 840 * i
        end_num = (i + 1) * 840
        print('start_num:', start_num)
        print('end_num:', end_num)
        # NOTE(review): DataFrame.ix was removed in pandas 1.0.
        pm_data = pm25_data.ix[int(start_num):int(end_num), :]
        pred_PM25 = xgb_model.predict(pm_data[important_column])
        score = get_score(pred_PM25, pm_data[str(weidu_size - 1)])
        rmse_score = rmse(pred_PM25, pm_data[str(weidu_size - 1)])
        r2_score = R2(pred_PM25, pm_data[str(weidu_size - 1)])
        mae_score = metrics.mean_absolute_error(pred_PM25, pm_data[str(weidu_size - 1)])
        bias_score = Bias(pred_PM25, pm_data[str(weidu_size - 1)])
        print(str(i) + '次,计算留出集合上损失得分:', score)
        the_sum_score = the_sum_score + score
        the_rmse_score = the_rmse_score + rmse_score
        the_r2_score = the_r2_score + r2_score
        the_mae_score = the_mae_score + mae_score
        the_bias_score = the_bias_score + bias_score
        the_sum_num = the_sum_num + 1
    # Averages over all evaluated slices.
    print('探索特征数两 平均得分:', the_sum_score / the_sum_num)
    print('rmse 平均得分:', the_rmse_score / the_sum_num)
    print('r2 平均得分:', the_r2_score / the_sum_num)
    print('mae 平均得分:', the_mae_score / the_sum_num)
    print('bia 平均得分:', the_bias_score / the_sum_num)
    # Second pass: SMAPE per forecast hour-of-day. Rows are taken every 24
    # positions starting at offset `she_end_hour` within the held-out range.
    score_recore_list = []
    for she_end_hour in range(0, 24):
        the_sum_score = 0
        the_sum_num = 0
        for i in range(800, 801):
            start_num = 840 * 750 + she_end_hour
            end_num = (1202) * 840
            # end_num=start_num+
            print('start_num:', start_num)
            print('end_num:', end_num)
            # pm_data = pm25_data.ix[int(start_num):int(end_num), :
            pm_data = pm25_data.ix[[i for i in range(start_num, end_num, 24)], :]
            pred_PM25 = xgb_model.predict(pm_data[important_column])
            # rmse_score / r2_score / mae_score / bias_score could be
            # computed here as well; only SMAPE is collected.
            score = get_score(pred_PM25, pm_data[str(weidu_size - 1)])
            print(str(i) + '次,计算留出集合上损失得分:', score)
            # the_sum_score = the_sum_score + score
            # the_rmse_score = the_rmse_score + rmse_score
            # the_r2_score = the_r2_score + r2_score
            # the_mae_score = the_mae_score + mae_score
            # the_bias_score = the_bias_score + bias_score
            score_recore_list.append(score)
    # Bucketed per-hour SMAPE summaries.
    print('1-3 小时上平均smape指标值:',np.mean(score_recore_list[0:3]))
    print('4-9 小时上平均smape指标值:',np.mean(score_recore_list[3:9]))
    print('9-16 小时上平均smape指标值:',np.mean(score_recore_list[9:16]))
    print('17-24 小时上平均smape指标值:',np.mean(score_recore_list[16:24]))
| true |
92dd735cac212d3f3ed043761a96474a16eb7488 | Python | dillonmk/Python_git_practice | /helloworld.py | UTF-8 | 144 | 3.046875 | 3 | [] | no_license | import os
# Clear the terminal (shells out to `clear`) before printing.
os.system('clear')

# Demo of single-line and multi-line comments, a variable, and print().
full_name = "Dillon Kabot"
print('hello world')
print(full_name)
| true |
79be4b55c082a9607014d7ba1d612e251ceb90fe | Python | cheery/20131031-compiler | /analysis.py | UTF-8 | 3,367 | 2.515625 | 3 | [] | no_license | from structures import Variable
def dominance_frontiers(function):
    """Compute immediate dominators, dominance frontiers and phi placement.

    `function` is a sequence of basic blocks whose `prec`/`succ`/`provides`/
    `sustains` attributes were filled in by variable_flow(). On return each
    block has `idom`, `idom_depth`, `frontiers` and `phi` set.
    """
    def peel(obj, depth):
        # Walk up the immediate-dominator chain until the block sits at
        # `depth` in the dominator tree.
        for _ in range(depth, obj.idom_depth):
            obj = obj.idom
        return obj
    # Reset dominator bookkeeping on every block.
    for block in function:
        block.idom = None
        block.idom_depth = 0
        block.frontiers = set()
    # The entry block must have no predecessors.
    assert len(function[0].prec) == 0
    # Breadth-first walk from the entry block, intersecting dominator
    # chains of already-visited predecessors (NOTE(review): single-pass
    # variant of the Cooper/Harvey/Kennedy algorithm -- the classic form
    # iterates to a fixed point; confirm this is sufficient for the CFGs
    # produced here).
    breath = [function[0]]
    while len(breath) > 0:
        block = breath.pop(0)
        for target in block.succ:
            idom = target
            if target.idom is None:
                # First time we reach `target`: its idom is the current block.
                idom = block
                breath.append(target)
            else:
                # Seen before: intersect the two dominator chains by first
                # peeling both to the same depth, then walking up in lockstep.
                depth = min(target.idom_depth, block.idom_depth)
                idom = peel(target, depth)
                cdom = peel(block, depth)
                while idom is not cdom:
                    idom = idom.idom
                    cdom = cdom.idom
            if target is not idom:
                target.idom = idom
                target.idom_depth = idom.idom_depth + 1
    # Dominance frontiers: for every join point (>= 2 predecessors), walk
    # each predecessor's idom chain up to the join's idom, marking the join
    # as a frontier of every block passed.
    for block in function:
        block.phi = set()
        if len(block.prec) >= 2:
            for runner in block.prec:
                while runner != block.idom:
                    runner.frontiers.add(block)
                    runner = runner.idom
    def frontier_visit(frontier, var):
        # Recursively place phi nodes for `var` along the iterated
        # dominance frontier; `phi` membership doubles as the visited set.
        if var in frontier.phi:
            return
        if var in frontier.sustains:
            frontier.phi.add(var)
            for deep_frontier in frontier.frontiers:
                frontier_visit(deep_frontier, var)
    # Seed phi placement from every definition site.
    for block in function:
        for var in block.provides:
            for frontier in block.frontiers:
                frontier_visit(frontier, var)
# done = False
# while not done:
# done = True
# for block in function:
# phis = block.provides | block.phi
# for frontier in block.frontiers:
# k = len(frontier.phi)
# frontier.phi.update(frontier.sustains & phis)
# if k < len(frontier.phi):
# done = False
def variable_flow(function):
    """Per-block liveness analysis.

    For each basic block, computes:
      provides - variables defined ('let' targets) in the block,
      needs    - variables used before any definition (backward scan),
      sustains - live-in set, extended below to a fixed point,
      succ/prec - CFG edges derived from the block terminator.

    NOTE(review): uses `args.next()` -- Python 2 iterator protocol; under
    Python 3 this must be `next(args)`.
    """
    for block in function:
        provides = set()
        needs = set()
        # Scan instructions backwards so a use before a redefinition is
        # recorded as a need.
        for instruction in reversed(block):
            args = iter(instruction)
            if instruction.name == 'let':
                # First operand of 'let' is the defined variable.
                var = args.next()
                provides.add(var)
                needs.discard(var)
            # Remaining operands that are Variables are uses.
            needs.update(arg for arg in args if isinstance(arg, Variable))
        block.provides = provides
        block.needs = needs
        block.sustains = needs.copy()
        block.succ = block_jump_targets(block)
        block.prec = []
    # Invert successor edges into predecessor lists.
    for block in function:
        for target in block.succ:
            target.prec.append(block)
    # Propagate liveness backwards to a fixed point: a block sustains
    # whatever a successor sustains, minus what the block itself defines.
    done = False
    while not done:
        done = True
        for block in function:
            k = len(block.sustains)
            for target in block.succ:
                block.sustains.update(target.sustains - block.provides)
            if k < len(block.sustains):
                done = False
def block_jump_targets(block):
    """Return the successor targets encoded by the block's terminator.

    'branch' has a single target (operand 0), 'cbranch' two (operands 1
    and 2), 'ret' none. Any other terminator is an error.
    """
    terminator = block[-1]
    kind = terminator.name
    if kind == 'ret':
        return ()
    if kind == 'branch':
        return (terminator[0],)
    if kind == 'cbranch':
        return (terminator[1], terminator[2])
    raise Exception("unknown terminator: %s" % terminator.repr())
| true |
1aec5b12d7ad9b5e6672827932acf93f9c49d171 | Python | VinidiktovEvgenijj/PY111-april | /Tasks/e2_dijkstra.py | UTF-8 | 651 | 3.625 | 4 | [] | no_license | from typing import Any
import networkx as nx
def dijkstra_algo(g: "nx.DiGraph", starting_node: Any) -> dict:
    """
    Count shortest paths from starting node to all nodes of graph g
    :param g: Graph from NetworkX
    :param starting_node: starting node from g
    :return: dict like {'node1': 0, 'node2': 10, '3': 33, ...} with path costs, where nodes are nodes from g

    Unreachable nodes keep a cost of float('inf'). Edge weights are read
    from the 'weight' attribute. (The annotation is a forward reference so
    defining this function does not require networkx at runtime.)
    """
    # list.pop(0) is O(n); a deque gives O(1) pops from the left.
    from collections import deque

    dist = {node: float('inf') for node in g.nodes}
    dist[starting_node] = 0
    queue = deque([starting_node])
    # Queue-based relaxation (SPFA-style): re-enqueue a node whenever its
    # tentative distance improves.
    while queue:
        current = queue.popleft()
        for neighbour, edge in g[current].items():
            candidate = dist[current] + edge['weight']
            if candidate < dist[neighbour]:
                dist[neighbour] = candidate
                queue.append(neighbour)
    return dist
| true |
08275277792398c8942b42e2dee05099419bc035 | Python | gmgall/python-hpc | /multiprocessing/multiprocessing2.py | UTF-8 | 378 | 3.09375 | 3 | [] | no_license | import multiprocessing
import time
class Processo(multiprocessing.Process):
    '''Worker process that sleeps one second, then announces its id.'''

    def __init__(self, id):
        self.id = id
        super(Processo, self).__init__()

    def run(self):
        # Simulate some work before reporting.
        time.sleep(1)
        message = "Sou o processo com ID: {}".format(self.id)
        print(message)
if __name__ == '__main__':
    # Demo: spawn a single worker and wait for it to finish before the
    # master process prints its own message.
    p = Processo(0)
    p.start()
    p.join()
    print("Sou o processo mestre")
3fe851b211f25093a40d9c7e054eaa08b0c03c2f | Python | JonathanSomer/StatisticalLearningClassCompetition | /regressors/dl_regressor.py | UTF-8 | 2,504 | 2.6875 | 3 | [] | no_license | import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from regressors.base_regressor import BaseRegressor
from data_pre_processing.clean_data import remove_bad_movies
from data_pre_processing.fill_missing_values import fill_ratings_with_mean_per_user
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation, BatchNormalization
from keras import optimizers
from keras import callbacks
from keras import regularizers
from keras import backend
class DLRegressor(BaseRegressor):
    """Keras feed-forward regressor over per-user movie-rating features.

    Expects 3-D input (samples, channels, movies); only the ratings
    channel (index 1) is used after cleaning/imputation.
    """
    def __init__(self, n_epochs = 20):
        # Fitted keras model (set by fit()).
        self._reg = None
        # Column indices dropped by remove_bad_movies, learned on first call.
        self.bad_movie_indexes = None
        # Scaler/PCA are kept but currently unused (see commented-out
        # pipeline in _prepare_X).
        self.scaler = MinMaxScaler()
        self.pca = None
        self.n_epochs = n_epochs
    def fit(self, X, Y):
        """Clean X, build the network, and train for n_epochs."""
        assert len(X.shape) == 3
        X = self._prepare_X(X, Y, train = True)
        self._reg = self._model(X)
        self._reg.fit(X, Y, epochs = self.n_epochs)
    def predict(self, X):
        """Clean X the same way as in fit() and return model predictions."""
        assert len(X.shape) == 3
        X = self._prepare_X(X)
        return self._reg.predict(X)
    def __str__(self):
        # NOTE(review): returns "knn" although this is the deep-learning
        # regressor -- looks like a copy/paste leftover; confirm nothing
        # keys on this string before changing it.
        return "knn"
    def _prepare_X(self, X_raw, Y=None, train = False):
        """Drop bad movies, impute missing ratings, keep ratings channel.

        `Y` and `train` are currently unused by the active code path (they
        fed the commented-out scaling/PCA pipeline below).
        """
        X_raw, self.bad_movie_indexes = remove_bad_movies(X_raw, self.bad_movie_indexes)
        X, _ = fill_ratings_with_mean_per_user(X_raw)
        X = X[:,1,:] # only ratings
        # if train:
        # 	self.scaler.fit(X)
        # X_rescaled = self.scaler.transform(X)
        # if train:
        # 	self.pca = PCA(n_components = 75).fit(X_rescaled)
        # X_components = self.pca.transform(X_rescaled)
        # return X_components.reshape(X.shape[0], -1)
        return X
    def rmse(self, y_true, y_pred):
        """Keras-backend RMSE, used as a training metric."""
        return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))
    def _model(self, X_train):
        """Build the MLP: 100-100-10-1 dense layers with L2 regularization."""
        model = Sequential()
        model.add(Dense(100, kernel_regularizer=regularizers.l2(0.2), input_shape=X_train[0].shape))
        model.add(Activation('relu'))
        # model.add(Dropout(0.5, name='dropout_1'))
        model.add(Dense(100, kernel_regularizer=regularizers.l2(0.2)))
        model.add(Activation('relu'))
        model.add(Dense(10, kernel_regularizer=regularizers.l2(0.2)))
        model.add(Activation('relu'))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam', metrics=[self.rmse])
        return model
| true |
30a7fe78323ba04b7dcd27daa249e115d5765fe3 | Python | sharmapieyush/python | /Data Types Assignment.py | UTF-8 | 643 | 3.921875 | 4 | [] | no_license | #Question1
# Question 1: build a list from two interactive inputs.
b=[]
a=input("Enter the input for list")
b.append(a)
f=input("Enter the input for list ")
b.append(f)
print("The list is",b)
#Question2
# Concatenate a literal list with the user-built one.
c=['google','apple','facebook','microsoft','tesla']
d=c+b
print(d)
# Question 3: count occurrences of a value in a list.
e = [1, 1, 2, 6, 4, 1]
print(e.count(1))

# Question 4: sort a list in place.
m = [3, 8, 1]
m.sort()
print(m)

# Question 5: merge two lists and print the combined, sorted result.
x = [7, 8, 9]
y = [2, 3, 4]
x.sort()
y.sort()
z = sorted(x + y)
print(z)

# Question 6: emulate a stack with pop/append.
n = [1, 2, 3, 4, 5]
n.pop()
print(n)
n.append(2)
print(n)

# Question 7: split 1..30 into even and odd numbers.
even = [num for num in range(1, 31) if num % 2 == 0]
odd = [num for num in range(1, 31) if num % 2 != 0]
print(even)
print(odd)
print(len(even))
print(len(odd))
| true |
1831ab27da901a2c477bc829bb3224621b84855d | Python | ssangitha/guvicode | /hunter_128.py | UTF-8 | 145 | 3.21875 | 3 | [] | no_license | s=input()
# Collect every substring of length >= 2 that reads the same forwards
# and backwards, then print them in discovery order.
found = []
for start in range(0, len(s) - 1):
    for stop in range(start + 1, len(s)):
        piece = s[start:stop + 1]
        if piece == piece[::-1]:
            found.append(piece)
for word in found:
    print(word)
| true |
57a799d6e481e484bc5a8591d543c1faf24210d1 | Python | eulersformula/Lintcode-LeetCode | /Zoombie_In_Matrix.py | UTF-8 | 3,200 | 3.734375 | 4 | [] | no_license | # Lintcode 598//Medium
# Description
# Give a two-dimensional grid, each grid has a value, 2 for wall, 1 for zombie, 0 for human (numbers 0, 1, 2).Zombies can turn the nearest people(up/down/left/right) into zombies every day, but can not through wall. How long will it take to turn all people into zombies? Return -1 if can not turn all people into zombies.
# Example
# Example 1:
# Input:
# [[0,1,2,0,0],
# [1,0,0,2,1],
# [0,1,0,0,0]]
# Output:
# 2
# Example 2:
# Input:
# [[0,0,0],
# [0,0,0],
# [0,0,1]]
# Output:
# 4
from typing import (
List,
)
class Solution:
    """
    @param grid: a 2D integer grid
    @return: an integer

    Cell values: 0 = human, 1 = zombie, 2 = wall. Each day every zombie
    infects its 4-directionally adjacent humans (walls block).
    """

    def helper(self, grid: List[List[int]], i: int, j: int) -> bool:
        """Infect cell (i, j) if it is inside the grid and holds a human.

        Returns True when a human was turned into a zombie, else False.
        """
        if 0 <= i < len(grid) and 0 <= j < len(grid[0]) and grid[i][j] == 0:
            grid[i][j] = 1
            return True
        return False

    def zombie(self, grid: List[List[int]]) -> int:
        """Return the number of days to infect every human, or -1 if some
        human can never be reached. Mutates `grid` in place.

        Empty grids (or empty rows) return -1.
        """
        if len(grid) == 0 or len(grid[0]) == 0:
            return -1
        # Multi-source BFS: seed with every zombie, count humans to know
        # when the infection is complete.
        zombie_locs = set()
        n_human = 0
        for i in range(len(grid)):
            for j in range(len(grid[0])):
                if grid[i][j] == 1:
                    zombie_locs.add((i, j))
                elif grid[i][j] == 0:
                    n_human += 1
        n_days = 0
        dxdy = [(0, 1), (0, -1), (1, 0), (-1, 0)]
        while n_human > 0 and len(zombie_locs) > 0:
            # One BFS level == one day; only newly infected cells spread next day.
            cur_locs = set()
            for (i, j) in zombie_locs:
                for (dx, dy) in dxdy:
                    if self.helper(grid, i + dx, j + dy):
                        cur_locs.add((i + dx, j + dy))
                        n_human -= 1
            # (Debug prints of grid/cur_locs removed -- they flooded stdout.)
            zombie_locs = cur_locs
            n_days += 1
        if n_human > 0:
            return -1
        return n_days
# Lintcode 官方
# NOTE(review): second class with the same name -- at import time this
# definition shadows the Solution above. Written for Python 2 (xrange).
class Solution:
    # @param {int[][]} grid a 2D integer grid
    # @return {int} an integer
    def zombie(self, grid):
        # Write your code here
        # Multi-source BFS, one level per day; returns days - 1 because the
        # loop increments once more on the final (empty) frontier.
        n = len(grid)
        if n == 0:
            return 0
        m = len(grid[0])
        if m == 0:
            return 0
        # Seed the queue with every existing zombie.
        q = []
        for i in xrange(n):
            for j in xrange(m):
                if grid[i][j] == 1:
                    q.append((i, j))
        # 4-directional neighbour offsets.
        d = [[0, -1], [0, 1], [-1, 0], [1, 0]]
        days = 0
        while q:
            days += 1
            new_q = []
            for node in q:
                for k in xrange(4):
                    x = node[0] + d[k][0]
                    y = node[1] + d[k][1]
                    if x >= 0 and x < n and y >= 0 and y < m and grid[x][y] == 0:
                        # Infect the human and spread from it next day.
                        grid[x][y] = 1
                        new_q.append((x, y))
            q = new_q
        # Any surviving human is unreachable (walled off).
        for i in xrange(n):
            for j in xrange(m):
                if grid[i][j] == 0:
                    return -1
        return days - 1
| true |
c19d52e24f107a68879d019ae535eeca13cd821e | Python | johnfercher/b2w-backend | /tests/application/filters/has_valid_data_in_response_test.py | UTF-8 | 1,034 | 2.78125 | 3 | [
"MIT"
] | permissive | from random import randint
from flask import Response
from src.application.filters.response_data_validator import has_valid_data_in_response
from src.domain.entities.planet import Planet
def test_when_return_is_none_should_return_404():
    # A wrapped handler that yields no data must be turned into a 404.
    @has_valid_data_in_response
    def return_none():
        return None

    expected = Response(status=404)
    actual = return_none()

    assert actual.status == expected.status
def test_when_return_is_object_should_return_200():
    # A wrapped handler that yields a single entity must produce a 200
    # response carrying a JSON body.
    @has_valid_data_in_response
    def return_object():
        return Planet.random()

    expected = Response(status=200)
    actual = return_object()

    assert actual.status == expected.status
    assert actual.json is not None
def test_when_return_is_list_should_return_200():
    # A wrapped handler that yields a non-empty list must also produce a
    # 200 response carrying a JSON body.
    @has_valid_data_in_response
    def return_list():
        count = randint(2, 10)
        return [Planet.random() for _ in range(1, count)]

    expected = Response(status=200)
    actual = return_list()

    assert actual.status == expected.status
    assert actual.json is not None
| true |
26ef0a5959e63e306a3d6e6c92c19bb0be0b2bde | Python | yadukrishnanaj/droneprogramming | /codes/guidedflight.py | UTF-8 | 1,834 | 2.609375 | 3 | [] | no_license | from dronekit import connect,VehicleMode,LocationGlobalRelative,APIException
import socket
import time
import exceptions
import math
import argparse
def connectmy():
    """Connect to the vehicle given by --connect, or start a SITL simulator.

    Returns the connected dronekit Vehicle (blocks until ready).
    """
    parser=argparse.ArgumentParser(description="commands")
    parser.add_argument('--connect')
    args=parser.parse_args()
    connection_string=args.connect
    # No --connect argument: fall back to a local SITL instance.
    if not connection_string:
        import dronekit_sitl
        sitl=dronekit_sitl.start_default()
        connection_string=sitl.connection_string()
    vehicle=connect(connection_string,wait_ready=True)
    return vehicle
def armandtakeoff(targetheight):
    """Arm the global `vehicle` in GUIDED mode and climb to targetheight (m).

    Blocks (polling once per second) through each stage: armable check,
    mode change, arming, and the climb until 95% of the target altitude.
    """
    # Wait until pre-arm checks pass.
    while vehicle.is_armable == False:
        print("waiting")
        time.sleep(1)
    print("ready")
    # Switch to GUIDED so simple_takeoff/simple_goto are accepted.
    vehicle.mode=VehicleMode("GUIDED")
    while vehicle.mode !="GUIDED":
        time.sleep(1)
        print("waitng to cahan")
    print("mode guide")
    vehicle.armed=True
    while vehicle.armed == False:
        print("waiting for armin")
        time.sleep(1)
    print("vehicle armed")
    vehicle.simple_takeoff(targetheight)
    # Poll altitude until within 95% of the requested height.
    while True:
        print(vehicle.location.global_relative_frame.alt)
        if vehicle.location.global_relative_frame.alt>=.95*targetheight:
            print(vehicle.location.global_relative_frame.alt)
            break
        time.sleep(1)
    return None
def get_distannce(targetlocation,currentlocation):
    """Approximate ground distance in metres between two global locations.

    Uses the small-angle flat-earth approximation: degrees of lat/lon
    difference scaled by ~111,319.5 m per degree.
    """
    dlat=targetlocation.lat-currentlocation.lat
    # BUG FIX: previously `currentlocation.lat`, which subtracted a
    # latitude from a longitude and returned nonsense distances.
    dlong=targetlocation.lon-currentlocation.lon
    return math.sqrt((dlat*dlat)+(dlong*dlong))*1.113195e5
def goto(targetlocation):
    """Fly the global `vehicle` to targetlocation and block until arrival.

    Arrival means the remaining distance drops below 1% of the initial
    distance; the wait loop exits early if the mode leaves GUIDED.
    """
    distance=get_distannce(targetlocation,vehicle.location.global_relative_frame)
    vehicle.simple_goto(targetlocation)
    while vehicle.mode.name=="GUIDED":
        currentdistnce=get_distannce(targetlocation,vehicle.location.global_relative_frame)
        if currentdistnce<=0.01*distance:
            print("reachde")
            break
        time.sleep(2)
    time.sleep(1)
    return None
# Mission script: connect, take off to 10 m, then fly to waypoint wp1
# (lat, lon, relative altitude in metres).
wp1=LocationGlobalRelative(10.12057624,76.35245919,10)
vehicle=connectmy()
armandtakeoff(10)
goto(wp1)
| true |
a3d6f19ad3de3e401ef0be705ec6e9851afbac31 | Python | MiaoLi/trimesh | /trimesh/ray/ray_triangle_cpu.py | UTF-8 | 3,578 | 3.0625 | 3 | [
"MIT"
] | permissive | '''
Narrow phase ray- triangle intersection
'''
import numpy as np
import time
from ..constants import log, tol
from ..util import diagonal_dot
def rays_triangles_id(triangles,
rays,
ray_candidates = None,
return_any = False):
'''
Intersect a set of rays and triangles.
Arguments
---------
triangles: (n, 3, 3) float array of triangle vertices
rays: (m, 2, 3) float array of ray start, ray directions
ray_candidates: (m, *) int array of which triangles are candidates
for the ray.
return_any: bool, exit loop early if any ray hits any triangle
and change output of function to bool
Returns
---------
if return_any:
hit: bool, whether the set of rays hit any triangle
else:
intersections: (m) sequence of triangle indexes hit by rays
'''
# default set of candidate triangles to be queried
# is every triangle. this is very slow
candidates = np.ones(len(triangles), dtype=np.bool)
hits = [None] * len(rays)
for ray_index, ray in enumerate(rays):
if not (ray_candidates is None):
candidates = ray_candidates[ray_index]
# query the triangle candidates
hit = ray_triangles(triangles[candidates], *ray)
if return_any:
if hit.any(): return True
else:
hits[ray_index] = np.array(candidates)[hit]
if return_any: return False
return np.array(hits)
def ray_triangles(triangles,
ray_origin,
ray_direction):
'''
Intersection of multiple triangles and a single ray.
Uses Moller-Trumbore intersection algorithm
'''
candidates = np.ones(len(triangles), dtype=np.bool)
# edge vectors and vertex locations in (n,3) format
vert0 = triangles[:,0,:]
vert1 = triangles[:,1,:]
vert2 = triangles[:,2,:]
edge0 = vert1 - vert0
edge1 = vert2 - vert0
#P is a vector perpendicular to the ray direction and one
# triangle edge.
P = np.cross(ray_direction, edge1)
#if determinant is near zero, ray lies in plane of triangle
det = diagonal_dot(edge0, P)
candidates[np.abs(det) < tol.zero] = False
if not candidates.any(): return candidates
# remove previously calculated terms which are no longer candidates
inv_det = 1.0 / det[candidates]
T = ray_origin - vert0[candidates]
u = diagonal_dot(T, P[candidates]) * inv_det
new_candidates = np.logical_not(np.logical_or(u < -tol.zero,
u > (1+tol.zero)))
candidates[candidates] = new_candidates
if not candidates.any(): return candidates
inv_det = inv_det[new_candidates]
T = T[new_candidates]
u = u[new_candidates]
Q = np.cross(T, edge0[candidates])
v = np.dot(ray_direction, Q.T) * inv_det
new_candidates = np.logical_not(np.logical_or((v < -tol.zero),
(u + v > (1+tol.zero))))
candidates[candidates] = new_candidates
if not candidates.any(): return candidates
Q = Q[new_candidates]
inv_det = inv_det[new_candidates]
t = diagonal_dot(edge1[candidates], Q) * inv_det
candidates[candidates] = t > tol.zero
return candidates
| true |
9034be0fa12f086163dae9defc6cb5a193b18268 | Python | ASketin/python_developer | /hw2/tests/test_patient_collection.py | UTF-8 | 3,450 | 2.671875 | 3 | [
"CC0-1.0"
] | permissive | import os
import pytest
from hw2.homework.config import PASSPORT_TYPE, CSV_PATH
from hw2.homework.patient import PatientCollection, Patient
from hw2.tests.constants import PATIENT_FIELDS
GOOD_PARAMS = (
("Кондрат", "Рюрик", "1971-01-11", "79160000000", PASSPORT_TYPE, "0228 000000"),
("Евпатий", "Коловрат", "1972-01-11", "79160000001", PASSPORT_TYPE, "0228 000001"),
("Ада", "Лавлейс", "1978-01-21", "79160000002", PASSPORT_TYPE, "0228 000002"),
("Миртл", "Плакса", "1880-01-11", "79160000003", PASSPORT_TYPE, "0228 000003"),
("Евлампия", "Фамилия", "1999-01-21", "79160000004", PASSPORT_TYPE, "0228 000004"),
("Кузя", "Кузьмин", "2000-01-21", "79160000005", PASSPORT_TYPE, "0228 000005"),
("Гарри", "Поттер", "2020-01-11", "79160000006", PASSPORT_TYPE, "0228 000006"),
("Рон", "Уизли", "1900-04-20", "79160000007", PASSPORT_TYPE, "0228 000007"),
("Билл", "Гейтс", "1978-12-31", "79160000008", PASSPORT_TYPE, "0228 000008"),
("Владимир", "Джугашвили", "1912-01-31", "79160000009", PASSPORT_TYPE, "0228 000009"),
("Вован", "ДеМорт", "1978-11-30", "79160000010", PASSPORT_TYPE, "0228 000010"),
("Гопник", "Районный", "1978-01-25", "79160000011", PASSPORT_TYPE, "0228 000011"),
("Фёдор", "Достоевский", "1978-01-05", "79160000012", PASSPORT_TYPE, "0228 000012"),
)
@pytest.fixture()
def prepare():
with open(CSV_PATH, 'w', encoding='utf-8') as f:
f.write('')
for params in GOOD_PARAMS:
Patient(*params).save()
yield
os.remove(CSV_PATH)
@pytest.mark.usefixtures('prepare')
def test_collection_iteration():
collection = PatientCollection(CSV_PATH)
for i, patient in enumerate(collection):
true_patient = Patient(*GOOD_PARAMS[i])
for field in PATIENT_FIELDS:
assert getattr(patient, field) == getattr(true_patient, field), f"Wrong attr {field} for {GOOD_PARAMS[i]}"
@pytest.mark.usefixtures('prepare')
def test_limit_usual():
collection = PatientCollection(CSV_PATH)
try:
len(collection.limit(8))
assert False, "Iterator should not have __len__ method"
except (TypeError, AttributeError):
assert True
for i, patient in enumerate(collection.limit(8)):
true_patient = Patient(*GOOD_PARAMS[i])
for field in PATIENT_FIELDS:
assert getattr(patient, field) == getattr(true_patient, field), f"Wrong attr {field} for {GOOD_PARAMS[i]} in limit"
@pytest.mark.usefixtures('prepare')
def test_limit_add_record():
collection = PatientCollection(CSV_PATH)
limit = collection.limit(len(GOOD_PARAMS) + 10)
for _ in range(len(GOOD_PARAMS)):
next(limit)
new_patient = Patient("Митрофан", "Космодемьянский", "1999-10-15", "79030000000", PASSPORT_TYPE, "4510 000444")
new_patient.save()
last_patient = next(limit)
for field in PATIENT_FIELDS:
assert getattr(new_patient, field) == getattr(last_patient, field), f"Wrong attr {field} for changed limit"
@pytest.mark.usefixtures('prepare')
def test_limit_remove_records():
collection = PatientCollection(CSV_PATH)
limit = collection.limit(4)
with open(CSV_PATH, 'w', encoding='utf-8') as f:
f.write('')
assert len([_ for _ in limit]) == 0, "Limit works wrong for empty file"
| true |
701a6215fbf3dbafcb11bbfea5389afd713f3dc5 | Python | mabogunje/pentago | /pentago/assets.py | UTF-8 | 5,427 | 3.765625 | 4 | [] | no_license | '''
@author: Damola Mabogunje
@contact: damola@mabogunje.net
@summary: Pentago pieces
'''
from pentago import *;
class BLOCK(object):
    '''
    Represents a game block (quadrant) on the pentago board
    Note: Assigned values are important!
    DO NOT MODIFY!
    '''

    (TOP_LEFT, TOP_RIGHT, BOTTOM_LEFT, BOTTOM_RIGHT) = range(1, 5)

    def validate(self, block):
        '''
        Return True when `block` names one of the four quadrants
        (TOP_LEFT..BOTTOM_RIGHT), False otherwise.
        '''
        return BLOCK.TOP_LEFT <= block <= BLOCK.BOTTOM_RIGHT

    def str(self, block):
        '''
        Returns the human-readable label for the block.
        '''
        return "Game Block %d" % block
class Board(object):
    '''
    The game board is a 6x6 grid which can be manipulated as 4 quarters
    called blocks i.e Four 3x3 grids. Each block can also be twisted
    90 degrees in any direction.
    '''

    EMPTY_CELL = '.'

    def __init__(self):
        self.size = 6
        # Bugfix: floor division keeps block_size an int on Python 3; the
        # original "/" yields a float there, breaking range() and indexing.
        # (Under Python 2, "//" on ints is identical to "/".)
        self.block_size = self.size // 2
        self.grid = [[Board.EMPTY_CELL for col in range(self.size)] for row in range(self.size)]
        self.state = GAME_STATE.IN_PLAY

    def is_empty(self, block, cell):
        '''
        Returns True if value at block [block], cell[cell] is Board.EMPTY_CELL
        '''
        # Blocks 1/2 are the top half, 3/4 the bottom; odd blocks are the left half.
        row_offset = 0 if block < BLOCK.BOTTOM_LEFT else self.block_size
        col_offset = 0 if (block % 2 != 0) else self.block_size
        col = (cell - 1) % self.block_size
        row = (cell - 1) // self.block_size
        return self.grid[row + row_offset][col + col_offset] is Board.EMPTY_CELL

    def update(self, block, cell, colour):
        '''
        Sets value at block [block], cell[cell] to colour
        '''
        row_offset = 0 if block < BLOCK.BOTTOM_LEFT else self.block_size
        col_offset = 0 if (block % 2 != 0) else self.block_size
        col = (cell - 1) % self.block_size
        row = (cell - 1) // self.block_size
        self.grid[row + row_offset][col + col_offset] = COLOUR().str(colour)

    def rotate(self, block, direction):
        '''
        Rotates the given block of the game board in the specified direction
        '''
        if direction is DIRECTION.RIGHT:
            self.grid = self.rotate_right(block)
        else:
            self.grid = self.rotate_left(block)

    def rotate_left(self, block):
        '''
        Rotates the given block of the game board 90 degrees to the left
        '''
        rotated_grid = [[self.grid[row][col] for col in range(self.size)] for row in range(self.size)]
        row_offset = ((block - 1) // 2) * self.block_size
        col_offset = ((block - 1) % 2) * self.block_size
        for i in range(row_offset, (row_offset + self.block_size)):
            for j in range(col_offset, (col_offset + self.block_size)):
                # Map (i, j) to its position after a 90-degree CCW turn of the block.
                # NOTE(review): the constant 2 (= block_size - 1) hard-codes 3x3 blocks.
                rotated_grid[2 - j + row_offset + col_offset][i - row_offset + col_offset] = self.grid[i][j]
        return rotated_grid

    def rotate_right(self, block):
        '''
        Rotates the given block of the game board 90 degrees to the right
        '''
        rotated_grid = [[self.grid[row][col] for col in range(self.size)] for row in range(self.size)]
        row_offset = ((block - 1) // 2) * self.block_size
        col_offset = ((block - 1) % 2) * self.block_size
        for i in range(row_offset, (row_offset + self.block_size)):
            for j in range(col_offset, (col_offset + self.block_size)):
                # Map (i, j) to its position after a 90-degree CW turn of the block.
                # NOTE(review): the constant 2 (= block_size - 1) hard-codes 3x3 blocks.
                rotated_grid[j + row_offset - col_offset][2 - i + row_offset + col_offset] = self.grid[i][j]
        return rotated_grid

    def __str__(self):
        border = "+-------+-------+\n"
        fmt = "| %s %s %s | %s %s %s |\n"
        output = ""
        for i in range(0, self.size):
            row = tuple([str(x) for x in self.grid[i]])
            # A border precedes every block row (every block_size rows).
            needs_border = ((i % self.block_size) == 0)
            if needs_border:
                output += border
            output += (fmt % row)
        output += border
        return output

    def __repr__(self):
        fmt = "%s %s %s %s %s %s\n"
        output = ""
        for i in range(0, self.size):
            row = tuple([str(x) for x in self.grid[i]])
            output += (fmt % row)
        return output
class Player(object):
    '''
    Players must be of a certain colour and may either
    1. Put a piece of their colour on the game board.
    2. Rotate a block of the game board.
    '''

    def __init__(self, name, colour, ai=None):
        self.ai = ai
        self.colour = colour
        self.name = name

    def put(self, block, pos, board):
        '''
        Place this player's piece at position [pos] in block [block]
        of the game board.
        '''
        board.update(block, pos, self.colour)

    def twist(self, block, direction, board):
        '''
        Rotate block [block] of the game board 90 degrees in
        direction [direction]
        '''
        board.rotate(block, direction)

    def __str__(self):
        '''
        Human-readable "name (colour)" label for this player
        '''
        return "%s (%s)" % (self.name, COLOUR().str(self.colour))
| true |
17f634688eee06cd6d905a7b8104f83b61326b5f | Python | shehzad-lalani/Pyda-PythonDigitalAssistant | /pyda-advancedwikipedia.py | UTF-8 | 170 | 2.953125 | 3 | [] | no_license | import wikipedia
# Translate wikipedia data to GERMAN LANGUAGE
while True:
    # Bugfix: the original did "input = input(...)", rebinding the builtin to
    # the typed string, so the second iteration crashed with
    # "'str' object is not callable".
    query = input("Wiki Q: ")
    wikipedia.set_lang("de")
    print(wikipedia.summary(query))
| true |
f7f28bc4984d0e389e40b59ba99395f5c08d1c6e | Python | ThomasBollmeier/GObjectCreator3 | /src/gobjcreator3/model/module.py | UTF-8 | 6,743 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | class ModuleElement(object):
MODULE_SEP = '/'
def __init__(self, name):
self.name = name
self.module = None
self.filepath_origin = ""
def get_fullname(self):
res = self.name
module = self.module
while module and module.name:
res = module.name + ModuleElement.MODULE_SEP + res
module = module.module
return res
class Module(ModuleElement):
    """A named scope holding types and nested modules, addressable by
    '/'-separated paths; '..' in a path steps up to the parent module."""

    PARENT = ".."

    def __init__(self, name):
        ModuleElement.__init__(self, name)
        self.cfunc_prefix = ""
        self._elements = []    # children in insertion order
        self._elements_d = {}  # child name -> child, for fast lookup

    def merge(self, module):
        """Absorb *module*'s children; equally named sub-modules merge recursively."""
        for elem in module._elements:
            if not isinstance(elem, Module):
                self.add_element(elem)
            else:
                try:
                    existing_element = self._elements_d[elem.name]
                    if isinstance(existing_element, Module):
                        existing_element.merge(elem)
                    else:
                        raise Exception("'%s' has been already defined" % elem.name)
                except KeyError:
                    self.add_element(elem)

    def get_root(self):
        """Return the topmost ancestor module."""
        res = self
        while res.module:
            res = res.module
        return res

    def get_path(self):
        """Return the list of module names from the root down to this module.

        Bugfix: the parent link is the inherited "module" attribute; the
        original read a non-existent "parent" attribute, so every call
        raised AttributeError.
        """
        path = []
        module = self
        while module:
            if module.name:
                path.insert(0, module.name)
            module = module.module
        return path

    def get_module(self, path):
        """Return the Module at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Module):
            return res
        else:
            return None

    def get_type(self, path):
        """Return the plain type (category OTHER) at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type) and res.category == Type.OTHER:
            return res
        else:
            return None

    def get_object(self, path):
        """Return the object type at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type) and res.category == Type.OBJECT:
            return res
        else:
            return None

    def get_interface(self, path):
        """Return the interface type at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type) and res.category == Type.INTERFACE:
            return res
        else:
            return None

    def get_error_domain(self, path):
        """Return the error-domain type at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type) and res.category == Type.ERROR_DOMAIN:
            return res
        else:
            return None

    def get_enumeration(self, path):
        """Return the enumeration type at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type) and res.category == Type.ENUMERATION:
            return res
        else:
            return None

    def get_flags(self, path):
        """Return the flags type at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type) and res.category == Type.FLAGS:
            return res
        else:
            return None

    def get_type_element(self, path):
        """Return the Type of any category at *path*, or None."""
        res = self._get_element(path)
        if res and isinstance(res, Type):
            return res
        else:
            return None

    def get_element(self, path):
        """Return whatever element lives at *path*, or None."""
        return self._get_element(path)

    def _get_element(self, path):
        """Resolve *path*: relative paths are tried against this module first
        and then against the root; a leading separator forces root-only lookup."""
        if not path:
            return self
        root = self.get_root()
        start_modules = []
        names = path.split(ModuleElement.MODULE_SEP)
        if names[0]:
            start_modules = [self, root]  # <-- search first in current module, secondly in root
        else:  # absolute path:
            start_modules = [root]
            names = names[1:]
        if len(names) == 1:
            element_name = names[0]
            module_names = []
        else:
            element_name = names[-1]
            module_names = names[:-1]
        for start_module in start_modules:
            parent_module = start_module
            failed = False
            for module_name in module_names:
                if module_name != Module.PARENT:
                    parent_module = parent_module._get_element(module_name)
                else:
                    parent_module = parent_module.module
                if not isinstance(parent_module, Module):
                    # Only the root-based attempt treats a dangling path
                    # component as a hard error.
                    if start_module is root:
                        raise Exception("'%s' is not a module!" % module_name)
                    else:
                        failed = True
                        break
            if failed:
                continue
            try:
                return parent_module._elements_d[element_name]
            except KeyError:
                pass
        return None

    def _get_type_elements(self, category):
        """Return the child Types of the given category, in insertion order."""
        return [elem for elem in self._elements if isinstance(elem, Type) and elem.category == category]

    def _get_modules(self):
        return [elem for elem in self._elements if isinstance(elem, Module)]

    modules = property(_get_modules)

    def _get_types(self):
        return self._get_type_elements(Type.OTHER)

    types = property(_get_types)

    def _get_objects(self):
        return self._get_type_elements(Type.OBJECT)

    objects = property(_get_objects)

    def _get_interfaces(self):
        return self._get_type_elements(Type.INTERFACE)

    interfaces = property(_get_interfaces)

    def _get_error_domains(self):
        return self._get_type_elements(Type.ERROR_DOMAIN)

    error_domains = property(_get_error_domains)

    def _get_enumerations(self):
        return self._get_type_elements(Type.ENUMERATION)

    enumerations = property(_get_enumerations)

    def _get_flags(self):
        return self._get_type_elements(Type.FLAGS)

    flags = property(_get_flags)

    def add_element(self, element):
        """Register *element* as a child and point its parent link back here.

        Raises Exception when a child of the same name already exists.
        """
        if element.name in self._elements_d:
            raise Exception("'%s' has been already defined" % element.name)
        self._elements_d[element.name] = element
        self._elements.append(element)
        element.module = self
class RootModule(Module):
    """The nameless top-level module that anchors the module hierarchy."""

    def __init__(self):
        super(RootModule, self).__init__('')
from gobjcreator3.model.type import Type | true |
7c00aa8cb9b58ab0183ec3577d718ad84ee93dd9 | Python | premsair/Logistic-Regression | /rank_fft_data.py | UTF-8 | 2,152 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 02 23:11:42 2015
@author: Prem Sai Kumar Reddy Gangana (psreddy@unm.edu)
"""
import numpy as np
def rank_fft_features(genre_list, fft_Data):
    """Rank FFT features by standard deviation (Approach 1).

    Assumes fft_Data holds 100 consecutive rows per genre, in genre_list
    order. For each genre, features are ranked by how little their
    per-genre std deviates from the whole-dataset std; the union of the
    top-20 feature indices of every genre is returned (sorted, unique).
    """
    per_genre_std = [
        fft_Data[g * 100:(g + 1) * 100, :].std(axis=0)
        for g in range(len(genre_list))
    ]
    overall_std = fft_Data.std(axis=0)
    # Smaller deviation from the global std ranks earlier.
    deviation = np.abs(np.asarray(per_genre_std) - overall_std)
    ranked = deviation.argsort(axis=1)
    return np.unique(ranked[:, :20])
############# Approach 2 : Based on Entropy of Data. Comment above block and Uncomment below block if need to test the below
# sum_genre_fft_features=[]
# for index,each in enumerate(genre_list):
# genre_fft_features=fft_Data[(index*100):((index*100)+100),:]
# sum_genre_fft_features.append(genre_fft_features.sum(axis=0))
#
# sum_genre_fft_features=np.array(sum_genre_fft_features)
# probability_of_x_given_y=sum_genre_fft_features/sum_genre_fft_features.sum(axis=1).reshape(6,1)
#
# probability_of_y=1/6.0
# probability_of_x=probability_of_x_given_y.sum(axis=0)/6.0
#
# entropy_of_x_given_y=np.sum(-(probability_of_y*probability_of_x_given_y*np.log2(probability_of_x_given_y))-(probability_of_y*(1-probability_of_x_given_y)*np.log2(1-probability_of_x_given_y)),axis=0)
# entropy_of_x=-(probability_of_x*np.log2(probability_of_x))-((1-probability_of_x)*np.log2((1-probability_of_x)))
#
# info_gain=entropy_of_x-entropy_of_x_given_y
# rank_features=info_gain.argsort()[::-1][:120]
# return(rank_features)
| true |
a883a9d45e33dcb3492ad105ed144d990e391ab2 | Python | nikoneko035/Kaggle | /venns_atmaCup#5.py | UTF-8 | 675 | 2.546875 | 3 | [] | no_license | fig, axes = plt.subplots(3,4,figsize=(15,5))
# Top-left panel: sizes of train vs test (the 0 suppresses the overlap region).
# NOTE(review): train, test, fig/axes and venn2 are created/imported outside
# this snippet (not visible here) -- confirm they are in scope.
venn2(subsets=(train.shape[0],test.shape[0],0), set_labels=("train", "test"), ax=axes[0,0])
# Strip ticks, spines and background colour from the first row of axes.
for ax in axes.ravel()[:4]:
    ax.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False,
                   bottom=False, left=False, right=False, top=False)
    ax.spines["right"].set_color("none"); ax.spines["left"].set_color("none")
    ax.spines["top"].set_color("none"); ax.spines["bottom"].set_color("none")
    ax.patch.set_facecolor('white')
# One venn diagram per test column: overlap of distinct values in train vs test.
for ax, col in zip(axes.ravel()[4:], test.columns):
    venn2([set(train[col]), set(test[col])], ax=ax, set_labels=(col, col))
plt.show(plt.tight_layout())
| true |
a29a6c160ee3e5d717333bbd8a7729f007f8ebe6 | Python | harshiniravula/DSP_LAB | /lab4_prog2.py | UTF-8 | 762 | 3.03125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def movavg(x, order=None):
    """Trailing moving average of sequence *x*.

    order: window length. When omitted, it is read interactively (the
    original behaviour), so existing callers are unaffected.
    Returns a numpy array the same length as x; leading positions use a
    partial window but are still divided by the full order, matching the
    original implementation.
    """
    if order is None:
        order = int(input("enter the order:"))
    n = len(x)
    z = []
    for i in range(n):
        s = 0
        for k in range(order):
            # Only samples inside the sequence contribute to the window sum.
            if 0 <= i - k < n:
                s = s + x[i - k]
        z = np.append(z, float(s) / float(order))
    return z
# Demo driver: smooth user-entered samples, then denoise a synthetic sine.
samples = []
count = int(input("enter no of samples:"))
for _ in range(count):
    value = int(input("enter samples:"))
    samples = np.append(samples, value)
print(samples)
smoothed = movavg(samples)
print(smoothed)
f = int(input("enter signal frequency:"))
fs = int(input("enter sampling frequency:"))
t = np.arange(0, 100, 0.1)
x = np.sin(2 * np.pi * (float(f) / float(fs)) * t)
plt.subplot(4, 1, 1)
plt.plot(x)
noise = np.random.rand(x.shape[0])
plt.subplot(4, 1, 2)
plt.plot(noise)
noisy = x + noise
plt.subplot(4, 1, 3)
plt.plot(noisy)
filtered = movavg(noisy)
plt.subplot(4, 1, 4)
plt.plot(filtered)
plt.show()
| true |
e999832d4a6f6a094df5343e1e6cfc0afad11fa2 | Python | matthewmjm/data-science-exercises | /6-programing-in-python/6-8_strings.py | UTF-8 | 723 | 3.65625 | 4 | [] | no_license | #Hello World
def hello(name):
    """Greet *name* with a classic hello message."""
    greeting = 'Hello, ' + name
    return greeting
#Quotable
def quotable(name, quote):
    """Return '<name> said: "<quote>"' with the quote in double quotes."""
    quoted = '"' + quote + '"'
    return name + ' said: ' + quoted
#Repeater
def repeater(string, n):
    """Return *string* concatenated with itself *n* times."""
    return n * string
#Repeater, level 2
def repeater(string, n):
    """Describe the repetition, e.g. '"a" repeated 2 times is: "aa"'."""
    pieces = ['"', string, '" repeated ', str(n), ' times is: "', string * n, '"']
    return ''.join(pieces)
#Jedi name
def greet_jedi(first, last):
    """Greet by Jedi name: first 3 letters of *last* + first 2 of *first*, capitalized."""
    jedi_name = last[:3].capitalize() + first[:2].capitalize()
    return "Greetings, master " + jedi_name
#Areacode extractor
def area_code(message):
    """Return the text between the first '(' and ')' in *message*."""
    opening = message.find("(")
    closing = message.find(")")
    return message[opening + 1:closing]
#Poem formatter
def format_poem(poem):
    """Split *poem* on '. ' and rejoin with a period plus newline, so each
    sentence ends its own line (the final sentence keeps its own ending)."""
    lst = poem.split('. ')
    s = ".\n"
    return s.join(lst) | true |
8d52a90b4f52cf1a48fdc2632d5c8f3435a8dd31 | Python | YeswanthRajakumar/LeetCode_Problems | /Easy/day-2/Create Target Array in the Given Order.py | UTF-8 | 169 | 3.03125 | 3 | [] | no_license | nums = [0,1,2,3,4]
index = [0,1,2,2,1]
target = []
# Insert each value at the position its matching index entry requests
# (LeetCode "Create Target Array in the Given Order").
for pos, val in zip(index, nums):
    target.insert(pos, val)
print(target)
| true |
0eb1d52449f7c629e6c0ceae3f5af0356ccec33b | Python | zubairwazir/technical_tests | /question_A.py | UTF-8 | 332 | 3.703125 | 4 | [] | no_license | def check_for_overlap(line_1, line_2):
for num_point in line_2:
if num_point in xrange(line_1[0], line_1[1]):
return 'Both lines overlap!'
break
else:
return 'Lines do not overlap!'
break
# Quick demo: these two segments are disjoint.
segment_a = [1, 4]
segment_b = [5, 6]
print(check_for_overlap(segment_a, segment_b))
| true |
d37183c125829791680bf95eb477188add1f61f7 | Python | lxconfig/UbuntuCode_bak | /algorithm/牛客网/40-数组中只出现一次的数字.py | UTF-8 | 1,665 | 4.21875 | 4 | [] | no_license |
"""
一个整型数组里除了两个数字之外,其他的数字都出现了两次。请写程序找出这两个只出现一次的数字。
"""
class Solution:
# 返回[a,b] 其中ab是出现一次的两个数字
def FindNumsAppearOnce(self, array):
# 运行时间:22ms 占用内存:5732k
# 先异或,找出两个只出现一次数字的异或结果
tmp = l = m = 0
if not array:
return []
for i in array:
tmp ^= i
# 根据异或结果的二进制,找到其最低位的1的位置,由此位置把数组拆分开
index = self.FindFirst1(tmp)
for i in array:
if self.isBit1(i, index):
l ^= i
else:
m ^= i
return [l, m]
def FindFirst1(self, num):
"""找到二进制数中最低位的1"""
index = 0
while num & 1 == 0:
# 如果是偶数,说明还没有找到最低位的1
# 如:0010 右移一次后是:001 所以index=1
num >>= 1
index += 1
return index
def isBit1(self, num, index):
"""检查num二进制中index位置是否为1"""
# 因为要检查的是index位,所以index后面的位数都不用看,右移直接去掉
# 之前是index位,右移后,肯定是最低位,如果为1,肯定是奇数,如果为0,肯定是偶数
return (num >> index) & 1 == 0
if __name__ == "__main__":
solution = Solution()
array = [1, 2, 2, 3, 4, 4]
print(solution.FindNumsAppearOnce(array))
| true |
a39be9ed2f40c401dae5e94d3439edfb68ab1396 | Python | jeeten/ThinkBoard | /common/validation.py | UTF-8 | 562 | 2.734375 | 3 | [] | no_license | from common import log
logging = log.logging
def checkKey(dict, key):
    """Validate presence of *key* in *dict*.

    A str key must itself be present; a list means every contained key must
    be present. Raises Exception naming the first missing key. Any other
    key type is accepted silently.
    """
    if isinstance(key, str):
        if key not in dict:
            raise Exception("{} Key is missing!".format(key))
    elif isinstance(key, list):
        logging.debug("validation: {} is tupple".format(key))
        for k in key:
            logging.debug("validation: {} iterating".format(k))
            if k not in dict:
                logging.debug("validation: {} is not in dict".format(k))
                raise Exception("{} Key is missing!".format(k))
| true |
0b940c4a54c1efa706ede7c5953341d11cd0fe33 | Python | powerthecoder/TeamRandomizer | /main_2.py | UTF-8 | 1,542 | 3.359375 | 3 | [
"MIT"
] | permissive | import random
import time
import sys
# Developed By: Leo Power
# https://powerthecoder.xyz
main_list= []
list_am = input("Enter amount of players: ")
for i in range(int(list_am)):
name = input("Enter Player Name: ")
main_list.append(name)
x = 0
while x != 1:
print()
amount_per_team = input("Player Per Team: ")
if(amount_per_team == 0):
print("Developed By: Leo Power")
print("https://powerthecoder.xyz")
elif(amount_per_team < 0):
print("Shuting Down...")
time.sleep(1)
sys.exit()
else:
arg = "run"
if(arg.lower() == "run"):
print()
print("Team 1: ")
print()
z = 0
list1 = []
list2 = []
while z != int(amount_per_team):
new_pick = random.choice(main_list)
if not new_pick in list1:
print(new_pick)
list1.append(new_pick)
z += 1
else:
pass
print()
print("Team 2:")
print()
v = 0
while v != int(amount_per_team):
new_pick = random.choice(main_list)
if not new_pick in list2:
if not new_pick in list1:
print(new_pick)
list2.append(new_pick)
v += 1
else:
pass
else:
pass
pass | true |
049f389600472b5b43e562b11f3a6cbf62227586 | Python | laits1/Algorithm | /0714_Algorithm/Code07-01.py | UTF-8 | 484 | 3.25 | 3 | [] | no_license | SIZE = 5
queue = [None for _ in range(SIZE)]
front, rear= -1, -1
# enQueue
rear += 1
queue[rear] = '화사'
rear += 1
queue[rear] = '솔라'
rear += 1
queue[rear] = '문별'
# deQueue
front += 1; data = queue[front]
queue[front] = None; print('입장손님-->', data)
front += 1; data = queue[front]
queue[front] = None; print('입장손님-->', data)
front += 1; data = queue[front]
queue[front] = None; print('입장손님-->', data)
print('출구<---', queue, '<---입구') | true |
c90cc9f951789edc4ec845f6abdd21e14b8b94c9 | Python | lylenchamberlain/teamx | /Classes/World.py | UTF-8 | 2,510 | 3.15625 | 3 | [] | no_license | from Classes.AbstractWorld import AbstractWorld
import pygame
pygame.font.init()
class World(AbstractWorld):
    """Pygame visualisation of the delivery simulation world.

    Bugfix: this class contained unresolved git merge-conflict markers
    (<<<<<<< / ======= / >>>>>>>) that made the module unparseable. The
    HEAD side (debug prints commented out) is kept; the second conflicted
    hunk was identical on both sides.
    """

    def __init__(self):
        AbstractWorld.__init__(self)
        self.height = 600
        self.width = 800
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.black = (0,0,0)
        self.clock = pygame.time.Clock()
        self.font = pygame.font.SysFont('Comic Sans MS', 30)

    def runSimulation(self, fps=1, initialTime=5*60, finalTime=23*60):
        """Run and draw the simulation from initialTime to finalTime
        (minutes since midnight), ticking at *fps* frames per second."""
        # This will give you a list of ALL cars which are in the system.
        trucks = self.getInitialTruckLocations()
        for i,t in enumerate(trucks):
            print("vehicle %d: %s"%(i, str(t)))
        # We will run a simulation where "t" is the time index.
        for t in range(initialTime,finalTime):
            print("\n\n Time: %02d:%02d"%(t/60, t%60))
            # each minute we can get a few new orders
            newOrders = self.getNewOrdersForGivenTime(t)
            print("New orders:")
            for c in newOrders:
                print(c)
            # Draw the clock in the top-left corner.
            text = self.font.render("Time: %02d:%02d"%(t/60, t%60), True, (255, 0, 0), (255, 255, 255))
            textrect = text.get_rect()
            textrect.centerx = 100
            textrect.centery = 30
            self.screen.fill((255, 255, 255))
            self.screen.blit(text, textrect)
            #print("Verticies" , self.Verticies)
            #print("Edges", self.Edges)
            # Draw every vertex as a small black square.
            # NOTE(review): Verticies/Edges appear to hold normalised [0,1]
            # coordinates scaled by 800 -- confirm against AbstractWorld.
            for item in range(len(self.Verticies)):
                pygame.draw.rect(self.screen,(0,0,0),(800*self.Verticies[item][1],800*self.Verticies[item][2],10,10))
                #print(self.Verticies[item])
            for x in range(len(self.Edges)):
                # Iterate through all the points of the edge's path polyline.
                for y in range(len(self.Edges[x][3]) - 1):
                    pygame.draw.line(self.screen,(90,200,90), (self.Edges[x][3][y][0]*800, self.Edges[x][3][y][1]*800), (self.Edges[x][3][y+1][0]*800, self.Edges[x][3][y+1][1]*800) , 4)
            # TODO: plot cars and customers as well; cars move every tick.
            pygame.display.update()
            gameExit = False
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    gameExit = True
                    pygame.quit()
            self.clock.tick(fps)
            if gameExit == True:
                break
| true |
aa31e5a802c3cfd46b2180f516a960cfd146edc3 | Python | RossHann/data_integration_iii | /main.py | UTF-8 | 16,954 | 2.578125 | 3 | [] | no_license | # import numpy as np
# import similaritymeasures
# import matplotlib.pyplot as plt
#
# # Generate random experimental data
# x = np.random.random(100)
# y = np.random.random(100)
# exp_data = np.zeros((100, 2))
# exp_data[:, 0] = x
# exp_data[:, 1] = y
#
# # Generate random numerical data
# x = np.random.random(100)
# y = np.random.random(100)
# num_data = np.zeros((100, 2))
# num_data[:, 0] = x
# num_data[:, 1] = y
#
# print(num_data)
#
# # quantify the difference between the two curves using PCM
# # pcm = similaritymeasures.pcm(exp_data, num_data)
#
# # quantify the difference between the two curves using
# # Discrete Frechet distance
# # df = similaritymeasures.frechet_dist(exp_data, num_data)
#
# # quantify the difference between the two curves using
# # area between two curves
# # area = similaritymeasures.area_between_two_curves(exp_data, num_data)
#
# # quantify the difference between the two curves using
# # Curve Length based similarity measure
# # cl = similaritymeasures.curve_length_measure(exp_data, num_data)
#
# # quantify the difference between the two curves using
# # Dynamic Time Warping distance
# # dtw, d = similaritymeasures.dtw(exp_data, num_data)
#
# # print the results
# # print(pcm, df, area, cl, dtw)
# #
# # # plot the data
# # plt.figure()
# # plt.plot(exp_data[:, 0], exp_data[:, 1])
# # plt.plot(num_data[:, 0], num_data[:, 1])
# # plt.show()
# !/usr/bin/env python
# from unicodedata import decimal
#
# import similaritymeasures
# from Similarity import *
# import csv
#
#
# def main():
# """ main function to create Similarity class instance and get use of it """
#
# measures = Similarity()
#
# csv_reader = csv.reader(open("./reformat/000002.csv", 'r', encoding='gbk'))
# n = 0
# data_set_1 = []
# for row in csv_reader:
# if n == 0:
# n += 1
# continue
# else:
# n += 1
# if row[4] == 'None':
# continue
# data_set_1.append(float(row[4]))
#
# csv_reader = csv.reader(open("./reformat/000004.csv", 'r', encoding='gbk'))
# n = 0
# data_set_2 = []
# for row in csv_reader:
# if n == 0:
# n += 1
# continue
# else:
# n += 1
# # print(row[4])
# if row[4] == 'None':
# continue
# data_set_2.append(float(row[4]))
# # print(data_set_2)
# print(measures.jaccard_similarity(data_set_1, data_set_2))
#
# # print(measures.euclidean_distance([0, 3, 4, 5], [7, 6, 3, -1]))
# # print(measures.jaccard_similarity([0, 1, 2, 5, 6], [0, 2, 3, 5, 7, 9]))
# # print(measures.euclidean_distance([1, 1, 0, 0], [1, 1, 1, -1]))
import time
from math import *
from dateutil import rrule
import matplotlib.pyplot as plt
import numpy
import csv
import json
import os
import os.path
import re
import sys
import codecs
from datetime import datetime
def print_matrix(mat):
    """Pretty-print a 2-D matrix: a size header followed by one row per line."""
    print('[matrix] width : %d height : %d' % (len(mat[0]), len(mat)))
    print('-----------------------------------')
    for i in range(len(mat)):
        # Bugfix: the Python-2 statement "print mat[i]" had been split into
        # two no-op expression statements, so the rows were never printed.
        print(mat[i])  # [v[:2] for v in mat[i]]
def dist_for_float(p1, p2):
    """Absolute difference for bare int/float scalars, Euclidean distance
    for equal-length sequences of numbers (p1's length drives the loop)."""
    if type(p1) in (float, int):
        return float(abs(p1 - p2))
    squared = sum(pow(p1[i] - p2[i], 2) for i in range(len(p1)))
    return pow(squared, 0.5)
def dtw(s1, s2, dist_func):
    """Dynamic Time Warping between sequences s1 (x axis) and s2 (y axis).

    Each cell of the h*w matrix holds [local_dist, cumulative_dist,
    predecessor_x, predecessor_y]. Returns (total_cost, warping_path)
    where the path is a sorted list of (x, y) pairs from (0, 0) to
    (w-1, h-1).

    NOTE(review): the diagonal candidate below doubles the whole
    *cumulative* cost of the diagonal neighbour, not just the step cost --
    likely meant as the classic 2*d(i,j) weighting; confirm before reuse.
    """
    w = len(s1)
    h = len(s2)
    mat = [([[0, 0, 0, 0] for j in range(w)]) for i in range(h)]
    # print_matrix(mat)
    # Fill in the pairwise local distances.
    for x in range(w):
        for y in range(h):
            dist = dist_func(s1[x], s2[y])
            mat[y][x] = [dist, 0, 0, 0]
    # print_matrix(mat)
    # Initialise the first row and first column of cumulative costs.
    elem_0_0 = mat[0][0]
    elem_0_0[1] = elem_0_0[0] * 2
    for x in range(1, w):
        mat[0][x][1] = mat[0][x][0] + mat[0][x - 1][1]
        mat[0][x][2] = x - 1
        mat[0][x][3] = 0
    for y in range(1, h):
        mat[y][0][1] = mat[y][0][0] + mat[y - 1][0][1]
        mat[y][0][2] = 0
        mat[y][0][3] = y - 1
    # Main recurrence: cheapest of left, below, or weighted diagonal.
    for y in range(1, h):
        for x in range(1, w):
            distlist = [mat[y][x - 1][1], mat[y - 1][x][1], 2 * mat[y - 1][x - 1][1]]
            mindist = min(distlist)
            idx = distlist.index(mindist)
            mat[y][x][1] = mat[y][x][0] + mindist
            # Remember which neighbour won, for path backtracking.
            if idx == 0:
                mat[y][x][2] = x - 1
                mat[y][x][3] = y
            elif idx == 1:
                mat[y][x][2] = x
                mat[y][x][3] = y - 1
            else:
                mat[y][x][2] = x - 1
                mat[y][x][3] = y - 1
    # Backtrack from the far corner to (0, 0) along stored predecessors.
    result = mat[h - 1][w - 1]
    retval = result[1]
    path = [(w - 1, h - 1)]
    while True:
        x = result[2]
        y = result[3]
        path.append((x, y))
        result = mat[y][x]
        if x == 0 and y == 0:
            break
    # print_matrix(mat)
    # print(retval)
    return retval, sorted(path)
def display(s1, s2):
    """Plot the DTW alignment of s1 (x axis) against s2 (y axis), save it
    to test.png and show the figure."""
    val, path = dtw(s1, s2, dist_for_float)
    width = len(s1)
    height = len(s2)
    # Mark the warping path with 0-cells on an otherwise all-ones grid.
    grid = [[1] * width for _ in range(height)]
    for px, py in path:
        grid[py][px] = 0
    grid = numpy.array(grid)
    plt.subplot(2, 2, 2)
    c = plt.pcolor(grid, edgecolors='k', linewidths=10)
    plt.title('Dynamic Time Warping (%f)' % val)
    plt.subplot(2, 2, 1)
    plt.plot(s2, range(len(s2)), 'g')
    plt.subplot(2, 2, 4)
    plt.plot(range(len(s1)), s1, 'r')
    plt.savefig('test.png')
    plt.show()
def try_1st():
    """Smoke test: plot the DTW alignment of two tiny hand-made sequences.

    (An earlier, commented-out variant loaded the sequences from two CSV
    files under ./reformat/; it has been replaced by fixed inputs.)
    """
    reference = [1, 1]
    candidate = [1, 10]
    display(reference, candidate)
def csv_to_json():
    """Collect per-stock change records from ./reformat/*.csv (gbk-encoded)
    into reformed_data.json, then rewrite that file with ensure_ascii=False.

    Each CSV data row is: code, name, start date, end date, change.
    NOTE(review): only the last header-skipping semantics and output layout
    of the original are preserved; the re-read/re-write dance exists solely
    to store the JSON with readable non-ASCII characters.
    """
    path = './reformat/'
    files = os.listdir(path)
    data = []
    for file in files:
        with open('./reformat/' + file.title(), encoding='gbk') as f:
            is_first = True
            dic = {}
            detailed = []
            for line in f:
                line = list(line.split(','))
                record = {}  # renamed from "dict" to stop shadowing the builtin
                if is_first:
                    is_first = False  # skip the CSV header row
                else:
                    dic['股票代码'] = line[0]
                    dic['名称'] = line[1]
                    record['开始日期'] = line[2]
                    record['结束日期'] = line[3]
                    if line[4] == 'None\n':
                        record['涨幅'] = 0.0
                    else:
                        record['涨幅'] = float(line[4])
                    if record != {}:
                        detailed.append(record)
            dic['变动信息'] = detailed
            if dic['变动信息'] != []:
                data.append(dic)
            print(dic)
    with open('./reformed_data.json', 'w', encoding='utf8') as f:
        json.dump(data, f)
    # Bugfix: reopen via a context manager; the original handle leaked.
    with open('./reformed_data.json', 'r') as fin:
        info_data = json.load(fin)
    with open('./reformed_data.json', "w", encoding='utf8') as ff:
        json.dump(info_data, ff, ensure_ascii=False)
def split_by_date():
    """Regroup each stock's change records into consecutive 12-record
    periods keyed '<first start date>_<last end date>', writing the result
    to reformed_data_version_2.json (re-saved with ensure_ascii=False).

    NOTE(review): a trailing partial period (< 12 records) is silently
    dropped, and the handle from open('reformed_data.json') is never
    closed -- confirm both are intended.
    """
    # data = []
    # dic_from_json = open('reformed_data.json', 'r')
    # info_data = json.load(dic_from_json)
    # print(type(info_data))
    # string_test_1 = "2021-04-01"
    # string_test_2 = "2021-06-01"
    # time_1 = time.strptime(string_test_1, "%Y-%m-%d")
    # time_2 = time.strptime(string_test_2, "%Y-%m-%d")
    # # print(type(time_1.tm_mday))
    # months = rrule.rrule(freq=rrule.MONTHLY, dtstart=datetime(time_1.tm_year, time_1.tm_mon, time_1.tm_mday), until=datetime(time_2.tm_year, time_2.tm_mon, time_2.tm_mday))
    # print(months.count())
    data = []
    dic_from_json = open('reformed_data.json', 'r')
    info_data = json.load(dic_from_json)
    need_to_change_starting_point_or_not = True
    for i in info_data:
        # each stock
        dic = {}
        i = dict(i)
        # print(i)
        dic['股票代码'] = i['股票代码']
        dic['名称'] = i['名称']
        split_change_info = []
        # temp_dic = {}
        # period_starting_data = i['变动信息'][0]['开始日期']
        change_info_list = i['变动信息']
        for ii in change_info_list:
            # each change record
            # print(ii)
            if need_to_change_starting_point_or_not:
                # Start a new period: remember its first start date.
                period_starting_data = ii['开始日期']
                need_to_change_starting_point_or_not = False
                temp = []
            temp_dic = {}
            # start_date = time.strptime(ii['开始日期'], '%Y-%m-%d')
            # end_date = time.strptime(ii['结束日期'], '%Y-%m-%d')
            temp.append(ii['涨幅'])
            # print(temp)
            # print(ii['涨幅'])
            if len(temp) >= 12:
                # Period complete after 12 records: emit and reset the flag.
                need_to_change_starting_point_or_not = True
                temp_dic[period_starting_data + '_' + ii['结束日期']] = temp
                split_change_info.append(temp_dic)
                print(temp_dic)
        need_to_change_starting_point_or_not = True
        dic['变动信息'] = split_change_info
        data.append(dic)
    # for i in data:
    #     if i['变动信息'] != []:
    #         change_info = i['变动信息'].remove(i['变动信息'][-1])
    with open('./reformed_data_version_2.json', 'w', encoding='utf8') as f:
        json.dump(data, f)
    dic_from_json = open('./reformed_data_version_2.json', 'r')
    info_data = json.load(dic_from_json)
    with open('./reformed_data_version_2.json', "w", encoding='utf8') as ff:
        json.dump(info_data, ff, ensure_ascii=False)
def string_to_time(str):
    """Parse a 'YYYY-MM-DD' date string into a time.struct_time."""
    parsed = time.strptime(str, "%Y-%m-%d")
    return parsed
def how_many_days_in_between(earlier, later):
    """Count calendar days from *earlier* to *later* (time.struct_time values).

    NOTE(review): dateutil's rrule treats "until" inclusively, so both
    endpoints are counted (two consecutive days yield 2) -- confirm that
    callers expect the inclusive count.
    """
    return rrule.rrule(freq=rrule.DAILY, dtstart=datetime(earlier.tm_year, earlier.tm_mon, earlier.tm_mday),
                       until=datetime(later.tm_year, later.tm_mon, later.tm_mday)).count()
def getSingleDictionaryKey(object):
    """Return the (last) key of *object*; intended for one-entry dicts.

    Raises UnboundLocalError for an empty mapping, like the original.
    """
    for key in dict(object):
        result = key
    return result
def getSingleDictionaryValue(object):
    """Return the (last) value of *object*; intended for one-entry dicts.

    Raises UnboundLocalError for an empty mapping, like the original.
    """
    for value in dict(object).values():
        result = value
    return result
def try_2nd():
    """For stock 0's latest period, scan every earlier period of every other
    stock for the smallest DTW distance, then write one-node nodes.json and
    one-edge edges.json describing the best match.

    NOTE(review): the bare open() handles below are never closed, and the
    re-read/re-write of each JSON file exists only to store it with
    ensure_ascii=False.
    """
    nodes = []
    edges = []
    dic_from_json = open('reformed_data_version_2.json', 'r')
    info_data = json.load(dic_from_json)
    # Latest period (date key + value list) of the reference stock, index 0.
    stock_1_date = getSingleDictionaryKey(info_data[0]['变动信息'][-1])
    stock_1_data = getSingleDictionaryValue(info_data[0]['变动信息'][-1])
    dictn = {}
    dicte = {}
    dictn['name'] = info_data[0]['名称']
    dictn['uuid'] = info_data[0]['股票代码']
    # print(stock_1_date)
    # print(stock_1_data)
    # print(stock_1)
    current_best = 9999.0
    index = []
    # Exhaustive search: every non-final period of every other stock.
    for i in range(1, len(info_data)):
        for ii in range(0, len(info_data[i]['变动信息']) - 1):
            # print(info_data[i]['名称'], end=':')
            # print(info_data[i]['变动信息'][ii])
            # print(ii[1])
            temp_ = dtw(stock_1_data, getSingleDictionaryValue(info_data[i]['变动信息'][ii]), dist_for_float)[0]
            print(current_best)
            print(index)
            if temp_ <= current_best:
                # Remember stock index, period index and the period's date key.
                current_best = temp_
                index = [i, ii, getSingleDictionaryKey(info_data[i]['变动信息'][ii])]
                print(index)
    #
    # The period *after* the best match is what "happened next" historically.
    dictn['img'] = getSingleDictionaryValue(info_data[index[0]]['变动信息'][index[1] + 1])
    dictn['nextData'] = getSingleDictionaryValue(info_data[index[0]]['变动信息'][index[1] + 1])[:3]
    target = info_data[index[0]]['变动信息'][index[1]]
    dicte['sourceName'] = info_data[0]['名称']
    dicte['targetName'] = info_data[index[0]]['名称']
    dicte['sourceId'] = info_data[0]['股票代码']
    dicte['targetId'] = info_data[index[0]]['股票代码']
    dicte['sourceDate'] = getSingleDictionaryKey(info_data[0]['变动信息'][-1])
    dicte['targetDate'] = index[2]
    dicte['sourceImg'] = getSingleDictionaryValue(info_data[0]['变动信息'][-1])
    dicte['targetImg'] = getSingleDictionaryValue(info_data[index[0]]['变动信息'][index[1]])
    dicte['value'] = current_best
    nodes.append(dictn)
    edges.append(dicte)
    with open('./nodes.json', 'w', encoding='utf8') as f:
        json.dump(nodes, f)
    dic_from_json = open('./nodes.json', 'r')
    info_data = json.load(dic_from_json)
    with open('./nodes.json', "w", encoding='utf8') as ff:
        json.dump(info_data, ff, ensure_ascii=False)
    with open('./edges.json', 'w', encoding='utf8') as f:
        json.dump(edges, f)
    dic_from_json = open('./edges.json', 'r')
    info_data = json.load(dic_from_json)
    with open('./edges.json', "w", encoding='utf8') as ff:
        json.dump(info_data, ff, ensure_ascii=False)
    # display(stock_1, info_data[index[0]]['变动信息'][index[1]])
def finding_closest():
    """For every stock, find the historical window (in any OTHER stock)
    whose change series is closest by DTW distance to this stock's most
    recent window, then dump the matches to nodes.json / edges.json.

    NOTE(review): if no other stock has at least two windows, `index`
    stays [] and the `info_data[index[0]]` lookups below raise
    IndexError — confirm the input data always avoids that case.
    """
    indexes = []
    nodes = []
    edges = []
    dic_from_json = open('reformed_data_version_2.json', 'r')
    info_data = json.load(dic_from_json)
    for i in range(0, len(info_data)):
        # Latest change window for this stock (skip stocks with none).
        current_best = 99999
        index = []
        dictn = {}
        dicte = {}
        dictn['name'] = info_data[i]['名称']
        dictn['uuid'] = info_data[i]['股票代码']
        change_info = info_data[i]['变动信息']
        if change_info != []:
            latest_feature = getSingleDictionaryValue(info_data[i]['变动信息'][-1])
        else:
            continue
        for ii in range(0, len(info_data)):
            # Compare against every stock except itself.
            if ii != i:
                current_in_comparison = info_data[ii]['变动信息']
                for iii in range(0, len(current_in_comparison) - 1):
                    # Every historical window of the candidate (the last
                    # one is excluded so a "next" window always exists).
                    print(info_data[i]['名称'], end=':')
                    print(info_data[ii]['名称'], end=':')
                    print(info_data[ii]['变动信息'][iii])
                    temp_result = \
                        dtw(getSingleDictionaryValue(info_data[ii]['变动信息'][iii]), latest_feature, dist_for_float)[0]
                    if temp_result <= current_best:
                        current_best = temp_result
                        index = [ii, iii, getSingleDictionaryKey(info_data[ii]['变动信息'][iii])]
        dictn['nextData'] = getSingleDictionaryValue(info_data[index[0]]['变动信息'][index[1] + 1])[:3]
        dictn['img'] = getSingleDictionaryValue(info_data[index[0]]['变动信息'][index[1] + 1])
        dicte['sourceName'] = info_data[i]['名称']
        dicte['targetName'] = info_data[index[0]]['名称']
        dicte['sourceId'] = info_data[i]['股票代码']
        dicte['targetId'] = info_data[index[0]]['股票代码']
        dicte['sourceDate'] = getSingleDictionaryKey(info_data[i]['变动信息'][-1])
        dicte['targetDate'] = index[2]
        dicte['sourceImg'] = getSingleDictionaryValue(info_data[i]['变动信息'][-1])
        dicte['targetImg'] = getSingleDictionaryValue(info_data[index[0]]['变动信息'][index[1]])
        dicte['value'] = current_best
        # result_set.append(temp_best_conbo)
        indexes.append(index)
        nodes.append(dictn)
        edges.append(dicte)
    # Write, re-read and re-write each JSON file so the final copy is
    # saved with ensure_ascii=False (human-readable CJK keys).
    with open('./nodes.json', 'w', encoding='utf8') as f:
        json.dump(nodes, f)
    dic_from_json = open('./nodes.json', 'r')
    info_data = json.load(dic_from_json)
    with open('./nodes.json', "w", encoding='utf8') as ff:
        json.dump(info_data, ff, ensure_ascii=False)
    with open('./edges.json', 'w', encoding='utf8') as f:
        json.dump(edges, f)
    dic_from_json = open('./edges.json', 'r')
    info_data = json.load(dic_from_json)
    with open('./edges.json', "w", encoding='utf8') as ff:
        json.dump(info_data, ff, ensure_ascii=False)
if __name__ == "__main__":
    # Earlier pipeline stages, kept here for reference:
    # csv_to_json()
    # split_by_date()
    # try_1st()
    # try_2nd()
    finding_closest()
    # print(dtw([1, 1, 1, 1, 1], [1, 10, 1, 1, 1], dist_for_float)[0])
| true |
fa2b6ce0fc9cf1f2b5ea025a7168c1794c383021 | Python | carderne/descarteslabs-python | /descarteslabs/workflows/types/containers/tests/test_check_valid_binop.py | UTF-8 | 1,273 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | import operator
import pytest
from ...primitives import Int, Float, Bool
from .._check_valid_binop import check_valid_binop_for
def test_valid():
    """A supported operator whose result type is permitted passes silently."""
    check_valid_binop_for(operator.add, Int, "While testing", valid_result_types=(Int,))
def test_unsupported():
    """An operator the type does not implement raises a TypeError."""
    expected = "While testing: Bool does not support operator add"
    with pytest.raises(TypeError, match=expected):
        check_valid_binop_for(operator.add, Bool, "While testing")
def test_invalid():
    """A supported operator whose result type is not allowed raises."""
    expected = "While testing: operator and_ on Bool produces type Bool. Must produce one of: Int"
    with pytest.raises(TypeError, match=expected):
        check_valid_binop_for(operator.and_, Bool, "While testing", valid_result_types=(Int,))
def test_unsupported_custom_msg():
    """unsupported_msg overrides the default unsupported-operator message."""
    template = "{type_name} {op_name}"
    with pytest.raises(TypeError, match="Bool add"):
        check_valid_binop_for(operator.add, Bool, "not shown", unsupported_msg=template)
def test_invalid_custom_msg():
    """invalid_msg overrides the default invalid-result message."""
    template = "{type_name} {result_name} {valid_result_names}"
    with pytest.raises(TypeError, match="Bool Bool Int, Float"):
        check_valid_binop_for(
            operator.and_,
            Bool,
            "not shown",
            valid_result_types=(Int, Float),
            invalid_msg=template,
        )
| true |
662cbf2e2cf4a9787261a87eedf0c9c239910de8 | Python | Brian-Yang-Git/EE599 | /MNIST_Example.py | UTF-8 | 1,413 | 2.765625 | 3 | [] | no_license | import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
print(f'Using tensorflow version {tf.__version__}')
# Fashion-MNIST: 60k train / 10k test 28x28 grayscale clothing images.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Scale pixel intensities from [0, 255] to [0, 1].
train_images = train_images / 255.0
test_images = test_images / 255.0
# Simple MLP: flatten 28x28 -> 128 ReLU units -> 10-way softmax.
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])
# sparse_categorical_crossentropy works directly on integer class labels.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Hold out 10% of the training data for validation during training.
results = model.fit(train_images, train_labels, batch_size=32, epochs=40, validation_split=0.1)
#results.history is a dictionary
loss = results.history['loss']
val_loss = results.history['val_loss']
acc = results.history['accuracy']
val_acc = results.history['val_accuracy']
epochs = np.arange(len(loss))
# Plot training vs validation loss (Agg backend: saved to file, no display).
plt.figure()
plt.plot(epochs, loss, label='loss')
plt.plot(epochs, val_loss, label='val_loss')
plt.xlabel('epochs')
plt.ylabel('Multiclass Cross Entropy Loss')
plt.legend()
plt.savefig('learning_loss.png', dpi=256)
# Same plot for the accuracy curves.
plt.figure()
plt.plot(epochs, acc, label='acc')
plt.plot(epochs, val_acc, label='val_acc')
plt.xlabel('epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('learning_acc.png', dpi=256) | true |
aa9db7af98d1a9674eb379d91844e65ec2264840 | Python | m04kA/my_work_sckool | /pycharm/Для школы/в разработке/ПанаринЯрослав_ИКТ_ИР_20.py | UTF-8 | 3,208 | 3.625 | 4 | [] | no_license | import time
from random import randint
# Benchmark configuration, read interactively (prompts are in Russian).
test = int(input('Количество тестов: '))  # number of benchmark rounds
ranger = int(input('Длинна массива: '))  # array length per round
my_min = int(input('Min: '))  # smallest random value
my_max = int(input('Max: '))  # largest random value
def massive(minim, maxim, count):
    """Build a list of `count` random integers drawn from [minim, maxim]."""
    return [randint(minim, maxim) for _ in range(count)]
def sheyker(spisok):
    """Cocktail (shaker) sort `spisok` in place, ascending.

    Alternates a left-to-right pass that bubbles the largest remaining
    item to the right end with a right-to-left pass that sinks the
    smallest item to the left end, shrinking the unsorted window from
    both sides after each pass. Returns None.
    """
    lo, hi = 0, len(spisok) - 1
    while lo <= hi:
        # Forward sweep: push the heaviest element toward the right end.
        for idx in range(lo, hi):
            if spisok[idx] > spisok[idx + 1]:
                spisok[idx], spisok[idx + 1] = spisok[idx + 1], spisok[idx]
        hi -= 1  # the rightmost slot of the window is now fixed
        # Backward sweep: push the lightest element toward the left end.
        for idx in range(hi, lo, -1):
            if spisok[idx] < spisok[idx - 1]:
                spisok[idx], spisok[idx - 1] = spisok[idx - 1], spisok[idx]
        lo += 1  # the leftmost slot of the window is now fixed
def bubble(file):
    """Classic in-place bubble sort, ascending; returns None."""
    n = len(file)
    for done in range(n - 1):
        # After `done` passes the last `done` slots hold the largest items,
        # so each sweep can stop earlier.
        for pos in range(n - 1 - done):
            if file[pos] > file[pos + 1]:
                file[pos], file[pos + 1] = file[pos + 1], file[pos]
def chet_nechet(massive):
    """Odd-even (brick) sort in place, ascending; returns None.

    Repeatedly fixes odd-indexed adjacent pairs, then even-indexed
    pairs, until one full round completes without a swap.
    """
    finished = False
    while not finished:
        finished = True
        for start in (1, 0):  # odd-indexed pairs first, then even-indexed
            for pos in range(start, len(massive) - 1, 2):
                if massive[pos] > massive[pos + 1]:
                    massive[pos], massive[pos + 1] = massive[pos + 1], massive[pos]
                    finished = False
def work(num_min, num_max, count_test, my_len):
    """Benchmark the three sorts on identical random arrays.

    Runs `count_test` rounds; each round generates one random array of
    length `my_len` and sorts an independent copy with every algorithm.
    Returns the average wall time per round as (bubble, shaker, odd_even).

    Fix: timing now uses time.perf_counter(), the recommended monotonic
    high-resolution clock for benchmarking — time.time() can jump when
    the system clock is adjusted, corrupting the measurements.
    """
    time_buble = 0
    time_sheyker = 0
    time_odd_even = 0
    for _ in range(count_test):
        mas_1 = massive(num_min, num_max, my_len)
        mas_2 = list(mas_1)  # independent copies so every algorithm
        mas_3 = list(mas_1)  # sorts the same unsorted input
        start = time.perf_counter()
        bubble(mas_1)
        time_buble += time.perf_counter() - start
        start = time.perf_counter()
        sheyker(mas_2)
        time_sheyker += time.perf_counter() - start
        start = time.perf_counter()
        chet_nechet(mas_3)
        time_odd_even += time.perf_counter() - start
    time_buble /= count_test
    time_sheyker /= count_test
    time_odd_even /= count_test
    return time_buble, time_sheyker, time_odd_even
# Run the benchmark with the interactively supplied settings and report
# each algorithm's average sorting time.
tim_bub,tim_shey,tim_odd = work(my_min, my_max, test, ranger)
print(f'Среднее время сортировки пузырьком: {tim_bub}')
print(f'Среднее время сортировки шейкером: {tim_shey}')
print(f'Среднее время сортировки Чётно - нечётной: {tim_odd}')
| true |
b78f8f04017521d925389fdcad67386ec3c46623 | Python | KRISHNA1432/Digit-Recognition | /nn_cnn.py | UTF-8 | 1,732 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 04 19:39:57 2018
@author: user
"""
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Input, Reshape
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128  # NOTE(review): unused — fit() below uses its default batch size
num_classes = 10  # NOTE(review): unused
epochs = 12  # NOTE(review): unused — fit() hardcodes epochs=10
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
import numpy as np
# Flatten each 28x28 image to a (784, 1) column vector.
# NOTE(review): Input(shape=(784,)) below expects (784,), not (784, 1) —
# confirm this shape actually feeds the dense model as intended.
x_train_flat = np.array([i.reshape((784,1))for i in x_train])
x_test_flat = np.array([i.reshape((784,1))for i in x_test])
print (x_train[0].shape)
print (y_train[0:5])
# One-hot encode the integer labels (needed by the MSE loss below).
y_train_one_hot = keras.utils.to_categorical(y_train)
y_test_one_hot = keras.utils.to_categorical(y_test)
print (y_train_one_hot[0:5])
# Add a channel axis for the convolutional model.
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
# Model 1: plain dense network on the flattened pixels.
inp = Input(shape=(784,), name = 'inp')
hid = Dense(100, activation='sigmoid')(inp)
out = Dense(10, activation='sigmoid')(hid)
model = Model([inp],out)
# NOTE(review): MSE + sigmoid is an unusual choice for 10-way
# classification; categorical cross-entropy + softmax is conventional.
model.compile(loss='mse', optimizer='adam',metrics=['accuracy'])
model.fit(x_train_flat,y_train_one_hot,epochs=10,verbose=2)
# Model 2: small CNN (conv -> pool -> flatten -> dense).
inp = Input(shape=(28,28,1))
#inp = Reshape((28,28,1))(inp)
conv = Conv2D(32, (3,3), activation='relu')(inp)
pool = MaxPooling2D((2,2))(conv)
flat = Flatten()(pool)
hid = Dense(100, activation='sigmoid')(flat)
out = Dense(10, activation='sigmoid')(hid)
model = Model([inp],out)
model.compile(loss='mse', optimizer='adam',metrics=['accuracy'])
model.fit(x_train,y_train_one_hot,epochs=10,verbose=2) | true |
14be2669951ae1fd69c4344e47838c242926e787 | Python | krusse-bah/aos-cx-python | /src/system.py | UTF-8 | 878 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | from src import common_ops
def get_system_info(params=None, **kwargs):
    """
    Perform a GET call to get system information

    Fix: `params` previously defaulted to a mutable `{}` shared across
    calls (a classic Python pitfall); it now defaults to None and a
    fresh dict is created per call. Passing a dict works unchanged.

    :param params: Dictionary of optional parameters for the GET request
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: Dictionary containing system information
    """
    if params is None:
        params = {}
    target_url = kwargs["url"] + "system"
    # NOTE(review): verify=False disables TLS certificate checking; kept
    # as-is since switches commonly use self-signed certificates.
    response = kwargs["s"].get(target_url, params=params, verify=False)
    if not common_ops._response_ok(response, "GET"):
        print("FAIL: Getting dictionary of system information failed with status code %d"
              % response.status_code)
        system_info_dict = {}
    else:
        print("SUCCESS: Getting dictionary of system information succeeded")
        system_info_dict = response.json()
    return system_info_dict
| true |
7702bfbbae07e76921e19157e9710e541cbaffc8 | Python | GordonGustafson/Poker | /Player.py | UTF-8 | 260 | 2.78125 | 3 | [] | no_license | class Player(object):
    def __init__(self, name, money):
        # Display name and current chip stack for this player.
        self.name = name
        self.money = money
        # Per-round state: whether the player has folded, the dealt hand
        # (assigned later, presumably by the dealer — confirm), and the
        # chips committed to the pot so far.
        self.has_folded = False
        self.hand = None
        self.in_pot_total = 0
    def get_move(self, gamestate):
        # Baseline strategy stub: always bet nothing (check); subclasses
        # are presumably expected to override this using `gamestate`.
        return {"bet": 0}
| true |
748e6b12779ffa55af50e22446b66553cf14d172 | Python | Rodrigo61/GUAXINIM | /UVa/10198/main.py | UTF-8 | 603 | 3.0625 | 3 | [] | no_license | import sys
target = 0
# Memo table for solve(): maps a partial sum to the number of ways to
# complete it. Fix: this used to be an empty *list*, so `memo[sum]`
# raised IndexError on every call; a dict needs no pre-sizing. It must
# be cleared whenever `target` changes.
memo = {}
def solve(sum):
    """Count the ordered ways to reach `target` from partial sum `sum`
    using steps of size 1 (two distinguishable kinds), 2 and 3.

    Mirrors the iterative DP in the main loop:
    ways(i) = 2*ways(i+1) + ways(i+2) + ways(i+3).
    """
    if sum > target:
        return 0
    if sum == target:
        return 1
    if sum in memo:
        return memo[sum]
    res = 0
    res += solve(sum + 1)
    res += solve(sum + 1)  # the 1-step is deliberately counted twice
    res += solve(sum + 2)
    res += solve(sum + 3)
    memo[sum] = res
    return res
# One target value per input line; count the ways to decompose it into
# an ordered sum of parts 1 (two distinguishable kinds), 2 and 3 using
# bottom-up DP: dp[i] = number of ways to get from partial sum i to target.
for line in sys.stdin:
    target = int(line)
    dp = [0] * (target + 1)
    dp[target] = 1
    for i in range(target - 1, -1, -1):
        if i + 1 <= target:
            dp[i] += dp[i + 1]
        dp[i] += dp[i + 1]  # the 1-step is deliberately counted twice
        if i + 2 <= target:
            dp[i] += dp[i + 2]
        if i + 3 <= target:
            dp[i] += dp[i + 3]
    print(dp[0])
| true |
042d3b2025dfa3f45503a636bc7205caf6afd939 | Python | nazariyv/leetcode | /solutions/easy/moving_average_from_data_stream/main.py | UTF-8 | 525 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env python
from __future__ import division
from collections import deque
class MovingAverage:
    """Sliding-window mean over the last `size` values seen."""

    def __init__(self, size: int):
        # deque(maxlen=...) silently evicts the oldest value once full.
        self._window: deque = deque(maxlen=size)

    def next(self, val: int) -> float:
        """Record `val` and return the mean of the current window."""
        self._window.append(val)
        return sum(self._window) / len(self._window)
# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param_1 = obj.next(val)
if __name__ == '__main__':
    # Quick manual smoke test: stream three values through the window.
    m = MovingAverage(3)
    m.next(3)
    m.next(4)
    m.next(5)
    ...
39e52a761d75a0fd9f3191fb9048e3e893161294 | Python | cascav4l/20Questions | /main.py | UTF-8 | 16,747 | 4.03125 | 4 | [] | no_license | ###################################################################
# Author:
# Adrian Negrea
#
#
# 20 Questions Game
# This program is a game which aims to guess the animal that someone
# is thinking about using a series of 20 yes or no questions.
#
# A database is used to store the questions in a table, with a
# separate table holding the values which each animal has for each
# question.
#
# On program start:
# - user given 3 options:
# Start a new game
# Add an animal
# Exit
#
# On animal addition:
# - all the questions in the questions table are asked in order
# - the answers are stored in a dictionary in order to preserve
# correct order
# - a new row is added to the animals table with the name of the
# animal and the answers that have just been supplied
#
# On game start:
# - a dictionary is initialized which holds the value of each
# animal, which is incremented by 1 each time the answer given
# by the player matches up with the value of that animal for that
# question
# animals_values = {'ANIMAL NAME': VALUE}
# - a variable is initialized in order to hold the highest value
# (currently 0)
# max_value = 0
# - a variable is initialized in order to hold the name of the
# animal with the highest value
# max_animal = ""
# - the same is also done for the second highest value animal
#
# During the game:
# - a random question is asked and then that question is kept track
# of so that it will not be asked again
# - when the user supplies an answer to the random question the
# animals table is checked for the animals which fit the
# description and their values in the animals_values is increased
# by 1
# - if the value of an animal exceeds that of max_value after it is
# incremented, the value of this animal will become the new
# value for max_value and max_animal will hold its name and likewise
# for the second highest valued animal
# - the best and second best guesses are used to determine which
# questions will be asked next
#
# End of game:
# - the game supplies max_animal as a guess to the player
# - the player answers if the guess was in fact the animal they
# were thinking of
# - if the guess is not correct then the player is asked for
# what they were actually thinking of and then that is
# added to the database as a new animal
# - user returned to the main_menu
###################################################################
import sqlite3, random, os
from decimal import *
# Get the current working directory
cwd = os.getcwd()
# Name of the database that is used for the program
database = cwd + "\\animals.db"
# Number of columns in animals table before the question values
nonq = 3
# Template for easier connection to the database
def create_connection(db_file):
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return None
# Keeps asking for input until a valid y/n answer is given.
# Returns answer as 1 or 0.
def game_answer():
ans = input("y/n:")
if ans == "y":
return 1
elif ans == "n":
return 0
else:
print("Invalid answer")
return game_answer()
# Main game logic
def new_game():
conn = create_connection(database)
c = conn.cursor()
c.execute("SELECT * FROM questions")
questions_data = c.fetchall()
# List of all questions that still haven't been asked
questions_left = []
# List of actual text of all the questions
questions_text = []
# Population of questions_left and questions_text
for question in questions_data:
questions_left.append(question[0])
questions_text.append(question[1])
# Initialize dictionary that will store scores for all animals.
# Will be used to select winner.
c.execute("SELECT * FROM animals")
animals_data = c.fetchall()
# Dictionary which holds values of each animal.
# Value is incremented.
animals_values = dict()
for row in animals_data:
animals_values.update({row[1]:0})
# Prompt user for weather they are ready to begin.
print("THINK OF AN ANIMAL!")
print("Do you have an animal in mind?")
# If anything other than y, prompt again.
if (input("y/n:") != "y"):
new_game()
return 0
# Initialize variables that will be used to hold best guess.
max_value = Decimal(0)
max_animal = ""
second_value = Decimal(0)
second_animal = ""
# Dictionary that stores the questions asked along with the
# answer which the player gives for each question.
answers = dict()
# For loop so that 20 questions will be asked before a guess is made.
for i in range(0, 20):
question_number = 0
# Select random question for first question.
if i < 2:
rand = random.random()
rand = rand * len(questions_left)
rand = int(rand)
question_number = questions_left[rand]
# Select question to differentiate between top 2 guesses.
else:
# Get data of the best guess from animals table.
sql = 'SELECT * FROM animals WHERE animal = ?'
c.execute(sql, (max_animal,))
best_values = c.fetchall()
# Get data of second best guess from animals table.
c.execute(sql, (second_animal,))
second_values = c.fetchall()
best_question = questions_left[0]
best_question_difference = 0
for j in questions_left:
dif = abs(best_values[0][j + nonq - 1] - second_values[0][j + nonq - 1])
if dif > 0:
best_question = j
best_question_difference = dif
question_number = best_question
# Print question and prompt for answer.
question = questions_text[int(question_number) - 1]
print(question)
answer = game_answer()
answers.update({question_number: answer})
# Add 1 to value of all animals that match answer.
for row in animals_data:
# if row[question_number + nonq - 1] == answer:
if answer == 0:
val = animals_values.get(str(row[1]))
val = Decimal(val)
val += Decimal(1) - Decimal(row[question_number + nonq - 1])
# Update best guess if needed.
if val > max_value:
# Change second best if new best.
if max_animal != row[1]:
# Previous best becomes second.
second_value = max_value
second_animal = max_animal
# New best animal guess.
max_value = val
max_animal = row[1]
# Update second best guess if needed.
elif val > second_value:
# Don't make second best equal to best animal guess.
if row[1] != max_animal:
# Previous second best guess is replaced.
second_value = val
second_animal = row[1]
animals_values.update({row[1]:val})
elif answer == 1:
val = animals_values.get(str(row[1]))
val = Decimal(val)
val += Decimal(row[question_number + nonq - 1])
# Update best guess if needed.
if val > max_value:
# change second best if new best
if max_animal != row[1]:
# Previous best becomes second.
second_value = max_value
second_animal = max_animal
# New best animal guess.
max_value = val
max_animal = row[1]
# Update second best guess if needed.
elif val > second_value:
# Don't make second best equal to best animal guess.
if row[1] != max_animal:
# Previous second best guess is replaced.
second_value = val
second_animal = row[1]
animals_values.update({row[1]:val})
# Remove question asked from questions_left so that it
# does not get asked again.
q_index = questions_left.index(question_number)
questions_left.remove(questions_left[q_index])
# Provide best guess.
guess = max_animal
post_game_actions(guess, answers)
main_menu()
# This is where all the after-game logic occurs.
# Updates animals table based on the results of the game.
def post_game_actions(guess, answers):
print("\nWhere you thinking of " + guess + "?")
feedback = game_answer()
# Create connection to database.
conn = create_connection(database)
c = conn.cursor()
questions_asked = ""
# Analyze player response.
# Update existing animal if the answer was correct.
if feedback == 1:
animal = guess
update_animal(animal, answers)
# If guess is wrong player is prompted for what they were
# actually thinking about.
elif feedback == 0:
# Ask user for the animal they had in mind,
print("\nWhat was the animal you were thinking about?")
animal = str(input())
# Check if that animal already has an entry in the animals table.
sql = 'SELECT 1 FROM animals WHERE animal = ?'
c.execute(sql, (animal,))
exists = c.fetchall()
# If the animal already has an entry in the table.
if exists:
# Update the existing entry.
update_animal(animal, answers)
# Animal not in the table.
# New entry will be created.
else:
# Create new entry in the animals table with just animal
# and frequency defined.
sql = 'INSERT INTO animals (animal, frequency) VALUES (?, ?);'
c.execute(sql, (animal, 1))
# Add all the data of the animal to the animals table.
for key, value in answers.items():
sql = 'UPDATE animals SET q' + str(key) + ' = q' + str(key) + ' + ' + str(value) + ' WHERE animal = ?;'
c.execute(sql, (animal,))
conn.commit()
else:
print("Error")
# Update the animal question values.
def update_animal(animal, answers):
# Create connection to database.
conn = create_connection(database)
c = conn.cursor()
# Fetch the existing animal data.
sql = 'SELECT * FROM animals WHERE animal = ?'
c.execute(sql, (animal,))
animal_data = c.fetchall()
prev_frequency = animal_data[0][2]
# Update animal with the new average value for each question
# frequency is used to not give earlier or later answers unfair
# representation in the data.
for key, value in answers.items():
old_val = Decimal(animal_data[0][int(key) + nonq - 1]) * Decimal(prev_frequency)
new_val = Decimal(Decimal(old_val + int(value)) / Decimal(prev_frequency + 1))
q_name = 'q' + str(key)
sql = 'UPDATE animals SET ' + q_name + ' = ' + str(new_val) + ' WHERE animal = ?;'
c.execute(sql, (animal,))
# Increase frequency of the animal that was updated by 1.
sql = 'UPDATE animals SET frequency = ? WHERE animal = ?'
new_frequency = prev_frequency + 1
c.execute(sql, (new_frequency, animal))
conn.commit()
# Ask questions during game.
# Formats the question so that it contains the animal name.
def animal_question_yn(question, animal):
# Replaces "it" from all the questions with the animal name.
question = question.replace("it", "a/an {}".format(animal))
print(question)
answer = input("y/n: ")
# Test for valid answer.
if answer != "y" and answer != "n":
print("Please enter a valid answer!")
return animal_question_yn(question, animal)
else:
if answer == "y":
return 1
else:
return 0
# Add an animal to the animals table in animals.db.
# Parameters are connection to database and a tuple
# of size 1, with an animal name as its only element.
def create_animal(conn, animal):
sql = 'SELECT animal FROM animals WHERE animal=?'
c = conn.cursor()
# Attempts to get row from animals table where animal column
# is equal to the animal that will be added.
c.execute(sql, animal)
exists = c.fetchall()
# If there already is an entry for given animal,
# exit create_animal by returning 0.
if exists:
print("Animal already exists.")
return 0
# If there is no entry for animal supplied, create
# a new row with the animal name as the value of the animal
# column in the newly created row.
sql = 'INSERT INTO animals(animal) VALUES(?)'
c.execute(sql, animal)
# Select animal name as string from the animal tuple.
animal = animal[0]
# Create a dictionary with question id as key and question
# text as value.
questions = {}
c.execute('SELECT * FROM questions')
data = c.fetchall()
for question in data:
questions.update({question[0]:question[1]})
# Goes through every question and asks them.
# Answers stored in dictionary, with key of the question used
# as key, and answer stored as value.
answers = {}
for key, value in questions.items():
answer = animal_question_yn(value, animal)
answers.update({key:answer})
# Will hold sqlite command.
questions_string = ""
# Holds values for each question for the animal.
values_list = []
# Create sqlite command.
for key, value in answers.items():
questions_string = questions_string + "q" + str(key) + " = ?,"
values_list.append(value)
values_list.append(str(animal))
values_t = tuple(values_list)
#print("values_t = ")
#print(values_t)
questions_string = questions_string[:-1]
#question_marks = question_marks[:-1]
sql = 'UPDATE animals SET '
sql += str(questions_string)
sql += ' WHERE animal = ?'
#sql += str(animal)
sql = str(sql)
#print("sql = " + sql)
c.execute(sql, values_t)
return c.lastrowid
# Adds new animal to animals table.
def new_animal():
new_animal = input("New animal: ")
# Connect to the database.
conn = create_connection(database)
with conn:
# Turn animal name into tuple.
animal = (new_animal,)
# Passes animal tuple to the create_animal function
# and returns what is returned by it.
return create_animal(conn, animal)
# Adds a question to the questions table.
# Returns id of the row once it is added to the table.
def create_question(conn, question):
sql = 'INSERT INTO questions(question) VALUES(?)'
# Create connection to the database.
c = conn.cursor()
c.execute(sql, question)
ret = c.lastrowid
# Will be used to add column to the animals table.
#NOT YET IMPLEMENTED
new_q = "q" + str(ret)
sql = 'ALTER TABLE animals ADD ' + new_q + ' INTEGER'
#col_name = tuple('"q" + str(ret)',)
c.execute(sql)
return ret
# Takes input for what the new question is and calls
# create_question in order to add it.
def new_question():
new_question = input("New question: ")
conn = create_connection(database)
with conn:
question = (new_question,)
return create_question(conn, question)
# Main interactive piece of program.
def main_menu():
# User chooses what he wants to do.
options = [
"0 Exit",
"1 New game",
"2 Add an animal",
#"3 Add a question",
]
# Display the options.
for option in options:
print(option)
# Listen for the user's option selection.
action = input()
# Make sure action is an integer.
try:
action = int(action)
except ValueError as e:
print("Option must be a number")
main_menu()
# Make sure action is a valid integer.
if action > len(options) or action < 0:
print("Invalid option selected")
main_menu()
# Preform the action which the user selected.
elif action == 0:
quit()
elif action == 1:
new_game()
elif action == 2:
new_animal()
elif action == 3:
new_question()
main_menu()
# Title screen.
def main():
print("WELCOME TO 20 QUESTIONS")
print("DEVELOPER EDITION")
print("CREATED BY CASCAVAL&CO")
print("\n\nWhat would you like to do?")
main_menu()
# Start the program.
main()
| true |
d980a0ff6c31fe2c5a792cae460edfd3842876e1 | Python | zcf1998/zcf1998 | /ex14.py | UTF-8 | 426 | 3.109375 | 3 | [] | no_license | import numpy as np
from sys import argv
script,r=argv
r=float(r)
def Cir(r):
    """Return the circumference (2*pi*r) of a circle of radius ``r``."""
    circumference = 2 * np.pi * r
    return circumference
# Compare the computed circumference against Earth's and Mars' (radii in
# km) and report which planet the input radius more plausibly matches.
# (Python 2 script: note the print statements.)
C=Cir(r)
C_earth=Cir(6378.0)
C_mars=Cir(3396.0)
if abs(C-C_earth)<abs(C-C_mars):
    print "more likely to be earth.%.16f"%(abs(C-C_mars)-abs(C-C_earth))
elif abs(C-C_earth)>abs(C-C_mars):
    print "more likely to be mars.%.16f"%(abs(C-C_mars)-abs(C-C_earth))
else:
    print "both are possible."
| true |
0df17f202dcfbaf6e1149e4e092dad2a1765817e | Python | khushigupta9401/pygame | /image.py | UTF-8 | 613 | 3.265625 | 3 | [] | no_license | import pygame
pygame.init()
display_width = 500
display_hight = 500
gameDisplay = pygame.display.set_mode((display_width,display_hight))
pygame.display.set_caption('a bit racey')
white = (255,255,255)
clock = pygame.time.Clock()
crashed = False
carImg = pygame.image.load('3.jpg')
def car(x,y):
gameDisplay.blit(carImg, (x , y))
x = 250
y = 250
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
gameDisplay.fill(white)
car(x,y)
pygame.display.update()
pygame.quit()
quit()
| true |
dc2121cf13a274e29371c8523db7019937f39552 | Python | rarch/codeeval | /easy/testing.py | UTF-8 | 617 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env python
import sys
def test(bugs):
    """Map a bug count to a severity label: 0 -> Done, 1-2 -> Low,
    3-4 -> Medium, 5-6 -> High, anything above -> Critical."""
    if bugs==0: return 'Done'
    elif bugs<=2: return 'Low'
    elif bugs<=4: return 'Medium'
    elif bugs<=6: return 'High'
    else: return 'Critical'
def getBugs(str1, str2):
    """Count positions where str1 and str2 differ (Hamming distance).

    Portability fix: the loop used `xrange`, which only exists on
    Python 2; `range` iterates identically and works on both 2 and 3.

    Assumes len(str2) >= len(str1); extra characters in str2 are ignored.
    """
    count = 0
    for ind in range(len(str1)):
        if str1[ind] != str2[ind]:
            count += 1
    return count
def main(filename):
    """Read non-empty "left | right" lines from `filename` and print a
    severity rating for how much the two strings differ.
    (Python 2 script: note the print statement.)"""
    lines=[]
    with open(filename) as f_in: # get only nonempty lines
        lines = [line.strip() for line in f_in if line.rstrip()]
    for line in lines:
        print test(getBugs(*(line.split(' | '))))
if __name__ == "__main__":
main(sys.argv[1]) | true |
f949232efae375e4dd8ce4e46ccaff5b343ae219 | Python | SheilaAbby/politico-api | /utils/validations.py | UTF-8 | 1,747 | 2.890625 | 3 | [] | no_license | from marshmallow import ValidationError
import re # use regex
def required(value):
    """Validator: reject blank string values.

    Returns `value` when it is a non-blank string or any other truthy
    object; raises ValidationError for a blank/whitespace-only string.
    NOTE(review): falsy non-string values (0, None, [], ...) fall
    through and implicitly return None instead of raising — confirm
    that is the intended behaviour.
    """
    if isinstance(value, str):  # check if value is type string
        if not value.strip(' '):
            raise ValidationError('This parameter cannot be null')
        return value
    elif value:
        return value
def email(value):
    """Validator: require a user@domain.tld shape; returns the value.

    Bug fix: the character classes used `a-zA-z` — the ASCII range from
    `Z` to `z` also matches `[ \\ ] ^ _` and backtick, so addresses like
    "a^b@c.com" passed validation. Corrected to `a-zA-Z`.
    """
    if not re.match(r"(^[a-zA-Z0-9_.]+@[a-zA-Z0-9]+\.[a-z]+$)", value):
        raise ValidationError('Invalid email format')
    return value
def password(password):
    """
    Ensure the password is strong enough.

    Requires at least 8 leading characters drawn from letters, digits
    and @#$%^&+=.  NOTE(review): the pattern is not anchored at the end,
    so any 8 valid leading characters satisfy it regardless of what
    follows — confirm whether trailing characters should be restricted.

    :param password: candidate password string
    :return: the password unchanged when it passes
    """
    if not re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', password):
        raise ValidationError('Password not Strong')
    return password
def is_valid_url(value):
    """Return True when `value` looks like an http(s) URL, else False."""
    url_pattern = (
        r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}"
        r"\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
    )
    return bool(re.match(url_pattern, value))
def is_valid_date(value):
    """Check that `value` is a valid dd/mm/yyyy date (/, - or . separators).

    The three regex alternatives cover: days 29-31 only in months that
    have them, 29/02 only in leap years, and days 01-28 in any month.
    The \\1..\\4 backreferences force the same separator to be used in
    both positions of a given date.
    """
    if re.match(
            r"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)"
            r"(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)"
            r"0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|"
            r"(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)"
            r"(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$",
            value):
        return True
    return False
def is_valid_phone(value):
    """Return True when `value` is a valid Kenyan mobile number."""
    # Optional 254 / +254 / 0 prefix, then 7xx and six more digits.
    kenyan_mobile = re.compile(
        r"^(?:254|\+254|0)?(7(?:(?:[12][0-9])|(?:0[0-8])|(9[0-2]))[0-9]{6})$"
    )
    return kenyan_mobile.match(value) is not None
| true |
a1742f18c2206d2bb59431a11626f13f55b7b334 | Python | massimotassinari/split | /split.py | UTF-8 | 2,252 | 2.578125 | 3 | [] | no_license | # people = [{
# 'name':'Miguel',
# 'spent' : 30,
# 'has_to_recieve':0,
# 'split': []
# },
# {
# 'name':'Omar',
# 'spent' : 20,
# 'has_to_recieve':0,
# 'split': []
# },
# {
# 'name':'Stefi',
# 'spent' : 12,
# 'has_to_recieve':0,
# 'split': []
# },
# {
# 'name':'Alessandra',
# 'spent' : 10,
# 'has_to_recieve':0,
# 'split': []
# },
# {
# 'name':'gabriel',
# 'spent' : 0,
# 'has_to_recieve':0,
# 'split': []
# }]
# # person = {
# # 'name':'Name',
# # 'spent' : 200,
# # 'has_to_recieve':0,
# # 'split': []
# # }
# total = 0
# for person in people:
# total = total + person["spent"]
# individual_payment = total/len(people)
# have_to_pay = []
# have_to_recieve = []
# for person in people:
# person['has_to_recieve'] = person["spent"] - individual_payment
# if(person['has_to_recieve']>=0):
# have_to_recieve.append(person)
# else:
# have_to_pay.append(person)
# have_to_pay.sort(key=lambda person: person['has_to_recieve'], reverse=True)
# have_to_recieve.sort(key=lambda person: person['has_to_recieve'])
# for person in have_to_pay:
# while abs(person['has_to_recieve'])>0 and len(have_to_recieve)>0:
# if(abs(person['has_to_recieve'])<=have_to_recieve[0]['has_to_recieve']):
# person['split'].append({
# 'name': have_to_recieve[0]['name'],
# 'amount': abs(person['has_to_recieve'])
# })
# have_to_recieve[0]['has_to_recieve'] = have_to_recieve[0]['has_to_recieve'] + person['has_to_recieve']
# person['has_to_recieve'] = 0
# else:
# person['split'].append({
# 'name': have_to_recieve[0]['name'],
# 'amount': have_to_recieve[0]['has_to_recieve'],
# })
# person['has_to_recieve'] = person['has_to_recieve'] + have_to_recieve[0]['has_to_recieve']
# have_to_recieve[0]['has_to_recieve']= 0
# have_to_recieve.remove(have_to_recieve[0])
# print("remove")
# for x in have_to_pay:
# print(x) | true |
87abf365ad450327a75f1debe59a1ca319e902c7 | Python | RSAKing/checkpoint2 | /func.py | UTF-8 | 1,047 | 3.46875 | 3 | [] | no_license | import re
def cadastrar(vazamento):
    """Interactively register leaked credentials into the `vazamento` dict.

    For each entry the user supplies an ID (dict key), a syntactically valid
    e-mail (re-prompted until it matches) and a password; repeats while the
    user answers "S".
    """
    resp = "S"
    while resp == "S":
        tag = input("Informe o ID do vazamento >> ")
        # Re-prompt until the e-mail looks like something@domain.tld.
        while True:
            email = input("Qual e-mail vazado?\n").upper()
            # Raw string fixes the invalid "\." escape warning of the
            # original non-raw pattern.
            if re.match(r"[^@]+@[^@]+\.[^@]+", email):
                break
            print("e-mail invalido informado")
        vazamento[tag] = [
            email,
            input("P4ssw0rd:\n"),
        ]
        resp = input("\nDeseja cadastrar mais? (S/N)\n").upper()
def exibir(vazamento):
    """Print every stored leak record (tag, e-mail, password)."""
    for tag, registro in vazamento.items():
        print("\nTag--->", tag)
        print("Email--->", registro[0])
        print("Senha--->", registro[1])
def buscar(vazamento):
    """Ask for an ID and print the matching leak record, if any."""
    alvo = input("Informe o ID do e-mail:\n")
    registro = vazamento.get(alvo)
    if registro is None:
        print("Not Found!")
    else:
        print("\nEmail--->", registro[0])
        print("Senha--->", registro[1])
d6c4e50dac08c74559ca4472bfbad33e5212e461 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_211/322.py | UTF-8 | 3,065 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python
def find_deficits(list, avg):
    """Total amount by which below-average entries fall short of `avg`.

    NOTE(review): the first parameter shadows the builtin `list`; the name is
    kept for interface compatibility with existing callers.
    """
    return sum((avg - value for value in list if value < avg), 0.000000)
def find_avg(list, last_item, extras):
    """Average of the first `last_item` entries after adding `extras` units.

    Raises ZeroDivisionError when last_item is 0, like the original.
    """
    total = sum(list[:last_item], 0.000000)
    return (total + extras) / last_item
#def allocate_units(probs, units, final_avg):
## total_num = len(probs)
# remaining_units = units
# for i in range(0, total_num):
# if units[i] > final_avg:
# break
# elif i < (total_num - 1):
# next_item = probs[i]
# if remaining_units >= next_item - probs[i]:
# probs[i] = next_item
# remaining_units -= next_item -
def allocate_units(probs, units, target_avg):
    """Spend up to `units` raising below-`target_avg` entries of `probs`.

    `probs` is expected sorted ascending; the list is mutated in place and
    also returned.

    Bug fix: the original decremented the remaining budget *after*
    overwriting probs[i] with target_avg, so it always subtracted zero and
    the budget never shrank.
    """
    rem_units = units
    for i in range(0, len(probs)):
        if probs[i] >= target_avg:
            # Sorted input: every later entry is already at/above target.
            break
        if rem_units == 0:
            return probs
        deficit = target_avg - probs[i]
        if rem_units >= deficit:
            probs[i] = target_avg
            rem_units -= deficit
        else:
            # Not enough budget to reach the target: spend what is left.
            probs[i] += rem_units
            return probs
    return probs
def find_allocated_avg(probs, units, avg, num_items):
    """Fixed-point search for the "water level" reachable with `units`.

    Repeatedly restricts the prefix of (ascending) `probs` that lies below
    the current level and recomputes the level via find_avg until it stops
    changing.

    NOTE(review): the loop below has no break, so with ascending `probs`
    `final_item` ends at the *last* index exceeding avg, not the first —
    confirm this is intended. Also, equality of floats (`new_avg == avg`)
    terminates the recursion; presumably safe here, but verify it cannot
    recurse indefinitely for adversarial inputs.
    """
    final_item = num_items
    for i in range(0, num_items):
        if probs[i] > avg:
            final_item = i
    new_avg = find_avg(probs, final_item, units)
    if new_avg == avg:
        return avg
    else:
        renewed_avg = find_allocated_avg(probs, units, new_avg, final_item)
        #if new_avg == renewed_avg:
        return renewed_avg
        #else:
        #    find_allocated_avg(probs, units, new_avg, final_item)
def find_probs(probs):
    """Product of all entries in `probs` (1.0 for an empty list)."""
    product = 1.000000
    for value in probs:
        product *= value
    return product
def solve_cases(input_name, output):
fin = open(input_name)
fout = open(output, 'w')
inputline = fin.readline()
numcase = int(inputline)
for i in range (0,numcase):
line = fin.readline().split()
num_cores = int(line[0])
req_cores = int(line[1])
units = float(fin.readline())
probs = map(float, fin.readline().split())
#print "units:"+str(units)
#print probs
complete_deficit = find_deficits(probs, 1.0000000)
if complete_deficit <= units:
final_probs = 1.000000
else:
start_avg = find_avg(probs, len(probs), 0.000000)
#total_deficit = find_deficits(probs, avg)
final_avg = start_avg + (units / num_cores)
probs = sorted(probs)
new_mod_avg = find_allocated_avg(probs, units, final_avg, len(probs))
new_probs = allocate_units(probs, units, new_mod_avg)
final_probs = find_probs(new_probs)
answer = "Case #"+str(i+1)+": "+ str("%0.6f" %final_probs) + "\n"
print answer,
fout.write(answer)
#fout.write('\n')
fin.close
fout.close
# Input/output file name pairs for the three contest data sets.
sample_input = "c_sample_input.txt"
sample_output = "c_sample_output.txt"
small_input = "C-small-1-attempt0.in"
small_output = "C-small-out.txt"
large_input = "C-large.in"
large_output = "C-large-out.txt"
# Solves the small data set on import; swap the arguments to run the
# sample or large sets instead.
solve_cases(small_input, small_output)
e940291663362d1e28018e59f4fcafdbd4615d39 | Python | My-lsh/Python-for-Data-Mining | /blog09-LinearRegression/test01.py | UTF-8 | 2,142 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 05 18:10:07 2017
@author: eastmount & zj
"""
#导入玻璃识别数据集
import pandas as pd
glass=pd.read_csv("glass.csv")
#显示前6行数据
print(glass.shape)
print(glass.head(6))
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(font_scale=1.5)
sns.lmplot(x='al', y='ri', data=glass, ci=None)
#利用Pandas画散点图
glass.plot(kind='scatter', x='al', y='ri')
plt.show()
#利用matplotlib做等效的散点图
plt.scatter(glass.al, glass.ri)
plt.xlabel('al')
plt.ylabel('ri')
#拟合线性回归模型
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()
feature_cols = ['al']
X = glass[feature_cols]
y = glass.ri
linreg.fit(X, y)
plt.show()
#对于所有的x值做出预测
glass['ri_pred'] = linreg.predict(X)
print("预测的前六行:")
print(glass.head(6))
#用直线表示预测结果
plt.plot(glass.al, glass.ri_pred, color='red')
plt.xlabel('al')
plt.ylabel('Predicted ri')
plt.show()
#将直线结果和散点图同时显示出来
plt.scatter(glass.al, glass.ri)
plt.plot(glass.al, glass.ri_pred, color='red')
plt.xlabel('al')
plt.ylabel('ri')
plt.show()
#利用相关方法线性预测
linreg.intercept_ + linreg.coef_ * 2
#使用预测方法计算Al = 2的预测
linreg.predict(2)
#铝检验系数
ai=zip(feature_cols, linreg.coef_)
print(ai)
#使用预测方法计算Al = 3的预测
pre=linreg.predict(3)
print(pre)
#检查glass_type
sort=glass.glass_type.value_counts().sort_index()
print(sort)
#类型1、2、3的窗户玻璃
#类型5,6,7是家用玻璃
glass['household'] = glass.glass_type.map({1:0, 2:0, 3:0, 5:1, 6:1, 7:1})
print(glass.head())
plt.scatter(glass.al, glass.household)
plt.xlabel('al')
plt.ylabel('household')
plt.show()
#拟合线性回归模型并存储预测
feature_cols = ['al']
X = glass[feature_cols]
y = glass.household
linreg.fit(X, y)
glass['household_pred'] = linreg.predict(X)
plt.show()
#包括回归线的散点图
plt.scatter(glass.al, glass.household)
plt.plot(glass.al, glass.household_pred, color='red')
plt.xlabel('al')
plt.ylabel('household')
plt.show()
| true |
6f9720b650883f0b310319250ac7d78a7ee65da1 | Python | DivyanshuSaxena/Distributed-GHS | /generate.py | UTF-8 | 4,617 | 3.515625 | 4 | [] | no_license | """Generate test cases for the MST Problem"""
import sys
import random
def write_to_file(num_nodes, edges, graph_type):
    """Write the edges on the input file

    Arguments:
        num_nodes {Integer} -- Number of nodes in the graph
        edges {List} -- List of edge tuples, written one per line
        graph_type {String} -- Tag used in the output file name
    """
    # One file per (size, type) pair under the `files/` directory.
    filename = 'files/inp-{}-{}.txt'.format(num_nodes, graph_type)
    lines = [str(num_nodes)] + [str(edge) for edge in edges]
    with open(filename, 'w') as out_file:
        out_file.write('\n'.join(lines) + '\n')
def generate_random(num_nodes):
    """Generate a random connected graph with unique weight edges

    Nodes are attached one at a time to a random subset of already-added
    nodes, which guarantees connectivity. Weights are drawn without
    replacement from a shuffled pool.

    NOTE(review): the weight pool holds (num_nodes**2)//2 - 5 values; for
    very small num_nodes it can be empty or run out, making random.choice
    raise — confirm the expected minimum graph size.

    Arguments:
        num_nodes {Integer}
    """
    nodes = list(range(num_nodes))  # List of all nodes
    added = [0]  # List of nodes added in the connected graph
    edges = []  # Store edges for the connected graph
    weights = list(range(5, (num_nodes * num_nodes) // 2))
    random.shuffle(weights)
    for node in nodes[1:]:
        # Decide number of edges for this new node (at least 1 keeps the
        # graph connected).
        num_edges = random.randint(1, len(added))
        temp = added.copy()
        for _ in range(num_edges):
            end = random.choice(temp)
            weight = random.choice(weights)
            edges.append((node, end, weight))
            # Remove both endpoints/weights so neither repeats.
            temp.remove(end)
            weights.remove(weight)
        # Add the current node in the graph too
        added.append(node)
    write_to_file(num_nodes, edges, 'random')
def generate_connected(num_nodes):
    """Generate a fully connected random graph with unique edge weights

    Arguments:
        num_nodes {Integer}
    """
    edge_list = []
    weight_pool = list(range(5, (num_nodes * num_nodes) // 2))
    random.shuffle(weight_pool)
    # Every unordered node pair gets one edge with a unique weight.
    for src in range(num_nodes):
        for dst in range(src + 1, num_nodes):
            picked = random.choice(weight_pool)
            edge_list.append((src, dst, picked))
            weight_pool.remove(picked)
    write_to_file(num_nodes, edge_list, 'connected')
def generate_tree(num_nodes):
    """Generate a tree with unique branch weights

    Grows a tree by popping a frontier node and attaching 1-4 children to
    it, until num_nodes nodes exist.

    NOTE(review): queue.pop() removes from the *right* end, so growth is
    depth-first-ish rather than level order — confirm intended shape.

    Arguments:
        num_nodes {Integer}
    """
    queue = [0]
    max_branches = 4
    count = 1
    edges = []
    weights = list(range(5, (num_nodes * num_nodes) // 2))
    random.shuffle(weights)
    # Leftover debug output: size of the weight pool.
    print(len(weights))
    flag = False
    while not flag:
        node = queue.pop()
        neighbours = random.randint(1, max_branches)
        for neighbour in range(neighbours):
            weight = random.choice(weights)
            queue.append(count)
            edges.append((node, count, weight))
            weights.remove(weight)
            count += 1
            # Stop as soon as the requested node count is reached.
            if count == num_nodes:
                flag = True
                break
        if flag:
            break
    write_to_file(num_nodes, edges, 'tree')
def generate_linear(num_nodes):
    """Generate a linear tree (a path) with unique edge weights

    Arguments:
        num_nodes {Integer}
    """
    edge_list = []
    weight_pool = list(range(5, (num_nodes * num_nodes) // 2))
    node_order = list(range(num_nodes))
    random.shuffle(weight_pool)
    random.shuffle(node_order)
    # Chain consecutive nodes of the shuffled order into a path.
    for idx in range(num_nodes - 1):
        picked = random.choice(weight_pool)
        edge_list.append((node_order[idx], node_order[idx + 1], picked))
        weight_pool.remove(picked)
    write_to_file(num_nodes, edge_list, 'linear')
def generate_ring(num_nodes):
    """Generate a ring graph with unique edge weights

    Arguments:
        num_nodes {Integer}
    """
    edge_list = []
    weight_pool = list(range(5, (num_nodes * num_nodes) // 2))
    node_order = list(range(num_nodes))
    random.shuffle(weight_pool)
    random.shuffle(node_order)
    # Chain consecutive nodes of the shuffled order into a path...
    for idx in range(num_nodes - 1):
        picked = random.choice(weight_pool)
        edge_list.append((node_order[idx], node_order[idx + 1], picked))
        weight_pool.remove(picked)
    # ...then close the ring with an edge from the last node back to the first.
    picked = random.choice(weight_pool)
    edge_list.append((node_order[-1], node_order[0], picked))
    write_to_file(num_nodes, edge_list, 'ring')
if __name__ == '__main__':
    # Usage: python generate.py <num-nodes> <graph-type>
    if len(sys.argv) != 3:
        print('To run the file: python generate.py <num-nodes-to-generate> ' +
              '<graph-type (tree/connected/linear/random/ring)>')
        sys.exit()
    num_nodes = int(sys.argv[1])
    gen_type = sys.argv[2]
    print(gen_type)
    # Dispatch on the requested graph type; anything unrecognized falls
    # back to a random connected graph.
    if gen_type == 'linear':
        generate_linear(num_nodes)
    elif gen_type == 'tree':
        generate_tree(num_nodes)
    elif gen_type == 'connected':
        generate_connected(num_nodes)
    elif gen_type == 'ring':
        generate_ring(num_nodes)
    else:
        generate_random(num_nodes)
| true |
cbf1bbf826b36638e3723387e314799ec18fb62d | Python | robertdahmer/Exercicios-Python | /Projetos Python/Aulas Python/Aula07/DESAFIO08.py | UTF-8 | 208 | 3.78125 | 4 | [
"MIT"
] | permissive | #MOSTRA O VALOR COM 5% DE DESCONTO
# Read a product price and print it with a 5% discount applied.
valor = float(input('Qual o valor do produto? R$ '))
valor_final = valor - (valor * 5 / 100)
print('Seu produto com 5% de desconto ficaria {:.2f} R$. '.format(valor_final))
| true |
9615e5db50c04a6671a238147bf2ed19bb80c8c3 | Python | Taschee/schafkopf | /tests/test_card_deck.py | UTF-8 | 896 | 2.875 | 3 | [
"MIT"
] | permissive | from schafkopf.card_deck import CardDeck
from schafkopf.suits import ACORNS, BELLS, HEARTS, LEAVES
from schafkopf.ranks import ACE, TEN, EIGHT, SEVEN
import pytest
@pytest.fixture
def card_deck():
    """Provide a fresh CardDeck instance for each test."""
    return CardDeck()
def test_deal_hand(card_deck):
    """Dealing one hand yields 8 cards from the top and leaves 24 in the deck."""
    hand = card_deck.deal_hand()
    assert len(hand) == 8
    assert len(card_deck.cards) == 24
    # Expected cards imply a deterministic (unshuffled) deck order.
    assert hand == [(ACE, ACORNS), (ACE, LEAVES), (ACE, HEARTS), (ACE, BELLS),
                    (TEN, ACORNS), (TEN, LEAVES), (TEN, HEARTS), (TEN, BELLS)]
def test_deal_hands(card_deck):
    """Dealing player hands empties the deck into 4 hands of 8 cards."""
    player_hands = card_deck.deal_player_hands()
    assert len(player_hands) == 4
    assert len(player_hands[2]) == 8
    assert len(card_deck.cards) == 0
    # Last player receives the bottom of the (unshuffled) deck.
    assert player_hands[3] == [(EIGHT, ACORNS), (EIGHT, LEAVES), (EIGHT, HEARTS), (EIGHT, BELLS),
                               (SEVEN, ACORNS), (SEVEN, LEAVES), (SEVEN, HEARTS), (SEVEN, BELLS)]
| true |
c3b57aa500c46fb1658373b5ef6fa5ba88f3d263 | Python | HLNN/leetcode | /src/0557-reverse-words-in-a-string-iii/reverse-words-in-a-string-iii.py | UTF-8 | 668 | 3.765625 | 4 | [] | no_license | # Given a string s, reverse the order of characters in each word within a sentence while still preserving whitespace and initial word order.
#
#
# Example 1:
# Input: s = "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Example 2:
# Input: s = "God Ding"
# Output: "doG gniD"
#
#
# Constraints:
#
#
# 1 <= s.length <= 5 * 104
# s contains printable ASCII characters.
# s does not contain any leading or trailing spaces.
# There is at least one word in s.
# All the words in s are separated by a single space.
#
#
class Solution:
    def reverseWords(self, s: str) -> str:
        """Reverse the characters of each space-separated word, keeping
        the word order and the single-space separators intact."""
        reversed_words = [word[::-1] for word in s.split(' ')]
        return ' '.join(reversed_words)
| true |
d80022c348f736f29e8f962dba3aa9f3ad6fb630 | Python | Dzen819/pong_game | /main.py | UTF-8 | 1,215 | 3.0625 | 3 | [] | no_license | from turtle import Screen
from pads import Paddle
from scoreboard import Border, Score
from ball import Ball
import time
import random
# --- window setup -----------------------------------------------------------
screen = Screen()
screen.setup(width=800, height=600)
screen.bgcolor("black")
screen.title("Pong")
screen.listen()
# tracer(0) disables automatic refresh; frames are drawn via screen.update().
screen.tracer(0)
# --- game objects -----------------------------------------------------------
bord = Border()
ball = Ball()
p1_score = Score((-370, 267), 1, "left")
p2_score = Score((370, 267), 2, "right")
pad_1 = Paddle((350, 0))   # right-side paddle, controlled with arrow keys
pad_2 = Paddle((-350, 0))  # left-side paddle, controlled with w/s
screen.onkeypress(pad_1.up, "Up")
screen.onkeypress(pad_1.down, "Down")
screen.onkeypress(pad_2.up, "w")
screen.onkeypress(pad_2.down, "s")
game_is_on = True
first_press = False  # NOTE(review): set but never used afterwards
# --- main loop --------------------------------------------------------------
while game_is_on:
    screen.update()
    ball.move()
    # Bounce off the right paddle when close enough and inside its x band.
    if ball.distance(pad_1) < 50 and 335 < ball.xcor() < 350:
        ball.setheading(-ball.heading() + 180)
        ball.speedup()
    # Bounce off the left paddle.
    if ball.distance(pad_2) < 50 and -335 > ball.xcor() > -350:
        ball.setheading(-ball.heading() + 180)
        ball.speedup()
    # Ball passed the right edge: p1_score is increased and the ball resets.
    if ball.xcor() > 400:
        p1_score.increase()
        ball.ball_reset()
        screen.update()
        time.sleep(3)
    # Ball passed the left edge: p2_score is increased and the ball resets.
    elif ball.xcor() < -400:
        p2_score.increase()
        ball.ball_reset()
        screen.update()
        time.sleep(3)
screen.exitonclick()
8fedcced2e82c4e6c2a65aafc8bb518d92251495 | Python | lemduc/CSCI622-Advanced-NLP | /HW1/python_code/2.SpaceDeleter.py | UTF-8 | 308 | 2.6875 | 3 | [] | no_license | import string
output = ""
f = open('space-deleter.fst', 'w')
f.write('%%%%%% Filename: space-deleter.fst %%%%%%\n')
f.write('0\n')
for c in string.ascii_uppercase:
out = '(0 (0 "{}" "{}"))'.format(c, c)
f.write(out + '\n')
f.write('(0 (0 "{}" {}))'.format('_', '*e*') + '\n')
f.close() | true |
a49bc0c7a2888a15b4724be56a6a903b0309cac1 | Python | nicoddemus/lima | /lima/abc.py | UTF-8 | 849 | 2.890625 | 3 | [
"MIT"
] | permissive | '''Abstract base classes for fields and schemas.
.. note::
:mod:`lima.abc` is needed to avoid circular imports of fields needing to
know about schemas and vice versa. The base classes are used for internal
type checks. For users of the library there should be no need to use
:mod:`lima.abc` directly.
'''
class FieldABC:
    '''Abstract base class for fields.

    Inheriting from :class:`FieldABC` marks a class as a field for internal
    type checks (``isinstance`` against this base).

    (Usually, it's a *way* better idea to subclass
    :class:`lima.fields.Field` directly.)
    '''
    pass
class SchemaABC:
    '''Abstract base class for schemas.

    Inheriting from :class:`SchemaABC` marks a class as a schema for internal
    type checks (``isinstance`` against this base).

    (Usually, it's a *way* better idea to subclass
    :class:`lima.schema.Schema` directly.)
    '''
    pass
| true |
1c4d27a3df6ede37d699a66640227ad17ac94f3d | Python | umairwaheed/mangrove | /mangrove/models.py | UTF-8 | 5,546 | 2.75 | 3 | [
"MIT"
] | permissive | import sys
import json
import sqlalchemy
import collections
from mangrove import query
from mangrove import fields
from mangrove import exceptions
from mangrove import connection
if sys.version_info < (3, 0):
import py2_base as base
else:
from mangrove import py3_base as base
class Model(base.ModelBase):
    """Base class for all models

    Attributes
    -----------
    abstract:
        If `True` DB table will not be created

    Model Example
    -------------
    class Person(Model):
        name = fields.StringField()
        age = fields.IntegerField()

    Primary Key
    -----------
    To add primary key to the table add `primary_key=True` to columns
    """
    abstract = False

    def __init__(self, **kwargs):
        # Column values are passed as keyword arguments and set verbatim
        # as instance attributes.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __repr__(self):
        # e.g. Person(name='Bob', age=42) — one entry per declared column.
        args = ', '.join('%s=%s' % (p, repr(getattr(self, p)))
                         for p in self.get_columns())
        cls_name = self.__class__.__name__
        return "%s(%s)" % (cls_name, args)

    def __iter__(self):
        # Yields (column_name, value) pairs, so dict(instance) works.
        return ((p, getattr(self, p)) for p in self.get_columns())

    @classmethod
    def get_key_name(cls):
        """Name of the column(s) that form primary key

        Key name is a tuple of alphabetically sorted names of
        columns that have primary_key == True.
        """
        columns = cls.get_columns().values()
        return tuple(sorted(p.name for p in columns if p.primary_key))

    @classmethod
    def get_table(cls):
        """Return the underlying SQLAlchemy table

        In case the table is not found it is created, we need to do
        this because this function can be called from `select` as well
        and queries can be performed on the table without making the
        instance of the model.
        """
        return connection.get_table(cls)

    @classmethod
    def _get_items_from_dict(cls, item_type):
        """Collect class attributes of `item_type` across the MRO.

        NOTE(review): with this dict comprehension, items from *later* MRO
        entries (base classes) overwrite subclass items — the opposite of
        the commented-out ChainMap, where the first (subclass) entry wins.
        Confirm which override direction is intended. The comprehension
        variable `base` also shadows the module-level `base` import.
        """
        #ChainMap = collections.ChainMap
        #items = ChainMap(*[c.__dict__ for c in cls.mro()]).items()
        items = [i for base in cls.mro() for i in base.__dict__.items()]
        return {k: v for k, v in items if isinstance(v, item_type)}

    @classmethod
    def get_columns(cls):
        # Mapping of attribute name -> sqlalchemy.Column declared on the model.
        return cls._get_items_from_dict(sqlalchemy.Column)

    @classmethod
    def get_constraints(cls):
        # Mapping of attribute name -> sqlalchemy.Constraint declared on the model.
        return cls._get_items_from_dict(sqlalchemy.Constraint)

    @classmethod
    def select(cls, columns=[]):
        """ Create query over this model

        :param list columns: List of columns that need to be returned

        :returns: `query.Counter` object.

        NOTE(review): mutable default argument; safe only as long as
        callers/Query never mutate it in place.
        """
        return query.Query(cls, columns=columns)

    @classmethod
    def get_by_key(cls, key_map):
        # Fetch a single record matching every column==value pair in key_map.
        # (The local name `query` shadows the imported `query` module.)
        query = cls.select()
        for column, value in key_map.items():
            query.where(column==value)
        return query.get()

    @property
    def key(self):
        # Current primary-key values, in get_key_name() order.
        # NOTE(review): filter(None, ...) also drops falsy key parts such
        # as 0 or '' — confirm those can never be legitimate key values.
        _key = tuple(getattr(self, p) for p in self.get_key_name())
        return tuple(filter(None, _key))

    def save(self):
        """ Will only execute insert statement

        It is an error to use this method for record which
        already exists.
        """
        data = {p: getattr(self, p) for p in self.get_columns()}
        stmt = self.get_table().insert().values(**data)
        result = connection.get_connection().execute(stmt)

        # set the key on the model
        key_name = self.get_key_name()
        for col_name, col_value in zip(key_name, result.inserted_primary_key):
            setattr(self, col_name, col_value)

        return result

    def update(self, exclude=[]):
        """ Updates an entity

        Issues update statement to the database. This function will
        update the corressponding row with the properties of the object.
        Returns None when the instance has no key yet.

        Arguments
        ---------
        @exclude: A list of column names that you want to exclude from
            update.

        NOTE(review): mutable default argument (exclude=[]); the list is
        not mutated here, but a tuple/None default would be safer.
        """
        if not self.key:
            return

        ReferenceField = fields.ReferenceField
        # Key columns are never updated; excluded ReferenceFields also
        # exclude their underlying foreign-key columns.
        _exclude = list(self.get_key_name())
        for e in exclude:
            class_field = getattr(self.__class__, e)
            if isinstance(class_field, ReferenceField):
                _exclude.extend(list(class_field.get_fk_columns().keys()))
            else:
                _exclude.append(e)

        data = {
            p: getattr(self, p)
            for p in self.get_columns()
            if p not in _exclude
        }
        table = self.get_table()
        stmt = table.update().values(**data)
        # Restrict the UPDATE to the row identified by this instance's key.
        for col_name, col_value in zip(self.get_key_name(), self.key):
            stmt = stmt.where(table.columns[col_name] == col_value)

        return connection.get_connection().execute(stmt)

    def update_or_save(self):
        """ Update or insert new record

        If primary key is present the function will try to update
        if it cannot update it will insert new record. If primary
        key is not present the function will insert new record.
        """
        if self.key:
            result = self.update()
            # rowcount == 0 means no existing row matched: fall through to
            # an insert.
            if result.rowcount:
                return result
        return self.save()

    def delete(self):
        # Delete the row identified by this instance's key; returns the
        # execution result when a row was deleted, None otherwise.
        key = self.key
        key_name = self.get_key_name()
        table = self.get_table()
        stmt = table.delete()
        for col_name, col_value in zip(key_name, key):
            stmt = stmt.where(table.columns[col_name] == col_value)
        result = connection.get_connection().execute(stmt)
        if result.rowcount:
            return result
| true |
b2a74e1f1c3f772455b315f61db73c6e2ebaa53a | Python | nephashi/news-spider | /redis_util/redis_url_pusher.py | UTF-8 | 1,013 | 2.71875 | 3 | [] | no_license | import common_utils.json_util as ju
class RedisUrlPusher(object):
    """Pushes news URLs onto a Redis queue, using a de-duplication cache
    to skip links that were already crawled."""

    def __init__(self, redis_queue_dao, dup_rmv_cache, logger):
        self.__redis_queue_dao = redis_queue_dao
        self.__dup_rmv_cache = dup_rmv_cache
        self.__logger = logger

    def log_cache_status(self):
        """Log the per-level URL counts of the de-duplication cache."""
        num_per_level = self.__dup_rmv_cache.get_cache_size_per_level()
        pieces = ["url pusher cache status. ",
                  "num level: " + str(num_per_level) + "; "]
        for level, url_count in enumerate(num_per_level):
            pieces.append("in " + str(level) + "th level, num url: " + str(url_count) + "; ")
        self.__logger.info("".join(pieces))

    def url_push(self, dic):
        """Enqueue `dic` (JSON-serialized) keyed on dic['link'].

        Returns True when the URL was new and got pushed, False otherwise.
        """
        url = dic['link']
        already_crawled = self.__dup_rmv_cache.if_url_crawled(url)
        # Strict comparison kept on purpose: only an explicit False from the
        # cache counts as "not crawled yet".
        if already_crawled == False:
            self.__dup_rmv_cache.update_cache(url)
            self.__redis_queue_dao.put(item=ju.py2json(dic))
            return True
        return False
| true |
4dfe985538845dc9fca0fc5cb2bf0ac70c5ed1b8 | Python | henry0312/keras_compressor | /bin/keras-compressor.py | UTF-8 | 2,466 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import logging
import keras
import keras.backend as K
import numpy
from keras.models import load_model
from keras_compressor.compressor import compress
def count_total_params(model):
    """Count the trainable and non-trainable parameters of a Keras model.

    See:
        https://github.com/fchollet/keras/blob/172397ebf45d58ba256c10004c6fce8b40df286b/keras/utils/layer_utils.py#L114-L117

    :param model: Keras model instance
    :return: trainable_count, non_trainable_count
    :rtype: tuple of int
    """
    def _count(weights):
        # De-duplicate shared weight tensors before summing their sizes.
        return int(numpy.sum([K.count_params(p) for p in set(weights)]))

    return _count(model.trainable_weights), _count(model.non_trainable_weights)
def gen_argparser():
    """Build the command line parser for the compressor entry point."""
    arg_parser = argparse.ArgumentParser(description='compress keras model')
    # Positional arguments: source model and destination path.
    arg_parser.add_argument('model', type=str, metavar='model.h5',
                            help='target model, whose loss is specified by `model.compile()`.')
    arg_parser.add_argument('compressed', type=str, metavar='compressed.h5',
                            help='compressed model path')
    # Optional tuning knobs.
    arg_parser.add_argument('--error', type=float, default=0.1, metavar='0.1',
                            help='layer-wise acceptable error. '
                                 'If this value is larger, compressed model will be '
                                 'less accurate and achieve better compression rate. '
                                 'Default: 0.1')
    arg_parser.add_argument('--log-level', type=str, default='INFO',
                            choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
                            help='log level. Default: INFO')
    return arg_parser
def main():
    """CLI entry point: load, compress, save the model and print statistics."""
    parser = gen_argparser()
    args = parser.parse_args()
    logging.basicConfig(level=getattr(logging, args.log_level))

    model = load_model(args.model)  # type: keras.models.Model
    # NOTE(review): count_total_params reads .trainable_weights from its
    # argument, but here it receives model.layers (a list) — confirm this
    # is valid for the Keras version in use.
    total_params_before = sum(count_total_params(model.layers))
    model = compress(model, acceptable_error=args.error)
    total_params_after = sum(count_total_params(model.layers))
    model.save(args.compressed)
    # Report the absolute parameter counts and the compression ratio.
    print('\n'.join((
        'Compressed model',
        '  before #params {:>20,d}',
        '  after  #params {:>20,d} ({:.2%})',
    )).format(
        total_params_before, total_params_after, 1 - float(total_params_after) / total_params_before,
    ))


if __name__ == '__main__':
    main()
| true |
248c233766b0f2bfef4c58a9e1555527bb6d9135 | Python | adrianflatner/ITGK | /9AdrianFlatner/øving9/Generelt_om_filbehandling.py | UTF-8 | 529 | 3.78125 | 4 | [] | no_license | def write_to_file(data):
f = open('default_file.txt','w')
f.write(data)
f.close()
def read_from_file(filename):
    """Return the entire contents of `filename` as a string."""
    # `with` guarantees the handle is closed even if read() raises,
    # unlike the original open/read/close sequence.
    with open(filename, 'r') as f:
        return f.read()
def main():
    """Tiny REPL: 'write' stores a line, 'read' prints it back, 'done' quits."""
    r_or_w = 0
    while r_or_w != 'done':
        r_or_w = input("Do you want to read or write? ")
        if r_or_w == 'write':
            write_to_file(input("What do you want to write? "))
        elif r_or_w == 'read':
            print(read_from_file('default_file.txt'))

# Runs the REPL immediately on execution/import.
main()
| true |
fbcbc5ea5ccc0bcc4c9a762e1758f49d7c6b2dd5 | Python | SaifurShatil/Python | /dict.py | UTF-8 | 440 | 2.90625 | 3 | [] | no_license | d={1:'jsh',2:'jhdhdf',4:'jfdhjr'}
# Lookups: [] raises KeyError for missing keys, .get returns a default.
print(d[4])
print(d.get(2))
print(d.get(3,'Not Found'))
print(d.get(1,'Not Found'))
# Views over the dict's keys and values.
print(d.keys())
print(d.values())
# Build a dict by zipping parallel key/value lists.
keys=[1,2,3,'y']
val=[34,'drtt',4.7,45]
data=dict(zip(keys,val))
print(data)
print(data['y'])
# Insert a new entry, then delete an existing one.
data['mon']=87
print(data)
del data[3]
print(data)
# Nested containers as values; index step by step to reach inner items.
mul={1:43,2:[23,45],3:{1:34,4:9}}
print(mul)
print(mul[1])
print(mul[2])
print(mul[2][1])
print(mul[3][4])
8ff6f948a1f61865e041817c413147be10d366f4 | Python | Alfredogomes/LabBio_Trabalho | /src/motifs.py | UTF-8 | 1,037 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Motifs
======
"""
import argparse
import sys
from Bio import SeqIO
from Bio.ExPASy import ScanProsite
SEQUENCES = 'data/translations.fasta'
def get_motifs(records, outfile):
    """Scan every fasta record in `records` against PROSITE and write the
    parsed results to `outfile`, one record per line."""
    for record in SeqIO.parse(records, 'fasta'):
        # 'CC' control: presumably tells ScanProsite not to skip
        # low-specificity matches — see the ScanProsite docs to confirm.
        scan_args = {'CC': '/SKIP-FLAG=FALSE;'}
        response = ScanProsite.scan(record.seq, 'http://www.expasy.org', 'xml', **scan_args)
        outfile.write(str(ScanProsite.read(response)) + "\n")
def main():
    """Parse command line arguments and run the PROSITE motif scan."""
    parser = argparse.ArgumentParser()
    # Optional positional input; defaults to the bundled translations fasta.
    parser.add_argument("sequences", type=argparse.FileType('r'),
                        default=SEQUENCES, nargs='?',
                        help="Fasta file containing the protein sequences")
    parser.add_argument("-o", "--outfile", type=argparse.FileType('w'),
                        default=sys.stdout,
                        help="output file to write the motifs")
    args = parser.parse_args()
    get_motifs(args.sequences, args.outfile)


if __name__ == "__main__":
    main()
| true |
282e3ddb204cb14d11f0d5438b042fbb196c3fb2 | Python | lpxxn/lppythondemo | /base/http-reqpest-demo-.py | UTF-8 | 405 | 2.765625 | 3 | [] | no_license | import requests
# Demo of HTTP basic auth and JSON access with the requests library.
r = requests.get('https://httpbin.org/basic-auth/user/pass', auth=('user', 'pass'))
print(r.status_code)
assert r.status_code == 200, f'Should be 200 current is {r.status_code}'
print(r.encoding)
print(r.text)
print(r.json())
# Direct indexing raises KeyError when the key is absent...
print(r.json()['authenticated'])
# print(r.json()['user1'])
# ...while .get() returns None for missing keys.
print(r.json().get('authenticatedvvv'))
if r.json().get('adsf') is None:
    print('adsf is None')
7ee4d4f2c0ccc3534bac2702fbaff8e1439c90c1 | Python | dmorais/rede | /Alternative_citation.py | UTF-8 | 1,716 | 3.328125 | 3 | [] | no_license | import sys
import os
def ensure_dir(dir_path):
    """Create `dir_path` (with parents) if it does not exist yet.

    Prints a notice when the directory is created. Returns True in all
    cases, matching the original contract.
    """
    if not os.path.exists(dir_path):
        print("Creating", dir_path)
        # exist_ok guards against a race where another process creates the
        # directory between the check above and this call.
        os.makedirs(dir_path, exist_ok=True)
    return True
def create_list_of_citations(file_name, dir_path, author):
citations = dict()
with open(file_name, 'r') as f:
for line in f:
if not line.strip():
continue
records = line.strip().split("|")
names = records[1].split(";")
for name in names:
name = name.strip()
last_names = name.split(' ')
if last_names[0] in citations.keys():
citations[last_names[0]].add(name)
else:
citations[last_names[0]] = {name}
with open(os.path.join(dir_path, author + "_Alternative_citation.txt"), 'a') as f:
for k, v in citations.items():
# Print only if there is an alternative citation
if len(v) > 1:
text = k + ' : ' + '; '.join(v)
f.write(text)
f.write('\n\n')
def main():
    """CLI entry point: validate arguments, then build the citation report."""
    # Exactly one argument (the scraper output file) is required.
    if (len(sys.argv) != 2) or sys.argv[1] == "-h":
        print("Usage:\npython " + sys.argv[0] + " <File created by Scapper.py>\n"
                                                "The script now creates a dir from the CWD and writes to"
                                                "files named after the author.")
        sys.exit()
    dir_path = os.path.join(os.getcwd(), "alternative_citations")
    ensure_dir(dir_path)
    file_name = sys.argv[1]
    # Author tag = input file name without its extension.
    author = os.path.basename(file_name).split('.')
    create_list_of_citations(file_name, dir_path, author[0])


if __name__ == '__main__':
    main()
| true |
11455cf640b4546ec9bb8f682397484a7d18cef3 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_118/1634.py | UTF-8 | 1,028 | 2.921875 | 3 | [] | no_license | import fileinput
import math
def palindromes_count(start, end):
palindromes_list = [1, 4, 9, 121, 484, 10201, 12321, 14641, 40804, 44944, 1002001, 1234321, 4008004, 100020001, 102030201, 104060401, 121242121, 123454321, 125686521, 400080004, 404090404, 10000200001, 10221412201, 12102420121, 12345654321, 40000800004, 1000002000001, 1002003002001, 1004006004001, 1020304030201, 1022325232201, 1024348434201, 1210024200121, 1212225222121, 1214428244121, 1232346432321, 1234567654321, 4000008000004, 4004009004004, 100000020000001, 100220141022001, 102012040210201, 102234363432201, 121000242000121, 121242363242121, 123212464212321, 123456787654321, 400000080000004]
result_list = [value for value in palindromes_list if value >=start and value <= end]
return len(result_list)
if __name__ == '__main__':
    # Python 2 script. First input line holds the number of cases; each
    # remaining line is "start end". Output follows the Code Jam
    # "Case #n: count" format.
    indata = [line for line in fileinput.input()]
    for (case_no, line) in enumerate(indata[1:]):
        start, end = line.split(' ')
        print 'Case #%d: %d' % (case_no+1, palindromes_count(int(start), int(end)))
e5a4a429adba1fe3efc41baeb97f0d6fe0e0034d | Python | MaximZolotukhin/erik_metiz | /chapter_9/exercise_9.3.py | UTF-8 | 1,856 | 4.21875 | 4 | [] | no_license | """
Пользователи:
создайте класс с именем User. Создайте два атрибута first_name и last_name, а затем еще
несколько атрибутов, которые обычно хранятся в профиле пользователя. Напишите метод describe_user(),
который выводит сводку с информацией о пользователе. Создайте еще один метод greet_user()
для вывода персонального приветствия для пользователя.
Создайте несколько экземпляров, представляющих разных пользователей.
Вызовите оба метода для каждого пользователя.
"""
class User():
    """A simple user profile with a printable summary and a greeting."""

    def __init__(self, first_name, last_name, age, work="Секретный агент"):
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.work = work

    def describe_user(self):
        """Print a multi-line summary of this user's profile."""
        summary_lines = [
            "Краткая информация о новом агенте 007:",
            f"Имя {self.first_name}",
            f"Фамилия {self.last_name}",
            f"Возраст {self.age}",
            f"Работа {self.work}",
        ]
        for line in summary_lines:
            print(line)

    def greet_user(self):
        """Print a personal greeting for this user."""
        print(f"Привет {self.first_name} {self.last_name}, вы завербованы!")
sean = User("Шен", "Коннери", 37)
valera = User("Валений", "Попов", 22, "Архитектор")
sean.describe_user()
sean.greet_user()
print()
valera.describe_user()
valera.greet_user()
| true |
38c8c9c8e877ca204b5e23df37719c7e32d31bf2 | Python | BIAOXYZ/variousCodes | /_CodeTopics/LeetCode/401-600/000565/000565.py3 | UTF-8 | 716 | 2.671875 | 3 | [] | no_license | class Solution:
def arrayNesting(self, nums: List[int]) -> int:
globallyVisitedIndex = set()
lis = []
for i, num in enumerate(nums):
if i not in globallyVisitedIndex:
se = set([i])
while nums[i] not in se:
se.add(nums[i])
i = nums[i]
lis.append(se)
globallyVisitedIndex |= se
return len(max(lis, key=len))
"""
https://leetcode.cn/submissions/detail/338223722/
执行用时:
176 ms
, 在所有 Python3 提交中击败了
76.86%
的用户
内存消耗:
35 MB
, 在所有 Python3 提交中击败了
30.56%
的用户
通过测试用例:
885 / 885
"""
| true |
c3e2069f025c19c712ee604f4e997c8516bab0bb | Python | danny-hunt/Advent-of-Code-2019 | /day10.py | UTF-8 | 804 | 3.3125 | 3 | [] | no_license | from math import atan2, hypot, pi
def angle(a, b):
    """Bearing of point b as seen from a, in [0, 2*pi), with 0 pointing
    straight up (decreasing y) and angles increasing clockwise."""
    dx = b[0] - a[0]
    dy = a[1] - b[1]
    return atan2(dx, dy) % (2 * pi)
def visible(asteroids, a):
    """Number of asteroids visible from `a` (one per distinct bearing)."""
    bearings = {angle(a, other) for other in asteroids if other != a}
    return len(bearings)
with open('day10.txt', 'r') as text_input:
    data = text_input.read().splitlines()
# Grid coordinates (x, y) of every asteroid '#'.
asteroids = [(x, y) for y in range(len(data))
             for x in range(len(data[0])) if data[y][x] == '#']
# Part 1: the best station is the asteroid that sees the most others.
answer = max(visible(asteroids, a) for a in asteroids)
print(answer)
#Answer = 292
# Part 2: vaporization order from the best station `a`.
a = max(asteroids, key=lambda a: visible(asteroids, a))
asteroids.remove(a)
asteroids.sort(key=lambda b: hypot(b[0] - a[0], b[1] - a[1]))
# rank[b] = number of closer asteroids sharing b's bearing, i.e. which
# laser rotation pass destroys b.
ranks = {b : sum(angle(a, b) == angle(a, c) for c in asteroids[:i])
         for i, b in enumerate(asteroids)}
# 200th asteroid destroyed, ordering by pass number then clockwise bearing.
x, y = sorted(asteroids, key=lambda b: (ranks[b], angle(a, b)))[199]
print( x * 100 + y)
| true |
1d239c579a16a6bfc1957297b88a3e83f7b9440d | Python | polarisXD/Automation-Certification | /commons/printer.py | UTF-8 | 184 | 3.75 | 4 | [] | no_license |
class Printer:
    """Tiny helper that prints dictionary entries, one per line."""

    def __init__(self):
        pass

    def printEntries(self, dictionary):
        """Print every entry as 'key: value' (keys must be strings)."""
        for key, value in dictionary.items():
            print(key + ": " + str(value))
5d307d0d41c0dea73fcaf9a3c0ca7f76a02a51ee | Python | broox9/learning | /egghead/python/inputs.py | UTF-8 | 507 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
#python 2 — raw_input was the Python 2 spelling of input()
# rname = raw_input('What is your python 2 name?: ')
# print("hello there, {0} from the letter python".format(rname))
# python 3
name = input('Name: ')
job = input('Job: ')  # NOTE(review): collected but never used below
location = input('Where are you? ')
print(f"Hello there, {name} from {location}")
#inputs are always strings
num1 = input('enter a number') # e.g. 12
num2 = input('enter another number') # e.g. 13
print(f"string value is { num1 + num2 }") # "1213" — string concatenation
print(f"numerical value is { int(num1) + int(num2) }") # 25 — numeric addition
| true |
3705f10549a58788f7e6b71f2992f8a773377458 | Python | lasoren/ml-optimization | /neural_net/NeuralNet.py | UTF-8 | 9,420 | 3.359375 | 3 | [
"MIT"
] | permissive | """
Created on Wed Feb 10 21:56:02 2016
@author: Ryan Lader, Emily MacLeod working from Lab4_Soln
"""
import numpy as np
import matplotlib.pyplot as plt
def construct_truth(y):
    """One-hot encode binary labels: 0 -> [1, 0], 1 -> [0, 1].

    Labels other than 0/1 are silently skipped, as in the original.
    """
    encoded = []
    for label in y:
        if label == 0:
            encoded.append([1, 0])
        elif label == 1:
            encoded.append([0, 1])
    return encoded
class NeuralNet:
    """
    A simple feed-forward neural network with a softmax output layer.

    Depending on ``hidden_layer`` this is either plain softmax regression
    (input -> output) or a 3-layer network with one sigmoid hidden layer
    (input -> hidden -> output).
    """

    def __init__(self, input_dim, hidden_layer, hidden_dim, output_dim, epsilon):
        """
        Initializes the parameters of the neural network to random values.

        Args:
            input_dim: number of input features.
            hidden_layer: True to build the variant with one hidden layer.
            hidden_dim: number of hidden units (ignored if hidden_layer is False).
            output_dim: number of output classes.
            epsilon: gradient-descent learning rate.
        """
        if not hidden_layer:
            self.W = np.random.randn(input_dim, output_dim) / np.sqrt(input_dim)
            self.b = np.zeros((1, output_dim))
            self.epsilon = epsilon
            # NOTE(review): the original reuses the learning rate as the
            # regularization strength in this branch -- behaviour preserved.
            self.reg_lambda = epsilon
            self.hidden_layer = False
        else:
            # in_h denotes input-to-hidden parameters
            self.W_in_h = np.random.randn(input_dim, hidden_dim) / np.sqrt(input_dim)
            self.b_in_h = np.zeros((1, hidden_dim))
            # h_out denotes hidden-to-output parameters
            self.W_h_out = np.random.randn(hidden_dim, output_dim) / np.sqrt(hidden_dim)
            self.b_h_out = np.zeros((1, output_dim))
            self.epsilon = epsilon
            self.reg_lambda = 0.01  # can be modified
            self.hidden_layer = True

    #--------------------------------------------------------------------------
    def compute_cost(self, X, y):
        """
        Computes the regularized cross-entropy loss on the dataset.

        Args:
            X: (num_samples, input_dim) feature matrix.
            y: integer class labels, one per sample.

        Returns:
            Mean cross-entropy loss plus the L2 regularization term.
        """
        num_samples = len(X)
        if not self.hidden_layer:
            # Forward propagation to calculate our predictions.
            z = X.dot(self.W) + self.b
            exp_z = np.exp(z)
            softmax_scores = exp_z / np.sum(exp_z, axis=1, keepdims=True)
            # Cross-entropy loss of the probability assigned to the true class.
            cross_ent_err = -np.log(softmax_scores[range(num_samples), y])
            data_loss = np.sum(cross_ent_err)
            # Add regularization term to loss.
            data_loss += self.reg_lambda/2 * (np.sum(np.square(self.W)))
            return 1./num_samples * data_loss
        else:
            # Forward propagation through the hidden layer.
            z_in = X.dot(self.W_in_h) + self.b_in_h
            # Sigmoid activation of the hidden layer.
            activation = 1./(1 + np.exp(-z_in))
            # The activation acts like the input of the output layer.
            z_out = activation.dot(self.W_h_out) + self.b_h_out
            exp_z = np.exp(z_out)
            softmax = exp_z / np.sum(exp_z, axis=1, keepdims=True)
            cross_ent_err = -np.log(softmax[range(num_samples), y])
            data_loss = np.sum(cross_ent_err)
            # Add regularization term to loss.
            data_loss += self.reg_lambda/2 * (np.sum(np.square(self.W_h_out)))
            return 1./num_samples * data_loss

    #--------------------------------------------------------------------------
    def predict(self, x):
        """
        Makes a prediction based on current model parameters.

        Args:
            x: (num_samples, input_dim) feature matrix.

        Returns:
            Array of predicted class indices (argmax of the softmax scores).
        """
        if not self.hidden_layer:
            z = x.dot(self.W) + self.b
            exp_z = np.exp(z)
            softmax = exp_z / np.sum(exp_z, axis=1, keepdims=True)
            return np.argmax(softmax, axis=1)
        else:
            # Same computation, but routed through the hidden layer.
            z_in = x.dot(self.W_in_h) + self.b_in_h
            activation = 1./(1 + np.exp(-z_in))
            z_out = activation.dot(self.W_h_out) + self.b_h_out
            exp_z = np.exp(z_out)
            softmax = exp_z / np.sum(exp_z, axis=1, keepdims=True)
            return np.argmax(softmax, axis=1)

    #--------------------------------------------------------------------------
    def fit(self, X, y, num_epochs):
        """
        Learns model parameters to fit the data via batch gradient descent.

        Fix: the original update steps read the module-level global
        ``epsilon`` instead of ``self.epsilon``, ignoring the learning rate
        passed to the constructor. Both branches now use ``self.epsilon``
        and both return ``self`` (only the hidden branch did before).

        Args:
            X: (num_samples, input_dim) feature matrix.
            y: integer class labels, one per sample.
            num_epochs: number of full-batch gradient steps.

        Returns:
            self, to allow chained calls.
        """
        if not self.hidden_layer:
            for i in range(num_epochs):
                # Forward propagation to calculate our predictions.
                z = X.dot(self.W) + self.b
                exp_z = np.exp(z)
                softmax = exp_z / np.sum(exp_z, axis=1, keepdims=True)
                # Backpropagation: gradient of cross-entropy w.r.t. logits.
                beta_z = softmax - construct_truth(y)
                dW = np.dot(X.T, beta_z)
                dB = np.sum(beta_z, axis=0, keepdims=True)
                # Add regularization term.
                dW += self.reg_lambda * self.W
                # Gradient descent step (uses the instance learning rate).
                self.W = self.W - self.epsilon*dW
                self.b = self.b - self.epsilon*dB
        else:
            for i in range(num_epochs):
                # Forward propagation through the hidden layer.
                z_in = X.dot(self.W_in_h) + self.b_in_h
                activation = 1./(1 + np.exp(-z_in))
                z_out = activation.dot(self.W_h_out) + self.b_h_out
                exp_z = np.exp(z_out)
                softmax = exp_z / np.sum(exp_z, axis=1, keepdims=True)
                # Backpropagation.
                beta_z = softmax
                beta_z[range(len(X)), y] -= 1
                beta_hidden = beta_z.dot(self.W_h_out.T) * (activation - np.power(activation, 2))
                dW_h_out = (activation.T).dot(beta_z)
                dB_h_out = np.sum(beta_z, axis=0, keepdims=True)
                dW_in_h = np.dot(X.T, beta_hidden)
                dB_in_h = np.sum(beta_hidden, axis=0)
                # Optional regularization terms.
                dW_h_out += self.reg_lambda * self.W_h_out
                dW_in_h += self.reg_lambda * self.W_in_h
                # Gradient descent step (uses the instance learning rate).
                self.W_in_h += -self.epsilon * dW_in_h
                self.b_in_h += -self.epsilon * dB_in_h
                self.W_h_out += -self.epsilon * dW_h_out
                self.b_h_out += -self.epsilon * dB_h_out
        return self
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
def plot_decision_boundary(pred_func):
    """
    Plot the decision boundary given by the model's prediction function.

    Args:
        pred_func: callable mapping an (m, 2) array of points to class labels.

    Note:
        Relies on the module-level globals ``X``, ``y``, ``np`` and ``plt``;
        it does not take the data as parameters.
    """
    # Set min and max values with a small margin around the data.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01  # grid resolution
    # Generate a grid of points covering the data range.
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid.
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples.
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
    plt.show()
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
#Train Neural Network on one of two toy datasets (toggle with `linear`).
linear = False
#A. linearly separable data
if linear:
    #load data (hard-coded local CSV paths)
    X = np.genfromtxt('/Users/RyanJosephLader/Desktop/Lab4_Soln/DATA/ToyLinearX.csv', delimiter=',')
    y = np.genfromtxt('/Users/RyanJosephLader/Desktop/Lab4_Soln/DATA/ToyLinearY.csv', delimiter=',')
    y = y.astype(int)
    #plot data
    plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
    plt.show()
#B. Non-linearly separable data
else:
    #load data (hard-coded local CSV paths)
    X = np.genfromtxt('/Users/RyanJosephLader/Desktop/Lab4_Soln/DATA/ToyMoonX.csv', delimiter=',')
    y = np.genfromtxt('/Users/RyanJosephLader/Desktop/Lab4_Soln/DATA/ToyMoonY.csv', delimiter=',')
    y = y.astype(int)
    #plot data
    plt.scatter(X[:,0], X[:,1], s=40, c=y, cmap=plt.cm.Spectral)
    plt.show()
input_dim = 2 # input layer dimensionality
output_dim = 2 # output layer dimensionality
# Gradient descent parameters
epsilon = 0.01
num_epochs = 5000
# Fit model
#----------------------------------------------
#Uncomment following lines after implementing NeuralNet
#----------------------------------------------
# First: no hidden layer (softmax regression).
NN = NeuralNet(input_dim, False, 0, output_dim, epsilon)
NN.fit(X,y,num_epochs)
#
# Plot the decision boundary
plot_decision_boundary(lambda x: NN.predict(x))
plt.title("Neural Net Decision Boundary")
print('Cost of Neural Net: \n')
print(NN.compute_cost(X,y))
# Accuracy = fraction of samples whose predicted label matches the truth.
correctCount = 0.
predictions = NN.predict(X)
for i in range(len(y)):
    if predictions[i] == y[i]:
        correctCount += 1.0
accuracy = correctCount/len(y)
print('\nAccuracy of Neural Net:\n')
print(accuracy)
# Then: one hidden layer, sweeping its width from 1 to 10 units.
# NOTE(review): the printed message says "hidden layers" but `i` is the
# number of hidden UNITS passed as hidden_dim -- confirm the wording.
for i in range(1,11):
    NN = NeuralNet(input_dim, True, i, output_dim, epsilon)
    NN.fit(X,y,num_epochs)
    print('Cost of Neural Net with ' + str(i) + ' hidden layers: \n')
    print(NN.compute_cost(X,y))
    correctCount = 0.
    predictions = NN.predict(X)
    for j in range(len(y)):
        if predictions[j] == y[j]:
            correctCount += 1.0
    accuracy = correctCount/len(y)
    print('\nAccuracy of Neural Net with ' + str(i) + ' hidden layers:\n')
    print(accuracy)
| true |
3244f48a0637c4d3fab77dcc9c4727c81c2c7524 | Python | MatteoEsposito/ProgettoInItinereI-IngegneriaDegliAlgoritmi- | /Python/lib/ProjUtilities.py | UTF-8 | 8,294 | 3.515625 | 4 | [
"MIT"
] | permissive | # coding=utf-8
# ProjUtilities.py
# Autore: Matteo Esposito
# Versione di Python: 2.6.9
import random
from extendedAVL import ExtendedAVL
from settings import DEBUG
def createAVLByArray(array):
    '''
    Build an AVL tree from a list, keying each value by its list index.

    :Time: O(n) insertions into the tree
    :param array: list of integers to insert
    :return: ExtendedAVL - the resulting AVL tree
    '''
    tree = ExtendedAVL()
    for index, value in enumerate(array):
        tree.insert(index, value)
    return tree
def generateAVLBYRandomSeeding(start, end, n):
    """
    Build a single AVL tree from n distinct pseudo-random integers.

    The integers are sampled without replacement from range(start, end).
    If n exceeds the size of the range, a message is printed and the
    process exits with status 1 (original behaviour preserved).

    :param start: int - start of the sampling range
    :param end: int - end of the sampling range (exclusive)
    :param n: int - how many values to sample
    :Time: O(n)
    :return: ExtendedAVL - AVL tree
    """
    try:
        # random.sample raises ValueError when n > len(range(start, end)).
        return createAVLByArray(random.sample(range(start, end), n))
    except ValueError:
        print('Il numero di campioni supera il range specificato')
        exit(1)
def generateRandomAVLTree(start, end, n, diff, maj):
    """
    Build a pair of AVL trees from pseudo-random numbers such that:

    * every key of tree A is smaller than every key of tree B
      (A is seeded from negated values), and
    * one of the two trees holds `diff` extra elements, chosen by `maj`.

    :param start: int - start of the sampling range
    :param end: int - end of the sampling range
    :param n: int - base number of elements per tree
    :param diff: int - extra elements given to the taller tree
    :param maj: 0 -> A is taller than B; 1 -> B is taller than A
    :Time: O(n)
    :return: tuple(ExtendedAVL A, ExtendedAVL B)
    """
    if maj == 0:
        # A receives the extra `diff` elements.
        A = generateAVLBYRandomSeeding(-end, -start - diff, n + diff)
        B = generateAVLBYRandomSeeding(start, end, n)
    elif maj == 1:
        # B receives the extra `diff` elements.
        A = generateAVLBYRandomSeeding(-end, -start, n)
        B = generateAVLBYRandomSeeding(start, end + diff, n + diff)
    else:
        print("GenerateAVLBYRandomSeeding: Valore non accettato!")
        exit(1)
    return A, B
# Main Code per la Concatenazione
def concatenate(A, B):
    """
    Core algorithm of the project (documented at length in the report):
    given two AVL trees where every key of A is smaller than every key of
    B, return a single balanced AVL tree containing all elements.

    :param A: ExtendedAVL - AVL tree whose keys are all smaller than B's
    :param B: ExtendedAVL - AVL tree whose keys are all larger than A's
    :Time: O(log(n))
    :return: ExtendedAVL - the concatenated AVL tree
    """
    # Get the height of both trees.
    H_a = A.getTreeHeight()
    H_b = B.getTreeHeight()
    if DEBUG:
        print "L'abero A ha altezza pari ad: " + str(H_a)
        print "L'albero B ha altezza pari ad: " + str(H_b)
    # If one tree has no height (is empty) return the other one;
    # if both are empty, return None.
    if H_a == None:
        return B
    elif H_b == None:
        return A
    elif H_a == None and H_b == None:
        return None
    # Focal point of the algorithm: decide what to do based on which
    # of the two trees is taller.
    if H_a <= H_b :
        # Get the rightmost (maximum) node of A.
        R_a = A.getMaxNode()
        R_av = A.value(R_a)
        R_ak = A.key(R_a)
        # Remove that node from A and rebalance.
        A.delete(A.key(R_a))
        if DEBUG:
            print("\n \n Massimo di A: \n")
            print(R_av)
            print("\n \n Albero A senza il suo Massimo \n")
            A.tree.stampa()
        # Search down the left spine of the taller tree (B, by hypothesis)
        # for a node whose height is at most one more than A's height.
        R_b = B.searchLeftForNodeOfHeight(H_a)
        # Keep R_b's father so the temporary tree can be re-attached there.
        if R_b.father == None:
            isRoot = True
            FtR_b = B.tree.root
        else:
            isRoot = False
            FtR_b = R_b.father
        # Cut node R_b out of B, extracting its whole subtree.
        R_bt = B.tree.cut(R_b)
        if DEBUG:
            print("\n \n Sotto-albero del nodo R ( incluso ) \n \n")
            R_bt.stampa()
        # Build a temporary tree rooted at the maximum of A:
        # its left subtree is the whole of A, its right subtree is the
        # subtree rooted at R_b (inclusive). The original key is kept.
        tempA = ExtendedAVL()
        tempA.insert(R_ak, R_av)
        tempA.tree.insertAsLeftSubTree(tempA.tree.root,A.tree)
        tempA.tree.insertAsRightSubTree(tempA.tree.root, R_bt)
        if DEBUG:
            print("\n \n Albero \"A\" Temporaneo \n \n")
            tempA.tree.stampa()
        # Refresh the height of the temporary tree.
        tempA.updateHeight(tempA.tree.root)
        if DEBUG:
            print("\n \n Altezza Aggioranta dell'Albero \"A\" Temporaneo \n \n")
            tempA.tree.stampa()
        if isRoot:
            # If R_b was B's root there is nothing left of B to graft
            # into, so the temporary tree already is the result.
            return tempA
        else:
            # Graft the temporary tree into B as the left subtree of
            # R_b's father, then rebalance upward from the graft point.
            B.tree.insertAsLeftSubTree(FtR_b, tempA.tree)
            B.balInsert(FtR_b.leftSon)
            return B
    else:
        # Case H_a > H_b: symmetric, with the roles of A and B swapped.
        # Get the leftmost (minimum) node of B.
        R_b = B.getMinNode()
        R_bv = B.value(R_b)
        R_bk = B.key(R_b)
        # Remove that node from B and rebalance.
        # NOTE(review): this calls A.key(R_b) on a node of B -- it works
        # only if key() ignores the tree it is called on. Confirm whether
        # B.key(R_b) was intended.
        B.delete(A.key(R_b))
        if DEBUG:
            print("\n \n Minimo di B \n" + str(R_bv))
            print("\n \n Albero B senza il suo minimo \n")
            B.tree.stampa()
        # Search down the right spine of the taller tree (A, by
        # hypothesis) for a node of height at most H_b + 1.
        R_a = A.searchRightForNodeOfHeight(H_b)
        # Keep R_a's father so the temporary tree can be re-attached there.
        FtR_a = R_a.father
        # Cut node R_a out of A, extracting its whole subtree.
        R_at = A.tree.cut(R_a)
        if DEBUG:
            print("\n \n Sotto-Albero R ( incluso ) \n \n")
            R_at.stampa()
        # Build a temporary tree rooted at the minimum of B:
        # its right subtree is the whole of B, its left subtree is the
        # subtree rooted at R_a (inclusive). The original key is kept.
        tempB = ExtendedAVL()
        tempB.insert(R_bk, R_bv)
        tempB.tree.insertAsRightSubTree(tempB.tree.root, B.tree)
        tempB.tree.insertAsLeftSubTree(tempB.tree.root, R_at)
        if DEBUG:
            print("\n \n Albero \"B\" Temporaneo \n \n")
            tempB.tree.stampa()
        # Refresh the height of the temporary tree.
        tempB.updateHeight(tempB.tree.root)
        if DEBUG:
            print("\n \n Altezza aggiornata dell'Albero \"B\" Temporaneo \n \n")
            tempB.tree.stampa()
        # Graft the temporary tree into A as the right subtree of R_a's
        # father, then rebalance upward from the graft point.
        A.tree.insertAsRightSubTree(FtR_a, tempB.tree)
        A.balInsert(FtR_a.rightSon)
        return A
| true |
9cba45842ecc00ece7ee8604a27381d39ea04255 | Python | BenDosch/holbertonschool-higher_level_programming | /0x03-python-data_structures/4-new_in_list.py | UTF-8 | 483 | 3.71875 | 4 | [] | no_license | #!/usr/bin/python3
def new_in_list(my_list, idx, element):
    """Return a copy of my_list with element placed at index idx.

    An out-of-range idx leaves the copy unchanged; the original list is
    never modified.
    """
    copy_list = []
    if my_list:
        copy_list = list(my_list)
    if idx < len(my_list) and idx >= 0:
        copy_list[idx] = element
    return copy_list
def main():
    """Demo: replace one element via new_in_list and show the source list
    is left untouched."""
    source = [1, 2, 3, 4, 5]
    updated = new_in_list(source, 3, 9)
    print(updated)
    print(source)


if __name__ == "__main__":
    main()
| true |
66349255efbb80b5be35c924f47a2df1c88dd0ee | Python | thomas-dubard/python-eval | /needleman_wunsch/ruler.py | UTF-8 | 4,818 | 3.5625 | 4 | [] | no_license | import numpy as np
from colorama import init, Fore, Style
# Variable à changer pour adapter les scores
egalite = 1
trou = 1
def red_text(text: str) -> str:
"""
On utilise cette fonction pour proprement inclure du texte en rouge.
"""
init(convert=True) # nécessaire sous des OS propriétaires :p
return f"{Fore.RED}{text}{Style.RESET_ALL}"
class Ruler:
"""
Cette classe vise à permettre de comparer deux chaînes de caractères.
On va construire une matrice de comparaison mat.
Puis on va l'utiliser pour calculer la distance entre ces chaînes.
"""
def __init__(self, str1, str2):
self._alignA = ""
self._alignB = ""
self.A = " " + str1
self.B = " " + str2
# Oui je triche car sinon le premier caractère est ignoré...
# Mais grâce à ça ça marche et puis ça ne change pas la distance ...
#Initialisation des properties
nb_col = len(self.B)
nb_ligne = len(self.A)
self._mat = np.array([[0]*(nb_col+1)]*(nb_ligne+1))
self.distance = None
def compute(self):
"""
Cette fonction remplit la matrice de comparaison.
Elle est basée sur l'algorithme de Needleman-Wunsch.
"""
for i in range(len(self.A)):
self.mat[i][0] = 1 * i
for j in range(len(self.B)):
self.mat[0][j] = 1 * j
for i in range(1, len(self.A)):
for j in range(1, len(self.B)):
if self.A[i] == self.B[j]:
# Même caractère
s1 = self.mat[i - 1][j - 1] - 1
else:
# Caractère différent
s1 = self.mat[i - 1][j - 1] + 1
# Il manque un caractère
s2 = self.mat[i][j - 1] + 1
# Il faut insérer un caractère
s3 = self.mat[i - 1][j] + 1
# On identifie la situation dans laquelle on est
self.mat[i][j] = min(s1, s2, s3)
#Puis on peut alors calculer la distance.
matrix = np.copy(self.mat)
res = 0
def S(A: str, B:str) -> int:
if A == B:
return -1
else:
return 1
i = len(self.A) - 1
j = len(self.B) - 1
while i > 0 or j > 0:
pos = (i > 0 and j > 0)
comp = S(self.A[i], self.B[j])
if pos and matrix[i][j] == matrix[i - 1][j - 1] + comp:
# On a pas de trou et il faut comparer les caractères
self._alignA = f"{self.A[i]}{self._alignA}"
self._alignB = f"{self.B[j]}{self._alignB}"
i += -1
j += -1
if self.A[i] != self.B[j]:
# S'ils sont différents, cela augmente la distance
res += egalite
elif i > 0 and matrix[i][j] == matrix[i - 1][j] + 1:
# On a un trou à mettre en B
self._alignA = f"{self.A[i]}{self._alignA}"
self._alignB = f"={self._alignB}"
i += -1
res += (trou - egalite)
#A chaque fois ces erreurs comptent doubles donc on adapte
else:
# On a un trou à mettre en A
self._alignA = f"={self._alignA}"
self._alignB = f"{self.B[j]}{self._alignB}"
j += -1
res += (trou - egalite)
#A chaque fois ces erreurs comptent doubles donc on adapte
self.distance = res
def report(self) -> tuple:
"""
Cette fonction vise à donner un compte-rendu de la comparaison
Par la fonction red_text on met en évidence les différences.
"""
d = self.distance
if len(self._alignA) != len(self._alignB):
raise ValueError("Unexpected Error,distance calculation has failed")
alignA_print, alignB_print = "", ""
for k in range(len(self._alignA)):
# On réécrit en mettant en valeur les changements
if self._alignA[k] == "=":
alignA_print += red_text("=")
alignB_print += self._alignB[k]
elif self._alignB[k] == "=":
alignA_print += self._alignA[k]
alignB_print += red_text("=")
elif self._alignA[k] != self._alignB[k]:
alignA_print += red_text(self._alignA[k])
alignB_print += red_text(self._alignB[k])
else:
alignA_print += self._alignA[k]
alignB_print += self._alignB[k]
return alignA_print, alignB_print
@property
def mat(self):
"""
Cette property va stocker la matrice de comparaison
"""
return self._mat | true |
2b081a79cb6a40c88d923968b114ac3179ce80fd | Python | hugoboursier/python | /connexion_database.py | UTF-8 | 437 | 2.578125 | 3 | [] | no_license | import sqlite3
fichierDonnees ="/hometu/etudiants/b/o/E155590U/2eme_annee/python/python/db.sq3"
conn = sqlite3.connect('fichierDonnees')
cur = conn.cursor()
"""
cur.execute("INSERT INTO membres(age,nom,taille) VALUES(21,'Dupont',1.83)")
cur.execute("INSERT INTO membres(age,nom,taille) VALUES(15,'Blumâr',1.57)")
cur.execute("INSERT Into membres(age,nom,taille) VALUES(18,'Özémir',1.69)")
"""
conn.commit()
cur.close()
conn.close()
| true |
722cff4cd6c60e5c5558310def15b6ff9a9473c8 | Python | NaoiseGaffney/PythonMTACourseCertification | /GaffTest/temperatureConversion.py | UTF-8 | 1,593 | 4.78125 | 5 | [] | no_license | temperatureInput = input("Please enter the temperature as a number,followed by either a 'C' for Celsius or\
'F' for Fahrenheit: ").upper().replace(" ", "")
"""
Enter temperature in either Celsius followed by a 'C' or Fahrenheit followed by an 'F'.
The variable 'temperatureInput' contains the entered temperature and either 'C' or 'F' in uppercase (upper()), and
all the space removed (replace(" ", ""), strip() works equally well here, and will remove all whitespace too).
The 'try-except' statement catches multiple entries of 'C' or 'F' or other characters. The 'if' statement checks
whether the last character in 'temperatureInput' is either a 'C' or an 'F' as well as is the input is not empty,
and executes the relevant statements to convert the entered temperature to Fahrenheit or Celsius, printing the result.
"""
try:
if temperatureInput != "" and temperatureInput[-1] == "C":
temperatureInputFloat = float(temperatureInput[:-1])
fahrenheit = (temperatureInputFloat * 9) / 5 + 32
print(f"Celsius temperature, {temperatureInputFloat}, is {fahrenheit} in Fahrenheit.")
elif temperatureInput != "" and temperatureInput[-1] == "F":
temperatureInputFloat = float(temperatureInput[:-1])
celsius = (temperatureInputFloat - 32) * 5 / 9
print(f"Fahrenheit temperature, {temperatureInputFloat}, is {celsius} in Celsius.")
else:
print("Please enter a 'C' for Celsius or 'F' for Fahrenheit after the temperature!")
except ValueError:
print("Please enter a 'C' for Celsius or 'F' for Fahrenheit after the temperature!")
| true |
bae5522ffa522122a30c9144bcbced453e61d53e | Python | SebghatYusuf/vpic-api | /vpic/client.py | UTF-8 | 31,266 | 3 | 3 | [
"MIT"
] | permissive | import logging
from typing import Any, Dict, List, Optional, Union
from .client_base import ClientBase
log = logging.getLogger(__name__)
class Client(ClientBase):
"""A client library for the U.S. NHTSA vPIC API
``Client`` returns JSON responses from the vPIC API. vPIC responses
don't always use the same name for a variable, so by default this
library standardizes variable names. You can disable this by creating
a client like this:
``c = Client(standardize_names=False)``
If you prefer to receive model objects instead of JSON responses,
use ``vpic.Client`` instead.
A client library for the United States National Highway Traffic Safety
Administration (NHTSA) Vehicle Product Information Catalog (vPIC) Vehicle
Listing API.
Use this to gather information on vehicles and their specifications,
and to decode VINs to extract information for specific vehicles. vPIC
has information about these types of vehicles sold or imported in
the USA:
* Bus
* Incomplete Vehicle
* Low Speed Vehicle (LSV)
* Motorcycle
* Multipurpose Passenger Vehicle (MPV)
* Passenger Car
* Trailer
* Truck
vPIC has all of the information about how manufacturers assign a VIN that
encodes the vehicles characteristics. Vehicle manufacturers provide this
information to NHTSA under U.S. law 49 CFR Part 565.
The API available 24/7, is free to use, and does not require registration.
NHTSA uses automatic traffic rate controls to maintain the performance of
the API and their websites that use the API.
See https://vpic.nhtsa.dot.gov/api for more on the API.
Attributes:
host: Hostname, including http(s)://, of the vPIC instance to query.
standardize_variables: vPIC uses different names for the same
variable, so this client standarizes those names by default.
Set this to False to receive the raw vPIC response.
"""
def __init__(
self,
host: Optional[str] = "https://vpic.nhtsa.dot.gov/api/vehicles/",
standardize_variables: bool = True,
):
super(Client, self).__init__(host, standardize_variables)
def decode_vin(
self, vin: str, model_year: int = None, extend=False, flatten=True
) -> Dict[str, Any]:
"""Decode a 17-digit Vehicle Identification Number (VIN) or partial VIN.
Decode the make, model, series, trim, and other vehicle information
from VIN. Model year is required for pre-1980 vehicles, though vPIC
recommends that you always pass it.
If you don't have a complete 17-digit VIN, you can pass a partial
VIN, using asterisk (*) for missing characters. The VIN check digit
(the 9th character) isn't required for partial VINs. The shorter the
partial VIN, the less vehicle information you'll receive in the
response.
See get_vehicle_variable_list for the variables returned here.
Args:
vin: A 17-digit VIN or partial VIN. Use asterisk for missing
characters.
model_year: The vehicle's model year. Recommended, but not required.
extend: If True, response will include variables for other NHTSA
programs like NCSA. Defaults to False.
flatten: True to receive vehicle variables in key-value pairs (this is
the default and usually best choice). False to receive them as a
list of variable objects that include the variable ID.
Raises:
ValueError: if ``vin`` is missing or isn't 6 to 17 characters long.
ValueError: if ``model_year`` is earlier than 1981.
"""
if vin is None:
raise ValueError("vin is required")
if not len(vin) in range(6, 17 + 1):
raise ValueError(
"vin must be at least 6 characters and at most 17 characters"
)
if model_year and model_year < 1981:
raise ValueError("model year must be 1981 or later")
endpoint = "DecodeVin"
if flatten:
endpoint = "DecodeVinValues"
if extend:
endpoint = f"{endpoint}Extended"
if model_year is not None:
params = {"modelyear": model_year}
else:
params = {}
results = self._request(f"{endpoint}/{vin}", params)
return results[0] if flatten else results
def decode_vin_batch(self, vins: List[str]) -> List[Dict[str, Any]]:
"""Decode a batch of 17-digit VINs or partial VINs.
Model year is required for pre-1980 vehicles, though vPIC recommends
that you always pass it.
If you don't have a complete 17-digit VIN, you can pass a partial
VIN, using asterisk (*) for missing characters. The VIN check digit
(the 9th character) isn't required for partial VINs. The shorter the
partial VIN, the less vehicle information you'll receive in the
response.
Vehicle variables will be returned in key-value pairs, the same
format returned by decode_vin(.., flatten=True).
See get_vehicle_variable_list for the variables returned here.
Args:
vins: A list of 17-digit VIN or partial VINs and optional model year.
Use asterisk for missing characters. For example: ["VIN, model_year",
"VIN, model_year", ...]
Raises:
ValueError: if ``vin`` is missing or isn't 6 to 17 characters long.
ValueError: if ``model_year`` is earlier than 1981.
"""
if vins is None:
raise ValueError("vins is required")
if not len(vins) in range(1, 50 + 1):
raise ValueError("pass at least one VIN, and at most 50 VINs")
return self._request_post("DecodeVINValuesBatch", data={"DATA": ";".join(vins)})
def decode_wmi(self, wmi: str) -> Dict[str, Any]:
"""Decode a WMI to get manufacturer information
Provides information on the World Manufacturer Identifier for a
specific WMI code.
Args:
wmi: A 3-character or 6-character World Manufacturer Index code.
Large volume manufacturers usually have a 3 character WMI
representing positions 1 to 3 ("JTD") of a VIN. Smaller
manufacturers have a 6 character WMI representing positions
1 to 3 and 12 to 14 of a VIN.
Raises:
ValueError: if ``wmi`` is missing or isn't 3 or 6 characters long.
Example:
>>> decode_wmi('1FT')
{
"CommonName": "Ford",
"CreatedOn": "2015-03-23",
"DateAvailableToPublic": "2015-01-01",
"MakeName": "FORD",
"ManufacturerName": "FORD MOTOR COMPANY, USA",
"ParentCompanyName": "",
"URL": "http://www.ford.com/",
"UpdatedOn": null,
"VehicleType": "Truck ",
}
"""
if not len(wmi) in [3, 6]:
raise ValueError("WMI must be 3 or 6 characters")
result = self._request(f"DecodeWMI/{wmi}")[0]
# result["WMI"] = wmi
return result
def get_wmis_for_manufacturer(
self,
manufacturer: Optional[Union[str, int]] = None,
vehicle_type: Optional[Union[str, int]] = None,
) -> List[Dict[str, Any]]:
"""Returns the WMIs for one or all manufacturers
You must pass one or both of provide manufacturer or vehicle_type.
Args:
manufacturer: Pass the Manufacturer Id (int) or the complete
manufacturer name (str) to return WMIs for a single manufacturer.
Pass a partial name to return WMIs for all manufacturers with
names that include the partial name.
vehicle_type: Pass the vehicle_type Id (int) or complete vehicle_type
name to return WMIs for that vehicle_type. Pass a partial name to
return WMIs for vehicle_types matching that name.
Raises:
ValueError: if ``manufacturer`` and ``vehicle_type`` are missing
Examples:
>>> get_wmis_for_manufacturer('Honda')
[
{
"Country": null,
"CreatedOn": "2015-03-26",
"DateAvailableToPublic": "2015-01-01",
"Id": 987,
"Name": "HONDA MOTOR CO., LTD",
"UpdatedOn": "2015-06-04",
"VehicleType": "Passenger Car",
"WMI": "JHM"
},
...
]
"""
if manufacturer is None and vehicle_type is None:
raise ValueError("manufacturer or vehicle_type is required")
if manufacturer is None:
endpoint = "GetWMIsForManufacturer"
else:
endpoint = f"GetWMIsForManufacturer/{manufacturer}"
params = {}
if vehicle_type:
params["vehicleType"] = vehicle_type
wmis = self._request(endpoint, params)
# for wmi in wmis:
# wmi["ManufacturerId"] = wmi["Id"]
# del wmi["Id"]
# wmi["Manufacturer"] = wmi["Name"]
# del wmi["Name"]
return wmis
def get_all_makes(self) -> List[Dict[str, Any]]:
"""Returns all of the makes registered with vPIC.
Examples:
>>> get_all_makes()
[
{
"MakeId": 440,
"MakeName": "ASTON MARTIN"
},
{
"MakeId": 441,
"MakeName": "TESLA"
},
{
"MakeId": 442,
"MakeName": "JAGUAR"
},
...
]
"""
return self._request("GetAllMakes")
def get_parts(
self, cfr_part: str, from_date: str, to_date: str, page: int = 1
) -> List[Dict[str, Any]]:
"""Returns a list of vehicle documentation submitted by manufacturers.
Manufacturers provide vehicle information to NHTSA to comply with these
regulations:
* 49 CFR Part 565 (Vehicle Identification Number Guidance)
* 49 CFR Part 566 (Manufacturer Identification – Reporting Requirements)
This provides a list of documents submitted in a date range. Up to 1,000
results will be returned at a time.
Args:
cfr_part: '565' to return 49 CFR Part 565 submissions;
'566' to return 49 CFR Part 566 submissions
from_date: the beginning of the date range to search
end_date: the end of the date range to search
page: results are paginated; this is page number to return
Raises:
ValueError: if ``cfr_part`` is missing
Examples:
>>> get_parts('565', '2015-01-01', '2015-05-05', 1)
[
{
"CoverLetterURL": "",
"LetterDate": "5/5/2015",
"ManufacturerId": 8012,
"ManufacturerName": "PORSCHE CARS NORTH AMERICA, INC.",
"ModelYearFrom": null,
"ModelYearTo": null,
"Name": "ORG10658",
"Type": null,
"URL": "http://vpic.nhtsa.dot.gov/mid/home/displayfile/[guid here]"
},
...
]
"""
if cfr_part is None:
raise ValueError("cfr_part is required")
params = {
"type": cfr_part,
"fromDate": from_date,
"toDate": to_date,
"page": page,
}
return self._request("GetParts", params)
def get_all_manufacturers(
self, manufacturer_type: str = None, page: int = 1
) -> List[Dict[str, Any]]:
"""Return a list of vPIC manufacturers of the given manufacturer_type.
This provides a list of all the Manufacturers available in vPIC Dataset.
See ``get_vehicle_variable_values_list("Manufacturer Type")`` for the list
of manufacturer types.
Args:
manufacturer_type: The manufacturer type, which is Incomplete Vehicles,
Completed Vehicle Manufacturer, Incomplete Vehicle Manufacturer,
Intermediate Manufacturer, Final-Stage Manufacturer, Alterer,
Replica Vehicle Manufacturer. You can pass the full type name, or a
substring of the type.
page: results are paginated; this is the page number to return
Examples:
>>> get_all_manufacturers("Completed Vehicle", 1)
[
{
"Country": "UNITED STATES (USA)",
"Mfr_CommonName": "Tesla",
"Mfr_ID": 955,
"Mfr_Name": "TESLA, INC.",
"VehicleTypes": [
{
"IsPrimary": true,
"Name": "Passenger Car"
},
{
"IsPrimary": false,
"Name": "Multipurpose Passenger Vehicle (MPV)"
}
},
...
]
"""
params = {"ManufacturerType": manufacturer_type, "page": page}
return self._request("GetAllManufacturers", params)
def get_manufacturer_details(
self, manufacturer: Union[str, int]
) -> List[Dict[str, Any]]:
"""Returns details for one or more manufacturers.
Args:
manufacturer: Pass the Manufacturer Id (int) or the complete
manufacturer name (str) to return detail for a single
manufacturer. Pass a partial name to return manufacturers
with names that include the partial name.
Examples:
>>> get_manufacturer_details(988)
[
{
"Address": "1919 Torrance Blvd.",
"Address2": null,
"City": "Torrance",
"ContactEmail": "jeff_chang@ahm.honda.com",
"ContactFax": null,
"ContactPhone": "(310)783-3401",
"Country": "UNITED STATES (USA)",
"DBAs": "...",
"EquipmentItems": [],
"LastUpdated": "/Date(1618422117803-0400)/",
"ManufacturerTypes": [
{
"Name": "Completed Vehicle Manufacturer"
}
],
"Mfr_CommonName": "Honda",
"Mfr_ID": 988,
"Mfr_Name": "HONDA DEVELOPMENT & MANUFACTURING OF AMERICA, LLC",
"OtherManufacturerDetails": null,
"PostalCode": "90501",
"PrimaryProduct": null,
"PrincipalFirstName": "Shinji Aoyama",
"PrincipalLastName": null,
"PrincipalPosition": "President & CEO",
"StateProvince": "CALIFORNIA",
"SubmittedName": "Wilson Tran",
"SubmittedOn": "/Date(1618286400000-0400)/",
"SubmittedPosition": "Sr. Specialist, II",
"VehicleTypes": [
{
"GVWRFrom": "Class 1A: 3,000 lb or less (1,360 kg or less)",
"GVWRTo": "Class 1D: 5,001 - 6,000 lb (2,268 - 2,722 kg)",
"IsPrimary": true,
"Name": "Passenger Car"
},
{
"GVWRFrom": "Class 2E: 6,001 - 7,000 lb (2,722 - 3,175 kg)",
"GVWRTo": "Class 2E: 6,001 - 7,000 lb (2,722 - 3,175 kg)",
"IsPrimary": false,
"Name": "Truck "
},
{
"GVWRFrom": "Class 1B: 3,001 - 4,000 lb (1,360 - 1,814 kg)",
"GVWRTo": "Class 2E: 6,001 - 7,000 lb (2,722 - 3,175 kg)",
"IsPrimary": false,
"Name": "Multipurpose Passenger Vehicle (MPV)"
}
]
}
...
]
"""
if manufacturer is None:
raise ValueError("manufacturer is required")
return self._request(f"GetManufacturerDetails/{manufacturer}")
def get_makes_for_manufacturer(
self, manufacturer: Union[str, int], model_year: int = None
) -> List[Dict[str, Any]]:
"""Returns makes produced by a manufacturer or manufacturers.
Args:
manufacturer: Pass the Manufacturer Id (int) or the complete
manufacturer name (str) to return detail for a single manufacturer.
Pass a partial name to return manufacturers with names that include
the partial name.
model_year: Pass a model year to return only those makes made by
the manufacturer for that model year.
Raises:
ValueError: if ``manufacturer`` is missing
Examples:
>>> get_makes_for_manufacturer(988)
[
{
"MakeId": 474,
"MakeName": "HONDA",
"Mfr_Name": "HONDA DEVELOPMENT & MANUFACTURING OF AMERICA, LLC"
},
{
"MakeId": 475,
"MakeName": "ACURA",
"Mfr_Name": "HONDA DEVELOPMENT & MANUFACTURING OF AMERICA, LLC"
}
...
]
"""
if manufacturer is None:
raise ValueError("manufacturer is required")
if model_year:
results = self._request(
f"GetMakesForManufacturerAndYear/{manufacturer}", {"year": model_year}
)
else:
results = self._request(f"GetMakeForManufacturer/{manufacturer}")
return results
def get_makes_for_vehicle_type(self, vehicle_type: str) -> List[Dict[str, Any]]:
"""Returns makes that produce a vehicle_type
Args:
vehicle_type: A vPIC vehicle_type. For example, "Passenger Car",
"Truck", or "Multipurpose Passenger Vehicle (MPV)". If you pass
a partial vehicle_type, for example "Passenger", results will
include makes for all matching vehicle types. Matching is not
case sensitive.
Raises:
ValueError: if ``vehicle_type`` is missing
Examples:
>>> get_makes_for_vehicle_type('Car')
[
{
"MakeId": 440,
"MakeName": "ASTON MARTIN",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
...
]
"""
if vehicle_type is None:
raise ValueError("vehicle_type is required")
return self._request(f"GetMakesForVehicleType/{vehicle_type.rstrip()}")
def get_vehicle_types_for_make(self, make: Union[str, int]) -> List[Dict[str, Any]]:
"""Returns vehicle types produced by a make or make
Args:
make: Pass the MakeId (int) or the complete make name (str) to return
vehicle types for a single manufacturer. Pass a partial make name
to return vehicle types for all makes that match the partial name.
When you pass a make name, results will include the MakeId and
MakeName because you may get vehicle_types for more than one make.
Raises:
ValueError: if ``make`` is missing
Examples:
>>> get_vehicle_types_for_make(474)
[
{
"VehicleTypeId": 1,
"VehicleTypeName": "Motorcycle"
},
{
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
{
"VehicleTypeId": 3,
"VehicleTypeName": "Truck "
},
{
"VehicleTypeId": 7,
"VehicleTypeName": "Multipurpose Passenger Vehicle (MPV)"
},
{
"VehicleTypeId": 9,
"VehicleTypeName": "Low Speed Vehicle (LSV)"
}
]
>>> get_vehicle_types_for_make('kia')
[
{
"MakeId": 499,
"MakeName": "KIA",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
},
{
"MakeId": 499,
"MakeName": "KIA",
"VehicleTypeId": 7,
"VehicleTypeName": "Multipurpose Passenger Vehicle (MPV)"
},
{
"MakeId": 5848,
"MakeName": "MGS GRAND SPORT (MARDIKIAN)",
"VehicleTypeId": 2,
"VehicleTypeName": "Passenger Car"
}
]
"""
if make is None:
raise ValueError("make is required")
if isinstance(make, int):
return self._request(f"GetVehicleTypesForMakeId/{make}")
else:
return self._request(f"GetVehicleTypesForMake/{make}")
def get_equipment_plant_codes(
self, year: int, equipment_type: int, report_type: str = "All"
) -> List[Dict[str, Any]]:
"""Returns a list of plants that manufacture certain vehicle equipment.
Plants have a unique three-character U.S. Department of Transportation
(DOT) code. vPIC API documentation says this API only accepts 2016 and
later.
Args:
year: must be 2016 or later
equipment_type: return plants that manufacture one of these equipment
types: 1 = Tires; 3 = Brake Hoses; 13 = Glazing; 16 = Retread
report_type: must be one of
New = plants whose code was assigned during the selected year
Updated = plants whose data was modified during the selected year
Closed = plants that are no longer active
All = all active and closed plants, regardless of year
Raises:
ValueError: if ``year`` is earlier than 2016
Example:
>>> get_equipment_plant_codes(2016, 1)
[
{
"Address": "2950 INTERNATIONAL BLVD.",
"City": "CLARKSVILLE",
"Country": "USA",
"DOTCode": "00T",
"Name": "HANKOOK TIRE MANUFACTURING TENNESSEE, LP",
"OldDotCode": "",
"PostalCode": "37040",
"StateProvince": "TENNESSEE",
"Status": "Active"
},
...
]
"""
if year < 2016:
raise ValueError("Year must be 2016 or later")
params = {
"year": year,
"equipmentType": equipment_type,
"reportType": report_type,
}
return self._request("GetEquipmentPlantCodes", params)
def get_models_for_make(
self, make: Union[int, str], model_year: int = None, vehicle_type: str = None
) -> List[Dict[str, Any]]:
"""Return a list of models for a make or makes.
Optionally filter the results by model year and vehicle type.
Args:
make: Pass the MakeId (int) or the complete make name (str) to return
vehicle types for a single manufacturer. Pass a partial make name
to return vehicle types for all makes that match the partial name.
When you pass a make name, results will include the MakeId and
MakeName because you may get vehicle_types for more than one make.
model_year: pass this to return models made in this model year
vehicle_type: one of the vPIC vehicle_types (for example, "Passenger Car",
"Truck", or "Multipurpose Passenger Vehicle (MPV)")
Raises:
ValueError: if ``year`` is earlier than 2016
Examples:
>>> get_models_for_make("TESLA", model_year=2020)
[
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 1685,
"ModelName": "Model S"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 10199,
"ModelName": "Model X"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 17834,
"ModelName": "Model 3"
},
{
"MakeId": 441,
"MakeName": "TESLA",
"ModelId": 27027,
"ModelName": "Model Y"
}
]
VehicleTypeId and VehicleTypeName are only returned
when you specify vehicle_type.
"""
if make is None:
raise ValueError("make is required")
if model_year or vehicle_type:
my = f"/modelyear/{model_year}" if model_year else ""
vt = f"/vehicletype/{vehicle_type}" if vehicle_type else ""
if isinstance(make, int):
endpoint = f"GetModelsForMakeIdYear/makeId/{make}{my}{vt}"
else:
endpoint = f"GetModelsForMakeYear/make/{make}{my}{vt}"
else:
if isinstance(make, int):
endpoint = f"GetModelsForMakeId/{make}"
else:
endpoint = f"GetModelsForMake/{make}"
return self._request(endpoint)
def get_vehicle_variable_list(self) -> List[Dict[str, Any]]:
"""Return a list of vehicle variables tracked by vPIC
Examples:
>>> get_vehicle_variable_list()
[
{
"DataType": "string",
"Description": "<p>Any other battery information that does...",
"Id": 1,
"Name": "Other Battery Info"
},
{
"DataType": "lookup",
"Description": "<p>Battery type field stores the battery ...",
"Id": 2,
"Name": "Battery Type"
},
{
"DataType": "lookup",
"Description": "<p>Bed type is the type of bed (the open b...",
"Id": 3,
"Name": "Bed Type"
},
{
"DataType": "lookup",
"Description": "<p>Cab type applies to both pickup truck ...",
"Id": 4,
"Name": "Cab Type"
},
{
"DataType": "lookup",
"Description": "<p>Body Class presents the Body Type, bas...",
"Id": 5,
"Name": "Body Class"
},
...
]
"""
return self._request("GetVehicleVariableList")
def get_vehicle_variable_values_list(
self, variable_name: str
) -> List[Dict[str, Any]]:
"""Return the values for a vehicle variable
Args:
variable_name: the name of the vehicle variable
Raises:
ValueError: if ``variable_name`` is missing
Examples:
>>> get_vehicle_variable_values_list("Vehicle Type")
[
{
"ElementName": "Vehicle Type",
"Id": 1,
"Name": "Motorcycle"
},
{
"ElementName": "Vehicle Type",
"Id": 2,
"Name": "Passenger Car"
},
{
"ElementName": "Vehicle Type",
"Id": 3,
"Name": "Truck "
},
{
"ElementName": "Vehicle Type",
"Id": 5,
"Name": "Bus"
},
{
"ElementName": "Vehicle Type",
"Id": 6,
"Name": "Trailer"
},
{
"ElementName": "Vehicle Type",
"Id": 7,
"Name": "Multipurpose Passenger Vehicle (MPV)"
},
{
"ElementName": "Vehicle Type",
"Id": 9,
"Name": "Low Speed Vehicle (LSV)"
},
{
"ElementName": "Vehicle Type",
"Id": 10,
"Name": "Incomplete Vehicle"
},
{
"ElementName": "Vehicle Type",
"Id": 13,
"Name": "Off Road Vehicle"
}
]
"""
if variable_name is None:
raise ValueError("variable_name is required")
return self._request(f"GetVehicleVariableValuesList/{variable_name}")
def get_canadian_vehicle_specifications(
self, year: int, make: str, model: str = None, units: str = "Metric"
) -> List[Dict[str, Any]]:
"""Get original vehicle dimensions from the Canadian Vehicle Specification.
The Canadian Vehicle Specifications (CVS) consists of a database of
original vehicle dimensions, used primarily in collision investigation
and reconstruction, combined with a search engine. The database is
compiled annually by the Collision Investigation and Research Division
of Transport Canada.
See [Canadian Vehicle Specifications](http://www.carsp.ca/research/resources
/safety-sources/canadian-vehicle-specifications/).
Args:
year: 1971 or later
make: a make name like "Honda", "Toyota", ...
model: a model name like "Pilot", "Focus", ...
units: "Metric" or "US"
"""
params = {"Year": year, "Make": make, "Model": model, "units": units}
return self._request("GetCanadianVehicleSpecifications", params=params)
| true |
2eb9b16e6411f114ee2da5254c527768bbe7bdb1 | Python | Haimzis/Moles_Detective_Data_Backend | /color_picker.py | UTF-8 | 1,788 | 2.953125 | 3 | [] | no_license | import cv2
import numpy as np
import glob
# Module-level state shared between main() and the mouse callback.
image_hsv = None  # current image converted to HSV (set per file in main)
# Default pixel value. NOTE(review): pick_color assigns a *local* `pixel`,
# so this module-level value is never actually updated.
pixel = (20, 60, 80)
image_src = None  # current BGR image as loaded from disk
# mouse callback function
def pick_color(event, x, y, flags, param):
    """
    Mouse callback: on left click, print the HSV range around the clicked
    pixel and show an overlay highlighting everything inside that range.

    :param event: OpenCV mouse event code
    :param x: x coordinate of the click
    :param y: y coordinate of the click
    :param flags: event flags (unused)
    :param param: user data (unused)
    """
    if event == cv2.EVENT_LBUTTONDOWN:
        # Cast to int first: the pixel components are uint8, so adding or
        # subtracting the tolerance directly wraps modulo 256 (e.g. hue
        # 5 - 10 became 251), producing inverted/nonsense inRange bounds.
        h, s, v = (int(c) for c in image_hsv[y, x])
        # you might want to adjust the ranges(+-10, etc); values are
        # clamped to OpenCV's 8-bit HSV bounds (H: 0-179, S/V: 0-255).
        lower = np.array([max(h - 10, 0), max(s - 10, 0), max(v - 15, 0)])
        upper = np.array([min(h + 10, 179), min(s + 10, 255), min(v + 15, 255)])
        print(list([list(lower), list(upper)]))

        image_mask = cv2.inRange(image_hsv, lower, upper)
        RGB_image_mask = cv2.merge((image_mask, image_mask, image_mask))
        # Blank out the selected colour in the source, then paint the mask
        # on top so the matched region shows up white.
        image_src_without_wanted_color = cv2.bitwise_and(image_src, image_src, mask=cv2.bitwise_not(image_mask))
        overlay = cv2.addWeighted(image_src_without_wanted_color, 1.0, RGB_image_mask, 1.0, 0)
        cv2.imshow("overlay", overlay)
def main():
    """Open each dataset image in turn and let the user click pixels to
    probe HSV colour ranges (see ``pick_color``)."""
    import sys
    global image_hsv, pixel, image_src  # so we can use it in mouse callback
    # NOTE(review): hard-coded dataset location — adjust for your machine.
    files = glob.glob('/media/haimzis/Extreme SSD/Moles_Detector_Dataset/Classification/ISIC_2019_Training_Input/*.jpg')
    for file in files:
        # -1 loads the image unchanged (including alpha, if any).
        image_src = cv2.imread(file, -1)  # pick.py my.png
        if image_src is None:
            # Aborts the whole run on the first unreadable file.
            print("the image read is None............")
            return
        cv2.imshow('bgr', image_src)
        ## NEW ##
        cv2.setMouseCallback('bgr', pick_color)
        # now click into the hsv img , and look at values:
        image_hsv = cv2.cvtColor(image_src, cv2.COLOR_BGR2HSV)
        # Block until a key is pressed, then move on to the next image.
        cv2.waitKey(0)
    cv2.destroyAllWindows()
# Run the interactive picker only when executed directly.
if __name__ == '__main__':
    main()
| true |
f9276d3c198a04ab7db2e2cade312693da322bbb | Python | pyg-team/pytorch_geometric | /test/explain/algorithm/test_attention_explainer.py | UTF-8 | 2,057 | 2.625 | 3 | [
"MIT"
] | permissive | import pytest
import torch
from torch_geometric.explain import AttentionExplainer, Explainer
from torch_geometric.explain.config import ExplanationType, MaskType
from torch_geometric.nn import GATConv, GATv2Conv, TransformerConv
class AttentionGNN(torch.nn.Module):
    """Small three-layer attention GNN (GAT -> GATv2 -> Transformer)
    used as the model under explanation in these tests."""

    def __init__(self):
        super().__init__()
        # 3 input features; hidden heads are concatenated, so layer i+1
        # consumes heads * channels of layer i; final layer emits 7 classes.
        self.conv1 = GATConv(3, 16, heads=4)
        self.conv2 = GATv2Conv(4 * 16, 16, heads=2)
        self.conv3 = TransformerConv(2 * 16, 7, heads=1)

    def forward(self, x, edge_index):
        hidden = self.conv1(x, edge_index).relu()
        hidden = self.conv2(hidden, edge_index)
        return self.conv3(hidden, edge_index)
# Toy graph: 8 nodes with 3 random features each, connected as an
# undirected path (every edge appears in both directions).
x = torch.randn(8, 3)
edge_index = torch.tensor([
    [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7],
    [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6],
])
@pytest.mark.parametrize('index', [None, 2, torch.arange(3)])
def test_attention_explainer(index, check_explanation):
    """Explain the toy model for all nodes, one node, or a batch of nodes,
    and validate the resulting masks."""
    # `check_explanation` is injected by pytest — presumably a fixture from
    # the suite's conftest that validates the Explanation's masks.
    explainer = Explainer(
        model=AttentionGNN(),
        algorithm=AttentionExplainer(),
        explanation_type='model',
        edge_mask_type='object',
        model_config=dict(
            mode='multiclass_classification',
            task_level='node',
            return_type='raw',
        ),
    )
    explanation = explainer(x, edge_index, index=index)
    # No node mask is expected from AttentionExplainer, hence the None.
    check_explanation(explanation, None, explainer.edge_mask_type)
# list(Enum) replaces the redundant `[e for e in Enum]` comprehensions.
@pytest.mark.parametrize('explanation_type', list(ExplanationType))
@pytest.mark.parametrize('node_mask_type', list(MaskType))
def test_attention_explainer_supports(explanation_type, node_mask_type):
    """Every parametrized (explanation_type, node_mask_type) combination
    must be rejected by the Explainer at construction time."""
    with pytest.raises(ValueError, match="not support the given explanation"):
        Explainer(
            model=AttentionGNN(),
            algorithm=AttentionExplainer(),
            explanation_type=explanation_type,
            node_mask_type=node_mask_type,
            edge_mask_type='object',
            model_config=dict(
                mode='multiclass_classification',
                task_level='node',
                return_type='raw',
            ),
        )
| true |
83fecede726ed0a96e970bd26a16aeaa8ad59cc3 | Python | hazrmard/SatTrack | /experiments/multi.py | UTF-8 | 507 | 3.015625 | 3 | [
"MIT"
] | permissive | __author__ = 'Ibrahim'
# Testing python multiprocessing functionality
import multiprocessing as mp
import time
# Started Process handles; appended by main() and polled after the run.
jobs = []
def worker(i):
    """Worker body: sleep `i` seconds, then report this process's name.

    NOTE: this file is Python 2 (print statements)."""
    time.sleep(i)
    print 'worker function ', i, ' Name: ', mp.current_process().name
def main(n):
    """Spawn and start `n` worker processes (named 'worker 0'..'worker n-1'),
    recording each handle in the module-level `jobs` list."""
    for idx in range(n):
        proc = mp.Process(target=worker, args=[idx], name='worker ' + str(idx))
        # proc.daemon = True
        jobs.append(proc)
        proc.start()
# Manual smoke test: launch 5 workers, then after one second report which
# are still alive (worker i sleeps i seconds, so most still are).
if __name__ == '__main__':
    main(5)
    time.sleep(1)
    for p in jobs:
        print p.is_alive()
| true |
4cbb030f6fc35c6c9e5b750360f3de7413b8e71a | Python | BattySingh/myLearn | /coding/python/list7.py | UTF-8 | 277 | 3.140625 | 3 | [] | no_license | motorcycles = ['honda', 'yamha', 'suzuki']
# `motorcycles` is initialised just above; walk through basic list mutations.
print(motorcycles)
# remove() deletes the first element equal to the given value.
motorcycles.remove('honda')
print(motorcycles)
motorcycles.append('ducati')
# A value held in a variable can be removed the same way.
too_expensive = 'ducati'
motorcycles.remove(too_expensive)
message = f"{ too_expensive.title() } is too expensive for me"
print(message)
beebb0cae516a65eb8b305915870b6ea4c542684 | Python | TaeHyangKwon/RSASimulator | /Bob.py | UTF-8 | 632 | 2.859375 | 3 | [] | no_license | from Crypto.PublicKey import RSA
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
import base64
# Load Alice's public key; `with` guarantees each file handle is closed
# even if parsing fails (the original left handles open on error).
with open('AlicePubKey.pem', 'r') as f:
    AlicPubKey = RSA.import_key(f.read())

# The message Alice signed, and her base64-encoded signature.
with open('Message.pem', 'r') as f:
    message = f.read()

with open('Signature.pem', 'r') as f:
    signature = base64.b64decode(f.read())

print("Bob received message (", message, signature, ") from Alice.")

# Verify the PKCS#1 v1.5 signature over the SHA-256 digest of the message.
h = SHA256.new(message.encode('utf-8'))
try:
    pkcs1_15.new(AlicPubKey).verify(h, signature)
    print("The signature is valid")
except (ValueError, TypeError):
    print("The signature is not valid")
| true |
4b69b87b827ad878bc16fedf48a88ae8fc4dc219 | Python | Blaxzter/UM_ARS_G8 | /04_Genetic_Algorithm/src/genetic/Mutations.py | UTF-8 | 1,982 | 2.8125 | 3 | [] | no_license | import numpy as np
from genetic import Genome
# todo write some other mutation operators
import utils.Constants as Const
"""
Author Guillaume Franzoni Darnois & Theodoros Giannilias
"""
def mutation(genome: Genome):
    """Uniform mutation: each gene is independently re-drawn from
    U(-GENOME_BOUNDS, GENOME_BOUNDS) with probability MUTATION_PROBABILITY.
    Mutates the genome in place and returns it."""
    for idx in range(len(genome.genes)):
        if np.random.uniform(0, 1) < Const.MUTATION_PROBABILITY:
            genome.genes[idx] = np.random.uniform(
                low=-Const.GENOME_BOUNDS, high=Const.GENOME_BOUNDS
            )
    return genome
# Flip values mutation operation
def bit_flip_mutation(genome: Genome):
    """Sign-flip mutation: negates each gene independently with
    probability MUTATION_PROBABILITY. Mutates in place and returns the
    genome."""
    for idx in range(len(genome.genes)):
        if np.random.uniform(0, 1) < Const.MUTATION_PROBABILITY:
            genome.genes[idx] = -genome.genes[idx]
    return genome
# Swap randomly mutation operation
def swap_mutation(genome: Genome):
    """Swap mutation: with probability MUTATION_PROBABILITY, exchanges the
    gene with a randomly chosen (possibly the same) position. Mutates in
    place and returns the genome."""
    genes = genome.genes
    for idx in range(len(genes)):
        if np.random.uniform(0, 1) < Const.MUTATION_PROBABILITY:
            other = np.random.randint(low=0, high=len(genes))
            genes[idx], genes[other] = genes[other], genes[idx]
    return genome
def mutationInt(genome: Genome):
    """Integer variant of `mutation`: re-draws each gene from
    randint(-GENOME_BOUNDS, GENOME_BOUNDS) with probability
    MUTATION_PROBABILITY. Mutates in place and returns the genome."""
    for idx in range(len(genome.genes)):
        if np.random.uniform(0, 1) < Const.MUTATION_PROBABILITY:
            genome.genes[idx] = np.random.randint(
                low=-Const.GENOME_BOUNDS, high=Const.GENOME_BOUNDS
            )
    return genome
def boundary(genome: Genome):
    """Boundary mutation: with probability MUTATION_PROBABILITY, snaps the
    gene to +GENOME_BOUNDS or -GENOME_BOUNDS (each with probability 0.5).
    Mutates in place and returns the genome."""
    for idx in range(len(genome.genes)):
        if np.random.uniform(0, 1) < Const.MUTATION_PROBABILITY:
            sign = 1 if np.random.uniform(0, 1) < 0.5 else -1
            genome.genes[idx] = sign * Const.GENOME_BOUNDS
    return genome
def gaussian(genome: Genome):
    """Gaussian mutation: with probability MUTATION_PROBABILITY, replaces
    the gene with a N(0, 0.5) sample clipped to
    [-GENOME_BOUNDS, GENOME_BOUNDS]. Mutates in place and returns the
    genome."""
    for idx in range(len(genome.genes)):
        if np.random.uniform(0, 1) < Const.MUTATION_PROBABILITY:
            sample = np.random.normal(scale=0.5)
            genome.genes[idx] = np.clip(sample, -Const.GENOME_BOUNDS, Const.GENOME_BOUNDS)
    return genome
| true |
c3d745e4623485dfa725b6ccfb60c307cea8e7d8 | Python | chenghuiyu/MachineLearning-Tutorials | /Tutorials/2_Models/DecisionTree/python/test.py | UTF-8 | 551 | 3.21875 | 3 | [
"MIT"
] | permissive | """
对实现的函数进行测试
"""
from decision_tree import DecisionTree
if __name__ == '__main__':
    # Toy data: five samples with five categorical features each.
    X = [[1, 2, 0, 1, 0],
         [0, 1, 1, 0, 1],
         [1, 0, 0, 0, 1],
         [2, 1, 1, 0, 1],
         [1, 1, 0, 1, 1]]
    y = ['yes', 'yes', 'no', 'no', 'no']

    clf = DecisionTree(mode='ID3')
    clf.fit(X, y)
    clf.show()
    # Fix: the original had a bare `print` followed by a discarded
    # `clf.predict(X)` (a Python 2 leftover) — in Python 3 it printed a
    # blank line and threw the predictions away.
    print(clf.predict(X))  # ['yes' 'yes' 'no' 'no' 'no']

    clf_ = DecisionTree(mode='C4.5')
    clf_.fit(X, y).show()
    print(clf_.predict(X))  # ['yes' 'yes' 'no' 'no' 'no']
| true |
def printStair(n):
    """Print an n-line number staircase: line i holds the digits 1..i
    concatenated ("1", "12", "123", ...). Prints nothing for n <= 0."""
    prefix = ''
    for step in range(1, n + 1):
        prefix += str(step)
        print(prefix)
def main():
    """Read the staircase height n from stdin and print the staircase."""
    printStair(int(input()))
# Guard the entry point so importing this module doesn't trigger the
# stdin read (the original called main() unconditionally).
if __name__ == '__main__':
    main()
| true |
58fee0daa8dc5bb801ef46c686c3fdf679c29aef | Python | bulboushead/AtBS | /DateDetection.py | UTF-8 | 1,538 | 3.59375 | 4 | [] | no_license | #! python2
# Date format replacer, takes any date and formats it correctly.
import re, pyperclip
# DD/MM/YYYY, 01-31, 01-12, 1000-2499, if single digit, will have leading zero
# regEx will accept right format, wrong days
dateRegex = re.compile(r'''(
    ([0-9]{2})
    /
    ([0-9]{2})
    /
    ([0-9]{4})
    )''', re.VERBOSE)

text = str(pyperclip.paste())
matches = []


def validDate(day, month, year):
    """Return True when day/month/year (ints) form a real calendar date
    inside the accepted year range 1000-2499."""
    if not 1 <= month <= 12:
        return False
    if not 1000 <= year <= 2499:
        return False
    if month == 2:
        # Leap year: divisible by 400, or by 4 but not by 100.
        leap = (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0))
        maxDay = 29 if leap else 28
    elif month in (4, 6, 9, 11):
        # April, June, September, November have 30 days.
        maxDay = 30
    else:
        maxDay = 31
    return 1 <= day <= maxDay


# store each match's (day, month, year) strings in a dictionary, keyed by
# the full matched text so duplicate dates are collapsed
dateDict = {}
for groups in dateRegex.findall(text):
    dateDict[groups[0]] = [groups[1], groups[2], groups[3]]

for date in dateDict.values():
    # Fixes over the original string-based checks: the February and
    # leap-year branches tested the *month* where the day was meant, and
    # `%` was applied to the year while it was still a string (raising
    # TypeError for months like '29'). Numeric validation also rejects
    # day/month 00, which previously slipped through.
    if validDate(int(date[0]), int(date[1]), int(date[2])):
        matches.append('%s/%s/%s' % (date[0], date[1], date[2]))

pyperclip.copy('\n'.join(matches))
4d8f998976180dbf1fdfd5d4d4bb40a6369f50b7 | Python | MrLittlejohn/Python_Project-1 | /python/function.py | UTF-8 | 3,218 | 4.0625 | 4 | [] | no_license | # Littlejohn, sort functions for a python Project.
# Dr. Decker, due: Feb 26th, 2018, class: 16:00 to 17:15
# All rights are reserved to me, the almighty.
# 1st: insertion sort
# 2nd: recursive binary search
# 3rd: string split on a character
def insertion_sort(insertion_list):
    """Sort `insertion_list` in place, ascending, via insertion sort.

    The element at index 0 needs no comparison, so the scan starts at 1;
    each element is shifted left past every larger neighbour.
    """
    for idx in range(1, len(insertion_list)):
        key = insertion_list[idx]
        j = idx - 1
        while j >= 0 and insertion_list[j] > key:
            insertion_list[j + 1] = insertion_list[j]
            j -= 1
        insertion_list[j + 1] = key
def binary_search(binary_list, item, offset=0):
    """Recursively binary-search `binary_list` (assumed sorted ascending).

    Args:
        binary_list: sorted list to search.
        item: value to look for.
        offset: index of binary_list[0] within the original list; callers
            normally leave the default 0 (kept as a parameter because the
            recursion slices the list and must track the shift).

    Returns:
        The index of `item` in the original list, or None when absent.
    """
    if not binary_list:
        # Fix: the original did `return print(None)`, which noisily
        # printed "None" on every unsuccessful search.
        return None
    mid = len(binary_list) // 2
    if binary_list[mid] == item:
        return offset + mid
    if binary_list[mid] > item:
        # Search the left half (middle element excluded).
        return binary_search(binary_list[:mid], item, offset)
    # Search the right half, shifting the offset past the middle element
    # (the +1 also guarantees progress on even-length lists).
    return binary_search(binary_list[mid + 1:], item, offset + mid + 1)
#by adding 1, you prevent a number from being ignore
def split_char(string, char):
    """Recursively split `string` on the first character of `char`.

    Behaves like str.split on a single-character separator: returns the
    pieces as a list, and when `char` does not occur in `string` the whole
    string comes back as a one-element list.
    """
    if char not in string:
        return [string]
    # First occurrence of the separator character (only char[0] is used,
    # matching the original implementation's behaviour).
    cut = string.index(char[0])
    return [string[:cut]] + split_char(string[cut + 1:], char)
| true |
5b92b4e09f11b4bd3fead2f9528ee39c659234a8 | Python | yueeong/geoip | /tests/test_libs.py | UTF-8 | 3,466 | 2.6875 | 3 | [] | no_license | import unittest
import geoip2.database
from collections import Counter
from library.filterextract import FilterExtract
from library.geo_utils import GeoClassifier
from library.stats import StatsCollector
class TestFE(unittest.TestCase):
    """Tests for library.filterextract.FilterExtract: literal-string and
    regex presence checks against URL-path-like samples."""

    def setUp(self):
        # Literal tokens expected to match, and tokens that must not.
        self.list_of_strings = ['static', 'images', '/images/', 'images/', '.txt', 'llll']
        self.list_of_neg_strings = ['hello', 'telecom', 'foo']
        # Regex patterns for asset-style paths (hashed css/js, images, feeds).
        self.list_regex_patterns = ['[a-f0-9]+\/+((css)|(js))',
                                    '[a-f0-9]+\/images\/',
                                    '.*\.rss',
                                    '.*\.atom']
        # Exact strings the filter checks candidates against.
        self.list_string_to_check = ['/entry-images/',
                                     '/images/',
                                     '/user-images/',
                                     '/static/',
                                     '/robots.txt',
                                     '/favicon.ico']
        self.fe = FilterExtract(self.list_regex_patterns, self.list_string_to_check)

    def test_regex_samples(self):
        # TODO: regex-pattern matching is not covered yet.
        pass

    def test_string_samples(self):
        # check_presence is truthy when any item in the list is present
        self.assertTrue(self.fe.check_presence(self.list_of_strings))

    def test_string_samples_neg(self):
        # ...and falsy when none of them exist
        self.assertFalse(self.fe.check_presence(self.list_of_neg_strings))

    def test_string_sample_partials(self):
        # partial matches must not count; only exact matches do
        self.assertFalse(self.fe.check_presence(['.txt']))
        self.assertFalse(self.fe.check_presence(['robots.txt']))
        self.assertTrue(self.fe.check_presence(['/robots.txt']))
class TestGeo(unittest.TestCase):
    """Tests for library.geo_utils.GeoClassifier.

    NOTE(review): these tests require ../data/GeoLite2-City.mmdb on disk
    and assert live-database facts (e.g. 8.8.8.8 -> US), so they may break
    when the database snapshot changes.
    """

    def setUp(self):
        mmdb_reader = geoip2.database.Reader('../data/GeoLite2-City.mmdb')
        self.known_ip = '8.8.8.8'  # well-known address expected to resolve to US
        self.geoc = GeoClassifier(mmdb_reader=mmdb_reader)

    def test_known_ip(self):
        self.geoc.lookup_ipaddr(self.known_ip)
        self.assertEqual(self.geoc.place.country.iso_code, 'US')

    def test_country(self):
        self.geoc.lookup_ipaddr(self.known_ip)
        self.assertEqual(self.geoc.get_country()[0], 'United States')

    def test_subdivision_of_country(self):
        # This address is expected to geolocate to California.
        self.geoc.lookup_ipaddr('208.67.220.220')
        self.assertEqual(self.geoc.get_subdiv(), 'California')
class TestStats(unittest.TestCase):
    """Tests for library.stats.StatsCollector aggregation over
    (location, url-path) rows."""

    def setUp(self):
        # Each row pairs a country with a visited URL path.
        self.test_data_list = [['Canada', '/region/459'],
                               ['United States', '/entry/9680'],
                               ['Philippines', '/region/2'],
                               ['United States', '/entry/13395/reviews'],
                               ['United States', '/entry/2785'],
                               ['Netherlands', '/entry/near/19.2017%2C-155.523/filter'],
                               ['United States', '/region/2']]
        self.sc = StatsCollector(self.test_data_list, 'Country')
        # print(self.sc.df)

    def test_unique_loc(self):
        # Expected order matches first appearance in the input rows.
        self.assertEqual(self.sc.get_unique_loc(), ['Canada', 'United States', 'Philippines', 'Netherlands'])

    def test_counting_urlpages(self):
        self.assertEqual(self.sc.get_counter_urlpages('United States'),
                         Counter({'/entry/9680': 1, '/entry/13395/reviews': 1, '/entry/2785': 1, '/region/2': 1}))

    def test_counting_urlpages_neg(self):
        # Unknown locations yield an empty Counter rather than an error.
        self.assertEqual(self.sc.get_counter_urlpages('Foo'), Counter())
# Allow running this test module directly: `python test_libs.py`.
if __name__ == '__main__':
    unittest.main()
d4c3ebd033337a457413458b0badd29bdf25274b | Python | maksimuc24/GooglePlayCrawler-1 | /web_driver/scroll_driver.py | UTF-8 | 1,048 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
class ScrollDriver:
    """Scrolls a Selenium-style web driver to the bottom of a page whose
    content loads lazily while scrolling."""

    def __init__(self, web_driver, logger):
        self.web_driver = web_driver
        self.logger = logger

    def _scroll_and_measure(self, load_time):
        """Scroll to the current bottom, wait `load_time` seconds for lazy
        content to load, and return the new document height.

        Extracted to remove the duplicated scroll/sleep/measure sequence
        that appeared twice in the original loop body.
        """
        self.web_driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(load_time)
        return self.web_driver.execute_script("return document.body.scrollHeight")

    def scroll_down(self, load_time):
        """Keep scrolling until the page height stops growing.

        When the height first appears stable, one extra scroll-and-wait is
        attempted before giving up, in case content was still loading.

        Args:
            load_time: seconds to wait after each scroll for content to load.
        """
        last_height = self.web_driver.execute_script("return document.body.scrollHeight")
        while True:
            cur_height = self._scroll_and_measure(load_time)
            # If height after scroll equals the last one, try once more in
            # case the page is still scrollable.
            if cur_height == last_height:
                cur_height = self._scroll_and_measure(load_time)
                if cur_height == last_height:
                    break
            last_height = cur_height
80bf20c63d78e3e6c03dcd035622c021a864f03e | Python | crebro/BrainStorm | /brainstorm/koiGame.py | UTF-8 | 9,325 | 2.84375 | 3 | [] | no_license | from brainstorm.button import Button
from brainstorm.koiFish import KoiFish
from brainstorm.menuButton import MenuButton
import pygame
import sys
from brainstorm.constants import COLORS, FONTS, HEIGHT, IMAGES, KOISIZE, SOUNDS, WIDTH
class KoiGame:
def __init__(self, surface) -> None:
self.surface = surface
self.width, self.height = self.surface.get_width(), self.surface.get_height()
self.drawing = True
self.topPadding = 20
self.topText = FONTS["Bold"].render(
"Feeding Koi", 1, COLORS["white_foreground"]
)
self.backButton = MenuButton(
IMAGES["back"], "Back", IMAGES["back"].get_width() / 2 + 20, 20, padding=20
)
self.clock = pygame.time.Clock()
self.hudHeight = 60
self.hudPadding = 10
self.hudBarHeight = self.hudHeight - (self.hudPadding * 2)
self.waitingTime = 1000
self.loadingTime = 3000
self.waitingNextLevelTime = 3000
self.totalTime = 120
self.wrongsAllowed = 3
self.scoreOnCorrectFish = 250
self.reset()
def reset(self):
self.previousTimeCounter = 4
self.numberOfFishes = 10
self.fishes = []
self.counterStartTime = pygame.time.get_ticks()
self.canFeedFish = False
self.fishesFed = 0
self.loadingStart = pygame.time.get_ticks()
self.loadingNextRound = True
self.waitingForNextLoad = False
self.gameOver = False
self.allRightThisMatch = True
self.numberOfWrongs = 0
self.score = 0
self.gameBeginningTime = pygame.time.get_ticks()
def generateFishes(self):
self.fishes = []
for _ in range(self.numberOfFishes):
self.fishes.append(
KoiFish(
(
self.width - KOISIZE[0],
self.height - KOISIZE[1] - self.hudHeight,
)
)
)
def draw(
self,
):
while self.drawing:
self.clock.tick(60)
self.surface.fill(COLORS["koi_pond_color"])
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
b1, b2, b3 = pygame.mouse.get_pressed()
if b1 and self.backButton.isHovering():
self.drawing = False
return
if self.canFeedFish and not (
self.waitingForNextLoad or self.loadingNextRound
):
for fish in self.fishes:
if fish.isHovering():
if fish.isFed and self.allRightThisMatch:
self.numberOfWrongs += 1
self.allRightThisMatch = False
pygame.mixer.Sound.play(SOUNDS["wrong"])
elif not (fish.isFed):
self.score += self.scoreOnCorrectFish
pygame.mixer.Sound.play(SOUNDS["correct"])
fish.tryToFeedFish()
self.counterStartTime = pygame.time.get_ticks()
self.canFeedFish = False
self.fishesFed += 1
break
if self.gameOver:
try:
if self.retryButton.isHovering():
self.reset()
except Exception as e:
print(e)
if event.type == pygame.QUIT:
sys.exit()
self.update()
pygame.display.update()
def update(self):
self.backButton.draw(self.surface)
self.surface.blit(
self.topText,
(self.width / 2 - (self.topText.get_width() / 2), self.topPadding),
)
if not (self.gameOver):
for fish in self.fishes:
fish.draw(self.surface)
if (
self.loadingNextRound
and pygame.time.get_ticks() - self.loadingStart > self.loadingTime
):
self.loadingNextRound = False
self.generateFishes()
if (self.fishesFed == self.numberOfFishes) and not (
self.waitingForNextLoad
):
self.waitingForNextLoad = True
self.waitingForNextLoadStartTime = pygame.time.get_ticks()
for fish in self.fishes:
fish.revealFishStatus()
if (
self.waitingForNextLoad
and pygame.time.get_ticks() - self.waitingForNextLoadStartTime
> self.waitingNextLevelTime
):
if self.numberOfWrongs >= self.wrongsAllowed:
self.gameOver = True
return
self.waitingForNextLoad = False
self.loadingNextRound = True
self.numberOfFishes += 1
self.fishesFed = 0
self.allRightThisMatch = True
self.loadingStart = pygame.time.get_ticks()
for fish in self.fishes:
fish.moveEverything()
self.drawAllTimeTimer()
self.drawScore()
self.drawBar()
self.showWrongs()
self.drawTimer()
else:
gameOverText = FONTS["Bold"].render(
f"Score: { self.score }", 1, COLORS["white_foreground"]
)
gameOverTextX, gameOverTextY = (
self.width / 2 - (gameOverText.get_width() / 2),
(self.height / 2 - (gameOverText.get_height() / 2)),
)
self.retryButton = Button(
"Retry",
(WIDTH / 2, gameOverTextY + gameOverText.get_height() * 2),
COLORS["odd_blue"],
COLORS["white_foreground"],
)
self.surface.blit(gameOverText, (gameOverTextX, gameOverTextY))
self.retryButton.draw(self.surface)
def drawBar(self):
time = (
pygame.time.get_ticks() - self.counterStartTime
if not (self.canFeedFish)
else self.waitingTime
)
maxWidth = WIDTH // 2 // 2 - (self.hudPadding * 2)
pygame.draw.rect(
self.surface,
COLORS["good_green"] if self.canFeedFish else COLORS["white_foreground"],
(
self.hudPadding,
HEIGHT - (self.hudBarHeight - self.hudPadding * 2),
(time / self.waitingTime) * maxWidth,
self.hudBarHeight,
),
)
if not (self.canFeedFish) and time >= self.waitingTime:
self.canFeedFish = True
    def drawTimer(self):
        """While the next round is loading, draw a countdown number inside a
        white circle at the screen centre and play a tick sound whenever the
        displayed second changes."""
        if self.loadingNextRound:
            # Whole seconds remaining until the next round starts.
            # NOTE(review): 4000 ms is hard-coded here while the loading check in
            # draw() uses self.loadingTime — presumably the two should match; confirm.
            time = (4000 - (pygame.time.get_ticks() - self.loadingStart)) // 1000
            renderingText = FONTS["Bold"].render(
                str(
                    time,
                ),
                1,
                COLORS["black_background"],
            )
            # White backdrop circle behind the countdown digit.
            pygame.draw.circle(
                self.surface,
                COLORS["white_foreground"],
                (self.width / 2, self.height / 2),
                50,
            )
            # Centre the digit on the screen.
            self.surface.blit(
                renderingText,
                (
                    (WIDTH // 2) - renderingText.get_width() // 2,
                    (HEIGHT // 2) - renderingText.get_height() // 2,
                ),
            )
            # Tick once per second change, not once per frame.
            if time != self.previousTimeCounter:
                pygame.mixer.Sound.play(SOUNDS["tick"])
            self.previousTimeCounter = time
def drawAllTimeTimer(
self,
):
time = (
(self.totalTime * 1000) - (pygame.time.get_ticks() - self.gameBeginningTime)
) // 1000
if time <= 0:
self.gameOver = True
timeMinutes = int(time // 60)
timeSeconds = int(time % 60)
timeText = FONTS["Bold"].render(
f"Time: {timeMinutes } : { timeSeconds }",
1,
COLORS["white_foreground"],
)
self.surface.blit(
timeText,
(
self.width / 2 - (timeText.get_width() / 2),
self.height - timeText.get_height() - self.topPadding,
),
)
def drawScore(self):
scoreText = FONTS["Bold"].render(
f"Score: {self.score}", 1, COLORS["white_foreground"]
)
self.surface.blit(
scoreText,
(
WIDTH - self.topPadding - scoreText.get_width(),
self.height - scoreText.get_height() - self.topPadding,
),
)
def showWrongs(self):
for x in range(self.numberOfWrongs):
self.surface.blit(
IMAGES["matrix_wrong"],
(
WIDTH - IMAGES["matrix_wrong"].get_width() - self.topPadding,
x * IMAGES["matrix_wrong"].get_height() + self.topPadding,
),
)
| true |
ae4ed6b5b3568d2aae50d11250713e534e8ed918 | Python | KristianLN/Thesis_UCPH | /utils/preprocessing_features_and_labels.py | UTF-8 | 134,428 | 2.515625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import os
import time
import h5py
import copy
import datetime
import ta
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import QuantileTransformer
# Module-level scaler instances shared by the preprocessing routines in this file.
# Setting up the Scalers!
mm_scaler = MinMaxScaler()  # scale each feature to [0, 1]
scaler = StandardScaler()  # zero mean, unit variance per feature
norm_scaler = Normalizer()  # scale each sample to unit norm
pt = PowerTransformer()  # power transform with standardized output
ptNst = PowerTransformer(standardize=False)  # power transform without standardizing
qtUni = QuantileTransformer(n_quantiles=100)  # map features to a uniform distribution
qtGau = QuantileTransformer(n_quantiles=100,output_distribution='normal')  # map features to a Gaussian
################################################## Table of Content ##################################################
# extract_labels() : extract labels given classes and group_style (we have only equal: 5 x 20% bins right now)
# extract_labels_multi() : extract labels over all tickers given classes and group_style (we have only equal: 5 x 20% bins right now)
# align_features_and_labels(): burn-in features, extract labels (calls extract_labels()) and align indices to features
# align_features_and_labels_multi(): burn-in features, extract labels (calls extract_labels_multi()) and align indices to features over all tickers
######################################################################################################################
def extract_labels(data = '', classes = 5, group_style = 'equal'):
    """Bin one-step close-to-close percentage returns into `classes` groups.

    data        : 2-D array whose last column is the close price.
    classes     : number of label bins.
    group_style : only 'equal' (equally sized bins via np.array_split) is supported.

    Returns (labels, returns, [mins, maxs]) where mins/maxs are the per-bin
    boundary values taken from the sorted returns.
    """
    close = data[:, -1]
    returns = (close[1:] / close[:-1] - 1) * 100
    labels = np.zeros(returns.shape[0])
    if group_style != 'equal':
        raise ValueError(f'group_style {group_style} not implemented')
    # Bin boundaries: min/max of each equally sized chunk of the sorted returns.
    chunks = np.array_split(np.sort(returns), classes)
    thresholdsMin = [chunk.min() for chunk in chunks]
    thresholdsMax = [chunk.max() for chunk in chunks]
    for cls in range(classes):
        if cls == 0:
            mask = returns <= thresholdsMax[cls]
        elif cls == classes - 1:
            mask = returns >= thresholdsMin[cls]
        else:
            mask = (returns >= thresholdsMin[cls]) & (returns <= thresholdsMax[cls])
        labels[mask] = cls
    return labels, returns, [thresholdsMin, thresholdsMax]
# _multi works with align_features_and_labels_multi() and assumes data is a vector of close prices (not a matrix)
def extract_labels_multi_v1(data = None, classes = 5, group_style = 'equal'):
    """Bin one-step percentage returns of a single close-price vector into
    `classes` equally sized groups.

    data : 1-D array of close prices for one ticker.
    Returns the label array only (unlike extract_labels).
    """
    returns = (data[1:] / data[:-1] - 1) * 100
    labels = np.zeros(returns.shape[0])
    if group_style != 'equal':
        raise ValueError(f'group_style {group_style} not implemented')
    # Boundaries from equally sized chunks of the sorted returns.
    chunks = np.array_split(np.sort(returns), classes)
    lows = [chunk.min() for chunk in chunks]
    highs = [chunk.max() for chunk in chunks]
    for cls in range(classes):
        if cls == 0:
            labels[returns <= highs[cls]] = cls
        elif cls == classes - 1:
            labels[returns >= lows[cls]] = cls
        else:
            labels[(returns >= lows[cls]) & (returns <= highs[cls])] = cls
    return labels
def extract_labels_multi_v2(data = None,
                            classes = 5,
                            group_style = 'equal',
                            splits=None):
    """Bin one-step percentage returns by membership in precomputed splits.

    data   : 1-D array of close prices for one ticker.
    splits : list of sorted return chunks (e.g. from the pooled returns of all
             tickers); if None, chunks are computed from this ticker alone.
    """
    returns = (data[1:] / data[:-1] - 1) * 100
    # Jitter exact-zero returns (e.g. candles without price updates) so the
    # equally sized bins stay evenly populated.
    zero_mask = returns == 0
    returns[zero_mask] = np.random.normal(0, 1, zero_mask.sum()) / 1000000
    labels = np.zeros(returns.shape[0])
    if group_style == 'equal':
        if splits is None:
            splits = np.array_split(np.sort(returns), classes)
        for cls in range(classes):
            labels[np.isin(returns, splits[cls])] = cls
    elif group_style != 'equal':
        raise ValueError(f'group_style {group_style} not implemented')
    return labels
# attempts on 13-08-2020 to fix label issue (this version is not fully tested / might not work correctly)
def extract_labels_multi_v3(data = None,
                            classes = 5,
                            group_style = 'equal',
                            splits=None,noise=True):
    """Like _v2 but returns are left as simple fractions (not *100) and the
    zero-return jitter can be switched off via `noise`."""
    returns = data[1:] / data[:-1] - 1
    if noise:
        # Jitter exact-zero returns so the equally sized bins stay balanced.
        zero_mask = returns == 0
        returns[zero_mask] = np.random.normal(0, 1, zero_mask.sum()) / 1000000
    labels = np.zeros(returns.shape[0])
    if group_style == 'equal':
        if splits is None:
            splits = np.array_split(np.sort(returns), classes)
        for cls in range(classes):
            labels[np.isin(returns, splits[cls])] = cls
    elif group_style != 'equal':
        raise ValueError(f'group_style {group_style} not implemented')
    return labels
## Works only for two classes
# attempts on 13-08-2020 to fix label issue (this version is not fully tested / might not work correctly)
def extract_labels_multi_v4(data = None,
                            classes = 5,
                            group_style = 'equal',
                            splits=None,
                            global_median=None):
    """Binary labelling: 1 where the one-step return exceeds global_median,
    0 otherwise (only meaningful for two classes).

    data          : 1-D array of close prices for one ticker.
    global_median : the median return over all tickers.
    """
    returns = data[1:] / data[:-1] - 1
    labels = np.zeros(returns.shape[0])
    if group_style == 'equal':
        above = returns > global_median
        labels[above] = 1
        labels[~above] = 0
    elif group_style != 'equal':
        raise ValueError(f'group_style {group_style} not implemented')
    return labels
## Works for all number of classes
# this version takes data in a direct returns for a specific ticker
def extract_labels_multi_v5(data = None,
                            classes = 5,
                            group_style = 'equal',
                            splits=None):
    """Bin precomputed returns of one ticker into class ids with pd.cut.

    data   : 1-D array of returns (not prices) for one ticker.
    splits : bin edges, typically from the pooled return distribution of all
             tickers.
    Returns the label array; raises ValueError for an unknown group_style or
    when a NaN label does not stem from the maximum return.
    """
    if group_style == 'equal':
        labels = pd.cut(data, bins=splits, labels=False, right=False, include_lowest=True)
        # we need right=False (open right-handside in split interval) to get median into the positive class
        # this makes the last point nan, we fix it here
        if sum(np.isnan(labels)) > 0:
            print(f'Number of NaNs in label: {sum(np.isnan(labels))}. 1 is expected')
            print(f'Returns that lead to NaNs in label: {data[np.where(np.isnan(labels))]}')
            assert sum(np.isnan(labels)) <= 1, "There should be max 1 NaN"
            # The only legitimate NaN is the single value equal to the last edge
            # (right-open intervals exclude it); push it into the top class.
            if data[np.where(np.isnan(labels))] >= splits[-1]:
                labels[np.where(np.isnan(labels))] = classes - 1 # assign last label id
            else:
                print(data[np.where(np.isnan(labels))], splits[-1])
                raise ValueError('There is a label NaN where its underlying return is not max of dataset, which it should be')
    elif group_style != 'equal':
        raise ValueError(f'group_style {group_style} not implemented')
    return labels
# per version 6 we no longer use group_style, as the "splits" fully describes splits for both equal and non-equal
def extract_labels_multi_v6(data = None,
                            classes = 5,
                            splits=None):
    """Bin one ticker's returns into class ids using precomputed bin edges.

    Per v6 there is no group_style argument: `splits` fully describes the
    binning (equal or not). Intervals are left-closed/right-open (right=False)
    so the median lands in the positive class; the single value equal to the
    last edge then comes out NaN from pd.cut and is re-assigned the top class.
    """
    labels = pd.cut(data, bins=splits, labels=False, right=False, include_lowest=True)
    nan_mask = np.isnan(labels)
    nan_count = nan_mask.sum()
    if nan_count > 0:
        print(f'Number of NaNs in label: {nan_count}. 1 is expected')
        print(f'Returns that lead to NaNs in label: {data[nan_mask]}')
        assert nan_count <= 1, "There should be max 1 NaN"
        if data[nan_mask] >= splits[-1]:
            # Maximum return sits exactly on the last edge: give it the top class.
            labels[nan_mask] = classes - 1
        else:
            print(data[nan_mask], splits[-1])
            raise ValueError('There is a label NaN where its underlying return is not max of dataset, which it should be')
    return labels
# v6
def extract_labels_multi_final(data = None,
                               classes = 5,
                               splits=None):
    """Final labeller: map one ticker's returns onto global bin edges.

    `splits` fully describes the binning, so no group_style is needed.
    right=False keeps each interval open on the right so the median falls in
    the positive class; the one observation equal to the last edge comes out
    NaN from pd.cut and is pushed into the top class below.
    """
    labels = pd.cut(data, bins=splits, labels=False, right=False, include_lowest=True)
    missing = np.where(np.isnan(labels))[0]
    if missing.size > 0:
        print(f'Number of NaNs in label: {missing.size}. 1 is expected')
        print(f'Returns that lead to NaNs in label: {data[missing]}')
        assert missing.size <= 1, "There should be max 1 NaN"
        if data[missing] >= splits[-1]:
            # The dataset maximum equals the last edge: assign the last label id.
            labels[missing] = classes - 1
        else:
            print(data[missing], splits[-1])
            raise ValueError('There is a label NaN where its underlying return is not max of dataset, which it should be')
    return labels
def align_features_and_labels(candles, prediction_horizon, features, n_feature_lags, n_classes,
                              safe_burn_in = False, data_sample = 'full'):
    """Burn in the feature matrix and build aligned labels for one ticker.

    candles            : DataFrame whose first four columns are the raw OHLC prices.
    prediction_horizon : number of steps ahead the label looks.
    features           : DataFrame of engineered features (NaN during warm-up).
    n_feature_lags     : number of feature lags used when building `features`.
    n_classes          : number of label classes.
    safe_burn_in       : only False is implemented (single burn-in at the start);
                         True leaves the result variables undefined.
    data_sample        : must be 'full' when safe_burn_in is False.

    Returns (burned_in_features, labels) with matching zero-based indices.
    Raises ValueError if NaNs survive the burn-in.
    """
    # extract first 4 columns as the lag0 or raw OHLC prices (used for labelling)
    price_candles = candles.iloc[:, :4].copy(deep=True).values
    if not safe_burn_in:
        assert data_sample == 'full'
        # First row where every feature is non-NaN marks the end of warm-up.
        burned_in_idx = np.where(np.sum(np.isnan(features.values), axis=1) == 0)[0][0]
        # Cut enough observations at the end to align features with labels.
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        burned_in_features = features.iloc[burned_in_idx : -end_point_cut, :].reset_index(drop=True)
        labels, _, _ = extract_labels(data = price_candles[burned_in_idx+n_feature_lags:, :],
                                      classes = n_classes, group_style = 'equal')
        # check if there are remaining NaNs after burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
    # Fix: extract_labels returns a numpy array, which has no reset_index();
    # wrap it in a Series so the caller gets an index-aligned pandas object.
    return burned_in_features, pd.Series(labels).reset_index(drop=True)
# _multi has multi-ticker support
def align_features_and_labels_multi_v1(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full'):
    """Burn in features and build labels per ticker (ticker-local label bins).

    price_candles : DataFrame with 'Ticker' and 'close' columns.
    all_features  : DataFrame with a 'ticker' column plus numeric features.
    Returns (features, labels) stacked over all tickers with aligned rows.
    Raises ValueError if NaNs survive the burn-in.
    """
    all_burned_in_features = pd.DataFrame()
    all_labels = pd.DataFrame()
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # np.isnan() cannot handle the non-numeric 'ticker' column.
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # First row with no NaNs marks the end of the feature warm-up.
            burned_in_idx = np.where(np.sum(np.isnan(ticker_features.values), axis=1) == 0)[0][0]
            # Cut enough observations at the end to align features with labels.
            end_point_cut = max(prediction_horizon, n_feature_lags + 1)
            # .copy() avoids SettingWithCopyWarning when the ticker column is re-added below.
            burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy()
            # Fix: the original called the non-existent name `extract_labels_multi`;
            # the matching labeller for this version is extract_labels_multi_v1.
            labels = extract_labels_multi_v1(data = ticker_prices[(burned_in_idx+n_feature_lags):],
                                             classes = n_classes,
                                             group_style = 'equal')
            remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
            if remaining_nans > 0:
                raise ValueError('Had NaN in burned_in_features after burn-in')
        burned_in_features['ticker'] = ticker_name
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    return all_burned_in_features, all_labels.reset_index(drop=True)
# attempts on 13-08-2020 to fix label issue (this version is not fully tested / might not work correctly)
def align_features_and_labels_multi_v2(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global'):
    """Like _v1 but label bins are computed once from the pooled returns of all
    tickers ('global' splits) instead of per ticker.

    Returns (features, labels) stacked over all tickers with aligned rows.
    """
    all_burned_in_features = pd.DataFrame()
    all_labels = pd.DataFrame()
    if splitType.lower() == 'global':
        # Global bin chunks from the pooled close-to-close returns (in percent).
        # NOTE(review): returns here are computed across ticker boundaries, so the
        # first observation of each ticker uses the previous ticker's close —
        # later versions (v3+) compute returns per ticker; confirm intent.
        returns = ((price_candles['close'].values[1:] / price_candles['close'].values[:-1]) -1) * 100
        splits = np.array_split(np.sort(returns),n_classes)
    # NOTE(review): for any other splitType, `splits` is never defined and the
    # labelling call below raises NameError.
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # np.isnan() cannot handle the non-numeric 'ticker' column.
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # First row with no NaNs marks the end of the feature warm-up.
            burned_in_idx = np.where(np.sum(np.isnan(ticker_features.values), axis=1) == 0)[0][0]
            end_point_cut = max(prediction_horizon, n_feature_lags + 1)
            # .copy() avoids SettingWithCopyWarning when the ticker column is re-added below.
            burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy()
            # Fix: the original called the non-existent name `extract_labels_multi`;
            # the matching labeller for this version is extract_labels_multi_v2.
            labels = extract_labels_multi_v2(data = ticker_prices[(burned_in_idx+n_feature_lags):],
                                             classes = n_classes,
                                             group_style = 'equal',
                                             splits = splits)
            remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
            if remaining_nans > 0:
                raise ValueError('Had NaN in burned_in_features after burn-in')
        burned_in_features['ticker'] = ticker_name
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    return all_burned_in_features, all_labels.reset_index(drop=True)
# attempts on 13-08-2020 to fix label issue (this version is not fully tested / might not work correctly)
def align_features_and_labels_multi_v3(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = True):
    """Global-split alignment; unlike _v2 the pooled returns are computed per
    ticker (no cross-ticker boundary returns) and exact-zero returns can be
    jittered (`noise`) so the equal-size bins stay balanced.

    Returns (features, labels) stacked over all tickers with aligned rows.
    """
    all_burned_in_features = pd.DataFrame()
    all_labels = pd.DataFrame()
    if splitType.lower() == 'global':
        # Per-ticker close-to-close returns, pooled into one vector for global splits.
        returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:] /
                                    price_candles[price_candles.Ticker==ticker]['close'].values[:-1]) - 1)
                                  for ticker in price_candles.Ticker.unique()])
        if noise:
            # Jitter exact-zero returns (e.g. candles without price updates).
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        splits = np.array_split(np.sort(returns),n_classes)
    # NOTE(review): for any other splitType, `splits` is never defined and the
    # labelling call below raises NameError.
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # np.isnan() cannot handle the non-numeric 'ticker' column.
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # First row with no NaNs marks the end of the feature warm-up.
            burned_in_idx = np.where(np.sum(np.isnan(ticker_features.values), axis=1) == 0)[0][0]
            end_point_cut = max(prediction_horizon, n_feature_lags + 1)
            # .copy() avoids SettingWithCopyWarning when the ticker column is re-added below.
            burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy()
            # Fix: the original called the non-existent name `extract_labels_multi`;
            # the matching labeller for this version is extract_labels_multi_v3.
            labels = extract_labels_multi_v3(data = ticker_prices[(burned_in_idx+n_feature_lags):],
                                             classes = n_classes,
                                             group_style = 'equal',
                                             splits = splits, noise = noise)
            remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
            if remaining_nans > 0:
                raise ValueError('Had NaN in burned_in_features after burn-in')
        burned_in_features['ticker'] = ticker_name
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    return all_burned_in_features, all_labels.reset_index(drop=True)
# attempts on 13-08-2020 to fix label issue (this version is not fully tested / might not work correctly)
def align_features_and_labels_multi_v4(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = True):
    """Binary-label variant: labels come from comparing each ticker's returns
    against the global median return (see extract_labels_multi_v4).

    Returns (features, labels) stacked over all tickers with aligned rows.
    """
    all_burned_in_features = pd.DataFrame()
    all_labels = pd.DataFrame()
    if splitType.lower() == 'global':
        # Per-ticker close-to-close returns, pooled to compute the global median.
        returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:] /
                                    price_candles[price_candles.Ticker==ticker]['close'].values[:-1]) - 1)
                                  for ticker in price_candles.Ticker.unique()])
        if noise:
            # Jitter exact-zero returns (e.g. candles without price updates).
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        global_median = np.median(returns)
    # NOTE(review): for any other splitType, `global_median` is never defined and
    # the labelling call below raises NameError.
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # np.isnan() cannot handle the non-numeric 'ticker' column.
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # First row with no NaNs marks the end of the feature warm-up.
            burned_in_idx = np.where(np.sum(np.isnan(ticker_features.values), axis=1) == 0)[0][0]
            end_point_cut = max(prediction_horizon, n_feature_lags + 1)
            # .copy() avoids SettingWithCopyWarning when the ticker column is re-added below.
            burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy()
            # Fix: the original called the non-existent name `extract_labels_multi`;
            # the matching labeller for this version is extract_labels_multi_v4.
            labels = extract_labels_multi_v4(data = ticker_prices[(burned_in_idx+n_feature_lags):],
                                             classes = n_classes,
                                             group_style = 'equal',
                                             global_median = global_median)
            remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
            if remaining_nans > 0:
                raise ValueError('Had NaN in burned_in_features after burn-in')
        burned_in_features['ticker'] = ticker_name
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    return all_burned_in_features, all_labels.reset_index(drop=True)
# this version calculates global returns, add ticker to the output, and inputs direct ticker-wise returns for extract label
def align_features_and_labels_multi_v5(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = False):
    """Global-split alignment using per-ticker returns and pd.qcut bin edges,
    labelled via extract_labels_multi_final.

    Returns (features, labels) stacked over all tickers with aligned rows.
    """
    all_burned_in_features = pd.DataFrame()
    all_labels = pd.DataFrame()
    if splitType.lower() == 'global':
        # Per-ticker close-to-close returns, pooled for global quantile edges.
        returns = []
        tickers = []
        for ticker in price_candles.Ticker.unique():
            close = price_candles[price_candles.Ticker==ticker]['close'].values
            ticker_returns = (close[1:] / close[:-1]) - 1
            returns.append(ticker_returns)
            tickers.append([ticker for i in range(len(ticker_returns))])
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact-zero returns so the quantile bins stay evenly populated.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        # Equal-frequency bin edges over the pooled returns.
        _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        returns = pd.DataFrame({'returns': returns, 'Ticker': tickers})
    # NOTE(review): for any other splitType, `returns`/`splits` are never defined
    # and the loop below raises NameError.
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # np.isnan() cannot handle the non-numeric 'ticker' column.
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_returns = returns[returns.Ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # First row with no NaNs marks the end of the feature warm-up.
            burned_in_idx = np.where(np.sum(np.isnan(ticker_features.values), axis=1) == 0)[0][0]
            end_point_cut = max(prediction_horizon, n_feature_lags + 1)
            # .copy() avoids SettingWithCopyWarning when the ticker column is re-added below.
            burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy()
            # Fix: extract_labels_multi_final() takes no `group_style` argument —
            # passing it raised TypeError. The splits fully describe the binning.
            labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx+n_feature_lags):],
                                                classes = n_classes,
                                                splits = splits)
            remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
            if remaining_nans > 0:
                raise ValueError('Had NaN in burned_in_features after burn-in')
        burned_in_features['ticker'] = ticker_name
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    return all_burned_in_features, all_labels.reset_index(drop=True)
# Included the option to return ticker label as dummies.
def align_features_and_labels_multi_v6(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = False,
                                       ticker_dummies = None):
    """Like _v5 but can one-hot encode the ticker column (when `ticker_dummies`
    is not None) and returns the stacked features with a fresh zero-based index.

    Returns (features, labels) stacked over all tickers with aligned rows.
    """
    all_burned_in_features = pd.DataFrame()
    all_labels = pd.DataFrame()
    if splitType.lower() == 'global':
        # Per-ticker close-to-close returns, pooled for global quantile edges.
        returns = []
        tickers = []
        for ticker in price_candles.Ticker.unique():
            close = price_candles[price_candles.Ticker==ticker]['close'].values
            ticker_returns = (close[1:] / close[:-1]) - 1
            returns.append(ticker_returns)
            tickers.append([ticker for i in range(len(ticker_returns))])
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact-zero returns so the quantile bins stay evenly populated.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        # Equal-frequency bin edges over the pooled returns.
        _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        returns = pd.DataFrame({'returns': returns, 'Ticker': tickers})
    # NOTE(review): for any other splitType, `returns`/`splits` are never defined
    # and the loop below raises NameError.
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # np.isnan() cannot handle the non-numeric 'ticker' column.
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_returns = returns[returns.Ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # First row with no NaNs marks the end of the feature warm-up.
            burned_in_idx = np.where(np.sum(np.isnan(ticker_features.values), axis=1) == 0)[0][0]
            end_point_cut = max(prediction_horizon, n_feature_lags + 1)
            # .copy() avoids SettingWithCopyWarning when the ticker column is re-added below.
            burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy()
            # Fix: extract_labels_multi_final() takes no `group_style` argument —
            # passing it raised TypeError. The splits fully describe the binning.
            labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx+n_feature_lags):],
                                                classes = n_classes,
                                                splits = splits)
            remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
            if remaining_nans > 0:
                raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        if ticker_dummies is not None:
            # Replace the ticker string column by one-hot indicator columns.
            tickers = burned_in_features.pop('ticker')
            burned_in_features = pd.concat([burned_in_features, pd.get_dummies(tickers, prefix='ticker', drop_first=False)], axis=1)
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    return all_burned_in_features.reset_index(drop=True), all_labels.reset_index(drop=True)
# Now extracting the time indices to be used to sort afterwards.
def align_features_and_labels_multi_v7(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = False,
                                       ticker_dummies = False):
    """Align lagged features, quantile class labels and (day, timestamp) indices per ticker.

    Close-to-close returns are computed per ticker from ``price_candles``,
    pooled across all tickers, and binned with ``pd.qcut`` into ``n_classes``
    equal-population bins so every ticker is labelled against the same global
    split points.  For each ticker, leading "burn-in" rows (rows still holding
    NaNs from feature lagging) and the trailing
    ``max(prediction_horizon, n_feature_lags + 1)`` rows are sliced away so
    features and labels line up; results are stacked across tickers.

    Parameters
    ----------
    price_candles : DataFrame with a 2-level (day, timestamp) index and
        'Ticker' / 'close' columns.
    all_features : feature DataFrame containing a 'ticker' column.
    prediction_horizon, n_feature_lags : ints controlling the end-point cut.
    n_classes : number of quantile label bins.
    safe_burn_in : only ``False`` is supported (asserts data_sample == 'full').
    splitType : only 'global' is implemented; any other value leaves
        ``splits``/``returns`` undefined and the main loop would fail.
        # NOTE(review): confirm whether other split types were ever intended.
    noise : if True, replace exactly-zero returns with tiny gaussian noise so
        the qcut bin edges stay unique.
    ticker_dummies : if True, one-hot encode the ticker column at the end.

    Returns
    -------
    (all_burned_in_features, all_labels, all_burned_in_indices) — each with a
    reset RangeIndex.
    """
    all_burned_in_features = pd.DataFrame()
    all_burned_in_indices = pd.DataFrame()
    all_labels = pd.DataFrame()
    # Bookkeeping frame mapping every candle row to its (day, timestamp, ticker).
    # NOTE(review): the column name 'timestemps' (sic) is kept verbatim because
    # downstream code may address it literally (v8+ spell it 'timestamps').
    dailyIndices = pd.DataFrame({'days':price_candles.index.get_level_values(0),
                                 'timestemps':price_candles.index.get_level_values(1),
                                 'ticker':price_candles.Ticker})
    if splitType.lower() == 'global':
        # Making the splits for the labels based on all tickers
        # returns = ((price_candles['close'].values[1:] / price_candles['close'].values[:-1]) -1) * 100
        # returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
        #                             price_candles[price_candles.Ticker==ticker]['close'].values[:-1])-1) for ticker\
        #                            in price_candles.Ticker.unique()])
        returns = []
        tickers = []
        for ticker in price_candles.Ticker.unique():
            # Simple close-to-close return per ticker (the first candle has no return).
            ticker_returns = (price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
                              price_candles[price_candles.Ticker==ticker]['close'].values[:-1]) - 1
            ticker_names = [ticker for i in range(len(ticker_returns))]
            returns.append(ticker_returns)
            tickers.append(ticker_names)
        # concatenate returns and add noise
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact zeros so qcut edges are strictly increasing.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        # Global equal-population bin edges shared by all tickers.
        _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        #print(splits)
        returns = pd.DataFrame({'returns': returns, 'Ticker': tickers})
    keepCheck = []
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        ticker_indices = dailyIndices[dailyIndices.ticker==ticker_name].copy(deep=True)
        # removing the "ticker" variable from ticker_features as np.isnan() does not like non-numericals
        #ticker_features = ticker_features.iloc[:, ticker_features.columns != 'ticker']
        ticker_features.drop('ticker', axis=1, inplace=True)
        # extract first 4 columns as the lag0 or raw OHLC prices (used for labelling)
        #ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values # candles.iloc[:, :4].values
        ticker_returns = returns[returns.Ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # we assume data_sample is full and that we can continue features from yesterday's values.
            # that we have a single burn-in at the beginning and that's it
        # get first index that has no NaNs (the sum checks for True across columns, we look for sum == 0 and where that is first True)
        burned_in_idx = np.where((np.sum(np.isnan(ticker_features.values), axis=1) == 0) == True)[0][0]
        keepCheck.append(burned_in_idx)
        # calculate end-point cut-off to match with labels
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        # slice away the observations used for burn-in (taking off 1 at the end to match with labels [slice off "prediction_horizon"])
        burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :] #.reset_index(drop=True) # features[burned_in_idx:] latter is sligthly faster but maybe not as precise
        burned_in_indices = ticker_indices.iloc[burned_in_idx : -end_point_cut, :]
        # slice away the burned-in indices from labels
        labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx+n_feature_lags):],
                                            classes = n_classes,
                                            group_style = 'equal',
                                            splits = splits)
        # labels, returns, thresholds = extract_labels(data = candles[burned_in_idx + n_feature_lags : , :],
        #                                              classes = n_classes, group_style = 'equal')
        # check if there are remaining NaNs are burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        # Adding the burned in data
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_burned_in_indices = pd.concat([all_burned_in_indices, burned_in_indices.reset_index(drop=True)])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    # Returning the ticker as dummies
    if ticker_dummies:
        tickers = all_burned_in_features.pop('ticker')
        all_burned_in_features = pd.concat([all_burned_in_features, pd.get_dummies(tickers, prefix='d_ticker', drop_first=False)], axis=1)
    # print('Are all burned_in_idx the same?', all(keepCheck==keepCheck[0]))
    # print(dailyIndicies.head(50))
    return all_burned_in_features.reset_index(drop=True),\
           all_labels.reset_index(drop=True),\
           all_burned_in_indices.reset_index(drop=True)
# v8: adding custom label splitting (label_split) for multi-class
def align_features_and_labels_multi_v8(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       label_split = [],
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = False,
                                       ticker_dummies = False):
    """v8 of the multi-ticker aligner: like v7 but with custom label bins.

    Differences from ``align_features_and_labels_multi_v7``:
    * ``label_split`` — when non-empty, it is passed to ``pd.qcut`` as the
      ``q`` argument (custom quantile edges) instead of ``n_classes``
      equal-population bins.
    * the 'timestamps' column of the index frame is spelled correctly.
    * ``extract_labels_multi_final`` is called without ``group_style``.

    Returns (features, labels, indices), each with a reset RangeIndex.
    NOTE(review): ``label_split`` uses a mutable default ([]); it is only
    compared, never mutated, so this is harmless here.
    """
    all_burned_in_features = pd.DataFrame()
    all_burned_in_indices = pd.DataFrame()
    all_labels = pd.DataFrame()
    # (day, timestamp, ticker) bookkeeping for every candle row.
    dailyIndices = pd.DataFrame({'days':price_candles.index.get_level_values(0),
                                 'timestamps':price_candles.index.get_level_values(1),
                                 'ticker':price_candles.Ticker})
    if splitType.lower() == 'global':
        # Making the splits for the labels based on all tickers
        # returns = ((price_candles['close'].values[1:] / price_candles['close'].values[:-1]) -1) * 100
        # returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
        #                             price_candles[price_candles.Ticker==ticker]['close'].values[:-1])-1) for ticker\
        #                            in price_candles.Ticker.unique()])
        returns = []
        tickers = []
        for ticker in price_candles.Ticker.unique():
            # Close-to-close return per ticker (first candle has no return).
            ticker_returns = (price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
                              price_candles[price_candles.Ticker==ticker]['close'].values[:-1]) - 1
            ticker_names = [ticker for i in range(len(ticker_returns))]
            returns.append(ticker_returns)
            tickers.append(ticker_names)
        # concatenate returns and add noise
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact zeros so qcut edges are strictly increasing.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        if label_split == []:
            # equal-sized bins according to n_classes
            _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        elif label_split != []:
            # caller-supplied quantile edges
            _, splits = pd.qcut(returns, q=label_split, labels=False, retbins=True)
        #print(splits)
        returns = pd.DataFrame({'returns': returns, 'Ticker': tickers})
    keepCheck = []
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        ticker_indices = dailyIndices[dailyIndices.ticker==ticker_name].copy(deep=True)
        # removing the "ticker" variable from ticker_features as np.isnan() does not like non-numericals
        #ticker_features = ticker_features.iloc[:, ticker_features.columns != 'ticker']
        ticker_features.drop('ticker', axis=1, inplace=True)
        # extract first 4 columns as the lag0 or raw OHLC prices (used for labelling)
        #ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values # candles.iloc[:, :4].values
        ticker_returns = returns[returns.Ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # we assume data_sample is full and that we can continue features from yesterday's values.
            # that we have a single burn-in at the beginning and that's it
        # get first index that has no NaNs (the sum checks for True across columns, we look for sum == 0 and where that is first True)
        burned_in_idx = np.where((np.sum(np.isnan(ticker_features.values), axis=1) == 0) == True)[0][0]
        keepCheck.append(burned_in_idx)
        # calculate end-point cut-off to match with labels
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        # slice away the observations used for burn-in (taking off 1 at the end to match with labels [slice off "prediction_horizon"])
        burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :] #.reset_index(drop=True) # features[burned_in_idx:] latter is sligthly faster but maybe not as precise
        burned_in_indices = ticker_indices.iloc[burned_in_idx : -end_point_cut, :]
        # slice away the burned-in indices from labels
        labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx+n_feature_lags):],
                                            classes = n_classes,
                                            splits = splits)
        # labels, returns, thresholds = extract_labels(data = candles[burned_in_idx + n_feature_lags : , :],
        #                                              classes = n_classes, group_style = 'equal')
        # check if there are remaining NaNs are burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        # Adding the burned in data
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_burned_in_indices = pd.concat([all_burned_in_indices, burned_in_indices.reset_index(drop=True)])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    # Returning the ticker as dummies
    if ticker_dummies:
        tickers = all_burned_in_features.pop('ticker')
        all_burned_in_features = pd.concat([all_burned_in_features, pd.get_dummies(tickers, prefix='d_ticker', drop_first=False)], axis=1)
    # print('Are all burned_in_idx the same?', all(keepCheck==keepCheck[0]))
    # print(dailyIndicies.head(50))
    return all_burned_in_features.reset_index(drop=True),\
           all_labels.reset_index(drop=True),\
           all_burned_in_indices.reset_index(drop=True)
# adding custom label splitting (label_split) for multi-class
def align_features_and_labels_multi_v9(price_candles,
                                       all_features,
                                       prediction_horizon,
                                       n_feature_lags,
                                       n_classes,
                                       label_split = [],
                                       safe_burn_in = False,
                                       data_sample = 'full',
                                       splitType='global',
                                       noise = False,
                                       ticker_dummies = False):
    """v9 of the multi-ticker aligner: derives everything from ``all_features``.

    Differences from v8:
    * returns are computed from ``all_features``'s own ``close_lag0`` column
      (first ``-n_feature_lags`` trimmed) instead of from ``price_candles``;
      ``price_candles`` is kept in the signature but no longer used.
    * the (day, timestamp, ticker) index frame is also built from
      ``all_features``'s own 2-level index.
    * labels are extracted starting at ``burned_in_idx`` (no
      ``+n_feature_lags`` offset).
    * contains AAPL-only debug prints left in from development.

    Returns (features, labels, indices), each with a reset RangeIndex.
    """
    all_burned_in_features = pd.DataFrame()
    all_burned_in_indices = pd.DataFrame()
    all_labels = pd.DataFrame()
    # dailyIndices = pd.DataFrame({'days':all_features.index.get_level_values(0),
    #                              'timestamps':all_features.index.get_level_values(1),
    #                              'ticker':all_features.ticker})
    # Build the (day, timestamp, ticker) frame from the feature frame's own index.
    dailyIndices = all_features['ticker'].reset_index().rename(columns={'level_0':'days','level_1':'timestamps'}).copy(deep=True)
    if splitType.lower() == 'global':
        # Making the splits for the labels based on all tickers
        # returns = ((price_candles['close'].values[1:] / price_candles['close'].values[:-1]) -1) * 100
        # returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
        #                             price_candles[price_candles.Ticker==ticker]['close'].values[:-1])-1) for ticker\
        #                            in price_candles.Ticker.unique()])
        returns = []
        tickers = []
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        for ticker in all_features.ticker.unique():
            # Returns from the un-lagged close feature carried in all_features.
            temp_price = all_features[all_features.ticker==ticker].iloc[0:-n_feature_lags,:].close_lag0
            ticker_returns = (temp_price.values[1:]/temp_price.values[:-1]) - 1
            ticker_names = [ticker for i in range(len(ticker_returns))]
            returns.append(ticker_returns)
            tickers.append(ticker_names)
        # concatenate returns and add noise
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact zeros so qcut edges are strictly increasing.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        if label_split == []:
            # equal-sized bins according to n_classes
            _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        elif label_split != []:
            # caller-supplied quantile edges
            _, splits = pd.qcut(returns, q=label_split, labels=False, retbins=True)
        #print(splits)
        returns = pd.DataFrame({'returns': returns, 'ticker': tickers})
    keepCheck = []
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        ticker_indices = dailyIndices[dailyIndices.ticker==ticker_name].copy(deep=True)
        # removing the "ticker" variable from ticker_features as np.isnan() does not like non-numericals
        #ticker_features = ticker_features.iloc[:, ticker_features.columns != 'ticker']
        ticker_features.drop('ticker', axis=1, inplace=True)
        # extract first 4 columns as the lag0 or raw OHLC prices (used for labelling)
        #ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values # candles.iloc[:, :4].values
        ticker_returns = returns[returns.ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # we assume data_sample is full and that we can continue features from yesterday's values.
            # that we have a single burn-in at the beginning and that's it
        # get first index that has no NaNs (the sum checks for True across columns, we look for sum == 0 and where that is first True)
        burned_in_idx = np.where((np.sum(np.isnan(ticker_features.values), axis=1) == 0) == True)[0][0]
        if ticker_name == 'AAPL':
            # debug output for a single reference ticker
            print(burned_in_idx)
        keepCheck.append(burned_in_idx)
        # calculate end-point cut-off to match with labels
        # end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        # slice away the observations used for burn-in (taking off 1 at the end to match with labels [slice off "prediction_horizon"])
        burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy(deep=True) #.reset_index(drop=True) # features[burned_in_idx:] latter is sligthly faster but maybe not as precise
        burned_in_indices = ticker_indices.iloc[burned_in_idx : -end_point_cut, :].copy(deep=True)
        if ticker_name == 'AAPL':
            print(ticker_indices.iloc[burned_in_idx : -end_point_cut, :])
        # slice away the burned-in indices from labels
        labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx):],#+n_feature_lags
                                            classes = n_classes,
                                            splits = splits)
        if ticker_name == 'AAPL':
            print(burned_in_features.shape,burned_in_indices.shape,labels.shape)
        # labels, returns, thresholds = extract_labels(data = candles[burned_in_idx + n_feature_lags : , :],
        #                                              classes = n_classes, group_style = 'equal')
        # check if there are remaining NaNs are burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        if ticker_name == 'AAPL':
            print(burned_in_indices.iloc[0:10,:])
        # Adding the burned in data
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_burned_in_indices = pd.concat([all_burned_in_indices, burned_in_indices.reset_index(drop=True)])#
        if ticker_name == 'AAPL':
            print(all_burned_in_indices.iloc[0:10,:])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    # Returning the ticker as dummies
    if ticker_dummies:
        tickers = all_burned_in_features.pop('ticker')
        all_burned_in_features = pd.concat([all_burned_in_features, pd.get_dummies(tickers, prefix='d_ticker', drop_first=False)], axis=1)
    return all_burned_in_features.reset_index(drop=True),\
           all_labels.reset_index(drop=True),\
           all_burned_in_indices.reset_index(drop=True)
## Corrects the wrong indices alignment by shifting price_candles n_feature_lags back.
## Now prints only once at the end, after all tickers have been processed.
def align_features_and_labels_multi_v10(price_candles,
                                        all_features,
                                        prediction_horizon,
                                        n_feature_lags,
                                        n_classes,
                                        label_split = [],
                                        safe_burn_in = False,
                                        data_sample = 'full',
                                        splitType='global',
                                        noise = False,
                                        ticker_dummies = False):
    """v10 of the multi-ticker aligner: corrected index alignment.

    Differences from v8:
    * each ticker's (day, timestamp) index frame is shifted back by
      ``n_feature_lags`` rows (``.shift(-n_feature_lags)``) so the index rows
      line up with the lag-corrected labels.
    * the per-ticker progress print is replaced by a single
      "All tickers processed." print after the loop.

    Returns (features, labels, indices), each with a reset RangeIndex.
    """
    all_burned_in_features = pd.DataFrame()
    all_burned_in_indices = pd.DataFrame()
    all_labels = pd.DataFrame()
    # (day, timestamp, ticker) bookkeeping for every candle row.
    dailyIndices = pd.DataFrame({'days':price_candles.index.get_level_values(0),
                                 'timestamps':price_candles.index.get_level_values(1),
                                 'ticker':price_candles.Ticker})
    if splitType.lower() == 'global':
        # Making the splits for the labels based on all tickers
        # returns = ((price_candles['close'].values[1:] / price_candles['close'].values[:-1]) -1) * 100
        # returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
        #                             price_candles[price_candles.Ticker==ticker]['close'].values[:-1])-1) for ticker\
        #                            in price_candles.Ticker.unique()])
        returns = []
        tickers = []
        for ticker in price_candles.Ticker.unique():
            # Close-to-close return per ticker (first candle has no return).
            ticker_returns = (price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
                              price_candles[price_candles.Ticker==ticker]['close'].values[:-1]) - 1
            ticker_names = [ticker for i in range(len(ticker_returns))]
            returns.append(ticker_returns)
            tickers.append(ticker_names)
        # concatenate returns and add noise
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact zeros so qcut edges are strictly increasing.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        if label_split == []:
            # equal-sized bins according to n_classes
            _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        elif label_split != []:
            # caller-supplied quantile edges
            _, splits = pd.qcut(returns, q=label_split, labels=False, retbins=True)
        #print(splits)
        returns = pd.DataFrame({'returns': returns, 'Ticker': tickers})
    keepCheck = []
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # Shift the index frame back so rows align with the lag-corrected labels.
        ticker_indices = dailyIndices[dailyIndices.ticker==ticker_name].copy(deep=True).shift(-n_feature_lags)
        # removing the "ticker" variable from ticker_features as np.isnan() does not like non-numericals
        #ticker_features = ticker_features.iloc[:, ticker_features.columns != 'ticker']
        ticker_features.drop('ticker', axis=1, inplace=True)
        # extract first 4 columns as the lag0 or raw OHLC prices (used for labelling)
        #ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values # candles.iloc[:, :4].values
        ticker_returns = returns[returns.Ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # we assume data_sample is full and that we can continue features from yesterday's values.
            # that we have a single burn-in at the beginning and that's it
        # get first index that has no NaNs (the sum checks for True across columns, we look for sum == 0 and where that is first True)
        burned_in_idx = np.where((np.sum(np.isnan(ticker_features.values), axis=1) == 0) == True)[0][0]
        keepCheck.append(burned_in_idx)
        # calculate end-point cut-off to match with labels
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        # slice away the observations used for burn-in (taking off 1 at the end to match with labels [slice off "prediction_horizon"])
        burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :] #.reset_index(drop=True) # features[burned_in_idx:] latter is sligthly faster but maybe not as precise
        burned_in_indices = ticker_indices.iloc[burned_in_idx : -end_point_cut, :]
        # slice away the burned-in indices from labels
        labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx+n_feature_lags):],
                                            classes = n_classes,
                                            splits = splits)
        # labels, returns, thresholds = extract_labels(data = candles[burned_in_idx + n_feature_lags : , :],
        #                                              classes = n_classes, group_style = 'equal')
        # check if there are remaining NaNs are burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        # Adding the burned in data
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_burned_in_indices = pd.concat([all_burned_in_indices, burned_in_indices.reset_index(drop=True)])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        # print(ticker_name + " done")
    print("All tickers processed.")
    # Returning the ticker as dummies
    if ticker_dummies:
        tickers = all_burned_in_features.pop('ticker')
        all_burned_in_features = pd.concat([all_burned_in_features, pd.get_dummies(tickers, prefix='d_ticker', drop_first=False)], axis=1)
    # print('Are all burned_in_idx the same?', all(keepCheck==keepCheck[0]))
    # print(dailyIndicies.head(50))
    return all_burned_in_features.reset_index(drop=True),\
           all_labels.reset_index(drop=True),\
           all_burned_in_indices.reset_index(drop=True)
# This is V10
def align_features_and_labels_multi_final(price_candles,
                                          all_features,
                                          prediction_horizon,
                                          n_feature_lags,
                                          n_classes,
                                          label_split = [],
                                          safe_burn_in = False,
                                          data_sample = 'full',
                                          splitType='global',
                                          noise = False,
                                          ticker_dummies = False):
    """Final multi-ticker aligner (identical to v10, kept under a stable name).

    Computes pooled close-to-close returns across tickers, bins them with
    ``pd.qcut`` (equal-population ``n_classes`` bins, or caller-supplied
    ``label_split`` quantiles), trims the per-ticker NaN burn-in rows and the
    trailing ``max(prediction_horizon, n_feature_lags + 1)`` rows, shifts each
    ticker's (day, timestamp) index frame back by ``n_feature_lags`` so it
    lines up with the labels, and stacks features/labels/indices across
    tickers.

    Returns (features, labels, indices), each with a reset RangeIndex.
    """
    all_burned_in_features = pd.DataFrame()
    all_burned_in_indices = pd.DataFrame()
    all_labels = pd.DataFrame()
    # (day, timestamp, ticker) bookkeeping for every candle row.
    dailyIndices = pd.DataFrame({'days':price_candles.index.get_level_values(0),
                                 'timestamps':price_candles.index.get_level_values(1),
                                 'ticker':price_candles.Ticker})
    if splitType.lower() == 'global':
        # Making the splits for the labels based on all tickers
        # returns = ((price_candles['close'].values[1:] / price_candles['close'].values[:-1]) -1) * 100
        # returns = np.concatenate([((price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
        #                             price_candles[price_candles.Ticker==ticker]['close'].values[:-1])-1) for ticker\
        #                            in price_candles.Ticker.unique()])
        returns = []
        tickers = []
        for ticker in price_candles.Ticker.unique():
            # Close-to-close return per ticker (first candle has no return).
            ticker_returns = (price_candles[price_candles.Ticker==ticker]['close'].values[1:]/\
                              price_candles[price_candles.Ticker==ticker]['close'].values[:-1]) - 1
            ticker_names = [ticker for i in range(len(ticker_returns))]
            returns.append(ticker_returns)
            tickers.append(ticker_names)
        # concatenate returns and add noise
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact zeros so qcut edges are strictly increasing.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        if label_split == []:
            # equal-sized bins according to n_classes
            _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        elif label_split != []:
            # caller-supplied quantile edges
            _, splits = pd.qcut(returns, q=label_split, labels=False, retbins=True)
        #print(splits)
        returns = pd.DataFrame({'returns': returns, 'Ticker': tickers})
    keepCheck = []
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        # Shift the index frame back so rows align with the lag-corrected labels.
        ticker_indices = dailyIndices[dailyIndices.ticker==ticker_name].copy(deep=True).shift(-n_feature_lags)
        # removing the "ticker" variable from ticker_features as np.isnan() does not like non-numericals
        #ticker_features = ticker_features.iloc[:, ticker_features.columns != 'ticker']
        ticker_features.drop('ticker', axis=1, inplace=True)
        # extract first 4 columns as the lag0 or raw OHLC prices (used for labelling)
        #ticker_prices = price_candles[price_candles.Ticker==ticker_name]['close'].values # candles.iloc[:, :4].values
        ticker_returns = returns[returns.Ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            assert data_sample == 'full'
            # we assume data_sample is full and that we can continue features from yesterday's values.
            # that we have a single burn-in at the beginning and that's it
        # get first index that has no NaNs (the sum checks for True across columns, we look for sum == 0 and where that is first True)
        burned_in_idx = np.where((np.sum(np.isnan(ticker_features.values), axis=1) == 0) == True)[0][0]
        keepCheck.append(burned_in_idx)
        # calculate end-point cut-off to match with labels
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        # slice away the observations used for burn-in (taking off 1 at the end to match with labels [slice off "prediction_horizon"])
        burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :] #.reset_index(drop=True) # features[burned_in_idx:] latter is sligthly faster but maybe not as precise
        burned_in_indices = ticker_indices.iloc[burned_in_idx : -end_point_cut, :]
        # slice away the burned-in indices from labels
        labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx+n_feature_lags):],
                                            classes = n_classes,
                                            splits = splits)
        # labels, returns, thresholds = extract_labels(data = candles[burned_in_idx + n_feature_lags : , :],
        #                                              classes = n_classes, group_style = 'equal')
        # check if there are remaining NaNs are burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        # Adding the burned in data
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_burned_in_indices = pd.concat([all_burned_in_indices, burned_in_indices.reset_index(drop=True)])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        # print(ticker_name + " done")
    print("All tickers processed.")
    # Returning the ticker as dummies
    if ticker_dummies:
        tickers = all_burned_in_features.pop('ticker')
        all_burned_in_features = pd.concat([all_burned_in_features, pd.get_dummies(tickers, prefix='d_ticker', drop_first=False)], axis=1)
    # print('Are all burned_in_idx the same?', all(keepCheck==keepCheck[0]))
    # print(dailyIndicies.head(50))
    return all_burned_in_features.reset_index(drop=True),\
           all_labels.reset_index(drop=True),\
           all_burned_in_indices.reset_index(drop=True)
def align_features_and_labels_multi_test(price_candles,
                                         all_features,
                                         prediction_horizon,
                                         n_feature_lags,
                                         n_classes,
                                         label_split = [],
                                         safe_burn_in = False,
                                         data_sample = 'full',
                                         splitType='global',
                                         noise = False,
                                         ticker_dummies = False,
                                         top=None):
    """Experimental multi-ticker aligner (v9 variant) with return winsorizing.

    Like ``align_features_and_labels_multi_v9`` (returns and indices derived
    from ``all_features`` itself; ``price_candles`` is accepted but unused),
    but with an extra ``top`` option: when ``top`` is not None, pooled returns
    are clipped at the ``top`` / ``100 - top`` percentiles before the qcut
    split points are computed, so extreme outliers do not distort the bin
    edges.

    Parameters
    ----------
    top : float or None — percentile (e.g. 99) used to winsorize returns
        before computing the splits; None disables clipping.
    label_split : non-empty list of quantiles overrides the ``n_classes``
        equal-population binning.  NOTE: mutable default, only compared,
        never mutated — harmless.
    Other parameters are as in the v7–v10 aligners.

    Returns
    -------
    (features, labels, indices), each with a reset RangeIndex.
    """
    # BUGFIX: the module head does not import `copy`, so the `top` branch
    # below raised NameError.  Import locally so this function is self-contained.
    import copy
    all_burned_in_features = pd.DataFrame()
    all_burned_in_indices = pd.DataFrame()
    all_labels = pd.DataFrame()
    # Build the (day, timestamp, ticker) frame from the feature frame's own index.
    dailyIndices = all_features['ticker'].reset_index().rename(columns={'level_0':'days','level_1':'timestamps'}).copy(deep=True)
    if splitType.lower() == 'global':
        # Making the splits for the labels based on all tickers.
        returns = []
        tickers = []
        end_point_cut = max(prediction_horizon, n_feature_lags + 1)
        for ticker in all_features.ticker.unique():
            # Returns from the un-lagged close feature carried in all_features.
            temp_price = all_features[all_features.ticker==ticker].iloc[0:-n_feature_lags,:].close_lag0
            ticker_returns = (temp_price.values[1:]/temp_price.values[:-1]) - 1
            ticker_names = [ticker for i in range(len(ticker_returns))]
            returns.append(ticker_returns)
            tickers.append(ticker_names)
        # concatenate returns and add noise
        returns = np.concatenate(returns)
        if noise:
            # Jitter exact zeros so qcut edges are strictly increasing.
            returns[returns==0] = np.random.normal(0,1,sum(returns==0))/1000000
        tickers = np.concatenate(tickers)
        if label_split == []:
            # equal-sized bins according to n_classes
            if top is not None:
                # Winsorize a copy of the returns before computing the bin
                # edges, so tail outliers don't stretch the outermost bins.
                test = copy.deepcopy(returns)
                print('upper limit: %.3f\nlower limit: %.3f' % (np.percentile(test,top),np.percentile(test,100-top)))
                test[test>np.percentile(test,top)] = np.percentile(test,top)
                test[test<np.percentile(test,100-top)] = np.percentile(test,100-top)
                _, splits = pd.qcut(test, q=n_classes, labels=False, retbins=True)
            else:
                _, splits = pd.qcut(returns, q=n_classes, labels=False, retbins=True)
        elif label_split != []:
            # caller-supplied quantile edges
            _, splits = pd.qcut(returns, q=label_split, labels=False, retbins=True)
        returns = pd.DataFrame({'returns': returns, 'ticker': tickers})
    keepCheck = []
    for ticker_iter, ticker_name in enumerate(all_features.ticker.unique()):
        ticker_features = all_features[all_features.ticker==ticker_name].copy(deep=True)
        ticker_indices = dailyIndices[dailyIndices.ticker==ticker_name].copy(deep=True)
        # removing the "ticker" variable from ticker_features as np.isnan() does not like non-numericals
        ticker_features.drop('ticker', axis=1, inplace=True)
        ticker_returns = returns[returns.ticker==ticker_name]['returns'].values
        if not safe_burn_in:
            # Only the single-burn-in, full-sample path is implemented.
            assert data_sample == 'full'
        # get first index that has no NaNs (sum over columns == 0 means a fully populated row)
        burned_in_idx = np.where((np.sum(np.isnan(ticker_features.values), axis=1) == 0) == True)[0][0]
        if ticker_name == 'AAPL':
            # debug output for a single reference ticker
            print(burned_in_idx)
        keepCheck.append(burned_in_idx)
        # slice away the observations used for burn-in and the trailing end-point cut
        burned_in_features = ticker_features.iloc[burned_in_idx : -end_point_cut, :].copy(deep=True)
        burned_in_indices = ticker_indices.iloc[burned_in_idx : -end_point_cut, :].copy(deep=True)
        if ticker_name == 'AAPL':
            print(ticker_indices.iloc[burned_in_idx : -end_point_cut, :])
        # label the post-burn-in returns against the global splits
        labels = extract_labels_multi_final(data = ticker_returns[(burned_in_idx):],#+n_feature_lags
                                            classes = n_classes,
                                            splits = splits)
        if ticker_name == 'AAPL':
            print(burned_in_features.shape,burned_in_indices.shape,labels.shape)
        # check if there are remaining NaNs after burn-in (means error)
        remaining_nans = np.where(np.isnan(burned_in_features.values))[0].size
        if remaining_nans > 0:
            raise ValueError('Had NaN in burned_in_features after burn-in')
        # Adding the ticker
        burned_in_features.loc[:,'ticker'] = ticker_name
        if ticker_name == 'AAPL':
            print(burned_in_indices.iloc[0:10,:])
        # Adding the burned in data
        all_burned_in_features = pd.concat([all_burned_in_features, burned_in_features.reset_index(drop=True)])
        all_burned_in_indices = pd.concat([all_burned_in_indices, burned_in_indices.reset_index(drop=True)])
        if ticker_name == 'AAPL':
            print(all_burned_in_indices.iloc[0:10,:])
        all_labels = pd.concat([all_labels, pd.Series(labels)])
        print(ticker_name + " done")
    # Returning the ticker as dummies
    if ticker_dummies:
        tickers = all_burned_in_features.pop('ticker')
        all_burned_in_features = pd.concat([all_burned_in_features, pd.get_dummies(tickers, prefix='d_ticker', drop_first=False)], axis=1)
    return all_burned_in_features.reset_index(drop=True),\
           all_labels.reset_index(drop=True),\
           all_burned_in_indices.reset_index(drop=True)
def pre_processing_initial(rawData, ppDict, subBy, verbose=False):
    """Pre-process the columns of ``rawData`` according to ``ppDict``.

    ``ppDict`` maps column name -> procedure tag.  Columns sharing a tag are
    processed together, one tag at a time (in sorted tag order).  Supported
    tags (case-insensitive): 'act' (as-is), 'actde' (demeaned), 'quantgau'
    (gaussian quantile transform via the module-level ``qtGau``), 'std'
    (module-level ``scaler``), 'sub' (subtract ``subBy``), 'pow' (module-level
    ``pt``), 'minmax' (module-level ``mm_scaler``), 'norm' (module-level
    ``norm_scaler``).  Unknown tags are skipped silently.

    Parameters
    ----------
    rawData : DataFrame holding the raw feature columns.
    ppDict : dict mapping column names to procedure tags.
    subBy : value subtracted from columns tagged 'sub'.
    verbose : if True, print the procedure and columns being processed.

    Returns
    -------
    DataFrame with the processed columns (column order follows sorted
    procedure tags, then ``ppDict`` key order within each tag).
    """
    col_names = np.array(list(ppDict.keys()))
    proc_tags = np.array(list(ppDict.values()))
    preproX = pd.DataFrame()
    for proc in np.unique(proc_tags):
        if verbose:
            print('Pre-Processing Procedure: ', proc)
        cols = col_names[proc_tags == proc]
        tag = proc.lower()
        if tag == 'act':
            # keep the raw values unchanged
            transformed = rawData[cols]
        elif tag == 'actde':
            # subtract each column's own mean
            transformed = rawData[cols] - rawData[cols].mean()
        elif tag == 'quantgau':
            # gaussian quantile transform (module-level transformer)
            transformed = pd.DataFrame(qtGau.fit_transform(rawData[cols].values))
        elif tag == 'std':
            # standardisation (module-level scaler)
            transformed = pd.DataFrame(scaler.fit_transform(rawData[cols].values))
        elif tag == 'sub':
            # subtract a fixed offset
            transformed = rawData[cols] - subBy
        elif tag == 'pow':
            # power transform (module-level transformer)
            transformed = pd.DataFrame(pt.fit_transform(rawData[cols].values))
        elif tag == 'minmax':
            # min-max normalisation (module-level scaler)
            transformed = pd.DataFrame(mm_scaler.fit_transform(rawData[cols].values))
        elif tag == 'norm':
            # norm scaling (module-level scaler)
            transformed = pd.DataFrame(norm_scaler.fit_transform(rawData[cols].values))
        else:
            # unrecognised procedure tag — nothing to add
            continue
        if verbose:
            print('Columns Processed:', cols, '\n')
        preproX[cols] = transformed
    return preproX
def pre_processing_extended(rawData_train,
                            rawData_test,
                            ppDict,
                            subBy,
                            verbose=False):
    """Pre-process train and test feature frames, one column group at a time.

    ``ppDict`` maps a column name to the procedure applied to it:
    'act' (copy as-is), 'actde' (demean), 'quantgau'/'quantuni' (quantile
    transform), 'std' (standardize), 'sub' (subtract ``subBy``),
    'pow' (power transform), 'minmax' (min-max scale), 'norm' (norm scale).
    Unknown procedure names are silently skipped, as before.

    All transformers are fit on ``rawData_train`` only and then applied to
    both frames, so no information leaks from the test set.

    NOTE(review): relies on module-level transformer instances (qtGau,
    qtUni, scaler, pt, mm_scaler, norm_scaler) being defined elsewhere in
    this file — confirm they exist before calling.

    Returns:
        (pp_train, pp_test): pre-processed frames in the original column
        order, each with a fresh RangeIndex.
    """
    # Split the mapping into parallel arrays so columns can be grouped by
    # their requested procedure with boolean masks.
    key = np.array(list(ppDict.keys()))
    item = np.array(list(ppDict.values()))
    pp_train = pd.DataFrame()
    pp_test = pd.DataFrame()

    def _fit_transform(transformer, cols):
        # Fit on the training data only, transform both frames.  The source
        # frames' row index is preserved so the column assignment below
        # aligns correctly; a bare pd.DataFrame(transform(...)) carries a
        # RangeIndex and would produce all-NaN columns whenever the input
        # frames have a non-default index.
        transformer.fit(rawData_train[cols].values)
        tr = pd.DataFrame(transformer.transform(rawData_train[cols].values),
                          index=rawData_train.index)
        te = pd.DataFrame(transformer.transform(rawData_test[cols].values),
                          index=rawData_test.index)
        return tr, te

    # Procedures backed by a fit/transform-style transformer instance.
    fitted = {'quantgau': qtGau, 'quantuni': qtUni, 'std': scaler,
              'pow': pt, 'minmax': mm_scaler, 'norm': norm_scaler}
    for ele in np.unique(item):
        if verbose:
            print('Pre-Processing Procedure: ', ele)
        proc = ele.lower()
        if proc not in fitted and proc not in ('act', 'actde', 'sub'):
            continue  # unrecognised procedure name
        cols = key[item == ele]
        if verbose:
            print('Columns Processed:', cols, '\n')
        if proc == 'act':
            # Keep the raw values.
            pp_train[cols] = rawData_train[cols]
            pp_test[cols] = rawData_test[cols]
        elif proc == 'actde':
            # Demean both frames with the *training* mean (no leakage).
            mean = rawData_train[cols].mean()
            pp_train[cols] = rawData_train[cols] - mean
            pp_test[cols] = rawData_test[cols] - mean
        elif proc == 'sub':
            # Shift by a fixed amount.
            pp_train[cols] = rawData_train[cols] - subBy
            pp_test[cols] = rawData_test[cols] - subBy
        else:
            pp_train[cols], pp_test[cols] = _fit_transform(fitted[proc], cols)
    # Rearranging columns before we return them.
    pp_train, pp_test = pp_train[rawData_train.columns], pp_test[rawData_test.columns]
    return pp_train.reset_index(drop=True), pp_test.reset_index(drop=True)
def pre_processing_v1(rawData_train,
                      rawData_test,
                      ppDict,
                      subBy,
                      verbose=False):
    """Pre-process train/test frames, applying one procedure per column group.

    Like ``pre_processing_extended`` but lag-aware: a key in ``ppDict``
    selects every column that *contains* it as a substring, so all lagged
    copies of a feature receive the same treatment.  Supported procedures:
    'act', 'actde', 'quantgau', 'quantuni', 'std', 'sub', 'log', 'pow',
    'minmax', 'norm'.  Unknown names are silently skipped.

    Transformers are fit on ``rawData_train`` only (no test leakage).

    NOTE(review): relies on module-level transformer instances (qtGau,
    qtUni, scaler, pt, mm_scaler, norm_scaler) defined elsewhere in this
    file.

    Returns:
        (pp_train, pp_test): pre-processed frames in the original column
        order, each with a fresh RangeIndex.
    """
    key = np.array(list(ppDict.keys()))
    item = np.array(list(ppDict.values()))
    pp_train = pd.DataFrame()
    pp_test = pd.DataFrame()

    def _matching_cols(targets):
        # Account for lags: each target selects every column containing it
        # as a substring, so all lags are pre-processed the same way.
        return np.concatenate(
            [[c for c in rawData_train.columns if t in c] for t in targets])

    def _fit_transform(transformer, cols):
        # Fit on train only, transform both frames.  Preserving the source
        # index keeps the column assignment aligned; a RangeIndex-only
        # DataFrame would yield NaNs for non-default-index inputs.
        transformer.fit(rawData_train[cols].values)
        tr = pd.DataFrame(transformer.transform(rawData_train[cols].values),
                          index=rawData_train.index)
        te = pd.DataFrame(transformer.transform(rawData_test[cols].values),
                          index=rawData_test.index)
        return tr, te

    fitted = {'quantgau': qtGau, 'quantuni': qtUni, 'std': scaler,
              'pow': pt, 'minmax': mm_scaler, 'norm': norm_scaler}
    for ele in np.unique(item):
        if verbose:
            print('Pre-Processing Procedure: ', ele)
        proc = ele.lower()
        if proc not in fitted and proc not in ('act', 'actde', 'sub', 'log'):
            continue  # unrecognised procedure name
        cols = _matching_cols(key[item == ele])
        if verbose:
            print('Columns Processed:', key[item == ele], '\n')
        if proc == 'act':
            # Keep the raw values.
            pp_train[cols] = rawData_train[cols]
            pp_test[cols] = rawData_test[cols]
        elif proc == 'actde':
            # Demean both frames with the *training* mean.
            mean = rawData_train[cols].mean()
            pp_train[cols] = rawData_train[cols] - mean
            pp_test[cols] = rawData_test[cols] - mean
        elif proc == 'sub':
            # Shift by a fixed amount.
            pp_train[cols] = rawData_train[cols] - subBy
            pp_test[cols] = rawData_test[cols] - subBy
        elif proc == 'log':
            # Natural log transform (inputs must be strictly positive).
            pp_train[cols] = np.log(rawData_train[cols])
            pp_test[cols] = np.log(rawData_test[cols])
        else:
            pp_train[cols], pp_test[cols] = _fit_transform(fitted[proc], cols)
    # Rearranging columns before we return them.
    pp_train, pp_test = pp_train[rawData_train.columns], pp_test[rawData_test.columns]
    return pp_train.reset_index(drop=True), pp_test.reset_index(drop=True)
def pre_processing_v2(rawData_train,
                      rawData_test,
                      ppDict,
                      subBy,
                      verbose=False):
    """Pre-process train/test frames with stricter lag-aware column matching.

    Unlike ``pre_processing_v1``, a key selects a column only when it is an
    exact match, or a substring match on a column whose name contains
    'lag' — avoiding accidental substring hits on unrelated columns.
    Supported procedures: 'act', 'actde', 'quantgau', 'quantuni', 'std',
    'sub', 'log', 'pow', 'minmax', 'norm'; unknown names are skipped.

    Transformers are fit on ``rawData_train`` only (no test leakage).

    NOTE(review): relies on module-level transformer instances (qtGau,
    qtUni, scaler, pt, mm_scaler, norm_scaler) defined elsewhere in this
    file.

    Returns:
        (pp_train, pp_test): pre-processed frames in the original column
        order, each with a fresh RangeIndex.
    """
    key = np.array(list(ppDict.keys()))
    item = np.array(list(ppDict.values()))
    pp_train = pd.DataFrame()
    pp_test = pd.DataFrame()

    def _matching_cols(targets):
        # Exact match, or substring match restricted to lag columns, so all
        # lags of a feature are processed the same way without accidental
        # hits on unrelated columns.  Equivalent to the former bitwise
        # expression `(t==c) | (t in c) & ('lag' in c)`.
        return np.concatenate(
            [[c for c in rawData_train.columns
              if t == c or (t in c and 'lag' in c)] for t in targets])

    def _fit_transform(transformer, cols):
        # Fit on train only, transform both frames.  Preserving the source
        # index keeps the column assignment aligned; a RangeIndex-only
        # DataFrame would yield NaNs for non-default-index inputs.
        transformer.fit(rawData_train[cols].values)
        tr = pd.DataFrame(transformer.transform(rawData_train[cols].values),
                          index=rawData_train.index)
        te = pd.DataFrame(transformer.transform(rawData_test[cols].values),
                          index=rawData_test.index)
        return tr, te

    fitted = {'quantgau': qtGau, 'quantuni': qtUni, 'std': scaler,
              'pow': pt, 'minmax': mm_scaler, 'norm': norm_scaler}
    for ele in np.unique(item):
        if verbose:
            print('Pre-Processing Procedure: ', ele)
        proc = ele.lower()
        if proc not in fitted and proc not in ('act', 'actde', 'sub', 'log'):
            continue  # unrecognised procedure name
        cols = _matching_cols(key[item == ele])
        if verbose:
            print('Columns Processed:', key[item == ele], '\n')
        if proc == 'act':
            # Keep the raw values.
            pp_train[cols] = rawData_train[cols]
            pp_test[cols] = rawData_test[cols]
        elif proc == 'actde':
            # Demean both frames with the *training* mean.
            mean = rawData_train[cols].mean()
            pp_train[cols] = rawData_train[cols] - mean
            pp_test[cols] = rawData_test[cols] - mean
        elif proc == 'sub':
            # Shift by a fixed amount.
            pp_train[cols] = rawData_train[cols] - subBy
            pp_test[cols] = rawData_test[cols] - subBy
        elif proc == 'log':
            # Natural log transform (inputs must be strictly positive).
            pp_train[cols] = np.log(rawData_train[cols])
            pp_test[cols] = np.log(rawData_test[cols])
        else:
            pp_train[cols], pp_test[cols] = _fit_transform(fitted[proc], cols)
    # Rearranging columns before we return them.
    pp_train, pp_test = pp_train[rawData_train.columns], pp_test[rawData_test.columns]
    return pp_train.reset_index(drop=True), pp_test.reset_index(drop=True)
def pre_processing_v3(rawData_train,
                      rawData_test,
                      ppDict,
                      subBy=100,
                      verbose=False):
    """Pre-process train/test frames (lag-aware, strict column matching).

    Functionally identical to ``pre_processing_v2`` except ``subBy``
    defaults to 100.  A key selects a column only on an exact match, or a
    substring match on a column whose name contains 'lag'.  Supported
    procedures: 'act', 'actde', 'quantgau', 'quantuni', 'std', 'sub',
    'log', 'pow', 'minmax', 'norm'; unknown names are skipped.

    Transformers are fit on ``rawData_train`` only (no test leakage).

    NOTE(review): relies on module-level transformer instances (qtGau,
    qtUni, scaler, pt, mm_scaler, norm_scaler) defined elsewhere in this
    file.  Consider delegating to a single shared implementation — this
    function duplicates pre_processing_v2 almost line for line.

    Returns:
        (pp_train, pp_test): pre-processed frames in the original column
        order, each with a fresh RangeIndex.
    """
    key = np.array(list(ppDict.keys()))
    item = np.array(list(ppDict.values()))
    pp_train = pd.DataFrame()
    pp_test = pd.DataFrame()

    def _matching_cols(targets):
        # Exact match, or substring match restricted to lag columns —
        # equivalent to the former `(t==c) | (t in c) & ('lag' in c)`.
        return np.concatenate(
            [[c for c in rawData_train.columns
              if t == c or (t in c and 'lag' in c)] for t in targets])

    def _fit_transform(transformer, cols):
        # Fit on train only, transform both frames.  Preserving the source
        # index keeps the column assignment aligned; a RangeIndex-only
        # DataFrame would yield NaNs for non-default-index inputs.
        transformer.fit(rawData_train[cols].values)
        tr = pd.DataFrame(transformer.transform(rawData_train[cols].values),
                          index=rawData_train.index)
        te = pd.DataFrame(transformer.transform(rawData_test[cols].values),
                          index=rawData_test.index)
        return tr, te

    fitted = {'quantgau': qtGau, 'quantuni': qtUni, 'std': scaler,
              'pow': pt, 'minmax': mm_scaler, 'norm': norm_scaler}
    for ele in np.unique(item):
        if verbose:
            print('Pre-Processing Procedure: ', ele)
        proc = ele.lower()
        if proc not in fitted and proc not in ('act', 'actde', 'sub', 'log'):
            continue  # unrecognised procedure name
        cols = _matching_cols(key[item == ele])
        if verbose:
            print('Columns Processed:', key[item == ele], '\n')
        if proc == 'act':
            # Keep the raw values.
            pp_train[cols] = rawData_train[cols]
            pp_test[cols] = rawData_test[cols]
        elif proc == 'actde':
            # Demean both frames with the *training* mean.
            mean = rawData_train[cols].mean()
            pp_train[cols] = rawData_train[cols] - mean
            pp_test[cols] = rawData_test[cols] - mean
        elif proc == 'sub':
            # Shift by a fixed amount (default 100).
            pp_train[cols] = rawData_train[cols] - subBy
            pp_test[cols] = rawData_test[cols] - subBy
        elif proc == 'log':
            # Natural log transform (inputs must be strictly positive).
            pp_train[cols] = np.log(rawData_train[cols])
            pp_test[cols] = np.log(rawData_test[cols])
        else:
            pp_train[cols], pp_test[cols] = _fit_transform(fitted[proc], cols)
    # Rearranging columns before we return them.
    pp_train, pp_test = pp_train[rawData_train.columns], pp_test[rawData_test.columns]
    return pp_train.reset_index(drop=True), pp_test.reset_index(drop=True)
def pre_processing_v4(rawData_train,
rawData_test,
ppDict,
subBy=100,
verbose=False):
# Creating empty lists to hold the content of our pre-processing dictonary
key = []
item = []
# Extracting the items of the pre-processing dictonary
for k,i in ppDict.items():
key.append(k)
item.append(i)
# Numping
key = np.array(key)
item = np.array(item)
# Creating an empty dataframe to store the pre-processed data.
pp_train = pd.DataFrame()
pp_test = pd.DataFrame()
# Pre-processing the data according to the desired ways.
for ele in np.unique(item):
if verbose:
print('Pre-Processing Procedure: ',ele)
# Return the actual values
if ele.lower() == 'act':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the raw feature to the new frame
pp_train[cols] = rawData_train[cols]
pp_test[cols] = rawData_test[cols]
# Return the actual values demeaned
elif ele.lower() == 'actde':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the demeaned features to the new frame
pp_train[cols] = rawData_train[cols]-rawData_train[cols].mean()
pp_test[cols] = rawData_test[cols]-rawData_train[cols].mean()
# Return the features quantiale transformed (gaussian)
elif ele.lower() == 'quantgau':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
qtGau.fit(rawData_train[cols].values)
pp_train[cols] = pd.DataFrame(qtGau.transform(rawData_train[cols].values))
pp_test[cols] = pd.DataFrame(qtGau.transform(rawData_test[cols].values))
elif ele.lower() == 'quantuni':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
qtUni.fit(rawData_train[cols].values)
pp_train[cols] = pd.DataFrame(qtUni.transform(rawData_train[cols].values))
pp_test[cols] = pd.DataFrame(qtUni.transform(rawData_test[cols].values))
# Return the features standardized
elif ele.lower() == 'std':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
scaler.fit(rawData_train[cols].values)
pp_train[cols] = pd.DataFrame(scaler.transform(rawData_train[cols].values))
pp_test[cols] = pd.DataFrame(scaler.transform(rawData_test[cols].values))
# Return the features substracted a certain amount
elif ele.lower() == 'sub':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
pp_train[cols] = rawData_train[cols]-subBy
pp_test[cols] = rawData_test[cols]-subBy
elif ele.lower() == 'log':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
pp_train[cols] = np.log(rawData_train[cols])
pp_test[cols] = np.log(rawData_test[cols])
# Return the features power transformed (standardized)
elif ele.lower() == 'pow':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if (t in c)] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
pt.fit(rawData_train[cols].values)
pp_train[cols] = pd.DataFrame(pt.transform(rawData_train[cols].values))
pp_test[cols] = pd.DataFrame(pt.transform(rawData_test[cols].values))
# Return the features min-max-normalised
elif ele.lower() == 'minmax':
# Account for lags and preprocess all lags the same way
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
# print(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# print(rawData_train[cols].values)
# print(rawData_train[cols].values.shape)
#print(rawData_train[cols].values.reshape(-1,2))
#print(rawData_train[cols].values.reshape(-1,2).shape)
# Adding the transformed features to the new frame
mm_scaler.fit(rawData_train[cols].values)# if len(cols) > 10 else mm_scaler.fit(rawData_train[cols].values.reshape(-1,1))
# print(pd.DataFrame(mm_scaler.transform(rawData_train[cols].values)))
pp_train[cols] = pd.DataFrame(mm_scaler.transform(rawData_train[cols].values))#,columns=cols# if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_train[cols].values.reshape(-1,1)))
# print(pp_train)
# print(pd.DataFrame(mm_scaler.transform(rawData_test[cols].values)))
pp_test[cols] = pd.DataFrame(mm_scaler.transform(rawData_test[cols].values))#,columns = cols#if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_test[cols].values.reshape(-1,1)))
# print(pp_test)
# pp_train[cols] = mm_scaler.transform(rawData_train[cols].values)# if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_train[cols].values.reshape(-1,1)))
# pp_test[cols] = mm_scaler.transform(rawData_test[cols].values)# if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_test[cols].values.reshape(-1,1)))
# mm_scaler.fit(rawData_train[cols].values) if len(cols) > 1 else mm_scaler.fit(rawData_train[cols].values.reshape(-1,1))
# pp_train[cols] = pd.DataFrame(mm_scaler.transform(rawData_train[cols].values)) if len(cols) > 1 else pd.DataFrame(mm_scaler.transform(rawData_train[cols].values.reshape(-1,1)))
# pp_test[cols] = pd.DataFrame(mm_scaler.transform(rawData_test[cols].values)) if len(cols) > 1 else pd.DataFrame(mm_scaler.transform(rawData_test[cols].values.reshape(-1,1)))
# Return the features norm scale
elif ele.lower() == 'norm':
# Account for lags and preprocess all lags the same way
# cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
cols = np.concatenate(cols)
if verbose:
print('Columns Processed:',key[item==ele],'\n')
# Adding the transformed features to the new frame
norm_scaler.fit(rawData_train[cols].values)
pp_train[cols] = pd.DataFrame(norm_scaler.transform(rawData_train[cols].values))
pp_test[cols] = pd.DataFrame(norm_scaler.transform(rawData_test[cols].values))
# Rearanging columns before we return it
pp_train,pp_test = pp_train[rawData_train.columns],pp_test[rawData_test.columns]
# Return preprocessed data
return pp_train.reset_index(drop=True),pp_test.reset_index(drop=True)
def pre_processing_final(rawData_train,
                         rawData_test,
                         ppDict,
                         subBy=100,
                         verbose=False):
    """Pre-process train/test frames column-group-wise according to ppDict.

    ppDict maps a column-name key to a transform tag: 'act', 'actde',
    'quantgau', 'quantuni', 'std', 'sub', 'log', 'pow', 'minmax' or 'norm'.
    Every transformer is fit on the *training* frame only and then applied
    to both frames, so there is no test-set leakage.

    NOTE(review): relies on module-level fitted-transformer objects
    (qtGau, qtUni, scaler, pt, mm_scaler, norm_scaler) defined elsewhere
    in this file -- confirm they exist before calling.  This function
    appears to duplicate pre_processing() above; keep the two in sync.

    :param rawData_train: training DataFrame (transformers are fit on it)
    :param rawData_test: test DataFrame (transformed with the fitted objects)
    :param ppDict: {column-key: transform-tag} dictionary
    :param subBy: constant subtracted by the 'sub' transform (default 100)
    :param verbose: print progress information when True
    :return: (pp_train, pp_test) with columns in the original order and
             indices reset
    """
    # Creating empty lists to hold the content of our pre-processing dictonary
    key = []
    item = []
    # Extracting the items of the pre-processing dictonary
    for k,i in ppDict.items():
        key.append(k)
        item.append(i)
    # Numping
    key = np.array(key)
    item = np.array(item)
    # Creating an empty dataframe to store the pre-processed data.
    pp_train = pd.DataFrame()
    pp_test = pd.DataFrame()
    # Pre-processing the data according to the desired ways.
    for ele in np.unique(item):
        if verbose:
            print('Pre-Processing Procedure: ',ele)
        # Return the actual values
        if ele.lower() == 'act':
            # Account for lags and preprocess all lags the same way.
            # The filter keeps columns equal to t, plus 'lag' columns that
            # contain t ('&' binds tighter than '|', which is the intent).
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the raw feature to the new frame
            pp_train[cols] = rawData_train[cols]#.copy(deep=True)
            pp_test[cols] = rawData_test[cols]
        # Return the actual values demeaned
        elif ele.lower() == 'actde':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the demeaned features to the new frame
            # (test data is demeaned with the *training* mean on purpose)
            pp_train[cols] = rawData_train[cols]-rawData_train[cols].mean()
            pp_test[cols] = rawData_test[cols]-rawData_train[cols].mean()
        # Return the features quantiale transformed (gaussian)
        elif ele.lower() == 'quantgau':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            qtGau.fit(rawData_train[cols].values)
            pp_train[cols] = pd.DataFrame(qtGau.transform(rawData_train[cols].values))
            pp_test[cols] = pd.DataFrame(qtGau.transform(rawData_test[cols].values))
        elif ele.lower() == 'quantuni':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            qtUni.fit(rawData_train[cols].values)
            pp_train[cols] = pd.DataFrame(qtUni.transform(rawData_train[cols].values))
            pp_test[cols] = pd.DataFrame(qtUni.transform(rawData_test[cols].values))
        # Return the features standardized
        elif ele.lower() == 'std':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            scaler.fit(rawData_train[cols].values)
            pp_train[cols] = pd.DataFrame(scaler.transform(rawData_train[cols].values))
            pp_test[cols] = pd.DataFrame(scaler.transform(rawData_test[cols].values))
        # Return the features substracted a certain amount
        elif ele.lower() == 'sub':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            pp_train[cols] = rawData_train[cols]-subBy
            pp_test[cols] = rawData_test[cols]-subBy
        elif ele.lower() == 'log':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            # NOTE(review): np.log assumes strictly positive values here.
            pp_train[cols] = np.log(rawData_train[cols])
            pp_test[cols] = np.log(rawData_test[cols])
        # Return the features power transformed (standardized)
        elif ele.lower() == 'pow':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if (t in c)] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            pt.fit(rawData_train[cols].values)
            pp_train[cols] = pd.DataFrame(pt.transform(rawData_train[cols].values))
            pp_test[cols] = pd.DataFrame(pt.transform(rawData_test[cols].values))
        # Return the features min-max-normalised
        elif ele.lower() == 'minmax':
            # Account for lags and preprocess all lags the same way
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            # print(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # print(rawData_train[cols].values)
            # print(rawData_train[cols].values.shape)
            #print(rawData_train[cols].values.reshape(-1,2))
            #print(rawData_train[cols].values.reshape(-1,2).shape)
            # Adding the transformed features to the new frame
            mm_scaler.fit(rawData_train[cols].values)# if len(cols) > 10 else mm_scaler.fit(rawData_train[cols].values.reshape(-1,1))
            # print(pd.DataFrame(mm_scaler.transform(rawData_train[cols].values)))
            pp_train[cols] = pd.DataFrame(mm_scaler.transform(rawData_train[cols].values))#,columns=cols# if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_train[cols].values.reshape(-1,1)))
            # print(pp_train)
            # print(pd.DataFrame(mm_scaler.transform(rawData_test[cols].values)))
            pp_test[cols] = pd.DataFrame(mm_scaler.transform(rawData_test[cols].values))#,columns = cols#if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_test[cols].values.reshape(-1,1)))
            # print(pp_test)
            # pp_train[cols] = mm_scaler.transform(rawData_train[cols].values)# if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_train[cols].values.reshape(-1,1)))
            # pp_test[cols] = mm_scaler.transform(rawData_test[cols].values)# if len(cols) > 10 else pd.DataFrame(mm_scaler.transform(rawData_test[cols].values.reshape(-1,1)))
            # mm_scaler.fit(rawData_train[cols].values) if len(cols) > 1 else mm_scaler.fit(rawData_train[cols].values.reshape(-1,1))
            # pp_train[cols] = pd.DataFrame(mm_scaler.transform(rawData_train[cols].values)) if len(cols) > 1 else pd.DataFrame(mm_scaler.transform(rawData_train[cols].values.reshape(-1,1)))
            # pp_test[cols] = pd.DataFrame(mm_scaler.transform(rawData_test[cols].values)) if len(cols) > 1 else pd.DataFrame(mm_scaler.transform(rawData_test[cols].values.reshape(-1,1)))
        # Return the features norm scale
        elif ele.lower() == 'norm':
            # Account for lags and preprocess all lags the same way
            # cols = [[c for c in rawData_train.columns if t in c] for t in key[item==ele]]
            cols = [[c for c in rawData_train.columns if ((t==c) | (t in c) & ('lag' in c))] for t in key[item==ele]]
            cols = np.concatenate(cols)
            if verbose:
                print('Columns Processed:',key[item==ele],'\n')
            # Adding the transformed features to the new frame
            norm_scaler.fit(rawData_train[cols].values)
            pp_train[cols] = pd.DataFrame(norm_scaler.transform(rawData_train[cols].values))
            pp_test[cols] = pd.DataFrame(norm_scaler.transform(rawData_test[cols].values))
    # Rearanging columns before we return it
    pp_train,pp_test = pp_train[rawData_train.columns],pp_test[rawData_test.columns]
    # Return preprocessed data
    return pp_train.reset_index(drop=True),pp_test.reset_index(drop=True)
| true |
e6b4d6ecf8fd23790b020838b599585adac028a6 | Python | karan-sikarwa123/machine-learning-projects | /decisiontree.py | UTF-8 | 991 | 3.21875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor

# Load the dataset: column 1 is the position level, column 2 the salary.
dataset = pd.read_csv('Position_Salaries.csv')
x = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# NOTE(review): the split is computed but never used below; the regressor is
# deliberately fit on the full data, matching the original script.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

decisiontree_obj = DecisionTreeRegressor(random_state=0)
decisiontree_obj.fit(x, y)
y_pred = decisiontree_obj.predict(x)

# ----------------- visualize data ---------------
plt.scatter(x, y, color='red')
plt.plot(x, decisiontree_obj.predict(x), color='blue')
plt.title('Decision tree')
plt.xlabel('years of experience')
plt.ylabel('salary')
plt.show()
3f347db35c519205d69e9622201e21fb7619ab65 | Python | zclgni/compress-picture-by-opencv3 | /conpress_img.py | UTF-8 | 1,397 | 2.921875 | 3 | [] | no_license | #coding = utf-8
import cv2
import os
import math
def get_doc_size(path):
    """Return the size of the file at *path* in megabytes.

    On a file-system error (e.g. the file does not exist) the error is
    printed and None is returned, matching the original best-effort
    behaviour.  The original caught *every* exception, which also hid
    programming errors; only OSError is expected from os.path.getsize.
    """
    try:
        size = os.path.getsize(path)
    except OSError as err:
        print(err)
        return None
    return get_mb_size(size)
def get_mb_size(bytes):
    """Convert a byte count to megabytes, returned as a float."""
    return float(bytes) / 1024 / 1024
def delete_file(path):
    """Remove *path* if it exists; otherwise report that it is missing."""
    if not file_exist(path):
        print('no such file:%s' % path)
        return
    os.remove(path)
def file_exist(path):
    """Return True when *path* exists on disk."""
    return os.path.exists(path)
def resize_rate(path, resize_path, fx, fy):
    """Scale the image at *path* by factors (fx, fy) and write it to *resize_path*.

    The destination file is deleted first so the write starts clean.
    """
    image = read_image(path)
    im_resize = cv2.resize(image, None, fx=fx, fy=fy)
    delete_file(resize_path)
    save_image(resize_path, im_resize)
def save_image(path, image):
    """Write *image* (a numpy array) to *path* via OpenCV."""
    cv2.imwrite(path, image)
def read_image(path):
    """Load the image at *path* as a numpy array (cv2.imread returns None on failure)."""
    return cv2.imread(path)
path = "D://3//313.jpg" # absolute path of the image to be compressed
resize_path = "D://3//3.jpg" # where the compressed image is stored; '3.jpg' is the output file name
size = get_doc_size(path)
print(size) # original size, in MB
delete_file(resize_path)
filesize = 0.8 # target size in MB (original comment called this the "compression ratio")
while size > filesize:
    # Shrink both axes by 1/sqrt(rate) so the scaled *area* -- and roughly
    # the file size -- drops by about `rate`; the +0.1 adds a safety margin
    # per pass so the loop converges in few iterations.
    rate = math.ceil((size / filesize) * 10) / 10 + 0.1
    rate = math.sqrt(rate)
    rate = 1.0 / rate
    # After the first pass, keep shrinking the already-compressed copy.
    if file_exist(resize_path):
        resize_rate(resize_path, resize_path, rate, rate)
    else:
        resize_rate(path, resize_path, rate, rate)
    size = get_doc_size(resize_path)
3cd6287a0032ab49773b660b0dcc402bfd53c225 | Python | Mikescher/AdventOfCode2017 | /02_solution-1.py | UTF-8 | 237 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
import aoc

rawinput = aoc.read_input(2)

# Checksum: sum over rows of (max - min) of the tab-separated integers.
result = sum(
    max(row) - min(row)
    for row in (
        [int(cell) for cell in line.split('\t')]
        for line in rawinput.splitlines()
    )
)
print(result)
| true |
3b89173c0a5c961f67b994a39e593d0f947b1684 | Python | dkuspawono/puppet | /modules/mediawiki/files/hhvm/cleanup_cache | UTF-8 | 3,626 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
"""
hhvm_cleanup_cache
Prune stale tables from the HHVM bytecode cache.
Tables are deemed unused if they reference a repo schema other than the
current one.
"""
import sys
import logging
from logging.handlers import SysLogHandler
import os.path
import subprocess
import sqlite3
import argparse
# Template query: {} is filled with the current repo-schema suffix, so it
# selects every table whose name does NOT end with that schema -- i.e. the
# stale tables left over from previous HHVM builds.
TABLES_QUERY = """
SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE '%{}'
"""
def get_repo_schema():
    """
    Gets the repository schema version by invoking ``hhvm --repo-schema``
    and stripping the trailing newline.
    NOTE(review): on Python 3 check_output returns bytes, not str -- verify
    the callers handle that.
    """
    return subprocess.check_output(['/usr/bin/hhvm', '--repo-schema']).rstrip()
def delete_and_vacuum(dbh, tables):
    """Drop every table named in *tables* from *dbh*, then VACUUM the database.

    :param dbh: an open sqlite3 connection
    :param tables: iterable of table names (taken from sqlite_master, so trusted)
    """
    log = logging.getLogger('cleanup_hhvm_cache')
    cur = dbh.cursor()
    log.info("Deleting tables")
    for table in tables:
        log.debug("Deleting table %s", table)
        # Table names cannot be bound as SQL parameters; these come from
        # sqlite_master, not from user input.
        cur.execute("DROP TABLE {}".format(table))
    # VACUUM cannot run inside a transaction, and on Python >= 3.6 the
    # sqlite3 module opens an implicit transaction for DDL statements,
    # so the DROPs must be committed first.
    dbh.commit()
    log.info("Vacuuming the db")
    cur.execute("VACUUM")
    log.info("Done")
def setup_logging(debug=False):
    """Configure and return the 'cleanup_hhvm_cache' logger.

    debug=True  -> DEBUG-level output to stderr via basicConfig.
    debug=False -> INFO-level output straight to syslog (local3 facility),
                   with propagation disabled.
    """
    fmt = '%(name)s: %(levelname)s - %(message)s'
    logger = logging.getLogger('cleanup_hhvm_cache')
    if debug:
        # Debug mode: everything to stderr through the root logger.
        logging.basicConfig(level=logging.DEBUG, format=fmt)
        return logger
    # Normal mode: ship INFO+ records to syslog only.
    logger.setLevel(logging.INFO)
    logger.propagate = False
    syslog_handler = SysLogHandler(
        address='/dev/log',
        facility=SysLogHandler.LOG_LOCAL3)
    syslog_handler.setFormatter(logging.Formatter(fmt=fmt))
    logger.addHandler(syslog_handler)
    return logger
def main():
    """Parse CLI arguments, find stale bytecode-cache tables, and drop+vacuum them."""
    parser = argparse.ArgumentParser(
        prog="hhvm_cleanup_cache",
        description="Prune unused entries from a HHVM bytecode cache database"
    )
    parser.add_argument('--debug', action='store_true',
                        default=False, help="print debug information to stdout")
    parser.add_argument('--noop', action='store_true', default=False,
                        help="show what would be done, but take no action")
    parser.add_argument('filename',
                        help="the path of the bytecode cache database")
    args = parser.parse_args()
    log = setup_logging(args.debug)
    # Record the size up-front so the space reclaimed can be reported.
    repo_size_before = os.path.getsize(args.filename)
    try:
        repo_schema = get_repo_schema()
        if not repo_schema:
            log.error("Got an empty schema, cannot continue")
            sys.exit(1)
        else:
            log.info("Current schema version is %s", repo_schema)
        hhvm_db = args.filename
        with sqlite3.connect(hhvm_db) as dbh:
            cursor = dbh.cursor()
            # NOTE(review): on Python 3 repo_schema is bytes, so this
            # .format() would embed "b'...'" in the query -- confirm the
            # script runs under Python 2 or decode the schema first.
            tables_to_clean = [
                t for (t,) in cursor.execute(
                    TABLES_QUERY.format(repo_schema)
                ).fetchall()
            ]
            # To remove the annoying unicode marker
            printable_tables = ", ".join(tables_to_clean)
            if args.noop:
                log.info("Tables to remove: %s", printable_tables)
                log.info("NOT deleting tables (noop)")
            else:
                log.debug("Tables to remove: %s", printable_tables)
                delete_and_vacuum(dbh, tables_to_clean)
        repo_size_after = os.path.getsize(args.filename)
        kb_change = (repo_size_before - repo_size_after) / 1024.0
        log.info('Pruned %.2f kB.', kb_change)
    except Exception as e:
        # Top-level boundary: log any failure and exit non-zero.
        log.error("Execution failed with error %s", e)
        sys.exit(1)
if __name__ == '__main__':
    main()
| true |
5033fe4c192ac0f5a2912eefcddf14a157df8cd5 | Python | nlintz/ThinkDSP | /Chapter1/exercise7.py | UTF-8 | 1,789 | 2.96875 | 3 | [] | no_license | import os, sys
lib_path = os.path.abspath('../lib/ThinkDSP/code/')
sys.path.append(lib_path)
import thinkdsp
import thinkplot
import matplotlib.pyplot as pyplot
import helpers
import random
def harmonics(base_frequency=400):
    """Sum base_frequency and its 2nd-5th harmonics, then plot the spectrum."""
    signal = thinkdsp.CosSignal(base_frequency, amp=1.0, offset=0)
    for multiple in range(2, 6):
        signal = signal + thinkdsp.CosSignal(base_frequency * multiple, amp=1.0, offset=0)
    wave = signal.make_wave(duration=0.5, start=0, framerate=11025)
    spectrum = wave.make_spectrum()
    helpers.plot(spectrum)
def nonHarmonicFrequencies(base_frequency=400):
    """
    When you add non-harmonic frequencies, you make chords.

    Each partial is detuned by a random fraction of the base frequency, then
    the mix is written out as a wav file.
    """
    signal = thinkdsp.CosSignal(base_frequency, amp=1.0, offset=0)
    for multiple in range(2, 6):
        detuned = base_frequency * (multiple + random.random())
        signal = signal + thinkdsp.CosSignal(detuned, amp=1.0, offset=0)
    wave = signal.make_wave(duration=1, start=0, framerate=11025)
    spectrum = wave.make_spectrum()
    helpers.makeWavFromSpectrum(spectrum, wave, 'exercise7_with_nonHarmonics.wav')
def writeMajorChord(rootFrequency, thirdFrequency, fifthFrequency):
    """Sum three cosine partials (root, third, fifth) and write the chord to chord.wav."""
    partials = [
        thinkdsp.CosSignal(freq, amp=1.0, offset=0)
        for freq in (rootFrequency, thirdFrequency, fifthFrequency)
    ]
    chord = partials[0] + partials[1] + partials[2]
    wave = chord.make_wave(duration=1, start=0, framerate=11025)
    spectrum = wave.make_spectrum()
    helpers.makeWavFromSpectrum(spectrum, wave, 'chord.wav')
def main():
    """Entry point: write an A-major chord (440/523/659 Hz) to chord.wav."""
    writeMajorChord(440, 523, 659)


if __name__ == "__main__":
    main()
f787d593655dc9ee6fac1ba0bebe92e152355e5f | Python | AP-MI-2021/lab-4-irinaranga | /main.py | UTF-8 | 3,102 | 3.890625 | 4 | [] | no_license | def isPrime(x):
    '''
    Return True when x is prime, False otherwise.

    :param x: an integer (values below 2 are never prime)
    :return: True if x is prime, False otherwise
    '''
    if x < 2:
        return False
    # Trial division up to x//2 is sufficient (sqrt(x) would also do).
    for i in range(2, x//2 + 1):
        if x % i == 0:
            return False
    return True
def last_digit(x):
    '''
    Return True when the last digit of x equals a digit read from stdin.

    The digit is prompted interactively ("Dati cifra:"), as before.
    Fixes over the original: the while loop only ever ran one iteration
    (the "return False" sat inside it), x == 0 fell through returning
    None, and negative numbers misreported their last digit because of
    Python's modulo on negatives.

    :param x: an integer
    :return: True if the last digit of x equals the digit read, else False
    '''
    cifra = int(input("Dati cifra:"))
    # abs() so -13 reports 3 as its last digit; also makes 0 -> digit 0.
    return abs(x) % 10 == cifra
def citireLista():
    """Read a comma-separated list of integers from stdin and return it as a list."""
    raw = input("Dati lista, cu elementele separate prin virgula: ")
    return [int(token) for token in raw.split(",")]
def all_negative_numbers(l):
    '''
    Return the strictly negative elements of the list, preserving order.

    :param l: list of integers
    :return: a list containing the negative (non-zero) elements of l
    '''
    return [value for value in l if value < 0]
def test_all_negative_numbers():
    """Unit checks for all_negative_numbers()."""
    assert all_negative_numbers([12,-1,4,-8])==[-1,-8]
    assert all_negative_numbers([2,5,6,-1,-9,0,-7])==[-1,-9,-7]
def minim_with_last_digit(l):
    '''
    Return the smallest number in the list whose last digit equals a digit
    read from stdin.

    Fixes over the original: it compared ``x < min`` against the *builtin*
    ``min`` (a TypeError), re-prompted for the digit on every element, and
    returned a list instead of the minimum.  The digit is now read once.

    :param l: list of integers
    :return: the smallest matching element, or None when nothing matches
    '''
    cifra = int(input("Dati cifra:"))
    candidates = [x for x in l if abs(x) % 10 == cifra]
    return min(candidates) if candidates else None
def test_minim_with_last_digit():
    # NOTE(review): running this triggers the input() prompt inside
    # minim_with_last_digit, so it blocks when executed non-interactively.
    assert minim_with_last_digit([1,23,53,24])==23
def number_superprime(l):
    '''
    Return the elements of the list that are "superprime".

    A number is superprime when every decimal prefix of it is prime,
    e.g. 233 -> 2, 23 and 233 are all prime.  The original body was left
    unfinished (a ``while`` with no body, a syntax error); this implements
    the behaviour the docstring and menu describe.  A local prime test is
    used so the function is self-contained.

    :param l: list of integers
    :return: list of the superprime elements, preserving order
    '''
    def _is_prime(n):
        # Same trial-division test as isPrime() above.
        if n < 2:
            return False
        for d in range(2, n // 2 + 1):
            if n % d == 0:
                return False
        return True

    def _is_superprime(n):
        if n <= 0:
            return False
        # Dropping the last digit repeatedly enumerates all decimal prefixes.
        while n > 0:
            if not _is_prime(n):
                return False
            n //= 10
        return True

    return [x for x in l if _is_superprime(x)]
l = []
test_all_negative_numbers()
test_minim_with_last_digit()
while True:
print("1. Citire lista:")
print("2. Afisare nr. negative nenule:")
print("3.Afisare cel mai mic nr. care are ultima cifra egala cu o cifra citita de la tastatura:")
print("4. Afisarea tuturor nr. din lista care sunt superprime:")
print("5. Afisarea lista in care nr. >0 au fost inlocuite cu CMMDC al lor, iar cele negative cu cifrele lor in ordine inversa:")
optiune = input("Dati optiunea: ")
if optiune == "1":
l = citireLista()
elif optiune == "2":
print(all_negative_numbers(l))
elif optiune == "3":
print(minim_with_last_digit(l))
elif optiune == "4":
print(last_digit(x))
elif optiune == "x":
break
else:
print("Optiune gresita! Reincercati!")
main()
| true |
289196ec448470e6454a15e79799e683aa593b63 | Python | Yasaman1997/My_Python_Training | /Test/till you get 100/__init__.py | UTF-8 | 164 | 4 | 4 | [] | no_license |
name = input("enter your name: \n")
age = int(input(" your age : \n "))
# The year this person turns 100, counting from 2017.
century_year = (2017 - age) + 100
print(name + " will be 100 years old in the year " + str(century_year))
0cefd2453ead41582697da8762605e13f08f8363 | Python | huangdaoxu/Machine_Learning | /autoencoder/train.py | UTF-8 | 3,653 | 2.796875 | 3 | [] | no_license | """
Created on 2017-12-13 23:05
@author: huangdaoxu
"""
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import matplotlib.pyplot as plt
class autoencoder(object):
    """Fully connected 784-64-32-16-32-64-784 autoencoder for MNIST.

    Written for TensorFlow 1.x and Python 2 (print statements, xrange).
    The encoder/decoder weights are plain tf.Variables created eagerly
    in __init__; training minimises the pixel-wise MSE reconstruction loss.
    """
    def __init__(self):
        # Input placeholder: a batch of flattened 28x28 MNIST images.
        self._X = tf.placeholder(tf.float32, shape=[None,784], name='X')
        # encoder params
        self._en_w1 = tf.Variable(tf.truncated_normal([784, 64], mean=0.0, stddev=1.0, dtype=tf.float32), name='en_w1')
        self._en_b1 = tf.Variable(tf.truncated_normal([64], mean=0.0, stddev=1.0, dtype=tf.float32), name='en_b1')
        self._en_w2 = tf.Variable(tf.truncated_normal([64, 32], mean=0.0, stddev=1.0, dtype=tf.float32), name='en_w2')
        self._en_b2 = tf.Variable(tf.truncated_normal([32], mean=0.0, stddev=1.0, dtype=tf.float32), name='en_b2')
        self._en_w3 = tf.Variable(tf.truncated_normal([32, 16], mean=0.0, stddev=1.0, dtype=tf.float32), name='en_w3')
        self._en_b3 = tf.Variable(tf.truncated_normal([16], mean=0.0, stddev=1.0, dtype=tf.float32), name='en_b3')
        # decoder params
        self._de_w1 = tf.Variable(tf.truncated_normal([16, 32], mean=0.0, stddev=1.0, dtype=tf.float32), name='de_w1')
        self._de_b1 = tf.Variable(tf.truncated_normal([32], mean=0.0, stddev=1.0, dtype=tf.float32), name='de_b1')
        self._de_w2 = tf.Variable(tf.truncated_normal([32, 64], mean=0.0, stddev=1.0, dtype=tf.float32), name='de_w2')
        self._de_b2 = tf.Variable(tf.truncated_normal([64], mean=0.0, stddev=1.0, dtype=tf.float32), name='de_b2')
        self._de_w3 = tf.Variable(tf.truncated_normal([64, 784], mean=0.0, stddev=1.0, dtype=tf.float32), name='de_w3')
        self._de_b3 = tf.Variable(tf.truncated_normal([784], mean=0.0, stddev=1.0, dtype=tf.float32), name='de_b3')
    def encode(self):
        """Build the 784 -> 64 -> 32 -> 16 encoder graph and return the code tensor."""
        # activation is important ,we must choice a right function.
        en_result1 = tf.nn.sigmoid(tf.matmul(self._X, self._en_w1) + self._en_b1)
        en_result2 = tf.nn.sigmoid(tf.matmul(en_result1, self._en_w2) + self._en_b2)
        en_result3 = tf.nn.sigmoid(tf.matmul(en_result2, self._en_w3) + self._en_b3)
        return en_result3
    def decode(self, encode):
        """Build the 16 -> 32 -> 64 -> 784 decoder graph from the code tensor."""
        # activation is important ,we must choice a right function.
        de_result1 = tf.nn.sigmoid(tf.matmul(encode, self._de_w1) + self._de_b1)
        de_result2 = tf.nn.sigmoid(tf.matmul(de_result1, self._de_w2) + self._de_b2)
        de_result3 = tf.nn.sigmoid(tf.matmul(de_result2, self._de_w3) + self._de_b3)
        return de_result3
    def train(self, epoch=5, batch_size=256):
        """Train on MNIST with Adam (MSE loss), then plot 10 test reconstructions."""
        y_pred = self.decode(self.encode())
        # Pixel-wise mean-squared reconstruction error.
        loss = tf.reduce_mean(tf.square(self._X - y_pred))
        optmizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        total_batch = int(mnist.train.num_examples / batch_size)
        for i in xrange(epoch):
            for j in xrange(total_batch):
                batch_xs, _ = mnist.train.next_batch(batch_size)
                sess.run(optmizer, feed_dict={self._X: batch_xs})
            # Loss is reported on the last batch of the epoch only.
            print 'epoch is {0},loss is {1}'.format(i, sess.run(loss, feed_dict={self._X: batch_xs}))
        data_pred = sess.run(y_pred, feed_dict={self._X: mnist.test.images[0:10]})
        # Top row: originals; bottom row: reconstructions.
        f, a = plt.subplots(2, 10, figsize=(10, 2))
        for j in xrange(data_pred.shape[0]):
            a[0][j].imshow(mnist.test.images[j].reshape(28,28))
            a[1][j].imshow(data_pred[j].reshape(28,28))
        plt.show()
if __name__ == "__main__":
    ae = autoencoder()
    ae.train()
97fc0303cb5b65f13eadee82c1a86ca22fe7eefc | Python | imishinist/dfa-sample | /main_test.py | UTF-8 | 3,144 | 2.984375 | 3 | [] | no_license | import unittest
from main import FARule
from main import DFADesign
from main import DFARulebook
from main import NFADesign
from main import NFARulebook
from main import NFASimulation
class TestNFARulebook(unittest.TestCase):
    """Tests for NFARulebook (imported from the project module `main`)."""
    def test_alphabet(self):
        """alphabet() lists the real input symbols only."""
        rulebook = NFARulebook([
            FARule(1, 'a', 1), FARule(1, 'a', 2), FARule(1, None, 2),
            FARule(2, 'b', 3),
            FARule(3, 'b', 1), FARule(3, None, 2),
        ])
        # None marks a free (epsilon) move; it must not appear in the alphabet.
        assert rulebook.alphabet() == {"a", "b"}
class TestDFA(unittest.TestCase):
    """Tests for the deterministic automaton design."""
    def test_dfa(self):
        # DFA accepting strings that contain the substring "ab":
        # state 3 is the (absorbing) accept state.
        dfa = DFADesign(1, [3], DFARulebook([
            FARule(1, 'a', 2), FARule(1, 'b', 1),
            FARule(2, 'a', 2), FARule(2, 'b', 3),
            FARule(3, 'a', 3), FARule(3, 'b', 3),
        ]))
        assert not dfa.accept("a")
        assert not dfa.accept("baa")
        assert dfa.accept("baba")
class TestNFA(unittest.TestCase):
    """Tests for the nondeterministic automaton design."""
    def test_nfa(self):
        # NFA accepting strings whose third-from-last character is 'b'.
        nfa = NFADesign(1, [4], NFARulebook([
            FARule(1, 'a', 1), FARule(1, 'b', 1), FARule(1, 'b', 2),
            FARule(2, 'a', 3), FARule(2, 'b', 3),
            FARule(3, 'a', 4), FARule(3, 'b', 4),
        ]))
        assert nfa.accept("bab")
        assert nfa.accept("bbbbb")
        assert not nfa.accept("bbabb")
    def test_nfa_with_current_states(self):
        """Free moves are followed when computing the current state set."""
        nfa = NFADesign(1, {3}, NFARulebook([
            FARule(1, 'a', 1), FARule(1, 'a', 2), FARule(1, None, 2),
            FARule(2, 'b', 3),
            FARule(3, 'b', 1), FARule(3, None, 2),
        ]))
        # Starting in 1, the free move 1 -> 2 puts the NFA in {1, 2}.
        assert nfa.to_nfa().get_current_states() == {1, 2}
        assert nfa.to_nfa({2}).get_current_states() == {2}
        assert nfa.to_nfa({3}).get_current_states() == {3, 2}
    def test_nfa_free_move(self):
        # Epsilon moves pick between an 'aa'-cycle and an 'aaa'-cycle, so the
        # machine accepts strings of a's whose length is a multiple of 2 or 3.
        nfa = NFADesign(1, [2, 4], NFARulebook([
            FARule(1, None, 2), FARule(1, None, 4),
            FARule(2, 'a', 3),
            FARule(3, 'a', 2),
            FARule(4, 'a', 5),
            FARule(5, 'a', 6),
            FARule(6, 'a', 4),
        ]))
        assert nfa.accept('aa')
        assert nfa.accept('aaa')
        assert not nfa.accept('aaaaa')
        assert nfa.accept('aaaaaa')
class TestNFASimulation(unittest.TestCase):
    """Tests for NFA-to-DFA simulation (subset construction)."""
    def test_next_state(self):
        nfa_design = NFADesign(1, {3}, NFARulebook([
            FARule(1, 'a', 1), FARule(1, 'a', 2), FARule(1, None, 2),
            FARule(2, 'b', 3),
            FARule(3, 'b', 1), FARule(3, None, 2),
        ]))
        simulation = NFASimulation(nfa_design)
        # Each simulated "state" is the set of NFA states (with free moves
        # already followed) reachable after reading the given character.
        assert simulation.next_state({1, 2}, 'a') == {1, 2}
        assert simulation.next_state({1, 2}, 'b') == {3, 2}
        assert simulation.next_state({3, 2}, 'b') == {1, 3, 2}
        assert simulation.next_state({1, 3, 2}, 'b') == {1, 3, 2}
        assert simulation.next_state({1, 3, 2}, 'a') == {1, 2}
    def test_rules_for(self):
        # TODO(review): this test builds the simulation but asserts nothing;
        # it was left unfinished and should check simulation.rules_for(...).
        nfa_design = NFADesign(1, {3}, NFARulebook([
            FARule(1, 'a', 1), FARule(1, 'a', 2), FARule(1, None, 2),
            FARule(2, 'b', 3),
            FARule(3, 'b', 1), FARule(3, None, 2),
        ]))
        simulation = NFASimulation(nfa_design)
if __name__ == "__main__":
    unittest.main()
| true |
0886e75497833c68b5cde160399ce85fa2439b22 | Python | kubikowski/PythonScripts | /dotify_image/_rgb_color.py | UTF-8 | 269 | 2.953125 | 3 | [] | no_license | from typing import Final, NamedTuple, Tuple
RGB: Final[str] = 'RGB'
class RGBColor(NamedTuple):
red: int
green: int
blue: int
@staticmethod
def of(color: Tuple[int, int, int]) -> 'RGBColor':
return RGBColor(color[0], color[1], color[2])
| true |
ff6c55293354ef668d8e5f3ca72d9457d63ad839 | Python | snowjamai/LeetCode | /palindrome-linked-list/palindrome-linked-list.py | UTF-8 | 657 | 3.359375 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
from collections import deque
class Solution:
    def isPalindrome(self, head: Optional[ListNode]) -> bool:
        """Return True when the linked list reads the same forwards and backwards.

        Fixes over the original: head.val was dereferenced before any None
        check, so an empty list crashed; the deque bookkeeping is replaced
        by a simple value-list palindrome comparison (same O(n) time/space).
        """
        values = []
        node = head
        while node is not None:
            values.append(node.val)
            node = node.next
        # An empty or single-element list is trivially a palindrome.
        return values == values[::-1]
| true |
3b3322a07221ca39815337eb44c7269fca72977e | Python | YerardinPerlaza/AirBnB_clone_v2 | /2-do_deploy_web_static.py | UTF-8 | 1,452 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python3
# Fabric script (based on the file 1-pack_web_static.py) that
# distributes an archive to your web servers
from fabric.api import *
from os import path
# Fabric only honours env.hosts (plural); the original assigned env.host,
# a typo that left the task with no default host list.
env.hosts = ['35.190.183.78', '34.226.194.149']
def do_deploy(archive_path):
    '''Distributes an archive to your web servers.

    archive_path: local path of a .tgz archive produced by do_pack.
    Returns True when every remote step succeeds, False otherwise.
    '''
    # Guard against None as well as an empty or missing path.
    if not archive_path or not path.exists(archive_path):
        print("{} doesn’t exist".format(archive_path))
        return False
    # Take the last path component so nested paths (e.g. versions/foo.tgz)
    # work too; the original split('/')[1] assumed exactly one slash.
    file = archive_path.split('/')[-1]
    name_of_file = file.split('.')[0]
    folder_web = "/data/web_static/releases/{}".format(name_of_file)
    current = "/data/web_static/current"
    # Upload, unpack into a fresh release folder, flatten it, then repoint
    # the 'current' symlink.  Every step bails out early on failure.
    if put(archive_path, "/tmp/{}".format(file)).failed is True:
        return False
    if run("rm -rf {}/".format(folder_web)).failed is True:
        return False
    if run("mkdir -p {}/".format(folder_web)).failed is True:
        return False
    if run("tar -xzf /tmp/{} -C {}/".format(file, folder_web)).failed is True:
        return False
    if run("rm -rf /tmp/{}".format(file)).failed is True:
        return False
    if run("mv {}/web_static/* {}/".
           format(folder_web, folder_web)).failed is True:
        return False
    if run("rm -rf {}/web_static".format(folder_web)).failed is True:
        return False
    if run("rm -rf {}".format(current)).failed is True:
        return False
    if run("ln -s {}/ {}".format(folder_web, current)).failed is True:
        return False
    return True