blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f57f2441f24f7db7e5c9a9d3613376ab06d44cf8
|
Python
|
wonyeonglee/studyCT
|
/Greedy/greedyQuiz7.py
|
UTF-8
| 278
| 3.671875
| 4
|
[] |
no_license
|
# Sugar delivery (Baekjoon 2839): carry exactly N kg using 3 kg and 5 kg bags,
# minimising the number of bags; print -1 when no exact combination exists.

def min_sugar_bags(n):
    """Return the minimum number of 3 kg / 5 kg bags summing to n, or -1.

    Start from the largest possible number of 5 kg bags and trade them away
    one at a time until the remainder is divisible by 3. The original
    two-pass greedy (fill 5s, then 3s, then retry with only 3s) missed
    mixed solutions such as n=11 -> 5+3+3 (it printed -1).
    """
    for fives in range(n // 5, -1, -1):
        remainder = n - 5 * fives
        if remainder % 3 == 0:
            # More 5 kg bags always means fewer bags total, so the first hit wins.
            return fives + remainder // 3
    return -1


if __name__ == "__main__":
    n = int(input())
    print(min_sugar_bags(n))
| true
|
64f847f9400c047977543ab95ccb1e5762468512
|
Python
|
ParkJeongseop/Algorithm
|
/Python/11399.py
|
UTF-8
| 158
| 2.890625
| 3
|
[] |
no_license
|
from itertools import accumulate


def total_waiting_time(times):
    """Minimum total waiting time for the ATM problem (BOJ 11399).

    Serving the shortest jobs first is optimal; person i then waits the
    prefix sum of the first i+1 sorted times, so the answer is the sum of
    all prefix sums. `accumulate` computes them in one O(n log n) pass
    instead of the original O(n^2) double loop.
    """
    return sum(accumulate(sorted(times)))


if __name__ == "__main__":
    input()  # first line is the count; the list length is authoritative
    times = map(int, input().split())
    print(total_waiting_time(times))
| true
|
3c95d0a3127670efc01bd4272d04d64c17f9635e
|
Python
|
daobilige-su/guts
|
/GPy-0.4.6/GPy/examples/regression.py
|
UTF-8
| 11,878
| 2.9375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
Gaussian Processes regression examples
"""
import pylab as pb
import numpy as np
import GPy
def toy_rbf_1d(optimizer='tnc', max_nb_eval_optim=100):
    """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
    dataset = GPy.util.datasets.toy_rbf_1d()
    # Build a plain GP regression model on the toy data.
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    # Constrain, then fit with the requested optimizer and evaluation budget.
    model.ensure_default_constraints()
    model.optimize(optimizer, max_f_eval=max_nb_eval_optim)
    # Visualise and report the fitted model.
    model.plot()
    print(model)
    return model
def rogers_girolami_olympics(optim_iters=100):
    """Run a standard Gaussian process regression on the Rogers and Girolami olympics data."""
    dataset = GPy.util.datasets.rogers_girolami_olympics()
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    # The default lengthscale of 1 is far too short for year-scale inputs.
    model['rbf_lengthscale'] = 10
    model.ensure_default_constraints()
    model.optimize(max_f_eval=optim_iters)
    # Plot slightly beyond the data range on both sides.
    model.plot(plot_limits=(1850, 2050))
    print(model)
    return model
def toy_rbf_1d_50(optim_iters=100):
    """Run a simple demonstration of a standard Gaussian process fitting it to data sampled from an RBF covariance."""
    dataset = GPy.util.datasets.toy_rbf_1d_50()
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    # Constrain and optimize within the evaluation budget.
    model.ensure_default_constraints()
    model.optimize(max_f_eval=optim_iters)
    # Visualise and report the fitted model.
    model.plot()
    print(model)
    return model
def silhouette(optim_iters=100):
    """Predict the pose of a figure given a silhouette. This is a task from Agarwal and Triggs 2004 ICML paper."""
    dataset = GPy.util.datasets.silhouette()
    model = GPy.models.GPRegression(dataset['X'], dataset['Y'])
    model.ensure_default_constraints()
    # Optimize verbosely; no plot for this high-dimensional task.
    model.optimize(messages=True, max_f_eval=optim_iters)
    print(model)
    return model
def coregionalisation_toy2(optim_iters=100):
    """
    A simple demonstration of coregionalisation on two sinusoidal functions.
    """
    # Two input sets of different sizes and ranges; the second output is offset by +2.
    X1 = np.random.rand(50,1)*8
    X2 = np.random.rand(30,1)*5
    # Output index (0 or 1) appended to the inputs as a second column.
    index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
    X = np.hstack((np.vstack((X1,X2)),index))
    Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05
    Y2 = np.sin(X2) + np.random.randn(*X2.shape)*0.05 + 2.
    Y = np.vstack((Y1,Y2))
    # Shared latent kernel (rbf + bias) crossed with a rank-1 coregionalisation
    # kernel over the 2 outputs.
    k1 = GPy.kern.rbf(1) + GPy.kern.bias(1)
    k2 = GPy.kern.Coregionalise(2,1)
    k = k1.prod(k2,tensor=True)
    m = GPy.models.GPRegression(X,Y,kernel=k)
    # Fix the rbf variance at 1; output scaling lives in the coregionalisation kernel.
    m.constrain_fixed('.*rbf_var',1.)
    #m.constrain_positive('.*kappa')
    m.ensure_default_constraints()
    m.optimize('sim',messages=1,max_f_eval=optim_iters)
    # Predict each output on a dense grid by fixing the index column to 0 / 1.
    pb.figure()
    Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
    Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
    mean, var,low,up = m.predict(Xtest1)
    GPy.util.plot.gpplot(Xtest1[:,0],mean,low,up)
    mean, var,low,up = m.predict(Xtest2)
    GPy.util.plot.gpplot(Xtest2[:,0],mean,low,up)
    # Overlay the noisy training points for both outputs.
    pb.plot(X1[:,0],Y1[:,0],'rx',mew=2)
    pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
    return m
def coregionalisation_toy(optim_iters=100):
    """
    A simple demonstration of coregionalisation on two sinusoidal functions.
    """
    # Two input sets; the second output is the negated sine (no offset this time).
    X1 = np.random.rand(50,1)*8
    X2 = np.random.rand(30,1)*5
    # Output index (0 or 1) appended to the inputs as a second column.
    index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
    X = np.hstack((np.vstack((X1,X2)),index))
    Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05
    Y2 = -np.sin(X2) + np.random.randn(*X2.shape)*0.05
    Y = np.vstack((Y1,Y2))
    # rbf latent kernel crossed with a full-rank (rank-2) coregionalisation kernel.
    k1 = GPy.kern.rbf(1)
    k2 = GPy.kern.Coregionalise(2,2)
    k = k1.prod(k2,tensor=True)
    m = GPy.models.GPRegression(X,Y,kernel=k)
    # Fix the rbf variance at 1; output scaling lives in the coregionalisation kernel.
    m.constrain_fixed('.*rbf_var',1.)
    #m.constrain_positive('kappa')
    m.ensure_default_constraints()
    m.optimize(max_f_eval=optim_iters)
    # Predict each output on a dense grid by fixing the index column to 0 / 1.
    pb.figure()
    Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
    Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
    mean, var,low,up = m.predict(Xtest1)
    GPy.util.plot.gpplot(Xtest1[:,0],mean,low,up)
    mean, var,low,up = m.predict(Xtest2)
    GPy.util.plot.gpplot(Xtest2[:,0],mean,low,up)
    # Overlay the noisy training points for both outputs.
    pb.plot(X1[:,0],Y1[:,0],'rx',mew=2)
    pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
    return m
def coregionalisation_sparse(optim_iters=100):
    """
    A simple demonstration of coregionalisation on two sinusoidal functions using sparse approximations.
    """
    # Larger data sets than the dense demos, hence the sparse approximation.
    X1 = np.random.rand(500,1)*8
    X2 = np.random.rand(300,1)*5
    # Output index (0 or 1) appended to the inputs as a second column.
    index = np.vstack((np.zeros_like(X1),np.ones_like(X2)))
    X = np.hstack((np.vstack((X1,X2)),index))
    Y1 = np.sin(X1) + np.random.randn(*X1.shape)*0.05
    Y2 = -np.sin(X2) + np.random.randn(*X2.shape)*0.05
    Y = np.vstack((Y1,Y2))
    # Inducing inputs: random locations with a random output index in the second column.
    num_inducing = 40
    Z = np.hstack((np.random.rand(num_inducing,1)*8,np.random.randint(0,2,num_inducing)[:,None]))
    # rbf x rank-2 coregionalisation kernel, plus white noise for jitter/stability.
    k1 = GPy.kern.rbf(1)
    k2 = GPy.kern.Coregionalise(2,2)
    k = k1.prod(k2,tensor=True) + GPy.kern.white(2,0.001)
    m = GPy.models.SparseGPRegression(X,Y,kernel=k,Z=Z)
    # Fix rbf variance and the inducing inputs; bound the observation noise.
    m.constrain_fixed('.*rbf_var',1.)
    m.constrain_fixed('iip')
    m.constrain_bounded('noise_variance',1e-3,1e-1)
    m.ensure_default_constraints()
    m.optimize_restarts(5, robust=True, messages=1, max_f_eval=optim_iters)
    #plotting:
    pb.figure()
    Xtest1 = np.hstack((np.linspace(0,9,100)[:,None],np.zeros((100,1))))
    Xtest2 = np.hstack((np.linspace(0,9,100)[:,None],np.ones((100,1))))
    mean, var,low,up = m.predict(Xtest1)
    GPy.util.plot.gpplot(Xtest1[:,0],mean,low,up)
    mean, var,low,up = m.predict(Xtest2)
    GPy.util.plot.gpplot(Xtest2[:,0],mean,low,up)
    pb.plot(X1[:,0],Y1[:,0],'rx',mew=2)
    pb.plot(X2[:,0],Y2[:,0],'gx',mew=2)
    # Mark the inducing-point locations along the bottom of the axes,
    # coloured by which output each belongs to.
    y = pb.ylim()[0]
    pb.plot(Z[:,0][Z[:,1]==0],np.zeros(np.sum(Z[:,1]==0))+y,'r|',mew=2)
    pb.plot(Z[:,0][Z[:,1]==1],np.zeros(np.sum(Z[:,1]==1))+y,'g|',mew=2)
    return m
def multiple_optima(gene_number=937,resolution=80, model_restarts=10, seed=10000, optim_iters=300):
    """Show an example of a multimodal error surface for Gaussian process regression. Gene 939 has bimodal behaviour where the noisey mode is higher."""
    # Contour over a range of length scales and signal/noise ratios.
    length_scales = np.linspace(0.1, 60., resolution)
    log_SNRs = np.linspace(-3., 4., resolution)
    data = GPy.util.datasets.della_gatta_TRP63_gene_expression(gene_number)
    # Sub sample the data to ensure multiple optima
    #data['Y'] = data['Y'][0::2, :]
    #data['X'] = data['X'][0::2, :]
    # Remove the mean (no bias kernel to ensure signal/noise is in RBF/white)
    data['Y'] = data['Y'] - np.mean(data['Y'])
    # Likelihood surface over the (lengthscale, SNR) grid.
    lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)
    pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)
    ax = pb.gca()
    pb.xlabel('length scale')
    pb.ylabel('log_10 SNR')
    # Remember the axis limits so arrows drawn below don't rescale the plot.
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # Now run a few optimizations
    models = []
    # (start, end) positions of each optimisation run, reused every iteration.
    optim_point_x = np.empty(2)
    optim_point_y = np.empty(2)
    np.random.seed(seed=seed)
    for i in range(0, model_restarts):
        #kern = GPy.kern.rbf(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))
        kern = GPy.kern.rbf(1, variance=np.random.uniform(1e-3,1), lengthscale=np.random.uniform(5,50))
        m = GPy.models.GPRegression(data['X'],data['Y'], kernel=kern)
        m['noise_variance'] = np.random.uniform(1e-3,1)
        optim_point_x[0] = m['rbf_lengthscale']
        optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
        # optimize
        m.ensure_default_constraints()
        m.optimize('scg', xtol=1e-6, ftol=1e-6, max_f_eval=optim_iters)
        optim_point_x[1] = m['rbf_lengthscale']
        optim_point_y[1] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);
        # Draw an arrow from the random initialisation to the converged optimum.
        pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1]-optim_point_x[0], optim_point_y[1]-optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')
        models.append(m)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # NOTE(review): only the model from the last restart is returned even though
    # all restarts are collected in `models` (see the commented-out tuple below).
    return m #(models, lls)
def _contour_data(data, length_scales, log_SNRs, kernel_call=GPy.kern.rbf):
    """Evaluate the GP objective function for a given data set for a range of signal to noise ratios and a range of lengthscales.

    :data_set: A data set from the utils.datasets director.
    :length_scales: a list of length scales to explore for the contour plot.
    :log_SNRs: a list of base 10 logarithm signal to noise ratios to explore for the contour plot.
    :kernel: a kernel to use for the 'signal' portion of the data.

    Returns a 2D array of log-likelihoods, rows indexed by log_SNR and
    columns by length scale.
    """
    lls = []
    total_var = np.var(data['Y'])
    kernel = kernel_call(1, variance=1., lengthscale=1.)
    # One model is built and its parameters mutated on each grid point,
    # so no re-fitting happens -- only likelihood evaluations.
    Model = GPy.models.GPRegression(data['X'], data['Y'], kernel=kernel)
    for log_SNR in log_SNRs:
        SNR = 10.**log_SNR
        # Split the total variance into noise and signal parts for this SNR.
        noise_var = total_var/(1.+SNR)
        signal_var = total_var - noise_var
        Model.kern['.*variance'] = signal_var
        Model['noise_variance'] = noise_var
        length_scale_lls = []
        for length_scale in length_scales:
            Model['.*lengthscale'] = length_scale
            length_scale_lls.append(Model.log_likelihood())
        lls.append(length_scale_lls)
    return np.array(lls)
def sparse_GP_regression_1D(N = 400, num_inducing = 5, optim_iters=100):
    """Run a 1D example of a sparse GP regression."""
    # Noisy observations of sin(x) on [-3, 3].
    X = np.random.uniform(-3., 3., (N, 1))
    Y = np.sin(X) + np.random.randn(N, 1) * 0.05
    # rbf signal kernel plus white noise.
    kernel = GPy.kern.rbf(1) + GPy.kern.white(1)
    model = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing=num_inducing)
    model.ensure_default_constraints()
    # Sanity-check gradients before optimising.
    model.checkgrad(verbose=1)
    model.optimize('tnc', messages = 1, max_f_eval=optim_iters)
    model.plot()
    return model
def sparse_GP_regression_2D(N = 400, num_inducing = 50, optim_iters=100):
    """Run a 2D example of a sparse GP regression."""
    # Noisy observations of sin(x0)*sin(x1) on [-3, 3]^2.
    X = np.random.uniform(-3., 3., (N, 2))
    Y = np.sin(X[:, 0:1]) * np.sin(X[:, 1:2]) + np.random.randn(N, 1) * 0.05
    # rbf signal kernel plus white noise.
    kernel = GPy.kern.rbf(2) + GPy.kern.white(2)
    model = GPy.models.SparseGPRegression(X, Y, kernel, num_inducing = num_inducing)
    # contrain all parameters to be positive (but not inducing inputs)
    model.ensure_default_constraints()
    # Start from a sensible lengthscale before checking gradients.
    model.set('.*len', 2.)
    model.checkgrad()
    # optimize and plot
    model.optimize('tnc', messages = 1, max_f_eval=optim_iters)
    model.plot()
    print(model)
    return model
def uncertain_inputs_sparse_regression(optim_iters=100):
    """Run a 1D example of a sparse GP regression with uncertain inputs."""
    # Side-by-side axes: certain inputs (left) vs uncertain inputs (right).
    fig, axes = pb.subplots(1,2,figsize=(12,5))
    # sample inputs and outputs
    S = np.ones((20,1))  # input variance, one value per training point
    X = np.random.uniform(-3.,3.,(20,1))
    Y = np.sin(X)+np.random.randn(20,1)*0.05
    #likelihood = GPy.likelihoods.Gaussian(Y)
    Z = np.random.uniform(-3.,3.,(7,1))  # inducing input locations
    k = GPy.kern.rbf(1) + GPy.kern.white(1)
    # create simple GP Model - no input uncertainty on this one
    m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z)
    m.ensure_default_constraints()
    m.optimize('scg', messages=1, max_f_eval=optim_iters)
    m.plot(ax=axes[0])
    axes[0].set_title('no input uncertainty')
    #the same Model with uncertainty
    m = GPy.models.SparseGPRegression(X, Y, kernel=k, Z=Z, X_variance=S)
    m.ensure_default_constraints()
    m.optimize('scg', messages=1, max_f_eval=optim_iters)
    m.plot(ax=axes[1])
    axes[1].set_title('with input uncertainty')
    print(m)
    fig.canvas.draw()
    # Only the second (uncertain-input) model is returned.
    return m
| true
|
2fa07fcbe8a7255a871ef21574bd0f0900d996d5
|
Python
|
Energy-Queensland/nem-reader
|
/print_examples.py
|
UTF-8
| 2,380
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
import nemreader as nr
def output_file():
    """Convert the multi-meter NEM12 example file to CSV via output_as_csv."""
    from nemreader import output_as_csv
    file_name = "examples/unzipped/Example_NEM12_multiple_meters.csv"
    # The original bound the result to a local named `output_file`, shadowing
    # this function's own name; the returned path was never used anyway.
    output_as_csv(file_name)
def pandas_df():
    """Load the multi-meter NEM12 example and print a per-NMI DataFrame summary."""
    from nemreader import output_data_frames
    file_name = "examples/unzipped/Example_NEM12_multiple_meters.csv"
    # output_data_frames yields (nmi, DataFrame) pairs.
    for nmi, frame in output_data_frames(file_name):
        print(nmi, frame.dtypes)
        print(frame.describe())
        print(frame.head())
        print(frame.tail())
def print_meter_record(file_path, rows=5):
    """ Output readings for specified number of rows to console """
    meter_data = nr.read_nem_file(file_path)
    print("Header:", meter_data.header)
    print("Transactions:", meter_data.transactions)
    # readings is keyed by NMI, then by channel; show the last `rows` readings.
    for nmi in meter_data.readings:
        channels = meter_data.readings[nmi]
        for channel in channels:
            print(nmi, "Channel", channel)
            for reading in channels[channel][-rows:]:
                print("", reading)
def print_examples():
    """Print the last few readings of every bundled example NEM file.

    Replaces nine copy-pasted print/print_meter_record stanzas with one
    data-driven loop; console output is identical to the original.
    """
    examples = [
        ("Example NEM12 - Actual Interval:", "examples/unzipped/Example_NEM12_actual_interval.csv"),
        ("Example NEM12 - Substituted Interval:", "examples/unzipped/Example_NEM12_substituted_interval.csv"),
        ("Example NEM12 - Multiple Quality Methods:", "examples/unzipped/Example_NEM12_multiple_quality.csv"),
        ("Example NEM12 - Multiple Meters:", "examples/unzipped/Example_NEM12_multiple_meters.csv"),
        ("Example NEM13 - Actual Read:", "examples/unzipped/Example_NEM13_consumption_data.csv"),
        ("Example NEM13 - Forward Estimates:", "examples/unzipped/Example_NEM13_forward_estimate.csv"),
        ("Real NEM13 Example:", "examples/NEM13#DATA_14121801#WBAYM#3044076134.V01"),
        ("Real NEM12 Example:", "examples/NEM12#DATA_16081001#WBAYM#3044076134.V01"),
        ("Zipped NEM12 Example:", "examples/NEM12#NEM1201005Scenario1#GLOBALM#NEMMCO.ZIP"),
    ]
    for i, (title, path) in enumerate(examples):
        # The original printed a leading newline before every title except the first.
        print(("" if i == 0 else "\n") + title)
        print("-" * 10)
        print_meter_record(path, 5)
# Script entry point: run one of the demos (the others are left commented out).
if __name__ == "__main__":
    # output_file()
    pandas_df()
    # print_examples()
| true
|
3d6bffb4164c62aba2ec9f9b76f20998ce25b75c
|
Python
|
chippolot/advent-of-code
|
/2020/07/7_1.py
|
UTF-8
| 759
| 3.34375
| 3
|
[] |
no_license
|
# Advent of Code 2020, day 7 part 1: how many bag colors can eventually
# contain at least one shiny gold bag?

def parse_container_map(lines):
    """Build a reverse map: bag color -> list of colors that directly contain it.

    Each input line looks like
    'light red bags contain 1 bright white bag, 2 muted yellow bags.'
    (The original stored this in a global named `map`, shadowing the builtin.)
    """
    contained_in = {}
    for line in lines:
        color, rules = line.split(' bags contain ')
        rules = rules[:-1]  # strip trailing '.'
        if rules == 'no other bags':
            continue
        for rule in rules.split(', '):
            toks = rule.split(' ')
            # toks[0] is the count (unused in part 1); the words between the
            # count and the trailing 'bag'/'bags' form the inner color.
            inner_color = ' '.join(toks[1:-1])
            contained_in.setdefault(inner_color, []).append(color)
    return contained_in


def count_outer_containers(contained_in, start_color):
    """Number of distinct colors that can (transitively) contain start_color."""
    # Iterative DFS avoids Python's recursion limit on deep container chains
    # (the original recursed and printed every visited color as debug output).
    seen = set()
    stack = [start_color]
    while stack:
        color = stack.pop()
        if color in seen:
            continue
        seen.add(color)
        stack.extend(contained_in.get(color, []))
    # start_color itself ends up in `seen`, so exclude it from the count.
    return len(seen) - 1


if __name__ == '__main__':
    # `with` closes the file; the original left the handle open.
    with open('input.txt', 'r') as f:
        lines = f.read().splitlines()
    print(count_outer_containers(parse_container_map(lines), 'shiny gold'))
| true
|
d09b22bb1f78b0d1ab38f96a7aba7979df8bd4bf
|
Python
|
chuck1l/market_intelligence
|
/src/fea_ing_description.py
|
UTF-8
| 1,738
| 3.03125
| 3
|
[] |
no_license
|
import pandas as pd
import numpy as np
from datetime import date
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
#plt.style.use('ggplot')
plt.rcParams.update({'font.size': 16})
from pandas_datareader import data as pdr
import yfinance as yf

# Create the date range for data
start_date = '2015-01-01'
end_date = date.today()
# Ticker symbol to investigate
ticker = 'SPY'

# Pull daily bars and keep only the high of day
df = pdr.get_data_yahoo(ticker, start=start_date, end=end_date)
df.drop(['Open', 'Close', 'Adj Close', 'Volume', 'Low'], axis=1, inplace=True)
# "Prediction" is simply yesterday's high (a persistence model).
df['after_training'] = df['High'].shift(periods=1)
df.dropna(axis=0, how='any', inplace=True)  # keyword axis: positional axis is deprecated
df.reset_index(inplace=True)
df['Date'] = df['Date'].astype(str)

# First 70% of rows are "training"; the remainder is out-of-sample.
train_end = int(df.shape[0] * .7)
indices = list(range(train_end, int(df.shape[0])))
maximum = df['after_training'][0:train_end].max()
# Cap out-of-sample predictions at the training maximum. One vectorised
# .loc/clip statement replaces the original per-row loop, which used chained
# assignment (df[col][i] = ...) -- a pandas anti-pattern that can silently
# fail to write through to the frame.
df.loc[indices, 'after_training'] = df.loc[indices, 'after_training'].clip(upper=maximum)

#Plot the true vs persistence, high
xmarks = [i for i in range(1, len(df['Date']) + 1, 200)]
plt.figure(figsize=(18, 9))
plt.plot(df['Date'], df['after_training'], 'r-', label="Prediction")
plt.plot(df['Date'], df['High'], 'k--', label='True Value')
plt.axvline(indices[0], color='b', linestyle='-', label='X Train Ending')
plt.ylabel('Price ($)', fontsize=20, c='k')
plt.xlabel('Date', fontsize=20, c='k')
plt.xticks(xmarks, rotation=20)
plt.tick_params(axis='x', colors='k', labelsize=20)
plt.tick_params(axis='y', colors='k', labelsize=20)
plt.title('Feature and Target Engineering Understanding', fontsize=24)
plt.legend()
plt.tight_layout()  # bug fix: the original referenced the function without calling it
plt.savefig('../imgs/feature_ing_explain.png')
plt.show()
| true
|
acf006fc9917762d255994f2373a2223d47d46e9
|
Python
|
youthgovernsl/THE-IDLE-PythonCrashCourse-2021
|
/Week-6/6.1_Basic image transforms.py
|
UTF-8
| 853
| 3.296875
| 3
|
[] |
no_license
|
import cv2

# Load the sample image (formats including jpg and png are supported;
# a relative or absolute file path may be provided).
source = cv2.imread('Samples/RGB.png')
# Dimensions of the loaded image (rows, cols, channels).
print(source.shape)

# OpenCV loads BGR; convert to RGB before annotating.
annotated = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)

# Syntax: cv2.rectangle(image, start_point, end_point, color, thickness)
cv2.rectangle(annotated, (55,15), (470,405) , (0,0,0) , 3)
# Syntax: cv2.circle(image, center_coordinates, radius, color, thickness)
cv2.circle(annotated, (175,282), 120, (0,0,0) , 2)

# Syntax : cv2.putText(img, text, org, font, fontScale, color, int thickness)
caption = '(Transformed)'
typeface = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(annotated, caption , (160,480), typeface, 1 , (0,0,0), 2)

# Show both versions in separate windows for 10 seconds.
cv2.imshow('Original image', source)
cv2.imshow('Transformed image', annotated)
cv2.waitKey(10000)
| true
|
0242e02c423cec90266b8905063f8ffcaca76f6c
|
Python
|
paulmcheng/Machine-Learning-For-Trading
|
/05-Statistical-analysis/simple_moving_average.py
|
UTF-8
| 1,447
| 3.171875
| 3
|
[] |
no_license
|
import pandas as pd
import matplotlib.pyplot as plt
import sys
from pathlib import Path as pa
# Make the repository root importable so the local `common` package resolves
# when this script is run directly from its subdirectory.
sys.path.insert(0, str(pa(__file__).resolve().parent.parent))
from common import stockdata as sd
def get_rolling_mean(df, window):
    """Trailing rolling mean over `window` observations (first window-1 are NaN)."""
    roller = df.rolling(window=window, center=False)
    return roller.mean()
def get_rolling_std(df, window):
    """Trailing rolling sample standard deviation over `window` observations."""
    roller = df.rolling(window=window, center=False)
    return roller.std()
def get_bollinger_bands(rmean, rstd):
    """Return (upper, lower) Bollinger bands: mean +/- two standard deviations."""
    band_offset = 2 * rstd
    return rmean + band_offset, rmean - band_offset
def test_run():
    """Plot AAPL price with its 20-day rolling mean and Bollinger bands."""
    #Read data (sd.get_data is a project helper returning a DataFrame keyed by symbol)
    dates = pd.date_range('2017-04-01', '2018-02-28')
    symbol='AAPL'
    symbols = [symbol]
    df = sd.get_data(symbols, dates)
    # compute rolling mean
    rmean = get_rolling_mean(df[symbol], window=20)
    # compute rolling standard deviation
    rstd = get_rolling_std(df[symbol], window=20)
    # compute upper and lower bands
    upper_band, lower_band = get_bollinger_bands(rmean, rstd)
    #plot price plus the three band lines on one shared axis
    ax2 = df[symbol].plot(title='{} stock price on Bollinger bands'.format(symbol) , label='Price', color="black")
    rmean.plot(label='Rolling mean', ax=ax2, color='gray')
    upper_band.plot(label='Upper band', ax=ax2, color='green')
    lower_band.plot(label='Lower band', ax=ax2, color='red')
    ax2.set_xlabel("Date")
    ax2.set_ylabel("Price")
    ax2.legend(loc='upper left')
    plt.show()
# Script entry point.
if __name__ == "__main__":
    test_run()
| true
|
2650d907feaecccdee8684d3f49d32ea1150af55
|
Python
|
mh105/somata
|
/somata/basic_models/arn.py
|
UTF-8
| 18,488
| 2.8125
| 3
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
"""
Author: Mingjian He <mh105@mit.edu>
arn module contains autoregressive model of order n methods used in SOMATA
"""
from somata.basic_models import StateSpaceModel as Ssm
from somata.exact_inference import inverse
import numpy as np
import numbers
from scipy.linalg import block_diag
class AutoRegModel(Ssm):
"""
AutoRegModel is a subclass of StateSpaceModel class dedicated
to autoregressive models of order n
"""
type = 'arn'
default_G = None
default_sigma2 = 3.
order = None
coeff = None
sigma2 = None
def __init__(self, coeff=None, sigma2=None,
components='Arn', F=None, Q=None, mu0=None, Q0=None, G=None, R=None, y=None, Fs=None):
"""
Constructor method for AutoRegModel class
:param coeff: coefficients of AR models
:param sigma2: noise variance of AR models
:param components: a list of independent AR components
:param F: transition matrix
:param Q: state noise covariance matrix
:param mu0: initial state mean vector
:param Q0: initial state covariance matrix
:param G: observation matrix (row major)
:param R: observation noise covariance matrix
:param y: observed data (row major, can be multivariate)
:param Fs: sampling frequency in Hz
"""
# Autoregressive models can be constructed by directly specifying
# the autoregressive parameters {order, coeff, sigma2}
if coeff is not None:
if isinstance(coeff, numbers.Number):
coeff = (np.asanyarray([coeff], dtype=np.float64), )
elif isinstance(coeff[0], numbers.Number):
coeff = (np.asanyarray(coeff, dtype=np.float64),)
else:
coeff = tuple([np.asanyarray(x, dtype=np.float64) for x in coeff])
order = np.asarray([len(x) for x in coeff])
if sigma2 is not None:
sigma2 = np.asanyarray([sigma2], dtype=np.float64) if \
isinstance(sigma2, numbers.Number) else np.asanyarray(sigma2, dtype=np.float64)
assert sigma2.size == len(coeff), 'Different numbers of AR model parameters provided.'
else:
sigma2 = np.ones_like(order, dtype=np.float64) * AutoRegModel.default_sigma2
self.order = order
self.coeff = coeff
self.sigma2 = sigma2
F_tmp, Q_tmp = self._arn_to_ssm_param()
if F is not None:
assert (F == F_tmp).all(), 'Input state equation parameters do not agree with input F.' # type: ignore
if Q is not None:
assert (Q == Q_tmp).all(), 'Input state equation parameters do not agree with input Q.' # type: ignore
F, Q = F_tmp, Q_tmp
else:
assert sigma2 is None, 'No coefficient provided but noise variance sigma2 is given as input.'
# Provide default values for mu0 and Q0
mu0 = np.zeros((F.shape[1], 1), dtype=np.float64) if mu0 is None and F is not None else mu0
Q0 = Q if Q0 is None and Q is not None else Q0
# Fill autoregressive parameters
self.fill_arn_param(F=F, Q=Q)
# Set up components input to parent class constructor
if components == 'Arn':
if self.order is None:
components = None
else:
components = []
for ii in range(len(self.order)):
component = AutoRegModel()
component.default_G = np.hstack([np.array([[1.]], dtype=np.float64),
np.zeros((1, self.order[ii]-1), dtype=np.float64)])
components.append(component)
else:
for ii in range(len(components)):
current_component: AutoRegModel = components[ii] # type: ignore
assert current_component.type == 'arn', 'Encountered non-arn type component.'
assert current_component.default_G.shape[1] == self.order[ii], 'Components mismatch AR order.'
# Update default_G attribute if only a single component
if components is not None and len(components) == 1:
self.default_G = components[0].default_G
# Call parent class constructor
super().__init__(components=components, F=F, Q=Q, mu0=mu0, Q0=Q0, G=G, R=R, y=y, Fs=Fs)
# Dunder methods - magic methods
def __repr__(self):
""" Unambiguous and concise representation when calling AutoRegModel() """
# Dynamic display depending on whether a single AR component
if self.default_G is None:
return super().__repr__().replace('Ssm', 'Arn')
else:
return 'Arn=' + str(self.default_G.shape[1]) + '<' + hex(id(self))[-4:] + '>'
def __str__(self):
""" Helpful information when calling print(AutoRegModel()) """
print_str = super().__str__().replace('<Ssm object at', '<Arn object at')
# Append additional information about autoregressive parameters
np.set_printoptions(precision=3)
print_str += "{0:9} = {1}\n ".format("AR order", str(self.order))
# create the string for displaying coeff
if self.coeff is None:
coeff_str = 'None'
else:
coeff_str = '(' + str(self.coeff[0])
for ii in range(1, len(self.coeff)):
coeff_str += ', ' + str(self.coeff[ii])
coeff_str += ')'
print_str += "{0:9} = {1}\n ".format("AR coeff", coeff_str)
print_str += "{0:9} = {1}\n ".format("sigma2", str(self.sigma2))
np.set_printoptions(precision=8)
return print_str
# Syntactic sugar methods - useful methods to make manipulations easier
def concat_(self, other, skip_components=False):
"""
Join two AutoRegModel objects together by concatenating the
components.
"""
assert self.type == 'arn' and other.type == 'arn', \
'Both objects input to concat_() need to be of AutoRegModel class.'
# Fill autoregressive parameters in both objects first
self.fill_arn_param()
other.fill_arn_param()
# Coefficient is a mandatory input for objects to call concat_()
assert self.coeff is not None and other.coeff is not None, \
'Both objects need at least coeff specified in order to concat.'
tmp_coeff = list(self.coeff)
for x in other.coeff:
tmp_coeff.append(x)
coeff = tuple(tmp_coeff)
# Configure the rest of attributes that are immutable
# sigma2
if self.sigma2 is None and other.sigma2 is None:
sigma2 = None
elif self.sigma2 is None:
sigma2 = np.hstack([np.ones_like(self.coeff, dtype=np.float64) * AutoRegModel.default_sigma2, other.sigma2])
elif other.sigma2 is None:
sigma2 = np.hstack([self.sigma2, np.ones_like(other.coeff, dtype=np.float64) * AutoRegModel.default_sigma2])
else:
sigma2 = np.hstack([self.sigma2, other.sigma2])
# Q0
if self.Q0 is None and other.Q0 is None:
Q0 = None
elif self.Q0 is None:
tmp_sigma2 = np.ones_like(self.order, dtype=np.float64) * AutoRegModel.default_sigma2
_, tmp_Q0 = self._arn_to_ssm_param(sigma2=tmp_sigma2)
Q0 = block_diag(tmp_Q0, other.Q0)
elif other.mu0 is None:
tmp_sigma2 = np.ones_like(other.order, dtype=np.float64) * AutoRegModel.default_sigma2
_, tmp_Q0 = AutoRegModel._arn_to_ssm_param(self=other, sigma2=tmp_sigma2)
Q0 = block_diag(self.Q0, tmp_Q0)
else:
Q0 = block_diag(self.Q0, other.Q0)
# Call parent class method to obtain general concatenated attributes
temp_obj = super().concat_(other, skip_components=skip_components)
return AutoRegModel(coeff=coeff, sigma2=sigma2, mu0=temp_obj.mu0, Q0=Q0, R=temp_obj.R,
y=temp_obj.y, Fs=temp_obj.Fs, components=temp_obj.components)
def remove_component(self, comp_idx):
"""
Remove a component from the AutoRegModel object,
default to remove the left most component
"""
super().remove_component(comp_idx=comp_idx)
if self.order is not None:
self.order = np.delete(self.order, comp_idx)
if self.coeff is not None:
self.coeff = tuple([self.coeff[x] for x in range(len(self.coeff)) if x != comp_idx])
if self.sigma2 is not None:
self.sigma2 = np.delete(self.sigma2, comp_idx)
# Set attributes to default values if nothing left
self.order = None if len(self.order) == 0 else self.order
self.coeff = None if len(self.coeff) == 0 else self.coeff
self.sigma2 = None if len(self.sigma2) == 0 else self.sigma2
def fill_arn_param(self, F=None, Q=None):
""" Attempt to fill autoregressive parameters """
F = self.F if F is None else F
Q = self.Q if Q is None else Q
if Q is not None:
non_zero_diagonal = np.nonzero(np.diagonal(Q))[0]
self.order = np.asanyarray(
np.diff(np.append(non_zero_diagonal, Q.shape[0])), dtype=np.int_) # type: ignore
elif F is not None:
self.order = self._guess_ar_order(F=F)
coeff, sigma2 = self._ssm_to_arn_param(F=F, Q=Q)
self.coeff = coeff if F is not None else self.coeff
self.sigma2 = sigma2 if Q is not None else self.sigma2
def _guess_ar_order(self, F=None):
"""
Try to guess the orders of AR models using F matrix,
last resort method, should not be used normally
"""
F = self.F if F is None else F
assert F is not None, 'Cannot guess AR orders with None F matrix.'
pointer = 0
order = []
while pointer < F.shape[0]:
next_pointer = np.argmax(F[pointer, pointer:] == 0)
current_order = F.shape[0] - pointer if next_pointer == 0 else next_pointer - pointer
if current_order > 1:
assert np.all(F[pointer+1:pointer+current_order, pointer:pointer+current_order-1] ==
np.eye(current_order-1, dtype=np.float64)), \
'Failed to guess the autoregressive model orders. Consider input order explicitly.'
order.append(current_order)
pointer += current_order
return np.array(order, dtype=np.int_)
def _ssm_to_arn_param(self, F=None, Q=None, order=None):
""" Convert from matrices F and Q to autoregressive parameters """
F = self.F if F is None else F
Q = self.Q if Q is None else Q
order = self.order if order is None else order
if order is not None:
coeff = []
sigma2 = []
for ii in range(len(order)):
start_idx = sum(order[:ii])
end_idx = sum(order[:ii+1])
current_coeff = F[start_idx, start_idx:end_idx] if F is not None else None
if isinstance(current_coeff, numbers.Number):
coeff.append(np.asanyarray([current_coeff], dtype=np.float64))
else:
coeff.append(np.asanyarray(current_coeff, dtype=np.float64)) # type: ignore
current_sigma2 = Q[start_idx, start_idx] if Q is not None else None # type: ignore
sigma2.append(current_sigma2)
coeff = tuple(coeff) if len(order) > 1 else (coeff[0],)
sigma2 = np.array(sigma2, dtype=np.float64)
return coeff, sigma2
else:
return None, None
def _arn_to_ssm_param(self, order=None, coeff=None, sigma2=None):
""" Convert from autoregressive parameters to matrices F and Q """
order = self.order if order is None else order
coeff = self.coeff if coeff is None else coeff
sigma2 = self.sigma2 if sigma2 is None else sigma2
if coeff is not None:
F_blocks = []
Q_blocks = []
for ii in range(len(coeff)):
F_blocks.append(AutoRegModel.get_coeff_mat(coeff[ii]))
Q_block = np.zeros((order[ii], order[ii]), dtype=np.float64)
Q_block[0, 0] = sigma2[ii]
Q_blocks.append(Q_block)
return block_diag(*F_blocks), block_diag(*Q_blocks)
else:
return None, None
def get_default_q(self, components=None, E=None):
"""
Get the default structure of state noise covariance
matrix Q in the Q_basis block diagonal form
"""
components = self.components if components is None else components
if len(components) == 1 or type(components) is AutoRegModel:
order = components.default_G.shape[1] if type(components) is AutoRegModel else \
components[0].default_G.shape[1]
E = np.eye(1, dtype=np.float64) if E is None else E
nsource = E.shape[0]
default_Q = np.zeros((order * nsource, order * nsource), dtype=np.float64)
default_Q[0:nsource, 0:nsource] = E
else:
default_Q = block_diag(*[x.get_default_q(components=x, E=E) for x in components])
return default_Q
@staticmethod
def get_coeff_mat(coeff):
""" Create a transition matrix F from AR coefficients """
order = len(coeff)
return np.vstack([coeff, np.hstack([np.eye(order - 1, dtype=np.float64),
np.zeros((order - 1, 1), dtype=np.float64)])])
# Parameter estimation methods (M step)
def m_estimate(self, **kwargs):
"""
Maximum likelihood or Maximum a posteriori estimation to update
parameters. Calling super().m_estimate() for all Ssm parameters
"""
results = super().m_estimate(**kwargs)
self.update_comp_param()
return results
def update_comp_param(self):
""" Update AutoRegModel specific parameters """
self.fill_arn_param()
def initialize_priors(self, Q_sigma2=None, Q_hyperparameter=None,
R_sigma2=None, R_hyperparameter=None):
""" Initialize priors for autoregressive models """
# base case
if self.ncomp <= 1:
# [Inverse gamma prior] on state noise covariance Q non-zero diagonal entries
if Q_sigma2 is None:
Q_sigma2 = self.Q[0, 0]
Q_hyperparameter = 0.1 if Q_hyperparameter is None else Q_hyperparameter
# [Inverse gamma prior] on observation noise variance R <--- TODO: update to Wishart
if R_sigma2 is None:
if self.R.shape[0] > 1:
raise NotImplementedError('Only uni-variate observation data is supported with prior for now.')
else:
R_sigma2 = self.R[0, 0]
R_hyperparameter = 0.1 if R_hyperparameter is None else R_hyperparameter
return {'Q_sigma2': Q_sigma2, 'Q_hyperparameter': Q_hyperparameter,
'R_sigma2': R_sigma2, 'R_hyperparameter': R_hyperparameter}
# recursive case
else:
assert self.components is not None, 'Cannot initialize priors outside base case when components is None.'
components_prefill = self.fill_components(empty_comp=AutoRegModel(), deep_copy=True)
# expand the specified prior values to the length of components
Q_sigma2 = self._initialize_priors_recursive_list(Q_sigma2)
R_sigma2 = self._initialize_priors_recursive_list(R_sigma2)
R_hyperparameter = self._initialize_priors_recursive_list(R_hyperparameter)
# construct the final priors that is a list of dictionaries
priors = []
for ii in range(self.ncomp):
current_component: AutoRegModel = self.components[ii]
assert current_component.type == 'arn', 'Component type is not AutoRegModel class.'
priors.append(current_component.initialize_priors(Q_sigma2=Q_sigma2[ii], R_sigma2=R_sigma2[ii],
R_hyperparameter=R_hyperparameter[ii]))
# unfill the components
self.unfill_components(components_prefill)
return priors
@staticmethod
def _m_update_f(A=None, B=None, C=None, priors=None):
    """ Update transition matrix -- F.

    A and B are presumably the smoothed sufficient statistics of the
    E-step (cross/auto second moments) -- TODO confirm against caller.
    Solves coeff = B[0,:] A^{-1} (least-squares AR coefficient update,
    no prior) and packs it into companion form.
    """
    # Update the AR coefficients -- coeff (no prior)
    # SVD-based inverse for larger systems; plain Gaussian elimination otherwise.
    approach = 'svd' if A.shape[0] >= 5 else 'gaussian'
    coeff_new = B[0, :] @ inverse(A, approach=approach)
    # Construct transition matrix -- F (companion matrix of the AR coefficients)
    F = AutoRegModel.get_coeff_mat(coeff_new)
    return F
@staticmethod
def _m_update_q(A=None, B=None, C=None, T=None, F=None, priors=None):
    """ Update state noise covariance matrix -- Q.

    Only the (0, 0) entry is non-zero for an AR(n) companion-form model.
    MLE when no prior is configured, otherwise MAP under an inverse-gamma
    prior whose mode is pinned to the prior value 'Q_sigma2'.
    """
    coeff_new = F[0, :]
    # Residual sum of squares of the AR innovation, from sufficient statistics.
    Q_ss = C[0, 0] - 2 * B[0, :] @ coeff_new + coeff_new @ A @ coeff_new
    if Ssm._m_update_if_mle('Q_sigma2', priors):
        # MLE
        sigma2_Q_new = Q_ss / T
    else:
        # MAP with inverse gamma prior
        Q_init = priors['Q_sigma2']
        Q_hp = priors['Q_hyperparameter'] if 'Q_hyperparameter' in priors else 0.1
        alpha = T * Q_hp / 2  # scales with data length T according to the hyperparameter
        beta = Q_init * (alpha + 1)  # setting the mode of inverse gamma prior to be Q_init
        sigma2_Q_new = (beta + Q_ss / 2) / (alpha + T / 2 + 1)  # mode of inverse gamma posterior
    Q = np.zeros_like(F, dtype=sigma2_Q_new.dtype)
    Q[0, 0] = sigma2_Q_new
    return Q
@staticmethod
def _m_update_mu0(x_0_n=None):
    """ Update initial state mean -- mu0.

    Only the first state element carries the smoothed initial value
    x_0_n[0]; all other entries of the column vector stay zero.
    """
    state_dim = x_0_n.shape[0]
    initial_mean = np.zeros((state_dim, 1), dtype=x_0_n.dtype)
    initial_mean[0, 0] = x_0_n[0]
    return initial_mean
@staticmethod
def _m_update_q0(x_0_n=None, P_0_n=None, mu0=None):
    """ Update initial state covariance -- Q0.

    Fills only the (0, 0) entry with
    P_0_n[0,0] + x^2 - 2*x*mu + mu^2 (i.e. P + (x - mu)^2, kept in
    expanded form to match the original arithmetic exactly).
    """
    smoothed_first = x_0_n[0]
    mean_first = mu0[0, 0]
    Q0 = np.zeros(P_0_n.shape, dtype=P_0_n.dtype)
    Q0[0, 0] = P_0_n[0, 0] + smoothed_first**2 - 2 * smoothed_first * mean_first + mean_first**2
    return Q0
@staticmethod
def _m_update_g(y=None, x_t_n=None, P_t_n=None, h_t=None, C=None, D=None):
    """ Update observation matrix -- G (AutoRegModel has fixed G).

    Returning None signals the caller that G must not be re-estimated.
    """
    return None
| true
|
88939c71ff6e2dc0b984abc9ab9f70a788419c6a
|
Python
|
rahulrkroy/coding
|
/pythonbasics/generator.py
|
UTF-8
| 122
| 3.421875
| 3
|
[] |
no_license
|
def topten():
    """Yield the squares of the integers 1 through 10, in order."""
    for value in range(1, 11):
        yield value * value


# Demo: print each square on its own line.
squares = topten()
for square in squares:
    print(square)
| true
|
661837cd7886c47de2847f854b1a86cd6ab8dadb
|
Python
|
Techwrekfix/Starting-out-with-python
|
/chapter-3/5. Mass_and_weight.py
|
UTF-8
| 447
| 4.59375
| 5
|
[] |
no_license
|
# This program measures the weight of objects.
# Get the mass of the object (in kilograms) from the user.
# Bug fix: the original prompt read "Enter the mass of the mass of the object".
mass_of_object = float(input("Enter the mass of the object: "))
# Weight (newtons) = mass * gravitational acceleration (9.8 m/s^2).
weight = mass_of_object * 9.8
print("\nThe weight of the object is N", format(weight, '.2f'), sep='')
# Classify the object by its weight in newtons.
if weight > 500:
    print("\nThis object is too heavy")
elif weight < 100:
    print("\nThis object is too light")
| true
|
7c7feadbdf5f5c7da78e35fa299223d84e8e64fb
|
Python
|
sischei/global_solution_yale19
|
/Lecture_5/code/scikit_multi-d.py
|
UTF-8
| 2,123
| 3.234375
| 3
|
[] |
no_license
|
import numpy as np
from matplotlib import pyplot as plt
# Bug fix: cPickle is Python 2 only; this script uses Python 3 print().
import pickle
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C

np.random.seed(1)


# Test function
def f(x):
    """The 2d function to predict."""
    return np.sin(x[0]) * np.cos(x[1])


# Generate noisy training data on [-1, 1]^2.
n_sample = 100  # points
dim = 2  # dimensions
X = np.random.uniform(-1., 1., (n_sample, dim))
y = np.sin(X[:, 0:1]) * np.cos(X[:, 1:2]) + np.random.randn(n_sample, 1) * 0.005

# Instantiate a Gaussian Process model with an RBF kernel.
kernel = RBF()
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)

# Fit to data using Maximum Likelihood Estimation of the parameters.
gp.fit(X, y)

# Make the prediction on the training points.
y_pred, sigma = gp.predict(X, return_std=True)

# Compute the mean absolute error on a fresh random test set.
n_sample_test = 50
Xtest1 = np.random.uniform(-1., 1., (n_sample_test, dim))
y_pred1, sigma = gp.predict(Xtest1, return_std=True)
mse = 0.0
for g in range(len(Xtest1)):
    mse += abs(y_pred1[g] - f(Xtest1[g]))
# Bug fix: the original divided by len(y_pred) (the 100 training
# predictions) instead of the 50 test points actually summed above.
mse = mse / len(Xtest1)
print(".......................")
print(" The MSE is ", mse[0])
print(".......................")

# ----------------------------------------------------------------------
# Important -- save the model to a file.
with open('2d_model.pcl', 'wb') as fd:
    pickle.dump(gp, fd, protocol=pickle.HIGHEST_PROTOCOL)
print("data written to disk")

# Load the model back and do predictions with it.
with open('2d_model.pcl', 'rb') as fd:
    gm = pickle.load(fd)
print("data loaded from disk")

# Evaluate the reloaded model on another random test set.
n_test = 50
dim = 2
Xtest = np.random.uniform(-1., 1., (n_test, dim))
y_pred_test, sigma_test = gm.predict(Xtest, return_std=True)
MSE2 = 0
for a in range(len(Xtest)):
    MSE2 += abs(y_pred_test[a] - f(Xtest[a]))
MSE2 = MSE2 / len(Xtest)
print(".......................")
print(" The MSE 2 is ", MSE2[0])
print(".......................")
# ----------------------------------------------------------------------
| true
|
3291b2caffcee42cabb42f2402fe14eff0606db5
|
Python
|
sand9888/DAT210x-python
|
/lab7/cross_validation.py
|
UTF-8
| 779
| 2.90625
| 3
|
[] |
no_license
|
# NOTE: assumes X, y and model are defined earlier in the session/notebook.
# Bug fix: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

# Test how well your model can recall its training data:
model.fit(X_train, y_train).score(X_train, y_train)
# 0.943262278808

# Test how well your model can predict unseen data:
model.fit(X_train, y_train).score(X_test, y_test)
# 0.894716422024

# 10-Fold Cross Validation on your training data.
# Bug fix: the original imported cross_val_score *as* cval and then called
# cval.cross_val_score(...) -- an AttributeError, since cval IS the function.
from sklearn.model_selection import cross_val_score
cross_val_score(model, X_train, y_train, cv=10)
# array([ 0.93513514, 0.99453552, 0.97237569, 0.98888889, 0.96089385,
#         0.98882682, 0.99441341, 0.98876404, 0.97175141, 0.96590909])
cross_val_score(model, X_train, y_train, cv=10).mean()
# 0.97614938602520218
| true
|
65957b16bd11b573a7ae2e8ede5c5fe181ce8051
|
Python
|
YatinGupta777/Python-Programs
|
/Gfg/Subarray of size k with given sum.py
|
UTF-8
| 780
| 4.0625
| 4
|
[] |
no_license
|
# Python program to check if any subarray of size
# k has a given sum.
def checkSubarraySum(arr, n, k, sumV):
    """Return True if any contiguous subarray of length k in arr sums to sumV.

    arr  -- list of numbers
    n    -- length of arr
    k    -- window size
    sumV -- target sum
    """
    # Sum of the first window of size k.
    curr_sum = 0
    for i in range(0, k):
        curr_sum += arr[i]
    if curr_sum == sumV:
        # Bug fix: the original returned the undefined name `true`.
        return True
    # Slide the window: add the entering element, drop the leaving one.
    for j in range(k, n):
        curr_sum = curr_sum + arr[j] - arr[j - k]
        if curr_sum == sumV:
            return True
    return False
# Driver code: windows of size 4 over arr; 4+2+10+2 == 18, so prints YES.
arr = [ 1, 4, 2, 10, 2, 3, 1, 0, 20 ]
k = 4
sumV = 18
n = len(arr)
if (checkSubarraySum(arr, n, k, sumV)):
    print("YES")
else:
    print( "NO")
#This code is contributed by Yatin Gupta
| true
|
485b382de50f8b0afba1f8ef14751b06f8a8178f
|
Python
|
gjanesch/Darebee-Scraper
|
/darebee_scraping_functions.py
|
UTF-8
| 5,442
| 2.75
| 3
|
[] |
no_license
|
import re
import os
from bs4 import BeautifulSoup
import pandas as pd
import requests
import darebee_scraper_constants as consts
# Shorthand for grabbing a web page
def get_page_html(url):
    """Fetch `url` and return it parsed as a BeautifulSoup document (lxml)."""
    response = requests.get(url)
    return BeautifulSoup(response.text, "lxml")
# Checks the main page and grabs all of the workout URLs
def get_darebee_links(darebee_url):
    """Return the relative '/workouts/...html' links embedded in the page.

    The links live inside the last <script type="text/javascript"> block
    as escaped JSON ('\\/'), so the escapes are undone before matching.
    """
    darebee_html = get_page_html(darebee_url)
    javatext = darebee_html.findAll("script", attrs={'type':'text/javascript'})
    # The last script tag holds the workout link data.
    workout_links_text = str(javatext[-1])
    # Grab the first {...} object, then unescape '\/' -> '/'.
    workout_links_text = re.findall("\{.*?\}", workout_links_text)
    workout_links_text = re.sub("\\\/", "/",workout_links_text[0])
    return [r for r in re.findall("\/workouts/.*?\.html",workout_links_text)]
# Acquires the infobox element from the workout page and extracts a specific
# part
def get_infobox(infobox, element):
    """Return the title-cased value encoded in an infobox image filename.

    The value is the suffix of '/images/infobox/<prefix>-<value>.jpg'.
    Returns "N/A" when the requested element/img is missing.
    """
    try:
        infobox = infobox.find('div', attrs={'class':element}).find('img')['src']
    except AttributeError:
        # Missing div or img: .find() returned None.
        return "N/A"
    return re.search("/images/infobox/.*?-(.*)\.jpg", infobox).group(1).title()
# Gets the information for a single workout from a Darebee workout page
def get_workout_info(workout_link):
    """Scrape one workout page and return a tuple of its fields.

    Returns (name, page URL, focus, difficulty, works, PDF URL,
    description, extra credit).
    """
    print(workout_link)
    # Derive a human-readable name from the URL slug.
    workout_name = re.search("/workouts/(.*)\.html", workout_link).group(1)
    workout_name = re.sub("-", " ", workout_name).title()
    workout_page = consts.DAREBEE_BASE_URL + workout_link
    workout_raw = requests.get(workout_page)
    workout_html = BeautifulSoup(workout_raw.text, "lxml")
    infobox_more = workout_html.find('div', attrs={'class':'infobox'})
    focus = get_infobox(infobox_more, 'infobox-focus')
    difficulty = get_infobox(infobox_more, 'infobox-difficulty')
    works = get_infobox(infobox_more, 'infobox-works')
    # The PDF mirrors the HTML path under /pdf with a .pdf extension.
    pdf_url = consts.DAREBEE_BASE_URL + "/pdf" + re.sub("\.html", ".pdf", workout_link)
    # Pages have (somewhat dramatic) descriptions of the workouts. Could be
    # useful, though
    description = workout_html.find("div", attrs = {"class":"infomore"})
    if description.find("div", attrs = {"class":"infotext"}) is not None:
        description = description.find("div", attrs = {"class":"infotext"}).text
    else:
        # Fall back to the first paragraph of the page.
        description= workout_html.find("p").text
    # Strip non-breaking spaces (and an optional preceding space).
    description = re.sub("( )?\\xa0", "", description)
    # Most (but not all) workouts have extra credit, for doing exercises more
    # intensely; grab those.
    extra_credit = workout_html.find("div", attrs = {"class":"infoec"})
    extra_credit = extra_credit.text if extra_credit is not None else ""
    extra_credit = re.sub("Extra [Cc]redit:( )?|\\xa0", "", extra_credit)
    return (workout_name, workout_page, focus, difficulty, works, pdf_url,
            description, extra_credit)
# Either updates the workout list (if the darebee file exists) or downloads all
# of the existing workouts into a dataframe (if the file's not present).
def create_update_workout_list():
    """Sync the local workout CSV (and PDFs) with the Darebee site."""
    workout_links = get_darebee_links(consts.DAREBEE_BASE_URL + "/wods.html")
    darebee_file_exists = os.path.isfile(consts.DAREBEE_FILE_NAME)
    headers = ["Workout_Name","Workout_Page_URL","Focus","Difficulty",
               "Works","PDF_URL","Description","Extra_Credit"]
    if darebee_file_exists:
        print("Darebee file found - checking for new workouts...")
        darebee = pd.read_csv(consts.DAREBEE_FILE_NAME, sep = consts.DAREBEE_FILE_SEP)
        workout_links_full = [consts.DAREBEE_BASE_URL + wl for wl in workout_links]
        # NOTE(review): despite its name, not_in_df is True for links that ARE
        # already in the file; the ~ below selects the genuinely new ones.
        not_in_df = pd.Series(workout_links_full).isin(darebee["Workout_Page_URL"])
        new_workouts = [wl for wl,nid in zip(workout_links, list(~not_in_df)) if nid]
        print(str(len(new_workouts)) + " new workouts found.")
        # If new workouts were found, download them and append them to the df
        if len(new_workouts) != 0:
            print("Gathering info...")
            new_workout_tuples = [get_workout_info(nw) for nw in new_workouts]
            new_workout_df = pd.DataFrame(new_workout_tuples, columns = headers)
            # Newest entries go on top of the existing frame.
            darebee = new_workout_df.append(darebee)
            update_pdf_collection(new_workout_df)
    else:
        print("No Darebee file found - creating new file...")
        workout_tuples = [get_workout_info(wl) for wl in workout_links]
        darebee = pd.DataFrame(workout_tuples, columns = headers)
        set_up_workout_folders()
        update_pdf_collection(darebee)
    darebee.to_csv(consts.DAREBEE_FILE_NAME, sep = consts.DAREBEE_FILE_SEP,
                   index = False)
    print("Done.")
# Downloads a workout PDF to the specified directory.
def download_workout_pdf(pdf_url, file_destination):
    """Fetch `pdf_url` and write the raw bytes to `file_destination`."""
    pdf_bytes = requests.get(pdf_url).content
    with open(file_destination, 'wb') as out_file:
        out_file.write(pdf_bytes)
# Sets up the workout folders; they're only divided by difficulty.
def set_up_workout_folders():
    """Create './Workout PDFs/Difficulty 1'..'Difficulty 5' (idempotent)."""
    # os.makedirs with exist_ok creates the parent too and replaces the
    # original's manual os.path.exists checks.
    for level in range(1, 6):
        os.makedirs(os.path.join("Workout PDFs", "Difficulty " + str(level)),
                    exist_ok=True)
def update_pdf_collection(df):
    """Download any workout PDFs listed in `df` that aren't already on disk.

    Expects columns 'PDF_URL' and 'Difficulty'; files are stored under
    'Workout PDFs/Difficulty <n>/'.
    """
    for _, row in df.iterrows():
        # Filename is everything after 'workouts/' in the PDF URL.
        pdf_name = re.search("workouts/(.*?)$", row["PDF_URL"]).group(1)
        workout_file_path = "Workout PDFs/Difficulty " + str(row["Difficulty"]) + "/" + pdf_name
        if not os.path.isfile(workout_file_path):
            download_workout_pdf(row["PDF_URL"], workout_file_path)
| true
|
6e2888f477443ad040000c6598d7e21b30497103
|
Python
|
sanfendu/TCM_word2vec
|
/train_model.py
|
UTF-8
| 2,282
| 2.53125
| 3
|
[] |
no_license
|
import GlobalParament
import utils
from gensim.models import word2vec
# Train a word2vec model (translated from: 训练模型word2vec).
def train(sentences, model_save_path):
    """Train a gensim word2vec model on `sentences` and save it to disk.

    Size and window come from GlobalParament.train_size/train_window.
    """
    print("开始训练")  # prints "training started"
    model=word2vec.Word2Vec(sentences=sentences,size=GlobalParament.train_size,window=GlobalParament.train_window)
    model.save(model_save_path)
    print("保存模型结束")  # prints "model saved"
if __name__ == '__main__':
    # Query words (TCM terms): 恶寒 "aversion to cold", 厥逆 "reversal cold".
    word='恶寒'
    word1='厥逆'
    word_list=['恶寒','社会']
    type ='wiki'  # corpus tag; NOTE: shadows the builtin `type`
    #sentences=utils.process_text(GlobalParament.text_alldata,GlobalParament.text_afterprocess_alldata,GlobalParament.stop_words_dir)
    #sentences=utils.load_traintext(GlobalParament.text_afterprocess_partdata_word)
    # Load the preprocessed wiki corpus and train/save the model.
    sentences = utils.load_traintext(GlobalParament.wiki_afterprocess)
    train(sentences,GlobalParament.model_save_path+str(GlobalParament.train_size)+'-'+str(GlobalParament.train_window)+'-'+type+'.model')
    #print (len(sentences))
    #sim_list = []
    # Reload the freshly trained model and inspect its vocabulary.
    model=word2vec.Word2Vec.load(GlobalParament.model_save_path+str(GlobalParament.train_size)+'-'+str(GlobalParament.train_window)+'-'+type+'.model')
    vocab=list(model.wv.vocab.keys())
    #model.wv.save_word2vec_format('embedding.txt')
    #print(model.wv.index2word()) # get all the vocabulary (translated comment)
    # for word in model.wv.index2word():
    #     print(word, model[word])
    # print(vocab)
    # print(len(vocab))
    # Append an evaluation report: corpus stats plus top-10 most similar
    # words for each query word.
    with open(GlobalParament.mode_test_path, 'a', encoding=GlobalParament.encoding) as f_writer:
        f_writer.write('\n***************' + type + '*******************\n')
        f_writer.write("句子长度: " + str(len(sentences)) + '\n')
        f_writer.write("词表大小:" + str(len(vocab)) + '\n')
        f_writer.write('window: ' + str(GlobalParament.train_window) + ' size:' + str(GlobalParament.train_size )+ '\n')
        for word in word_list:
            f_writer.write('与 ' + word + ' 比较:\n')
            for e in model.most_similar(positive=word, topn=10):
                f_writer.write(e[0]+' '+str(e[1])+'\n')
            f_writer.write('------------------------\n')
            #sim_value = model.similarity(word1, word2)
            #f_writer.write(str(sim_value))
        f_writer.write('\n')
        f_writer.flush()
        f_writer.close()
|
b92dcd150c250c8eb62f373c50d40e5a8e8b4182
|
Python
|
Seralpa/AdventOfCode2018
|
/day3/p1.py
|
UTF-8
| 791
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
class Rectangle:
    """An axis-aligned rectangle defined by its top-left offset and size."""

    def __init__(self, offset, size, id):
        # offset/size may arrive as strings parsed from the input file.
        self.offset = (int(offset[0]), int(offset[1]))
        self.size = (int(size[0]), int(size[1]))
        self.id = id

    def fillRect(self, matrix):
        """Mark this rectangle's cells in matrix with its id; overlaps become "x"."""
        for i in range(self.offset[0], self.offset[0] + self.size[0]):
            for j in range(self.offset[1], self.offset[1] + self.size[1]):
                if matrix[i][j] != 0:
                    matrix[i][j] = "x"
                else:
                    # Bug fix: the original wrote the module-level global `id`
                    # (shadowing the builtin) instead of this rectangle's own.
                    matrix[i][j] = self.id
# 1000x1000 fabric grid; 0 = unclaimed, claim id = single claim, "x" = overlap.
matrix = [[0 for i in range(1000)] for j in range(1000)]
# Parse claims of the form "#id @ x,y: wxh" and stamp each onto the grid.
with open("input.txt", "r") as f:
    for line in f:
        line = line.split()
        id = int(line[0].replace("#", ""))
        offset = line[2].replace(":", "").split(",")
        size = line[3].split("x")
        rect = Rectangle(offset, size, id)
        rect.fillRect(matrix)
# Count cells claimed by two or more rectangles.
cont = 0
for line in matrix:
    cont += line.count("x")
print(f"part 1: {cont}")
| true
|
05ad53adc73ed59e04c7a46ba4b25af21c5440c6
|
Python
|
ZainebPenwala/Python-practice-problems
|
/longest word.py
|
UTF-8
| 541
| 3.9375
| 4
|
[] |
no_license
|
# find_longest_word: print the longest word in a list and its length.
li = ['hi', 'hello', 'wonderful']
longest = li[0]
for elem in li[1:]:
    if len(elem) > len(longest):
        longest = elem
print(longest, len(longest))

# filter_long_words: takes a list of words and an integer n and returns the
# list of words that are longer than n.
li = ['hi', 'hello', 'wonderful']
n = 3


def filter_words(a, b):
    """Return the words in list `a` that are longer than `b` characters.

    Bug fix: the original ignored its parameters (it read the globals
    `li` and `n`) and accumulated into a shared global list `f`, so
    repeated calls kept appending to the same list.
    """
    return [word for word in a if len(word) > b]


print(filter_words(li, n))
| true
|
5f45178558e81970290c61add495858fc41f56c4
|
Python
|
jaceycarter/datavisualizationhw02
|
/piechart.py
|
UTF-8
| 1,404
| 3.3125
| 3
|
[] |
no_license
|
# importing stuff
import json
import pprint
import matplotlib.pyplot as plt
import numpy as np

# GRAPH 1: party affiliation of US senators.
democrat = 0
republican = 0
independent = 0

file = 'us_senators.json'
with open(file) as f:
    text = f.read()
us_senators = json.loads(text)
pprint.pprint(us_senators)

# Tally senators by party.
for senator in us_senators['objects']:
    party = senator.get('party')
    if party == 'Republican':
        republican += 1
    elif party == 'Democrat':
        democrat += 1
    elif party == 'Independent':
        independent += 1
print(republican)
print(democrat)
print(independent)

# Pie chart, where the slices will be ordered and plotted counter-clockwise.
labels = 'Republicans', 'Democrats', 'Independent'
# Bug fix: use the counts computed above instead of the hard-coded
# [50, 48, 2], which silently discarded the tally.
sizes = [republican, democrat, independent]
explode = (0, 0.1, 0)
colors = ["red", "blue", "purple"]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, colors=colors,
        shadow=True, startangle=90)
ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
plt.legend(title="Party:")
plt.title("US Senators and Party Affiliation")
plt.show()
| true
|
cb72a6d2c4bc8befa159b048c8781bc084fa5205
|
Python
|
vitormrts/python-exercises
|
/Desafios/des090b.py
|
UTF-8
| 409
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
num = list()
par = list()
impar = list()
# Read numbers until the user answers 'N' to the continue prompt.
while True:
    num.append(int(input('Digite um valor: ')))
    resp = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
    if resp == 'N':
        break
# Split the values into even (par) and odd (impar) lists.
# Idiom fix: the original used enumerate() but never used the index.
for v in num:
    if v % 2 == 0:
        par.append(v)
    else:
        impar.append(v)
print(f'\nLista completa: {num}'
      f'\nLista de pares: {par}'
      f'\nLista de ímpares: {impar}')
| true
|
b308224630d899293555011972e395b22599b004
|
Python
|
Cactiw/UText_bot
|
/libs/locations/castle.py
|
UTF-8
| 686
| 2.84375
| 3
|
[] |
no_license
|
from libs.locations.location import *
class Castle(Location):
    """A capturable map location owned by a fraction (faction)."""
    def __init__(self, id, name, fraction):
        super(Castle, self).__init__(id, name)
        self.fraction = fraction  # the faction that controls this point
    def change_fraction(self, new_fraction):
        # Hand control of the castle over to another faction.
        self.fraction = new_fraction
# Pre-built castles for the three factions.  `roads` maps a destination
# location id to a road weight (presumably travel cost/time -- TODO confirm
# against the pathfinding code that consumes it).
feds_castle = Castle(2, "Авентин", "Федералы")
feds_castle.roads = {1: 8, 11: 5, 13: 5, 5: 5, 6: 5}
trib_castle = Castle(3, "Эсквилин", "Трибунал")
trib_castle.roads = {1: 8, 11: 5, 12: 5, 7: 5, 8: 5}
stai_castle = Castle(4, "Палантин", "Стая")
stai_castle.roads = {1: 8, 12: 5, 13: 5, 9: 5, 10: 5}
| true
|
3e210d1f0d86f844afe91c1086af322e3b95d984
|
Python
|
ModellingWebLab/cellmlmanip
|
/cellmlmanip/rdf.py
|
UTF-8
| 1,102
| 3.390625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
"""Module for working with RDF data."""
import rdflib


def create_rdf_node(node_content):
    """Creates and returns an RDF node.

    :param node_content: the content for the node.

    The ``node_content``, if given, must either be a :class:`rdflib.term.Node` instance,
    a tuple ``(namespace_uri, local_name)``, or a string, in which case it is interpreted
    as either a URI ref if it starts with # otherwise a literal RDF node.

    ``node_content`` may also be ``None`` to return ``None``, allowing easy handling of wildcard options to queries.
    """
    # Pass through wildcards and already-constructed nodes unchanged.
    if node_content is None:
        return None
    if isinstance(node_content, rdflib.term.Node):
        return node_content
    if isinstance(node_content, tuple):
        namespace_uri, local_name = node_content
        # Ensure namespace prefix can be appended to.
        if not namespace_uri.endswith('#') and not namespace_uri.endswith('/'):
            namespace_uri += '#'
        return rdflib.Namespace(namespace_uri)[local_name]
    if isinstance(node_content, str) and node_content.startswith('#'):
        return rdflib.URIRef(node_content)
    return rdflib.Literal(node_content)
| true
|
2b9ee5725f8e0dc09c783a34c1318d467c2bba23
|
Python
|
evgeniysgs3/YouSecReport
|
/miscellaneous/tools/Nmap/ReadConfig.py
|
UTF-8
| 645
| 2.65625
| 3
|
[] |
no_license
|
import configparser
class Config:
    """Thin wrapper around an .ini configuration file."""

    def __init__(self, file_config):
        # Path to the .ini file to read on demand.
        self.f_config = file_config

    def read_config(self):
        """Read configuration file"""
        parser = configparser.ConfigParser()
        parser.read(self.f_config)
        return parser

    def get_auth_for_send_email(self):
        """Return auth settingth for email sender report"""
        email_auth_section = self.read_config()['EMAILAUTH']
        return email_auth_section['login'], email_auth_section['password']
if __name__ == '__main__':
    # Smoke test: read credentials from a local config.ini and print them.
    config = Config('config.ini')
    print(config.get_auth_for_send_email())
| true
|
9f37c7e64b57313d282053d751c05d09a218d68d
|
Python
|
hunzo/book-devops
|
/chapter16/otp_dock/python/tests/unit_tests/otp_test.py
|
UTF-8
| 872
| 2.671875
| 3
|
[] |
no_license
|
import pytest
from src import otp


def test_generate_otp_return_str_type():
    """OTP is returned as a string."""
    res = otp.generate_otp()
    assert type(res) is str


def test_generate_otp_return_length():
    """OTP is exactly 6 characters long."""
    res = otp.generate_otp()
    assert len(res) == 6


def test_generate_otp_return_str_numeric():
    """OTP contains digits only."""
    res = otp.generate_otp()
    assert res.isnumeric() == True


def test_generate_otp_return_random():
    """Two consecutive OTPs differ (probabilistic check)."""
    res1 = otp.generate_otp()
    res2 = otp.generate_otp()
    assert res1 != res2


@pytest.mark.parametrize("input, output", [('nuttachot@hotmail.com', True), ('nuttachot', False), ('nuttachot.hotmail.com', False)])
def test_val_email(input, output):
    """Email validation accepts addresses with '@' patterns only."""
    add = otp.val_email(input)
    assert add == output


def test_get_email_list_key_is_not_empty():
    """The email list key is present."""
    res = otp.get_email_list_key()
    assert res != None


def test_get_email_list_key_return_string():
    """The email list key is a string."""
    res = otp.get_email_list_key()
    assert type(res) is str
| true
|
712c71297af0e02f159f9fa837a19cb1d6380384
|
Python
|
jeffreylozano1376/Basics_Python
|
/exercises/4 - dictionary_exercise.py
|
UTF-8
| 2,311
| 3.890625
| 4
|
[] |
no_license
|
# Person
person_info = {"first_name": "Jeffrey", "last_name": "Lozano",
               "age": 29, "city": "Mandaluyong City"}
print(person_info)

# Favorite Numbers
friends = {
    'jastin': '6',
    'gabriel': '1',
    'julius': '5',
    'shem': '9'
}
for name, number in friends.items():
    print(f"{name.title()}'s favorite number is {number}.")

# Glossary
programming_words = {
    'list': 'a data structure in Python that is a mutable, or changeable, ordered sequence of elements',
    'tuple': 'like a list, it is sequence of Python objects, however it is immutable',
    'set': 'an unordered collection data type that is iterable, mutable and has no duplicate elements',
    'dictionary': 'maps a set of objects (keys) to another set of objects (values)',
}
for word, definition in programming_words.items():
    print(f"\n{word.upper()}:", end="")
    print(f"\t{definition}")

# Rivers
rivers = {
    'nile river': [
        'egypt', 'sudan', 'south sudan', 'eritrea', 'ethiopia', 'kenya', 'congo', 'burundi', 'rwanda', 'uganda', 'tanzania'],
    'amazon river': [
        'peru', 'bolivia', 'venezuela', 'colombia', 'ecuador', 'brazil'],
    'yangtze river': ['china']
}
for river, countries in rivers.items():
    print(f"The {river.title()} runs through the following countries:")
    for country in countries:
        print(f"- {country.title()}")

# Polling
join_poll = ['jeffrey', 'ken', 'sevan', 'joan']
respondent = ['jeffrey', 'ken']
for joinee in join_poll:
    if joinee in respondent:
        print(f"Thank you {joinee.title()} for responding.")
    else:
        print(f"{joinee.title()} please take the poll!")

# People (complex)
person_info = {
    'person_1': {
        "first_name": "Jeffrey",
        "last_name": "Lozano",
        "age": "29",
        "city": "San Mateo, Rizal"
    },
    'person_2': {
        "first_name": "Ken",
        "last_name": "Hufancia",
        "age": "26",
        "city": "Naga City"
    },
    'person_3': {
        "first_name": "Gabriel",
        "last_name": "Tiongson",
        "age": "28",
        "city": "Marikina City"
    }
}
# Bug fix: the original loop variable was also named `person_info`,
# shadowing (and after the loop clobbering) the dict being iterated.
for person_key, info in person_info.items():
    print(f"{person_key}:")
    print(f"\t-{info['first_name']}")
    print(f"\t-{info['last_name']}")
    print(f"\t-{info['age']}")
    print(f"\t-{info['city']}")
|
e504c623fc132d34d87c2bd1c2f14e59a2f86381
|
Python
|
xiaohuanlin/Algorithms
|
/Leetcode/1557. Minimum Number of Vertices to Reach All Nodes.py
|
UTF-8
| 1,864
| 4.03125
| 4
|
[] |
no_license
|
'''
Given a directed acyclic graph, with n vertices numbered from 0 to n-1, and an array edges where edges[i] = [fromi, toi] represents a directed edge from node fromi to node toi.
Find the smallest set of vertices from which all nodes in the graph are reachable. It's guaranteed that a unique solution exists.
Notice that you can return the vertices in any order.
Example 1:
Input: n = 6, edges = [[0,1],[0,2],[2,5],[3,4],[4,2]]
Output: [0,3]
Explanation: It's not possible to reach all the nodes from a single vertex. From 0 we can reach [0,1,2,5]. From 3 we can reach [3,4,2,5]. So we output [0,3].
Example 2:
Input: n = 5, edges = [[0,1],[2,1],[3,1],[1,4],[2,4]]
Output: [0,2,3]
Explanation: Notice that vertices 0, 3 and 2 are not reachable from any other node, so we must include them. Also any of these vertices can reach nodes 1 and 4.
Constraints:
2 <= n <= 10^5
1 <= edges.length <= min(10^5, n * (n - 1) / 2)
edges[i].length == 2
0 <= fromi, toi < n
All pairs (fromi, toi) are distinct.
'''
from typing import *
import unittest
class Solution:
    def findSmallestSetOfVertices(self, n: int, edges: List[List[int]]) -> List[int]:
        """Return (ascending) the vertices with no incoming edge.

        In a DAG those are exactly the minimal set from which every
        node is reachable: anything with an incoming edge can be
        reached from one of its ancestors instead.
        """
        has_incoming = [False] * n
        for _, dst in edges:
            has_incoming[dst] = True
        return [v for v in range(n) if not has_incoming[v]]
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.findSmallestSetOfVertices."""
    def test_case(self):
        # (input args, expected output) pairs.
        examples = (
            ((5, [[0,1],[2,1],[3,1],[1,4],[2,4]]), [0, 2, 3]),
        )
        for first, second in examples:
            self.assert_function(first, second)
    def assert_function(self, first, second):
        self.assertEqual(Solution().findSmallestSetOfVertices(*first), second,
                         msg="first: {}; second: {}".format(first, second))


# Run the tests when the module is executed.
unittest.main()
| true
|
05008032fdb3c51d01923b78ce822c452a250a01
|
Python
|
YuriiKhomych/ITEA-BC
|
/Vlad_Hytun/4_iterations-Hytun_Vlad/hw/HW_41_iterations-Hytun_Vlad.py
|
UTF-8
| 478
| 4.46875
| 4
|
[] |
no_license
|
# 1. Write a Python program that accepts a string
# and calculate the number of digits and letters.
digit = 0
letter = 0
my_string = input("Enter please your string: ")
# Classify each character: decimal digit, alphabetic letter, or neither.
for ch in my_string:
    if ch.isdigit():
        digit += 1
    elif ch.isalpha():
        letter += 1
print(f"Here is {digit} symbols of digit")
print(f"Here is {letter} symbols of letters")
| true
|
5dd9475718203d40903134af0ee33277f00c4935
|
Python
|
elenatheresa/CSCI-160
|
/elenaCorpus_CSCI160_tuesday_parta-2.py
|
UTF-8
| 701
| 4.0625
| 4
|
[] |
no_license
|
'''
Elena Corpus
CSCI 160
Tuesday 5-7 pm
asking the user what kind of shape they wish to draw, either rectangle or triangle
'''
shape = input("Choose between rectangle or triangle: ")
rectangle = 'rectangle'
triangle = 'triangle'
if shape == rectangle:
    print("Enter width: ")
    width = int(input())
    print("Enter height: ")
    height = int(input())
    # Draw a hollow rectangle: solid first and last rows, bordered middle rows.
    for i in range(height):
        if i in[0]:
            # Top row: width stars separated by spaces (2*width chars wide).
            print("* "*(width))
        elif i in[(height-1)]:
            # Bottom row, same as the top.
            print("* "*(width))
        else:
            # NOTE(review): middle rows are width+1 chars wide, so the right
            # border does not line up with the top/bottom rows -- confirm
            # whether this rendering is intentional.
            print("*"+" "*(width-2)+" *")
elif shape == triangle:
    print("Enter height: ")
    height = int(input())
    # Left-aligned triangle: row i has i stars.
    for i in range(1, height + 1):
        print("*" * i)
| true
|
59d10ed580ce5d5e0b9b721f5741a40eb354bc89
|
Python
|
Stanleyli1984/myscratch
|
/lc/prob_72.py
|
UTF-8
| 1,027
| 3.28125
| 3
|
[] |
no_license
|
# Python 2 code (xrange, print statement).  Indentation reconstructed from a
# whitespace-stripped source -- the inner-loop nesting below follows the
# standard patience-style LCS update and should be verified against the
# original file.
class Solution:
    # @param {string} word1
    # @param {string} word2
    # @return {integer}
    def minDistance(self, word1, word2):
        # dp_array[i]: smallest end-index in word2 of a common subsequence of
        # length i found so far (patience-style longest-common-subsequence).
        dp_array = [float('-inf')] + [float('inf')] * (len(word2)) # At least how many chars are needed to generate inx number of matches
        # Find the maximal number of matches
        for char1 in word1:
            for idx2, char2 in enumerate(word2):
                if char1 == char2:
                    for i in xrange(1, len(dp_array)):
                        if dp_array[i-1] < idx2:
                            if dp_array[i] > idx2:
                                dp_array[i] = idx2
                        else:
                            # dp_array is non-decreasing, so no later slot
                            # can be improved by this match either.
                            break
        #print dp_array
        # idx1 = length of the longest common subsequence found.
        idx1 = 0
        for idx, v in enumerate(dp_array):
            if v != float('inf'):
                idx1 = idx
            else:
                break
        #print idx1
        # NOTE(review): for delete-only edit distance the expected formula is
        # len(word1) + len(word2) - 2*LCS; this expression gives a smaller
        # value (e.g. 1 for "ab"/"bc" where the distance is 2) -- confirm.
        return abs(len(word1)-len(word2)) + min(len(word1), len(word2)) - idx1


print Solution().minDistance("ab", "bc")
| true
|
4c2e422fbb0b877e8b7a9e0dd09a6d1df10ed86d
|
Python
|
shigeokitamura/atcoder
|
/abc126/c.py
|
UTF-8
| 224
| 2.875
| 3
|
[] |
no_license
|
# https://atcoder.jp/contests/abc126/tasks/abc126_c
import math

N, K = map(int, input().split())
# Expected probability that the score (die roll, then repeated doubling
# on coin flips) reaches at least K.
answer = 0
for die in range(1, N + 1):
    # Number of doublings needed before `die` reaches K.
    doublings = 0
    while die * pow(2, doublings) < K:
        doublings += 1
    # P(roll die) * P(winning that many coin flips in a row).
    answer += 1 / N * pow(1 / 2, doublings)
print(answer)
| true
|
ea12deb107b764393df6d6a9903c9c199a15f137
|
Python
|
alekhka/virtual-assistant-avika
|
/sewrite.py
|
UTF-8
| 664
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# Python 2 script: sends a single command character ('1', '2' or '3'),
# taken from the first CLI argument, to a device on serial port /dev/ttyS0.
import time
import serial
import sys

# 9600 baud, 8N1, 1s read timeout -- assumed to match the attached device;
# TODO confirm against its datasheet.
ser = serial.Serial(
    port='/dev/ttyS0',
    baudrate = 9600,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1
)
arg = sys.argv[1]
# Only '1', '2' and '3' are forwarded; anything else is rejected.
if arg == '1':
    print "arg is", arg
    ser.write('1')
    time.sleep(0.5)  # give the device time to process the byte
elif arg == '2':
    print "arg is", arg
    ser.write('2')
    time.sleep(0.5)
elif arg == '3':
    print "arg is", arg
    ser.write('3')
    time.sleep(0.5)
else:
    print "arg error"
|
e0e09895ea0d19dca122fed10be1075cb7510712
|
Python
|
2legit/python-anandology
|
/modules/7.py
|
UTF-8
| 583
| 4.03125
| 4
|
[] |
no_license
|
""" Write a function make_slug that takes a name converts it into a slug. A slug is a string where spaces and special characters are
replaced by a hyphen, typically used to create blog post URL from post title. It should also make sure there are no more than
one hyphen in any place and there are no hyphens at the biginning and end of the slug. """
# Python 2 script (print statements).
# NOTE(review): despite the docstring above, make_slug PRINTS the slug and
# returns None -- callers cannot use the result programmatically.
def make_slug(string):
    import re
    # \w+ runs are the slug words; everything between them collapses to '-'.
    name=r'\w+'
    print '-'.join(re.findall(name,string))


import sys
# Usage: python make_slug.py "some title"
if len(sys.argv)!=2:
    print 'error : invalid syntax\nusage : python make_slug.py "string"\n\n'
else:
    make_slug(sys.argv[1])
| true
|
b481e1e823f74b036746a446a224c139a8c07dff
|
Python
|
kaylezy/Batch-Three
|
/Ikorodu Center I/Python/Adedejiproject.py
|
UTF-8
| 2,499
| 3.9375
| 4
|
[] |
no_license
|
# Get the desired operation from the user.
print("welcome to our improved calculator")
print("For multiplication input'*',For addition input'+',For division input'/',For Subtraction input'-',")
print("========================================================================================")
user_input = input("what mathematical operation will you like to run:")

# One if/elif chain so exactly one branch runs per choice.
# Bug fix: the original used TWO separate chains ('*'/'+' then '/'/'-'),
# so after a successful '*' or '+' the second chain's `else` still fired
# and printed the error message.
if user_input == '*':
    first_number = float(input("Enter your first number:"))
    second_number = float(input("Enter your second number:"))
    # Bug fix: the original printed the quoted names 'first_number' and
    # 'second_number' instead of the entered values.
    print("The multiple of", first_number, "and", second_number, "=",
          first_number * second_number)
elif user_input == '+':
    print("Good you have choosen the Addition operation")
    print("==============================================")
    first_number = float(input("Enter your first number:"))
    second_number = float(input("Enter your second number:"))
    print("The additional of", first_number, "and", second_number, "=",
          first_number + second_number)
elif user_input == '/':
    first_number = float(input("Enter your first number:"))
    second_number = float(input("Enter your second number:"))
    # Robustness: guard against division by zero.
    if second_number == 0:
        print("Cannot divide by zero")
    else:
        print("The division of", first_number, "and", second_number, "=",
              first_number / second_number)
elif user_input == '-':
    print("Good you have choosen the Subtraction operation")
    print("=================================================")
    first_number = float(input("Enter your first number:"))
    second_number = float(input("Enter your second number:"))
    print("The subtraction of", first_number, "and", second_number, "=",
          first_number - second_number)
else:
    # Unrecognized operator.
    print("this is way too much oga")
|
121b78373fd3a4b7d9a4a20dbff9f0e7746350ec
|
Python
|
jleng7987/Algorithm-sorting
|
/面试真题整理/完美世界/盒子套.py
|
UTF-8
| 560
| 2.984375
| 3
|
[] |
no_license
|
# Box-nesting exercise: read n boxes as (width, height) pairs and (apparently)
# try to count how many boxes can nest inside each other.  Indentation was
# reconstructed from a whitespace-stripped source.
import math

n=int(input())
ls = []
lsd = []
for i in range(0,n):
    c, k = map(int, input().split())
    # Diagonal of the box -- computed but never used below.
    d = (math.pow(c, 2) + math.pow(k, 2)) ** 0.5
    ls.append([c,k])
# ls1 = set(ls)
# ls1.sort(reverse=True)
print(ls)


def minl (i, min, count=0):
    # NOTE(review): several defects here --
    #  * `&` is bitwise AND and binds tighter than `>`, so the condition
    #    parses as ls[i][0] > (min[0] & ls[i][1]) > min[1]; `and` was
    #    almost certainly intended;
    #  * the recursive calls' results are never returned, so every call
    #    with i < n returns None;
    #  * `min` shadows the builtin.
    if i == n:
        return count
    if ls[i][0] > min[0] & ls[i][1] > min[1]:
        min = ls[i]
        count = count +1
        minl(i+1,min,count)
    else:
        minl(i+1,min,count)


ls2 = []
for i in range(0,n):
    # NOTE(review): [0][0] indexes the literal list [0], so minbox is the
    # int 0 -- but minl then does min[0]/min[1] on it, which would raise.
    minbox = [0][0]
    count1 = minl(0, minbox)
    ls2.append(count1)
print(ls2)
| true
|
12d6053cb183e29dcdaa96ee7de1d4ee90afbb99
|
Python
|
Etherealskye/Mango
|
/MangoMain.py
|
UTF-8
| 17,563
| 2.921875
| 3
|
[] |
no_license
|
import os
import discord
import sys
import pandas as pd
import youtube_api.youtube_api_utils
from youtube_api import YouTubeDataAPI
from dotenv import load_dotenv
from discord.ext.commands import Bot
from discord.ext import commands
from HololiveStreamer import hololiveStreamer
from HololiveStream import HololiveStream
from JikanClient import JikanClient
load_dotenv()
#Load in discord token and youtube API key from environment variables
TOKEN = os.getenv('MANGO_TOKEN')
YT_KEY = os.getenv('YOUTUBE_KEY')
#Variables used with hololive commands
selectedGen = None   # generation number picked via m!holoGen (int or None)
holoStreamers = []   # hololiveStreamer entries for the selected generation
#setup bot
mango = commands.Bot(command_prefix='m!')
#create youtube data api client
yt = YouTubeDataAPI(YT_KEY)
#Create jikanClient to interact with JikanAPI
jikan = JikanClient()
@mango.event
async def on_ready():
    """Log to stdout once the bot has connected to Discord."""
    print(f'{mango.user} is online!')
    #Change status
#Command to let user search for a livestream
@mango.command(name = "holoStream", help = 'Use m!holoStream <channel name> to check if a channel is live or not')
async def Hololive(ctx,arg):
    """Look up *arg* as a YouTube channel and report its live/upcoming stream.

    Flow: search for the channel; if one is found, search for a live
    broadcast on it; if none, search for an upcoming one; reply with an
    embed or a plain-text notice.
    NOTE(review): the Data API sometimes reports a just-finished stream as
    "upcoming"; the totalViews == '0' check below works around that.
    """
    initialSearch = yt.search(q=arg, search_type='channel')
    #ID's we will be using for the channel and livestream to send requests
    stream = HololiveStream()
    #If we get a matching channel, we get the id and title. Otherwise, we tell the user that no matching channel was found in the search
    #(Hololive streamers will usually be at the top of the list due to their distinct names, no need to check) - m!hologen and m!holoselect will be used for
    #direct selection anyways
    if len(initialSearch)>0:
        stream.channelID = initialSearch[0]['channel_id']
        stream.channelTitle = initialSearch[0]['channel_title']
        stream.channelImage = initialSearch[0]['video_thumbnail']
        print(stream.channelID)
    else:
        await ctx.send('No channel was found, please double check spelling and search again~')
    #If the channelID exists, proceed to see if they are live
    if(hasattr(stream,'channelID')):
        channelState = yt.search(q = stream.channelTitle, event_type = "live", type = "video")
        print(channelState)
        #If we get a response (meaning that the channel is live), proceed to grab the details of the stream and send it as an embed
        if len(channelState)>0 and channelState[0]['channel_id'] == stream.channelID:
            print("here")
            stream.streamID = channelState[0]['video_id']
            stream.streamTitle = channelState[0]['video_title']
            stream.streamThumbnail = channelState[0]['video_thumbnail']
            stream.streamDesc = channelState[0]['video_description']
            #Send another request to get info on view and like count of the stream
            videoData = yt.get_video_metadata(stream.streamID)
            stream.totalViews = videoData['video_view_count']
            stream.likes = videoData['video_like_count']
            print("data sucesfully obtained")
            #create the embed
            embed = discord.Embed(title = stream.streamTitle,description = stream.streamDesc,colour = discord.Colour(0x2abdb5))
            #Modify some attributes
            embed.set_thumbnail(url=stream.streamThumbnail)
            embed.set_author(name = stream.channelTitle,icon_url=stream.channelImage)
            embed.url='https://www.youtube.com/watch?v='+stream.streamID
            embed.add_field(name = "\u200b",value = "**Total viewers: **" + stream.totalViews + "\n**Likes: **" + stream.likes ,inline = True)
            await ctx.send(embed=embed)
        #If there are no livestreams, check if there is an upcoming stream or not
        else:
            #Send a second search to see if there are any upcoming streams for the selected hololive streamer
            secondSearch = yt.search(channel_id=stream.channelID, search_type='video', event_type='upcoming',)
            #If there is an upcoming stream scheduled, proceed to parse for data
            if len(secondSearch)>0:
                stream.streamID = secondSearch[0]['video_id']
                stream.streamTitle = secondSearch[0]['video_title']
                stream.streamThumbnail = secondSearch[0]['video_thumbnail']
                stream.streamDesc = secondSearch[0]['video_description']
                #Send another request to get info on view and like count of the upcoming stream
                #print(stream.streamID)
                videoData = yt.get_video_metadata(stream.streamID)
                stream.totalViews = videoData['video_view_count']
                stream.likes = videoData['video_like_count']
                print("data sucesfully obtained")
                #Check if we actually got an upcoming stream (This is due to youtube's data api returning streams that have just finished as "upcoming" for some reason)
                #We can only check this by sending a second request to VideoData - the upcoming stream will have 0 views because it has not premeried yet
                if stream.totalViews == '0':
                    #Let user know that there are no current live streams, but that there is an upcoming one
                    await ctx.send(stream.channelTitle + " has no current livestreams, but does have an upcoming stream in the near future:")
                    #create the embed
                    embed = discord.Embed(title = stream.streamTitle,description = stream.streamDesc,colour = discord.Colour(0x2abdb5))
                    #Modify some attributes
                    embed.set_thumbnail(url=stream.streamThumbnail)
                    embed.set_author(name = stream.channelTitle,icon_url=stream.channelImage)
                    embed.url='https://www.youtube.com/watch?v='+stream.streamID
                    embed.set_footer(text='If the channel displayed is not the hololive streamer you are looking for, try using m!hologen and m!holoselect to directly check their channel')
                    embed.add_field(name = "\u200b",value = "**Total viewers: **" + stream.totalViews + "\n**Likes: **" + stream.likes ,inline = True)
                    await ctx.send(embed=embed)
                elif stream.totalViews != '0':
                    await ctx.send(stream.channelTitle + ' is not currently streaming live nor has any upcoming livestreams\n'
                    +'If the channel displayed is not the hololive streamer you are looking for, try using m!hologen and m!holoselect to directly check their channel')
                #If no upcoming stream, let users know that channel is not live nor are there are any upcoming stream
                # NOTE(review): this else is unreachable — the elif above
                # already covers every value other than '0'.
                else:
                    await ctx.send(stream.channelTitle + ' is not currently streaming live nor has any upcoming livestreams\n'
                    +'If the channel displayed is not the hololive streamer you are looking for, try using m!hologen and m!holoselect to directly check their channel')
#Command that lists all the supported hololive groups
@mango.command(name='holoList', help = 'Displays list of supported hololive groups')
async def hololist(ctx):
    """Show the numbered list of hololive generations the bot supports."""
    group_lines = [
        '**0.** Hololive Gen 0',
        '**1.** Hololive Gen 1',
        '**2.** Hololive Gen 2',
        '**3.** Hololive Gen 3',
        '**4.** Hololive Gen 4',
        '**5.** Hololive gamers',
    ]
    listing = discord.Embed(title = "List of supported Hololive generations",
                            description = '\n'.join(group_lines),
                            colour = discord.Colour(0x42b9f5))
    listing.set_footer(text = 'use m!hologen <list number> to get more details on each generation')
    await ctx.send(embed = listing)
#Command that brings up the members of a hololive group/generation
@mango.command(name='holoGen', help = 'Use m!holoGen <group number> to select a group from the displayed groups')
async def holoGen(ctx,arg):
    """Cache the members of hololive generation *arg* and list them.

    Channel IDs come from environment variables named '<gen>.<i>', with a
    '<gen>_SIZE' variable giving the member count; results are stored in
    the module-level holoStreamers list for m!holoSelect.
    TypeError covers a missing '_SIZE' env var (int(None)); ValueError a
    non-numeric *arg*.
    """
    global holoStreamers
    global selectedGen
    holoStreamers.clear()
    try:
        selectedGen = int(arg)
        memberNum = os.getenv(arg + '_SIZE')
        displayString = ""
        print(memberNum)
        for i in range(int(memberNum)):
            currentID = os.getenv(arg+'.'+f'{i}')
            print(currentID)
            nameSearch = yt.search(channel_id=currentID,search_type='channel')
            print(nameSearch)
            channelName = nameSearch[0]['channel_title']
            print(channelName)
            channelProfile = nameSearch[0]['video_thumbnail']
            print(channelProfile)
            streamer = hololiveStreamer(group = arg, title = channelName, profile = channelProfile)
            holoStreamers.append(streamer)
            displayString = displayString + "**"+f'{i+1}'+ '.** ' + channelName +"\n"
        embed = discord.Embed(title = 'Members of ' + os.getenv(arg) +':',description = displayString, colour = discord.Colour(0x42b9f5))
        embed.set_footer(text = 'use m!holoselect <list number> to get the status on each member')
        await ctx.send(embed = embed)
    except (TypeError, ValueError):
        await ctx.send('Please select a valid number from the displayed list!')
#Command used to selelct a hololiveStreamers for more details on if they're livestreaming or not
@mango.command(name='holoSelect', help = 'Use m!holoSelect <list number> to select a member from the group displayed')
async def holoselect(ctx,arg):
    """Report the live/upcoming stream of member *arg* of the chosen group.

    Requires m!holoGen to have populated selectedGen/holoStreamers first.
    NOTE(review): shares the "upcoming stream with 0 views" workaround with
    the holoStream command; see the totalViews == '0' check below.
    """
    global selectedGen
    #Makes sure the user has actually selected a generation/group to select a member from
    #print(selectedGen)
    if selectedGen != None:
        try:
            #The ID of the hololive streamer we want to search for
            searchID = os.getenv(f'{selectedGen}'+'.'+f'{int(arg)-1}')
            #Create hololive stream object and set some attributes
            stream = HololiveStream()
            stream.channelID = searchID
            stream.channelTitle = holoStreamers[int(arg)-1].channel_title
            stream.channelImage = holoStreamers[int(arg)-1].channel_profile
            #Send search to api to see if channel is live
            search = yt.search(channel_id=searchID, search_type='video', event_type='live',)
            #If we get a response (meaning that the channel is live), proceed to grab the details of the stream and send it as an embed
            if len(search)>0:
                stream.streamID = search[0]['video_id']
                stream.streamTitle = search[0]['video_title']
                stream.streamThumbnail = search[0]['video_thumbnail']
                stream.streamDesc = search[0]['video_description']
                #Send another request to get info on view and like count of the stream
                #print(stream.streamID)
                videoData = yt.get_video_metadata(stream.streamID)
                stream.totalViews = videoData['video_view_count']
                stream.likes = videoData['video_like_count']
                print("data sucesfully obtained")
                #create the embed
                embed = discord.Embed(title = stream.streamTitle,description = stream.streamDesc,colour = discord.Colour(0x2abdb5))
                #Modify some attributes
                embed.set_thumbnail(url=stream.streamThumbnail)
                embed.set_author(name = stream.channelTitle,icon_url=stream.channelImage)
                embed.url='https://www.youtube.com/watch?v='+stream.streamID
                embed.add_field(name = "\u200b",value = "**Total viewers: **" + stream.totalViews + "\n**Likes: **" + stream.likes ,inline = True)
                await ctx.send(embed=embed)
            #If no current livestreams, see if there is an upcoming stream or not
            else:
                #Send a second search to see if there are any upcoming streams for the selected hololive streamer
                secondSearch = yt.search(channel_id=searchID, search_type='video', event_type='upcoming',)
                #If there is an upcoming stream scheduled, proceed to parse for data
                if len(secondSearch)>0:
                    stream.streamID = secondSearch[0]['video_id']
                    stream.streamTitle = secondSearch[0]['video_title']
                    stream.streamThumbnail = secondSearch[0]['video_thumbnail']
                    stream.streamDesc = secondSearch[0]['video_description']
                    #Send another request to get info on view and like count of the upcoming stream
                    #print(stream.streamID)
                    videoData = yt.get_video_metadata(stream.streamID)
                    stream.totalViews = videoData['video_view_count']
                    stream.likes = videoData['video_like_count']
                    print("data sucesfully obtained")
                    #Check if we actually got an upcoming livestream - youtube data api will sometimes give us a livestream that just ended recently
                    #We can only check this by sending a second request for video metadata and seeing if total views = 0 or not
                    if stream.totalViews == '0':
                        #Let user know that there are no current live streams, but that there is an upcoming one
                        await ctx.send(holoStreamers[int(arg)-1].channel_title + " has no current livestreams, but does have an upcoming stream in the near future:")
                        #create the embed
                        embed = discord.Embed(title = stream.streamTitle,description = stream.streamDesc,colour = discord.Colour(0x2abdb5))
                        #Modify some attributes
                        embed.set_thumbnail(url = stream.streamThumbnail)
                        embed.set_author(name = stream.channelTitle,icon_url=stream.channelImage)
                        embed.url='https://www.youtube.com/watch?v='+stream.streamID
                        embed.add_field(name = "\u200b",value = "**Total viewers: **" + stream.totalViews + "\n**Likes: **" + stream.likes ,inline = True)
                        await ctx.send(embed=embed)
                    elif stream.totalViews != '0':
                        await ctx.send(holoStreamers[int(arg)-1].channel_title + ' is not currently streaming live nor has any upcoming livestreams~')
                    #If no upcoming stream, let users know that channel is not live nor are there are any upcoming stream
                    # NOTE(review): unreachable — the elif already covers != '0'.
                    else:
                        await ctx.send(holoStreamers[int(arg)-1].channel_title + ' is not currently streaming live nor has any upcoming livestreams~')
        except(ValueError,IndexError):
            await ctx.send('Please select a valid member number from the generation/group!')
    elif selectedGen == None:
        await ctx.send('Please select a generation first!')
#Command used to search for anime and display a list of results
@mango.command(name = 'animeSearch', help = 'Use m!animeSearch <anime name> to search for an anime and view a list of results')
async def animeSearch(ctx, arg):
    """Run a Jikan anime search and post the result list (if any)."""
    jikan.animeSearch(arg)
    # An empty result set would render an empty embed, so reply with a
    # plain notice instead and bail out early.
    if len(jikan.animeList) == 0:
        await ctx.send("No anime found for: '" + arg + "'")
        return
    listing = jikan.animeListDisplay(arg)
    await ctx.send(embed = listing)
#Commnand used to select an anime from the displayed list of results
@mango.command(name = 'animeSelect', help = 'Use m!animeSelect <anime number> to view an anime from the displayed list')
async def animeSelect(ctx,arg):
    """Post the embed for entry *arg* of the last m!animeSearch results."""
    try:
        if len(jikan.animeList) == 0:
            # Nothing cached yet — the user has to search first.
            await ctx.send("Please use m!animeSearch <anime name> first!")
        else:
            chosen = int(arg) - 1   # 1-based on screen, 0-based in the list
            await ctx.send(embed = jikan.animeEmbed(chosen))
    except (ValueError, IndexError):
        await ctx.send("Please enter a number from the displayed list!")
#Command used to search for manga and display the results
@mango.command(name = 'mangaSearch', help = 'Use m!mangaSearch <manga name> to search for a manga and view a displayed list of results')
async def mangaSearch(ctx, arg):
    """Run a Jikan manga search and post the result list (if any)."""
    jikan.mangaSearch(arg)
    # Guard against rendering an empty embed when nothing matched.
    if len(jikan.mangaList) == 0:
        await ctx.send("No manga found for: '" + arg + "'")
        return
    listing = jikan.mangaListDisplay(arg)
    await ctx.send(embed = listing)
#Command used to select manga from the displayed list of results
@mango.command(name = 'mangaSelect', help = 'Use m!mangaSelect <manga number> to view a manga from the displayed list of results')
async def mangaSelect(ctx, arg):
    """Post the embed for entry *arg* of the last m!mangaSearch results."""
    try:
        #Make sure that we have successfully gotten a displayed list of manga
        # BUG FIX: the original used `>>` (right shift) instead of `>`;
        # `len(...) >> 0` happens to equal len(...) so it worked by
        # accident — use the comparison that was intended.
        if len(jikan.mangaList) > 0:
            await ctx.send(embed = jikan.mangaEmbed(int(arg)-1))
        else:
            await ctx.send("Please use m!mangaSearch <manga name> first!")
    except(ValueError,IndexError):
        # NOTE(review): sibling animeSelect ends this message with '!' —
        # confirm whether the difference is intentional.
        await ctx.send("Please enter a number from the displayed list")
@mango.event
async def on_command_error(ctx, error):
    """Send a short hint for the two most common command errors."""
    if isinstance(error, commands.errors.MissingRequiredArgument):
        await ctx.send("Please enter the required arg(s)!")
    if isinstance(error, commands.CommandNotFound):
        await ctx.send("Please enter a valid command!")
# Blocking call: logs in and runs the bot's event loop until shutdown.
mango.run(TOKEN)
| true
|
be5cf3d11d65ea23b23970d73e245e5d9b305398
|
Python
|
EstradaAlex20/Computer-Graphics-1-Project
|
/Texture.py
|
UTF-8
| 4,225
| 2.65625
| 3
|
[] |
no_license
|
from Program import *
import io
import png
import zipfile
import os.path
class Texture:
    """Thin wrapper around an OpenGL texture object.

    Subclasses create the GL texture and fill in ``self.tex``; ``type`` is
    the GL texture target (e.g. GL_TEXTURE_CUBE_MAP, GL_TEXTURE_2D_ARRAY).
    """
    def __init__(self, typ):
        self.type = typ   # GL texture target
        self.tex = None   # GL texture name; set by subclasses
    def bind(self,unit):
        # Attach this texture to texture unit `unit`.
        glActiveTexture(GL_TEXTURE0 + unit)
        glBindTexture(self.type,self.tex)
    def unbind(self,unit):
        # Unbind whatever texture of this target is bound on unit `unit`.
        glActiveTexture(GL_TEXTURE0 + unit)
        glBindTexture(self.type,0)
class TextureCube(Texture):
    """Base class for cube map textures; ``size`` is the square edge length."""
    def __init__(self,size):
        Texture.__init__(self, GL_TEXTURE_CUBE_MAP)
        self.size = size   # edge length in pixels (may be None until loaded)
class ImageTextureCube(TextureCube):
    """Cube map loaded from six image files under assets/.

    ``namepattern`` is a %-style pattern with one integer slot (e.g.
    "sky%d.png"); faces 0..5 map to GL_TEXTURE_CUBE_MAP_POSITIVE_X + i.
    """
    def __init__(self, namepattern):
        super().__init__(None)   # size is discovered from the first face
        tmp = array.array("I", [0])
        glGenTextures(1,tmp)
        self.tex = tmp[0]
        self.bind(0)
        self.loadImages(namepattern)
    def loadImages(self, namepattern):
        """Decode the six face images, upload them, and build mipmaps.

        Raises RuntimeError when a face is not square or the faces differ
        in size. NOTE(review): unlike ImageTexture2DArray, no flipY is
        applied here — confirm the orientation is intended.
        """
        for i in range(6):
            fname = os.path.join("assets", namepattern % i)
            # BUG FIX: close the file handle (was open(...).read() with no
            # close); also removed an io.BytesIO() that was never used.
            with open(fname, "rb") as face_file:
                raw = face_file.read()
            pw, ph, fmt, pix = png.decode(raw)
            if pw != ph:
                raise RuntimeError("Cubemap must be square: "+fname)
            if i == 0:
                self.size = pw
            elif self.size != pw:
                raise RuntimeError("Cubmap sides: Wrong size: " + fname)
            glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGBA8,
                         self.size, self.size, 0, GL_RGBA,
                         GL_UNSIGNED_BYTE, pix)
        glGenerateMipmap(GL_TEXTURE_CUBE_MAP)
class Texture2DArray(Texture):
    """Base class for 2D array textures: w x h pixels, `slices` layers."""
    def __init__(self,w,h,slices):
        Texture.__init__(self, GL_TEXTURE_2D_ARRAY)
        self.w = w
        self.h = h
        self.slices = slices
class DataTexture2DArray(Texture2DArray):
    """2D array texture uploaded from raw RGBA bytes.

    NOTE(review): __init__ uploads without generating mipmaps, while
    setData() does generate them — confirm that asymmetry is intended.
    """
    def __init__(self, w, h, slices, pix):
        Texture2DArray.__init__(self,w,h,slices)
        tmp = array.array("I", [0])
        glGenTextures(1,tmp)
        self.tex = tmp[0]
        self.bind(0)
        glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA, w,h,slices, 0, GL_RGBA, GL_UNSIGNED_BYTE, pix)
        self.unbind(0)
    def setData(self,w,h,slices,pix):
        """Replace the texture contents; reallocates if dimensions changed."""
        self.bind(0)
        if w == self.w and h == self.h and slices == self.slices:
            # Same shape: cheap in-place update of the existing storage.
            glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, 0,w,h,slices, GL_RGBA, GL_UNSIGNED_BYTE, pix)
        else:
            # Shape changed: reallocate storage and remember the new shape.
            glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_RGBA,w,h,slices, 0, GL_RGBA, GL_UNSIGNED_BYTE, pix)
            self.w = w
            self.h = h
            self.slices = slices
        glGenerateMipmap(GL_TEXTURE_2D_ARRAY)
        self.unbind(0)
class ImageTexture2DArray(DataTexture2DArray):
    """2D array texture assembled from PNG/JPG files and/or ORA/ZIP archives.

    All images must share the same dimensions; each becomes one slice, in
    argument order (archive entries sorted by name).
    """
    def __init__(self, *files):
        membuf = io.BytesIO()
        size = [None, None]   # [w, h], discovered from the first image
        slices = 0

        def add_image(raw):
            # Decode one image, verify its size, flip it, append its pixels.
            # (Shared by both branches below — was duplicated verbatim.)
            nonlocal slices
            pw, ph, fmt, pix = png.decode(raw)
            pix = png.flipY(pw, ph, pix)
            if size[0] is None:
                size[0], size[1] = pw, ph
            elif size[0] != pw or size[1] != ph:
                raise RuntimeError("Size mismatch")
            slices += 1
            membuf.write(pix)

        for fname in files:
            if fname.endswith(".png") or fname.endswith(".jpg"):
                fname = os.path.join("assets", fname)
                # BUG FIX: close the file handle (was open(...).read()).
                with open(fname, "rb") as fh:
                    add_image(fh.read())
            elif fname.endswith(".ora") or fname.endswith(".zip"):
                # NOTE(review): archive paths are NOT prefixed with
                # "assets/" unlike loose images — confirm intended.
                z = zipfile.ZipFile(fname)
                for entry in sorted(z.namelist()):
                    if entry.endswith(".png") or entry.endswith(".jpg"):
                        add_image(z.open(entry).read())
            else:
                raise RuntimeError("Cannot read file " + fname)
        DataTexture2DArray.__init__(self, size[0], size[1], slices, membuf.getbuffer())
| true
|
6ef68666458f6802f71e6e46ee1e82b967015c32
|
Python
|
LuterGS/AI2020_H2
|
/LuterGS/Preprocess.py
|
UTF-8
| 7,007
| 2.984375
| 3
|
[] |
no_license
|
from tqdm import tqdm
import numpy as np
import os
from konlpy.tag import Okt
# File init
PATH = os.path.dirname(os.path.abspath(__file__))  # directory of this module
TAG = Okt()  # KoNLPy Okt part-of-speech tagger, shared by all functions
# Every POS tag Okt can emit; a tag's 1-based index in this list is the
# integer code used by convert_data2tagfeature (0 is reserved for spaces).
TAG_NAME = [
    "Noun",
    "Verb",
    "Adjective",
    "Determiner",
    "Adverb",
    "Conjunction",
    "Exclamation",
    "Josa",
    "PreEomi",
    "Eomi",
    "Suffix",
    "Punctuation",
    "Foreign",
    "Alpha",
    "Number",
    "Unknown",
    "KoreanParticle",
    "Hashtag",
    "ScreenName",
    "Email",
    "URL",
    "Modifier",
    "VerbPrefix"
]
# 파라미터로 입력받은 파일에 저장된 단어 리스트를 딕셔너리 형태로 저장
def load_vocab(filename):
    """Read one symbol per line from *filename* and build both lookup maps.

    Indices 0 and 1 are reserved for the <PAD> and <UNK> symbols.
    Returns (symbol->index, index->symbol).
    """
    vocab_file = open(filename,'r',encoding='utf8')
    print("{} vocab file loading...".format(filename))
    # Seed both directions with the reserved default symbols.
    symbol2idx = {"<PAD>": 0, "<UNK>": 1}
    idx2symbol = {0: "<PAD>", 1: "<UNK>"}
    # File symbols start right after the reserved slots.
    for position, line in enumerate(tqdm(vocab_file.readlines()), start=len(symbol2idx)):
        symbol = line.strip()
        symbol2idx[symbol] = position
        idx2symbol[position] = symbol
    # NOTE(review): the file handle is left to the GC, matching the original.
    return symbol2idx, idx2symbol
# 입력 데이터를 고정 길이의 벡터로 표현하기 위한 함수
def convert_data2feature(data, symbol2idx, max_length=None):
    """Encode a whitespace-tokenized string as a fixed-length index vector.

    Tokens beyond max_length are truncated; unknown tokens map to <UNK>
    and unused tail positions stay <PAD> (0).

    BUG FIX: dtype was np.int, an alias removed in NumPy 1.24 — the
    builtin int (same platform integer) is used instead.
    """
    # Fixed-length zero (= <PAD>) vector.
    feature = np.zeros(shape=(max_length), dtype=int)
    # Split the input sentence on whitespace.
    words = data.split()
    unk = symbol2idx["<UNK>"]
    for idx, word in enumerate(words[:max_length]):
        # dict.get collapses the membership test + lookup into one step.
        feature[idx] = symbol2idx.get(word, unk)
    return feature
# 파라미터로 입력받은 파일로부터 tensor객체 생성
def load_data(filename, word2idx, tag2idx, max_length=150):
    """Load TSV rows of (id, sentence[, tags]) into fixed-length index arrays.

    Returns (inputs, tags) as numpy arrays of shape (n_rows, max_length).

    BUG FIX: the original indexed a module-level ``config["max_length"]``,
    but no ``config`` exists anywhere in this module, so every call raised
    NameError; the length is now a backward-compatible parameter.
    BUG FIX: rows without a tags column previously left ``tags`` unbound
    (NameError on the first such row, a stale value afterwards); they now
    yield an all-<PAD> tag vector.
    """
    file = open(filename,'r',encoding='utf8')
    indexing_inputs, indexing_tags = [], []
    print("{} file loading...".format(filename))
    # Data rows look like (characters space-separated, <SP> = word gap):
    # 세 종 대 왕 은 <SP> 조 선 의 <SP> 4 대 <SP> 왕 이 야 \t B_PS I_PS I_PS I_PS O <SP> B_LC I_LC O <SP> O O <SP> O O O
    for line in tqdm(file.readlines()):
        try:
            row_id, sentence, tags = line.strip().split('\t')
        except ValueError:   # was a bare except; ValueError is the unpack error
            row_id, sentence = line.strip().split('\t')
            tags = ""
        indexing_inputs.append(convert_data2feature(sentence, word2idx, max_length))
        indexing_tags.append(convert_data2feature(tags, tag2idx, max_length))
    return np.array(indexing_inputs), np.array(indexing_tags)
# Helper to turn a tensor object into plain Python lists.
def tensor2list(input_tensor):
    """Convert a (possibly GPU-resident, grad-tracking) tensor to a list."""
    host_tensor = input_tensor.cpu().detach()
    return host_tensor.numpy().tolist()
def convert_data2tagfeature(sentence, max_length, embedding_dim):
    """Vectorize a sentence from its part-of-speech information.

    For example, "나는 건국대 학생" maps under Okt to pairs such as
    (나, Noun), (는, Josa), (건국대, Noun), (학생, Noun); every character of
    a morpheme is assigned that morpheme's 1-based index in TAG_NAME
    (0 marks a space), and each per-character code is broadcast into a
    constant vector of length embedding_dim.

    BUG FIX: dtype was np.float, an alias removed in NumPy 1.24 — the
    builtin float is used instead (same C double).

    :param sentence: raw sentence string (spaces are significant)
    :param max_length: fixed number of character positions to emit
    :param embedding_dim: width of the constant vector per character
    :return: numpy array of shape (max_length, embedding_dim)
    """
    result = [np.full(shape=embedding_dim, fill_value=0, dtype=float) for i in range(max_length)]
    pos = TAG.pos(sentence)
    i = 0
    for morphs in pos:
        # 1-based code of this morpheme's POS tag (0 is reserved for space).
        num = TAG_NAME.index(morphs[1]) + 1
        # print(morphs, num, i)
        for letter in morphs[0]:
            result[i] = np.full(shape=embedding_dim, fill_value=num, dtype=float)
            i += 1
            # print("normal, ", i)
            if i == max_length:
                break
        if i == max_length:
            break
        try:
            # A space between morphemes keeps its 0 code and consumes a slot.
            if sentence[i] == " ":
                result[i] = np.full(shape=embedding_dim, fill_value=0, dtype=float)
                i += 1
                # print('TRY : ', i)
                if i == max_length:
                    break
        except IndexError:
            break
    if len(result) != max_length:
        # NOTE(review): exit(1) in a library function kills the whole
        # process; raising would be friendlier, kept as-is for behavior.
        print("데이터 전처리가 잘못되었습니다! 프로그램을 종료합니다 :", sentence)
        exit(1)
    return np.asarray(result)
def get_one_posdata(filepath, embedding=14, max_length=150):
    """Build POS-code features for every sentence in a data file.

    :param filepath: path of the file to preprocess (relative paths resolve
        from the caller's working directory)
    :param embedding: the POS codes are broadcast into vectors of this width
        so they can be concatenated evenly with the biGRU layer outputs
    :param max_length: maximum sentence length in characters (default 150)

    Example: with max_length 20, an empty length-20 vector is created.
    For "나는 건국대 학생이다." Okt yields pairs such as (나, Noun),
    (는, Josa), (건국대, Noun), (학생, Noun), (이다, Josa), (., Punctuation)
    (illustrative; the actual split may differ). With Noun=1, Josa=2,
    Punctuation=3 and space=0 the sentence becomes
        나 는 <SP> 건 국 대 <SP> 학 생 이 다 .
        1  2  0   1  1  1  0   1  1  2  2  3 0...
    i.e. [12011101122300000000]; then every code is expanded into an
    `embedding`-wide constant vector, e.g. 1 -> [1 1 ... 1].

    :return: numpy array of shape [n_sentences][max_length][embedding]
    """
    result = []
    with open(filepath, "r", encoding="utf8") as file:
        for line in tqdm(file.readlines()):
            line = line.split("\t")
            # 3-column rows are (id, sentence, tags); otherwise the sentence
            # is the first column.
            if len(line) == 3:
                raw_sentence = line[1]
            else:
                raw_sentence = line[0]
            # Drop literal spaces, then turn <SP> markers into real spaces.
            result.append(convert_data2tagfeature(raw_sentence.replace(" ", "").replace("<SP>", " "), max_length=max_length, embedding_dim=embedding))
    return np.asarray(result)
if __name__ == "__main__":
    # One-off preprocessing: the commented np.save lines generated the .npy
    # files; the loads below just sanity-check their shapes.
    # np.save("ner_dev", get_one_posdata("../baseline/ner_dev.txt"))
    # np.save("ner_train", get_one_posdata("../baseline/ner_train.txt"))
    dev = np.load("ner_dev.npy")
    print(dev.shape)
    train = np.load("ner_train.npy")
    print(train.shape)
| true
|
a79a05b827aa945dddb12253b57a77e31a779d1e
|
Python
|
xxz/test-av2
|
/snippet/dynamic_calls.py
|
UTF-8
| 1,909
| 2.828125
| 3
|
[] |
no_license
|
class vm:
    """Minimal fake VM exposing a handful of commands for the dispatch demos."""
    def power_on(self):
        """Report that the VM booted."""
        return "vm is powered on"
    def exec_cmd(self, cmd):
        """Pretend to run *cmd* on the VM."""
        return "%s executed on vm" % cmd
    def revert_snapshot(self, name):
        """Pretend to roll back to snapshot *name*."""
        return "reverting to snapshot %s" % name
    def task(self, *args):
        """Catch-all command; always reports a failure."""
        return "something goes wrong"
    def three(self, arg1, arg2, arg3, argn):
        """Echo the four arguments back as a tuple."""
        return (arg1, arg2, arg3, argn)
class manager:
    # NOTE(review): this class is demo/scratch code and is broken in several
    # ways; the notes below flag each issue rather than guessing at fixes.
    def open_vm(self):
        # Creates the managed vm instance (never consulted by __getattr__).
        self.vm = vm()
    def __getattr__(self, name):
        # Falls back to the module-level `vm` *class*, so the attribute
        # returned is an unbound function that still needs a vm instance.
        f = getattr(vm, name)
        return f
    def revert_last_snapshot(self):
        # NOTE(review): self.revert_snapshot resolves via __getattr__ to
        # vm.revert_snapshot; calling it with only "last" makes "last" the
        # `self` argument, so this raises TypeError (missing `name`).
        return self.revert_snapshot("last")
    def _run_task(self, vm, func, args, task=False):
        # Looks up `func` on the given vm object and invokes it with `args`;
        # task=True (identity check!) wraps the result in a "task: ..." string.
        f = getattr(vm, func)
        if task is True:
            task = f(args)
            return "task: %s" % task
        else:
            return f(args)
    def revert_new_snaphost(self, vm, snapshot):
        # NOTE(review): `snaphost` below is a typo for the `snapshot`
        # parameter — this raises NameError whenever it is called.
        return self._run_task(vm, "revert_snapshot", snaphost, task=True)
    def exec_cmd(self, vm, cmd, args):
        # NOTE(review): positionally this passes cmd->args and args->task;
        # it only works because `task is True` fails for any non-True value.
        return self._run_task(vm, "exec_cmd", cmd, args)
class man:
    """Dispatch helper that resolves vm commands by name at call time."""
    def _run_cmd(self, vm, func, params=None, task=False):
        # Both branches of the original reduce to the same result: hand back
        # the resolved callable when no params were supplied, otherwise call
        # it with them. The `task` flag never changes the outcome, so it is
        # accepted for compatibility but ignored here.
        command = getattr(vm, func)
        if params is None:
            return command
        return command(params)
    def power_on(self, vm):
        """Return vm's bound power_on method (no params -> the handle itself)."""
        return self._run_cmd(vm,"power_on", task=True)
    def execute(self, vm, cmd, args=[]):
        """Run vm.exec_cmd with [cmd, args] as its single argument."""
        return self._run_cmd(vm, "exec_cmd", [cmd, args])
if __name__ == "__main__":
    # NOTE(review): Python 2 print statements — this file does not parse
    # under Python 3.
    vm = vm()   # rebinds the class name `vm` to an instance
    m = manager()
    # NOTE(review): this first call raises TypeError (see manager notes).
    print m.revert_last_snapshot()
    print m.exec_cmd(vm,"yoo","gigs")
    n = man()
    print n.power_on(vm)
    print n.execute(vm, "cmd",["arg1","arg2"])
| true
|
bcadf44db8076d68b7ecf81f09694e90d36de68d
|
Python
|
16kozlowskim/Software-Engineering
|
/scraper/news.py
|
UTF-8
| 1,092
| 3
| 3
|
[] |
no_license
|
import feedparser, csv, sys
from pyteaser import SummarizeUrl
def get_rss(search):
    """Fetch and parse the Google News RSS results for *search* (UK edition)."""
    feed_url = ('https://news.google.com/news/rss/search/section/q/'
                + search + '/' + search + '?hl=en&gl=GB&ned=us')
    return feedparser.parse(feed_url)
def get_data(rss, num):
    """Export the first *num* feed entries to ../fileStore/file.csv.

    Each entry produces three rows ('@'-delimited, '#'-quoted): title,
    link, and a one-line summary ('Summary Unavailable' when pyteaser
    cannot summarize the page).

    Removed: an unused `data = []` accumulator that was never read.
    """
    pathToCSV = '../fileStore/file.csv'
    with open(pathToCSV, 'w') as csvfile:
        wr = csv.writer(csvfile, delimiter='@', quotechar='#')
        index = 0
        for e in rss['entries']:
            # Stop once the requested number of entries has been written.
            if (index == int(num)):
                break
            wr.writerow([(e['title']).encode('utf-8')])
            wr.writerow([(e['link']).encode('utf-8')])
            summary = []
            try:
                for elem in SummarizeUrl(e['link'].encode('utf-8')):
                    summary.append(elem)
                wr.writerow([' '.join(summary).encode('utf-8').strip().replace('\n', '')])
            except TypeError:
                # SummarizeUrl returns None for unfetchable pages, making
                # the iteration above raise TypeError.
                wr.writerow(['Summary Unavailable'])
            index = index + 1
def main():
    # argv[1] = search term, argv[2] = max number of entries to export.
    get_data(get_rss(sys.argv[1]), sys.argv[2])
if __name__ == "__main__":
    main()
| true
|
1953080c273eef8f3ad7753d52e31d6b5632bce8
|
Python
|
nadav366/intro2cs-ex2
|
/quadratic_equation.py
|
UTF-8
| 1,431
| 4.46875
| 4
|
[] |
no_license
|
def quadratic_equation(a, b, c):
    """Solve a*x**2 + b*x + c = 0 for real roots (a assumed non-zero).

    :param a: numeric coefficient of x**2 (must not be 0)
    :param b: numeric coefficient of x
    :param c: numeric free coefficient
    :return: pair of solutions; a missing solution is returned as None
    """
    discriminant = b ** 2 - 4 * a * c
    if discriminant < 0:
        # Negative discriminant: no real solutions.
        return None, None
    denominator = 2 * a
    if discriminant == 0:
        # One repeated real root.
        return -b / denominator, None
    root = discriminant ** 0.5
    return (-b + root) / denominator, (-b - root) / denominator
def quadratic_equation_user_input():
    """Prompt for coefficients a b c (space-separated) and print the roots."""
    raw = input("Insert coefficients a, b, and c: ")
    tokens = raw.split()
    # Positional indexing keeps the original IndexError on short input.
    a = float(tokens[0])
    b = float(tokens[1])
    c = float(tokens[2])
    first, second = quadratic_equation(a, b, c)
    # Report 0, 1 or 2 solutions depending on which slots are filled.
    if first is None:
        print("The equation has no solutions")
    elif second is None:
        print("The equation has 1 solution: " + str(first))
    else:
        print("The equation has 2 solutions: " + str(first) + " and " + str(second))
| true
|
936ac996ada27dc7cce81fe832a3c0f8d06acd75
|
Python
|
adamsjoe/keelePython
|
/Week 3/11_4.py
|
UTF-8
| 1,393
| 4.46875
| 4
|
[] |
no_license
|
# Dictionaries in Python are Mutable. Dictionaries are passed to functions by reference.
# A dictionary dict is declared as:
# dict={'Andrew':5, 'Brian':3, 'Clive':2, 'David':4}
# it contains the names of volunteers and the number of times that they have volunteered for a particular duty. The dictionary is regularly updated by providing both the dictionary and a list to a function upd.
# For the current update the following list has been declared - each name represents a new volunteer session:
# ulist = ['Brian', 'David', 'Peter']
# Write and demonstrate the function upd.
def upd(lis):
    """Record one volunteering session for every name in *lis*.

    Mutates the module-level dictionary ``udict``: known names have their
    session count incremented, unseen names are added with a count of 1.

    NOTE(review): operates on the global ``udict`` rather than receiving
    the dictionary as a parameter, which the exercise text implies.
    """
    for name in lis:
        # dict.get with a default collapses the membership test, the
        # lookup and the first-time initialization into one expression.
        udict[name] = udict.get(name, 0) + 1
udict={'Andrew':5, 'Brian':3, 'Clive':2, 'David':4}   # name -> session count
ulist = ['Brian', 'David', 'Peter']                   # one new session each
# for information, print the dictionary before the update is performed
print(udict)
upd(ulist)
# print the dictionary after the update
print(udict)
| true
|
b77d1824eddaab7e4998c374ff761bac65a6f586
|
Python
|
muggin/string-kernels
|
/src/ngk_kernel.py
|
UTF-8
| 529
| 3.546875
| 4
|
[] |
no_license
|
def create_ngrams(text, n):
    """Create a set of ngrams of length n"""
    grams = set()
    for start in range(len(text) - n + 1):
        grams.add(text[start:start + n])
    return grams
def ngk(doc1, doc2, n):
    # Jaccard similarity of the two documents' n-gram sets; an empty
    # union (both documents shorter than n) counts as identical.
    first = create_ngrams(doc1, n)
    second = create_ngrams(doc2, n)
    union = first | second
    if len(union) == 0:
        return 1.0
    return len(first & second) * 1.0 / len(union)
if __name__ == "__main__":
    # NOTE(review): Python 2 print statements — this block is a syntax
    # error under Python 3. The assert is a quick self-check of the kernel.
    assert ngk("Das ist ein Test", "Das ist ein Test", 4) == 1.0
    print ngk("Das ist ein Test", "Das ist ein Test", 4)
    print ngk("Das ist ein Tlub", "Das ist ein Test", 4)
| true
|
cd3ba5abdd2e221f464815bbc9bd5d0f7ea3582f
|
Python
|
henrymendez/garage
|
/openai/venv/lib/python3.10/site-packages/langchain/document_loaders/sitemap.py
|
UTF-8
| 2,392
| 3.078125
| 3
|
[] |
no_license
|
"""Loader that fetches a sitemap and loads those URLs."""
import re
from typing import Any, Callable, List, Optional
from langchain.document_loaders.web_base import WebBaseLoader
from langchain.schema import Document
def _default_parsing_function(content: Any) -> str:
return str(content.get_text())
class SitemapLoader(WebBaseLoader):
    """Loader that fetches a sitemap and loads those URLs."""
    def __init__(
        self,
        web_path: str,
        filter_urls: Optional[List[str]] = None,
        parsing_function: Optional[Callable] = None,
    ):
        """Initialize with webpage path and optional filter URLs.

        Args:
            web_path: url of the sitemap
            filter_urls: list of strings or regexes that will be applied to
                filter the urls that are parsed and loaded (matched with
                re.match, i.e. anchored at the start of each URL)
            parsing_function: Function to parse bs4.Soup output
        """
        # lxml is required by the "xml" parser used in load(); fail early.
        try:
            import lxml  # noqa:F401
        except ImportError:
            raise ValueError(
                "lxml package not found, please install it with " "`pip install lxml`"
            )
        super().__init__(web_path)
        self.filter_urls = filter_urls
        self.parsing_function = parsing_function or _default_parsing_function
    def parse_sitemap(self, soup: Any) -> List[dict]:
        """Parse sitemap xml and load into a list of dicts."""
        els = []
        for url in soup.find_all("url"):
            loc = url.find("loc")
            if not loc:
                continue
            # Keep only URLs matching at least one filter (when filters given).
            if self.filter_urls and not any(
                re.match(r, loc.text) for r in self.filter_urls
            ):
                continue
            # Collect whichever standard sitemap tags this entry carries.
            # (The walrus in the comprehension requires Python >= 3.8.)
            els.append(
                {
                    tag: prop.text
                    for tag in ["loc", "lastmod", "changefreq", "priority"]
                    if (prop := url.find(tag))
                }
            )
        return els
    def load(self) -> List[Document]:
        """Load sitemap."""
        soup = self.scrape("xml")
        els = self.parse_sitemap(soup)
        results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])
        # One Document per fetched page, tagged with its sitemap metadata.
        return [
            Document(
                page_content=self.parsing_function(results[i]),
                metadata={**{"source": els[i]["loc"]}, **els[i]},
            )
            for i in range(len(results))
        ]
| true
|
39377dc4982404044b77233e7108a91a7948f765
|
Python
|
varuneranki/CKANCrawler
|
/ckancrawler/main.py
|
UTF-8
| 2,444
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
#python Epydoc docstring for a function http://epydoc.sourceforge.net/manual-epytext.html
# BUG FIX: __version__ was '$0.1$'.split()[1] — split() on that literal
# yields a single token, so indexing [1] raised IndexError at import time.
__version__ = '0.1'
__author__ = 'Varun Maitreya Eranki'
__doc__='''
@author: U{'''+__author__+'''<http://www.github.com/varunmaitreya>}
@version: ''' + __version__ +'''
@copyright: 2018
@license: BCD
@todo: USE A LOOP TO SPLIT EACH STRING INTO CUSTOM SIZE OF STRING PREFERABLY 0.5 GB SO THAT RABBITMQ CAN HANDLE MESSAGES FASTER
@todo: Implement ckanext-dcat
'''
import pika
import re
import crawler
def main():
    '''
    This is the main method where RabbitMQ send and receive queues are implemented.
    A CKAN URL is received from the 'ckan' message queue and sent for crawling;
    the crawler's dump() produces a JSONLines file which datajson() turns into
    dictionaries; chunking via bytecal is still a TODO.

    NOTE(review): with Python 3 pika, `body` is bytes — matching it against a
    str regex raises TypeError; confirm the intended Python/pika versions.
    '''
    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
    channel = connection.channel()
    channel.queue_declare(queue='ckan')
    def callback(ch, method, properties, body):
        # Only act on messages that contain a URL.
        if re.match('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', body):
            print(" [x] Received %r" % body)
            urls = re.findall('https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', body)
            urlse = str(urls[0])
            print(" [x] Sending url")
            try:
                result = crawler.dump(urlse) #CKAN crawler is called
            except Exception:
                # NOTE(review): if dump() raises, `result` is never bound and
                # the finally block below raises NameError — confirm/fix.
                print(" [x] Exception caught")
            finally:
                # result = (status, error, filename) by the usage below.
                if(result[0] == 0 and result[1] == 0):
                    file = "/data/" + result[2]
                    data = crawler.datajson(file)
                    #TODO:HANDLE URI DATA AND SEND IT IN CHUNKS OF 0.98 GB MAXIMUM PER MESSAGE (USE 0.5 GB FOR SPEED) USING channel.basic_publish
                    #HINT:USE BYTECAL AS IT CAN CALCULATE AND CONCATENATE LENGTH OF STRINGS.
                    channel.basic_publish(exchange='', routing_key='ckan2', body='ckan crawler exited')
                elif(result[0] == 1 and result[1] == 0):
                    channel.basic_publish(exchange='', routing_key='ckan2', body='Error# error processing ckan url')
            connection.close()
    channel.basic_consume(callback,
                          queue='ckan',
                          no_ack=True)
    print(' [*] Waiting for messages. To exit press CTRL+C')
    channel.start_consuming()
| true
|
0d4fe47435395b42e0825e0ea88e956f3a797e3c
|
Python
|
rafaelburgueno/Bitbugsg5
|
/proyectofarmacia/core/views.py
|
UTF-8
| 1,881
| 2.640625
| 3
|
[] |
no_license
|
from django.shortcuts import render
# importamos el TemplateView para implementar las vistas basadas en clases
from django.views.generic.base import TemplateView
class HomePageView(TemplateView):
    """Render the home page, greeting the logged-in user by name."""

    template_name = 'core/home.html'

    def get(self, request, *args, **kwargs):
        # Fall back to the anonymous label when there is no username.
        usuario = request.user.username or 'usuario no registrado'
        return render(request, self.template_name, {'usuario': usuario})
# class SamplePageView(TemplateView):
# template_name = 'core/sample.html'
# class ArchivoPageView(TemplateView):
# template_name = 'core/archivo.html'
| true
|
2b61799afa6edd44fe232dd1066765ef6a2c937b
|
Python
|
lawy623/Algorithm_Interview_Prep
|
/Algo/Leetcode/074SearchA2DMatrix.py
|
UTF-8
| 609
| 3.3125
| 3
|
[] |
no_license
|
class Solution(object):
    def searchMatrix(self, matrix, target):
        """Binary search a row-major sorted matrix as one flat sorted array.

        Fixes: `(i + j) / 2` yields a float on Python 3 and crashes as a
        list index — use floor division. Also guards empty matrices.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        if not matrix or not matrix[0]:
            return False
        n = len(matrix)
        m = len(matrix[0])
        lo, hi = 0, n * m - 1
        while lo <= hi:
            mid = (lo + hi) // 2          # // keeps indices integral on Python 3
            u, v = divmod(mid, m)         # flat index -> (row, col)
            if matrix[u][v] == target:
                return True
            elif matrix[u][v] > target:
                hi = mid - 1
            else:
                lo = mid + 1
        return False
def flat_to_uv(v, n, m):
    """Map flat index v to (row, col) in an n x m matrix.

    Fixes Python-3 behavior: `v / m` is float division there; divmod keeps
    both coordinates integral.
    """
    return divmod(v, m)
| true
|
7531fd17c7fc73bd178e2dd3ac89702fc83099a1
|
Python
|
PSFREITASUEA/walkcycle-mario
|
/Player.py
|
UTF-8
| 4,898
| 3.015625
| 3
|
[] |
no_license
|
import pygame.image
from SpriteLoader import *
from utils import *
class Player:
    """Walk-cycle player sprite: holds the animation strips plus movement,
    jump and animation state, advanced once per frame via update()/render()."""

    def __init__(self, pos_x, pos_y):
        # Pre-load every animation strip once at construction time.
        self.sprites_walk_right = load_sprites_walk_right()
        self.sprites_run_right = load_sprites_run_right()
        self.sprites_walk_left = load_sprites_walk_left()
        self.sprites_run_left = load_sprites_run_left()
        self.sprites_jump_left = load_sprites_jump_left()
        self.sprites_jump_right = load_sprites_jump_right()
        # Fractional frame cursors, one per animation strip.
        self.current_left = 0
        self.current_run_left = 0
        self.current_right = 0
        self.current_run_right = 0
        self.current_jump_left = 0
        self.current_jump_right = 0
        # Collision/draw rectangle starts at the requested position.
        self.image = self.sprites_walk_right[self.current_right]
        self.rect = self.image.get_rect()
        self.rect = self.rect.move(pos_x, pos_y)
        # Facing/motion flags driven by the input handler elsewhere.
        self.is_idle_to_left = False
        self.is_idle_to_right = True
        self.is_going_up = False
        self.is_going_left = False
        self.is_going_right = False
        self.is_running_left = False
        self.is_running_right = False
        self.is_jump = False
        self.jump_count = 10  # jump arc counter: 10 .. -10, then reset

    def movement(self):
        """Apply horizontal speed, or the parabolic jump arc while jumping."""
        if not self.is_jump:
            if self.is_going_right:
                self.rect.x += 5
            elif self.is_going_left:
                self.rect.x -= 5
            elif self.is_running_left:
                self.rect.x -= 12
            elif self.is_running_right:
                self.rect.x += 12
        else:
            if self.jump_count >= -10:
                # Quadratic height change: rises while count > 0, falls after.
                self.rect.y -= (self.jump_count * abs(self.jump_count)) * 0.5
                if self.is_going_left:
                    self.rect.x -= 5
                elif self.is_going_right:
                    self.rect.x += 5
                self.jump_count -= 1
            else:
                # Arc finished: reset for the next jump.
                self.jump_count = 10
                self.is_jump = False

    def update(self):
        """Per-frame tick: move, advance animation, clamp to screen."""
        self.movement()
        self.animate()
        self.is_colliding_with_limits()

    def render(self, screen: pygame.surface):
        """Blit the sprite matching the current state/facing at self.rect."""
        if self.is_going_right:
            screen.blit(self.sprites_walk_right[int(self.current_right)], (self.rect.x, self.rect.y))
        elif self.is_going_left:
            screen.blit(self.sprites_walk_left[int(self.current_left)], (self.rect.x, self.rect.y))
        elif self.is_running_right:
            screen.blit(self.sprites_run_right[int(self.current_run_right)], (self.rect.x, self.rect.y))
        elif self.is_running_left:
            screen.blit(self.sprites_run_left[int(self.current_run_left)], (self.rect.x, self.rect.y))
        elif self.is_idle_to_left:
            if self.is_jump:
                screen.blit(self.sprites_jump_left[int(self.current_jump_left)], (self.rect.x, self.rect.y))
            else:
                # Frame 2 is used as the standing pose.
                screen.blit(self.sprites_walk_left[2], (self.rect.x, self.rect.y))
        elif self.is_idle_to_right:
            if self.is_jump:
                screen.blit(self.sprites_jump_right[int(self.current_jump_right)], (self.rect.x, self.rect.y))
            else:
                screen.blit(self.sprites_walk_right[2], (self.rect.x, self.rect.y))

    def is_colliding_with_limits(self):
        """Clamp the rect horizontally inside [0, WINDOW_WIDTH]."""
        if self.rect.left <= 0:
            self.rect.left = 0
        elif self.rect.right >= WINDOW_WIDTH:
            self.rect.right = WINDOW_WIDTH

    def animate(self):
        """Advance the frame cursor of the active strip (fractional steps
        slow the animation relative to the frame rate); idle resets all."""
        if self.is_going_right:
            self.current_right += 0.25
            if self.current_right >= len(self.sprites_walk_right):
                self.current_right = 1
        elif self.is_going_left:
            self.current_left += 0.25
            if self.current_left >= len(self.sprites_walk_left):
                self.current_left = 1
        elif self.is_running_left:
            self.current_run_left += 0.5
            if self.current_run_left >= len(self.sprites_run_left):
                # Loop only the last few run frames.
                self.current_run_left = len(self.sprites_run_left) - 5
        elif self.is_running_right:
            self.current_run_right += 0.5
            if self.current_run_right >= len(self.sprites_run_right):
                self.current_run_right = len(self.sprites_run_right) - 5
        elif self.is_jump:
            if self.is_idle_to_right:
                self.current_jump_right += 0.25
                if self.current_jump_right >= len(self.sprites_jump_right):
                    # Hold the final jump frame.
                    self.current_jump_right = len(self.sprites_jump_right) - 1
            elif self.is_idle_to_left:
                self.current_jump_left += 0.25
                if self.current_jump_left >= len(self.sprites_jump_left):
                    self.current_jump_left = len(self.sprites_jump_left) - 1
        else:
            # Idle: rewind every animation.
            self.current_left = 0
            self.current_right = 0
            self.current_run_left = 0
            self.current_run_right = 0
            self.current_jump_left = 0
            self.current_jump_right = 0
| true
|
dab916828f7c3612739071338775a224af82bb2f
|
Python
|
gerardburgues/LinearRegression
|
/LinearRegression/main.py
|
UTF-8
| 4,105
| 3.265625
| 3
|
[] |
no_license
|
"""
La nostra base de dades tracta sobre el rendiment d’alumnes de secundària en dos escoles portugueses.
Els atributs inclueixen dades sobre les seves calificacions,
característiques demogràfiques, socials i característiques relacionades amb l’escola.
Totes aquestes dades han sigut obtingudes de informes escolars i qüestionaris.
"""
# Import Libraries
import numpy as np
import pandas as pd
import scipy.stats
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
def DataInfo():
    """Print an overview of the global `data` frame: head, shape, stats, nulls.

    Fix: `head` and `describe` are methods — the original printed the bound
    method objects instead of calling them.
    """
    print("We see all the variables that we have in our dataset: \n", data.head())
    print("What shape our CSV has ? \n", data.shape)
    print("Detailed information: \n", data.describe())
    print("Null values ? \n", data.isnull().sum())
def DropColumns(data):
    """Return `data` without the columns judged uninformative for prediction."""
    unwanted = ['school', 'famsize', 'Pstatus', 'Fedu', 'Medu', 'Fjob', 'Mjob',
                'reason', 'guardian', 'traveltime', 'famsup', 'nursery',
                'internet', 'goout', 'Dalc']
    return data.drop(unwanted, axis=1)
def plotsRelation(data):
    """Show pairwise scatter plots of the remaining columns and print a summary.

    Opens an interactive matplotlib window (blocking until closed).
    """
    sns.pairplot(data)
    plt.show()
    print("unique? -->", data.nunique())
    # Fix: head is a method — the original printed the bound method object.
    print("new data head: \n", data.head())
def HeatMap(data):
    """Render a correlation heatmap of the dataframe's numeric features.

    Saves the figure as 'plotcorrelation.png' and opens an interactive window.
    Works on all of `data` (or the reduced frame if columns were dropped first).
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 12), )
    ax = sns.heatmap(data=data.corr(), ax=ax, annot=True, cmap="coolwarm")
    ax.set_xlabel('Features', fontdict={"fontsize": 16})
    ax.set_ylabel('Features', fontdict={"fontsize": 16})
    # Draw a thick cyan frame around the axes.
    for _, s in ax.spines.items():
        s.set_linewidth(5)
        s.set_color('cyan')
    ax.set_title('Correlation between different Features', loc="center",
                 fontdict={"fontsize": 16, "fontweight": "bold", "color": "white"}, )
    plt.savefig("plotcorrelation.png", bbox_inches="tight")
    plt.show()
def Mse(v1, v2):
    """Mean squared error between two array-likes (element-wise)."""
    diff = v1 - v2
    return (diff ** 2).mean()
def Regression(x, y):
    """Fit and return a sklearn LinearRegression predicting y from x."""
    model = LinearRegression()
    model.fit(x, y)
    return model
def TransformingStrings(data):
    """Encode the binary categorical columns as 0/1 integers, in place.

    no/M/U -> 0, yes/F/R -> 1.
    """
    yes_no_columns = ['schoolsup', 'paid', 'activities', 'romantic', 'higher']
    for column in yes_no_columns:
        data[column] = data[column].replace(['no'], 0)
        data[column] = data[column].replace(['yes'], 1)
    data['sex'] = data['sex'].replace(['M'], 0)
    data['sex'] = data['sex'].replace(['F'], 1)
    data['address'] = data['address'].replace(['U'], 0)
    data['address'] = data['address'].replace(['R'], 1)
    return data
if __name__ == "__main__":
    # Load the student performance dataset and run the preprocessing pipeline.
    data = pd.read_csv("student-mat.csv")
    DataInfo()
    data = DropColumns(data)
    plotsRelation(data)
    HeatMap(data)
    data = TransformingStrings(data)
    print(data)
    # Split into features (all columns but the last) and target (last column).
    x = data.iloc[:, :-1]
    y = data.iloc[:, -1]
    print("HEY <",x)
    print(y)
else:
    print("File one executed when imported")

print("hi")
| true
|
9e3a2e41cfcd1309d98f50447ff41be17f0b7796
|
Python
|
blorente/beyond-the-loop
|
/scripts/post-formatter.py
|
UTF-8
| 3,565
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#/usr/bin/python3
import argparse
import re
import sys
from pathlib import Path
def printerr(*args):
    """print() to stderr so status messages don't mix with stdout output."""
    print(*args, file=sys.stderr)
def parse_args():
    """Build and parse the CLI: a 'devto' subcommand taking a post file
    and an optional output directory."""
    parser = argparse.ArgumentParser(description='Process blogs')
    subparsers = parser.add_subparsers(dest="subcommand_name")

    devto = subparsers.add_parser(
        name='devto', help='Make the post Dev.to compliant.'
    )
    devto.add_argument(
        '--outdir',
        type=Path,
        default=Path("_devto/"),
        help="Directory to output the post to",
    )
    devto.add_argument("file", type=Path, help='Post to process')

    return parser.parse_args()
# Absolute base URL substituted for {{site.baseurl}} references.
SITE_BASEURL = "https://beyondtheloop.dev"

# Matches the Jekyll YAML front-matter block delimited by '---' lines.
YAML_HEADER_REGEX = re.compile(r"---(.*\n)*?---", re.MULTILINE)


def _remove_yaml_header(text: str) -> str:
    """Strip the Jekyll YAML front-matter; Dev.to posts don't use it."""
    printerr(" * Removed YAML header")
    return YAML_HEADER_REGEX.sub("", text)
# Attribution note prepended to every cross-posted article.
BLOG_LINK_STRING = """
**Note:** This content was originally posted in my blog, [Beyond The Loop](https://beyondtheloop.dev/3-programming-milestones-to-work-towards/).
If you like it, head over there or follow the blog on Twitter ([@BeyondLoop](https://twitter.com/BeyondLoop)) for more!
"""


def _add_blog_links(text: str) -> str:
    """Prepend the canonical-source attribution note to the post body."""
    printerr(" * Added blog link")
    return BLOG_LINK_STRING + text
# Captures the tweet id (group 1) from any twitter.com status URL on a line.
TWITTER_LINK_REGEX = re.compile(r"^.*https://twitter\.com/.*?/status/([^?]*).*$", re.MULTILINE)


def _replace_twitter_links(text: str) -> str:
    """Turn raw tweet URLs into Dev.to {% twitter %} liquid tags."""
    def _process_match(matchobj):
        printerr(f" * Twitter link found {matchobj.group(1)}")
        return "{% twitter " + matchobj.group(1) + " %}"
    printerr(" * Cleaning twitter links")
    return TWITTER_LINK_REGEX.sub(_process_match, text)


# Captures the video id (group 1) from youtube embed URLs.
YOUTUBE_LINK_REGEX = re.compile(r"^.*https://www.youtube\.com/embed/([^\"]*).*$", re.MULTILINE)


def _replace_youtube_links(text: str) -> str:
    """Turn youtube embed iframes into Dev.to {% youtube %} liquid tags."""
    def _process_match(matchobj):
        printerr(f" * Youtube link found {matchobj.group(1)}")
        return "{% youtube " + matchobj.group(1) + " %}"
    printerr(" * Cleaning youtube links")
    return YOUTUBE_LINK_REGEX.sub(_process_match, text)
# Captures url (group 1) and description (group 2) from a Jekyll
# image-with-caption include line.
IMAGE_WITH_CAPTION_REGEX = re.compile(r"^.*include image-with-caption\.html.*url=[\"\']([^\"\']*).*description=[\"\']([^\"\']*).*.$", re.MULTILINE)


def _replace_image_with_caption_links(text: str) -> str:
    """Rewrite Jekyll image-with-caption includes as plain Markdown images.

    NOTE(review): the original return statement was truncated to `f"})"`,
    which emits literal `})` and drops the image entirely. Reconstructed
    here as a Markdown image using the captured description (alt text) and
    URL — confirm against the blog repo's history.
    """
    def _process_match(matchobj):
        printerr(f" * Image-with-caption link found {matchobj.group(1)}")
        return f""
    printerr(" * Cleaning image-with-caption links")
    return IMAGE_WITH_CAPTION_REGEX.sub(_process_match, text)
# Matches literal '{{site.baseurl}}' placeholders (braces escaped for regex).
BASEURL_LINK_REGEX = re.compile(r"\{\{site\.baseurl\}\}", re.MULTILINE)


def _replace_baseurl_links(text: str) -> str:
    """Expand Jekyll baseurl placeholders to the absolute site URL."""
    printerr(" * Replace baseurl links")
    return BASEURL_LINK_REGEX.sub(SITE_BASEURL, text)
def process_devto(file: Path, outdir: Path):
    """Transform a Jekyll post into a Dev.to-ready copy written to outdir."""
    dest = outdir / file.name
    with open(dest, 'wb') as destfile:
        contents = file.read_text()
        # Run the post through the whole transformation pipeline, in order.
        pipeline = (
            _remove_yaml_header,
            _replace_baseurl_links,
            _replace_twitter_links,
            _replace_youtube_links,
            _replace_image_with_caption_links,
            _add_blog_links,
        )
        for transform in pipeline:
            contents = transform(contents)
        printerr("*** DONE! ***")
        destfile.write(contents.encode('utf-8'))
def main():
    """Dispatch the parsed CLI subcommand."""
    args = parse_args()
    # No subcommand (falsy) never equals "devto", so this covers both checks.
    if args.subcommand_name == "devto":
        process_devto(args.file, args.outdir)


if __name__ == '__main__':
    main()
| true
|
b2878b7c19ba36aa3d20026f61cc7ba1f4f3e40d
|
Python
|
Eric-Canas/BetArbitrageAnalysis
|
/Scrapers/MainScraper.py
|
UTF-8
| 2,776
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
from requests import get
from requests.status_codes import codes
from lxml import html
from Scrapers.commons import *
from time import sleep
from random import uniform, shuffle
from Constants import *
class BetScraper():
    """
    Scraper that extracts betting-odds information from oddschecker.com/es/
    for the sports configured in `sports`, storing results per sport.
    """

    def __init__(self):
        self.name = "OddsChecker"  # Name of the spider
        self.urls = [BASE_WEB+sport for sport in sports]  # Set of pages to check
        shuffle(self.urls)  # randomize crawl order
        self.information = {}  # sport name -> parsed odds structure
        # Desktop browser UA so the site serves the regular page.
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
        self.header = {'User-Agent' : user_agent}

    def get_information(self, sleep_time_min=0.5, sleep_time_max=1.5):
        """
        Scrap all the information of oddschecker.com/es/ for the sports selected
        :param sleep_time_min: float. Minimum amount of seconds to wait between requests to the web
        :param sleep_time_max: float. Maximum amount of seconds to wait between requests to the web
        :return: dict. All the information extracted from oddschecker.com/es/ organized
                 as structured and nested dictionaries, keyed by sport.
        :raises ValueError: if no sport produced any information.
        """
        # For each url to scrap
        for url in self.urls:
            # Wait a random time for neither overwhelm the page nor be easily detected.
            sleep(uniform(sleep_time_min, sleep_time_max))
            # Make the request and get the HTML of the page
            response = get(url=url, headers=self.header)
            # If response gets code 200 (ok)
            if response.status_code == codes['ok']:
                # Gets the name of the sport (last path segment of the final URL)
                sport = response.url[str.rfind(response.url, '/') + len('/'):]
                # Parse the web-page to transform it into a structured dict
                result = parse(response=html.fromstring(response.content), sport=sport)
                # If it gave a valid result
                if result != {}:
                    # Save it and print an advice
                    self.information[sport] = result
                    print(sport.title() + " processed")
                # If result gave was invalid
                else:
                    # Inform about it (exception printed, not raised: best-effort crawl)
                    print(RuntimeError(sport.title()+" bets have no tie possibility, thus are not ensurable"))
            # If response gave an error code go to the next url
            else:
                print(ReferenceError(response.url+" gave code "+str(response.status_code)))
        if self.information == {}:
            raise ValueError("No information found in www.oddschecker.com/es/")
        return self.information
| true
|
7e58c6e7b1f295595ad812bfcde2dc6cd195136d
|
Python
|
veetarag/Data-Structures-and-Algorithms-in-Python
|
/DataStructure/linkedList.py
|
UTF-8
| 2,116
| 3.75
| 4
|
[] |
no_license
|
class Node():
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, val):
        self.val = val
        self.next = None

    def traverse(self):
        """Print every value from this node to the tail, one per line."""
        node = self
        while node!=None:
            print(node.val)
            node = node.next

    def printnext(self):
        # Prints the successor node object (its repr), not its value.
        print(self.next)

    def removeKfromList(self, head, key):
        """Delete every node whose value equals key; return the (possibly new) head.

        NOTE(review): behaves as a plain function on `head`; `self` is unused.
        """
        temp = head
        prev = None
        # Check if the head contains the key repeatedly
        while temp!=None and temp.val==key:
            head = temp.next
            temp = temp.next
        while temp!=None:
            # Search for the key to be deleted,
            # keep track of the previous node
            # as we need to change 'prev.next'
            while temp!=None and temp.val != key:
                prev = temp
                temp = temp.next
            # If there is no element equal to Key
            if temp==None:
                return head
            # We got an key, so unlink it
            prev.next = temp.next
            # Update temp for next iteration for outer loop
            temp = prev.next
        return head
def deleteNodeNext(nthNode, node):
    # Re-link nthNode directly to node's successor, dropping any nodes
    # between nthNode and node.next from the chain.
    nthNode.next = node.next
def sumTwoList(head1, head2):
    """Concatenate each list's node values as decimal digits and add the
    resulting two integers (e.g. 1->2 and 3 gives 12 + 3 = 15)."""
    def as_number(head):
        parts = []
        node = head
        while node is not None:
            parts.append(str(node.val))
            node = node.next
        return int("".join(parts))

    return as_number(head1) + as_number(head2)
if __name__=='__main__':
    # Build the list 12 -> 43 -> 56 -> 5 -> 56 and exercise removeKfromList.
    node1 = Node(12)
    node2 = Node(43)
    node3 = Node(56)
    node4 = Node(5)
    node5 = Node(56)
    node6 = Node(19)
    node7 = Node(88)
    # node5.next = node6
    # node6.next = node7
    node1.next = node2
    node2.next = node3
    node3.next = node4
    node4.next = node5
    node1.traverse()
    # Remove every 56 and print the list again.
    # NOTE(review): the returned new head is discarded; if the old head
    # matched the key, traverse would still start from the removed node.
    node1.removeKfromList(node1, 56)
    node1.traverse()
    # nodep1 = Node(1000)
    # nodep2 = Node(1000)
    # nodep1.next = nodep2
    # #
    # nodep1.traverse()
    # nodep1.removeKfromList(nodep1, 1000)
    # print()
    # nodep1.traverse()

    #deleteNodeNext(node1,node2)
    #node1.traverse()
    #print(sumTwoList(node1, node5))
| true
|
867b7b849da612b5d3bf12c55fb7fd1b21083b18
|
Python
|
deref007/learn_python_web
|
/learn_web/app_resp2.py
|
UTF-8
| 923
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, jsonify
from werkzeug.wrappers import Response
from config import setting
app = Flask(__name__, static_folder='/web_ui')
app.config.from_object(setting)
"""
另一种响应的方法
使用werkzeug内置的Response进行返回,使用jsonify进行json的格式化返回
"""
class JSONResponse(Response):
    """Werkzeug Response subclass that JSON-ifies dict return values from views."""

    @classmethod
    def force_type(cls, rv, environ=None):
        # Dicts are converted with jsonify; everything else is handled by werkzeug.
        if isinstance(rv, dict):
            rv = jsonify(rv)
        return super(JSONResponse, cls).force_type(rv, environ)
def set_response_class():
    """Install JSONResponse as the application's response class.

    Fix: the original assigned to ``app.request_class`` — JSONResponse
    subclasses werkzeug's Response, so it must be set as the *response*
    class for dict return values to be converted by force_type.
    """
    app.response_class = JSONResponse


set_response_class()
@app.route("/json_hello/")
def json_hello():
    # The returned dict is converted to a JSON response by JSONResponse.force_type.
    return {"message": 'hello world!'}


@app.route("/json_headers")
def json_headers():
    # (body, status, headers) tuple: JSON body, 201 status, extra header.
    return {"header": [1, 2, 3]}, 201, [('X-Request-Id', '100')]


if __name__ == '__main__':
    # Development server only; bind to localhost on port 1235.
    app.run(host="127.0.0.1", port=1235)
| true
|
7c6e97355fa776c0a8723a6001682e88b177b405
|
Python
|
samjabrahams/taqtoe
|
/taqtoe/player/human.py
|
UTF-8
| 2,370
| 4.03125
| 4
|
[] |
no_license
|
import taqtoe.utils as utils
from taqtoe.exceptions import BadMoveException
from taqtoe.player.player import Player
class HumanPlayer(Player):
    """
    Class for a human tic-tac-toe player driven by console input.
    """

    def move(self, game):
        """
        Asks for user input from the console to make a move. Will loop until
        a valid move is provided.
        :param game: The TicTacToeGame that the player should move for.
        :return: Integer. The game status after moving the piece.
        """
        available_moves = utils.row_col_moves_to_idx(game.available_moves())
        while True:
            move = self._prompt_player(game, available_moves)
            try:
                int_move = self._validate_user_input(move)
                row, col = utils.idx_to_row_col(int_move)
                return game.move(row, col)
            except BadMoveException as e:
                # Show the error and re-prompt.
                utils.print_with_border(str(e))

    def _prompt_player(self, game, available_moves):
        """
        Displays current game status and requests a move from the player.
        :param game: The game whose board and turn are displayed.
        :param available_moves: List of integers representing available cells.
        :return: String input from the user (stripped of whitespace).
        """
        print('\n{}\'s turn: please select a move (you are {}).'
              .format(self.name, self.team_string))
        game.print_board(print_available_cell_indices=True)
        print('Available moves: {}'.format(str(available_moves)))
        return input('> ').strip()

    def _validate_user_input(self, move):
        """
        Validates that `move` is a digit string in [0, 8].
        :param move: Raw user input string.
        :return: The move parsed as an int.
        :raises BadMoveException: if the input is not a number or out of range.
        """
        if not move.isdigit():
            raise BadMoveException('You must pass in a number as input.')
        move = int(move)
        if not 0 <= move < 9:
            raise BadMoveException('Move must be in range [0-8].')
        return move
if __name__ == '__main__':
    # Setup a simple 2-player game
    from taqtoe.constants import X, O, CONTINUE
    from taqtoe.tictactoe import TicTacToe

    game = TicTacToe()
    player1 = HumanPlayer(X, 'Sam')
    player2 = HumanPlayer(O, 'Zak')
    game.print_board()
    # Alternate turns until the game reports a terminal status.
    while game.status == CONTINUE:
        player = player1 if game.turn == X else player2
        player.move(game)
        game.print_board()
    print('Game over!')
    game.print_board()
| true
|
f202201e9b465352629aac62062b5af680671604
|
Python
|
yuquan1006/Python_Spaces
|
/library/p_requests/unit_test.py
|
UTF-8
| 905
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Version : py2
import unittest
# 对abs()函数单元测试
# 设计测试用例
# 输入正数,比如1、1.2、0.99,期待返回值与输入相同;
# 熟入负数,比如-1、-1.2、-0.99,期待返回值与输入相反;
# 输入0,期待返回0;
# 输入非数值类型,比如None、[]、{},期待抛出TypeError。
# add('1','1')
class test(unittest.TestCase):
    """Unit tests for the built-in abs()."""

    def test01(self):
        """Positive inputs are returned unchanged."""
        self.assertEqual(1, abs(1))
        self.assertEqual(1.2, abs(1.2))
        print('pass')

    def test02(self):
        """Negative inputs are negated."""
        self.assertEqual(1, abs(-1))
        self.assertEqual(1.2, abs(-1.2))
        print('pass')

    def test03(self):
        """Zero maps to zero."""
        self.assertEqual(0, abs(0))
        print('pass')

    def test04(self):
        """Calling abs() with no argument raises TypeError.

        Fix: the original compared abs()'s return value against an
        error-message string — abs() raises instead of returning, so
        that assertion could never pass.
        """
        self.assertRaises(TypeError, abs)
        print('pass')
if __name__ == '__main__':
    # Discover and run the TestCase above.
    unittest.main()
| true
|
c2855dad2fe5db401fb5426737ce141ca8804085
|
Python
|
Stoggles/AdventofCode
|
/2015/day02.py
|
UTF-8
| 685
| 3.15625
| 3
|
[] |
no_license
|
# Worked examples from the puzzle: [presents, expected paper, expected ribbon].
test1 = [['2x3x4'], 58, 34]
test2 = [['1x1x10'], 43, 14]
def calc(presents):
    """Return [total wrapping paper, total ribbon] for 'LxWxH' present specs.

    Paper per present: surface area plus the smallest face as slack.
    Ribbon per present: smallest face perimeter plus the volume.
    """
    paper_total = 0
    ribbon_total = 0
    for present in presents:
        small, mid, large = sorted(int(side) for side in present.split('x'))
        # 2*(lw+lh+wh) surface + lw slack (small*mid is the smallest face)
        paper_total += 2 * (small * mid + small * large + mid * large) + small * mid
        # smallest perimeter + volume
        ribbon_total += 2 * (small + mid) + small * mid * large
    return [paper_total, ribbon_total]
# Self-checks against the puzzle's worked examples.
assert calc(test1[0]) == [test1[1], test1[2]]
assert calc(test2[0]) == [test2[1], test2[2]]

# Python 2 print statement; calc iterates the open file line by line.
with open('input2.txt') as file:
    print calc(file)
| true
|
aa6d85eced6f14aa242fdac20cf2b8e3cdf5b321
|
Python
|
hiroshi-maybe/deep-learning-from-scratch
|
/feed-forward-neural-network/neuralnet_mnist.py
|
UTF-8
| 1,037
| 2.6875
| 3
|
[] |
no_license
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
import pickle
from dataset.mnist import load_mnist
from PIL import Image # pip install Pillow
from common.functions import sigmoid, softmax
def get_data():
    """Return the normalized, flattened MNIST test split as (images, labels)."""
    (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test
def init_network():
    """Load the pretrained weights/biases (W1..W3, b1..b3) from sample_weight.pkl.

    NOTE: pickle.load executes arbitrary code — only load trusted files.
    """
    with open("sample_weight.pkl", 'rb') as f:
        network = pickle.load(f)
    return network
def predict(network, x):
    """Forward pass of the 3-layer MLP: two sigmoid layers, then softmax."""
    activations = x
    for layer in (1, 2):
        pre = np.dot(activations, network['W%d' % layer]) + network['b%d' % layer]
        activations = sigmoid(pre)
    logits = np.dot(activations, network['W3']) + network['b3']
    return softmax(logits)
# Batched inference over the MNIST test set, counting correct predictions.
x, t = get_data()
network = init_network()

batch_size = 100
ok_cnt = 0
for i in range(0, len(x), batch_size):
    x_batch = x[i:i+batch_size]
    y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)  # predicted class per sample
    ok_cnt += np.sum(p == t[i:i+batch_size])

print(f"Accuracy: {str(float(ok_cnt)/len(x))}")
| true
|
3b9ae4cbfb5117d150422541e8283b72c5336586
|
Python
|
tjytlxwxhyzqfw/online-judge
|
/codeforces/375/2/A.py
|
UTF-8
| 181
| 2.96875
| 3
|
[] |
no_license
|
def read(t=None):
    """Read one input line (Python 2 raw_input); return it raw, or split and
    map each token through t when a type is given."""
    string = raw_input()
    return string if t is None else [t(x) for x in string.split()]


if __name__ == "__main__":
    # Read three integers; the answer is max - min (Python 2 print statement).
    a = read(int)
    a = sorted(a)
    print a[2] - a[0]
| true
|
abfae196f120d72a83a6180263f713091218bdf1
|
Python
|
xatshepsut/GraphingApp
|
/graph_generator/graph_generator.py
|
UTF-8
| 1,863
| 3.328125
| 3
|
[] |
no_license
|
# https://networkx.github.io/documentation/latest/reference/generators.html
import click
import networkx as nx
from enum import Enum
class GraphType(Enum):
    """Supported generator families; values double as the CLI option names."""
    Path = 'path'
    Cycle = 'cycle'
    Star = 'star'
    Complete = 'complete'
    Hypercube = 'hypercube'
    Wheel = 'wheel'
    Random = 'random'

    @classmethod
    def fromstring(cls, string):
        # Case-insensitive lookup by member name; returns None when no
        # member matches (Python 2: dict.iteritems).
        for key, value in vars(cls).iteritems():
            if key == string.lower().title():
                return value
def generate_graph(type, n):
    """Build the networkx graph selected by `type` with size parameter n;
    unknown types yield an empty graph."""
    builders = {
        GraphType.Path: nx.path_graph,
        GraphType.Cycle: nx.cycle_graph,
        GraphType.Star: nx.star_graph,
        GraphType.Complete: nx.complete_graph,
        GraphType.Hypercube: nx.hypercube_graph,
        GraphType.Wheel: nx.wheel_graph,
        GraphType.Random: lambda size: nx.fast_gnp_random_graph(size, 0.5),
    }
    builder = builders.get(type)
    if builder is None:
        return nx.empty_graph()
    return builder(n)
@click.group()
def cli():
    """Click command group entry point."""
    click.echo('Run program with --help option for more info')


@cli.command(name='classic')
@click.option('--graph-type', help='[path, cycle, star, complete, hypercube, wheel]')
@click.option('--n', type=int, help='1..N')
def generate_classic(graph_type, n):
    # Generate a classic graph family and save it as GraphML (Python 2 prints).
    print 'Generating graph...'
    type = GraphType.fromstring(graph_type)
    Graph = generate_graph(type, int(n))
    output_filename = '%s_%s.graphml' % (type.value, n)
    nx.write_graphml(Graph, './%s' % output_filename)
    print 'Result is saved as "%s"' % output_filename


@cli.command(name='random')
@click.option('--n', type=int, help='1..N')
def generate_random(n):
    # Generate a G(n, 0.5) random graph and save it as GraphML.
    print 'Generating random graph...'
    Graph = generate_graph(GraphType.Random, int(n))
    output_filename = 'random_%s.graphml' % n
    nx.write_graphml(Graph, './%s' % output_filename)
    print 'Result is saved as "%s"' % output_filename


if __name__ == '__main__':
    cli()
| true
|
c6828960992bc69106079a696d9dae097af8b9a2
|
Python
|
autosub-team/VHDL_Tasks
|
/gates/scripts/LogicFormulaCreator.py
|
UTF-8
| 3,316
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
########################################################################
# LogicFormulaCreator.py
# Generates a logic formula for given TaskParameters
#
# Copyright (C) 2015 Martin Mosbeck <martin.mosbeck@gmx.at>
# License GPL V2 or later (see http://www.gnu.org/licenses/gpl2.txt)
########################################################################
import sys
import re
def toBase3(number, length):
    """Return `number` as a list of `length` base-3 digits, most significant first.

    Fix: the original appended remainders least-significant-first and never
    reversed them, producing mirrored digit order — inconsistent with
    toBase2, which (via bin()) is most-significant-first.
    """
    digits = []
    dividend = number
    while dividend != 0:
        dividend, remainder = divmod(dividend, 3)
        digits.append(str(remainder))
    # Digits were produced least-significant first; reverse before padding.
    base3Nr = "".join(reversed(digits)).zfill(length)
    return [int(x) for x in base3Nr]
def toBase2(number, length):
    """Return `number` in binary as a list of `length` bits, most significant first."""
    bits = format(number, "0{}b".format(length))
    return [int(bit) for bit in bits]
def createFromParameters(taskParameter):
    """Decode a packed 46-bit task parameter into a two-level logic formula string.

    Bit layout (MSB to LSB): 5 base-3 digits for the gate types (4 level-1
    gates + 1 level-2 gate), then 16 input-enable flags, 16 level-1 input
    negations, 4 level-2 input negations, and 5 output negations.
    """
    taskParameter=int(taskParameter)
    # Slice the packed integer into its fields via mask-and-shift.
    gates= toBase3(taskParameter>>41,5)
    inputsEnableLvl1= toBase2((taskParameter & (2**41-1))>>25,16)
    inputsNegateLvl1= toBase2((taskParameter & (2**25-1))>>9,16)
    inputsNegateLvl2= toBase2((taskParameter & (2**9-1))>>5,4)
    outputsNegate= toBase2(taskParameter & (2**5-1),5)
    #print(gates,inputsEnableLvl1,inputsNegateLvl1,inputsNegateLvl2,outputsNegate)
    # Map numeric codes to textual operators / negation prefixes.
    gate_type={0:" and ",1:" or ",2:" xor "}
    negation_type={0:"",1:"not "}
    for i in range(len(gates)):
        gates[i]=gate_type[gates[i]]
    for i in range(len(inputsNegateLvl1)):
        inputsNegateLvl1[i]=negation_type[inputsNegateLvl1[i]]
    for i in range(len(inputsNegateLvl2)):
        inputsNegateLvl2[i]=negation_type[inputsNegateLvl2[i]]
    for i in range(len(outputsNegate)):
        outputsNegate[i]=negation_type[outputsNegate[i]]
    #print(gates,inputsEnableLvl1,inputsNegateLvl1,inputsNegateLvl2,outputsNegate)
    # Each level-1 gate draws from inputs A..D (4 inputs per gate).
    inputsLvl1_lables={}
    for key in [0, 4,8,12]:
        inputsLvl1_lables[key] = 'A'
    for key in [1, 5, 9,13]:
        inputsLvl1_lables[key] = 'B'
    for key in [2, 6, 10,14]:
        inputsLvl1_lables[key] = 'C'
    for key in [3, 7, 11,15]:
        inputsLvl1_lables[key] = 'D'
    # Level-2 skeleton: the four gate conditions combined by gates[4],
    # each with its own negations, wrapped in the final output negation.
    expression=outputsNegate[4]+"( "+ \
        outputsNegate[0]+inputsNegateLvl2[0]+"( cond0 )"+gates[4]+ \
        outputsNegate[1]+inputsNegateLvl2[1]+"( cond1 )"+gates[4] + \
        outputsNegate[2]+inputsNegateLvl2[2]+"( cond2 )"+gates[4]+ \
        outputsNegate[3]+inputsNegateLvl2[3]+"( cond3 )"+ \
        " )"
    conditions_type={2: "x0gx1", 3: "x0gx1gx2", 4: "x0gx1gx2gx3"} # type of conditions possible, x inputs, g gate
    # Number of enabled inputs per gate selects its condition template.
    conditions=[inputsEnableLvl1[0:4].count(1),inputsEnableLvl1[4:8].count(1),inputsEnableLvl1[8:12].count(1),inputsEnableLvl1[12:].count(1)]
    for i in range(len(conditions)):
        conditions[i]=conditions_type[conditions[i]]
    for i in range(0,4):
        x_num=0
        # Fill in the gate operator, then each enabled (possibly negated) input.
        conditions[i]=re.sub("g",gates[i],conditions[i])
        for input_num in range(i*4,(i+1)*4):
            if(inputsEnableLvl1[input_num]==1):
                conditions[i]=re.sub("x"+str(x_num),inputsNegateLvl1[input_num]+inputsLvl1_lables[input_num],conditions[i])
                x_num+=1
        expression=re.sub("cond"+str(i),conditions[i],expression)
    expression=re.sub("not not ","",expression)#strip double negations
    return expression
| true
|
8c7f708c13e140998d7c23e3f016f842f41f1d9d
|
Python
|
mnihatyavas/Python-uygulamalar
|
/Bernd Klein (520) ile Python/p_31704.py
|
ISO-8859-9
| 976
| 3.140625
| 3
|
[] |
no_license
|
# coding:iso-8859-9 Trke
# p_31704.py: Sevgi sembol kalbin topografik grafii rnei.
import numpy as np
import matplotlib.pyplot as mp
from p_315 import Renk
x, y = np.ogrid [-1:1:100j, -1:1.56:100j]
mp.style.use ("dark_background")
mp.contour (
x.ravel(),
y.ravel(),
x**2 + (y - ((x**2)**(1.0 / 3)))**2,
[1],
colors='red')
#mp.axis ('equal') # on/ak, off/kapal...
mp.title ('Sevgi Sembol Kalp Grafii')
mp.show()
#---------------------------------------------------------------------------------------------------
y, x = np.ogrid [-1:2:100j, -1:1:100j]
ekil = mp.figure()
ekil.set_facecolor (Renk.renk())
altekil = ekil.add_subplot()
altekil.set_facecolor (Renk.renk())
mp.contour (
x.ravel(),
y.ravel(),
x**2 + (y - ((x**2)**(1.0 / 3)))**2,
[1],
colors=Renk.renk() )
mp.axis ('equal')
mp.title ('Sevgi Sembol Kalp Grafii', color=Renk.renk())
mp.show()
| true
|
144ac04d2da7bbd63aa97a60c1194f40d2a7ef25
|
Python
|
dongtianqi1125/Miki
|
/system/data/basicSchedule.py
|
UTF-8
| 2,566
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
from datetime import datetime
import time
from query import Query
class BasicSchedule(object):
    # Time-scheduling module: polls the clock and fires the run_* hooks
    # before, during and after each trading session.
    def __init__(self):
        self.query = Query()
        self.all_trade_days = None  # cached list of trading dates, refreshed daily
        # Flags so the pre-/post-market hooks fire at most once per day.
        self.system_run_before_trading_start = False
        self.system_run_after_trading_end = False

    def run_before_trading_start(self):
        # Hook: runs once before the market opens (override in subclass).
        pass

    def run_every_day(self):
        # Hook: runs every day at 05:00.
        pass

    def run_every_minute(self):
        # Hook: runs once per minute while the market is open.
        pass

    def run_after_trading_end(self):
        # Hook: runs once after the market closes.
        pass

    def run(self):
        """Main polling loop; never returns. Times below are HH:MM:SS
        milestones of a trading day (open/close of both sessions)."""
        time1 = datetime.strptime('07:00:00', '%H:%M:%S').time()  # pre-market window start
        time2 = datetime.strptime('09:00:00', '%H:%M:%S').time()  # pre-market window end
        time3 = datetime.strptime('09:31:00', '%H:%M:%S').time()  # morning session start
        time4 = datetime.strptime('11:30:00', '%H:%M:%S').time()  # morning session end
        time5 = datetime.strptime('13:00:00', '%H:%M:%S').time()  # afternoon session start
        time6 = datetime.strptime('15:01:00', '%H:%M:%S').time()  # afternoon session end
        time7 = datetime.strptime('15:05:00', '%H:%M:%S').time()  # post-market window start
        time8 = datetime.strptime('15:10:00', '%H:%M:%S').time()  # post-market window end
        time9 = datetime.strptime('05:00:00', '%H:%M:%S').time()  # daily task time
        now_time = datetime.strptime('00:00:00', '%H:%M:%S').time()
        while True:
            # Tick once per second to avoid busy-waiting on the CPU.
            time.sleep(1)
            # Refresh the trading calendar on first run and at each new date.
            if self.all_trade_days is None or datetime.now().date()!=self.time_count.date():
                self.all_trade_days = self.query.get_all_trade_days()
                self.time_count = datetime.now()
            if datetime.now().date() in self.all_trade_days:
                if datetime.now().time() == time9:
                    t1 = time.time()
                    self.run_every_day()
                    t2 = time.time()
                    print('run_every_day use {}s'.format(t2-t1))
                if not self.system_run_before_trading_start and time1<=datetime.now().time()<=time2:
                    t1 = time.time()
                    self.run_before_trading_start()
                    self.system_run_before_trading_start = True
                    t2 = time.time()
                    print('run_before_trading_start use {}s'.format(t2-t1))
                # Inner loop covers both trading sessions; fires the
                # per-minute hook whenever the wall-clock minute changes.
                while time3<=datetime.now().time()<=time4 or time5<=datetime.now().time()<=time6:
                    self.system_run_before_trading_start = False
                    self.system_run_after_trading_end = False
                    if [datetime.now().hour, datetime.now().minute] != [now_time.hour, now_time.minute]:
                        now_time = datetime.now()
                        self.run_every_minute()
                    else:
                        time.sleep(1)
                if not self.system_run_after_trading_end and time7<=datetime.now().time()<=time8:
                    t1 = time.time()
                    self.run_after_trading_end()
                    self.system_run_after_trading_end = True
                    t2 = time.time()
                    print('run_after_trading_end use {}s'.format(t2-t1))
            else:
                # Non-trading day: poll lazily.
                time.sleep(60)
| true
|
eecd71ac67e7a36834e1c5f07009b144e300ef8e
|
Python
|
dabuu/test_pycharm
|
/overcome_python/chapter5 regex/test_regex.py
|
UTF-8
| 1,471
| 3.515625
| 4
|
[] |
no_license
|
# -*- encoding:utf-8 -*-
__author__ = 'dabuwang'
import re;
sss = "Life can be good";
print "====================== 1. 匹配&搜索: re.search 从整个string中查询,re.match 从 第一个字母开始查询================";
print re.search("can", sss); # matchobject
print re.search("can", sss).group(); #can
print re.match("can", sss); # None
print re.match("l*",sss,re.IGNORECASE) #matchobject
print re.match("l.*?\\s",sss,re.IGNORECASE).group(); #Life
print re.findall("\\w{3}\\s", sss, re.IGNORECASE); # list
print "====================== 2. 替换: re:sub() re.subn() 功能一样, 只是 subn() 返回一个元组 ================== ";
print re.sub("good","bad", sss); #Life can be bad
print re.sub("good|be","bad", sss, 1); #Life can bad good
print re.sub("good|be","bad", sss); #Life can bad bad
print re.subn("good|be", "bad", sss); #('Life can bad bad', 2)
print re.subn("good|be", "bad", sss)[1];#2
print "====================== 3. 分割字符串: re:split ================== ";
print re.split(" ", sss); #['Life', 'can', 'be', 'good']
print re.split(" ", sss,1); #['Life', 'can be good']
print "====================== 4. 正则表达式 对象 ================== ";
r = re.compile("\\b\\w+\\b",re.I); #查找单词
print r.findall(sss);
double = re.compile("(?P<first>\\w)(?P=first)"); # 查找重复字母
print double.findall(sss);
d = double.search(sss);
print d.groupdict();
print d.start(0);
print d.end(0);
| true
|
cf6ef3d6beec70327342ea0171b64154ddf7572f
|
Python
|
jefesaurus/hexapod-engine
|
/old-versions/staging/parts_library/leg_library.py
|
UTF-8
| 479
| 2.515625
| 3
|
[] |
no_license
|
__author__ = 'glalonde'
import xml.etree.ElementTree as ET
import copy
from staging.leg import Leg
def parse_legs():
tree = ET.parse(parts_path + '/legs.xml')
root = tree.getroot()
for child in root:
new_leg = Leg.from_xml_node(child)
legs[new_leg.type] = new_leg
def get_leg(name):
if name in legs:
return copy.deepcopy(legs[name])
raise ValueError('Leg %s not found'%(name))
legs = {}
parts_path = 'configurations'
parse_legs()
| true
|
ddfd01cd8f1c4a04fac86eeaccbdb25d8a1275f5
|
Python
|
rafaelpascoalrodrigues/snakeAI
|
/ai_random_play.py
|
UTF-8
| 170
| 2.875
| 3
|
[] |
no_license
|
import random
# Random engine
prng = random.Random()
def play():
return prng.randint(0, 3)
def main():
print(play())
if __name__ == '__main__':
main()
| true
|
f356e2be88e82cc2fdae41079222b9b924cd42b0
|
Python
|
ToeKnee/chat-demo
|
/server/chat/users/email.py
|
UTF-8
| 692
| 2.65625
| 3
|
[] |
no_license
|
from django.core.mail import send_mail
def send_welcome_email(user):
""" Send a welcome email to the specified user """
# Usually I would use something like Foundation Emails and send
# HTML + plain text emails. But for simplicity, plain text emails
# will do.
# http://foundation.zurb.com/
if user.email is None:
return None
message = """Hi {username},
Welcome to Chat Wall. We hope you have fun chatting with other wallers.
--
Kind Regards,
Chat Wall Team
""".format(
username=user.username
)
send_mail(
"Welcome to Chat Wall",
message,
"chat@chatwall.com",
[user.email],
fail_silently=False,
)
| true
|
69604aa6fc8f38b643aa32cd9123f232483f9ba4
|
Python
|
ruxtain/matrix
|
/matrix/amazon/proxy_pool/__init__.py
|
UTF-8
| 1,897
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
# 放置 proxy_pool 下共用的部分
# 对外交流的脚本
'''
未来可能购买代理的商家:
阿布 https://www.abuyun.com/pricing.html
西刺 http://www.xicidaili.com/wn/
'''
from matrix.models import *
import multiprocessing
import time
import json
import random
import requests
import os
def get_proxy_country(url):
'''
url 需要去掉端口部分
根据 url 读取其所属的国别,读取失败则返回空字符
'''
url = url.split(':')[0]
try:
test_url = 'http://ip-api.com/json/{}'
result = requests.get(test_url.format(url), timeout=10)
j = json.loads(result.text)
return j['countryCode']
except:
return ''
def save_proxy_to_db(url):
'''
将单个的 url,判断国别后,放入数据库,valid=None
'''
if not Proxy.objects.filter(url=url): # 去重
country = get_proxy_country(url)
proxy = Proxy(url=url, country=country, valid=None)
proxy.save()
print(country, proxy, 'is saved.')
def get_proxy(interval=60):
'''
从代理池取一个可以用的代理。
对于 use,fail,rate的更新外置
先不考虑国别的影响
'''
from matrix.models import Proxy
now = int(time.time())
pxys = Proxy.objects.filter(valid=True, stamp__lt=now-interval) # now > stamp + interval
if pxys:
pxy = pxys[0] # 符合条件里面的第一个 也就是use 最少的那一个
pxy.stamp = now # 被提取的代理会被打上stamp,在interval时间段内不会再被用到
pxy.use += 1
pxy.save()
return pxy
else: # 一个符合条件的都没有,一般是因为所有符合条件的 proxy 冷却时间都未到, 暂停一秒后重试
time.sleep(1)
print('No proxy is available. Wait 1 sec and retry.')
get_proxy(interval=interval)
| true
|
94fe85b2407bc3b19bf212fb8d818cfaf3fa1645
|
Python
|
jdanray/leetcode
|
/subdomainVisits.py
|
UTF-8
| 441
| 3.046875
| 3
|
[] |
no_license
|
# https://leetcode.com/problems/subdomain-visit-count/
class Solution:
def subdomainVisits(self, cpdomains):
count = {}
for cp in cpdomains:
n, dom = cp.split()
n = int(n)
dom = dom.split(".")
sub = ""
for d in dom[::-1]:
if sub:
sub = d + "." + sub
else:
sub = d + sub
if sub in count:
count[sub] += n
else:
count[sub] = n
return ["%d %s" % (count[sub], sub) for sub in count]
| true
|
6d3596b5a7994e7660eccf5acc649f3f24f2d810
|
Python
|
xc145214/python-learn
|
/exs/ex12.py
|
UTF-8
| 147
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 构造 1-100 的奇数列表
L = []
n = 1
while n < 100:
L.append(n)
n = n + 2
print L
| true
|
127a07e792fa0a15157043e0d03531bbe6c1fea9
|
Python
|
weruuu/Program
|
/Python/link_test.py
|
UTF-8
| 556
| 3
| 3
|
[] |
no_license
|
import pymysql
# 连接数据库
connect = pymysql.Connect(
host='localhost',
port=3306,
user='Eviless',
passwd='',
db='mysql',
charset='utf8'
)
# 获取游标
cursor = connect.cursor()
# 查询数据
sql = "SELECT * FROM testtb "
cursor.execute(sql)
for row in cursor.fetchall():
print(row)
print('共查找出', cursor.rowcount, '条数据')
# 插入数据
sql = "insert into testtb values(4,'Eviless',25)"
cursor.execute(sql)
connect.commit()
print('写入完成')
# 关闭连接
cursor.close()
connect.close()
| true
|
3b0220e68a951cf25df924007d283f8aa6383e7c
|
Python
|
lsst-sitcom/spot_motion_monitor
|
/spot_motion_monitor/controller/plot_ccd_controller.py
|
UTF-8
| 1,456
| 2.515625
| 3
|
[
"Python-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
# This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
import spot_motion_monitor.utils as smmUtils
__all__ = ['PlotCcdController']
class PlotCcdController():
"""This class manages the interactions between the all of the plot widgets
and calculation data models.
Attributes
----------
cameraPlotWidget : .CameraPlotWidget
An instance of the camera plot widget.
updater : .InformationUpdater
An instance of the status bar updater.
"""
def __init__(self, cpw):
"""Initialize the class.
Parameters
----------
cpw : .CameraPlotWidget
An instance of the camera plot widget.
"""
self.cameraPlotWidget = cpw
self.updater = smmUtils.InformationUpdater()
def passFrame(self, frame, showFrames):
"""Receive and handle the camera CCD frame.
Parameters
----------
frame : numpy.array
A frame from a camera CCD.
showFrames : bool
Flag to show camera CCD frames.
"""
if frame is None:
return
if showFrames:
self.cameraPlotWidget.image.setImage(frame)
| true
|
8e1733bf25585703533d5fb6226d5b7cb8a346c1
|
Python
|
jasonbaker/agentm
|
/agentm.py
|
UTF-8
| 3,372
| 2.6875
| 3
|
[] |
no_license
|
from pymongo.son_manipulator import SONManipulator
__version__ = '0.1.0'
class ValidationFailedError(Exception):
pass
def WritableValue(name, validator=None):
""" A value in the dictionary that has been exposed as both readable and
writable.
:param name: The key of the dictionary to be exposed.
:param validator: If given, a function that will return True if the value is valid. If this
function returns false, a :class:`~ValidationFailedError` will be raised.
"""
def _get_prop(self):
return self[name]
if validator:
def _set_prop(self, value):
if not validator(value):
raise ValidationFailedError
self[name] = value
else:
def _set_prop(self, value):
self[name] = value
return property(fget=_get_prop, fset=_set_prop)
def ReadonlyValue(name):
"""
A value that is exposed as read only.
:param name: The key of the dictionary to be exposed.
"""
def _get_prop(self):
return self[name]
return property(fget=_get_prop)
def Reference(name, cls):
def _get_prop(self):
if not isinstance(self[name], cls):
self[name] = cls(self[name])
return self[name]
def _set_prop(self, value):
if not isinstance(value, cls):
value = cls(value)
self[name] = value
return property(fget=_get_prop, fset=_set_prop)
def ReferenceList(name, cls):
"""
A list of references. This will convert each element of the list to cls if
it is not already.
"""
names = name.split(".")
last_name = names[-1]
def _get(self):
cursor = self
for piece in names[:-1]:
cursor = cursor.setdefault(piece, {})
return cursor, cursor.setdefault(last_name, [])
def _get_prop(self):
cursor, value = _get(self)
for index, val in enumerate(value):
if not isinstance(val, cls):
value[index] = cls(val)
return value
# NOTE: removing set for now, because of object identity issues
# and it's not strictly necessary for us atm
# def _set_prop(self, values):
# cursor, value = _get()
# cursor[last_name]
# self[name] = [cls(val) for val in values]
return property(fget=_get_prop) #, fset=_set_prop)
doc_registry = {}
class Document(dict):
abstract = True
class __metaclass__(type):
def __new__(cls, name, bases, dict):
abstract = dict.pop('abstract', False)
new_cls = type.__new__(cls, name, bases, dict)
if not abstract:
if new_cls.collection not in doc_registry:
doc_registry[new_cls.collection] = new_cls
else:
current_cls = doc_registry[new_cls.collection]
if issubclass(current_cls, new_cls):
doc_registry[new_cls.collection] = new_cls
return new_cls
id = ReadonlyValue('_id')
@classmethod
def transform(cls, son):
return cls(son)
class DocumentSONManipulator(SONManipulator):
def willcopy(self):
return True
def transform_outgoing(self, son, collection):
cls = doc_registry.get(collection.name)
if cls:
return cls.transform(son)
else:
return son
| true
|
24fbeabbe57b118a9256170483cae411604e0350
|
Python
|
EricWebsmith/ml_framework_poc
|
/py/job_executor.py
|
UTF-8
| 667
| 2.609375
| 3
|
[] |
no_license
|
import json
import sys
def my_import(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def execute(config_file):
config={}
with open(config_file) as f:
config=json.load(f)
f.close()
inputs = {}
results = []
for node_config in config['pipepline']:
the_class=my_import(node_config["class"])
obj=the_class()
inputs=obj.execute(inputs, node_config)
results.append(inputs['data'])
if __name__=="__main__":
print("start")
config_file=sys.argv[1]
execute(config_file)
print("DONE")
| true
|
6a49078ce095279c67650c98249a3732889d189b
|
Python
|
rpryzant/code-doodles
|
/interview_problems/2018/PRAMP/two_sum/twosum.py
|
UTF-8
| 1,047
| 3.1875
| 3
|
[] |
no_license
|
"""
classic case of pairs
[package1 package2 .. package n]
weight limit
need to select 2 packages whose sum == limit
only 1 pair
[3 1] [1 3]
1) bf
for package, check all others, if sum == limit, return that pair
O N^2
2)
21
[4, 6, 10, 15, 16]
^
if we find a 21-6=15 later on, we're done
{
needed: index
}
64
for package in arr:
if package val in my bf
O(n) to recover that
mark (target - package)
O(n) time
O(n) space
21
[4, 6, 10, 15, 16]
^
{
17: 0
15: 1
}
negatives are allowed, not limits
arr = []
"""
class BV:
def __init__(self, limit):
self.vec = [0 for _ in (limit+1)/32]
def get_bit()
def set_bit()
def get_indices_2(arr, limit):
bv = BV(limit)
for i, p in enumerate(arr):
if bv.get(p) == 1:
# search arr[:i] for limit - p
else:
bv.set(limit - p)
def get_indices_of_item_wights(arr, limit):
d = {}
for i, weight in enumerate(arr):
if weight in d:
return i, d[weight]
else:
d[limit - weight] = i
return []
| true
|
ceec6e5a52c2f8d8c002db5f8292a70a9a8dbb73
|
Python
|
Janoda/ayudantia-IIC1103
|
/crea_contrataciones.py
|
UTF-8
| 639
| 3.0625
| 3
|
[] |
no_license
|
import random
f = open("contrataciones.txt", "w")
equipos = ["Univ Católica", "U. La Calera", "Unión Española ", "Curicó Unido", "Antofagasta", "U. de Chile", "Huachipato", "U. Concepción", "Audax", "Everton", "Wanderers", "Cobresal", "Iquique", "Palestino", "Coquimbo", "O'Higgins", "La Serena", "Colo Colo" ]
newline = ''
for i in equipos:
st = random.randint(1,3) #solo uno de cada 3 equipos contara con un goleador estrella
if st == 1:
f.write(newline)
f.write(i + ';' + str(random.randint(1,4))) #agregamos al equipo i una contratacion con una destreza random entre 1 y 4
newline = '\n'
f.close()
| true
|
0651f6dbdc463606a0339f7c61ea82aae6aa5aa3
|
Python
|
Manuel-MA/webApp
|
/src/index.py
|
UTF-8
| 5,778
| 2.5625
| 3
|
[] |
no_license
|
from flask import Flask, url_for, render_template, request
app=Flask(__name__)
@app.template_global(name='zip')
def _zip(*args, **kwargs):
return __builtins__.zip(*args, **kwargs)
@app.route("/")
def cover():
return render_template('cover.html'), 200
@app.route("/City")
def city():
topic='City'
pictures=['arts1','street','townHall','bullring','skyline','velesEvents']
descriptions=['Arts and Science City','Street','Town Hall',
'Bullring','Skyline','Veles e Vents']
return render_template('gallery.html',topic=topic,pictures_descriptions=zip(pictures,descriptions)), 200
@app.route("/Nature")
def nature():
topic='Nature'
pictures=['albufera','malvarrosaBeach','coast','montanejos','serella','covaTallada']
descriptions=['Albufera','Malvarrosa','Altea','Montanejos','Serella','Cova tallada']
return render_template('gallery.html',topic=topic,pictures_descriptions=zip(pictures,descriptions)), 200
@app.route("/Leisure")
def leisure():
topic='Leisure'
pictures=['fallas','gulliver','marinaBeach','umbracle','olympia','heronCity']
descriptions=['Fallas','Gulliver','Marina Beach','Umbracle Disco',
'Olumpia Theatre','Heron City']
return render_template('gallery.html',topic=topic,pictures_descriptions=zip(pictures,descriptions)), 200
@app.route("/Sports")
def sports():
topic='Sports'
pictures=['fonteta','surf','athleticsTrack',
'mestalla','skatePark','volleyCourt']
descriptions=['"La Fontenta" basket stadium','Surf on Malvarrosa Beach',
'Athletics Track','Valencia CF stadium', 'Skate park','Volleyball courts']
return render_template('gallery.html',topic=topic,pictures_descriptions=zip(pictures,descriptions)), 200
@app.route("/<catalogue>/<picture>")
def picture(catalogue=None,picture=None):
cat = {'catalogue':catalogue}
pict = {'picture':picture}
if catalogue == "City":
if picture == "street":
description = 'This is a Valencia`s street located in Campanar neighbourhood'
elif picture == "arts1":
description = 'The famous "Ciudad de las artes y las ciencias"'
elif picture == "townHall":
description = 'This is the town hall, located in the citycentre'
elif picture == "bullring":
description = 'An old bullring'
elif picture == "skyline":
description = 'Nice views of Valencia skyline'
elif picture == "velesEvents":
description = 'Veles e Vents is a building in the port'
else:
return page_not_found(404)
elif catalogue == "Nature":
if picture == "albufera":
description = "L'albugfera is the most famous lake in Valencia"
elif picture == "malvarrosaBeach":
description = 'Malvarrosa Beach is inside Valencia city'
elif picture == "coast":
description = 'Altea`s coast'
elif picture == "montanejos":
description = 'Montanejos lake'
elif picture == "serella":
description = 'Serella`s mountain'
elif picture == "covaTallada":
description = 'Cova Tallada'
else:
return page_not_found(404)
elif catalogue=="Leisure":
if picture=="fallas":
description = 'A monument of Valencia`s fest'
elif picture == "gulliver":
description = 'Gulliver is a place to enjoy with your kids'
elif picture == "marinaBeach":
description = 'Marina Beach is a evening disco'
elif picture == "umbracle":
description = 'Umbracle is a night disco'
elif picture == "olympia":
description = 'Olympia theatre is the most famous theatre in Valencia'
elif picture == "heronCity":
description = 'Heron City has cinemas, bowling and lots of leisure places'
else:
return page_not_found(404)
elif catalogue=="Sports":
if picture=="fonteta":
description = 'This is Valencia Basket`s stadium'
elif picture == "surf":
description = 'Surf is very usual on Malvarrosa Beach'
elif picture == "athleticsTrack":
description = 'Athletics Track where you can go for free'
elif picture == "skatePark":
description = 'The new Skate Park'
elif picture == "mestalla":
description = 'Mestalla is the Valencia CF stadium'
elif picture == "volleyCourt":
description = 'VolleyBall court'
else:
return page_not_found(404)
else:
return page_not_found(404)
return render_template('picture.html',cat=cat,pict=pict,description=description), 200
@app.route("/redirect", methods=['POST','GET'])
def redirection():
dest = request.form['dest']
if dest == "Sports":
return sports()
elif dest == "City":
return city()
elif dest == "Nature":
return nature()
elif dest == "Leisure":
return leisure()
elif dest == "/":
return cover()
@app.errorhandler(404)
def page_not_found(error):
form = '''
<html>
<head>
<link href="../static/css/bootstrap.min.css" rel="stylesheet"/>
<link href="../static/css/style.css" rel="stylesheet"/>
<body>
<div id="container">
<h1> ERROR, the requested URL is not valid</h1>
<div class="mainImgContainer">
<h3> Let us take you back </h3>
<h4> Where do you want to go? </h4>
<br/><br/><br/>
<form action="/redirect" method="post" name="form">
<input type="radio" name="dest" value="/"> <b>Home</b>
<input type="radio" name="dest" value="City"> <b>City</b>
<input type="radio" name="dest" value="Nature"> <b>Nature</b>
<input type="radio" name="dest" value="Leisure"> <b>Leisure</b>
<input type="radio" name="dest" value="Sports"> <b>Sports</b>
<br/><br/>
<input type="submit" value="Go!" class="btnGo btn btn-primary btn-lg">
</form>
</div>
</div>
<html><body>
'''
return form
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| true
|
30a510e4155044572dfed321542d08c8c1afddeb
|
Python
|
dschaffner/SSX_python
|
/PE_SC/constructPatternCount.py
|
UTF-8
| 945
| 3.25
| 3
|
[] |
no_license
|
import numpy as np
from collections import Counter
def constructPatternCount(data,n=5,delay=1):
'''
Parameters
----------
data : 1-D array
time-series data.
n : integer,optional
embedding dimension. The default is 5.
delay : integer, optional
embedday delay. The default is 1.
Returns
-------
A Count occurance of patterns and total number of permutations
'''
T=np.array(data)
if len(T.shape)>1:
raise TypeError('Data must be a 1-D array')
t = len(T)
Ptot = t - delay*(n - 1) #Total number of order n permutations in T
#print 'Number of permutations = ', Ptot
A = [] #Array to store each permutation
for i in range(Ptot): #Will run through all possible n segments of T
A.append(''.join(T[i:i+(n-1)*delay+1:delay].argsort().astype(str)))
#Count occurance of patterns
count=Counter(A)
return count,Ptot
| true
|
c786c02d94074638caeb83eb265d94b6b0a54687
|
Python
|
BhoomikaMS/PIP-1BM17CS047
|
/divisors.py
|
UTF-8
| 117
| 3.46875
| 3
|
[] |
no_license
|
n=int(input("Enter a number: "))
li=[]
for i in range(1,(n//2)+1):
if n%i==0:
li.append(i)
li.append(n)
print(li)
| true
|
db9de781f8aab920f7d764cbf62e202304a33f8c
|
Python
|
paul-yamaguchi/2021_forStudy
|
/05_01バブルソート.py
|
UTF-8
| 254
| 3.4375
| 3
|
[] |
no_license
|
def sort(A):
for i in range(0, len(A) - 1):
modify_order(A, i)
print(A);print()
def modify_order(A, i):
for j in range(len(A) - 1, i, -1):
if A[j - 1] >A[j]:
A[j - 1], A[j] = A[j], A[j - 1]
sort([9,2,7,4,5])
| true
|
87d873e4c8c46b5aa2ced21af067aab9bf6416b7
|
Python
|
tomisilander/bn
|
/bn/infer/jtr.py
|
UTF-8
| 1,061
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import heapq
import udg, elo
""" Builds a tree of clique(indice)s """
def jtr(clqs, valcs):
# sort sepsets (cliquepairs) largest first (smallest weight)
w = [elo.weight(clq, valcs) for clq in clqs]
clqpairs = [((-len(clq1&clq2), w[i]+w[j]), (i, j))
for (i,clq1) in enumerate(clqs)
for (j,clq2) in enumerate(clqs)
if i<j]
heapq.heapify(clqpairs)
# from now on cliques are identified by their indices
nof_clqs = len(clqs)
# one of these qlique trees will be returned
jtrs = [udg.Udg([i]) for i in xrange(nof_clqs)]
ti = jtrs[0]
while ti.nof_edges() != nof_clqs - 1:
(i,j) = heapq.heappop(clqpairs)[1] # cliques
ti, tj = jtrs[i], jtrs[j] # and their trees
if ti != tj:
# join udg tj to udg ti
ti.join_with(tj, (i,j))
# mark cliques of tj to be now in ti
for clqj in tj.nodes():
jtrs[clqj] = ti
# return if ready
return ti
| true
|
662864a5b8b96d1d3bf6e2ad8d991002c61f3695
|
Python
|
chrisk60331/multiprocess-ftp
|
/test/queue_test.py
|
UTF-8
| 1,735
| 2.6875
| 3
|
[] |
no_license
|
"""Test suite for queue classes."""
from unittest.mock import Mock
import pytest
from multiprocess_ftp.better_queue import BetterQueue
def test_better_queue():
better_queue = BetterQueue()
expected_object, expected_count = ["foo"], 1
better_queue.put(expected_object)
actual_count = better_queue.qsize()
assert expected_count == actual_count
better_queue.put(expected_object)
expected_count += 1
actual_count = better_queue.qsize()
assert expected_count == actual_count
expected_count -= 1
actual_object = better_queue.get()
actual_count = better_queue.qsize()
assert expected_object == actual_object
assert expected_count == actual_count
def test_better_queue_all_tasks_done():
better_queue = BetterQueue()
expected_object = ["foo"]
better_queue.put(expected_object)
actual_tasks_done = better_queue.all_tasks_done()
assert not actual_tasks_done
better_queue.get()
actual_tasks_done = better_queue.all_tasks_done()
assert not better_queue.qsize()
assert actual_tasks_done
def test_better_queue_raises_runtime_error():
better_queue = BetterQueue()
with pytest.raises(RuntimeError):
assert not better_queue.__getstate__()
def test_better_queue_set_state():
better_queue = BetterQueue()
better_queue.__setstate__(
{
"parent_state": [
Mock(),
Mock(),
Mock(),
Mock(),
Mock(),
Mock(),
Mock(),
Mock(),
Mock(),
Mock(),
]
}
)
better_queue.size.increment(1)
assert better_queue.qsize() == 1
| true
|
de502c74433892da7bcaed427fe5fd46ccc01a87
|
Python
|
smspillaz/graph-paths
|
/increasing.py
|
UTF-8
| 108
| 3.3125
| 3
|
[] |
no_license
|
complexity = 20
for i in range(0, complexity):
for j in range(0, i):
print(i)
print(j)
| true
|
c4818505cfcf1c61c62ecf66f41883d492222013
|
Python
|
AndrewYoung97/baseline_bigcn
|
/tools/earlystopping2class.py
|
UTF-8
| 2,599
| 2.859375
| 3
|
[] |
no_license
|
import numpy as np
import torch
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.accs=0
self.F1=0
self.F2 = 0
self.F3 = 0
self.F4 = 0
self.val_loss_min = np.Inf
def __call__(self, val_loss, accs,acc1,acc2,pre1,pre2,rec1,rec2,F1,F2,model,modelname,str):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.accs = accs
self.acc1=acc1
self.acc2=acc2
self.pre1=pre1
self.pre2=pre2
self.rec1=rec1
self.rec2=rec2
self.F1 = F1
self.F2 = F2
self.save_checkpoint(val_loss, model,modelname,str)
elif score < self.best_score:
self.counter += 1
# print('EarlyStopping counter: {} out of {}'.format(self.counter,self.patience))
if self.counter >= self.patience:
self.early_stop = True
print("BEST LOSS:{:.4f}| Accuracy: {:.4f}|acc1: {:.4f}|acc2: {:.4f}|pre1: {:.4f}|pre2: {:.4f}"
"|rec1: {:.4f}|rec2: {:.4f}|F1: {:.4f}|F2: {:.4f}"
.format(-self.best_score,self.accs,self.acc1,self.acc2,self.pre1,self.pre2,self.rec1,self.rec2,self.F1,self.F2))
else:
self.best_score = score
self.accs = accs
self.acc1=acc1
self.acc2=acc2
self.pre1=pre1
self.pre2=pre2
self.rec1=rec1
self.rec2=rec2
self.F1 = F1
self.F2 = F2
self.save_checkpoint(val_loss, model,modelname,str)
self.counter = 0
def save_checkpoint(self, val_loss, model,modelname,str):
'''Saves model when validation loss decrease.'''
# if self.verbose:
# print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(self.val_loss_min,val_loss))
torch.save(model.state_dict(),modelname+str+'.m')
self.val_loss_min = val_loss
| true
|
dda2d3780b425758311b613fc680d8f8b8c987c9
|
Python
|
BEmran/sim-to-real
|
/extra/simple_dynamic.py
|
UTF-8
| 3,001
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 28 11:29:10 2018
@author: emran
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
class Dynamic:
def __init__(self, s0, dsdt, dt = 0.01, int_type = "rk4"):
ns = len(s0)
self.dt = dt
self.state = np.asarray(s0)
self.intType = int_type
self.dsdt = dsdt
self.ds = np.zeros(ns)
def step(self, a):
s = np.append(self.state,a)
if (self.intType == "forward"):
ns = self.forward(s, 0)
if (self.intType == "euler"):
ns = self.euler(s, 0)
elif (self.intType == "rk4"):
ns = self.rk4(s, 0)
self.state = ns[:-1]
return (self.state)
def get_states(self):
return (self.state)
def rk4(self, y0, a):
dt = self.dt
dt2 = self.dt/2
k1 = np.asarray(self.dsdt(y0 , 0))
k2 = np.asarray(self.dsdt(y0 + dt2 * k1, dt2))
k3 = np.asarray(self.dsdt(y0 + dt2 * k2, dt2))
k4 = np.asarray(self.dsdt(y0 + dt * k3, dt))
yout = y0 + self.dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
return yout
def forward(self, y0, t):
yout = y0 + self.dt * np.asarray(self.dsdt(y0, self.dt))
return yout
def euler(self, y0, t):
dy = np.append(self.ds,0)
ndy = np.asarray(self.dsdt(y0, self.dt))
yout = y0 + self.dt * (ndy + dy) / 2.0;
self.ds = ndy[:-1]
return yout
def dsdt (s, t):
x1 = s[0]
x2 = s[1]
a = s[2]
dx = x2
ddx = -10 * x1 - 4 * x2 + 10 * a
return [dx, ddx, 0]
if __name__ == "__main__":
dt = 0.1;
s0 = [0.0, -10.0]
sysf = Dynamic(s0, dsdt, dt,"forward")
sysr = Dynamic(s0, dsdt, dt,"rk4")
syse = Dynamic(s0, dsdt, dt,"rk4")
imax = 100;
time = np.arange(0,imax*dt,dt)
Xsf = np.zeros([imax,2])
Xsr = np.zeros([imax,2])
Xse = np.zeros([imax,2])
Xso = np.zeros([imax,2])
Xsf[0] = np.asarray(sysf.get_states())
Xsr[0] = np.asarray(sysr.get_states())
Xse[0] = np.asarray(syse.get_states())
Xso[0] = np.asarray(s0)
for t in range(1,len(time)):
a = np.random.randint(-10,10)
a= 1
sysf.step(a)
sysr.step(a)
syse.step(a)
Xsf[t] = sysf.get_states()
Xsr[t] = sysr.get_states()
Xse[t] = sysr.get_states()
Xso[t] = odeint(dsdt, list(np.asarray(Xso[t-1],a)), [0, dt])[1,:-1]
plt.figure(1)
plt.subplot(211)
plt.plot(time, Xsf[:,0],
time, Xsr[:,0],
time, Xse[:,0],
time, Xso[:,0])
plt.legend(("forward", "rk4", "euler", "odint"))
plt.subplot(212)
plt.plot(time, np.sqrt((Xso[:,0]-Xsf[:,0])**2),
time, np.sqrt((Xso[:,0]-Xsr[:,0])**2),
time, np.sqrt((Xso[:,0]-Xse[:,0])**2))
plt.legend(("forward", "rk4", "euler"))
| true
|
0c107cf6558f60f5e34b11cde5985b2683833f0b
|
Python
|
mjj29/deckchecks
|
/top_tables.py
|
UTF-8
| 3,542
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import csv, sys, cgitb, os, cgi
from deck_mysql import DeckDB
from printers import TextOutput, HTMLOutput
from login import check_login
from swisscalc import calculateTop8Threshold
output = None
def top_tables(tournament, form):
try:
with DeckDB() as db:
id = db.getEventId(tournament)
currentRound = db.get_round(id)
totalRounds = db.getEventRounds(id)
playersWithEachByes = db.getPlayersForEachByeNumber(id)
(currentMarginalThreshold, currentTop8Threshold, undefeatedThreshold) = calculateTop8Threshold(playersWithEachByes, totalRounds, currentRound)
output.printMessage("Players with at least %s points can still make top 8" % currentTop8Threshold)
tables = db.get_top_tables(id)
with output.table("Table", "Score", "Name", "Previous Checks", "Score", "Name", "Previous Checks") as table:
for row in tables:
try:
score = row[0]
tablenum = row[1]
(player1, player2) = db.get_table(id, tablenum)
(name1, score1, _, _) = player1
(name2, score2, _, _) = player2
prevChecks1 = db.getPreviousChecks(id, name1)
prevChecks2 = db.getPreviousChecks(id, name2)
if (score1 == undefeatedThreshold or score2 == undefeatedThreshold):
table.setNextRowType('undefeated')
elif (score1 < currentMarginalThreshold and score2 < currentMarginalThreshold):
table.setNextRowType('dead')
elif (score1 >= currentTop8Threshold or score2 >= currentTop8Threshold):
table.setNextRowType('live')
elif (score1 > currentMarginalThreshold or score2 > currentMarginalThreshold):
table.setNextRowType('marginal')
else:
table.setNextRowType('unlikely')
table.printRow(
output.makeLink(form, 'get_table?table=%s'%tablenum, tablenum),
score1,
output.makeLink(form, 'get_player?name=%s'%name1, name1),
", ".join([str(x) for x in prevChecks1]),
score2,
output.makeLink(form, 'get_player?name=%s'%name2, name2),
", ".join([str(x) for x in prevChecks2]))
except Exception as e:
print str(e)
except Exception as e:
output.printMessage("Failed to print top tables: %s" % (e))
def docgi():
print """Content-type: text/html
<html>
<head><title>Deck Checks - top tables</title><link rel='stylesheet' href='style.css' /></head>
<body>
<h1>Top tables</h1>
"""
form = cgi.FieldStorage()
with DeckDB() as db:
db.checkEvent(form["event"].value, output)
roundnum = db.get_round(db.getEventId(form["event"].value))
output.pageHeader(db, form['event'].value, roundnum, form)
if not check_login(output, form['event'].value, form['password'].value if 'password' in form else '', 'top_tables'):
return
print """
<p>Key:
<span class='undefeated'>undefeated</span>
<span class='live'>definitely live for top 8</span>
<span class='marginal'>possibility of top 8</span>
<span class='unlikely'>theoretically possible</span>
<span class='dead'>cannot top 8</span>
</p>
"""
top_tables(form["event"].value, form)
output.printLink(form, 'export?type=top', 'Download as TSV')
output.printLink(form, 'root', 'Return to menu')
print """
</body>
</html>
"""
def main(args):
    # CLI entry point: args[0] is the event name to report on.
    with DeckDB() as db:
        db.checkEvent(args[0], output)
        top_tables(args[0], {})
if __name__ == "__main__":
    # Serve as a CGI script when invoked by a web server (REQUEST_URI set),
    # otherwise run as a plain command-line tool.
    if 'REQUEST_URI' in os.environ:
        cgitb.enable()
        output = HTMLOutput()
        docgi()
    else:
        if len(sys.argv) < 2:
            print "Usage: top_tables.py <event>"
            sys.exit(1)
        output = TextOutput()
        main(sys.argv[1:])
| true
|
2b34b3dae39f5b2d2c7e2301f5f5432f827187fe
|
Python
|
spacetx/starfish
|
/starfish/core/experiment/builder/test/test_inplace.py
|
UTF-8
| 5,571
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
import hashlib
import os
from pathlib import Path
from typing import Mapping, Union
import numpy as np
from skimage.io import imsave
from slicedimage import ImageFormat
from starfish.core.types import Axes, Coordinates, CoordinateValue
from ..builder import write_experiment_json
from ..inplace import (
InplaceFetchedTile, InplaceWriterContract,
)
from ..providers import FetchedTile, TileFetcher
from ...experiment import Experiment, FieldOfView
SHAPE = {Axes.Y: 500, Axes.X: 1390}
def test_inplace(tmpdir):
    """Build an in-place experiment and confirm the loaded image has real data.

    The tiles handed to the experiment builder are all zeroes, so any non-zero
    pixel in the loaded image proves the experiment references the files that
    were already on disk rather than the placeholder tiles.
    """
    experiment_dir = Path(tmpdir)
    write_inplace(experiment_dir)
    loaded = Experiment.from_json(os.fspath(experiment_dir / "experiment.json"))
    image = loaded.fov().get_image(FieldOfView.PRIMARY_IMAGES)
    assert not np.allclose(image.xarray, 0)
def tile_fn(input_dir: Path, prefix: str, fov: int, r: int, ch: int, zplane: int) -> Path:
    """Return the on-disk path of one tile: <input_dir>/fov_NNN/<prefix>-Z<z>-H<r>-C<ch>.tiff."""
    return input_dir / f"fov_{fov:03}" / f"{prefix}-Z{zplane}-H{r}-C{ch}.tiff"
class ZeroesInplaceTile(InplaceFetchedTile):
    """All-zero placeholder tile for in-place experiment construction.

    Pixel content is irrelevant in in-place mode: the builder only records
    references to files already on disk, so this class merely supplies the
    metadata — tile shape, dummy coordinates, and the checksum of the file.
    """

    def __init__(self, file_path: Path):
        self.file_path = file_path

    @property
    def shape(self) -> Mapping[Axes, int]:
        return SHAPE

    @property
    def coordinates(self) -> Mapping[Union[str, Coordinates], CoordinateValue]:
        # Tiny dummy extent on every axis; real coordinates live on disk.
        span = (0.0, 0.0001)
        return {axis: span for axis in (Coordinates.X, Coordinates.Y, Coordinates.Z)}

    @property
    def sha256(self) -> str:
        # Checksum of the real on-disk file, not of the zeroed placeholder data.
        return hashlib.sha256(self.file_path.read_bytes()).hexdigest()

    @property
    def filepath(self) -> Path:
        return self.file_path
class ZeroesInplaceFetcher(TileFetcher):
    """Fetcher producing ZeroesInplaceTile objects for one image prefix."""

    def __init__(self, input_dir: Path, prefix: str):
        self.input_dir = input_dir
        self.prefix = prefix

    def get_tile(
            self, fov_id: int, round_label: int, ch_label: int, zplane_label: int) -> FetchedTile:
        # Mirrors the naming scheme used when the tiles were written to disk.
        name = f"{self.prefix}-Z{zplane_label}-H{round_label}-C{ch_label}.tiff"
        return ZeroesInplaceTile(self.input_dir / f"fov_{fov_id:03}" / name)
def fov_path_generator(parent_toc_path: Path, toc_name: str) -> Path:
    """Place each FOV manifest in a sibling directory named after its table of contents."""
    return parent_toc_path.parent / toc_name / f"{parent_toc_path.stem}.json"
def format_data(
        image_dir: Path,
        primary_image_dimensions: Mapping[Union[Axes, str], int],
        aux_name_to_dimensions: Mapping[str, Mapping[Union[Axes, str], int]],
        num_fovs):
    """Write experiment.json in in-place mode, referencing tiles already in image_dir."""
    def add_codebook(experiment_json_doc):
        # Post-processing hook: point the experiment at a codebook file.
        experiment_json_doc['codebook'] = "codebook.json"
        return experiment_json_doc
    write_experiment_json(
        path=os.fspath(image_dir),
        fov_count=num_fovs,
        tile_format=ImageFormat.TIFF,
        primary_image_dimensions=primary_image_dimensions,
        aux_name_to_dimensions=aux_name_to_dimensions,
        primary_tile_fetcher=ZeroesInplaceFetcher(image_dir, FieldOfView.PRIMARY_IMAGES),
        aux_tile_fetcher={
            aux_img_name: ZeroesInplaceFetcher(image_dir, aux_img_name)
            for aux_img_name in aux_name_to_dimensions.keys()
        },
        postprocess_func=add_codebook,
        default_shape=SHAPE,
        # InplaceWriterContract makes the builder link to existing files
        # instead of writing tile data.
        writer_contract=InplaceWriterContract(),
    )
def write_image(
        base_path: Path,
        prefix: str,
        num_fovs: int,
        image_dimensions: Mapping[Union[Axes, str], int],
):
    """Writes the constituent tiles of an image to disk. The tiles are made up with random noise.

    One float32 TIFF of shape SHAPE is written per
    (fov, round, channel, zplane) combination, using tile_fn() for naming.
    """
    for fov_num in range(num_fovs):
        for round_label in range(image_dimensions[Axes.ROUND]):
            for ch_label in range(image_dimensions[Axes.CH]):
                for zplane_label in range(image_dimensions[Axes.ZPLANE]):
                    path = tile_fn(base_path, prefix, fov_num, round_label, ch_label, zplane_label)
                    path.parent.mkdir(parents=True, exist_ok=True)
                    data = np.random.random(size=(SHAPE[Axes.Y], SHAPE[Axes.X])).astype(np.float32)
                    imsave(os.fspath(path), data, plugin="tifffile")
def write_inplace(tmpdir: Path, num_fovs: int = 2):
    """Create a complete in-place experiment under tmpdir.

    Writes random-noise tiles for the primary and auxiliary images, then
    formats (and immediately re-loads, as a sanity check) the experiment.
    """
    primary_image_dimensions: Mapping[Union[Axes, str], int] = {
        Axes.ROUND: 4,
        Axes.CH: 4,
        Axes.ZPLANE: 1,
    }
    aux_name_to_dimensions: Mapping[str, Mapping[Union[Axes, str], int]] = {
        'nuclei': {
            Axes.ROUND: 1,
            Axes.CH: 1,
            Axes.ZPLANE: 1,
        },
    }
    # write out the image files
    write_image(tmpdir, FieldOfView.PRIMARY_IMAGES, num_fovs, primary_image_dimensions)
    for aux_img_name in aux_name_to_dimensions.keys():
        write_image(tmpdir, aux_img_name, num_fovs, aux_name_to_dimensions[aux_img_name])
    # format the experiment.
    format_data(tmpdir, primary_image_dimensions, aux_name_to_dimensions, num_fovs)
    # Sanity check: the written experiment must load without raising.
    Experiment.from_json(os.fspath(tmpdir / "experiment.json"))
| true
|
774d961b22dbec8fce3155c3d8f421a2b96bd20a
|
Python
|
pankajsherchan/MachineLearning
|
/Regression/Linear Regression/SimpleLinearRegression/simplelinearRegression.py
|
UTF-8
| 1,891
| 3.28125
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def trainData(X, Y, theta, learning_rate, iterations):
    """Fit theta by batch gradient descent and plot the MSE learning curve.

    Args:
        X: design matrix (N x D), bias column included.
        Y: target vector (N,).
        theta: initial parameter vector (D,).
        learning_rate: step size. NOTE(review): applied to the *summed*
            gradient (no 1/N factor), so it must be small relative to N.
        iterations: number of gradient steps.

    Returns:
        The fitted parameter vector.
    """
    costs = []
    for i in range(iterations):
        Ytheta = X.dot(theta)
        cost = Ytheta - Y  # residual vector
        theta = theta - learning_rate * X.T.dot(cost)  # gradient step
        mse = cost.dot(cost) / len(X)
        print(mse, 'iteration' , i)
        costs.append(mse)
    # Show the learning curve (blocks until the window is closed).
    plt.plot(costs)
    plt.show()
    return theta
def testData(X, Y, theta, originalX):
    """Plot the test data as a scatter with the fitted line X.dot(theta) over it.

    originalX carries the unaugmented inputs used for the x-axis.
    Always returns 0 (plotting side effect only).
    """
    Ytheta = X.dot(theta)
    plt.scatter(originalX, Y)
    plt.plot(originalX, Ytheta)
    plt.show()
    return 0
def scale_dataset(Y):
    """Return Y standardised to zero mean and unit variance (z-scores)."""
    return (Y - np.mean(Y)) / np.std(Y)
def main():
    """Load train/test CSVs, standardise the training data, fit a line by
    gradient descent, and plot the fit over the test set."""
    print('reading CSV')
    X = []
    Y = []
    path = '/Users/Pankaj/machine_learning_examples/linear_regression_class/'  # unused leftover
    # Each CSV line is "x,y" with no header.
    for line in open('train.csv'):
        # print(line)
        x, y = line.split(',')
        #print(x)
        #print(y)
        X.append(float(x))
        Y.append(float(y))
    Xtest = []
    Ytest = []
    for line in open('test.csv'):
        # print(line)
        x, y = line.split(',')
        #print(x)
        #print(y)
        Xtest.append(float(x))
        Ytest.append(float(y))
    X = np.array(X)
    Y = np.array(Y)
    Xtest = np.array(Xtest)
    Ytest = np.array(Ytest)
    originalX = Xtest
    # Prepend a bias column of ones to the test design matrix.
    Xtest = np.vstack([np.ones(len(Xtest)), Xtest]).T
    # NOTE(review): training data is standardised but the test data is not —
    # the fitted theta is applied to raw Xtest/Ytest below; confirm intended.
    X = scale_dataset(X)
    Y = scale_dataset(Y)
    plt.scatter(X, Y)
    plt.show()
    N = len(X)
    X = np.vstack([np.ones(N), X]).T
    D = 2
    w = np.random.randn(D) / np.sqrt(D)
    initial_theta = np.array([0.5, 0.5])
    initial_theta = w  # random init overrides the constant init above
    learning_rate = 0.001
    iterations = 1000
    theta = trainData(X, Y, initial_theta, learning_rate, iterations)
    testData(Xtest, Ytest, theta, originalX)
if __name__ == "__main__":
main()
| true
|
de034679596037950018f0b827a868770bf412ea
|
Python
|
mlell/tapas
|
/scripts/src/geom_induce.py
|
UTF-8
| 3,143
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
from random import random
def main():
    # Parse CLI options, then stream stdin line-by-line through geom_mutate.
    parser= ArgumentParser(description="""
    Change one base into another with a probability geometrically dependent
    on proximity to string beginning or end. The function used to calculate
    the base exchange probabilities in dependence of the base position is
    P_{Base Exchange}(x) = fac * dgeom(x, prob) + t
    where dgeom is the density function of the geometric distribution.
    x is considered therein the trial number of the first success
    (x \\in {1,2,3,...})
    All other parameters are explained below.
    """)
    parser.add_argument("fromBase", metavar="from", type=str,
        help="The base to change")
    parser.add_argument("toBase", metavar="to", type=str,
        help="The base to be changed into")
    parser.add_argument("prob", metavar="probability", type=float,
        help="argument for geometric distribution: success probability"+
        "(don't confuse with base exchange probability!)")
    parser.add_argument("fac", metavar="factor",type=float,
        help="factor to multiply the probabilities of base exchange")
    parser.add_argument("t", metavar="const",type=float,
        help="constant part of base exchange probability function")
    parser.add_argument("--inverse",action="store_true", default=False,
        help="mutate with respect to distance from the end of the read"+
        " instead from the beginning")
    args = parser.parse_args()
    prob = args.prob
    fac = args.fac
    fromBase = args.fromBase
    toBase = args.toBase
    t = args.t
    inverse = args.inverse
    #sys.argv.pop(0) # shift
    #sys.argv.pop(0) # remove first two arguments, remaining for fileinput
    # Process stdin until EOF; each line (including its newline) is mutated.
    while True:
        line = sys.stdin.readline()
        if not line: break
        sys.stdout.write(geom_mutate(
            string=line, fromBase=fromBase
            , toBase=toBase, fac=fac
            , prob=prob, t=t
            , inverse=inverse))
        sys.stdout.flush() # don't buffer multiple lines
def geom_mutate(string, fromBase, toBase, fac, prob, t, inverse=False):
    """Mutate matching characters with a position-dependent probability.

    Each character equal to ``fromBase`` (case-insensitive) is replaced by
    ``toBase`` with probability ``fac * geom(x, prob) + t``, where x is the
    1-based distance from the string start (or from the end if ``inverse``).
    """
    target = fromBase.lower()
    length = len(string)
    out = list(string)
    for pos, ch in enumerate(out):
        if ch.lower() != target:
            continue
        # NOTE(review): with inverse=True the final character gives x=0 and
        # geom() returns None (TypeError below); in practice lines end with
        # '\n', which never matches a base — confirm for newline-less input.
        x = (length - pos - 1) if inverse else (pos + 1)
        if random() < fac * geom(x, prob) + t:
            out[pos] = toBase
    return "".join(out)
def geom(k, p):
    """Geometric pmf: P(first success on trial k), or None when k < 1."""
    if k < 1:
        return None
    return p * (1 - p) ** (k - 1)
if (__name__== "__main__"): main()
| true
|
f362d06212e5c21eec34b245e4a58b02d8ebc44c
|
Python
|
jaydoe723/MachineLearning
|
/Pseudomonas Aeruginosa Files/load_dataset.py
|
UTF-8
| 2,434
| 2.734375
| 3
|
[] |
no_license
|
from random import shuffle
import glob
import sys
import re
from PIL import Image
import tensorflow as tf
import pandas as pd
TARGET_FEATURE = 'carb.auc.delta'
target_df = pd.read_csv("target_labels.csv")
# Convert to form the tf will understand
def _int64_feature(value):
    # Wrap a scalar int in a TF Feature so it can go into an Example proto.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# Convert to form the tf will understand
def _bytes_feature(value):
    # Wrap a bytes value in a TF Feature so it can go into an Example proto.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# Find strain number then then find corresponding label
def find_label(filename):
    """Extract the 'PIL-<n>' strain number from filename and look up its label.

    Returns the TARGET_FEATURE value from target_labels.csv for that strain,
    or 0 when the strain is absent.
    NOTE(review): assumes every filename contains 'PIL-<digits>'; otherwise
    re.search returns None and regex.start() raises AttributeError.
    """
    regex = re.search("PIL-\d*", filename)
    strain = filename[regex.start()+4: regex.end()]
    strain = int(strain)
    df = target_df.loc[target_df['strain'] == strain]
    if df.empty:
        label = 0
    else:
        label = df.iloc[0][TARGET_FEATURE]
    return label
def create_record(out_filename, addrs):
    """Serialise each image in `addrs` (with its looked-up label) into a TFRecord file."""
    # open the TFRecords file
    writer = tf.python_io.TFRecordWriter(out_filename)
    print("Creating: " + out_filename)
    for i in range(len(addrs)):
        # print how many images are saved every 5 images
        if not i % 5:
            print('{}/{}'.format(i, len(addrs)))
            sys.stdout.flush()
        # Load the image
        filename = addrs[i]
        img = Image.open(filename)
        if img is None:
            continue
        # Generate label
        label = find_label(filename)
        # Create a feature
        feature = {
            'image_raw': _bytes_feature(img.tobytes()),
            'label': _int64_feature(label)
        }
        # Create an example protocol buffer
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        # Serialize to string and write on the file
        writer.write(example.SerializeToString())
    print("Done")
    writer.close()
    sys.stdout.flush()
# Script body: gather image paths per split, shuffle, and write one TFRecord each.
train_path = 'images/train/*.jpg'
test_path = 'images/test/*.jpg'
val_path = 'images/validation/*.jpg'
# read addresses and labels from the train folder
train_addrs = glob.glob(train_path)
test_addrs = glob.glob(test_path)
val_addrs = glob.glob(val_path)
# shuffle data
shuffle(train_addrs)
shuffle(test_addrs)
shuffle(val_addrs)
# create records files
create_record('train.tfrecords', train_addrs)
create_record('test.tfrecords', test_addrs)
create_record('val.tfrecords', val_addrs)
| true
|
2c9dc19871bbd323ff39b396ca8b92305da78c77
|
Python
|
315181690/THC
|
/Python/HackerRank/Sum_Basic_Looping.py
|
UTF-8
| 235
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/python3.7
#
#David Alonso Garduño Granados
#Python(3.7.4)
#01/12/19
#01/12/19
#Se realiza la suma de n terminos de manera recursiva
def Sloop(x):
    """Recursively compute the sum 1 + 2 + ... + x.

    Returns 0 for x <= 0 (the original recursed without bound for x < 1).
    """
    if x <= 0:
        return 0
    if x == 1:
        return 1
    return x + Sloop(x - 1)
n=int(input())
print(Sloop(n))
| true
|
5df1f4cfd6a8d9c9344d4069d4d27d1f2168ff43
|
Python
|
sixspeedchips/neural-net-impl
|
/src/rework/Functions.py
|
UTF-8
| 320
| 2.78125
| 3
|
[] |
no_license
|
import numpy as np
class Tanh:
    """Hyperbolic tangent activation and its derivative."""

    @staticmethod
    def f(x):
        return np.tanh(x)

    @staticmethod
    def prime(x):
        # d/dx tanh(x) = (1 + tanh x)(1 - tanh x) = 1 - tanh^2(x)
        t = np.tanh(x)
        return (1 + t) * (1 - t)
class Relu:
    """Rectified linear unit activation and its (sub)derivative."""

    @staticmethod
    def f(x):
        return np.maximum(x, 0)

    @staticmethod
    def prime(x):
        # 1.0 where x > 0, else 0.0 (derivative at 0 taken as 0).
        return (x > 0) * 1.0
| true
|
adea984579d1bab82c10734729fe5c98da3cedea
|
Python
|
teamopensource/jmeter-test-plans
|
/php/conversion/k-to-csv-all.py
|
UTF-8
| 2,030
| 2.5625
| 3
|
[] |
no_license
|
import sys
import glob
import argparse
import re
import os
# Merge all cachegrind.* profiles in --i into a single all.csv in --o,
# one "key,timestamp,file,function,line,microseconds" row per call.
parser = argparse.ArgumentParser(description='Convert cachegrind to csv')
parser.add_argument('--cap', default="0", type=int, help="exclude function calls below this threshold (microseconds)")
parser.add_argument("--i", default=".", help="directory containing cachegrind files")
parser.add_argument("--o", default=".", help="directory to generate csv file to")
args = parser.parse_args()
# Default the output directory to the input directory.
if args.o == ".":
    args.o = args.i
if not os.path.exists(args.o):
    os.makedirs(args.o)
files = glob.glob("/".join([args.i, "cachegrind.*"]))
outname = args.o + "/all.csv"
with open(outname, 'w') as outfile:
    for filename in files:
        if ".csv" not in filename and ".svg" not in filename:
            inname = filename
            # outname = "/".join([args.o, inname.split("/").pop() + '.csv'])
            # key = cachegrind basename minus its trailing timestamp component
            key = inname.split("/").pop()
            key = key.split(".")
            key.pop()
            key = ".".join(key)
            timestamp = inname.split(".").pop().replace("_", ".")
            def append (outfile, key, timestamp, fl, fn, li, tm):
                if int(tm) >= args.cap: # only save the call, if it has taken more than args.cap microseconds
                    outfile.write(",".join([key, timestamp, fl, fn, li, tm]) + "\n")
            with open(inname, 'r') as infile:
                print "converting", inname, "-->", outname
                fl = ""
                fn = ""
                li = ""
                tm = ""
                for line in infile:
                    numbers = re.match(r"([0-9]+)\ ([0-9]+)", line) # find linenumber and microseconds, like this: 26 26
                    if numbers:
                        li = numbers.group(1)
                        tm = numbers.group(2)
                    elif line.startswith("fl") or line.startswith("cfl"):
                        # save the old one, if it exists
                        if fl and fn and li and tm:
                            append(outfile, key, timestamp, fl, fn, li, tm)
                        fl = line.replace("\n", '').split('=')[1] # get the function name
                    elif line.startswith("fn") or line.startswith("cfn"):
                        fn = line.replace("\n", '').split('=')[1] # get the function name
                # flush the final pending record of the file
                if fl and fn and li and tm:
                    append(outfile, key, timestamp, fl, fn, li, tm)
|
ff00b75a991236222af676acfb28e8d0addae676
|
Python
|
riteshtawde/AI
|
/WumpusWorld_MDP/wumpus_mdp.py
|
UTF-8
| 4,292
| 3.09375
| 3
|
[] |
no_license
|
'''
@author: ritesh(rtawde@iu.edu)
'''
import solver
import time
class WumpusMDP:
    """MDP formulation of a Wumpus-world grid.

    wall_locations, pit_locations: lists of (x, y) pairs.
    wumpus_location, gold_location, start_location: (x, y) pairs.

    Movement actions succeed with probability 0.9 and slip to a neighbour
    with probability 0.1/3 each; shooting along the wumpus's row/column is
    deterministic while the agent still has its arrow.
    """
    # Grid size is fixed here — it is NOT derived from the locations passed in.
    grid_dimension_x = 5
    grid_dimension_y = 6

    def __init__(self, wall_locations, pit_locations, wumpus_location, gold_location, start_location):
        self.wall_locations = wall_locations
        self.pit_locations = pit_locations
        self.wumpus_location = wumpus_location
        self.gold_location = gold_location
        self.start_location = start_location
        # Bug fix: states/wumpus_dead/has_arrow used to be class attributes, so
        # a second instance appended to the same shared states list and saw the
        # first instance's stale wumpus/arrow status. Now per-instance.
        self.states = []
        self.wumpus_dead = False
        self.has_arrow = True
        for x in range(self.grid_dimension_x):
            for y in range(self.grid_dimension_y):
                if (x, y) not in self.wall_locations:
                    self.states.append((x, y))

    def A(self):
        """All actions available to the agent."""
        return ['do nothing','left','right','up','down','shoot left','shoot right','shoot up','shoot down']

    def S(self):
        """All non-wall grid cells."""
        return self.states

    def P(self, s, a, u):
        """Transition probability of reaching state u from s under action a.

        NOTE: a successful shooting action mutates wumpus_dead/has_arrow as a
        side effect, matching the original implementation.
        """
        # Moving into a wall is impossible.
        if (a == 'up' or a == 'down' or a == 'left' or a == 'right') and u in self.wall_locations:
            return 0
        # Staying on the gold square is absorbing.
        if a == 'do nothing' and s == u and s == self.gold_location:
            return 1
        # up: intended cell with 0.9, any other neighbour with 0.1/3
        if a == 'up' and u[1] == s[1]+1 and u[0] == s[0]:
            return 0.9
        if a == 'up' and self.in_neighbour_states(s, u):
            return 0.1/3
        if a == 'shoot up' and s[0] == self.wumpus_location[0] and s[1] < self.wumpus_location[1] and not self.wumpus_dead and self.has_arrow and s==u:
            self.wumpus_dead = True
            self.has_arrow = False
            return 1
        # down
        if a == 'down' and u[1] == s[1]-1 and u[0] == s[0]:
            return 0.9
        if a == 'down' and self.in_neighbour_states(s, u):
            return 0.1/3
        if a == 'shoot down' and s[0] == self.wumpus_location[0] and s[1] > self.wumpus_location[1] and not self.wumpus_dead and self.has_arrow and s==u:
            self.wumpus_dead = True
            self.has_arrow = False
            return 1
        # left
        if a == 'left' and u[0] == s[0]-1 and u[1] == s[1]:
            return 0.9
        if a == 'left' and self.in_neighbour_states(s, u):
            return 0.1/3
        if a == 'shoot left' and s[1] == self.wumpus_location[1] and s[0] > self.wumpus_location[0] and not self.wumpus_dead and self.has_arrow and s==u:
            self.wumpus_dead = True
            self.has_arrow = False
            return 1
        # right
        if a == 'right' and u[0] == s[0]+1 and u[1] == s[1]:
            return 0.9
        if a == 'right' and self.in_neighbour_states(s, u):
            return 0.1/3
        if a == 'shoot right' and s[1] == self.wumpus_location[1] and s[0] < self.wumpus_location[0] and not self.wumpus_dead and self.has_arrow and s==u:
            self.wumpus_dead = True
            self.has_arrow = False
            return 1
        return 0

    def R(self, s):
        """Reward for being in state s: -100 for live wumpus or pit, +100 for gold, -1 otherwise."""
        if s == self.wumpus_location and not self.wumpus_dead:
            return -100
        elif s in self.pit_locations:
            return -100
        elif s == self.gold_location:
            return 100
        return -1

    def initial_state(self):
        return self.start_location

    def gamma(self):
        """Discount factor."""
        return 0.99

    def in_neighbour_states(self, state, neighbour):
        """True when `neighbour` is 4-adjacent to `state`."""
        return True if ((neighbour[0] == state[0]-1 or neighbour[0] == state[0]+1) and neighbour[1] == state[1] ) or ((neighbour[1] == state[1]-1 or neighbour[1] == state[1]+1) and neighbour[0] == state[0]) else False
#mdp = WumpusMDP([(0,0),(1,0),(2,0),(3,0),(3,1),(3,2),(3,3),(2,3),(1,3),(0,3),(0,2),(0,1)], [(1,2)], (2,1), (2,2), (1,1))
# Build the 5x6 map (outer walls plus one interior wall), solve it, and print
# the resulting policy; timing instrumentation is kept but its print is off.
mdp = WumpusMDP([(0,0),(1,0),(2,0),(3,0),(4,0),(4,1),(4,2),(4,3),(4,4),(4,5),(3,5),(2,5),(1,5),(0,5),(0,4),(0,3),(0,2),(0,1),(2,3)], [(1,2)], (3,2), (3,4), (1,1))
solve = solver.Solver(mdp)
start = time.time()
policy = solve.solve()
end = time.time()
#print('time : ',(end-start))
print(policy)
| true
|
8b1d25ae477860ce2b9d77530cadb250504b135c
|
Python
|
the-alexmeza/kiteai
|
/make_dict.py
|
UTF-8
| 1,220
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
import csv
import nltk
import numpy as np
import pickle as pkl
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from preprocess import preprocess_for_dict
# Build a most-common-words vocabulary (size 7000) from the toxic-comments
# training CSV and pickle both the vocabulary and the raw corpus.
vocab_size = 7000
stop_words = set(stopwords.words('english'))
# Format of list:
# [((ID, Text), [toxic, severe_toxic, obscene, threat, insult, identity_hate])]
all_data = []
all_tokens = []
with open("train.csv", 'r') as train_data:
    hold = csv.reader(train_data, delimiter=",", quotechar='"')
    for instance in hold:
        all_data.append(((instance[0], instance[1]),
                         np.array([instance[2], instance[3], instance[4],
                                   instance[5],instance[6], instance[7]])))
# Remove labels
del all_data[0]
# Retrieve all sentences
all_text = [sentence for ((_, sentence), _) in all_data]
# Tokenise every comment and drop stopwords before counting frequencies.
for sentence in all_text:
    t = preprocess_for_dict(sentence)
    for item in t:
        if item not in stop_words:
            all_tokens.append(item)
vocab = nltk.FreqDist(all_tokens)
most_common_vocab = [word for word,_ in vocab.most_common(vocab_size)]
print(most_common_vocab)
# Persist the vocabulary and corpus for downstream feature extraction.
pkl.dump(most_common_vocab, open('data/vocab.p', 'wb'))
pkl.dump(all_text, open('data/corpus.p', 'wb'))
| true
|
4044b8a758d6455f4992d3724c40a6a0335ac816
|
Python
|
yukihiko-shinoda/asynccpu
|
/tests/testlibraries/cpu_bound.py
|
UTF-8
| 2,192
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
"""
Cpu bound.
see: https://docs.python.org/3/library/asyncio-eventloop.html#executing-code-in-thread-or-process-pools
"""
import os
import time
from datetime import datetime
from logging import getLogger
from multiprocessing.connection import Connection
from signal import SIGTERM, signal
from typing import Any, NoReturn, Optional
from tests.testlibraries import SECOND_SLEEP_FOR_TEST_MIDDLE
from tests.testlibraries.exceptions import Terminated
from tests.testlibraries.local_socket import LocalSocket
async def process_cpu_bound(task_id: Optional[int] = None, send_process_id: bool = False) -> str:
    # Async wrapper around cpu_bound(); note it still blocks the event loop
    # when awaited directly — it exists so it can be scheduled in an executor.
    return cpu_bound(task_id, send_process_id)
def process_cpu_bound_method(
    task_id: Optional[int] = None, send_process_id: bool = False, connection: Optional[Connection] = None
) -> None:
    """Run cpu_bound() and, when a pipe is supplied, send the result to the parent process."""
    result = cpu_bound(task_id, send_process_id)
    if connection:
        connection.send(result)
def cpu_bound(task_id: Optional[int] = None, send_process_id: bool = False) -> str:
    """
    CPU-bound operations will block the event loop:
    in general it is preferable to run them in a process pool.

    Computes sum(i*i for i < 10**7) and returns it as
    "task_id: <id>, result: <sum>" (prefix omitted when task_id is None).
    Raises Terminated on SIGTERM; re-raises KeyboardInterrupt after logging.
    """
    try:
        logger = getLogger(__name__)
        def hander(_signum: int, _frame: Optional[Any]) -> NoReturn:
            logger.info("CPU-bound: Terminate")
            raise Terminated()
        # Convert SIGTERM into an exception so the worker unwinds cleanly.
        signal(SIGTERM, hander)
        process_id = os.getpid()
        print(process_id)
        logger.info("CPU-bound: process id = %d", process_id)
        if send_process_id:
            # Delay so the test harness is listening before we report our pid.
            time.sleep(SECOND_SLEEP_FOR_TEST_MIDDLE)
            logger.info("CPU-bound: Send process id")
            LocalSocket.send(str(process_id))
        logger.info("CPU-bound: Start")
        result = sum(i * i for i in range(10 ** 7))
        logger.debug("CPU-bound: Finish")
        logger.debug("%d %s", task_id, datetime.now())
        return ("" if task_id is None else f"task_id: {task_id}, ") + f"result: {result}"
    except KeyboardInterrupt:
        logger.info("CPU-bound: KeyboardInterupt")
        raise
def expect_process_cpu_bound(task_id: Optional[int] = None) -> str:
    """Expected cpu_bound() output: sum of i*i for i < 10**7, with optional task prefix."""
    prefix = f"task_id: {task_id}, " if task_id is not None else ""
    return prefix + "result: 333333283333335000000"
| true
|
a50da690bc756c77349387249ac866a7aa31c06c
|
Python
|
perlfu/timelapse-ae
|
/render-frames.py
|
UTF-8
| 2,563
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
import math
import pickle
import os
import re
import sys
from cmd_queue import CommandQueue
cmd_queue = CommandQueue()
def render_frame(src_path, dst_path, day, srcs, n, gn, img_type='hdn'):
    """Queue the commands producing one averaged frame plus an annotated copy.

    srcs is a list of (filename, weight) pairs for the given day; n is the
    frame index within the day; gn is the global frame number used for the
    frame-*.png symlinks consumed by the encoder.
    """
    # pick mode
    if len(srcs) <= 5:
        weighting = True
        mode = '-m'
    else:
        weighting = False
        mode = '-g'
    # always use arithmetic mean for normalised input
    if img_type == 'hdn':
        mode = '-m'
    # compile sources and weights
    avg_srcs = []
    for (src, f) in srcs:
        clean = src.replace("-hd.png", "")
        if weighting:
            avg_srcs.append(("%.4f:" % f) + os.path.join(src_path, day, 'day', clean + '-' + img_type + '.png'))
        else:
            avg_srcs.append(os.path.join(src_path, day, 'day', clean + '-' + img_type + '.png'))
    # plain average frame
    avg_dst = os.path.join(dst_path, 'plain-' + day + '-' + ("%03d" % n) + '.png')
    plain_gn = os.path.join(dst_path, "frame-plain-%05d.png" % gn)
    avg_cmd = ['avgimg', mode, avg_dst ] + avg_srcs
    cmd_queue.add(avg_dst, [], avg_cmd)
    cmd_queue.add(plain_gn, [avg_dst], ['ln', '-s', avg_dst, plain_gn])
    # annotated frame (ImageMagick overlays the date in the bottom corner)
    ann_dst = os.path.join(dst_path, 'annotated-' + day + '-' + ("%03d" % n) + '.png')
    ann_gn = os.path.join(dst_path, "frame-annotated-%05d.png" % gn)
    ann_cmd = ['convert', avg_dst,
               '-font', 'Bookman-Light',
               '-pointsize', '64',
               '-fill', '#ffffffa0',
               '-gravity', 'SouthWest',
               '-annotate', '+1570%+20%', day,
               ann_dst ]
    cmd_queue.add(ann_dst, [avg_dst], ann_cmd)
    cmd_queue.add(ann_gn, [ann_dst], ['ln', '-s', ann_dst, ann_gn])
def main(args):
    # Usage: render-frames.py <src-path> <in-file> <out-path> [img-type]
    if len(args) >= 3:
        (src_path, in_file, out_path) = args[0:3]
        img_type = 'hdn'
        if len(args) > 3:
            img_type = args[3]
        # The pickle holds the frame plan produced by an earlier selection pass.
        with open(in_file, 'rb') as f:
            data = pickle.load(f)
        days = data['days']
        day_count = data['day_count']
        picked = data['picked']
        frame_n = 0
        # Queue one render per picked frame set, numbering frames globally.
        for day in days:
            if day in picked:
                frame_sets = picked[day]
                for (i, ls) in zip(range(len(frame_sets)), frame_sets):
                    render_frame(src_path, out_path, day, ls, i, frame_n, img_type=img_type)
                    frame_n += 1
        cmd_queue.run()
    else:
        print 'render-frames.py <src-path> <in-file> <out-path>'
if __name__ == "__main__":
    # Script entry point; exit code 0 on completion.
    main(sys.argv[1:])
    sys.exit(0)
| true
|
2bb4d69012b8c629193e8fa46f76c65da68514ca
|
Python
|
tonycolucci/AVC_Project
|
/src_test/train_model.py
|
UTF-8
| 2,856
| 2.921875
| 3
|
[] |
no_license
|
# Imports
import logging
import yaml
import pickle
import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LogisticRegressionCV
logging.basicConfig(level=logging.INFO, format="%(name)-12s %(levelname)-8s %(message)s")
logger = logging.getLogger()
def split_response(data, response_col, analysis_cols):  # , train_share
    """Split the data set into a feature frame and a response frame.

    Args:
        data (:py:class:`pandas.DataFrame`): DataFrame containing the features
        analysis_cols (:obj:`list`): Columns to use as features
        response_col (:obj:`list`): Single-element list naming the response column

    Returns:
        tuple: (features, response) DataFrames selected from ``data``.
    """
    return data[analysis_cols], data[response_col]
def model_training(X_train, y_train, CV_folds, random_state):
    """ Trains a logistic regression model using cross validation

    Args:
        X_train (:py:class:`pandas.DataFrame`): DataFrame containing features columns
        y_train (:py:class:`pandas.DataFrame`): DataFrame containing a single column for the response variable
        CV_folds (int): Integer indicating the number of folds over which to run cross validation.
        random_state (int): Seed passed to LogisticRegressionCV for reproducibility.

    Returns:
        logistic (sklearn model object): The model generated by regressing the response over the features
    """
    logistic = LogisticRegressionCV(cv = CV_folds, random_state = random_state).fit(X_train, y_train)
    return logistic
if __name__ == "__main__":
    # Load config
    with open("config/config.yml","r") as yml:
        # NOTE(review): yaml.load without Loader= is deprecated/unsafe for
        # untrusted files; prefer yaml.safe_load.
        config = yaml.load(yml)
    config = config["train_model"]
    # Get data with features
    data_address = config["data_address"]
    analysis_data = pd.read_csv(data_address)
    logger.info("train_model: data read in from {}".format(data_address))
    # Load arguments for splitting data and split out response column
    response_col = config["response_col"]
    analysis_cols = config["analysis_cols"]
    features, response = split_response(analysis_data, response_col, analysis_cols)
    # Load argument for model training and return the trained model
    CV_folds = config["CV_folds"]
    random_state = config["random_state"]
    logger.info("train_model: Training using logistic regression with {}-fold cross validation and random state of {}".format(CV_folds, random_state))
    model = model_training(features, response, CV_folds, random_state)
    # Create a pickle object containing the model
    pickle.dump( model, open( config["model_address"], "wb" ) )
| true
|
646af16bb1ced4ac570774856328432fd8379787
|
Python
|
dheeraj-326/fetch_coding
|
/src/utilities/emails.py
|
UTF-8
| 1,119
| 2.96875
| 3
|
[] |
no_license
|
'''
Created on Oct 13, 2020
@author: dheer
'''
from src.data.constants import Constants
class Emails(object):
    """Utilities for normalising and counting unique email addresses."""

    def __init__(self):
        """No state is required; methods are effectively pure."""

    def count_unique_emails(self, emails):
        """Return the number of distinct addresses after normalisation."""
        return len(self.__cleanup_emails(emails))

    def __cleanup_emails(self, emails):
        """Normalise addresses and deduplicate them.

        Characters in Constants.Email.IGNORE_CHARACTERS are stripped from the
        whole address, and anything after Constants.Email.IGNORE_AFTER in the
        local part (e.g. a '+tag') is discarded before comparison.
        Fix: removed a stray debug print that dumped the raw address list.
        """
        cleaned_emails = set()
        for email in emails:
            for character in Constants.Email.IGNORE_CHARACTERS:
                email = email.replace(character, '')
            parts = email.split('@')
            parts[0] = parts[0].split(Constants.Email.IGNORE_AFTER)[0]
            cleaned_emails.add('@'.join(parts))
        return list(cleaned_emails)
return list(cleaned_emails)
# emails = [
# "dheeraj.326@gmail.com",
# "dheeraj.326+abc@gmail.com",
# "chichumail1@gmail.com"
# ]
# emails_utility = Emails()
# print(emails_utility.count_unique_emails(emails))
| true
|
b4fe7e4e9a2df97b1017274301b17032f3c104d7
|
Python
|
dlrocker/pikamon-py
|
/pikamon/spawner/spawner.py
|
UTF-8
| 1,873
| 3
| 3
|
[
"MIT"
] |
permissive
|
import random
import logging
from discord import Embed
from pikamon.constants import Pokemon, DiscordMessage
logger = logging.getLogger(__name__)
async def spawn(message, cache):
    """Spawns a pokemon based on probability if a pokemon has not already been spawned for the channel with the
    current message being processed

    Parameters
    ----------
    message : discord.Message
        Discord Message context
    cache : cachetools.TTLCache
        Cache object which contains channels that have active pokemon spawns
    """
    channel_name = message.channel
    logger.debug("Attempting to spawn pokemon on channel \"{}\"".format(channel_name))
    # Clear all channel caches so that if a pokemon has expired, it is no longer available
    # TODO - Print message when pokemon for channel expires. May need to extend TTLCache and edit the following:
    #  - https://github.com/tkem/cachetools/blob/master/src/cachetools/ttl.py#L158
    cache.expire()
    if channel_name in cache:
        logger.info("A pokemon has already been spawned for channel \"{}\"".format(channel_name))
    else:
        # Roll against the configured spawn rate before creating a new spawn.
        if random.random() <= Pokemon.SPAWN_RATE:
            logger.info("Spawning new Pokemon!")
            pokemon_id = random.randint(0, Pokemon.MAX_ID)
            # Remember the active spawn for this channel until it is caught or expires.
            cache[channel_name] = pokemon_id
            embeded_msg = Embed(
                title="A wild pokémon has appeared!",
                description="Guess the pokémon аnd type `p!ka catch <pokémon>` to cаtch it!",
                colour=DiscordMessage.COLOR
            )
            embeded_msg.set_image(
                url=f"https://raw.githubusercontent.com/PokeAPI/sprites/master/sprites/pokemon/other/official-artwork/{pokemon_id}.png")
            await message.channel.send(embed=embeded_msg)
        else:
            logger.debug("Pokemon will not be spawned this time.")
| true
|
688b67c0cd4a92246c797e4d1a572a9bfea560b9
|
Python
|
yinzuopu/api_test
|
/db2.py
|
UTF-8
| 1,063
| 2.859375
| 3
|
[] |
no_license
|
#另一种封装方法
#导入pymysql库
import pymysql
class DB:
    """Thin pymysql wrapper around the local `mysql` database."""

    def __init__(self):
        # Note: pymysql takes `passwd` (alias of `password`).
        self.conn = pymysql.connect(host="127.0.0.1",
                                    port=3306,
                                    user="root",
                                    passwd="123456",
                                    db="mysql")
        self.cur = self.conn.cursor()

    def __del__(self):
        # Destructor: release the cursor and connection when the object dies.
        self.cur.close()
        self.conn.close()

    def query(self, sql, args=None):
        """Run a SELECT and return all rows; `args` are bound server-side."""
        self.cur.execute(sql, args)
        return self.cur.fetchall()

    def exec(self, sql, args=None):
        """Run a statement and commit, rolling back (and printing) on failure."""
        try:
            self.cur.execute(sql, args)
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            print(str(e))

    def check_user(self, name):
        """Return True if a student with `name` exists."""
        # Parameterized to avoid SQL injection (original interpolated `name`
        # straight into the statement with str.format).
        result = self.query("select * from student where name = %s", (name,))
        return True if result else False

    def del_user(self, name):
        """Delete all student rows with `name` (parameterized, see above)."""
        self.exec("delete from student where name = %s", (name,))
| true
|
8507ab96b74cbd8971c5b9bce206777649094ca3
|
Python
|
Aurigae-a/tape_bouncing
|
/obstacle.py
|
UTF-8
| 2,232
| 3.359375
| 3
|
[] |
no_license
|
class Obstacle:
    """
    A circular obstacle that tapes can bounce off.
    """
    def __init__(self, init_number, init_x, init_y, init_radius):
        """
        Constructor.
        """
        # identifier
        self.number = init_number
        # remaining hit points; the obstacle disappears when this reaches 0
        self.lifeTime = 3
        # centre coordinates of the obstacle
        self.pos = []
        self.pos.append(init_x)
        self.pos.append(init_y)
        # obstacle radius
        self.radius = init_radius
    def checkCollision(self, checkedTape):
        """
        Detect and resolve a collision with the given tape (mutates the
        tape's velocity and position in place on impact).
        """
        # Only check while alive; at 0 hit points the obstacle has disappeared.
        if self.lifeTime > 0:
            # tape centre position
            tape_pos = checkedTape.pos
            # distance between the obstacle centre and the tape centre
            dx = tape_pos[0] - self.pos[0]
            dy = tape_pos[1] - self.pos[1]
            distance = pow(pow(dx,2) + pow(dy,2) , 0.5)
            # collision when the centres are closer than the sum of the radii
            if distance < (self.radius + checkedTape.radius):
                # the obstacle loses one hit point
                self.lifeTime -= 1
                # angle of the centre-to-centre line relative to horizontal
                sin_theta = dy / distance
                cos_theta = dx / distance
                # tape velocity
                tape_vel = checkedTape.velocity
                # decompose velocity into normal/tangential components along the centre line
                vn = tape_vel[0] * cos_theta + tape_vel[1] * sin_theta
                vt = (-1.0) * tape_vel[0] * sin_theta + tape_vel[1] * cos_theta
                # reflect (and damp) the normal component
                vn = (-0.9) * vn
                # recombine into horizontal/vertical velocity after the bounce
                checkedTape.velocity[0] = vn * cos_theta - vt * sin_theta
                checkedTape.velocity[1] = vn * sin_theta + vt * cos_theta
                # manually place the tape centre just outside the obstacle
                checkedTape.pos[0] = self.pos[0] + (self.radius+checkedTape.radius + 0.1) * cos_theta
                checkedTape.pos[1] = self.pos[1] + (self.radius+checkedTape.radius + 0.1) * sin_theta
| true
|