blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0ba04732a57f637047dcb0dded281d89a32dd15a | Python | Adurden/group_roll_table_bot | /src/roll_funcs.py | UTF-8 | 3,783 | 3.3125 | 3 | [
"MIT"
] | permissive | import numpy as np
def roll(num, face, mod=0):
    """
    Simulate dice rolls with numpy's randint.

    Parameters
    ----------
    num : int or tuple of int
        shape of the set of dice to roll
    face : int
        the number of faces on each die
    mod : int
        flat modifier added to the result of every individual roll

    Returns
    -------
    rolls : np.array
        the result of each roll, already shifted by ``mod``
    """
    low = 1 + mod
    high = face + 1 + mod
    return np.random.randint(low, high, num)
def pass_distribution(num, face, dc, mod=0, tests=2500000, adv=-1):
    """
    rolls a large number of tests to simulate a distribution of numbers of
    passes from a number of attempts
    Parameters
    ----------
    num : int
        the number of attempts in a single test
    face : int
        the number of faces on the roll for the test
    dc : int
        the value needed to succeed (a roll strictly above dc passes)
    mod : int, default = 0
        the value to be added to each attempt
    tests : int, default = 2,500,000
        the number of tests to simulate
    adv : int, default = -1
        number of attempts rolled with advantage (two dice, the attempt
        passes if either die passes); if negative, every attempt is
        rolled with advantage
    Returns
    -------
    distribution : np.array
        array of length num + 1 giving the distribution of pass counts
        where the count is the index. i.e. [0.25, 0.5, 0.25] would be a
        1/4 chance for 0 passes etc.
    """
    if adv < 0:
        # every attempt gets advantage: roll pairs, cap each pair at one pass
        rolls = roll((tests, num, 2), face, mod)
        adv_passed = np.sum(rolls > dc, axis=2)
        adv_passed[adv_passed > 1] = 1
        num_passed = np.sum(adv_passed, axis=1)
    else:
        # only the first `adv` attempts get advantage
        rolls_adv = roll((tests, adv, 2), face, mod)
        adv_passed = np.sum(rolls_adv > dc, axis=2)
        # bug fix: an advantaged attempt succeeds at most once, even when
        # both of its dice beat the dc (the adv < 0 branch already capped)
        adv_passed[adv_passed > 1] = 1
        adv_passed = np.sum(adv_passed, axis=1)
        roll_nadv = roll((tests, num - adv), face, mod)
        num_passed = np.sum(roll_nadv > dc, axis=1) + adv_passed
    # bug fix: np.unique silently dropped pass counts that never occurred,
    # shifting the indices and breaking the index == pass-count contract
    # that roll_table relies on; np.bincount keeps every count from 0 to num
    return np.bincount(num_passed, minlength=num + 1) / tests
def roll_table(num, face, dc, mod=0, tests=2500000, adv=-1):
    """
    build a roll table applying a pass distrubtion to a single d20 roll
    Parameters
    ----------
    num : int
        the number of attempts in a single test
    face : int
        the number of faces on the roll for the test
    dc : int
        the value needed to succeed
    mod : int, default = 0
        the value to be added to each attempt
    tests : int, default = 2,500,000
        the number of tests to simulate
    adv : int, default = -1
        passed through to pass_distribution
    Returns
    -------
    roll_table : dict
        a dict giving the number of passes for a given result of a single d20
        roll to mimic the chances of a full roll's success counts where the
        keys are 1-20 and the vals are the number of passes
    """
    # cast input to int for discord command parse should move to the bot call
    num, face, dc, mod = int(num), int(face), int(dc), int(mod)
    # get the distribution of successes
    num_passed = pass_distribution(num, face, dc, mod, tests, adv)
    # filter for more than 1 in 20 chance
    # NOTE(review): assumes index == pass count in the returned
    # distribution; verify pass_distribution guarantees this
    hit_counts = np.where(num_passed > 0.05)[0]
    num_passed = num_passed[hit_counts]
    # assign a number of rolls to each distribution value
    dist = np.round(num_passed/min(num_passed))
    num_splits = np.sum(dist)
    num_per_split = 20/num_splits
    num_per_cnt = np.round(dist*num_per_split)
    # correct to 20 from rounding errrors
    # any surplus/deficit is dumped on the middle entry of the table
    if np.sum(num_per_cnt) < 20:
        num_per_cnt[round(len(num_per_cnt)/2)] += 20 - np.sum(num_per_cnt)
    if np.sum(num_per_cnt) > 20:
        num_per_cnt[round(len(num_per_cnt)/2)] -= np.sum(num_per_cnt) - 20
    # build roll table: consecutive d20 results are mapped to each pass count
    cur_key = 1
    roll_table = dict()
    for num_hit, rolls in zip(hit_counts, num_per_cnt):
        for i in range(cur_key, cur_key+int(rolls)):
            roll_table[i] = num_hit
        cur_key += int(rolls)
    return(roll_table)
| true |
d7355f5ef9db8a3e1da769b2b634693b80937e93 | Python | aamuru/python_practice | /Number_of_Islands/Number_of_Islands.py | UTF-8 | 1,693 | 3.203125 | 3 | [] | no_license | class Solution:
'''def dfs(self,grid,i,j):
if(i<0 or j<0 or i>=len(grid) or j>=len(grid[0]) or grid[i][j]=='0'):
return
grid[i][j]='0'
self.dfs(grid,i+1,j)
self.dfs(grid,i-1,j)
self.dfs(grid,i,j-1)
self.dfs(grid,i,j+1)
def numIslands(self, grid: List[List[str]]) -> int:
islands=0
m=len(grid)
if m==0: return 0
n=len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j]=='1':
print(i,j)
self.dfs(grid,i,j)
islands+=1
return islands'''
def numIslands(self, grid: List[List[str]]) -> int:
if not grid: return 0
m, n = len(grid), len(grid[0])
q = collections.deque()
island=0
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
q.append((i,j))
self.bfs(q,grid)
island+=1
#print(q)
return island
def bfs(self,q,grid):
if not grid:
return 0
m, n = len(grid), len(grid[0])
dirs = [(0,1), (0,-1), (1,0), (-1,0)]
while q:
x, y = q.popleft()
for dx, dy in dirs:
nx = x + dx
ny = y + dy
if 0<=nx<m and 0<=ny<n and grid[nx][ny] !='0':
grid[nx][ny] = '0'
q.append((nx,ny))
#print('-----------',q)
| true |
a4a8d9f0a8bb803c9cd1ca8deee2c36cd1f049f0 | Python | davsvijayakumar/python-programming | /player/number reversed.py | UTF-8 | 72 | 2.921875 | 3 | [] | no_license | a=str(input("enter the number"))
print("\n",''.join(list(reversed(a))))
| true |
6f6ef094d8bbef50e69dbb7f008beeeae5c5ff84 | Python | kemingy/daily-coding-problem | /src/2d_iterator.py | UTF-8 | 1,311 | 4.5 | 4 | [
"Unlicense"
] | permissive | # Implement a 2D iterator class. It will be initialized with an array of arrays,
# and should implement the following methods:
# • next(): returns the next element in the array of arrays. If there are no
# more elements, raise an exception.
# • has_next(): returns whether or not the iterator still has elements left.
# For example, given the input [[1, 2], [3], [], [4, 5, 6]], calling next()
# repeatedly should output 1, 2, 3, 4, 5, 6.
# Do not use flatten or otherwise clone the arrays. Some of the arrays can be
# empty.
class NestArray:
    """2D iterator over an array of arrays; some inner arrays may be empty."""

    def __init__(self, arrays):
        self.arrays = arrays
        self.i = 0  # index of the current inner array
        self.j = 0  # position within the current inner array

    def has_next(self):
        """Return whether any element remains, skipping exhausted/empty rows."""
        while self.i < len(self.arrays):
            if self.j < len(self.arrays[self.i]):
                return True
            self.i += 1
            self.j = 0
        return False

    def next(self):
        """Return the next element; raise StopIteration when exhausted."""
        if not self.has_next():
            raise StopIteration("End of arrays.")
        value = self.arrays[self.i][self.j]
        self.j += 1
        return value
if __name__ == "__main__":
    # demo: prints 1 through 6, one element per line
    nested = NestArray([[1, 2], [3], [], [4, 5, 6]])
    while nested.has_next():
        print(nested.next())
| true |
7a4f145787e09eeece4209e408cc0cb5fa942589 | Python | rbrandao22/susep | /models2.py | UTF-8 | 38,293 | 2.5625 | 3 | [] | no_license | #########################################################################
## Regression models of counts and claims data for auto insurance cost ##
#########################################################################
import os
import sys
import pickle
import shelve
import numpy as np
import scipy.special as sp
import scipy.stats as st
import pdb
import time
# Raise exception on runtime warnings, plus lower bounds and precision parameters:
np.seterr(all='raise')
lb_log = 1e-323
lb_ratio = 1e-308
lb_alpha = 1e-77
lb_sigma2 = 1e-102
prec_param = 1e-8
# Data directory:
data_dir = 'persistent/'
# List of factors, should match data_dict in matrices.py, used for interactions
factors_list = {'veh_age': (0, 2), 'region': (3, 7), 'sex': (8,), 'bonus': (9,), 'age': (10, 13), 'cov': (14, 16), 'year': (17, 19)}
# Auxiliary functions:
def file_load(filename):
    """
    Unpickle and return the object stored at data_dir/filename.

    Raises FileNotFoundError (with the original diagnostic message) when
    the file does not exist.

    Bug fixed: the original put an unused ``os.path.exists`` call inside
    the try block (its boolean result was discarded, so it never raised),
    printed on failure, and then still executed ``return x`` -- which
    raised UnboundLocalError instead of reporting the missing file.
    """
    path = data_dir + filename
    if not os.path.exists(path):
        raise FileNotFoundError('File ' + filename + ' not found')
    with open(path, 'rb') as cfile:
        return pickle.load(cfile)
def save_results_db(res_dict, prefix, model, claim_type):
    """
    Store (or merge) a dict of results under `model` in the shelve db
    named <prefix>_results_<claim_type>.db inside data_dir.
    """
    path = data_dir + prefix + '_results_' + claim_type + '.db'
    with shelve.open(path, writeback=True) as db:
        if model not in db:
            db[model] = res_dict
        else:
            # merge entry by entry; writeback flushes the mutation on close
            for key, value in res_dict.items():
                db[model][key] = value
    print(prefix + ' results from ' + model + ' ' + claim_type + ', saved in db file')
    return
def save_results_pkl(res, prefix, model, claim_type):
    """
    Pickle `res` to <prefix>_results_<model>_<claim_type>.pkl in data_dir,
    replacing any previous file of the same name.
    """
    target = data_dir + prefix + '_results_' + model + '_' + claim_type + '.pkl'
    try:
        os.remove(target)
    except OSError:
        # no previous file to replace
        pass
    with open(target, 'wb') as filename:
        pickle.dump(res, filename)
    print(prefix + ' results from ' + model + '_' + claim_type + ', saved in pkl file')
def grab_results_db(prefix, model, claim_type, keys=None):
    """
    Fetch stored results for `model`/`claim_type` from the shelve db.

    Parameters
    ----------
    prefix : str
        results family, e.g. 'overall'
    model : str
        model label used as the shelve key
    claim_type : str
        claim type suffix of the db file name
    keys : iterable of str, optional
        subset of result entries to return; all entries when None

    Returns
    -------
    dict with the requested results

    Bug fixed: the original raised ``Exception('File ' + dbfile + ...)``
    where ``dbfile`` is an undefined name (the variable is ``db_file``),
    so a missing file produced a NameError instead of the intended message.
    """
    db_file = data_dir + prefix + '_results_' + claim_type + '.db'
    if not os.path.exists(db_file):
        raise Exception('File ' + db_file + ' not found')
    # Shelf supports the context-manager protocol, guaranteeing close()
    with shelve.open(db_file) as db:
        if keys is None:
            res = db[model]
        else:
            res = {key: db[model][key] for key in keys}
    return res
def interactions(X, dependent, interactions_list):
    """
    Append pairwise-interaction dummy columns to the design matrix X.

    Parameters
    ----------
    X : np.array
        design matrix; leading non-regressor columns are counts/exposure
        (freq) or claim cost (sev), followed by the factor dummies
    dependent : str
        'freq' or 'sev'; sets the column offset of the first dummy
    interactions_list : iterable of (str, str)
        pairs of factor names, each a key of factors_list

    Returns
    -------
    X with one extra 0/1 column per combination of non-base levels of
    each interacted factor pair.
    """
    if dependent == 'freq':
        aux_disp = 3
    elif dependent == 'sev':
        aux_disp = 2
    for item in interactions_list:
        # first/last dummy-column positions of each factor; +2 converts the
        # inclusive span into the number of levels (base level included)
        item0f = factors_list[item[0]][0]
        item0l = factors_list[item[0]][-1]
        item0_size = item0l - item0f + 2
        item1f = factors_list[item[1]][0]
        item1l = factors_list[item[1]][-1]
        item1_size = item1l - item1f + 2
        item_size = (item0_size - 1) * (item1_size - 1)
        X_add = np.zeros((np.shape(X)[0], item_size))
        aux_pos = -1
        for i in range(item0_size - 1):
            level_i = item0f + aux_disp + i
            for j in range(item1_size - 1):
                aux_pos += 1
                level_j = item1f + aux_disp + j
                # rows where both level dummies are on get the interaction dummy
                index = np.where((X[:, [level_i, level_j]] == [1, 1]).all(-1))[0]
                X_add[index, aux_pos] = 1
        X = np.hstack((X, X_add))
    return X
def interactions_new_key(interactions_list, key):
    """
    Extend a cell key (string of 0/1 dummies) with interaction dummies.

    Mirrors interactions() so that the augmented key lines up with the
    interaction-augmented design-matrix columns; unlike there, no column
    offset applies because the key holds only the dummies.
    """
    key = [int(j) for j in list(key)]
    for item in interactions_list:
        item0f = factors_list[item[0]][0]
        item0l = factors_list[item[0]][-1]
        item0_size = item0l - item0f + 2
        item1f = factors_list[item[1]][0]
        item1l = factors_list[item[1]][-1]
        item1_size = item1l - item1f + 2
        item_size = (item0_size - 1) * (item1_size - 1)
        key_add = [0] * item_size
        aux_pos = -1
        for i in range(item0_size - 1):
            level_i = item0f + i
            for j in range(item1_size - 1):
                aux_pos += 1
                level_j = item1f + j
                if key[level_i] == 1 and key[level_j] == 1:
                    key_add[aux_pos] = 1
        key = key + key_add
    key = ''.join([str(i) for i in key])
    return key
# Classes:
class Estimation:
    '''
    Maximum-likelihood estimation of regression model parameters via
    Newton-Raphson.

    Parameters:
    -----------
    model - specification of density of random component, one of
        {'Poisson', 'NB2', 'Logit', 'Probit', 'C-loglog'} (frequency) or
        {'LNormal', 'Gamma', 'InvGaussian'} (severity)
    claim_type - type of claim to be analyzed, 'casco' or 'rcd'
    interactions_list - optional iterable of factor-name pairs whose
        interaction dummies are appended to the design matrix

    Methods:
    --------
    save_estimation_results
    '''
    def __init__(self, model, claim_type, interactions_list=None):
        """
        Load the design matrix, build the per-model score and Hessian
        closures, and run Newton-Raphson until every score component is
        below prec_param in absolute value.
        """
        if claim_type not in {'casco', 'rcd'} or model not in {'Poisson', 'NB2', 'Logit', 'Probit', 'C-loglog', 'LNormal', 'Gamma', 'InvGaussian'}:
            raise Exception('Model or claim_type provided not in permissible set')
        if model in {'Poisson', 'NB2', 'Logit', 'Probit', 'C-loglog'}:
            dependent = 'freq'
        elif model in {'LNormal', 'Gamma', 'InvGaussian'}:
            dependent = 'sev'
        X = file_load(dependent + '_' + claim_type + '_matrix.pkl')
        if interactions_list != None:
            X = interactions(X, dependent, interactions_list)
        # X layout (see Stdout docstring): freq -> col 0 = claim count y_i,
        # col 1 = exposure; sev -> col 0 = claim cost; remaining columns are
        # the regressors (first one presumably the intercept, since beta[0]
        # is initialized from the sample mean -- confirm against matrices.py)
        if model == 'Poisson':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for Poisson regression model
                [y_i - exposure_i * exp(x_i'beta)] * x_i
                '''
                aux_vec = X[:, [0]] - X[:, [1]] * np.exp(X[:, 2:] @ beta)
                res = (aux_vec.T @ X[:, 2:]).T
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative Hessian of Poisson loglikelihood
                inv mu_i * x_i * x_i'
                '''
                res = np.linalg.inv(X[:, 2:].T @ (X[:, [1]] * np.exp(X[:, 2:] @ beta) * X[:, 2:]))
                return res
        elif model == 'NB2':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for NB2 regression model
                ((y_i - mu_i)/(1+alpha*mu_i)] * x_i
                (1/alpha^2)*(ln(1+alpha*mu_i)-sum_{j=0}^{y_i-1}1/(j+alpha^-1)+(y_i-mu_i)/(alpha*(1+alpha*mu_i)
                '''
                # beta[-1] is the overdispersion alpha; floored to stay positive
                beta[-1] = np.maximum(beta[-1], lb_alpha)
                mu = X[:, [1]] * np.exp(X[:, 2:] @ beta[:-1])
                aux_vec = (X[:, [0]] - mu) / (1 + beta[-1] * mu)
                aux_beta = (aux_vec.T @ X[:, 2:]).T
                aux_jsum = np.array([np.sum((np.arange(X[i, [0]])+beta[-1]**(-1))**(-1)) for i in range(len(X))])[:, np.newaxis]
                aux_alpha = np.sum(beta[-1]**(-2) * (np.log(1 + beta[-1] * mu) - aux_jsum) + (X[:, [0]] - mu) / (beta[-1] * (1 + beta[-1] * mu)))
                res = np.vstack((aux_beta, aux_alpha))
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative Hessian of NB2 loglikelihood
                inv (mu_i/(1+alpha*mu_i)] * x_i * x_i'
                inv (1/alpha^4)*[ln(1+alpha*mu_i)-sum_{j=0}^{y_i-1}1/(j+alpha^-1)]^2+mu_i/(alpha**2*(1+alpha*mu_i)
                '''
                beta[-1] = np.maximum(beta[-1], lb_alpha)
                mu = X[:, [1]] * np.exp(X[:, 2:] @ beta[:-1])
                aux_beta = np.linalg.inv(X[:, 2:].T @ ((mu / (1 + beta[-1] * mu)) * X[:, 2:]))
                aux_jsum = np.array([np.sum((np.arange(X[i, [0]])+beta[-1]**(-1))**(-1)) for i in range(len(X))])[:, np.newaxis]
                aux_alpha = (np.sum(beta[-1]**(-4) * (np.log(1 + beta[-1] * mu) - aux_jsum)**2 + mu / (beta[-1]**2 * (1 + beta[-1] * mu))))**(-1)
                # block-diagonal: beta block plus the scalar alpha block
                res = np.hstack((aux_beta, np.zeros(np.shape(aux_beta)[0])[:, np.newaxis]))
                res = np.vstack((res, np.concatenate((np.zeros(np.shape(res)[1]-1), [aux_alpha]))))
                return res
        if model == 'Logit':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for Logit regression model
                [y_i - m_i * exp(x_i'beta)/(1+exp(x_i'beta)] * x_i
                '''
                aux_vec = X[:, [0]] - X[:, [1]] * np.exp(X[:, 2:] @ beta) / (1 + np.exp(X[:, 2:] @ beta))
                res = (aux_vec.T @ X[:, 2:]).T
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative expected Hessian of Logit loglikelihood
                inv m_i * exp(x_i'beta)/(1+exp(x_i'beta)**2 * x_i * x_i'
                '''
                res = np.linalg.inv(X[:, 2:].T @ (X[:, [1]] * np.exp(X[:, 2:] @ beta) / (1 + np.exp(X[:, 2:] @ beta))**2 * X[:, 2:]))
                return res
        if model == 'Probit':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for Probit regression model
                [((y_i - m_i * Phi(x_i'beta))/(Phi(x_i'beta)(1-Phi(x_i'beta)) * phi(x_i'beta)] * x_i
                '''
                Phi = st.norm.cdf(X[:, 2:] @ beta)
                phi = st.norm.pdf(X[:, 2:] @ beta)
                aux_vec = ((X[:, [0]] - X[:, [1]] * Phi) / (Phi * (1 - Phi))) * phi
                res = (aux_vec.T @ X[:, 2:]).T
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative expected Hessian of Probit loglikelihood
                inv m_i * phi(x_i'beta)**2 / (Phi(x_i'beta)(1-Phi(x_i'beta)) * x_i * x_i'
                '''
                Phi = st.norm.cdf(X[:, 2:] @ beta)
                phi = st.norm.pdf(X[:, 2:] @ beta)
                aux_vec = (X[:, [1]] * phi**2) / (Phi * (1 - Phi))
                res = np.linalg.inv(X[:, 2:].T @ (aux_vec * X[:, 2:]))
                return res
        if model == 'C-loglog':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for Complementary log-log regression model
                {[y_i * (1 - exp(-exp(x_i'beta)))^(-1) - m_i] * exp(x_i'beta)} * x_i
                '''
                # NOTE(review): this computes the Poisson score, not the
                # C-loglog formula quoted above -- confirm intended
                aux_vec = X[:, [0]] - X[:, [1]] * np.exp(X[:, 2:] @ beta)
                res = (aux_vec.T @ X[:, 2:]).T
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative Hessian of Poisson loglikelihood
                inv {m_i * exp(x_i'beta)^2*exp(-exp(x_i'beta))*[1-exp(-exp(x_i'beta))]^(-1)} * x_i * x_i'
                '''
                aux_vec = X[:, [1]] * np.exp(X[:, 2:] @ beta)**2 * np.exp(-np.exp(X[:, 2:] @ beta)) * (1 - np.exp(-np.exp(X[:, 2:] @ beta)))**(-1)
                res = np.linalg.inv(X[:, 2:].T @ (aux_vec * X[:, 2:]))
                return res
        elif model == 'LNormal':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for Log-Normal regression model
                [1/sigma^2 * (ln y_i - x_i'beta] * x_i
                -1/2*sigma^2 + (ln y_i - x_i'beta)^2/2*sigma^4
                '''
                # beta[-1] is sigma^2
                aux_beta = beta[-1]**(-1) * (np.log(X[:, [0]]) - X[:, 1:] @ beta[:-1])
                aux_beta = (aux_beta.T @ X[:, 1:]).T
                aux_sigma2 = np.sum(- (2 * beta[-1])**(-1) + (2 * beta[-1]**2)**(-1) * (np.log(X[:, [0]]) - X[:, 1:] @ beta[:-1])**2)
                res = np.vstack((aux_beta, aux_sigma2))
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative Hessian of Log-Normal loglikelihood
                inv 1/sigma^2 * x_i * x_i'
                inv -(2*sigma^4)^(-1) + (ln y_i - x_i'beta)^2/sigma^6
                '''
                aux_beta = np.linalg.inv(X[:, 1:].T @ (beta[-1]**(-1) * X[:, 1:]))
                aux_sigma2 = (np.sum(- (2 * beta[-1]**2)**(-1) + (np.log(X[:, [0]]) - X[:, 1:] @ beta[:-1])**2 * beta[-1]**(-3)))**(-1)
                res = np.hstack((aux_beta, np.zeros(np.shape(aux_beta)[0])[:, np.newaxis]))
                res = np.vstack((res, np.concatenate((np.zeros(np.shape(res)[1]-1), [aux_sigma2]))))
                return res
        elif model == 'Gamma':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for Gamma regression model
                [(y_i/exp(x_i'beta)-1] * x_i
                -y*exp(-x_i'beta)-x_i'beta+ln(y)+ln(nu)+1-digamma(nu)
                '''
                # beta[-1] is the shape parameter nu
                beta[-1] = np.maximum(beta[-1], lb_log)
                aux_beta = X[:, [0]] * np.exp(-1 * X[:, 1:] @ beta[:-1]) - 1
                aux_beta = (aux_beta.T @ X[:, 1:]).T
                aux_nu = np.sum(- X[:, [0]] * np.exp(-1 * X[:, 1:] @ beta[:-1]) - (X[:, 1:] @ beta[:-1]) + np.log(X[:, [0]]) + np.log(beta[-1]) + 1 - sp.digamma(beta[-1]))
                res = np.vstack((aux_beta, aux_nu))
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative Hessian of Gamma loglikelihood
                inv [y_i/exp(x_i'beta] * x_i * x_i'
                inv [polygamma(1,nu)-nu^(-1)]
                '''
                beta[-1] = np.maximum(beta[-1], lb_ratio)
                aux_beta = np.linalg.inv(X[:, 1:].T @ (X[:, [0]] * np.exp(-1 * X[:, 1:] @ beta[:-1]) * X[:, 1:]))
                aux_nu = (np.sum(np.ones(len(X))[:, np.newaxis] * sp.polygamma(1, beta[-1]) - beta[-1]**(-1)))**(-1)
                res = np.hstack((aux_beta, np.zeros(np.shape(aux_beta)[0])[:, np.newaxis]))
                res = np.vstack((res, np.concatenate((np.zeros(np.shape(res)[1]-1), [aux_nu]))))
                return res
        elif model == 'InvGaussian':
            def grad_func(X, beta):
                '''
                Gradient of loglikelihood for InvGaussian regression model
                [y_i*exp(-2*x_i'beta) - exp(-x_i'beta)] * x_i
                -1/2*sigma^2 + 1/sigma^4*(y_i*exp(-2x_i'beta)/2 - exp(-x_i'beta) + 1/2*y_i)
                '''
                beta[-1] = np.maximum(beta[-1], lb_sigma2)
                aux_beta = X[:, [0]] * np.exp(-2 * X[:, 1:] @ beta[:-1]) - np.exp(-1 * X[:, 1:] @ beta[:-1])
                aux_beta = (aux_beta.T @ X[:, 1:]).T
                aux_sigma2 = np.sum(-(2 * beta[-1])**(-1) + beta[-1]**(-2) * (.5 * X[:, [0]] * np.exp(-2 * X[:, 1:] @ beta[:-1]) - np.exp(-1 * X[:, 1:] @ beta[:-1]) + (2 * X[:, [0]])**(-1)))
                res = np.vstack((aux_beta, aux_sigma2))
                return res
            def hess_ninv(X, beta):
                '''
                Inverse of negative Hessian of InvGaussian loglikelihood
                [2 * y_i * exp(-2*x_i'beta) - exp(-x_i'beta)] * x_i * x_i'
                -1/2*sigma^4 + 2/sigma^6*(y_i*exp(-2x_i'beta)/2 - exp(-x_i'beta) + 1/2*y_i)
                '''
                beta[-1] = np.maximum(beta[-1], lb_sigma2)
                aux_beta = np.linalg.inv(X[:, 1:].T @ ((2 * X[:, [0]] * np.exp(-2 * X[:, 1:] @ beta[:-1]) - np.exp(-1 * X[:, 1:] @ beta[:-1])) * X[:, 1:]))
                aux_sigma2 = (np.sum(-(2 * beta[-1]**2)**(-1) + (2/beta[-1]**(3)) * (.5 * X[:, [0]] * np.exp(-2 * X[:, 1:] @ beta[:-1]) - np.exp(-1 * X[:, 1:] @ beta[:-1]) + (2 * X[:, [0]])**(-1))))**(-1)
                res = np.hstack((aux_beta, np.zeros(np.shape(aux_beta)[0])[:, np.newaxis]))
                res = np.vstack((res, np.concatenate((np.zeros(np.shape(res)[1]-1), [aux_sigma2]))))
                return res
        # Initial guesses and stoping parameter:
        if dependent == 'freq':
            beta = np.zeros(np.shape(X)[1] - 2)[:, np.newaxis]
        elif dependent == 'sev':
            beta = np.zeros(np.shape(X)[1] - 1)[:, np.newaxis]
        # models with a dispersion/shape parameter carry it as beta[-1]
        if model in {'NB2', 'Gamma', 'InvGaussian'}:
            beta = np.vstack((beta, np.array([.5])))
        elif model == 'LNormal':
            beta = np.vstack((beta, np.array([1])))
        if dependent == 'freq':
            if model in {'Poisson', 'NB2'}:
                beta[0] = np.log(X[0, [0]] / X[0, [1]])
            elif model in {'Logit', 'Probit', 'C-loglog'}:
                beta[0] = .2
        elif dependent == 'sev':
            beta[0] = 1
        grad = grad_func(X, beta)
        A = hess_ninv(X, beta)
        epsilon = prec_param
        lda_step = 1
        # Newton-Raphson algorithm:
        def beta_update(beta, lda_step, A, grad):
            beta_prime = beta + lda_step * A @ grad
            return beta_prime
        start_time = time.perf_counter()
        print('Estimation: ' + model + ' ' + claim_type)
        # iterate until every score component is below epsilon in absolute value
        while True:
            if np.all(np.absolute(grad) < np.ones(np.shape(grad)) * epsilon):
                print('Convergence attained, model ' + model + ', claim type ' + claim_type)
                print('Ellapsed time: ', (time.perf_counter() - start_time)/60)
                break
            beta_prime = beta_update(beta, lda_step, A, grad)
            beta = beta_prime
            grad = grad_func(X, beta)
            A = hess_ninv(X, beta)
            sys.stdout.write('Current grad norm: %1.6g \r' % (np.linalg.norm(grad)))
            sys.stdout.flush()
        # store estimates; the last inverse negative Hessian approximates the
        # covariance of the estimator
        self.model = model
        if interactions_list != None:
            self.extended_model = model + str(interactions_list)
        self.claim_type = claim_type
        self.beta = beta
        self.var = A
        self.std = np.sqrt(np.diag(A))[:, np.newaxis]
        self.z_stat = beta / self.std
        if dependent == 'freq':
            self.y_bar = np.sum(X[:, [0]]) / np.sum(X[:, [1]])
        elif dependent == 'sev':
            if model != 'LNormal':
                self.y_bar = np.average(X[:, [0]])
            else:
                self.y_bar = np.average(np.log(X[:, [0]]))
def save_estimation_results(self, keys=None):
if keys != None:
for key in keys:
res_dict[key] = self.key
else:
res_dict = {'beta': self.beta, 'var': self.var, 'std': self.std, 'z_stat': self.z_stat, 'y_bar': self.y_bar}
prefix = 'overall'
try:
save_results_db(res_dict, prefix, self.extended_model, self.claim_type)
except:
save_results_db(res_dict, prefix, self.model, self.claim_type)
class Stdout:
    '''
    Provides standard output (fit statistics and residuals) for GLMs.
    Reads freq/sev_<claim_type>_dict.pkl, where keys index vector of dummies and:
    Frequency:
        column 0 = claims count, y_i
        column 1 = exposure_i
    Severity:
        column 0 = claim cost, y_i
    Three persistent files w/ following statistics:
    individual_results_xxx.pkl: deviances and chis
    cell_results_xxx.db: 0)#obs, 1)y_bar, 2)mu_hat, 3)D^L, 4)Pearson^L
    cell_results_xxx.db freq data only: 5)total exposure, 6)exposure sum of squared deviations
    overall_results_xxx.db: LL, D, Pearson, Chi^2
    Standard output is different for models w/ Binomial distribution:
    counts and exposure are aggregated within cells before computation of likelihood and residuals
    '''
    def __init__(self, model, claim_type, interactions_list=None):
        """
        Load stored estimates, build per-model likelihood/residual
        closures, then loop over rating cells accumulating individual,
        cell-level and overall fit statistics.
        """
        self.model = model
        self.claim_type = claim_type
        if model in {'Poisson', 'NB2', 'Logit', 'Probit', 'C-loglog'}:
            model_type = 'freq'
        elif model in {'LNormal', 'Gamma', 'InvGaussian'}:
            model_type = 'sev'
        self.model_type = model_type
        # X_dict: cell key (string of 0/1 dummies) -> data matrix of that cell
        X_dict = file_load(model_type + '_' + claim_type + '_dict.pkl')
        if interactions_list == None:
            res = grab_results_db('overall', model, claim_type)
        else:
            res = grab_results_db('overall', model + str(interactions_list), claim_type)
            self.extended_model = model + str(interactions_list)
        self.beta = res['beta']
        self.var = res['var']
        self.std = res['std']
        self.z_stat = res['z_stat']
        self.y_bar = res['y_bar']
        ind_res = {}
        if model_type == 'freq':
            cell_res = np.empty([len(X_dict), 7])
        elif model_type == 'sev':
            cell_res = np.empty([len(X_dict), 5])
        # per-model closures: cell loglikelihood, saturated loglikelihood,
        # deviance residuals and Pearson residuals
        if model == 'Poisson':
            def LL_func(X, mu, extra_param):
                '''
                loglikelihood: sum_i -exposure_i * exp(x_i'beta) + y_i * ln(exposure_i * exp(x_i'beta)) - ln y_i!
                '''
                res = np.sum(- X[:, [1]] * mu + X[:, [0]] * np.log(X[:, [1]] * mu) - np.log(sp.factorial(X[:, [0]])))
                return res
            def LL_saturated(X, extra_param):
                '''
                loglikelihood of saturated model
                '''
                aux_res = np.zeros(np.shape(X[:, [0]]))
                index = np.where(X[:, [0]] > 0)[0]
                aux_res[index] = - X[:, [0]][index] + X[:, [0]][index] * np.log(X[:, [0]][index]) - np.log(sp.factorial(X[:, [0]][index]))
                res = np.sum(aux_res)
                return res
            def deviance(X, mu, extra_param, y_bar):
                '''
                deviance^2 = y_i * ln(y_i/mu_i) - (y_i - mu_i)
                '''
                aux_dev = np.zeros(np.shape(X[:, [0]]))
                aux_dev_y_bar = np.zeros(np.shape(X[:, [0]]))
                index = np.where(X[:, [0]] > 0)[0]
                aux_dev[index] = X[:, [0]][index] * np.log(X[:, [0]][index] / (X[:, [1]][index] * mu))
                aux_dev_y_bar[index] = X[:, [0]][index] * np.log(X[:, [0]][index] / (X[:, [1]][index] * y_bar))
                aux2_dev = X[:, [0]] - X[:, [1]] * mu
                aux2_dev_y_bar = X[:, [0]] - X[:, [1]] * y_bar
                dev_local = 2 * np.sum(aux_dev - aux2_dev)
                dev_y_bar_local = 2 * np.sum(aux_dev_y_bar - aux2_dev_y_bar)
                # sign of the residual follows the sign of y_i - mu_i
                index2 = np.where(X[:, [0]] - X[:, [1]] * mu < 0)[0]
                dev_is = (2*(aux_dev - aux2_dev))**.5
                dev_is[index2] = -1 * dev_is[index2]
                return (dev_is, dev_local, dev_y_bar_local)
            def Pearson(X, mu, extra_param):
                '''(y_i-mu_i) / mu_i^.5'''
                Pearson_is = (X[:, [0]] - X[:, [1]] * mu) / (X[:, [1]] * mu)**.5
                Pearson_local = np.sum(Pearson_is**2)
                return (Pearson_is, Pearson_local)
        elif model == 'NB2':
            def LL_func(X, mu, extra_param):
                '''
                loglikelihood: sum_i ln(gamma(y_i + alpha^(-1))/gamma(alpha^(-1))) -ln y_i! -(y_i + alpha^(-1))*ln(alpha^(-1) + exposure_i*exp(x_i'beta)) + alpha^(-1)*ln(alpha^(-1)) + y_i*ln(exposure_i*exp(x_i'beta))
                '''
                inv_alpha = extra_param**(-1)
                res = np.sum(np.log(sp.gamma(X[:, [0]] + inv_alpha) / sp.gamma(inv_alpha)) - np.log(sp.factorial(X[:, [0]])) - (X[:, [0]] + inv_alpha) * np.log(inv_alpha + X[:, [1]] * mu) + inv_alpha * np.log(inv_alpha) + X[:, [0]] * np.log(X[:, [1]] * mu))
                return res
            def LL_saturated(X, extra_param):
                '''
                loglikelihood of saturated model
                '''
                inv_alpha = extra_param**(-1)
                aux_res = np.log(sp.gamma(X[:, [0]] + inv_alpha) / sp.gamma(inv_alpha)) - np.log(sp.factorial(X[:, [0]])) - (X[:, [0]] + inv_alpha) * np.log(inv_alpha + X[:, [0]]) + inv_alpha * np.log(inv_alpha)
                index = np.where(X[:, [0]] > 0)[0]
                aux_res[index] = aux_res[index] + X[:, [0]][index] * np.log(X[:, [0]][index])
                res = np.sum(aux_res)
                return res
            def deviance(X, mu, extra_param, y_bar):
                '''
                deviance^2 = y_i * ln(y_i/mu_i) - (y_i + alpha^(-1))*ln((y_i + alpha^(-1))/(mu_i + alpha^(-1)))
                '''
                inv_alpha = extra_param**(-1)
                aux_dev = np.zeros(np.shape(X[:, [0]]))
                aux_dev_y_bar = np.zeros(np.shape(X[:, [0]]))
                index = np.where(X[:, [0]] > 0)[0]
                aux_dev[index] = X[:, [0]][index] * np.log(X[:, [0]][index] / (X[:, [1]][index] * mu))
                aux_dev_y_bar[index] = X[:, [0]][index] * np.log(X[:, [0]][index] / (X[:, [1]][index] * y_bar))
                aux2_dev = (X[:, [0]] + inv_alpha) * np.log((X[:, [0]] + inv_alpha) / (X[:, [1]] * mu + inv_alpha))
                aux2_dev_y_bar = (X[:, [0]] + inv_alpha) * np.log((X[:, [0]] + inv_alpha) / (X[:, [1]] * y_bar + inv_alpha))
                dev_local = 2 * np.sum(aux_dev - aux2_dev)
                dev_y_bar_local = 2 * np.sum(aux_dev_y_bar - aux2_dev_y_bar)
                index2 = np.where(X[:, [0]] - X[:, [1]] * mu < 0)[0]
                dev_is = (2*(aux_dev - aux2_dev))**.5
                dev_is[index2] = -1 * dev_is[index2]
                return (dev_is, dev_local, dev_y_bar_local)
            def Pearson(X, mu, extra_param):
                '''(y_i-mu_i) / (mu_i+alpha*mu_i^2)^.5'''
                Pearson_is = (X[:, [0]] - X[:, [1]] * mu) / (X[:, [1]] * mu + extra_param * (X[:, [1]] * mu)**2)**.5
                Pearson_local = np.sum(Pearson_is**2)
                return (Pearson_is, Pearson_local)
        elif model in {'Logit', 'Probit', 'C-loglog'}:
            # Binomial family: counts and exposure aggregated per cell first
            def LL_func(X, mu, extra_param):
                '''
                loglikelihood: sum_i y_i * ln(mui_i/(1-mu_i)) + m_i * ln(1-mu_i)
                '''
                y = np.sum(X[:, [0]])
                m = np.sum(X[:, [1]])
                res = np.sum(y * np.log(mu / (1 - mu)) + m * np.log(1 - mu))
                return res
            def LL_saturated(X, extra_param):
                '''
                loglikelihood of saturated model
                '''
                y = np.sum(X[:, [0]])
                m = np.sum(X[:, [1]])
                if y > 0:
                    res = np.sum(y * np.log((y / m) / (1 - (y / m))) + m * np.log(1 - (y / m)))
                else:
                    res = 0
                return res
            def deviance(X, mu, extra_param, y_bar):
                '''
                deviance^2 = y_i * ln(y_i/mu_i) + (m_i - y_i) * ln((m_i - y_i)/(m_i-mu_i))
                '''
                y = np.sum(X[:, [0]])
                m = np.sum(X[:, [1]])
                if y > 0:
                    dev_local = 2 * (y * np.log(y / mu) + (m - y) * np.log((m - y) / (m - mu)))
                    dev_y_bar_local = 2 * (y * np.log(y / y_bar) + (m - y) * np.log((m - y) / (m - y_bar)))
                else:
                    try:
                        dev_local = 2 * (m * np.log(m / (m - mu)))
                    except:
                        dev_local = 0
                    dev_y_bar_local = 0
                if y >= mu:
                    dev_is = dev_local**.5
                else:
                    dev_is = - dev_local**.5
                return (dev_is, dev_local, dev_y_bar_local)
            def Pearson(X, mu, extra_param):
                '''(y_i - m_i*mu_i)^2 / m_i*mu_i*(1-mu_i)'''
                y = np.sum(X[:, [0]])
                m = np.sum(X[:, [1]])
                Pearson_is = (y - m * mu) / (m * mu * (1 - mu))**.5
                Pearson_local = Pearson_is**2
                return (Pearson_is, Pearson_local)
        elif model == 'LNormal':
            def LL_func(X, mu, extra_param):
                '''
                loglikelihood: sum_i -ln 2pi/2 -ln sigma^2/2 - (ln y_i - mu_i)^2/2sigma^2
                '''
                sigma2 = extra_param
                # NOTE(review): precedence makes the last term
                # ((ln y - mu)**2 / 2) * sigma2; the formula above wants
                # /(2*sigma2) -- confirm intended
                res = np.sum(- np.log(2 * np.pi) / 2 - np.log(sigma2) / 2 - (np.log(X) - mu)**2 / 2 * sigma2)
                return res
            def LL_saturated(X, extra_param):
                '''
                loglikelihood of saturated model
                '''
                sigma2 = extra_param
                res = np.sum(- np.log(2 * np.pi) / 2 - np.log(sigma2) / 2)
                return res
            def deviance(X, mu, extra_param, y_bar):
                '''
                deviance^2 = (ln y_i - mu_i)^2
                '''
                aux_dev = (np.log(X) - mu)**2
                aux_dev_y_bar = (np.log(X) - y_bar)**2
                dev_local = np.sum(aux_dev)
                dev_y_bar_local = np.sum(aux_dev_y_bar)
                index = np.where(np.log(X) - mu < 0)[0]
                dev_is = aux_dev**.5
                dev_is[index] = -1 * dev_is[index]
                return (dev_is, dev_local, dev_y_bar_local)
            def Pearson(X, mu, extra_param):
                '''ln y_i - mu_i'''
                Pearson_is = np.log(X) - mu
                Pearson_local = np.sum(Pearson_is**2)
                return (Pearson_is, Pearson_local)
        elif model == 'Gamma':
            def LL_func(X, mu, extra_param):
                '''
                loglikelihood: sum_i -nu*(y_i/mu_i)-nu*ln(mu_i)+nu*ln(y_i)+nu*ln(nu)-ln(y_i)-ln(gamma(nu))
                '''
                nu = extra_param
                res = np.sum(- nu * (X / mu) - nu * np.log(mu) + nu * np.log(X) + nu * np.log(nu) - np.log(X) - np.log(sp.gamma(nu)))
                return res
            def LL_saturated(X, extra_param):
                '''
                loglikelihood of saturated model
                '''
                nu = extra_param
                res = np.sum(- nu - nu * np.log(X) + nu * np.log(X) + nu * np.log(nu) - np.log(X) - np.log(sp.gamma(nu)))
                return res
            def deviance(X, mu, extra_param, y_bar):
                '''
                deviance^2 = -ln(y_i/mu_i) + (y_i - mu_i) / mu_i
                '''
                aux_dev = - np.log(X / mu) + (X - mu) / mu
                aux_dev_y_bar = - np.log(X / y_bar) + (X - y_bar) / y_bar
                dev_local = 2 * np.sum(aux_dev)
                dev_y_bar_local = 2 * np.sum(aux_dev_y_bar)
                index = np.where(X - mu < 0)[0]
                dev_is = (2*aux_dev)**.5
                dev_is[index] = -1 * dev_is[index]
                return (dev_is, dev_local, dev_y_bar_local)
            def Pearson(X, mu, extra_param):
                '''(y_i-mu_i) / (mu_i^2)^.5'''
                Pearson_is = (X - mu) / mu
                Pearson_local = np.sum(Pearson_is**2)
                return (Pearson_is, Pearson_local)
        elif model == 'InvGaussian':
            def LL_func(X, mu, extra_param):
                '''
                loglikelihood: sum_i -.5*ln(2*pi*sigma2)-.5*ln(y_i^3)-y_i/2*sigma2*mu^2+1/sigma2*mu_i-(1/2*sigma2*y_i
                '''
                sigma2 = extra_param
                res = np.sum(- .5 * np.log(2 * np.pi * sigma2) - .5 * np.log(X**3) - X / (2 * sigma2 * mu**2) + (sigma2 * mu)**(-1) - (2 * sigma2 * X)**(-1))
                return res
            def LL_saturated(X, extra_param):
                '''
                loglikelihood of saturated model
                '''
                sigma2 = extra_param
                res = np.sum(- .5 * np.log(2 * np.pi * sigma2) - .5 * np.log(X**3))
                return res
            def deviance(X, mu, extra_param, y_bar):
                '''
                deviance^2 = (y_i-mu_i)^2/(mu_i^2*y_i)
                '''
                aux_dev = (X - mu)**2 / (mu**2*X)
                aux_dev_y_bar = (X - y_bar)**2 / (y_bar**2*X)
                dev_local = np.sum(aux_dev)
                dev_y_bar_local = np.sum(aux_dev_y_bar)
                index = np.where(X - mu < 0)[0]
                dev_is = aux_dev**.5
                dev_is[index] = -1 * dev_is[index]
                return (dev_is, dev_local, dev_y_bar_local)
            def Pearson(X, mu, extra_param):
                '''(y_i-mu_i) / (mu_i^3)^.5'''
                Pearson_is = (X - mu) / mu**1.5
                Pearson_local = np.sum(Pearson_is**2)
                return (Pearson_is, Pearson_local)
        LL_sum = 0
        if interactions_list == None:
            LL_saturated_sum = 0
        dev_stat_sum = 0
        dev_y_bar_stat_sum = 0
        Pearson_stat_sum = 0
        # loop over rating cells: compute fitted mean mu from the cell key,
        # then accumulate likelihood, deviance and Pearson statistics
        for i, key in enumerate(X_dict.keys()):
            X = X_dict[key]
            if interactions_list == None:
                pass
            else:
                key = interactions_new_key(interactions_list, key)
            if model == 'Poisson':
                mu = np.exp(np.array([1] + [float(j) for j in list(key)]) @ self.beta)
                extra_param = None
            elif model == 'Logit':
                aux_mu = np.exp(np.array([1] + [float(j) for j in list(key)]) @ self.beta)
                mu = aux_mu / (1 + aux_mu)
                extra_param = None
            elif model == 'Probit':
                aux_mu = np.array([1] + [float(j) for j in list(key)]) @ self.beta
                mu = st.norm.cdf(aux_mu)
                extra_param = None
            elif model == 'C-loglog':
                mu = 1 - np.exp(- np.exp(np.array([1] + [float(j) for j in list(key)]) @ self.beta))
                extra_param = None
            elif model == 'LNormal':
                mu = np.array([1] + [float(j) for j in list(key)]) @ self.beta[:-1]
                extra_param = self.beta[-1]
            elif model in {'NB2', 'Gamma', 'InvGaussian'}:
                mu = np.exp(np.array([1] + [float(j) for j in list(key)]) @ self.beta[:-1])
                extra_param = self.beta[-1]
            if np.shape(X)[0] > 0:
                LL_sum += LL_func(X, mu, extra_param)
                if interactions_list == None:
                    LL_saturated_sum += LL_saturated(X, extra_param)
                (dev_is, dev_local, dev_y_bar_local) = deviance(X, mu, extra_param, self.y_bar)
                (Pearson_is, Pearson_local) = Pearson(X, mu, extra_param)
                ind_res[key] = np.hstack((dev_is, Pearson_is))
                if model_type == 'freq':
                    cell_res[i, 1] = np.average(X[:, [0]] * X[:, [1]])
                elif model_type == 'sev':
                    if model != 'LNormal':
                        cell_res[i, 1] = np.average(X)
                    else:
                        cell_res[i, 1] = np.average(np.log(X))
            else:
                # empty cell: no residuals, zero contributions
                ind_res[key] = np.array([])
                dev_local = 0
                dev_y_bar_local = 0
                Pearson_local = 0
                cell_res[i, 1] = 0
            cell_res[i, 0] = len(X)
            cell_res[i, 2] = mu
            cell_res[i, 3] = dev_local
            cell_res[i, 4] = Pearson_local
            # NOTE(review): for an empty freq cell cell_res[i, 0] == 0, so
            # the mean-exposure division below would raise under np.seterr;
            # presumably freq cells are never empty -- confirm
            if model_type == 'freq':
                cell_res[i, 5] = np.sum(X[:, [1]])
                cell_res[i, 6] = np.sum((X[:, [1]] - (cell_res[i, 5] / cell_res[i, 0]))**2)
            dev_stat_sum += dev_local
            dev_y_bar_stat_sum += dev_y_bar_local
            Pearson_stat_sum += Pearson_local
        # aggregate fit statistics
        self.ind_res = ind_res
        self.cell_res = cell_res
        # NOTE(review): this stores Phi(|z|), not the two-sided p-value
        # 2*(1 - Phi(|z|)) -- confirm intended
        self.p_value = st.norm.cdf(abs(self.z_stat))
        self.n = np.sum(cell_res[:, [0]])
        self.k = len(self.beta)
        self.J = len(self.cell_res)
        self.LL = LL_sum
        self.D = dev_stat_sum
        if interactions_list == None:
            self.LL_saturated = LL_saturated_sum
            self.D_scaled = 2 * (LL_saturated_sum - LL_sum)
        self.Pearson = Pearson_stat_sum
        self.pseudo_R2 = 1 - self.D / dev_y_bar_stat_sum
        self.GF = np.sum((cell_res[:, [0]] * (cell_res[:, [1]] - cell_res[:, [2]])**2) / cell_res[:, [2]])
        if model_type == 'freq':
            self.exp_tot = np.sum(cell_res[:, [5]])
            self.exp_avg = self.exp_tot / self.n
            self.exp_std = (np.sum(cell_res[:, [6]])/self.n)**.5
def save_stdout_results(self, grouped='no', individual='no'):
'''
Saves main or overall results on shelve database.
In addition specify grouped = 'yes' or individual = 'yes' for cell_res or ind_res persistency as pickle file.
'''
if self.model_type == 'freq':
res_dict = {'p_value': self.p_value, 'n': self.n, 'exp_tot': self.exp_tot, 'exp_avg': self.exp_avg, 'exp_std': self.exp_std, 'k': self.k, 'J': self.J, 'LL': self.LL, 'D': self.D, 'Pearson': self.Pearson, 'pseudo_R2': self.pseudo_R2, 'GF': self.GF}
else:
res_dict = {'p_value': self.p_value, 'n': self.n, 'k': self.k, 'J': self.J, 'LL': self.LL, 'D': self.D, 'Pearson': self.Pearson, 'pseudo_R2': self.pseudo_R2, 'GF': self.GF}
if hasattr(self, 'LL_saturated'):
res_dict['LL_saturated'] = self.LL_saturated
res_dict['D_scaled'] = self.D_scaled
prefix = 'overall'
try:
save_results_db(res_dict, prefix, self.extended_model, self.claim_type)
except:
save_results_db(res_dict, prefix, self.model, self.claim_type)
if grouped == 'yes':
prefix = 'grouped'
try:
save_results_pkl(self.cell_res, prefix, self.extended_model, self.claim_type)
except:
save_results_pkl(self.cell_res, prefix, self.model, self.claim_type)
if individual == 'yes':
prefix = 'individual'
try:
save_results_pkl(self.ind_res, prefix, self.extended_model, self.claim_type)
except:
save_results_pkl(self.ind_res, prefix, self.model, self.claim_type)
if __name__ == '__main__':
for model in ('Logit', 'Probit', 'C-loglog', 'LNormal', 'Gamma', 'InvGaussian', 'Poisson', 'NB2'):
#for claim_type in ('casco', 'rcd'):
for claim_type in ('casco',):
for int_list in [(('veh_age', 'region'),), (('veh_age', 'sex'),), (('veh_age', 'bonus'),), (('veh_age', 'age'),), (('veh_age', 'cov'),), (('region', 'sex'),), (('region', 'bonus'),), (('region', 'age'),), (('region', 'cov'),), (('sex', 'bonus'),), (('sex', 'age'),), (('sex', 'cov'),), (('bonus', 'age'),), (('bonus', 'cov'),), (('age', 'cov'),), (('veh_age', 'region'), ('veh_age', 'sex'), ('veh_age', 'bonus'), ('veh_age', 'age'), ('veh_age', 'cov'), ('region', 'sex'), ('region', 'bonus'), ('region', 'age'), ('region', 'cov'), ('sex', 'bonus'), ('sex', 'age'), ('sex', 'cov'), ('bonus', 'age'), ('bonus', 'cov'), ('age', 'cov'))]:
x = Estimation(model, claim_type, interactions_list=int_list)
#x.save_estimation_results()
y = Stdout(model, claim_type, interactions_list=int_list)
#y.save_stdout_results()
| true |
9a4f99ead700621ca0b7636bb6682bae4fd50b47 | Python | szhyuling/aha-algorithm-python | /Chapter_1/xiaohengmaishu.py | UTF-8 | 1,662 | 3.65625 | 4 | [] | no_license | import numpy as np
from bubble_sort import bubble_sort
from quick_sort import quick_sort
#去重+排序问题
#解法一:先去重,后排序(桶排序)
#解法二:先排序,后去重(常规排序算法)
def bucket_uniquesort(m, nums, sort="ascend"):
assert(sort=="ascend" or sort=="descend")
book = np.zeros((m,), dtype=int)
n=len(nums)
for i in range(n):
book[nums[i]]=1
sorted_nums = np.zeros((n,), dtype=int)
t = 0
if sort=="ascend":
for i in range(m):
if book[i]>0:
sorted_nums[t]=i
t+=1
else:
for i in range(m-1,-1,-1):
if book[i]>0:
sorted_nums[t]=i
t+=1
nums_unique_sort = np.zeros((t,), dtype=int)
for j in range(t):
nums_unique_sort[j]=sorted_nums[j]
return nums_unique_sort
def sort_unique(nums, sort="ascend"):
nums_copy = nums.copy()
bubble_sort(nums_copy,sort)
n = len(nums_copy)
j = 1
i = 0
while j<n:
if nums_copy[j]!=nums_copy[i]:
nums_copy[i+1]=nums_copy[j]
i+=1
j+=1
nums_unique_sort = np.zeros((i+1,), dtype=int)
for j in range(i+1):
nums_unique_sort[j]=nums_copy[j]
return nums_unique_sort
if __name__=="__main__":
nums = [20,40,32,67,40,20,89,300,400,15]
print("原列表:", nums)
sorted_nums = bucket_uniquesort(1001, nums)
print("先去重后排序的列表:", sorted_nums)
nums_unique_sort = sort_unique(nums)
print("先排序后去重的列表:", nums_unique_sort)
| true |
83b0a3332b1ce9a465f7a835471f9a217f1c107f | Python | Egogorka/gameplatformer | /src/utility/Eventer.py | UTF-8 | 421 | 2.65625 | 3 | [] | no_license | from src.utility.EventListener import EventListener
class Eventer:
def __init__(self):
self._observers = []
def addListener(self, inObserver : EventListener):
self._observers.append(inObserver)
def removeListener(self, inObserver : EventListener):
self._observers.remove(inObserver)
def fireEvent(self, event):
for x in self._observers:
x.eventFired(event) | true |
8bf14969f0aa9a66725469f2381ed1eb4aba702f | Python | Zedmor/hackerrank-puzzles | /leetcode/4.py | UTF-8 | 891 | 3.921875 | 4 | [] | no_license | """
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
Subscribe to see which companies asked this question.
"""
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
hyp1 = len(nums1) // 2
hyp2 = len(nums2) // 2
print(nums1[hyp1], nums2[hyp2])
if nums1[hyp1] < nums2[hyp2]:
if nums1[hyp1] > nums2[hyp2-1]:
return (nums1[hyp1] + nums2[hyp2]) /2
nums1 = [1, 2, 9]
nums2 = [4, 6, 8, 10,13]
print(Solution().findMedianSortedArrays(nums1, nums2))
| true |
26dc6d3fb834cda18c6dfc6610266f042d8c0548 | Python | assen0817/bfindex | /File.py | UTF-8 | 3,701 | 3.078125 | 3 | [] | no_license | from BloomFilte import bfindex
# ブルームフィルターのビット列
m=64
# 登録可能キーワード数
k=3
# ファイルへの書き込み
# ファイル名、データ内容、日付
def write(file_name, data, date):
# バイナリ登録用のファイル名と名前が被らないように避ける
if file_name == 'binary_data':
print('ファイル名を変えてください')
return 'ファイル名を変えてください'
# ファイル名がぶつからないように存在していたらエラーを取得
# ファイルが存在することを返す
try:
# 入力したfile_nameでファイルを作成する
with open(f'data/{file_name}.txt', mode='x') as f:
# データ内容を記入
f.write(f'{data}\n')
# 日付の記入
f.write(f'{date}\n')
except FileExistsError:
print('ファイルはもう存在します。')
return 'ファイルはもう存在します。'
# m個のビット列とk個の登録制限でブルームフィルターを作る
b = bfindex(m=m, k=k)
# ビット列にデータを登録
b.add(file_name)
b.add(data)
b.add(date)
# バイナリ管理用のファイルに計算したビット列とファイルの名前を登録
with open(f'data/binary_data.txt', mode='a') as f:
f.write(f'{b.bit}, {file_name}\n')
# バイナリファイルのバイナリにkeyのバイナリが登録されているかどうか
def check_file(key):
# 見つかったファイル名を出力用に保存する
result = {}
# バイナリーファイルを検索
with open('data/binary_data.txt') as f:
# 一行ごとに読み込み
ls = f.readlines()
for l in ls:
# ,での区切りでバイナリとファイル名を取得
s = l.split(', ')
# ブルームフィルターをセット
b = bfindex(m=m, k=k)
# ファイルのバイナリをブルームフィルターセットし、
# 上限値を入れておく
b._set(int(s[0]))
b.count = k
# バイナリにkeyの値が入っているかどうか
if b.check(key):
# 入っていれば、ファイル名を取得
file_name = s[1].split('\n')[0]
# keyとファイル名が一緒なら、それを記録
# true positiveも記録し、次のデータを見る
if file_name == key:
result[file_name] = 'true positive'
print('true positive')
continue
# ファイルの中身を確認する
with open(f'data/{file_name}.txt') as fn:
# 一行ずつ中身を見る
fls = fn.readlines()
for fl in fls:
# ファイルの中身のデータとkeyが一緒なら、そのファイル名を記録
# true positiveも記録し、次のデータを見る
if fl.split('\n')[0] == key:
print('true positive')
result[file_name] = 'true positive'
break
else:
# ファイルの中身とマッチングせずに、ループが終わったら、そのファイル名を記録
# false positiveも記録する
result[file_name] = 'false positive'
print('false positive')
# 結果を返す
return result | true |
eaf2c68e78298c0f702f0672633783c848ef781a | Python | dandyvica/articles | /python_lists/a6.py | UTF-8 | 273 | 2.5625 | 3 | [] | no_license | phrase = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."
# convert each character to its ASCII representation
converted_phrase = [ord(x) for x in phrase]
# now compute sdbm hash
sdbm = reduce() | true |
4c4994ad8636d74511edf7d4963fc14e893aafc8 | Python | mfitton/Wordpress-Grabber | /blogclass.py | UTF-8 | 552 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
WordpressGrab
Created by Max Fitton on 2012-12-16.
Copyright (c) 2012 __MyCompanyName__. All rights reserved.
"""
#in loc tags
import sys
from bs4 import BeautifulSoup
def get_list_of_posts(xml_file):
list_of_posts = []
sitemap = open(xml_file)
soup = BeautifulSoup(sitemap, 'xml')
for loctag in soup.find_all('loc'):
list_of_posts.append( loctag.get_text() )
print list_of_posts
return list_of_posts
def main():
get_list_of_posts('sitemap.xml')
if __name__ == '__main__':
main() | true |
1357968fc2ced3c602c26064166ba746488e1520 | Python | oddeirikigland/tdt4171 | /classifiers/sklearn_classifier.py | UTF-8 | 1,905 | 3.0625 | 3 | [] | no_license | import os
import pickle
from sklearn import feature_extraction, naive_bayes, tree, metrics
def transform_input_data(input_training, input_test):
vectorized = feature_extraction.text.HashingVectorizer(
stop_words="english", binary=True
) # n_features
return (
vectorized.fit_transform(input_training),
vectorized.fit_transform(input_test),
)
def get_data(filename):
data = pickle.load(open(filename, "rb"))
x_train, x_test, y_train, y_test = (
data["x_train"],
data["x_test"],
data["y_train"],
data["y_test"],
)
x_train_transform, x_test_transform = transform_input_data(x_train, x_test)
return x_train_transform, x_test_transform, y_train, y_test
def fit_data(x, y, classifier):
return classifier.fit(x, y)
def predict_data(test_data, classifier):
return classifier.predict(test_data)
def accuracy_score(prediction, test):
return metrics.accuracy_score(prediction, test)
def classify_and_predict(
x_train_transform, x_test_transform, y_train, y_test, classifier
):
fit_data(x_train_transform, y_train, classifier)
prediction = predict_data(x_test_transform, classifier)
return accuracy_score(prediction, y_test)
def get_full_path(rel_path):
return os.path.join(os.path.dirname(__file__), rel_path)
def main():
sklearn_data = "data/sklearn-data.pickle"
data = get_data(get_full_path(sklearn_data))
naive_bayes_classifier = naive_bayes.BernoulliNB()
decision_tree_classifier = tree.DecisionTreeClassifier()
naive_bayes_accuracy = classify_and_predict(*data, naive_bayes_classifier)
print("Accuracy Naive Bayes: {}".format(naive_bayes_accuracy))
decision_tree_accuracy = classify_and_predict(*data, decision_tree_classifier)
print("Accuracy Decision Tree: {}".format(decision_tree_accuracy))
if __name__ == "__main__":
main()
| true |
0747534c10b9bddb5b524bf7608bdfd9c7ae3b27 | Python | lijikai1206/Pycharm_Projects01 | /PythonFile/10_MySqlTest/demosql/demo_sql02_CreatTable.py | UTF-8 | 463 | 2.703125 | 3 | [] | no_license | import mysql.connector
mydb = mysql.connector.connect(
host="localhost",
user="root",
passwd="ljk1103",
database="runoob"
)
mycursor = mydb.cursor()
#创建数据表
mycursor.execute("CREATE TABLE Websites (id VARCHAR(255), "
"name VARCHAR(255), url VARCHAR(255),alexa VARCHAR(255),"
"country VARCHAR(255))")
#查看数据表是否已存在。
mycursor.execute("SHOW TABLES")
for x in mycursor:
print(x) | true |
eb511cfd9fee2787ef8572fa5e2391da991aa5cf | Python | DankGroundhog/PyMarlin | /pymarlin/core/module_interface.py | UTF-8 | 8,497 | 2.984375 | 3 | [
"MIT"
] | permissive | """
Module Interface module:
This module contains the abstract classes CallbackInterface and
ModuleInterface that can provide everything necessary for model
training. Users should implement these abstract classes in their
Scenarios.
"""
from abc import ABC, abstractmethod
import enum
from typing import Iterable, Tuple, Union, Dict
import torch
class Stage(enum.Enum):
"""Stages: train, val, test"""
TRAIN = 1
VAL = 2
TEST = 3
class CallbackInterface(ABC):
"""A callback class used to add scenario specific outputs/logging/debugging during training.
"""
def on_begin_train_epoch(self, global_step: int, epoch: int):
"""Hook before training epoch (before model forward).
Args:
global_step (int): [description]
epoch (int): Current training epoch
"""
def on_end_train_step(self, global_step:int, *train_step_collated_outputs):
"""Runs after end of a global training step.
Args:
global_step (int): current global step
train_step_collated_outputs (list): all train step outputs in a list.
If train_step returns loss, logits train_step_collated_outputs will have [loss_collated, logits_collated]
"""
def on_end_train_epoch(self, global_step:int, *train_step_collated_outputs):
"""
Hook after training epoch.
Args:
global_step (int): [description]
train_step_collated_outputs (list): all train step outputs in a list.
If train_step returns loss, logits train_step_collated_outputs will have [loss_collated, logits_collated]
"""
def on_end_backward(self, global_step:int, loss_tensor):
"""Hook after each backward
Args:
global_step (int): [description]
loss_tensor(torch.Tensor): Undetached loss tensor
"""
def on_end_val_epoch(self, global_step:int, *val_step_collated_outputs, key="default"):
"""Update value at end of end of end of variable
Args:
global_step (int): [description]
val_step_collated_outputs : all val step outputs in a list.
If val_step returns loss, logits train_step_collated_outputs will have [loss_collated, logits_collated]
key (str, optional): The id of the validation dataloader.
Defaults to "default".
"""
def on_end_train(self, global_step:int):
"""Hook after training finishes
Args:
global_step (int): [description]
"""
class ModuleInterface(torch.nn.Module, CallbackInterface):
"""Interface for PyTorch modules.
This interface contains model architecture in the form of a PyTorch
`nn.Module` together with optimizers and schedules, train and validation
step recipes and any callbacks.
Note: The forward function is overridden.
Note: Users are encouraged to override the `train_step` and `val_step`
methods.
"""
@abstractmethod
def get_optimizers_schedulers(
self, estimated_global_steps_per_epoch: int, epochs: int
) -> Tuple[Iterable[torch.optim.Optimizer], Iterable]:
"""
Returns a list of optimizers and schedulers
that are used to instantiate the optimizers .
Returns:
Tuple[Iterable[torch.optim.Optimizer], Iterable]:
list of optimizers and list of schedulers
"""
@abstractmethod
def get_train_dataloader(
self, sampler:type, batch_size:int
) -> torch.utils.data.DataLoader:
"""
Returns a dataloader for the training loop .
Called every epoch.
Args:
sampler (type): data sampler type which is a derived class of torch.utils.data.Sampler
Create concrete sampler object before creating dataloader.
batch_size (int): batch size per step per device
Returns:
torch.utils.data.DataLoader: Training dataloader
Example:
train_ds = self.data.get_train_dataset()
dl = DataLoader(train_ds, batch_size = batch_size, collate_fn= self.collate_fin, sampler = sampler(train_ds))
return dl
"""
@abstractmethod
def get_val_dataloaders(
self, sampler:torch.utils.data.Sampler, batch_size : int
) -> Union[
Dict[str, torch.utils.data.DataLoader],
torch.utils.data.DataLoader
]:
"""
Returns dataloader(s) for validation loop .
Supports multiple dataloaders based on key value.
Keys will be passed in the callback functions.
Called every epoch .
Args:
sampler (type): data sampler type which is a derived class of torch.utils.data.Sampler
Create concrete sampler object before creating dataloader.
batch_size (int): validation batch size per step per device
Returns:
Union[ Dict[str, torch.utils.data.DataLoader],
torch.utils.data.DataLoader ]:
A single dataloader or a dictionary of dataloaders
with key as the data id and value as dataloader
"""
def get_test_dataloaders(self, sampler, batch_size):
"""
Returns test dataloaders
Args:
sampler ([type]): [description]
batch_size ([type]): [description]
"""
pass
def forward(
self,
stage: Stage,
global_step: int,
batch,
device: Union[torch.device, str, int],
):
"""
torch.nn.Module's forward() function.
Overridden to call train_step() or val_step() based on stage .
Args:
stage (Stage): trian/val/test
global_step (int): current global step
batch ([type]): output of dataloader step
device (Union[torch.device, str, int]): device
Raises:
AttributeError: if stage is different than train, val, test
"""
if stage == Stage.TRAIN:
return self.train_step(
batch = batch, device = device, global_step = global_step)
elif stage == Stage.VAL:
return self.val_step(
batch = batch, device = device, global_step = global_step)
elif stage == Stage.TEST:
return self.test_step(
batch = batch, device = device, global_step = global_step)
else:
raise AttributeError("Stage not supported")
@abstractmethod
def train_step(
self, global_step: int, batch, device : Union[torch.device, str, int]
) -> Union[torch.Tensor, Tuple]:
"""
Train a single train step .
Batch should be moved to device before any operation.
Args:
global_step (int): [description]
batch ([type]): output of train dataloader step
device (Union[torch.device, str, int]): device
Returns:
Union[torch.Tensor, Iterable[torch.Tensor]]:
The first return value must be the loss tensor.
Can return more than one values in output. All outputs must be tensors
Callbacks will collate all outputs.
"""
@abstractmethod
def val_step(self, global_step: int, batch, device) -> Tuple:
"""
Runs a single Validation step .
Args:
global_step (int): [description]
batch ([type]): [description]
device ([type]): [description]
Returns:
Union[torch.Tensor, Iterable[torch.Tensor]]: values that need to be collected - loss, logits etc.
All outputs must be tensors
"""
def test_step(self, global_step: int, batch, device):
"""
Runs a single test step .
Args:
global_step (int): [description]
batch ([type]): [description]
device ([type]): [description]
"""
def get_state(self):
"""
Get the current state of the module, used for checkpointing.
Returns:
Dict: Dictionary of variables or objects to checkpoint.
"""
state_dict = self.state_dict()
return state_dict
def update_state(self, state: Dict):
"""
Update the module from a checkpointed state.
Args:
state (Dict): Output of get_state() during checkpointing.
"""
if state:
self.load_state_dict(state)
| true |
488a0a3bd5c4e52f98a4bc5fee28687e68fd3d65 | Python | sofide/apicolapp | /accounting/manage_data.py | UTF-8 | 3,602 | 2.890625 | 3 | [] | no_license | """
Manipulate accounting data to be used in views.
"""
from collections import defaultdict
import datetime
from django.db.models import Q, Sum, Count
from django.db.models.functions import Coalesce
from django.utils.text import slugify
from accounting.models import Category, Product, Purchase
def purchases_by_categories(user_id, from_date, to_date):
"""
Group purchases by categories. Returns a dict with the following structure:
{
'category_name': {
'slug': 'category_label_slug',
'desciption': 'category_description',
'amount': amount_of_money_spended_in_category,
'purchases': [purchases_list]
}
}
}
"""
category_amount = Coalesce(
Sum(
'products__purchases__value',
filter=Q(
products__user__pk=user_id,
products__purchases__date__range=(from_date, to_date)
)
),
0
)
categories = Category.objects.all().annotate(amount=category_amount)
grouped_purchases = {}
for category in categories:
if category.depreciation_period:
deprecation_days = category.depreciation_period * 365
from_purchase_date = from_date - datetime.timedelta(days=deprecation_days)
else:
from_purchase_date = from_date
categ_purchases = Purchase.objects.filter(
product__user__pk=user_id,
date__range=(from_purchase_date, to_date),
product__category=category,
).values(
'pk', 'amount', 'value', 'date', 'product__pk', 'product__name'
)
grouped_purchases[category.label] = {
'slug': slugify(category.label),
'description': category.description,
'amount': category.amount,
'depreciation_period': category.depreciation_period,
'purchases': categ_purchases,
}
categ_info = categ_purchases.aggregate(
total=Coalesce(Count('id'), 0),
products=Coalesce(Count('product', distinct=True), 0)
)
grouped_purchases[category.label].update(categ_info)
if category.depreciation_period:
total_depreciation = 0
for purchase in grouped_purchases[category.label]['purchases']:
depreciation = depreciation_calc_in_a_purchase(purchase, from_date, to_date,
category.depreciation_period)
total_depreciation += depreciation
purchase['depreciation'] = depreciation
grouped_purchases[category.label]['amount'] = total_depreciation
return grouped_purchases
def depreciation_calc_in_a_purchase(purchase, from_date, to_date, depreciation_period):
"""Caluclate the depreciation value of a purchase in a specific range of time"""
deprecation_days = depreciation_period * 365
depreciation_finish = purchase['date'] + datetime.timedelta(days=deprecation_days)
if depreciation_finish < from_date:
days_in_calc = 0
elif purchase['date'] <= from_date:
if depreciation_finish < from_date:
days_in_calc = (depreciation_finish - from_date).days
else:
days_in_calc = (to_date - from_date).days
else:
if depreciation_finish < from_date:
days_in_calc = (depreciation_finish - purchase['date']).days
else:
days_in_calc = (to_date - purchase['date']).days
return round((purchase['value'] / deprecation_days) * days_in_calc, 2)
| true |
afc052a5cfd58e0edbd8f5cbc15c79424f958638 | Python | arunpsg/unscramble_python | /Task2.py | UTF-8 | 1,197 | 3.9375 | 4 | [] | no_license | """
Read file into texts and calls.
It's ok if you don't understand how to read files
"""
import csv
phone_duration = {}
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
for call in calls:
phone_value = 0
phone_value = phone_duration.get(call[0], 0)
phone_value += int(call[3])
phone_duration[call[0]] = phone_value
phone_value1 = 0
if phone_duration.__contains__(call[1]):
phone_value1 = phone_duration.get(call[1])
phone_value1 += int(call[3])
phone_duration[call[1]] = phone_value1
max_phone_number = max(phone_duration, key=phone_duration.get)
print("{} spent the longest time, {} seconds, on the phone during September 2016.".format(max_phone_number, phone_duration[max_phone_number]))
"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during
September 2016.".
"""
| true |
dca6cc043fabe096b5084136e3cf12b1e74a8c35 | Python | jchernjr/code | /advent2021/day3.py | UTF-8 | 1,552 | 3.8125 | 4 | [] | no_license | from typing import List, Dict
def get_all_ith_chars(strings: List[str], i: int) -> List[str]:
"""Get the ith char of all input strings, and return them as a list"""
return [s[i] for s in strings]
def count_chars(chars: List[str]) -> Dict[str, int]:
counts = {}
for c in chars:
if c not in counts:
counts[c] = 0
counts[c] += 1
return counts
def get_most_common_char(chars: List[str]) -> str:
counts = count_chars(chars)
max_tuple = max(counts.items(), key=lambda item: item[1]) # 2nd part of tuple is the count
return max_tuple[0]
def bit_chars_to_int(bits: List[str]) -> int:
return int("".join(bits), 2)
if __name__ == "__main__":
with open("day3input.txt", "r") as f:
raw_lines = f.readlines()
# each line is a string of 0's and 1's followed by \n (except for the last line), so we have to strip the \n
lines = [s.strip() for s in raw_lines]
N_BITS = len(lines[0])
print(N_BITS)
# figure out most common bit (0 or 1) in the 1st, 2nd, ..., Nth bit position
most_common_bits = [get_most_common_char(get_all_ith_chars(lines, i)) for i in range(N_BITS)]
least_common_bits = ['0' if c == '1' else '1' for c in most_common_bits]
print(most_common_bits)
print(least_common_bits)
gamma_val = bit_chars_to_int(most_common_bits)
eps_val = bit_chars_to_int(least_common_bits)
print(f"gamma: {gamma_val}")
print(f"eps: {eps_val}")
print(gamma_val * eps_val)
| true |
48b93a12601c712db7f59101fed60db81c73d07c | Python | coolkevinc/CSIT-230-Final-Project | /Not_Gate.py | UTF-8 | 512 | 3.765625 | 4 | [] | no_license | ##Created by Kevin Chau
##interface class for the NOT gate
def NOT(a): ##defines the NOT Gate
if(a == True):
return int(False)
else:
return int(True)
def NOTrunner():
#defines the method that will simulate the NOT Gate
##inverts the user input (1 or 0)
##run NOTrunner.py for the simulation
a = bool(int(input("Enter 1 or 0: "), 2))
if(a == 1):
x = True
elif(a == 0):
x = False
print("a = ", int(a))
print("NOT a = ", NOT(a))
| true |
541d164b300d40d1e7ac0fdbe29ff234e198a998 | Python | likr/egraph-rs | /crates/python/examples/stress_majorization.py | UTF-8 | 878 | 2.921875 | 3 | [
"MIT"
] | permissive | import networkx as nx
from egraph import Graph, Coordinates, StressMajorization, stress, warshall_floyd
import matplotlib.pyplot as plt
def main():
nx_graph = nx.les_miserables_graph()
graph = Graph()
indices = {}
for u in nx_graph.nodes:
indices[u] = graph.add_node(u)
for u, v in nx_graph.edges:
graph.add_edge(indices[u], indices[v], (u, v))
drawing = Coordinates.initial_placement(graph)
d = warshall_floyd(graph, lambda _: 100)
s0 = stress(drawing, d)
stress_majorization = StressMajorization.with_distance_matrix(drawing, d)
stress_majorization.run(drawing)
s = stress(drawing, d)
print(f'stress {s0:.2f} -> {s:.2f}')
pos = {u: (drawing.x(i), drawing.y(i)) for u, i in indices.items()}
nx.draw(nx_graph, pos)
plt.savefig('tmp/stress_majorization.png')
if __name__ == '__main__':
main()
| true |
b0c24221fa58866d322bb250490896333d8743e9 | Python | angrycaptain19/logtree | /render.py | UTF-8 | 3,504 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python3
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
import networkx as nx
import string
import random
import itertools as it
import sys
from logtree import LogTree
def render(tree, output):
# create graph
G = nx.DiGraph()
heights = {}
column_labels = {}
node_labels = {}
#edge_labels = {}
edge_colors = []
for i, node in enumerate(tree.nodes):
#G.add_node(i)
column_labels[i] = '%s%s: %s' % (
'c' if node.type == 'create' else
'd' if node.type == 'delete' else
'',
node.key, node.value)
heights[i] = 0
for j, alt in enumerate(node.alts):
heights[i] = max(heights.get(i, 0), j+1)
G.add_node((i, j))
# if j != 0:
# G.add_edge((i, j-1), (i, j), color='b')
G.add_edge((i, j), (i, j+1), color=alt.colors[0])
G.add_edge((i, j), (alt.off, alt.skip), color=alt.colors[1])
node_labels[(i, j)] = (
"%s%s%s" % (
"<" if alt.lt else "≥",
alt.key,
'%+d' % alt.delta if alt.delta else ''))
for k, v in heights.items():
G.add_node((k, v))
# if v != 0:
# G.add_edge((k, v-1), (k, v), color='b')
node_labels[(k, v)] = column_labels[k]
#pos = {1: (0, 0), 2: (-1, 0.3), 3: (2, 0.17), 4: (4, 0.255), 5: (5, 0.03)}
#plt.tight_layout(pad=0)
plt.figure(figsize=(10 + len(tree.nodes), 7))
# assign positions
pos = {node: (2*node[0], heights[node[0]]-node[1]) for node in G.nodes}
options = {
"font_size": 12, #36,
"node_size": 3000,
"node_color": "white",
"edgecolors": "black",
"linewidths": 2,
"width": 2,
"with_labels": False,
}
edges = G.edges()
edge_colors = ['#c44e52' if G[u][v]['color'] == 'r' else 'black' for u,v in edges]
nx.draw_networkx(G, pos, **options, edges=edges, edge_color=edge_colors)
nx.draw_networkx_labels(G, pos, node_labels)
#nx.draw_networkx_edge_labels(G, pos, edge_labels)
#ax = fig.add_axes([0, 0, 1, 1])
ax = plt.gca()
ax.margins(0, 0.1)
ax.set_axis_off()
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.savefig(output, bbox_layout='tight', pad_inches=0)
def main(output, *xs):
if xs in [(), ('append',)]:
# good for appends
xs = [3,8,6,1,7,4,5,2,0,9]
elif xs in [('create',)]:
# good for creates
xs = [0,1,1,0,3,2,3,1,0,9]
# create tree
tree = LogTree()
action = 'append'
alphas = it.cycle(string.ascii_lowercase)
for x in xs:
if action != 'string':
try:
_ = int(x)
except ValueError:
action = x
continue
if action == 'string':
x = list(enumerate(x))
random.shuffle(x)
for i, c in x:
tree.append(i, c)
elif action == 'append':
tree.append(int(x), next(alphas))
elif action == 'create':
tree.create(int(x), next(alphas))
elif action == 'lookup':
tree.lookup(int(x))
elif action == 'traverse':
tree.traverse()
else:
print('unknown action %r' % action)
sys.exit(1)
render(tree, output)
if __name__ == "__main__":
import sys
main(*sys.argv[1:])
| true |
38b44b172f4905b657bdbb19207e4237623b0a8c | Python | RT-Thread/mpy-snippets | /examples/03.board/1.stm32l4_pandora/pin_num.py | UTF-8 | 775 | 3.015625 | 3 | [] | no_license | #
# Copyright (c) 2006-2019, RT-Thread Development Team
#
# SPDX-License-Identifier: MIT License
#
# Change Logs:
# Date Author Notes
# 2019-06-28 SummerGift first version
#
def pin_num(pin_index):
"""
Get the GPIO pin number through the GPIO index, format must be "P + <A~K> + number", such as PE7
"""
if pin_index[0] != 'P':
print("ERROR : Please pass in the correct parameters P + <A~K> + number, such as PE7")
return
if not pin_index[1].isupper():
print("ERROR : Please pass in the correct parameters P + <A~K> + number, such as PE7")
return
return (ord(pin_index[1]) - ord('A')) * 16 + int(pin_index[2:])
print("The pin number of PE7 is %d."%pin_num("PE7")) # Get the pin number for PE7
| true |
051ea84461366fe711fb38c978c4bea4f316a81b | Python | se210/tracy | /src/common/Library.py | UTF-8 | 13,033 | 2.890625 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2011 Nokia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
A class hierarchy that represents the functions in an API.
"""
import common
class Type(object):
"""A type declaration for a Variable or a Function."""
def __init__(self, declaration, dimension = None):
self.declaration = declaration
self.dimension = dimension
self.modifiers = []
self.qualifiers = []
self.symbolicConstants = {}
self.name = None
self.isObject = False # Does this type refer to an object?
if declaration:
for part in declaration.split(" "):
if part in ["signed", "unsigned"]:
self.modifiers.append(part)
elif part in ["const", "volatile"]:
self.qualifiers.append(part)
elif part not in ["*", "&"]:
self.name = part
self._hash = hash(self.name) ^ hash(tuple(self.dimension or []))
# Type of decoration this variable receives when shown to the user,
# e.g. "enum", "bitfield"
self.decoration = None
# Hint for the decoration, e.g. a regular expression that limits the allowed
# bitfield constants
self.decorationHint = None
def addSymbolicConstant(self, name, value):
"""
Add a symbolic constant (enumeration) for this type.
@param name: Name of symbolic constant
@param value: Constant value
"""
self.symbolicConstants[name] = value
def __hash__(self):
#return hash(self.name) ^ hash(tuple(self.dimension or []))
return self._hash
def __eq__(self, t):
return self.declaration == t.declaration and self.dimension == t.dimension
def __repr__(self):
if self.dimension is not None:
#return self.declaration + "".join(["[%s]" % d for d in self.dimension])
return self.declaration + "*" * len(self.dimension)
return self.declaration
def isPointer(self):
"""
Indicates whether or not this Type is a pointer instead of an integral type
"""
return "*" in self.declaration or self.dimension is not None
def getDereferencedType(self):
"""
@returns the dereferenced Type of this pointer. This Type must be a pointer.
"""
assert self.isPointer()
return Type(" ".join(self.qualifiers + self.modifiers + [self.name]))
def isConstant(self):
return self.declaration.startswith("const ")
class Variable(object):
"""A single variable."""
def __init__(self, name, type):
# Variable name
self.name = name
# Variable Type association
self.type = type
class MetaType(object):
    """A special type for a parameter, such as an array or an image."""
    def __init__(self, name):
        self.name = name
        # MetaValue instances describing this meta type, keyed by value name.
        self.values = {}
class MetaValue(object):
    """A special attribute of a Parameter, e.g. an image width or height for image parameters."""
    def __init__(self, name, default = None):
        self.name = name
        # The default value
        self.default = default
        # Conditional values are listed as (predicate_name, predicate_value, this_meta_value) tuples
        self.predicates = []
    def addPredicate(self, predicate, predicateValue, value):
        """Attach a conditional value: use *value* when *predicate* matches *predicateValue*."""
        self.predicates.append((predicate, predicateValue, value))
class StateRelation(object):
    """A state relation describes how a function or a parameter for a
    function affects the library state structure.

    Base class only; concrete behavior is expressed by the subclasses below.
    """
    pass
class StateRelationModify(StateRelation):
    """The function/parameter modifies the state entry at 'path'."""
    def __init__(self, path):
        self.path = path
class StateRelationGet(StateRelation):
    """The function/parameter reads the state entry at 'path'."""
    def __init__(self, path):
        self.path = path
class StateRelationSet(StateRelation):
    """The function/parameter sets the state entry at 'path'."""
    def __init__(self, path):
        self.path = path
class StateRelationCopy(StateRelation):
    """The function/parameter copies state from 'sourcePath' to 'destPath'."""
    def __init__(self, sourcePath, destPath):
        self.destPath = destPath
        self.sourcePath = sourcePath
class Parameter(Variable):
    """A parameter for a Function.

    Extends Variable with metadata about how the parameter interacts with
    the library state.
    """
    def __init__(self, name, type):
        Variable.__init__(self, name, type)
        self.metaType = None # MetaType instance or None
        self.stateRelation = None # StateRelation instance or None
        self.isTerminal = False # Does the state path point to a terminal?
        self.isOut = False # Is this parameter modified by the function?
class Function(object):
    """A single function in a Library."""
    def __init__(self, name, type):
        """
        @param name: Function name
        @param type: Return type of the function as a Type instance
        """
        self.name = name # Function name
        self.type = type # Function return type
        self.parameters = common.OrderedDict() # Function parameters
        self.libName = None # Link library name (DLL) or None
        self.headerName = None # C header name or None
        self.exportOrdinal = None # Ordinal to export at
        self.ordinal = None # Ordinal to link to
        self.body = None # Code for the function body
        self.linkage = None # C linkage specification
        self.isTerminator = False # Does the function uninitialize the API?
        self.isFrameMarker = False # Does the function cause a frame swap?
        self.isRenderCall = False # Does the function do rendering?
        self.passthrough = False # Should event logging be skipped for this function?
        self.generate = True # Should any code be generated for this function?
        self.language = None # Function interface language
        self.runtimeStateTracking = False # Should the state table be updated during runtime?
        self.staticLinkage = True # Is the function statically linked, i.e. has a valid symbol/ordinal
        self.hooks = {} # Code hooks for this function
        self.statePath = None # Function return value state path
        self.isTerminal = False # Does the state path point to a terminal?
        self.retStateRelation = None # State relation for the return value
        self.stateRelations = [] # A list of StateRelation mappings for this function
class Class(object):
    """A class of objects."""
    def __init__(self, name):
        self.name = name
        # Namespace the class lives in, or None — presumably a path of
        # namespace components; TODO confirm against callers.
        self.namespacePath = None
        self.overridable = False
class Library(object):
    """A collection of functions, classes and type definitions."""
    def __init__(self, name = None, id = 0):
        """
        @param name: Library name or None
        @param id: Numeric library identifier (the parameter name shadows the
                   builtin 'id'; kept for interface compatibility)
        """
        self.name = name
        self.id = id
        self.functions = common.OrderedDict()
        self.classes = common.OrderedDict()
        self.typeDefs = common.OrderedDict()
        self.hooks = {}
        self.language = None
        self.constants = {}
        # Map all C types to native types
        self.typeMap = {
            Type("void"): "void",
            Type("char"): "byte",
            Type("short"): "short",
            Type("int"): "int",
            Type("long"): "long",
            Type("float"): "float",
            Type("double"): "double",
            Type("enum"): "int",
        }
    def resolveType(self, type, ignoreModifiersAndQualifiers = True):
        """
        Determine the final type of a possibly intermediate type by
        processing type definitions.
        @param type: Type instance to resolve.
        @param ignoreModifiersAndQualifiers: when True, follow any typedef whose
               name or declaration matches; when False, require full Type equality.
        @returns the final Type.
        """
        if not ignoreModifiersAndQualifiers:
            # Exact matching: follow typedef chains keyed by Type equality.
            while type in self.typeDefs:
                type = self.typeDefs[type]
        else:
            # Loose matching: keep substituting until no typedef applies.
            while True:
                for t, n in self.typeDefs.items():
                    if t.name == type.name or t.declaration == type.declaration:
                        type = n
                        break
                else:
                    break
        return type
    def isPointerType(self, type):
        """
        Determine whether a given type is compiled to a pointer type.
        @param type: Type instance to examine.
        @retval True type is a pointer
        @retval False type is not a pointer
        """
        return type.isPointer() or self.resolveType(type).isPointer()
    def isObjectType(self, type):
        """
        Determine whether a given type is compiled to an object type.
        @param type: Type instance to examine.
        @retval True type is an object
        @retval False type is not an object
        """
        return type.isObject or self.resolveType(type).isObject
    def isIntegralType(self, type):
        """
        Determine whether a given type is compiled to an integral type.
        @param type: Type instance to examine.
        @retval True type is an integral type
        @retval False type is not an integral type
        """
        return self.resolveType(type).declaration.split(" ")[-1] in ["char", "short", "int", "long"]
    def getNativeType(self, type, ignoreModifiersAndQualifiers = True):
        """
        Get the native equivalent of a Type instance.
        @param type: Type instance to convert to native type
        @returns the name of the matching native type or None
        """
        def lookup(type):
            # Resolve through self.typeMap, optionally matching on name only.
            if not ignoreModifiersAndQualifiers:
                return self.typeMap[type]
            else:
                for t, n in self.typeMap.items():
                    if t.name == type.name:
                        return n
                raise KeyError(type)
        # Is it an object?
        if self.isObjectType(type):
            return "object"
        # Check whether there's an exact mapping for this type
        try:
            return self.typeMap[type]
        except KeyError:
            pass
        # Is it a pointer?
        if self.isPointerType(type):
            return "pointer"
        # Resolve any type definitions
        type = self.resolveType(type, ignoreModifiersAndQualifiers)
        # Try again with the resolved type
        try:
            return lookup(type)
        except KeyError:
            return None
    def merge(self, library):
        """
        Merge the functions and other definitions found in another library into this library.
        @param library: Source library for new functions and definitions
        @raises ValueError: when a constant, typedef or function signature
                conflicts with a previously merged definition
        """
        # Update constants
        for name, value in library.constants.items():
            if name in self.constants and value != self.constants[name]:
                raise ValueError("Constant '%s' value '%s' does not match previously defined value '%s'" % (name, value, self.constants[name]))
            self.constants[name] = value
        # Update typedefs
        for name, value in library.typeDefs.items():
            if name in self.typeDefs and value.declaration != self.typeDefs[name].declaration:
                raise ValueError("Type definition of '%s' as '%s' does not match previously defined '%s'" % (name, value, self.typeDefs[name]))
            self.typeDefs[name] = value
        # Update functions
        for f in library.functions.values():
            # If the function is already in the library, make sure the signature matches
            if f.name in self.functions:
                f2 = self.functions[f.name]
                if f.type.declaration != f2.type.declaration:
                    # BUGFIX: the messages previously interpolated where(pos),
                    # but neither 'where' nor 'pos' exists in this scope, so a
                    # signature mismatch raised NameError instead of ValueError.
                    raise ValueError("Return type '%s' of function '%s' does not match previously declared '%s'." % \
                        (f.type.declaration, f.name, f2.type.declaration))
                for p1, p2 in zip(f.parameters.values(), f2.parameters.values()):
                    if p1.type.declaration != p2.type.declaration:
                        raise ValueError("Type '%s' of function '%s' parameter '%s' does not match previously declared '%s'." % \
                            (p1.type.declaration, f.name, p1.name, p2.type.declaration))
            self.functions[f.name] = f
| true |
af1d0e3cf48bf37997c8497eaa43cd7935ef014f | Python | Shaligram/spworks | /multi/echoclient.py | UTF-8 | 1,403 | 2.53125 | 3 | [] | no_license | import socket
import IN
import sys
import time
import timeit
# NOTE(review): with "i = 1" and "while i < 2", the body executes exactly
# once — presumably a leftover from a multi-round version.
i = 1
while i < 2:
    i += 1
    # Create a UDP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind to the local address given as argv[1] (ephemeral port);
    # the echo server is argv[2]:argv[3].
    sock.bind((sys.argv[1], 0))
    server_address = (sys.argv[2], int(sys.argv[3]))
    # 1000-byte probe payload.
    message = 'x' * 1000
    try:
        # Send data
        #print >>sys.stderr, 'sending "%s"' % message
        sendTime = time.time()
        sent = sock.sendto(message, server_address)
        # Receive response
        data, server = sock.recvfrom(4096)
        recvTime = time.time()
        #print >>sys.stderr, 'received "%d"' % len(data)
        # After a 2 s pause, measure three more round trips back-to-back to
        # compare a "cold" echo with immediately repeated ones.
        time.sleep(2)
        send2Time = time.time()
        sent = sock.sendto(message, server_address)
        data, server = sock.recvfrom(4096)
        recv2Time = time.time()
        send3Time = time.time()
        sent = sock.sendto(message, server_address)
        data, server = sock.recvfrom(4096)
        recv3Time = time.time()
        send4Time = time.time()
        sent = sock.sendto(message, server_address)
        data, server = sock.recvfrom(4096)
        recv4Time = time.time()
        # Report the four measured round-trip times (Python 2 print syntax).
        print >> sys.stderr, "T %.4f %.4f %.4f %.4f" %(recvTime-sendTime, recv2Time-send2Time, recv3Time-send3Time, recv4Time-send4Time)
    finally:
        #print >>sys.stderr, 'closing socket'
        sock.close()
| true |
226f3e379753a10f663aecf1b044582cef2008dc | Python | j-bennet/cracking | /2.0_linked_list_basics.py | UTF-8 | 208 | 3.234375 | 3 | [] | no_license | from linked_list import Node, LinkedList
# Build a list holding 1..10, then remove a few values one at a time,
# printing the list after every removal (Python 2 print syntax).
ll = LinkedList(range(1, 11))
to_remove = [2, 1, 8]
print 'Original:'
ll.output()
for i in to_remove:
    print 'Removed ' + str(i) + ':'
    ll.remove(i)
    ll.output()
| true |
7eb11421abdd7c23a78638749b0139cd3a0a3098 | Python | MiltonCastilloG/gplsiScrapper | /director.py | UTF-8 | 1,244 | 2.78125 | 3 | [] | no_license | from scrapper import simple_get
from questions_parser import *
from questions_xml_generator import *
from regex_repository import *
def generate_questionary_xml(url, correct_answers_path):
    """Scrape the quiz page at *url*, pair it with the correct answers read
    from *correct_answers_path*, and write the result as an XML file.
    Returns whatever create_xml_file returns for the generated file."""
    page = get_page(url)
    parsed = parse_questions(page, get_answer_list(correct_answers_path))
    clean_title = eliminate_special_characters(parsed["title"])
    xml_body = generate_xml_from_dictionary(parsed["content"], clean_title)
    file_title = adapt_title_for_windows(parsed["title"])
    output_folder = get_folder_for_xml()
    return create_xml_file(file_title, xml_body, file_path=output_folder)
def get_page(url):
    """Fetch *url* via simple_get; raise when nothing came back."""
    response = simple_get(url)
    if response is None:
        raise Exception('Error retrieving contents at {}'.format(url))
    return response
def get_answer_list(correct_answers_path):
    """Read the answers file and parse it; None when the file yields nothing."""
    raw_text = get_text_from_file(correct_answers_path)
    return get_correct_answer_list(raw_text) if raw_text is not None else None
def parse_questions(content, answer_list):
    # Thin wrapper: delegate HTML question extraction to questions_parser.
    # answer_list may be None when no answers file was readable.
    return get_questions_in_html(content, answer_list)
def build_xml(parsed_content, title):
    # Thin wrapper: delegate XML generation to questions_xml_generator.
    return generate_xml_from_dictionary(parsed_content, title)
def get_folder_for_xml():
    # Delegate to questions_xml_generator for the XML output folder.
    return create_xml_folder()
| true |
382412168366888ff9f5bd07612c454b6ca34ab5 | Python | numberonewastefellow/projects | /small_ds/readingFileinChunks.py | UTF-8 | 1,063 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #https://stackoverflow.com/questions/7167008/efficiently-finding-the-last-line-in-a-text-file
def last_line(in_file, block_size=1024, ignore_ending_newline=False):
    """Return the last line of *in_file* by scanning backwards in blocks.

    The file must be seekable; on Python 3, seeking relative to the end
    requires a binary-mode file, in which case bytes are returned.

    @param in_file: open, seekable file object
    @param block_size: number of bytes/characters read per backwards step
    @param ignore_ending_newline: when True, a single trailing newline is
           stripped before searching, so "a\\nb\\n" yields "b" instead of ""
    """
    # BUGFIX: 'os' was used but never imported in this file, so every call
    # raised NameError.
    import os
    in_file.seek(0, os.SEEK_END)
    in_file_length = in_file.tell()
    # Newline/suffix values are chosen (bytes vs. str) from the first read,
    # fixing the original's str-only comparisons that broke on binary files.
    suffix = None
    newline = None
    seek_offset = 0
    while -seek_offset < in_file_length:
        # Step one block further back from the end.
        seek_offset -= block_size
        if -seek_offset > in_file_length:
            # Limit if we ran out of file (can't seek backward from start).
            block_size -= -seek_offset - in_file_length
            if block_size == 0:
                break
            seek_offset = -in_file_length
        in_file.seek(seek_offset, os.SEEK_END)
        buf = in_file.read(block_size)
        if newline is None:
            if isinstance(buf, bytes):
                newline, suffix = b'\n', b''
            else:
                newline, suffix = '\n', ''
        # Only the very first (end-most) block may carry the trailing newline.
        if ignore_ending_newline and seek_offset == -block_size and buf[-1:] == newline:
            buf = buf[:-1]
        # Search for line end.
        pos = buf.rfind(newline)
        if pos != -1:
            # Found line end.
            return buf[pos+1:] + suffix
        suffix = buf + suffix
    # One-line (or empty) file.
    return suffix if suffix is not None else ''
c854056dfd38217063d2bf623fdd73c4b137e20e | Python | DenSinH/AdventOfCode2019 | /day10/day10.py | UTF-8 | 2,133 | 2.953125 | 3 | [] | no_license | from math import gcd, atan2, pi
def see(pos, asteroids):
    # Generator: for every direction (dx, dy) in lowest terms (gcd == 1),
    # yields 1 if an asteroid is visible from *pos* along that ray and 0
    # otherwise, so sum(see(pos, m)) counts visible asteroids.  When *pos*
    # itself holds no asteroid the generator stops immediately — the
    # "return 0" inside a generator just ends iteration; 0 is never yielded.
    if asteroids[pos[1]][pos[0]] != "#":
        return 0
    for dy in range(-pos[1], len(asteroids) - pos[1]):
        for dx in range(-pos[0], len(asteroids[0]) - pos[0]):
            if gcd(dx, dy) != 1:
                continue
            # Walk outward in steps of (dx, dy); the ray "sees" something if
            # any in-grid step lands on '#'.  The step bound uses float
            # division, so int(min(...)) over-approximates and the bounds
            # check inside the any(...) does the real clipping.
            yield int(any(asteroids[pos[1] + d*dy][pos[0] + d*dx] == "#" for d in range(1, int(min(abs(len(asteroids[0]) / dx),
                                                                                                   abs(len(asteroids) / dy))) + 1 if dx * dy != 0 else max(len(asteroids), len(asteroids[0])))
                          if 0 <= pos[1] + d*dy < len(asteroids) and 0 <= pos[0] + d*dx < len(asteroids[0])))
# Part 1: pick the grid cell that sees the most asteroids; keep both the
# winning position (used again in part 2) and its visibility count.
pos, p1 = (lambda asteroids: max([((x, y), sum(see((x, y), asteroids)))
                                 for y in range(len(asteroids)) for x in range(len(asteroids[0]))],
                                 key=lambda o: o[1]))(open("input.txt", "r").readlines())
print("PART 1", p1)
def vaporize(pos, asteroids):
    # Destroy asteroids in laser order (sweeping by angle), repeatedly, and
    # return the coordinates of the 200th asteroid vaporized.  *asteroids*
    # is mutated in place ('#' -> '.').
    c = 0
    # All directions in lowest terms; (0, 0) excluded.
    directions = [(dx, dy) for dx in range(-len(asteroids[0]), len(asteroids[0])) for dy in range(-len(asteroids), len(asteroids))
                  if gcd(dx, dy) == 1 and not dx == dy == 0]
    while True:
        # Sort directions by sweep angle; atan2(dx, dy) % 2*pi orders the
        # sweep starting straight "up" and going clockwise.
        for (dx, dy) in sorted(directions, key=lambda _d: atan2(*_d) % (2 * pi)):
            for d in range(1, int(min(abs(len(asteroids[0]) / dx), abs(len(asteroids) / dy))) + 1 if dx * dy != 0 else max(len(asteroids), len(asteroids[0]))):
                # Note the y axis is inverted here (pos[1] - d*dy).
                px, py = pos[0] + d*dx, pos[1] - d*dy
                try:
                    if 0 <= px < len(asteroids[0]) and 0 <= py < len(asteroids):
                        # Only the first asteroid along a ray is hit per sweep.
                        if asteroids[py][px] == "#":
                            asteroids[py][px] = "."
                            c += 1
                            if c == 200:
                                return px, py
                            break
                except IndexError:
                    # Rows may be ragged (trailing newline chars); treat an
                    # out-of-row index as end of the ray.
                    break
# Part 2: answer is 100*x + y for the 200th asteroid vaporized from 'pos'.
print("PART 2", (lambda x, y: 100*x + y)(*vaporize(pos, [list(l) for l in open("input.txt", "r").readlines()])))
| true |
4cd1989b2772d4315e99f3e01bd820ad29aac62c | Python | shahnawaz-pabon/Python-with-HackerRank | /Problems' Solution/SwapCase.py | UTF-8 | 98 | 2.78125 | 3 | [] | no_license | if __name__ == '__main__':
inp = str(input())
inp = inp.swapcase()
print(inp) | true |
66b69240e0fa6403a09a31399891c83422a48faa | Python | comtihon/catcher_modules | /test/resources/airflow_hello_world.py | UTF-8 | 596 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | from datetime import datetime
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
def print_hello():
    """Task callable executed by the PythonOperator; returns a fixed greeting."""
    greeting = 'Hello world!'
    return greeting
# DAG definition: no schedule (manual trigger only), no catchup of past runs.
dag = DAG('hello_world',
          description='Simple tutorial DAG',
          schedule_interval=None,
          start_date=datetime(2020, 1, 1),
          catchup=False)
# Two tasks: a retryable no-op followed by the Python greeting task.
dummy_operator = DummyOperator(task_id='dummy_task', retries=3, dag=dag)
hello_operator = PythonOperator(task_id='hello_task', python_callable=print_hello, dag=dag)
# Ordering: dummy_task must run before hello_task.
dummy_operator >> hello_operator
| true |
39fcd5cd53b58f9677425c8a2e6928eed6df17b8 | Python | jskim062/calculator | /01_calc/hello.py | UTF-8 | 507 | 3.1875 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# Minimal window with a text entry and a button that echoes the entry's
# contents in a message box.
win = Tk ()
win.title("Raspberry Pi UI")
win.geometry('200x100+200+200')

def clickMe():
    # Show the current entry text; wired to the button below.
    messagebox.showinfo("Button Clicked", entry_text.get())

# BUGFIX: renamed from 'str', which shadowed the builtin str type for the
# rest of the module.
entry_text = StringVar()
textbox = ttk.Entry(win, width=20, textvariable=entry_text)
textbox.grid(column = 0 , row = 0)
action = ttk.Button(win, text="Click Me", command=clickMe)
action.grid(column=0, row=1)
win.mainloop()
#[출처] TKinter textbox 텍스트박스 위젯|작성자 조만간 빠마장 | true |
3be902cd18f2040e826748c2a00e62d845865861 | Python | ufwt/dizzy | /dizzy/tests/test_dizzy.py | UTF-8 | 15,984 | 2.609375 | 3 | [
"BSD-2-Clause"
] | permissive | # test_dizz.py
#
# Copyright 2017 Daniel Mende <mail@c0decafe.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unittest import TestCase, main
from dizzy.tests import first
from dizzy.dizz import Dizz, load_dizz
from dizzy.value import Value
from dizzy.objects import START, END
from dizzy.objects.field import Field
from dizzy.functions.length import length
from dizzy.functions import BOTH
class TestDizzy(TestCase):
    """Unit tests for dizzy.dizz.Dizz: construction, fuzzing iteration,
    start offsets, file loading, nesting, assignment and START/END access."""
    # Non-fuzzing construction yields the concatenated field values.
    def test_init(self):
        objects = list()
        objects.append(Field("test0", b"\x01\xff", 10, "std"))
        objects.append(Field("test1", b"\xab", 8, "std"))
        objects.append(Field("test2", b"\x00\xff", 12, "std"))
        d = Dizz("test", objects, fuzz="none")
        self.assertEqual(first(d), Value(b'\x1f\xfa\xb0\xff', 30))
    # Full "std" fuzzing pass compared against a golden list of outputs.
    def test_iter(self):
        expected = [Value(b'\x00""w3\x00!', 49), Value(b'\x00\x00\x00w3\x00!', 49), Value(b'\x00\x00\x02w3\x00!', 49),
                    Value(b'\x00\x00\x04w3\x00!', 49), Value(b'\x00\x00\x06w3\x00!', 49),
                    Value(b'\x00\x00\x08w3\x00!', 49), Value(b'\x01\xff\xf6w3\x00!', 49),
                    Value(b'\x01\xff\xf8w3\x00!', 49), Value(b'\x01\xff\xfaw3\x00!', 49),
                    Value(b'\x01\xff\xfcw3\x00!', 49), Value(b'\x01\xff\xfew3\x00!', 49),
                    Value(b'\x00\xff\xf8w3\x00!', 49), Value(b'\x00\xff\xfaw3\x00!', 49),
                    Value(b'\x00\xff\xfcw3\x00!', 49), Value(b'\x00\xff\xfew3\x00!', 49),
                    Value(b'\x00\x02\x00w3\x00!', 49), Value(b'\x00\x04\x00w3\x00!', 49),
                    Value(b'\x00\x06\x00w3\x00!', 49), Value(b'\x00\x08\x00w3\x00!', 49), Value(b'\x00""33\x00!', 49),
                    Value(b'\x00""33\x00!', 49), Value(b'\x00""73\x00!', 49), Value(b'\x00""73\x00!', 49),
                    Value(b'\x00"";3\x00!', 49), Value(b'\x00"#\xf73\x00!', 49), Value(b'\x00"#\xfb3\x00!', 49),
                    Value(b'\x00"#\xfb3\x00!', 49), Value(b'\x00"#\xff3\x00!', 49), Value(b'\x00"#\xff3\x00!', 49),
                    Value(b'\x00""\xfb3\x00!', 49), Value(b'\x00""\xfb3\x00!', 49), Value(b'\x00""\xff3\x00!', 49),
                    Value(b'\x00""\xff3\x00!', 49), Value(b'\x00""D\x00\x00!', 49), Value(b'\x00""D\x01\x00!', 49),
                    Value(b'\x00""D\x02\x00!', 49), Value(b'\x00""D\x03\x00!', 49), Value(b'\x00""D\x04\x00!', 49),
                    Value(b'\x00""E\xfb\x00!', 49), Value(b'\x00""E\xfc\x00!', 49), Value(b'\x00""E\xfd\x00!', 49),
                    Value(b'\x00""E\xfe\x00!', 49), Value(b'\x00""E\xff\x00!', 49), Value(b'\x00""D\xfc\x00!', 49),
                    Value(b'\x00""D\xfd\x00!', 49), Value(b'\x00""D\xfe\x00!', 49), Value(b'\x00""D\xff\x00!', 49),
                    Value(b'\x00""E\x00\x00!', 49), Value(b'\x00""F\x00\x00!', 49), Value(b'\x00""G\x00\x00!', 49),
                    Value(b'\x00""D\x00\x00!', 49), Value(b'\x00DD\x88\x00\x00"', 50),
                    Value(b'\x00DD\x88\x01\x00"', 50), Value(b'\x00DD\x88\x02\x00"', 50),
                    Value(b'\x00DD\x88\x03\x00"', 50), Value(b'\x00DD\x88\x04\x00"', 50),
                    Value(b'\x00DD\x8b\xfb\x00"', 50), Value(b'\x00DD\x8b\xfc\x00"', 50),
                    Value(b'\x00DD\x8b\xfd\x00"', 50), Value(b'\x00DD\x8b\xfe\x00"', 50),
                    Value(b'\x00DD\x8b\xff\x00"', 50), Value(b'\x00DD\x89\xfc\x00"', 50),
                    Value(b'\x00DD\x89\xfd\x00"', 50), Value(b'\x00DD\x89\xfe\x00"', 50),
                    Value(b'\x00DD\x89\xff\x00"', 50), Value(b'\x00DD\x89\x00\x00"', 50),
                    Value(b'\x00DD\x8a\x00\x00"', 50), Value(b'\x00DD\x8b\x00\x00"', 50),
                    Value(b'\x00DD\x8c\x00\x00"', 50), Value(b'\x00\x88\x89\x10\x00\x00#', 51),
                    Value(b'\x00\x88\x89\x10\x01\x00#', 51), Value(b'\x00\x88\x89\x10\x02\x00#', 51),
                    Value(b'\x00\x88\x89\x10\x03\x00#', 51), Value(b'\x00\x88\x89\x10\x04\x00#', 51),
                    Value(b'\x00\x88\x89\x17\xfb\x00#', 51), Value(b'\x00\x88\x89\x17\xfc\x00#', 51),
                    Value(b'\x00\x88\x89\x17\xfd\x00#', 51), Value(b'\x00\x88\x89\x17\xfe\x00#', 51),
                    Value(b'\x00\x88\x89\x17\xff\x00#', 51), Value(b'\x00\x88\x89\x13\xfc\x00#', 51),
                    Value(b'\x00\x88\x89\x13\xfd\x00#', 51), Value(b'\x00\x88\x89\x13\xfe\x00#', 51),
                    Value(b'\x00\x88\x89\x13\xff\x00#', 51), Value(b'\x00\x88\x89\x11\x00\x00#', 51),
                    Value(b'\x00\x88\x89\x12\x00\x00#', 51), Value(b'\x00\x88\x89\x13\x00\x00#', 51),
                    Value(b'\x00\x88\x89\x14\x00\x00#', 51), Value(b'\x01\x11\x12 \x00\x00$', 52),
                    Value(b'\x01\x11\x12 \x01\x00$', 52), Value(b'\x01\x11\x12 \x02\x00$', 52),
                    Value(b'\x01\x11\x12 \x03\x00$', 52), Value(b'\x01\x11\x12 \x04\x00$', 52),
                    Value(b'\x01\x11\x12/\xfb\x00$', 52), Value(b'\x01\x11\x12/\xfc\x00$', 52),
                    Value(b'\x01\x11\x12/\xfd\x00$', 52), Value(b'\x01\x11\x12/\xfe\x00$', 52),
                    Value(b'\x01\x11\x12/\xff\x00$', 52), Value(b"\x01\x11\x12'\xfc\x00$", 52),
                    Value(b"\x01\x11\x12'\xfd\x00$", 52), Value(b"\x01\x11\x12'\xfe\x00$", 52),
                    Value(b"\x01\x11\x12'\xff\x00$", 52), Value(b'\x01\x11\x12!\x00\x00$', 52),
                    Value(b'\x01\x11\x12"\x00\x00$', 52), Value(b'\x01\x11\x12#\x00\x00$', 52),
                    Value(b'\x01\x11\x12$\x00\x00$', 52), Value(b'\x02"$@\x00\x00%', 53),
                    Value(b'\x02"$@\x01\x00%', 53), Value(b'\x02"$@\x02\x00%', 53), Value(b'\x02"$@\x03\x00%', 53),
                    Value(b'\x02"$@\x04\x00%', 53), Value(b'\x02"$_\xfb\x00%', 53), Value(b'\x02"$_\xfc\x00%', 53),
                    Value(b'\x02"$_\xfd\x00%', 53), Value(b'\x02"$_\xfe\x00%', 53), Value(b'\x02"$_\xff\x00%', 53),
                    Value(b'\x02"$O\xfc\x00%', 53), Value(b'\x02"$O\xfd\x00%', 53), Value(b'\x02"$O\xfe\x00%', 53),
                    Value(b'\x02"$O\xff\x00%', 53), Value(b'\x02"$A\x00\x00%', 53), Value(b'\x02"$B\x00\x00%', 53),
                    Value(b'\x02"$C\x00\x00%', 53), Value(b'\x02"$D\x00\x00%', 53), Value(b'\x04DH\x80\x00\x00&', 54),
                    Value(b'\x04DH\x80\x01\x00&', 54), Value(b'\x04DH\x80\x02\x00&', 54),
                    Value(b'\x04DH\x80\x03\x00&', 54), Value(b'\x04DH\x80\x04\x00&', 54),
                    Value(b'\x04DH\xbf\xfb\x00&', 54), Value(b'\x04DH\xbf\xfc\x00&', 54),
                    Value(b'\x04DH\xbf\xfd\x00&', 54), Value(b'\x04DH\xbf\xfe\x00&', 54),
                    Value(b'\x04DH\xbf\xff\x00&', 54), Value(b'\x04DH\x9f\xfc\x00&', 54),
                    Value(b'\x04DH\x9f\xfd\x00&', 54), Value(b'\x04DH\x9f\xfe\x00&', 54),
                    Value(b'\x04DH\x9f\xff\x00&', 54), Value(b'\x04DH\x81\x00\x00&', 54),
                    Value(b'\x04DH\x82\x00\x00&', 54), Value(b'\x04DH\x83\x00\x00&', 54),
                    Value(b'\x04DH\x84\x00\x00&', 54), Value(b"\x08\x88\x91\x00\x00\x00'", 55),
                    Value(b"\x08\x88\x91\x00\x01\x00'", 55), Value(b"\x08\x88\x91\x00\x02\x00'", 55),
                    Value(b"\x08\x88\x91\x00\x03\x00'", 55), Value(b"\x08\x88\x91\x00\x04\x00'", 55),
                    Value(b"\x08\x88\x91\x7f\xfb\x00'", 55), Value(b"\x08\x88\x91\x7f\xfc\x00'", 55),
                    Value(b"\x08\x88\x91\x7f\xfd\x00'", 55), Value(b"\x08\x88\x91\x7f\xfe\x00'", 55),
                    Value(b"\x08\x88\x91\x7f\xff\x00'", 55), Value(b"\x08\x88\x91?\xfc\x00'", 55),
                    Value(b"\x08\x88\x91?\xfd\x00'", 55), Value(b"\x08\x88\x91?\xfe\x00'", 55),
                    Value(b"\x08\x88\x91?\xff\x00'", 55), Value(b"\x08\x88\x91\x01\x00\x00'", 55),
                    Value(b"\x08\x88\x91\x02\x00\x00'", 55), Value(b"\x08\x88\x91\x03\x00\x00'", 55),
                    Value(b"\x08\x88\x91\x04\x00\x00'", 55), Value(b'\x11\x11"\x00\x00\x00(', 56),
                    Value(b'\x11\x11"\x00\x01\x00(', 56), Value(b'\x11\x11"\x00\x02\x00(', 56),
                    Value(b'\x11\x11"\x00\x03\x00(', 56), Value(b'\x11\x11"\x00\x04\x00(', 56),
                    Value(b'\x11\x11"\xff\xfb\x00(', 56), Value(b'\x11\x11"\xff\xfc\x00(', 56),
                    Value(b'\x11\x11"\xff\xfd\x00(', 56), Value(b'\x11\x11"\xff\xfe\x00(', 56),
                    Value(b'\x11\x11"\xff\xff\x00(', 56), Value(b'\x11\x11"\x7f\xfc\x00(', 56),
                    Value(b'\x11\x11"\x7f\xfd\x00(', 56), Value(b'\x11\x11"\x7f\xfe\x00(', 56),
                    Value(b'\x11\x11"\x7f\xff\x00(', 56), Value(b'\x11\x11"\x01\x00\x00(', 56),
                    Value(b'\x11\x11"\x02\x00\x00(', 56), Value(b'\x11\x11"\x03\x00\x00(', 56),
                    Value(b'\x11\x11"\x04\x00\x00(', 56), Value(b'\x00""w3\x00\x00', 49),
                    Value(b'\x00""w3\x00\x01', 49), Value(b'\x00""w3\x00\x02', 49), Value(b'\x00""w3\x00\x03', 49),
                    Value(b'\x00""w3\x00\x04', 49), Value(b'\x00""w3\xff\xfb', 49), Value(b'\x00""w3\xff\xfc', 49),
                    Value(b'\x00""w3\xff\xfd', 49), Value(b'\x00""w3\xff\xfe', 49), Value(b'\x00""w3\xff\xff', 49),
                    Value(b'\x00""w3\x7f\xfc', 49), Value(b'\x00""w3\x7f\xfd', 49), Value(b'\x00""w3\x7f\xfe', 49),
                    Value(b'\x00""w3\x7f\xff', 49), Value(b'\x00""w3\x01\x00', 49), Value(b'\x00""w3\x02\x00', 49),
                    Value(b'\x00""w3\x03\x00', 49), Value(b'\x00""w3\x04\x00', 49)]
        objects = list()
        objects.append(Field("test0", b"\x11\x11", fuzz="std"))
        objects.append(Field("test1", b"\x22", fuzz="std"))
        objects.append(Field("test2", b"\x33\x33", slice(9, 17), fuzz="std"))
        objects.append(Field("length", b"\x00\x00", fuzz="std"))
        functions = list()
        functions.append(length("length", "test0", "test2"))
        d = Dizz("test", objects, functions, fuzz="std")
        self.assertEqual([i for i in d], expected)
    # Dizz.length() must agree with the number of values actually yielded.
    def test_length(self):
        objects = list()
        objects.append(Field("test0", b"\x01\xff", 10, "std"))
        objects.append(Field("test1", b"\xab", 8, "std"))
        objects.append(Field("test2", b"\x00\xff", 12, "std"))
        d = Dizz("test", objects, fuzz="std")
        self.assertEqual(len(list(d)), d.length())
    # start_at=i must resume the "std" sequence exactly at element i.
    def test_start_at_std(self):
        objects = list()
        objects.append(Field("test0", b"\x01\xff", 10, "std"))
        objects.append(Field("test1", b"\xab", 8, "std"))
        objects.append(Field("test2", b"\x00\xff", 12, "std"))
        objects.append(Field("length", b"\x00\x00", fuzz="std"))
        functions = list()
        functions.append(length("length"))
        excepted = list(Dizz("test", objects, functions, fuzz="std"))
        for i in range(len(excepted)):
            got = list(Dizz("test", objects, functions, fuzz="std", start_at=i))
            self.assertEqual(excepted[i:], got)
    # Same check under "full" fuzzing.
    # NOTE(review): range(len(excepted), 4) is empty whenever
    # len(excepted) >= 4, so the loop body may never execute — possibly
    # range(0, len(excepted), 4) was intended.
    def test_start_at_full(self):
        objects = list()
        objects.append(Field("test0", b"\x01\xff", 10, "std"))
        objects.append(Field("test1", b"\xab", 8, "std"))
        objects.append(Field("test2", b"\x00\xff", 12, "std"))
        excepted = list(Dizz("test", objects, fuzz="full"))
        for i in range(len(excepted), 4):
            self.assertEqual(excepted[i:], list(Dizz("test", objects, fuzz="full", start_at=i)))
    # Loading a .dizz description from disk produces the expected first value.
    def test_load(self):
        expected = Value(b'\x00\x00\x00\x00\x00\n\x18\x00\x00\x00', 80)
        d = load_dizz("test", "modules_src/demo/demo/dizz/demo.dizz")
        self.assertEqual(first(d), expected)
    # A Dizz nested inside another Dizz: hooks on both levels may rewrite
    # fields, including fields of the imported (inner) Dizz.
    def test_import(self):
        expected = [Value(b'\n\xed\xcc', 20), Value(b'\x02\xed\xcc', 20), Value(b'\x06\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\x0e\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xec\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xee\xcc', 20), Value(b'\n\xef\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20), Value(b'\n\xed\xcc', 20),
                    Value(b'\n\xed\xcc', 20)]
        objects = list()
        objects.append(Field("test0", b"\x01", 2, "full"))
        objects.append(Field("test1", b"\xff", 8, "std"))
        def func1(dizzy_iterator):
            dizzy_iterator["test1"] = b"\xaa"
        d1 = Dizz("test_import", objects, [(func1, BOTH)], fuzz="std")
        objects = list()
        objects.append(Field("test0", b"\x02", 2, "full"))
        objects.append(Field("test1", b"\xff", 8, "std"))
        objects.append(d1)
        def func0(dizzy_iterator):
            dizzy_iterator["test1"] = b"\xbb"
            dizzy_iterator["test_import"]["test1"] = b"\xcc"
        d0 = Dizz("test", objects, [(func0, BOTH)], fuzz="std")
        self.assertEqual(list(d0), expected)
    # Assigning an int to a field encodes it using the field's endianness.
    def test_int_assignment(self):
        objects = list()
        objects.append(Field("test0", b"\xaa", 10, "full", endian="<"))
        objects.append(Field("test1", b"\xff", 8, "std"))
        d0 = Dizz("test", objects, fuzz="std")
        d0_iter = iter(d0)
        d0_iter["test0"] = 1337
        self.assertEqual(d0_iter["test0"].byte, b'9\x05')
    # START/END index the first/last object of the iterator.
    def test_START_END(self):
        objects = list()
        objects.append(Field("test0", b"\x00"))
        objects.append(Field("test1", b"\xff\xff"))
        objects.append(Field("test2", b"\xaa"))
        d = Dizz("test", objects, [length("test1", endian="<")], fuzz="std")
        d_iter = iter(d)
        next(d_iter)
        self.assertEqual(d_iter[START], Value(b"\x00"))
        self.assertEqual(d_iter["test1"], Value(b"\x20\x00"))
        self.assertEqual(d_iter[END], Value(b"\xaa"))
if __name__ == '__main__':
    # Allow running this test module directly via unittest's CLI runner.
    main()
| true |
6d559f4940104ebd5278eadab9cb5b2b7c265032 | Python | buddydeepansh/LEtsUpgrade_Python_Batch7 | /Day8.py | UTF-8 | 704 | 4.1875 | 4 | [] | no_license | # decorator wala program
def outer_decorator_function(fun):
print("We are inside Outer decorator function.")
a = int(input("Enter a range"))
def inner_decorated_function():
print("Inside decotrated function")
fun(a)
return inner_decorated_function()
def fibbo(n):
    """Print the first *n* Fibonacci numbers, starting 0, 1, 1, 2, 3, ...

    @param n: how many numbers to print (non-negative int)
    """
    a, b = 0, 1
    # Tuple swap replaces the original's three-step shuffle and the dead
    # "c = a + b" computed before the loop ever ran.
    for _ in range(n):
        print(a)
        a, b = b, a + b
# NOTE(review): executes at import time — outer_decorator_function prompts
# for input and calls fibbo right away; since it returns the inner call's
# result (None), the name fibbo is rebound to None afterwards.
fibbo = outer_decorator_function(fibbo)
# Exception handling demo: writing to a file opened read-only fails.
file = open("abx.txt", "r")
try:
    file.write("This is the text we are writing")
except OSError:
    # write() on a mode-"r" handle raises io.UnsupportedOperation (an
    # OSError subclass); the original bare "except:" was narrowed so
    # unrelated errors are no longer swallowed.
    print("The file is opened in the read mode so you cannot write in it.")
finally:
    # The original never closed the handle; release it here.
    file.close()
3db94d6b496679b10b732fbd57f535cefc552b97 | Python | abretaud/galaxy | /lib/galaxy/tools/toolbox/lineages/stock.py | UTF-8 | 1,632 | 2.703125 | 3 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import threading
from distutils.version import LooseVersion
from .interface import ToolLineage
from .interface import ToolLineageVersion
class StockLineage(ToolLineage):
    """Lineage for plain tools loaded directly from the file system.

    Version ordering is determined solely by distutils' LooseVersion
    naming scheme.
    """
    lineages_by_id = {}
    lock = threading.Lock()

    def __init__(self, tool_id, **kwds):
        self.tool_id = tool_id
        self.tool_versions = set()

    @staticmethod
    def from_tool(tool):
        """Return the (shared) lineage for *tool*, recording its version."""
        registry = StockLineage.lineages_by_id
        with StockLineage.lock:
            try:
                lineage = registry[tool.id]
            except KeyError:
                lineage = registry[tool.id] = StockLineage(tool.id)
        lineage.register_version(tool.version)
        return lineage

    def register_version(self, tool_version):
        assert tool_version is not None
        self.tool_versions.add(tool_version)

    def get_versions(self, reverse=False):
        """Return ToolLineageVersion objects sorted by LooseVersion order."""
        wrapped = (ToolLineageVersion(self.tool_id, v) for v in self.tool_versions)
        # LooseVersion defines the comparison used for version ordering.
        return sorted(wrapped, key=_to_loose_version, reverse=reverse)

    def to_dict(self):
        return {
            "tool_id": self.tool_id,
            "tool_versions": list(self.tool_versions),
            "lineage_type": "stock",
        }
def _to_loose_version( tool_lineage_version ):
version = str( tool_lineage_version.version )
return LooseVersion( version )
| true |
f9493469e4abad8ed0416cbb01905121a48e07e1 | Python | DarkShadow4/Python | /clase/python/str6.py | UTF-8 | 107 | 3.453125 | 3 | [
"MIT"
] | permissive | palabra = raw_input("Introduce palabra:")
if palabra == palabra[::-1]:
print "si"
else:
print "no"
| true |
c88b9c9f7f268dcc6c22fa02034d704793ad3382 | Python | hiddeagema/programming | /les06/opdracht6_1.py | UTF-8 | 64 | 3.109375 | 3 | [] | no_license | s = '0123456789'
# Print three slices of the digit string: "234", "78" and "1234567".
for begin, end in ((2, 5), (7, 9), (1, 8)):
    print(s[begin:end])
| true |
1a4f93587ea18a6809d0d075673863c9bd86f905 | Python | walkccc/LeetCode | /solutions/1153. String Transforms Into Another String/1153.py | UTF-8 | 435 | 3.15625 | 3 | [
"MIT"
] | permissive | class Solution:
def canConvert(self, str1: str, str2: str) -> bool:
if str1 == str2:
return True
mappings = {}
# No char in str1 can be mapped to > 1 char in str2
for a, b in zip(str1, str2):
if mappings.get(a, b) != b:
return False
mappings[a] = b
# No char in str1 maps to > 1 char in str2 and
# There is at lest one temp char can break any loops
return len(set(str2)) < 26
| true |
456c908cfbaa76d1d78b4804df4c28a5effd015a | Python | houxudong1997/compuational_physics_N2015301020064 | /random1.py | UTF-8 | 845 | 3.53125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 16:24:12 2017
@author: Houxudong
"""
import random
import turtle
wn=turtle.Screen()
turtle.screensize(2000,2000)
wn.title("Random walk: 2D")
wn.bgcolor('white')
tess=turtle.Turtle()
tess.shape('classic')
tess.color('black')
tess.pensize(1)
for i in range (200):
x=30
det=random.randrange(4)
if det==0:
tess.forward(x)
tess.left(90)
tess.speed(5)
tess.stamp()
elif det==1:
tess.forward(x)
tess.left(180)
tess.speed(5)
tess.stamp()
elif det==2:
tess.forward(x)
tess.left(270)
tess.speed(5)
tess.stamp()
elif det==3:
tess.forward(x)
tess.left(360)
tess.speed(5)
tess.stamp()
wn.exitonclick() | true |
f727bcf62e5a0eea51983e40b1f0f086001b399c | Python | YZJ6GitHub/PyTorch_Learing | /torch_save_model.py | UTF-8 | 1,910 | 3.015625 | 3 | [] | no_license | import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable
import torch.nn.functional as F
x = torch.unsqueeze(torch.linspace(-1,1,100),dim=1)
y = x.pow(2) + 0.2*torch.rand(x.size())
def save():
net = torch.nn.Sequential(
torch.nn.Linear(1,10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
optimezer = torch.optim.SGD(net.parameters(),lr= 0.25)
loss_func = torch.nn.MSELoss()
for i in range(100):
prediction = net(x)
loss = loss_func(prediction,y)
optimezer.zero_grad()
loss.backward()
optimezer.step()
plt.figure(1,figsize=(10,3))
plt.subplot(131)
plt.title('Net1')
plt.scatter(x.data.numpy(),y.data.numpy())
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
# 保存网路图结构以及参数, 保存整个网络
torch.save(net,'net.pkl')
# 只保存网络中的参数 (速度快, 占内存少)
torch.save(net.state_dict(),'net_params.pkl')
#保存了整个网络,只需要加载模型就行
def restore_net():
net2 = torch.load('net.pkl')
prediction = net2(x)
plt.figure(1, figsize=(10, 3))
plt.subplot(132)
plt.title('Net2')
plt.scatter(x.data.numpy(), y.data.numpy())
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
def restore_params():
net3 = torch.nn.Sequential(
torch.nn.Linear(1, 10),
torch.nn.ReLU(),
torch.nn.Linear(10, 1),
)
net3.load_state_dict(torch.load('net_params.pkl'))
prediction = net3(x)
plt.subplot(133)
plt.title('Net3')
plt.scatter(x.data.numpy(), y.data.numpy())
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
plt.show()
# save net1
save()
# restore entire net (may slow)
restore_net()
# restore only the net parameters
restore_params()
| true |
c5ba8f1e39a86eff16f48d375802942102c7c96c | Python | ErikBuchholz/kidsMathQuiz | /kidsMathQuiz/user_interface.py | UTF-8 | 1,626 | 3.703125 | 4 | [] | no_license | #
#
#
#
#
#
#
def display_dialogue(prob_text, question_num):
print("\n")
display_problem(prob_text, question_num)
user_answer = get_answer()
return user_answer
#
#
#
#
#
def display_problem(prob_text, question_num):
print("Question #%d: %s" % (question_num, prob_text))
#
#
#
#
#
def get_answer():
answer = input("> Your answer: ")
return answer
#
#
#
#
def display_result(question_pass_fail, user_answer, correct_answer):
if (question_pass_fail == True) :
feedback = "Job well done!"
else:
feedback = "You will have to practice this problem again."
# Handle blank answers
if user_answer == '':
result_string = "You failed to provide any answer at all."
else:
result_string = "Result: " + str(question_pass_fail) + ", you entered " + \
str(user_answer) + " and the correct answer is " + \
str(correct_answer) + "."
print(result_string)
print(feedback)
return 0
# display_test_summary()
#
# Args: (float) percent_correct
# (int) total_num_correct
# (int) total_questions
# (int) total_num_tries
# Rets:
def display_test_summary(percent_correct, total_num_correct, total_questions, total_num_tries):
print("\n#########################################")
print(" Test Summary")
print(" %.2f%% correct the first time" % percent_correct)
print(" %d correct of %d questions" % (total_num_correct, total_questions))
print(" %d total tries (including repeats)" % total_num_tries)
print("#########################################")
| true |
1eb1a20cca4e64744c3c860ba9ffc78209de8c23 | Python | wsgan001/PyFPattern | /Data Set/bug-fixing-4/3339d802402fd2f2ed5e954434c637bf7a68124d-<_make_validation_split>-bug.py | UTF-8 | 1,278 | 2.796875 | 3 | [] | no_license | def _make_validation_split(self, y):
'Split the dataset between training set and validation set.\n\n Parameters\n ----------\n y : array, shape (n_samples, )\n Target values.\n\n Returns\n -------\n validation_mask : array, shape (n_samples, )\n Equal to 1 on the validation set, 0 on the training set.\n '
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
if (not self.early_stopping):
return validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(test_size=self.validation_fraction, random_state=self.random_state)
(idx_train, idx_val) = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if ((idx_train.shape[0] == 0) or (idx_val.shape[0] == 0)):
raise ValueError(('Splitting %d samples into a train set and a validation set with validation_fraction=%r led to an empty set (%d and %d samples). Please either change validation_fraction, increase number of samples, or disable early_stopping.' % (n_samples, self.validation_fraction, idx_train.shape[0], idx_val.shape[0])))
validation_mask[idx_val] = 1
return validation_mask | true |
a5b8c461e1dd874f60e1eba9c190373c9912502a | Python | JaeZheng/unet | /test.py | UTF-8 | 688 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : JaeZheng
# @Time : 2019/10/22 21:04
# @File : test.py
import cv2
import numpy as np
def iou(y_true, y_pred):
y_true_mask = (y_true == 255)
y_pred_mask = (y_pred == 255)
iou = np.sum(y_true_mask & y_pred_mask) / np.sum(y_true_mask | y_pred_mask)
return iou
mean_iou = []
for i in range(10):
y_true = cv2.imread("./data/thyroid/test/"+str(i)+"_true.bmp", cv2.IMREAD_GRAYSCALE)
y_pred = cv2.imread("./data/thyroid/test/"+str(i)+"_predict.png", cv2.IMREAD_GRAYSCALE)
y_pred[y_pred[:,:]<127] = 0
y_pred[y_pred[:,:]>=127] = 255
mean_iou.append(iou(y_true, y_pred))
print(np.mean(mean_iou)) | true |
8ae00e9e0eebba0416a945744452756c1ac7cde2 | Python | jlas/misc | /store_credit/test_store_credit.py | UTF-8 | 594 | 2.9375 | 3 | [] | no_license | import store_credit as sc
def test_it():
    """Exercise sc.store_credit against known credit/price cases.

    Each tuple is (credit, item prices, expected pair of indices).
    Raises AssertionError on the first mismatch.
    """
    for credit, prices, ans in [
            (100, [5, 75, 25], [1, 2]),
            (200, [150, 24, 79, 50, 88, 345, 3], [0, 3]),
            (8, [2, 1, 9, 4, 4, 56, 90, 3], [3, 4]),
            (32, [1, 1, 1, 1, 32, 2, 2, 2], [4]),
            (42, [3, 8, 12, 80, 18, 12], [2, 4, 5]),
            (1001, [1, 100, 450, 50, 333, 67], [0, 1, 2, 3, 4, 5])]:
        # The module under test also reads the item list from this attribute.
        # (The old `global ref_list` declaration was unused and is removed.)
        sc.ref_list = prices
        result = sc.store_credit(credit, prices)
        # Bug fix: the failure message previously swapped expected and actual.
        assert result == ans, \
            "Expected %s, got %s, for list: %s" % (ans, result, prices)
    # print() works identically as a statement target in Python 2 and 3.
    print('tests passed!')

test_it()
| true |
7785a28a266567fd68a666969feab65cceeaadac | Python | cgnarendiran/bandit_problem | /bandit.py | UTF-8 | 4,349 | 3.4375 | 3 | [] | no_license | """
Bandit Algorithms defined.
"""
import numpy as np
class Bandit:
    """A k-armed bandit whose arms pay out Gaussian rewards."""

    def __init__(self, n_arms, arm_option=1):
        # arm_option is accepted for compatibility but not used.
        # Number of arms.
        self.k = n_arms
        # Fixed true mean reward of each arm (unit variance on pull).
        self.q = [4.0, 4.5, 5.0, 5.5, 6.0]

    def pull(self, arm):
        """Sample one reward from the chosen arm's distribution."""
        return np.random.normal(self.q[arm], 1)
class EpsilonGreedy:
    """Epsilon-greedy action selection over k arms."""

    def __init__(self, epsilon, n_arms):
        # Probability of exploring a uniformly random arm.
        self.epsilon = epsilon
        # Number of arms.
        self.k = n_arms
        # Pull counts and value estimates are created by initialize().
        self.count = None
        self.Q = None

    def initialize(self):
        """Reset value estimates and pull counts to zero."""
        self.Q = [0.0] * self.k
        self.count = [0] * self.k

    def update(self, chosen_arm, reward):
        """Fold one observed reward into the chosen arm's running mean."""
        self.count[chosen_arm] += 1
        # Incremental mean: Q <- Q + (r - Q) / n.
        self.Q[chosen_arm] += (reward - self.Q[chosen_arm]) / self.count[chosen_arm]

    def select_arm(self):
        """Explore with probability epsilon, otherwise exploit the best arm."""
        if np.random.random() < self.epsilon:
            return np.random.randint(0, self.k)
        return np.argmax(self.Q)
class OptimisticGreedy:
    """Greedy selection with optimistic initial value estimates."""

    def __init__(self, n_arms):
        # Number of arms.
        self.k = n_arms
        # Pull counts and value estimates are created by initialize().
        self.count = None
        self.Q = None

    def initialize(self):
        """Reset state: optimistic estimates (10.0) drive early exploration."""
        self.Q = [10.0] * self.k
        self.count = [0] * self.k

    def update(self, chosen_arm, reward):
        """Fold one observed reward into the chosen arm's running mean."""
        self.count[chosen_arm] += 1
        self.Q[chosen_arm] += (reward - self.Q[chosen_arm]) / self.count[chosen_arm]

    def select_arm(self):
        """Always exploit the currently best-looking arm."""
        return np.argmax(self.Q)
class UCB:
    """Upper-Confidence-Bound (UCB1-style) arm selection.

    Attributes
    ----------
    k : int
        Number of arms.
    count : list of int
        Times each arm was played (created by ``initialize``).
    Q : list of float
        Estimated mean reward per arm (created by ``initialize``).

    Note: the previous docstring documented nonexistent ``counts`` /
    ``values`` parameters and claimed zero initialization; the estimates
    actually start at an optimistic 10.0.
    """

    def __init__(self, n_arms):
        self.k = n_arms
        self.count = None
        self.Q = None

    def initialize(self):
        """Reset estimates (optimistically, to 10.0) and zero pull counts."""
        self.Q = [10.0] * self.k
        self.count = [0] * self.k

    def select_arm(self):
        """Pick the arm with the highest upper confidence bound.

        Every arm is played once before any bound is computed, since the
        exploration bonus is undefined for an unvisited arm.
        """
        for arm in range(self.k):
            if self.count[arm] == 0:
                return arm
        ucb_values = np.zeros(self.k)
        n = np.sum(self.count)
        for arm in range(self.k):
            # Exploration bonus shrinks as an arm is sampled more often.
            bonus = np.sqrt((np.log(n)) / (2 * self.count[arm]))
            ucb_values[arm] = self.Q[arm] + bonus
        return np.argmax(ucb_values)

    def update(self, chosen_arm, reward):
        """Fold one observed reward into the chosen arm's running mean."""
        self.count[chosen_arm] += 1
        self.Q[chosen_arm] += (reward - self.Q[chosen_arm]) / self.count[chosen_arm]
def test_algorithm(bandit, algo, num_simulations = 1000, horizon = 500):
# record results:
chosen_arms = np.zeros((num_simulations, horizon))
rewards = np.zeros((num_simulations, horizon))
for sim in range(num_simulations):
# Re-initialize counts and Q values to zero:
algo.initialize()
for t in range(horizon):
chosen_arm = algo.select_arm()
# print(chosen_arm)
chosen_arms[sim,t] = chosen_arm
reward = bandit.pull(chosen_arm)
rewards[sim,t] = reward
algo.update(chosen_arm, reward)
# Average rewards across all sims and compute cumulative rewards
average_rewards = np.mean(rewards, axis=0)
cumulative_rewards = np.cumsum(average_rewards)
# print("Average Rewards {}".format(average_rewards))
# print("cumulative Rewards {}".format(cumulative_rewards))
return chosen_arms, average_rewards, cumulative_rewards
# for epsilon in [0.01, 0.1]:
# for n_arms in range(5,20):
| true |
50e0dd0a6f8877bc14fd58a463ef6e7c00b9bde3 | Python | uccser/cs-field-guide | /csfieldguide/tests/test_repository.py | UTF-8 | 1,849 | 2.578125 | 3 | [
"CC-BY-NC-SA-4.0",
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Unlicense",
"LicenseRef-scancode-secret-labs-2011",
"WTFPL",
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"CC-BY-NC-2.5",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown... | permissive | """Test class for other respository tests."""
import os.path
import yaml
import glob
from django.test import SimpleTestCase
class RepositoryTests(SimpleTestCase):
    """Tests for areas of the project repository outside the Django app."""

    def test_node_modules_setup(self):
        """Check all 'package.json' files are wired into the Docker setup.

        Each directory containing a 'package.json' must:
        - install its dependencies in the Node Dockerfile, and
        - mount over its installed dependencies in the local Docker
          Compose configuration.
        """
        BASE_DIRECTORY = "/test_files/"
        DOCKER_COMPOSE_FILE = "docker-compose.local.yml"
        DOCKER_COMPOSE_ENTRY = "/app/{}/node_modules/"
        DOCKERFILE_FILE = "Dockerfile"
        DOCKERFILE_ENTRY = "WORKDIR /app/{}/\nRUN npm install"
        with open(os.path.join(BASE_DIRECTORY, DOCKERFILE_FILE)) as f:
            dockerfile_contents = f.read()
        with open(os.path.join(BASE_DIRECTORY, DOCKER_COMPOSE_FILE)) as f:
            docker_compose_config = yaml.safe_load(f.read())
        node_volumes = docker_compose_config['services']['node']['volumes']
        for filepath in glob.iglob("static/**/package.json", recursive=True):
            directory = os.path.dirname(filepath)
            # Dependencies must be installed in the Dockerfile.
            self.assertIn(
                DOCKERFILE_ENTRY.format(directory),
                dockerfile_contents,
                msg="'{}' could not be found referenced in Dockerfile".format(
                    filepath,
                )
            )
            # node_modules must be mounted over in Docker Compose.
            # Bug fix: this message previously said "Dockerfile" although
            # the assertion checks the docker-compose volume list.
            self.assertIn(
                DOCKER_COMPOSE_ENTRY.format(directory),
                node_volumes,
                msg="'{}' could not be found referenced in {}".format(
                    filepath,
                    DOCKER_COMPOSE_FILE,
                )
            )
| true |
b81a3a2fed4d308f722b17348a0b1198101d75b3 | Python | xeeeion/Python_unI | /Pr2/Pr_2/11.1. Розенкранц и Гильденстерн меняют профессию.py | UTF-8 | 221 | 3.140625 | 3 | [] | no_license | string = input()
# Find the length of the longest unbroken run of the letter 'о'
# (Cyrillic) in the input string, then print it.
mx = 0
run = 0
for ch in string:
    if ch == 'о':
        run += 1
        if run > mx:
            mx = run
    else:
        # Any other character breaks the current run.
        run = 0
print(mx)
| true |
2e799b523aa2cb694c5084e6f041920c88dd70fe | Python | MStaniek/ECL | /Perzeptron/Perzeptron.py | UTF-8 | 835 | 3.390625 | 3 | [] | no_license | import math
class Perceptron:
    """A minimal perceptron trained with the classic update rule."""

    def __init__(self, dimensionen):
        # One weight per input dimension, all starting at zero.
        self.weightvector = [0] * dimensionen

    def update(self, test):
        """Apply one perceptron update for sample ``test``.

        ``test`` holds the feature values followed by the label (+1/-1)
        in its last position.
        """
        activation = 0
        for idx, weight in enumerate(self.weightvector):
            activation += weight * test[idx]
        # Misclassified (or on the boundary) when label * activation <= 0.
        if activation * test[-1] <= 0:
            for idx in range(len(self.weightvector)):
                self.weightvector[idx] += test[-1] * test[idx]
                self.weightvector[idx] = round(self.weightvector[idx], 5)

    def iteration(self, testlist):
        """Sweep the training list ten times, logging every step."""
        for i in range(10):
            for a, test in enumerate(testlist):
                self.update(test)
                print("Iteration " +str(i) + "." + str(a) + " " + str(self.weightvector))
# Train a 2-input perceptron on six labelled samples (last entry = label).
Testlist = [[9.2, 17, 1], [8.8, 38, 1], [8.8, 7, 1],
            [1.2, 0, -1], [3.3, 1, -1], [5.8, 4, -1]]
percep = Perceptron(2)
percep.iteration(Testlist)
| true |
a8353dfed9b1964e52f4b8b9637d400969da798c | Python | pavit939/A-December-of-Algorithms | /December-15/pascal.py | UTF-8 | 571 | 3.359375 | 3 | [] | no_license | def pascal(n):
    # Flattened list of every row of Pascal's triangle up to row n-1.
    l1 = []
    for line in range(0,n):
        for i in range(0,line + 1):
            l1.append(coeffbi(line,i))
    # Index of the first entry of the last (n-entry) row.
    a = len(l1)
    a = a-n
    # Print the last row's coefficients as the terms of (x + y)^(n-1):
    # exponent of x decreases while the exponent of y increases.
    while(a < len(l1)):
        j = 0
        for i in range(n-1,-1,-1):
            print(l1[a],"x^",i,"y^",j)
            a = a + 1
            j = j + 1
def coeffbi(n, k):
    """Binomial coefficient C(n, k) via the multiplicative formula."""
    # Exploit symmetry so the loop runs at most n // 2 times.
    k = min(k, n - k)
    acc = 1
    for step in range(1, k + 1):
        # Each intermediate product is exactly divisible by `step`.
        acc = acc * (n - step + 1) // step
    return acc
def main():
    """Ask the user for a degree and print the binomial expansion terms."""
    order = int(input("Enter the number:"))
    # pascal() builds order+1 rows and prints the terms of (x + y)^order.
    pascal(order + 1)

main()
| true |
6f912f266609e25f26a33f675a1fd61e3d9bd30c | Python | DIEE-ISDe-code/design_pattern | /observer/observer3.py | UTF-8 | 2,088 | 3.796875 | 4 | [] | no_license |
# observer3.py (Python 3)
# The Observer
class Subscriber:
    """Observer that simply echoes every notification it receives."""

    def __init__(self, name):
        # Display name shown when a message arrives.
        self.name = name

    def update(self, message):
        """Default callback invoked by a Publisher on dispatch."""
        print(self.name, ' received the message ', message)
# The Observable
class Publisher:
    """Observable that fans messages out to per-event subscriber callbacks."""

    def __init__(self, events):
        # One {observer: callback} mapping per event name declared up front.
        self.subscribers = {}
        for event in events:
            self.subscribers[event] = {}

    def get_subscribers(self, event):
        """Return the observer->callback map for *event* (KeyError if unknown)."""
        return self.subscribers[event]

    def register(self, event, anObserver, callback=None):
        """Subscribe *anObserver* to *event*; defaults to its update() method."""
        if callback is None:
            callback = getattr(anObserver, 'update')
        self.get_subscribers(event)[anObserver] = callback

    def unregister(self, event, anObserver):
        """Drop *anObserver* from *event* (KeyError if not subscribed)."""
        self.get_subscribers(event).pop(anObserver)

    def dispatch(self, event, message):
        """Deliver *message* to every callback subscribed to *event*."""
        for callback in self.get_subscribers(event).values():
            callback(message)
# driver / demo
# possible events are ('lunch', 'happyhour')
publisher=Publisher(['lunch', 'happyhour'])
bob=Subscriber('Bob')
alice=Subscriber('Alice')
john=Subscriber('John')
# bob and john are interested in the event 'lunch'
publisher.register('lunch', bob )
publisher.register('lunch', john )
# alice and john are interested in the event 'happyhour'
publisher.register('happyhour', alice )
publisher.register('happyhour', john )
# send a message on each channel
print ('\nLUNCHTIME!')
publisher.dispatch('lunch','Lunchtime!') #event, message
print ('\nHAPPYHOUR!')
publisher.dispatch('happyhour','HAPPY HOUR!') #event, message
print("\nNow john is no longer interested in event 'happyhour'")
print("but he remains interested in event 'lunch'\n")
publisher.unregister('happyhour',john)
# send a message again; john only receives the 'lunch' one now
print ('\nLUNCHTIME!')
publisher.dispatch('lunch','Lunchtime!') #event, message
print ('\nHAPPYHOUR!')
publisher.dispatch('happyhour','HAPPY HOUR!') #event, message
| true |
d0b6aba8643a851750c80647b164b34431c7ed9d | Python | ShangHung0314/sc-projects | /stanCode_Projects/hangman_game/Substitution_Cipher_ext.py | UTF-8 | 3,568 | 4.46875 | 4 | [
"MIT"
] | permissive | """
File: Substitution_Cipher_ext.py
Name: Cage
-----------------------------
This program use the concept of substitution cipher.
I use the secret code to form a set of alphabet sequence.
For example, if my SECRET is 'HELLO WORLD'.
The new set of sequence will be 'HELOWRDABCFGHIJKMNPQSTUVWXYZ', deleting the space and repetitive letters.
Therefore, if someone send me the message: 'USOPOTR'
And I type the correct secret code, for example: 'TSLA TO THE MOON'
I can decrypt the message as 'UBEREAT', so the sender must be hungry.
"""
# This constant shows the original order of alphabetic sequence.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# Default secret phrase used to seed the cipher alphabet for encryption.
SECRET = 'TSLA TO THE MOON!'
def main():
    """Drive one encrypt/decrypt round trip.

    The message is encrypted with the cipher alphabet derived from
    SECRET; the user must then type a secret that produces the same
    cipher alphabet to decrypt it successfully.
    """
    # encrypt
    cipher_key = transform(SECRET)
    message = input("Send Your Message: ").upper()
    encrypted_message = my_enigma(message, cipher_key)
    print('The encrypted message received: ', encrypted_message)
    # decrypt
    secret_code = input('Secret Code: ')
    decrypted_message = my_decrypt(encrypted_message, secret_code)
    if decrypted_message != message:
        print('Your Code is Wrong')
    else:
        print('The message you decrypted is: ', decrypted_message)
def my_decrypt(encrypted_message, secret_code):
    """Decrypt *encrypted_message* using the alphabet built from *secret_code*.

    :param encrypted_message: str, output of my_enigma.
    :param secret_code: str, candidate secret; decryption only matches when
        it yields the same cipher alphabet that was used for encryption.
    :return: str, the decrypted message.
    """
    # Rebuild the cipher alphabet: cleaned secret first, then the rest of
    # the alphabet in its original order.
    find_sequence = transform(secret_code)
    for ch in ALPHABET:
        if ch not in find_sequence:
            find_sequence += ch
    deciphered = ""
    for ch in encrypted_message:
        n = find_sequence.find(ch)
        if n == -1:
            # Bug fix: characters outside the cipher alphabet (e.g. spaces)
            # previously indexed ALPHABET[-1]; pass them through unchanged.
            deciphered += ch
        else:
            deciphered += ALPHABET[n]
    return deciphered
def my_enigma(message, secret):
    """Encrypt *message* by substituting through the cipher alphabet.

    :param message: str, the (upper-case) message to encrypt.
    :param secret: str, cleaned secret produced by transform().
    :return: str, the encrypted message.
    """
    # Cipher alphabet: the secret's letters first, then the remaining
    # letters of ALPHABET in order.
    new_alphabet = secret
    for ch in ALPHABET:
        if ch not in new_alphabet:
            new_alphabet += ch
    password = ""
    for ch in message:
        n = ALPHABET.find(ch)
        if n == -1:
            # Bug fix: non-letters used to map through index -1 (the last
            # cipher letter) and could not round-trip; keep them as-is.
            password += ch
        else:
            password += new_alphabet[n]
    return password
def transform(secret):
    """Normalize *secret*: upper-case, letters only, keeping only the
    first occurrence of each letter.

    :param secret: str, any phrase.
    :return: str, the cleaned sequence used to seed a cipher alphabet.
    """
    # Case-insensitive; punctuation and spaces are discarded.
    letters = [ch for ch in secret.upper() if ch.isalpha()]
    deduped = ""
    for ch in letters:
        if ch not in deduped:
            deduped += ch
    return deduped
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
    # Run the interactive demo only when executed as a script.
    main()
| true |
05ae9ba97a714251b53c5c1431ea1f6adf186fcb | Python | ati-ozgur/course-python | /2020/examples-in-class-2020-11-05/answer_to_question05.py | UTF-8 | 81 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | def multiple_outputs(a,b):
    # Returns the product, sum, and difference packed into one tuple.
    return a*b,a+b, a-b

# Tuple is printed as-is: (20, 9, 1) for inputs 5 and 4.
print(multiple_outputs(5,4))
| true |
da7c9c722a82bba77ee56e1410a5b10661b99201 | Python | SenadI/tea | /tea/console/utils.py | UTF-8 | 1,367 | 2.984375 | 3 | [] | no_license | __author__ = 'Viktor Kerkez <alefnula@gmail.com>'
__date__ = '20 October 2010'
__copyright__ = 'Copyright (c) 2010 Viktor Kerkez'
import os
from tea.system import platform
# Platform dispatch: bind the low-level _clear_screen/_getch helpers to
# the implementation appropriate for the current OS at import time.
if platform.is_a(platform.WINDOWS | platform.DOTNET):
    import msvcrt

    def _clear_screen(numlines):
        # Windows console: delegate to the shell's `cls`.
        os.system('cls')
    # msvcrt already provides a raw single-character reader.
    _getch = msvcrt.getch
elif platform.is_a(platform.POSIX):
    import sys

    def _clear_screen(numlines):
        # POSIX terminals: delegate to `clear`.
        os.system('clear')

    def _getch():
        # Temporarily switch stdin to raw mode to read one keypress
        # without waiting for a newline, then restore the old settings.
        import tty
        import termios
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
else:
    # Neither Windows/.NET nor POSIX: this module cannot work here.
    raise platform.not_supported('tea.console.utils')
def clear_screen(numlines=100):
    """Clear the console.

    :param int numlines: Optional fall-back line count, used only when the
        operating system console has no native clear-screen function.
    :rtype: None
    """
    _clear_screen(numlines)
def getch():
    """Cross-platform getch().

    Behaves like msvcrt's getch, but works on all supported platforms.

    :rtype: str
    :return: One character read from standard input.
    """
    return _getch()
| true |
1ea324f90f564e108fb5a3c147a0332427061edb | Python | Mitrou/rep-one | /_lrn/old_stuff/new_hope/int1.py | UTF-8 | 525 | 3.140625 | 3 | [] | no_license | # def extendList(val, list=[]):
# list.append(val)
# return list
#
# list1 = extendList(10)
# list2 = extendList(123,[])
# list3 = extendList('a')
#
# print "list1 = %s" % list1
# print "list2 = %s" % list2
# print "list3 = %s" % list3
#
# print list1
def extendList(val, list=None):
    """Append *val* to *list* and return it.

    Bug fix: the original used a mutable default argument (``list=[]``),
    which is shared across calls, patched over with an emptiness check
    that also discarded any non-empty list passed by the caller. A fresh
    list is now created per call and a caller-supplied list is extended
    in place. (The parameter keeps its original name ``list``, shadowing
    the builtin, to stay call-compatible.)
    """
    if list is None:
        list = []
    list.append(val)
    return list
list1 = extendList(10)
list2 = extendList(123,[])
list3 = extendList('a')

# Python 2 print statements; expected output: [10], [123], ['a'].
print "list1 = %s" % list1
print "list2 = %s" % list2
print "list3 = %s" % list3
5504cabb82b8bda2b4055262db18192be34a3d7b | Python | inchiyoung/Listeria_ISGylome | /script/Parse_RSA.py | UTF-8 | 1,236 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python
import os
import sys
import glob
import numpy as np
from collections import defaultdict
def reading_RSA(infile_RSA):
    """Read a tab-separated RSA table, grouping type labels per residue.

    Keys are the '|'-joined non-type columns; values collect the type
    labels (column 0) observed for that residue.
    """
    infile = open(infile_RSA,'r')
    RSA_dict = defaultdict(list)
    # NOTE: xreadlines() means this is Python 2 code.
    for line in infile.xreadlines():
        # Skip the header row.
        if 'UniProt_ID' in line: continue
        line = line.rstrip().rsplit("\t")
        RSA_dict["|".join(line[1::])].append(line[0])
    infile.close()
    return RSA_dict
def filtering_RSA(RSA_dict, outfile_RSA):
    """Write the grouped RSA rows back out, capping RSA values at 1.0.

    Residues seen with more than one type label are written as 'ISG15';
    single-label residues keep their label ('All' renamed to 'Other').
    """
    print "Writing: %s" % outfile_RSA
    outfile = open(outfile_RSA, 'w')
    outfile.write("\t".join(['Type','UniProt_ID','position','PDB','PDB_chain','PDB_resi','RSA'])+"\n")
    for info in RSA_dict.keys():
        newinfo = info.rsplit('|')
        # Clamp relative solvent accessibility (last column) to <= 1.0.
        if float(newinfo[-1]) > 1:
            newinfo = "\t".join(newinfo[0:-1])+"\t"+'1.0'
        else:
            newinfo = "\t".join(newinfo)
        if len(RSA_dict[info]) > 1:
            outfile.write('ISG15'+"\t"+newinfo+"\n")
        if len(RSA_dict[info]) == 1:
            outfile.write(RSA_dict[info][0].replace('All','Other')+"\t"+newinfo+"\n")
    outfile.close()
def main():
    """Filter the raw RSA table into the cleaned, capped output file."""
    infile_RSA = 'result/ISG15_Lys_RSA.tsv'
    outfile_RSA = 'result/ISG15_Lys_RSA_filtered.tsv'
    RSA_dict = reading_RSA(infile_RSA)
    filtering_RSA(RSA_dict, outfile_RSA)

if __name__ == "__main__":
    main()
| true |
5424e3dd9a1448b009bf9044c3d522b9bb44414c | Python | SergeiBondarev/B_test | /Less-4.1_home.py | UTF-8 | 1,668 | 2.78125 | 3 | [] | no_license | # Home: добавление комментария (94)
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Start Chrome with a persistent user profile.
# NOTE(review): `chrome_options=` is the legacy keyword; newer Selenium
# releases expect `options=` -- confirm against the pinned version.
options = Options()
options.add_argument("user-data-dir=C:\\profile")
driver = webdriver.Chrome(chrome_options=options)
driver.maximize_window()
driver.get("https://google.com")
time.sleep(1)
# Open http://practice.automationtesting.in/
driver.get("http://practice.automationtesting.in/")
time.sleep(1)
# Scroll the page down by 600 pixels
driver.execute_script("window.scrollBy(0, 600);")
# Click the book title "Selenium Ruby"
Selenium_Ruby = driver.find_element_by_xpath("//*[@id='text-22-sub_row_1-0-2-0-0']/div/ul/li/a[1]/h3")
Selenium_Ruby.click()
time.sleep(1)
# Click the "REVIEWS" tab
Reviews_btn = driver.find_element_by_css_selector("[href='#tab-reviews']")
Reviews_btn.click()
time.sleep(1)
# Give a 5-star rating
star5_btn = driver.find_element_by_class_name("star-5")
star5_btn.click()
time.sleep(1)
# Fill the "Review" field with the message: "Nice book!"
Comment_field = driver.find_element_by_id("comment")
Comment_field.send_keys("Nice book!")
time.sleep(1)
# Fill in the "Name" field
Name_field = driver.find_element_by_id("author")
Name_field.send_keys("Ecoist")
time.sleep(1)
# Fill in the "Email" field
Email_field = driver.find_element_by_id("email")
Email_field.send_keys("eco@mail.ru")
time.sleep(1)
# Click the "SUBMIT" button
Submit_btn = driver.find_element_by_id("submit")
Submit_btn.click()
time.sleep(1)
driver.quit() | true |
b91d0f8e6da7ba7ea376f42e7d45c84bc393a871 | Python | 3enoit3/tools | /vimrc/parse_vimrc.py | UTF-8 | 6,798 | 2.703125 | 3 | [] | no_license |
import sys
import re
# Parser
class sourceLineIter:
    """Stateful cursor over vimrc lines.

    Tracks the current line (1-based), classifies lines (blank / comment /
    block delimiters) and can consume one whole logical instruction at a
    time, skipping multi-line block bodies such as functions.
    """
    def __init__(self, iLines):
        self._lines = iLines
        self._lineCount = len(iLines)
        # 1-based index of the current line.
        self._currLineNb = 1
        self._previousLine = ''
        # Nesting depth while inside a begin/end block, plus the
        # (begin, end) keyword pair currently being tracked.
        self._blockDepth = 0
        self._blockEnds = None
    # Iteration
    def get(self):
        # Current (line_number, text) pair, or None when past the end.
        if self._currLineNb > self._lineCount:
            return None
        else:
            return (self._currLineNb, self._lines[self._currLineNb - 1])
    def next(self):
        self._currLineNb += 1
        if self._currLineNb > self._lineCount:
            return None
        else:
            return self.get()
    def eof(self):
        return self._currLineNb > self._lineCount
    # Lines
    def getCmd(self):
        # Current line with surrounding whitespace removed.
        return self.get()[1].strip()
    def getFirstToken(self):
        # Leading word of the command (e.g. 'set', 'function'), or None.
        aMatch = re.search('^(\w+)', self.getCmd())
        if aMatch:
            return aMatch.group(0)
        return None
    def isBlank(self):
        return self.getCmd() == ""
    def isComment(self):
        # Vimscript comments start with a double quote.
        return self.getCmd().startswith('"')
    def isBlockBegin(self):
        # Entering a multi-line construct? Remember its closing keyword.
        aBlockEnd = {
            'function': 'endfunction',
            'if': 'endif',
            'augroup': 'augroup'}.get( self.getFirstToken() )
        if aBlockEnd:
            self._blockEnds = (self.getFirstToken(), aBlockEnd)
            self._blockDepth = 1
            return True
        return False
    def isBlockEnd(self):
        # Track nesting of identical constructs until depth returns to 0.
        if self._blockDepth:
            if self.getFirstToken() == self._blockEnds[1]:
                self._blockDepth -= 1
                if not self._blockDepth:
                    self._blockEnds = None
                    return True
            if self.getFirstToken() == self._blockEnds[0]:
                self._blockDepth += 1
        return False
    # Public
    def consume(self):
        """Return the next (line_nb, command, comments) tuple or None at EOF.

        Leading blank lines reset the pending comment; comment lines
        directly above a command are attached to it; block constructs
        (function/if/augroup) are skipped wholesale.
        """
        if self.eof():
            return None
        #print self.get()
        aComments = []
        while self.isBlank() or self.isComment():
            # Reset comment
            aComments = []
            # Find non empty line
            while self.isBlank():
                self.next()
                if self.eof():
                    return None
            # Find comment if any
            while self.isComment():
                aComments.append( self.get()[1] )
                self.next()
                if self.eof():
                    return None
        aLineNb, aCmd = self.get()
        # Handle functions (and other blocks): skip to the matching end.
        if self.isBlockBegin():
            self.next()
            while not self.isBlockEnd():
                self.next()
        self.next()
        return (aLineNb, aCmd, aComments)
def getInstructions(iLines):
    """Turn raw vimrc lines into (line_nb, [commands], [comments]) tuples,
    folding '\\'-continuation lines into the previous instruction."""
    instructions = []
    reader = sourceLineIter(iLines)
    while True:
        item = reader.consume()
        if not item:
            break
        line_nb, cmd, comments = item
        stripped = cmd.lstrip()
        if stripped.startswith("\\") and instructions:
            # Continuation line: append to the previous instruction's body.
            instructions[-1][1].append(stripped)
        else:
            # New instruction; strip comment markers from its comments.
            instructions.append((line_nb, [stripped], [c.lstrip(' "') for c in comments]))
    return instructions
# Data
class VimType:
    """One category of vimrc instruction, recognized by a regex.

    Collected matches are rendered as a table whose header is the
    configured column list.
    """
    def __init__(self, iPattern, iColumns = []):
        # NOTE(review): the mutable default ([]) would be shared between
        # instances that omit iColumns; every subclass passes its own list.
        self._pattern = re.compile(iPattern)
        self._columns = iColumns
        self._found = []
    def collect(self, iInstr):
        """Try to match (line_nb, [cmds], [comments]); record on success."""
        aMatch = self._pattern.search(iInstr[1][0])
        if aMatch:
            # Python 2 only: dict.items() returns a list concatenable here.
            self._found.append( aMatch.groupdict().items() + [('line',iInstr[0]), ('command',iInstr[1]), ('comment',iInstr[2])] )
            return True
        return False
    def get(self):
        """Return the table: header row followed by one row per match."""
        aTable = [ self.getHeader() ]
        for f in self._found:
            aTable.append( self.getLine(f) )
        return aTable
    def getHeader(self):
        return self._columns
    def getLine(self, iFound):
        # Pick out the captured values in the configured column order.
        aFoundDict = dict(iFound)
        return [aFoundDict[c] for c in self._columns]
class Var(VimType):
    """Matches `set`/`let` variable assignments (optional `+=`/`=` value)."""
    def __init__(self):
        VimType.__init__(self,
                r"^(?P<set>[sl]et)\s+(?P<var>\S+)(?:\s*\+?=\s*(?P<val>.*))?\s*$",
                ['line', 'set', 'var', 'comment'] )
class Map(VimType):
    """Matches any *map command: keys followed by the mapped action."""
    def __init__(self):
        VimType.__init__(self,
                r"^(?P<map>.*map)\s+(?P<keys>\S+)\s+(?P<action>.*)\s*$",
                ['line', 'map', 'keys', 'comment'])
class AutoCmd(VimType):
    """Matches `au`/`autocmd` declarations."""
    def __init__(self):
        VimType.__init__(self,
                r'^(?P<auto>au(?:tocmd)?\s+.*)\s*$',
                ['line', 'auto', 'comment'])
class Command(VimType):
    """Matches `com!`/`command!` user-command definitions."""
    def __init__(self):
        VimType.__init__(self,
                r'^(?P<type>com(?:mand)?!)\s+(?P<cmd>\S+)\s+(?P<action>.*)\s*$',
                ['line', 'cmd', 'comment'])
class Function(VimType):
    """Matches `fun!`/`function!` definitions (name and argument list)."""
    def __init__(self):
        VimType.__init__(self,
                r'^(?P<type>fun(?:ction)?!)\s+(?P<function>\S+)\s*\(\s*(?P<args>.*)\s*\)\s*$',
                ['line', 'function', 'comment'])
class Abbrev(VimType):
    """Matches abbreviation commands (`ab`, `iabbrev`, ...)."""
    def __init__(self):
        VimType.__init__(self,
                r'^(?P<abbrev>\w*ab(?:brev)?\s+.*)\s*$',
                ['line', 'abbrev', 'comment'])
class Other(VimType):
    """Catch-all: matches any remaining instruction."""
    def __init__(self):
        VimType.__init__(self,
                r'^(?P<cmd>.*)\s*$',
                ['line', 'command', 'comment'])
# Output
def printOutput(iTypes):
    """Dump each collected type as plain text (Python 2 print statements)."""
    for n, t in iTypes:
        print n
        aTable = t.get()
        # Header row first, then the data rows.
        print aTable[0]
        print aTable[1:]
def htmlString(i):
    """Escape angle brackets so arbitrary text is safe inside HTML.

    Bug fix: the replacements were no-ops (each character was replaced by
    itself); angle brackets are now turned into the &lt; / &gt; entities.
    (Ampersands are still passed through unescaped, as before.)
    """
    return str(i).replace("<", "&lt;").replace(">", "&gt;")
def htmlCell(i):
    """Render one table cell: lists become <br>-joined escaped lines."""
    if not isinstance(i, list):
        return htmlString(i)
    return "<br>".join(htmlString(e) for e in i)
def htmlOutput(iTypes):
    """Emit every collected table as one HTML page on stdout (Python 2)."""
    print "<html>\n\
<head><title>Vimrc</title></head>\n\n\
<body>"
    for n, t in iTypes:
        # One section and table per instruction type.
        print "<h4>{}</h4><table>".format(n)
        aTable = t.get()
        print " <tr>{}</tr>".format( "".join(["<th>"+htmlCell(c)+"</th>" for c in aTable[0]]) )
        for r in aTable[1:]:
            print " <tr>{}</tr>".format( "".join(["<td>"+htmlCell(c)+"</td>" for c in r]) )
        print "</table>"
    print "</body>\n\
</html>"
# Main
def main(aArgv):
    """Parse ./.vimrc, classify each instruction, print an HTML report."""
    # Read file
    with open(".vimrc") as aFile:
        aContent = aFile.read()
    aLines = [l.rstrip("\n") for l in aContent.split("\n")]
    # Types: specific categories first, 'Other' as the catch-all.
    aTypes = { 'map':Map(), 'var':Var(), 'autocmd':AutoCmd(), 'command':Command(), 'function':Function(), 'abbrev':Abbrev() }
    aOther = Other()
    # Collect: the first type that matches an instruction claims it.
    # NOTE(review): dict.values() + list is Python 2 only.
    for i in getInstructions(aLines):
        for t in aTypes.values() + [aOther]:
            if t.collect(i):
                # stop if found
                break
    # Output
    htmlOutput( aTypes.items() + [('other', aOther)] )
    return 0

if __name__ == "__main__":
    sys.exit( main(sys.argv) )
| true |
a3d8ba69e835adfa1f34599b53ebb1b5016b530a | Python | mjiana/python | /pythonProject/py001-023/py007.py | UTF-8 | 217 | 3.609375 | 4 | [] | no_license | # 도전
# Challenge: draw an equilateral triangle and a square using loops.
import turtle as t

# Equilateral triangle: three sides of 100, turning 120 degrees each time.
for _ in range(3):
    t.forward(100)
    t.left(120)

# Square: four sides of 200, turning 90 degrees each time.
for _ in range(4):
    t.forward(200)
    t.left(90)
39d8dec52c343129e1cc772ba7dc0c0bb4f4cfab | Python | jb55/cloudvibe-client | /src/cloudvibe/gui.py | UTF-8 | 1,342 | 2.9375 | 3 | [] | no_license | import sys
class Tray():
    """The Cloudvibe system-tray icon and its event plumbing."""

    def __init__(self):
        # event name -> list of registered handler callables
        self.handlers = {}

    def on(self, event, fn):
        """Register *fn* as a handler for *event*.

        Known events: 'sync', 'site'.
        """
        self.handlers.setdefault(event, []).append(fn)

    def callHandlers(self, event):
        """Invoke every handler registered for *event* (no-op if none)."""
        for handler in self.handlers.get(event, []):
            handler()

    def onSync(self):
        """Menu callback: 'Sync Now' was pressed."""
        self.callHandlers("sync")

    def onSite(self):
        """Menu callback: 'Visit Cloudvibe Website' was pressed."""
        self.callHandlers("site")

    def getMenu(self):
        """Describe the tray menu as {key: (label, callback-or-None)}."""
        return {
            "sync": ("Sync Now", self.onSync),
            "site": ("Visit Cloudvibe Website", self.onSite),
            "quit": ("Quit", None),
        }

    def load(self):
        """Start the platform-specific tray implementation."""
        menu = self.getMenu()
        if sys.platform == 'darwin':
            import cloudvibe.platform.darwin.tray as tray
            tray.load(menu)
        elif sys.platform == 'win32':
            import cloudvibe.platform.win32.win_sys_tray_icon as tray
            tray.load(menu)
if __name__ == '__main__':
    # Manual smoke test: wire two handlers and start the tray.
    # NOTE: Python 2 print statements below.
    tray = Tray()

    def sync():
        print "sync"

    def site():
        print "site"

    tray.on("sync", sync)
    tray.on("site", site)
    tray.load()
| true |
7f7a74a77e213e0427e79ecc42f4de403aeb6382 | Python | andrewsris/preProcessing | /Normalize.py | UTF-8 | 3,032 | 2.65625 | 3 | [
"MIT"
] | permissive | """
@author: Narmin Ghaffari Laleh <narminghaffari23@gmail.com> - Nov 2020
"""
##############################################################################
from contextlib import contextmanager
from multiprocessing.dummy import Pool as ThreadPool
import multiprocessing
import os

import cv2
import numpy as np

import stainNorm_Macenko
# Module-level `global` statements are no-ops; the names below are the
# module attributes shared between Normalization() and the
# Normalize_Main worker threads.
global inputPath
global outputPath
global normalizer
##############################################################################
def Normalize_Main(item):
    """Stain-normalize every .jpg tile in <inputPath>/<item> into <outputPath>/<item>.

    Relies on the module-level globals ``inputPath``, ``outputPath`` and
    ``normalizer`` being configured before the call (see Normalization()).
    Tiles whose Canny edge density is <= 2% are skipped as background.
    """
    outputPathRoot = os.path.join(outputPath, item)
    inputPathRoot = os.path.join(inputPath, item)
    inputPathRootContent = os.listdir(inputPathRoot)
    print()
    if not len(inputPathRootContent) == 0:
        if not os.path.exists(outputPathRoot):
            os.mkdir(outputPathRoot)
    # Fix: the directory was listed twice (``temp`` was the same path as
    # ``inputPathRoot``); reuse the listing and just filter for tiles.
    tempContent = [i for i in inputPathRootContent if i.endswith('.jpg')]
    for tempItem in tempContent:
        img = cv2.imread(os.path.join(inputPathRoot, tempItem))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Percentage of edge pixels, used as a proxy for tissue content.
        edge = cv2.Canny(img, 40, 40)
        # NOTE(review): np.max(edge) is 0 for edge-free tiles, yielding NaN;
        # NaN > 2 is False so such tiles are skipped -- confirm intended.
        edge = edge / np.max(edge)
        edge = (np.sum(np.sum(edge)) / (img.shape[0] *img.shape[1])) * 100
        print(edge)
        if edge > 2:
            try:
                nor_img = normalizer.transform(img)
                cv2.imwrite(os.path.join(outputPathRoot, tempItem), cv2.cvtColor(nor_img, cv2.COLOR_RGB2BGR))
            except Exception:
                # Fix: bare ``except:`` replaced; keep the best-effort print
                # but no longer swallow KeyboardInterrupt/SystemExit.
                print('Failed to normalize the tile {}.'.format(tempItem))
##############################################################################
def poolcontext(*args, **kwargs):
    """Context manager yielding a multiprocessing.Pool, terminated on exit.

    Bug fix: the original was a plain generator (``yield`` without the
    ``contextlib.contextmanager`` decorator), so ``with poolcontext(...)``
    raised AttributeError because generators have no __enter__/__exit__.
    The pool is now also terminated when the with-body raises.
    """
    from contextlib import contextmanager  # local import keeps the file's import block untouched

    @contextmanager
    def _managed_pool():
        pool = multiprocessing.Pool(*args, **kwargs)
        try:
            yield pool
        finally:
            pool.terminate()

    return _managed_pool()
###############################################################################
def Normalization(inputPath, outputPath, sampleImagePath, num_threads = 8):
    """Stain-normalize every slide folder under ``inputPath`` into ``outputPath``.

    Parameters
    ----------
    inputPath : str        root folder with one sub-folder per slide
    outputPath : str       destination root; already-present slides are skipped
    sampleImagePath : str  reference image used to fit the Macenko normalizer
    num_threads : int      number of worker threads

    NOTE(review): Normalize_Main reads the module-level ``inputPath`` and
    ``outputPath`` globals, not these parameters; callers must keep the
    globals in sync (the script at the bottom of this file does).
    """
    # Bug fix: the fitted normalizer was previously bound to a *local* name,
    # so the module-level ``normalizer`` used by Normalize_Main stayed unset.
    global normalizer
    inputPathContent = os.listdir(inputPath)
    normPathContent = set(os.listdir(outputPath))
    # Process only the slides that are not present in the output yet,
    # skipping stray .bat/.txt files.
    remainlList = [i for i in inputPathContent if i not in normPathContent]
    inputPathContent = [i for i in remainlList
                        if not i.endswith('.bat') and not i.endswith('.txt')]
    target = cv2.imread(sampleImagePath)
    target = cv2.cvtColor(target, cv2.COLOR_BGR2RGB)
    normalizer = stainNorm_Macenko.Normalizer()
    normalizer.fit(target)
    pool = ThreadPool(num_threads)
    pool.map(Normalize_Main, inputPathContent)
    pool.close()
    pool.join()
###############################################################################
# Script entry point: fill in the three paths below before running.  These
# assignments also populate the module-level globals read by Normalize_Main.
inputPath = r""
outputPath = r""
sampleImagePath = r""
Normalization(inputPath, outputPath,sampleImagePath, num_threads = 2)
| true |
abf1e1bece917d607b76eeaeaf364be77d9fbd12 | Python | Aasthaengg/IBMdataset | /Python_codes/p03241/s345879830.py | UTF-8 | 244 | 2.890625 | 3 | [] | no_license | n, m = map(int, input().split())
# Scan divisor pairs (d, m // d) of m from sqrt(m) downward: if the smaller
# divisor d is at least n, its partner m // d is a candidate answer;
# otherwise, if the partner is at least n, d itself is a candidate.
best = 1
divisor = int(m ** 0.5)
while divisor > 0:
    if m % divisor == 0:
        partner = m // divisor
        if divisor >= n:
            best = max(best, partner)
        elif partner >= n:
            best = max(best, divisor)
    divisor -= 1
print(best)
| true |
4f8c0119910d26b2f480a4588373de728a5967d4 | Python | SpenceGuo/py3-learning | /coding/Fibobacci.py | UTF-8 | 85 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | a, b = 0, 1
# Print the Fibonacci numbers up to 10000, comma separated
# (a and b are seeded to 0, 1 above).
while b <= 10000:
    print(b, end=",")
    a, b = b, a + b
| true |
4515818d3b81c85a20ed4c9e433aa0ecb5a3403d | Python | x95102003/leetcode | /binary_tree_level_order_traversal_II.py | UTF-8 | 665 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Definition for a binary tree node.
class TreeNode(object):
    """Binary-tree node that can also report its own bottom-up level order."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
    def levelOrderBottom(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]

        Breadth-first variant: collect node values level by level, then
        reverse so the deepest level comes first.
        """
        levels = []
        frontier = [root] if root is not None else []
        while frontier:
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        levels.reverse()
        return levels
| true |
8b4ba4169ddb8ff0a36bcc5a10a0e81a17a0314b | Python | Krzyzaku21/Git_Folder | /_creating_programs/lotto.py | UTF-8 | 330 | 2.765625 | 3 | [] | no_license | #symulator liczb lotto
# %%
from random import randint as ran
set_nums = set()
def big_lotto():
    """Draw and print six distinct lotto numbers from the range 1-49.

    Bug fix: the original filled the *module-level* ``set_nums``, so every
    call after the first found the set already full and re-printed the same
    numbers.  A local set makes each call an independent draw.  The
    redundant ``int(...)`` around randint was also dropped.
    """
    drawn = set()
    while len(drawn) != 6:
        drawn.add(ran(1, 49))
    # Strip the braces from the set repr, e.g. "{3, 7, ...}" -> "3, 7, ...".
    str_nums = str(drawn).replace("{", "").replace("}", "")
    print(f" Win numbers are: {str_nums}")
big_lotto()
# %%
| true |
fb43e10e7a02ac2b47a2ae08139f5fa8518d6fb2 | Python | lalitmahato/Content-Management-System | /event/models.py | UTF-8 | 1,188 | 2.734375 | 3 | [] | no_license | from django.db import models
from datetime import datetime
from pages.imageCompression import compress_image
class Event(models.Model):
    """
    Event model.

    Fields:
        title                 -- event title (CharField, not a foreign key)
        description           -- long-form event description
        event_image           -- promotional image, compressed on save
        created_at            -- set automatically when the row is created
        location_detail       -- free-text venue description
        event_time            -- display string such as "10 AM - 2 PM"
        event_date            -- date/time the event takes place
        event_location_campus -- "On Campus"-style flag (default "On Campus")
    """
    title = models.CharField(max_length=400)
    description = models.TextField()
    event_image = models.ImageField(upload_to = 'photos/%Y/%m/%d/')
    created_at = models.DateTimeField(auto_now_add=True)
    location_detail = models.CharField(max_length=400)
    event_time = models.CharField(max_length=200)
    event_date = models.DateTimeField()
    event_location_campus = models.CharField(max_length=150,default="On Campus")
    def __str__(self):
        # Human-readable representation used by the admin and listings.
        return self.title
    def save(self, *args, **kwargs):
        """
        Compress ``event_image`` before delegating to the normal model save.
        """
        new_image = compress_image(self.event_image)
        # set self.image to new_image
        self.event_image = new_image
        # save
        super().save(*args, **kwargs)
| true |
565eec001c8fc1064d0747cdb23122f692cb9907 | Python | littlelienpeanut/Leetcode_challenge | /Two_Sum_IV_-_Input_is_a_BST.py | UTF-8 | 715 | 3.140625 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def findTarget(self, root, k):
        """
        Return True if the BST rooted at ``root`` contains two *distinct*
        nodes whose values sum to ``k``.

        :type root: TreeNode
        :type k: int
        :rtype: bool

        Bug fix: the original removed and re-added elements of ``nums``
        while iterating the same set, which mutates a set during iteration
        and can raise RuntimeError or silently skip elements.  Testing
        ``k - n in nums and k - n != n`` is equivalent and safe.  A None
        root (previously an AttributeError) now simply returns False.
        """
        if root is None:
            return False
        # BFS over the tree, collecting all values.
        nums, remain = set(), [root]
        while remain:
            curr = remain.pop(0)
            nums.add(curr.val)
            if curr.left:
                remain.append(curr.left)
            if curr.right:
                remain.append(curr.right)
        for n in nums:
            # k - n != n rules out pairing a node with itself (values in a
            # set are unique, matching the original's remove/check/re-add).
            if k - n in nums and k - n != n:
                return True
        return False
| true |
87649b5087392f339bf998cd7cf2ec8fbfcac361 | Python | sherry-roar/Roar | /pydfs/port1.py | UTF-8 | 502 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'Mr.R'
import socket
# 1. Create the server socket object.
s = socket.socket()
# Get the local host name.
host = socket.gethostname()
# Choose the port to serve on.
port = 12345
# 2. Bind the socket to (host, port).
s.bind((host, port))
# 3. Listen for incoming client connections (backlog of 5).
s.listen(5)
while True:
    c, addr = s.accept() # accept one client connection
    print('连接地址:', addr)
    # msg='欢迎访问百度!'
    # msg=msg.encode("UTF-8")
    # c.send(msg)
# c.close() | true |
2faa36c61324d9d51e2d86a73363641211b26ec0 | Python | mkw18/CellSegmentation | /supplementary_modify/see.py | UTF-8 | 10,167 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu May 28 00:05:53 2020
@author: SC
"""
from __future__ import absolute_import
import cv2
import numpy as np
import os
import os.path as osp
from tensorflow.keras import layers, models, optimizers
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as keras
# turn uint16 to unit8
def unit16b2uint8(img):
    """Coerce a uint16 image to uint8; uint8 input is returned unchanged.

    NOTE(review): the uint16 branch uses a plain astype cast, which keeps
    only the low byte (values wrap modulo 256) instead of rescaling the
    dynamic range -- preserved as-is, confirm it is intended.
    """
    kind = img.dtype
    if kind == 'uint8':
        return img
    if kind == 'uint16':
        return img.astype(np.uint8)
    raise TypeError('No such of img transfer type: {} for img'.format(img.dtype))
# standardization - turn 2D to 3D
def img_standardization(img):
    """Return ``img`` as an H x W x 3 uint8 array.

    2-D inputs are converted to uint8 and replicated across 3 channels;
    3-D inputs are returned untouched; anything else raises TypeError.
    """
    img = unit16b2uint8(img)
    ndim = len(img.shape)
    if ndim == 2:
        stacked = np.expand_dims(img, 2)
        return np.tile(stacked, (1, 1, 3))
    if ndim == 3:
        return img
    raise TypeError('The Depth of image large than 3 \n')
# load images and standardization
def load_images(file_names):
    """Load every file as a float32 grayscale image scaled to [0, 1]."""
    images = []
    for file_name in file_names:
        raw = cv2.imread(file_name, -1)
        gray = bgr_to_gray(img_standardization(raw))
        images.append(gray.astype('float32') / 255)
    return images
def load_images_result(file_names):
    """Load ground-truth masks as binary int16 arrays (1 where pixel > 0)."""
    masks = []
    for file_name in file_names:
        raw = cv2.imread(file_name, -1)
        gray = bgr_to_gray(img_standardization(raw))
        masks.append(np.int16(gray > 0))
    return masks
def bgr_to_gray(img):
    """Collapse a 3-channel BGR image into a single-channel grayscale one."""
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return gray
# unet模型损失函数
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient with a smoothing constant of 100.

    Bug fix: this module imports the backend as ``keras``
    (``import tensorflow.keras.backend as keras``) but the function used
    the undefined name ``K``, so every call raised NameError.
    """
    y_true_f = keras.flatten(y_true)
    y_pred_f = keras.flatten(y_pred)
    intersection = keras.sum(y_true_f * y_pred_f)
    return (2. * intersection + 100) / (keras.sum(y_true_f) + keras.sum(y_pred_f) + 100)
# unet模型损失函数
def dice_coef_np(y_true, y_pred):
    """NumPy version of the soft Dice coefficient (smoothing constant 100).

    Bug fix: the original called ``numpy.sum`` but the module imports the
    package as ``np``, so every call raised NameError.
    """
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + 100) / (np.sum(y_true_f) + np.sum(y_pred_f) + 100)
# unet模型损失函数
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so minimizing the loss maximizes Dice."""
    return -dice_coef(y_true, y_pred)
class myUnet(object):
    """Holds the configured input resolution for a U-Net model."""
    def __init__(self, img_rows=512, img_cols=512):
        # Target height and width of the network input.
        self.img_rows, self.img_cols = img_rows, img_cols
class BinaryThresholding:
    """Callable segmenter: binarize, median-filter, label connected blobs."""
    def __init__(self, threshold):
        # Gray values strictly above this become foreground (255).
        self.threshold = threshold
    def __call__(self, img):
        gray = bgr_to_gray(img)
        # Binarize to {0, 255}, then median-filter to drop salt noise.
        _, mask = cv2.threshold(gray, self.threshold, 255, cv2.THRESH_BINARY)
        mask = cv2.medianBlur(mask, 5)
        # 4-connectivity component labelling; only the label map is returned.
        _, label_img, _, _ = cv2.connectedComponentsWithStats(mask, 4, cv2.CV_32S)
        return label_img
if __name__ == "__main__":
    # ----- data loading --------------------------------------------------
    N = 175
    # segmentor = BinaryThresholding(threshold=110)
    image_path = './dataset1/train'
    result_path = './dataset1/train_GT/SEG'
    image_list = sorted([osp.join(image_path, f) for f in os.listdir(image_path)])[0:N]
    result_list = sorted([osp.join(result_path, f) for f in os.listdir(result_path)])[0:N]
    images = np.array(load_images(image_list)).reshape(N, 628, 628, 1)
    results = np.array(load_images_result(result_list)).reshape(N, 628, 628, 1)

    # ----- resize to the 256x256 network resolution ----------------------
    images_new = np.empty((N, 256, 256))
    results_new = np.empty((N, 256, 256))
    for i in range(N):
        images_new[i] = cv2.resize(images[i], (256, 256))
        results_new[i] = cv2.resize(results[i], (256, 256))

    # Quick visual sanity checks on the first samples.
    for sample in (images_new[0], images_new[1], images_new[2]):
        plt.imshow(sample)
        plt.show()
    images_new = images_new.reshape(N, 256, 256, 1)
    results_new = results_new.reshape(N, 256, 256)
    plt.imshow(results_new[0])
    plt.show()

    # ----- U-Net definition ----------------------------------------------
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        def conv_relu(filters, name, tensor):
            # Standard U-Net block: 3x3 'same' ReLU conv, he_normal init.
            return layers.Conv2D(filters, kernel_size=3, padding='same',
                                 activation='relu', name=name,
                                 kernel_initializer='he_normal')(tensor)

        def up2(tensor):
            # 2x upsampling used on every decoder step.
            return layers.UpSampling2D(size=(2, 2))(tensor)

        input = layers.Input(shape=(256, 256, 1))
        # Encoder.
        conv2d_1_1 = conv_relu(64, 'conv2d_1_1', input)
        conv2d_1_2 = conv_relu(64, 'conv2d_1_2', conv2d_1_1)
        maxpooling2d_1 = layers.MaxPooling2D(pool_size=2, name='maxpooling2d_1')(conv2d_1_2)
        conv2d_2_1 = conv_relu(128, 'conv2d_2_1', maxpooling2d_1)
        conv2d_2_2 = conv_relu(128, 'conv2d_2_2', conv2d_2_1)
        maxpooling2d_2 = layers.MaxPooling2D(pool_size=2, name='maxpooling2d_2')(conv2d_2_2)
        conv2d_3_1 = conv_relu(256, 'conv2d_3_1', maxpooling2d_2)
        conv2d_3_2 = conv_relu(256, 'conv2d_3_2', conv2d_3_1)
        maxpooling2d_3 = layers.MaxPooling2D(pool_size=2, name='maxpooling2d_3')(conv2d_3_2)
        conv2d_4_1 = conv_relu(512, 'conv2d_4_1', maxpooling2d_3)
        conv2d_4_2 = conv_relu(512, 'conv2d_4_2', conv2d_4_1)
        dropout_4 = layers.Dropout(rate=0.5, noise_shape=None, seed=None, name='dropout_4')(conv2d_4_2)
        maxpooling2d_4 = layers.MaxPooling2D(pool_size=2, name='maxpooling2d_4')(dropout_4)
        # Bottleneck.
        conv2d_5_1 = conv_relu(1024, 'conv2d_5_1', maxpooling2d_4)
        conv2d_5_2 = conv_relu(1024, 'conv2d_5_2', conv2d_5_1)
        dropout_5 = layers.Dropout(rate=0.5, noise_shape=None, seed=None, name='dropout_5')(conv2d_5_2)
        # Decoder with skip connections.
        conv2d_6_1 = conv_relu(512, 'conv2d_6_1', up2(dropout_5))
        concatenate_6 = layers.Concatenate(axis=-1, name='concatenate_6')([dropout_4, conv2d_6_1])
        conv2d_6_2 = conv_relu(512, 'conv2d_6_2', concatenate_6)
        conv2d_6_3 = conv_relu(512, 'conv2d_6_3', conv2d_6_2)
        conv2d_7_1 = conv_relu(256, 'conv2d_7_1', up2(conv2d_6_3))
        concatenate_7 = layers.Concatenate(axis=-1, name='concatenate_7')([conv2d_3_2, conv2d_7_1])
        conv2d_7_2 = conv_relu(256, 'conv2d_7_2', concatenate_7)
        conv2d_7_3 = conv_relu(256, 'conv2d_7_3', conv2d_7_2)
        conv2d_8_1 = conv_relu(128, 'conv2d_8_1', up2(conv2d_7_3))
        concatenate_8 = layers.Concatenate(axis=-1, name='concatenate_8')([conv2d_2_2, conv2d_8_1])
        conv2d_8_2 = conv_relu(128, 'conv2d_8_2', concatenate_8)
        conv2d_8_3 = conv_relu(128, 'conv2d_8_3', conv2d_8_2)
        conv2d_9_1 = conv_relu(64, 'conv2d_9_1', up2(conv2d_8_3))
        concatenate_9 = layers.Concatenate(axis=-1, name='concatenate_9')([conv2d_1_2, conv2d_9_1])
        conv2d_9_2 = conv_relu(64, 'conv2d_9_2', concatenate_9)
        conv2d_9_3 = conv_relu(64, 'conv2d_9_3', conv2d_9_2)
        conv2d_9_4 = conv_relu(2, 'conv2d_9_4', conv2d_9_3)
        # Per-pixel sigmoid output.
        conv2d_10 = layers.Conv2D(1, kernel_size=1, activation='sigmoid', name='conv2d_10')(conv2d_9_4)

    # ----- training -------------------------------------------------------
    model = models.Model(inputs=input, outputs=conv2d_10)
    model.summary()
    model.compile(optimizer=optimizers.SGD(lr=1e-5, momentum=0.9, nesterov=True),
                  loss='binary_crossentropy', metrics=['accuracy'])
    model.fit(x=images_new, y=results_new, batch_size=16, epochs=16)
    model.save_weights('unet_test_10.h5', save_format='h5')
    new = model.predict(images_new).reshape(N, 256, 256)
    plt.imshow(new[0])
plt.show() | true |
eeed92298ada742ea09fe30c7f347f72294990e5 | Python | KarolAntczak/DeepStenosisDetection | /Network.py | UTF-8 | 1,579 | 2.703125 | 3 | [] | no_license | import pickle
from Keras.keras.backend import *
from Keras.keras.layers import *
from Keras.keras.models import *
from Keras.keras.optimizers import SGD, Adam
def load_dataset(filename):
    """Unpickle and return the dataset stored in ``filename``.

    Bug fix: the original leaked the file handle
    (``pickle.load(open(filename, 'rb'))``); a ``with`` block guarantees
    the file is closed even if unpickling raises.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def generate_output_set(dataset, assigned_class):
    """Return a 1-D label vector: one ``assigned_class`` entry per sample."""
    sample_count = dataset.shape[0]
    return np.full(sample_count, assigned_class)
def unison_shuffled_copies(a, b):
    """Shuffle arrays ``a`` and ``b`` with one shared random permutation,
    so corresponding samples and labels stay aligned."""
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
# ---- Build the training set: negatives labelled 0, positives labelled 1 ----
negative_dataset = load_dataset("datasets/5000x32x32 negative.pickle")
positive_dataset = load_dataset("datasets/5000x32x32 positive.pickle")
x_set = np.concatenate((negative_dataset,
                        positive_dataset))
y_set = np.concatenate((generate_output_set(negative_dataset, 0),
                        generate_output_set(positive_dataset, 1)))
# Shuffle samples and labels with the same permutation.
x_set, y_set = unison_shuffled_copies(x_set, y_set)
print("X set: %s, Y set: %s" % (x_set.shape, y_set.shape))
# ---- Single-hidden-layer MLP binary classifier ----
model = Sequential()
model.add(Flatten(input_shape=(x_set.shape[1], x_set.shape[2], x_set.shape[3])))
model.add(Dense(128, activation='relu', kernel_initializer='random_uniform', bias_initializer='random_uniform' ))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid', kernel_initializer='random_uniform', bias_initializer='random_uniform'))
model.compile(loss='binary_crossentropy', optimizer=SGD(momentum=0.8), metrics=['binary_accuracy'])
print(model.summary())
# 20% of the data is held out for validation during training.
model.fit(x_set, y_set, batch_size=100, epochs=1000, verbose=2, validation_split=0.2)
model.save("networks/128d1d.net", overwrite=True)
| true |
2d2896886fbc8efec9b56653b392f0e39c58cf63 | Python | JoaoPauloAntunes/Python | /exs-python-brasil/EstruturaSequencial/5-metros-para-centimentros.py | UTF-8 | 88 | 3.71875 | 4 | [] | no_license | # 1 m = 100 cm
metros = float(input('Metros: '))
print(f'centímetros: {metros * 100}') | true |
cee2e189d2974098579fa0aa07c3c7319950d1e1 | Python | utkuozbulak/pytorch-cnn-visualizations | /src/LRP.py | UTF-8 | 5,680 | 2.8125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 14 13:32:09 2022
@author: ut
"""
import copy
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from misc_functions import apply_heatmap, get_example_params
class LRP():
    """Layer-wise Relevance Propagation with the gamma + epsilon rules.

    Largely follows the tutorial code at
    https://git.tu-berlin.de/gmontavon/lrp-tutorial, cleaned up and
    re-organized.
    """
    def __init__(self, model):
        self.model = model

    def LRP_forward(self, layer, input_tensor, gamma=None, epsilon=None):
        """Forward through a *copy* of ``layer`` with LRP-modified params.

        Both the gamma and the epsilon rule are applied to every layer
        here; the original paper discusses choosing rules per layer depth.
        """
        if gamma is None:
            # LRP-gamma: boost positive weights by 5%.
            gamma = lambda value: value + 0.05 * copy.deepcopy(value.data.detach()).clamp(min=0)
        if epsilon is None:
            # LRP-epsilon: small stabilizer added to the output.
            eps = 1e-9
            epsilon = lambda value: value + eps
        # Copy so the live model and autograd graph are left untouched.
        layer = copy.deepcopy(layer)
        try:
            layer.weight = nn.Parameter(gamma(layer.weight))
        except AttributeError:
            pass  # layer has no weight (e.g. pooling)
        try:
            layer.bias = nn.Parameter(gamma(layer.bias))
        except AttributeError:
            pass  # layer has no bias
        return epsilon(layer(input_tensor))

    def LRP_step(self, forward_output, layer, LRP_next_layer):
        """Propagate relevance one layer backwards."""
        forward_output = forward_output.requires_grad_(True)
        # Forward output under the LRP rules.
        rule_out = self.LRP_forward(layer, forward_output, None, None)
        # Element-wise ratio of incoming relevance to the rule output.
        ratio = (LRP_next_layer / rule_out).data
        # Backprop the weighted sum to get the gradient at this layer.
        (rule_out * ratio).sum().backward()
        return (forward_output * forward_output.grad).data

    def generate(self, input_image, target_class):
        feature_layers = list(self.model._modules['features'])
        classifier_layers = list(self.model._modules['classifier'])
        layers_in_model = feature_layers + classifier_layers
        number_of_layers = len(layers_in_model)
        # Index where the tensor is flattened (features -> classifier).
        features_to_classifier_loc = len(feature_layers)

        # Forward pass, keeping every intermediate activation.
        forward_output = [input_image]
        for conv_layer in feature_layers:
            forward_output.append(conv_layer.forward(forward_output[-1].detach()))
        # Remember the pre-flatten shape to undo it on the way back.
        feature_to_class_shape = forward_output[-1].shape
        forward_output[-1] = torch.flatten(forward_output[-1], 1)
        for classifier_layer in classifier_layers:
            forward_output.append(classifier_layer.forward(forward_output[-1].detach()))

        # One-hot relevance at the output for the requested class.
        target_class_one_hot = torch.FloatTensor(1, 1000).zero_()
        target_class_one_hot[0][target_class] = 1
        LRP_per_layer = [None] * number_of_layers + [(forward_output[-1] * target_class_one_hot).data]

        for layer_index in range(1, number_of_layers)[::-1]:
            if layer_index == features_to_classifier_loc - 1:
                # Undo the flatten so shapes line up with the feature maps.
                LRP_per_layer[layer_index+1] = LRP_per_layer[layer_index+1].reshape(feature_to_class_shape)
            layer = layers_in_model[layer_index]
            if isinstance(layer, (torch.nn.Linear, torch.nn.Conv2d, torch.nn.MaxPool2d)):
                # NOTE: the paper replaces maxpool with avgpool; kept as-is
                # to avoid modifying the model.  Adjust if needed.
                LRP_per_layer[layer_index] = self.LRP_step(forward_output[layer_index], layer, LRP_per_layer[layer_index+1])
            else:
                # Non-parametric layers (ReLU, dropout, ...) pass relevance through.
                LRP_per_layer[layer_index] = LRP_per_layer[layer_index+1]
        return LRP_per_layer
if __name__ == '__main__':
    # Get example image, preprocessed input, target class and pretrained model
    target_example = 2  # Spider
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_example_params(target_example)
    # LRP
    layerwise_relevance = LRP(pretrained_model)
    # Generate visualization(s)
    LRP_per_layer = layerwise_relevance.generate(prep_img, target_class)
    # Convert the output nicely, selecting the first layer and summing channels
    lrp_to_vis = np.array(LRP_per_layer[1][0]).sum(axis=0)
    lrp_to_vis = np.array(Image.fromarray(lrp_to_vis).resize((prep_img.shape[2],
                          prep_img.shape[3]), Image.ANTIALIAS))
    # Apply heatmap and save (saving happens on the next line below)
    heatmap = apply_heatmap(lrp_to_vis, 4, 4)
heatmap.figure.savefig('../results/LRP_out.png')
| true |
74ff4a54c781d9ed1ed46465f064e11ed96a36cb | Python | jokojeke/Python | /week1/1.1.py | UTF-8 | 277 | 3.640625 | 4 | [] | no_license | name =input('What is your name?\n')
# Read the remaining details (the name was read just above), then echo
# the first three collected values back, one per line.
surname = input('What is your surname?\n')
Education = int(input('What is your Education\n'))
Studentcode = int(input('What is your Studentcode\n'))
print(" %s." % name)
print(" %s." % surname)
print(" %s." % Education)
print(" %s." %Studentcode) | true |
8a7a5e589433400aa97fd1de80f41e5bf18407b4 | Python | dnaka/EV3_sample | /sample/testColor.py | UTF-8 | 570 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import ColorSensor
from pybricks.parameters import Port, Color
from pybricks.tools import wait, DataLog
"""
Code for checking the color sensor.
"""
colorSensor = ColorSensor(Port.S3)
ev3 = EV3Brick()
# Log file setup (disabled)
#data = DataLog('R', 'G', 'B', append=True)
while True:
    # Read the current RGB reflection values and show them on the brick
    # screen, refreshing once per second.
    (r, g, b) = colorSensor.rgb()
    ev3.screen.clear()
    message = "R:" + str(r) + " G:" + str(g) + " B:" + str(b)
    ev3.screen.draw_text(0, 0, message)
    #data.log(r, g, b)
    wait(1000)
| true |
c9eba56daa4f391483fc4f91594868ba07371af0 | Python | bw33/Python_Games | /Week 2 Project - Guess_The_Number.py | UTF-8 | 2,237 | 4.0625 | 4 | [] | no_license | # Guess the Number Game
# Import the module
import simplegui
import random
# Define global variables (program state)
# The instructions banner below is printed once when the script starts.
print "Instructions!"
print "_____________"
print ""
print "Pick a number range, i.e. from 1-100 or 1-1000."
print "The goal of the game is to guess correctly the number"
print "that the computer randomly picks. You get 7 chances"
print "to guess the number for the first game and 10 chances"
print "to guess the number for the second game. Good luck!"
# Placeholder; rebound to a random int when a game-mode button is pressed.
secret_number = 0
# Define event handler functions
def new_game():
    # Announce a fresh round.
    print ""
    print "Step right up and guess the number!"
    # NOTE(review): the bare expression below is a no-op; it neither calls
    # nor registers input_guess.
    input_guess
def secret_number():
    # Start the 0-100 game mode with 7 tries.
    # NOTE(review): this rebinds the module name ``secret_number`` to an int
    # (the button keeps working because simplegui stored the function object),
    # ``min``/``max`` shadow the Python builtins, and randrange(0, 101)
    # allows 0 although the instructions say 1-100 -- confirm intended.
    global secret_number
    secret_number = random.randrange(0, 101)
    global counter
    counter = 7
    global min
    min = 0
    global max
    max = 100
    new_game()
def secret_number2():
    # Start the 0-1000 game mode with 10 tries (same caveats as
    # secret_number(): name rebinding and shadowed min/max builtins).
    global secret_number
    secret_number = random.randrange(0,1001)
    global counter
    counter = 10
    global min
    min = 0
    global max
    max = 1000
    new_game()
def input_guess(guess):
    # Handles a guess typed into the input field; ``guess`` is a string.
    # ``min``/``max``/``secret_number``/``counter`` are the module globals
    # set by the game-mode buttons.
    global counter
    counter = counter - 1
    counter2 = str(counter)
    guess2 = int(guess)
    print ""
    print "Your guess is: " + guess
    # NOTE(review): a correct guess on the last try is reported as a loss
    # (the counter == 0 branch wins), out-of-range guesses still consume a
    # try, and nothing resets the game on win/lose -- confirm intended.
    if counter == 0:
        print "Game over! You lose! Please start another game!"
    elif guess2 < min:
        print "Error, please choose a number within range."
        print "Number of tries: " + counter2
    elif guess2 > max:
        print "Error, please choose a number within range."
        print "Number of tries left: " + counter2
    elif guess2 < secret_number:
        print "Higher!"
        print "Number of tries left: " + counter2
    elif guess2 > secret_number:
        print "Lower!"
        print "Number of tries left: " + counter2
    elif guess2 == secret_number:
        print "Awesome! Good job guessing correctly. Play again!"
        print "Please choose, again, one of the game modes..."
# Create a frame
frame = simplegui.create_frame("Guess the number", 200, 200)
# Register event handlers: each button starts a new round in its range,
# and the input field feeds guesses to input_guess.
frame.add_button("0-100", secret_number, 100)
frame.add_button("0-1000", secret_number2, 100)
frame.add_input("Number Game!", input_guess, 100)
# Start frame and timers
frame.start()
| true |
22a6875d3591c9d8b3db56b902f8994134464970 | Python | johnhjernestam/John_Hjernestam_TE19C | /Programmeringslaboration/Uppgift 1/euppgift.py | UTF-8 | 1,081 | 3.84375 | 4 | [] | no_license | import random as rnd # Importerar random för att få slumpmässiga punkter till rad 4 och 5
n = 0 # Counts how many sampled points land inside the unit circle
for k in range(20): # First pass k = 0, next k = 1, and so on up to k = 19
    x = rnd.uniform(-1,1) # Indented, so this line belongs to the for loop
    y = rnd.uniform(-1,1)
    print("(x,y) = (",x,",", y,")" )
    if x**2+y**2 <= 1: # Radius <= 1 means the point is inside the circle.
        n += 1 # Add one for every point that lands inside the circle.
    print(n) # Print the running count of points inside the circle
print("Antal punkter i cirkeln dividerat med antal simulerade punkter = ",n/(k+1)*100, "%")
# Computes the percentage of points inside the circle: n (points inside)
# divided by k+1 (k is nineteen, so +1 gives the 20 sampled points),
# times 100 to express the answer in percent.
| true |
18ee8cb9eca62af3e2719fa5bb63aafe8d69e3ac | Python | Fondamenti18/fondamenti-di-programmazione | /students/1742740/homework04/program02.py | UTF-8 | 11,073 | 3.875 | 4 | [] | no_license | '''
Il tris e' un popolarissimo gioco. Si gioca su una griglia quadrata di 3×3 caselle.
A turno, i due giocatori scelgono una cella vuota e vi disegnano il proprio simbolo
(un giocatore ha come simbolo una "o" e l'avversario una 'x').
Vince il giocatore che riesce a disporre tre dei propri simboli in linea retta
orizzontale, verticale o diagonale. Se la griglia viene riempita
senza che nessuno dei giocatori sia riuscito a completare una linea
retta di tre simboli, il gioco finisce in parità. Nel caso in cui il gioco
finisse in parità, la partita è detta "patta".
Per convenzione a griglia vuota la prima mossa spetta sempre al giocatore 'o'
Una configurazione del gioco e' dunque univocamente determinata dal contenuto della griglia.
Nel seguito assumiamo che il contenuto della griglia sia rappresentato tramite lista di liste.
La dimensione della lista di liste M e' 3x3 ed M[i][j] contiene '', 'x', o 'o' a seconda
che la cella della griglia appartenente all'iesima riga e j-ma colonna sia ancora libera,
contenga il simbolo 'x' o contenga il simbolo 'o'.
Data una configurazione C del gioco, l'albero di gioco per C e' l'albero che
si ottiene ricorsivamente partendo dalla configurazione C e assegnando come figli le configurazioni
che e' possibile ottenere da C con una mossa ulteriore del gioco. Ovviamente risulteranno
foglie dell'albero i possibili esiti della partita vale a dire le diverse configurazioni cui e'
possibile arrivare partendo da C e che rappresentano patte, vittorie per 'o' o vittorie per 'x'.
Se veda ad esempio l'immagine albero_di_gioco.png che mostra l' albero di gioco che si ottiene a partire
dalla configurazione rappresentata da [['x', 'o', 'o'], ['x', 'x', 'o'], ['', '', '']]
Si consideri la seguente Classe di oggetti:
class NodoTris:
def __init__(self, griglia):
self.nome = griglia
self.lista_figli = []
Bisogna progettare le seguente funzione
gen_tree(griglia)
che, data la configurazione di gioco griglia, costruisce l'albero di gioco che si ottiene a partire
dalla configurazione griglia e ne restituisce la radice. I nodi dell'albero devono essere
oggetti della classe NodoTris.
Per testare la correttezza della vostra implementazione di gen_tree() il grade utilizzera' quattro metodi
della classe NodoTris che dovete comunque implementare:
1)
tipo(self)
che, dato un nodo NodoTris, restituisce:
'o' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'o'
'x' se la configurazione rappresentata dal nodo e' una configurazione di vittoria per il giocatore 'x'
'-' se la configurazione rappresentata dal nodo e' una configurazione di patta
'?' se la configurazione rappresentata dal nodo e' una configurazione di gioco non ancora terminato
2)
esiti(self)
che, dato un nodo radice di un albero di gioco, restituisce una tripla con i possibili
esiti della partita che ha come configurazione iniziale quella rappresentata dal nodo.
Piu' precisamente: il primo elemento della tripla è il numero di patte possibili,
il secondo è il numero di possibili vittorie per il giocatore 'o' mentre il terzo elemento
e' il numero di possibili vittorie per il giocatore 'x'.
3)
vittorie_livello(self, giocatore, h)
che, dato un nodo radice di un albero di gioco, uno dei due giocatori ed un intero h,
restituisce il numero di nodi che rappresentano una vittoria per il giocatore e si
trovano ad altezza h nell'albero. In altri termini restituisce il numero di vittorie possibili
per giocatore in esattamente h mosse, nella partita che ha come configurazione iniziale
quella rappresentata dalla radice dell'albero.
4)
strategia_vincente(self,giocatore)
che, dato un nodo radice di un albero di gioco ed uno dei due giocatori, restituisce True o False.
Restituisce True se giocatore ha una strategia vincente nella partita
che ha come configurazione iniziale quella rappresentata dal nodo radice, False altrimenti.
Nota che un giocatore ha una strategia vincente rispetto ad una certa configurazione se,
qualunque siano le mosse dell'avversario ha sempre la possibilita' di rispondere in modo
che la partita termini con la sua vittoria.
Potete ovviamente definire ulteriori funzioni e altri metodi per la Classe NodiTris
se li ritenete utili al fine della risoluzione del compito.
Potete assumere che le configurazioni di gioco rappresentate da griglia siano sempre configurazioni
lecite (vale a dire ottenute dopo un certo numero di mosse a parire dalla griglia vuota).
AVVERTENZE: non usare caratteri non ASCII, come le lettere accentate; non
importare moduli che non sono nella libreria standard.
ATTENZIONE: i test vengono eseguiti con un timeout globale di 2*N secondi (se il grader esegue N test).
'''
class NodoTris:
    """A node of the tic-tac-toe game tree."""
    def __init__(self, griglia):
        self.nome = griglia #the node's 3x3 grid (list of lists)
        self.lista_figli = [] #list of child nodes
        self.vuoti=None #number of empty cells left in the grid
        self.turn='' #player who moves from this configuration
        self.win='' #outcome of the grid ('o'/'x'/'-'); '' while unset
        self.height=0 #depth of this node inside the game tree
    def tipo(self): #returns the outcome/state of this configuration
        return vitt(self)
    def esiti(self):#tallies every possible outcome reachable from this node
        # Returns (draws, wins for 'o', wins for 'x') over all leaves below.
        n_s=0
        n_x=0
        n_o=0
        if self.win=='':
            for i in self.lista_figli:
                t2=i.esiti()
                n_s+=t2[0]
                n_o+=t2[1]
                n_x+=t2[2]
        else:
            if self.win=='-':
                n_s+=1
            elif self.win=='o':
                n_o+=1
            elif self.win=='x':
                n_x+=1
        return (n_s,n_o,n_x)
    def vittorie_livello(self, giocatore, h): #number of wins for giocatore exactly h moves below this node
        l=h+self.height
        return conta_vitt(self,giocatore,l)
    def strategia_vincente(self,giocatore): #True if giocatore has a winning strategy from here
        # strategia() returns +1/-1 for a forced win; NOTE(review): the sign
        # conventions below depend on how strategia() interprets its second
        # argument -- verify the giocatore == self.turn == 'x' case.
        if self.turn!=giocatore:
            v = strategia(self, self.turn)
            if v == -1:
                return True
            else:
                return False
        else:
            v = strategia(self, self.turn)
            if v==1:
                return True
            else:
                return False
def strategia(nodo, g):
    """Minimax value of a position: +1 if 'o' forces a win, -1 if 'x' does, 0 otherwise.

    `g` is the player that moves from `nodo`: 'o' maximises, 'x' minimises.
    Leaves are scored from their recorded outcome (`win`).
    """
    if not nodo.lista_figli:
        # Leaf: score the final grid with the absolute convention above.
        return {'o': 1, 'x': -1}.get(nodo.win, 0)
    if g == 'o':
        best = -1
        for figlio in nodo.lista_figli:
            best = max(best, strategia(figlio, 'x'))
    else:
        best = 1
        for figlio in nodo.lista_figli:
            best = min(best, strategia(figlio, 'o'))
    return best
def conta_vitt(radice, g, l):
    """Count how many nodes at absolute tree level `l` record a win for `g`.

    Nodes above level `l` delegate to their children; a node at level `l`
    contributes 1 exactly when its outcome equals `g`.
    """
    if radice.height == l:
        return 1 if radice.win == g else 0
    return sum(conta_vitt(figlio, g, l) for figlio in radice.lista_figli)
def vitt(radice):
    """Classify the node's grid: the winning mark, '-' for a full draw,
    or '?' when the game is still open.

    Diagonal, vertical and horizontal checks are tried in that priority
    order, matching the original evaluation.
    """
    for controllo in (win_diag, win_vert, win_oriz):
        esito = controllo(radice.nome)
        if esito is not None:
            return esito
    return '-' if radice.vuoti == 0 else '?'
def stallo(griglia):
    """Return '-' when every cell of the 3x3 grid is filled, '?' otherwise."""
    tutte_piene = all(griglia[y][x] != '' for y in range(3) for x in range(3))
    return '-' if tutte_piene else '?'
def win_oriz(griglia):
    """Return the mark ('x'/'o') completing a horizontal line, or None.

    Later rows override earlier ones, exactly as in the original scan.
    """
    vincitore = None
    for y in range(3):
        if griglia[y] == ['x', 'x', 'x']:
            vincitore = 'x'
        elif griglia[y] == ['o', 'o', 'o']:
            vincitore = 'o'
    return vincitore
def win_vert(griglia):
    """Return the mark ('x'/'o') completing a vertical line, or None."""
    vincitore = None
    for x in range(3):
        colonna = [griglia[y][x] for y in range(3)]
        if colonna == ['x', 'x', 'x']:
            vincitore = 'x'
        elif colonna == ['o', 'o', 'o']:
            vincitore = 'o'
    return vincitore
def win_diag(griglia):
    """Return the mark completing a diagonal through the centre, or None.

    Every winning diagonal of a 3x3 grid passes through cell (1, 1), so the
    centre mark decides which player could possibly have won.
    """
    centro = griglia[1][1]
    if centro not in ('x', 'o'):
        return None
    principale = griglia[0][0] == centro and griglia[2][2] == centro
    secondaria = griglia[0][2] == centro and griglia[2][0] == centro
    return centro if (principale or secondaria) else None
def successivo(griglia, turn, vuote):
    """Return one NodoTris child per empty cell, with `turn`'s mark played there."""
    figli = []
    if vuote == 0:
        return figli
    for y in range(3):
        for x in range(3):
            if griglia[y][x] != '':
                continue
            # Copy the 3x3 grid (cells are strings, so row copies suffice),
            # then play the move at (y, x).
            copia = [[griglia[k][j] for j in range(3)] for k in range(3)]
            copia[y][x] = turn
            figli.append(NodoTris(copia))
    return figli
def vuoti(griglia):
    """Count the cells of the 3x3 grid not occupied by 'x' or 'o'."""
    return sum(
        1
        for y in range(3)
        for x in range(3)
        if griglia[y][x] != 'x' and griglia[y][x] != 'o'
    )
def turno(griglia):
    """Return the player who moves next: 'o' opens, so equal counts mean 'o'.

    Implicitly returns None when 'x' is ahead, which cannot occur in a legal
    game; this mirrors the original behaviour.
    """
    celle = [griglia[y][x] for y in range(3) for x in range(3)]
    quante_x = celle.count('x')
    quante_o = celle.count('o')
    if quante_x == quante_o:
        return 'o'
    if quante_o > quante_x:
        return 'x'
def ins_tree(r,radice): #inserts node r into the tree under its parent radice
    """Fill in r's counters and outcome, then recursively expand its children.

    `r.nome` must already hold the grid reached by one move from `radice`.
    The recursion stops as soon as the grid is full or already decided.
    """
    v=radice.vuoti-1
    r.vuoti=v
    r.height=radice.height+1
    if r.vuoti==0:
        # Board full: record the final outcome ('x', 'o' or '-').
        r.win=vitt(r)
    else:
        w=vitt(r)
        if w!='?':
            # Game already decided before the board is full: stop expanding.
            r.win=w
        else:
            # Alternate the turn and generate one child per empty cell.
            if radice.turn=='o': r.turn='x'
            else: r.turn='o'
            r.lista_figli=successivo(r.nome,r.turn,r.vuoti)
            for i in r.lista_figli:
                ins_tree(i,r)
def gen_tree(griglia): #builds the full game tree for the given configuration
    """Create the NodoTris tree rooted at `griglia` and return its root."""
    v=vuoti(griglia)
    radice=NodoTris(griglia)
    radice.vuoti=v
    if radice.vuoti==0:
        # Board already full: only the final outcome has to be recorded.
        radice.win=vitt(radice)
    else:
        w=vitt(radice)
        if w!='?':
            # Position already won/lost: no children are generated.
            radice.win=w
        else:
            radice.turn=turno(radice.nome)
            radice.lista_figli=successivo(radice.nome,radice.turn,radice.vuoti)
            for i in radice.lista_figli:
                ins_tree(i,radice)
    return radice
| true |
1a9d4463d479c4264fa1f75add3d3da978618895 | Python | kimbomi99/05_week | /funtional_test.py | UTF-8 | 3,153 | 3.015625 | 3 | [] | no_license | from selenium import webdriver
import unittest
class FuntionalTest(unittest.TestCase):
    """Django view tests plus a Selenium browser test for the polls app.

    NOTE(review): the nested classes reference `TestCase`, `create_question`,
    `datetime` and `Choice`, and `test_go_to_result_page` uses `self.driver`,
    none of which are defined or imported in this file — the module looks like
    a partial copy from a Django tutorial and needs those imports/fixtures to
    run.  The only code change made here is removing a stray dataset artifact
    (`| true |`) that was fused onto the final closing parenthesis and made
    the file a syntax error.
    """

    class QuestionDetailViewTests(TestCase):
        ...

        def test_has_a_href_link(self):
            """
            Questions with a pub_date in the past are displayed on the
            detail page with a href link to result page.
            """
            question = create_question(question_text="Recent question.", timedelta_from_now=datetime.timedelta(days=-30))
            response = self.client.get(f"/polls/{question.id}/")
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, f'href="/polls/{question.id}/results/"')

    class QuestionResultViewTests(TestCase):

        def test_future_question(self):
            """
            The result view of a question with a pub_date in the future
            returns a 404 not found.
            """
            future_question = create_question(question_text='Future question.',
                                              timedelta_from_now=datetime.timedelta(days=5))
            response = self.client.get(f'/polls/{future_question.id}/results/')
            self.assertEqual(response.status_code, 404)

        def test_past_question(self):
            """
            The result view of a question with a pub_date in the past
            displays the question's text.
            """
            past_question = create_question(question_text='Future question.',
                                            timedelta_from_now=datetime.timedelta(days=-5))
            response = self.client.get(f'/polls/{past_question.id}/results/')
            self.assertContains(response, past_question.question_text)

        def test_past_question_with_choices(self):
            """
            The result view of a question with a pub_date in the past
            displays the question's result.
            """
            past_question = create_question(question_text='Future question.',
                                            timedelta_from_now=datetime.timedelta(days=-5))
            choice1 = Choice(question=past_question, choice_text="choice 1")
            choice1.save()
            choice2 = Choice(question=past_question, choice_text="choice 2")
            choice2.save()
            response = self.client.get(f'/polls/{past_question.id}/results/')
            self.assertContains(response, f"Choice: {choice1.choice_text}")
            self.assertContains(response, f"Vote Count: {choice1.votes}")

    def test_go_to_result_page(self):
        """Drive a real browser from the detail page to the results page."""
        self.driver.get("http://localhost:8000/polls/1/")
        a_tag = self.driver.find_element_by_tag_name("a")
        self.assertIn(a_tag.text, "투표 결과 보기")
        a_tag.click()
        self.assertEqual(self.driver.current_url, "http://localhost:8000/polls/1/results/")
        self.assertIn(self.driver.find_element_by_tag_name("h1").text, "What's up?")
        p_tags = self.driver.find_elements_by_tag_name("ul > p")
        self.assertTrue(
            any('Choice:' in p_tag.text for p_tag in p_tags)
        )
        self.assertTrue(
            any('Vote Count:' in p_tag.text for p_tag in p_tags)
        )
f460e723d8847fc9d80dddf1b262a731487a49a2 | Python | thangteo/TheCryptoBall | /telegram.py | UTF-8 | 646 | 2.859375 | 3 | [] | no_license | import datetime
import json
import requests
import time
import urllib
# define key/global variables
baseURL = "https://api.telegram.org/bot"
# send message through telegram bot
def send_message(text, chatID, token):
    """URL-encode `text` and deliver it to chat `chatID` via the bot `token`.

    Returns the raw response body from the Telegram API.
    """
    text = urllib.parse.quote(text)
    url = baseURL + token + "/sendMessage?text={0}&chat_id={1}".format(text, chatID)
    # Bug fixes: the original referenced an undefined name `chat_id` instead
    # of the `chatID` parameter, and built the URL without ever issuing the
    # request, so no message was sent.
    return get_url(url)
# decode response into JSON object
def get_json(url):
    """Fetch `url` and parse the response body as JSON."""
    return json.loads(get_url(url))
# send command and return response
def get_url(url):
    """GET `url` and return the response body decoded as UTF-8 text."""
    return requests.get(url).content.decode("utf8")
2350a5160bb8de897ec1e0da1029b2a6bc59163f | Python | JASAdrian1/EDD_SmartClass_201901704 | /Fase2/matriz_dispersa/lista_interna_matriz.py | UTF-8 | 3,194 | 3.046875 | 3 | [] | no_license | from matriz_dispersa.nodo_interno import nodo_interno_matriz
class lista_interna_matriz:
    """Doubly linked list of sparse-matrix nodes kept sorted by position.

    `insertarx` maintains a horizontal list ordered by each node's `posy`
    (linked through `siguiente`/`anterior`); `insertary` maintains a vertical
    list ordered by `posx` (linked through `abajo`/`arriba`).  When a node for
    the same (x, y) cell already exists, the new task is added to that node's
    task list instead of creating a duplicate node.
    """

    def __init__(self):
        self.primero = None  # first node of the list (None when empty)

    def insertarx(self, tarea, x, y):
        """Insert `tarea` into the horizontal list, ordered by `posy`."""
        nuevo_nodo = nodo_interno_matriz(tarea, x, y)
        if self.primero is not None:
            if nuevo_nodo.posy < self.primero.posy:
                # New head of the list.
                nuevo_nodo.siguiente = self.primero
                self.primero.anterior = nuevo_nodo
                self.primero = nuevo_nodo
            else:
                tmp = self.primero
                while tmp is not None:
                    if nuevo_nodo.posy < tmp.posy:
                        # Insert before `tmp`.
                        nuevo_nodo.siguiente = tmp
                        nuevo_nodo.anterior = tmp.anterior
                        tmp.anterior.siguiente = nuevo_nodo
                        tmp.anterior = nuevo_nodo
                        break
                    # If the node already exists, only the task is appended to
                    # the existing node's task list.
                    elif nuevo_nodo.posx == tmp.posx and nuevo_nodo.posy == tmp.posy:
                        tmp.tareas.insertar(tarea)
                        break
                    else:
                        if tmp.siguiente == None:
                            # Reached the tail: append.
                            tmp.siguiente = nuevo_nodo
                            nuevo_nodo.anterior = tmp
                            break
                        else:
                            tmp = tmp.siguiente
        else:
            self.primero = nuevo_nodo

    def insertary(self, tarea, x, y):
        """Insert `tarea` into the vertical list, ordered by `posx`."""
        nuevo_nodo = nodo_interno_matriz(tarea, x, y)
        if self.primero is not None:
            if nuevo_nodo.posx < self.primero.posx:
                nuevo_nodo.abajo = self.primero
                self.primero.arriba = nuevo_nodo
                self.primero = nuevo_nodo
            else:
                tmp = self.primero
                while tmp is not None:
                    if nuevo_nodo.posx < tmp.posx:
                        nuevo_nodo.abajo = tmp
                        nuevo_nodo.arriba = tmp.arriba
                        tmp.arriba.abajo = nuevo_nodo
                        tmp.arriba = nuevo_nodo
                        break
                    # If the node already exists, only the task is appended to
                    # the existing node's task list.
                    elif nuevo_nodo.posx == tmp.posx and nuevo_nodo.posy == tmp.posy:
                        tmp.tareas.insertar(tarea)
                        break
                    else:
                        if tmp.abajo == None:
                            tmp.abajo = nuevo_nodo
                            nuevo_nodo.arriba = tmp
                            break
                        else:
                            # BUG FIX: advance along the vertical chain.  The
                            # original stepped through `siguiente` (the
                            # horizontal link), which broke the traversal here.
                            tmp = tmp.abajo
        else:
            self.primero = nuevo_nodo
| true |
2d22b3e00ab6ba15c9f6da6fdc128c85c80828f6 | Python | XelorR/adventofcode_2015 | /day_03/first_part.py | UTF-8 | 418 | 3.703125 | 4 | [] | no_license | INPUT = open("input.txt").read()
def visit_houses(directions):
    """Follow '^', 'v', '<', '>' moves from the origin and return the set of
    visited houses.

    The starting house is included: per the Advent of Code 2015 day 3
    statement, Santa delivers a present at his starting location before
    reading any move (the original version missed it unless the path returned
    to the origin).  Unrecognised characters leave the position unchanged,
    matching the original behaviour.
    """
    moves = {"^": (0, 1), "v": (0, -1), "<": (-1, 0), ">": (1, 0)}
    x = y = 0
    visited = {(0, 0)}
    for direction in directions:
        dx, dy = moves.get(direction, (0, 0))
        x += dx
        y += dy
        visited.add((x, y))
    return visited
# Part 1 answer: how many distinct houses receive at least one present.
print(len(visit_houses(INPUT)))
| true |
c1bb35216a283cb8cb1ef171408aa7f91a9d5ab9 | Python | bluaxe/TNN | /tools/caffe2onnx/src/OPs/Pooling.py | UTF-8 | 7,053 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | import numpy as np
import src.c2oObject as Node
import math
import copy
def get_pool_pads(layer):
    """Build the 8-element pads list used for the auxiliary Pad node.

    A non-zero square `pad` wins over the `pad_h`/`pad_w` pair; when neither
    is set, the padding is zero.  Layout matches the original:
    [0, 0, pad_h, pad_w, 0, 0, pad_h, pad_w].
    """
    param = layer.pooling_param
    if param.pad != 0:
        pad_h = pad_w = param.pad
    elif param.pad_h != 0 and param.pad_w != 0:
        pad_h, pad_w = param.pad_h, param.pad_w
    else:
        pad_h = pad_w = 0
    return [0, 0, pad_h, pad_w, 0, 0, pad_h, pad_w]
def calculate_pad_output_shape(input_shape, pads):
    """Output shape of the Pad node: H and W grow by twice the pad amounts."""
    pad_h, pad_w = pads[2], pads[3]
    padded = list(input_shape[0])
    padded[2] += 2 * pad_h
    padded[3] += 2 * pad_w
    return [padded]
def create_pad_node(layer, node_name, input_name, output_name, input_shape):
    """Create a constant-mode ONNX Pad node matching the caffe layer's padding."""
    pads = get_pool_pads(layer)
    output_shape = calculate_pad_output_shape(input_shape, pads)
    # NOTE(review): `pads` only feeds the shape computation here; it is not
    # stored in the node attributes — presumably handled elsewhere, verify.
    return Node.c2oNode(layer, node_name, 'Pad', input_name, output_name,
                        input_shape, output_shape, {"mode": "constant"})
def get_pool_attributes(layer, pool_type, input_shape):
    """Collect the ONNX attributes (kernel_shape, strides, pads, ceil_mode)
    for a caffe pooling layer.

    Global pooling uses the full input H/W as the kernel.  The pads computed
    from the layer are deliberately discarded (see the note below): padding
    is exported as a separate Pad node instead.
    """
    number = input_shape[0][0]
    channel = input_shape[0][1]
    height = input_shape[0][2]
    weight = input_shape[0][3]
    kernel_size = layer.pooling_param.kernel_size
    pad = layer.pooling_param.pad
    stride = layer.pooling_param.stride
    if pool_type == 'GlobalMaxPool' or pool_type == 'GlobalAveragePool':
        global_pooling = True
    else:
        global_pooling = False
    # pass kernel_shape
    if global_pooling:
        # Global pooling covers the whole feature map.
        kernel_h = height
        kernel_w = weight
    else:
        if kernel_size != 0:
            kernel_h = kernel_w = kernel_size
        elif layer.pooling_param.kernel_h != 0 and layer.pooling_param.kernel_w != 0:
            kernel_h = layer.pooling_param.kernel_h
            kernel_w = layer.pooling_param.kernel_w
        else:
            kernel_h = 1
            kernel_w = 1
    kernel_shape = [kernel_h, kernel_w]
    # pass pad
    if pad != 0:
        pad_h = pad_w = pad
    else:
        if layer.pooling_param.pad_h != 0 and layer.pooling_param.pad_w != 0:
            pad_h = layer.pooling_param.pad_h
            pad_w = layer.pooling_param.pad_w
        else:
            pad_h = pad_w = 0
    pads = [pad_h, pad_w, pad_h, pad_w]
    # Caffe and ONNX compute padding differently, so the pad attribute is
    # emitted as a separate Pad node; force zero padding on the pool itself.
    pads = [0, 0, 0, 0]
    # pass strides
    stride_h = stride_w = 1
    if stride != 1:
        stride_h = stride_w = stride
    else:
        if layer.pooling_param.stride_h != 0 and layer.pooling_param.stride_w != 0:
            stride_h = layer.pooling_param.stride_h
            stride_w = layer.pooling_param.stride_w
        else:
            stride_h = stride_w = 1
    strides = [stride_h, stride_w]
    # pass round_mode
    # Caffe defaults to CEIL while ONNX's ceil_mode defaults to floor.
    # caffe definition
    # enum RoundMode {
    #     CEIL = 0;
    #     FLOOR = 1;
    # }
    # default Ceil = 0
    # onnx ceil_mode floor = 0, ceil = 1, default: floor = 0
    round_mode_ceil = 0
    round_mode_floor = 1
    round_mode = 0
    if layer.pooling_param.round_mode == 0:
        round_mode = round_mode_ceil
    elif layer.pooling_param.round_mode == 1:
        round_mode = round_mode_floor
    else:
        # wrong condition
        exit(-1)
    if round_mode == round_mode_ceil:
        ceil_mode = 1
    else:
        ceil_mode = 0
    attributes = {"kernel_shape": kernel_shape,
                  "strides": strides,
                  "pads": pads,
                  "ceil_mode": ceil_mode
                  }
    return attributes
# Compute the output dimensions of a pooling layer.
def get_pooling_output_shape(input_shape, layer, attributes, with_indices=False):
    """NCHW output shape(s) implied by the pooling attributes.

    Returns the shape twice when `with_indices` is set (MaxPool emitting an
    Indices output).  `layer` is accepted for signature compatibility but is
    not used.
    """
    batch = input_shape[0][0]
    channel = input_shape[0][1]
    height = input_shape[0][2]
    width = input_shape[0][3]
    kernel_h, kernel_w = attributes["kernel_shape"]
    stride_h, stride_w = attributes["strides"]
    pads = attributes["pads"]
    pad_h = pads[2]
    pad_w = pads[3]
    rounder = math.ceil if attributes["ceil_mode"] == 1 else math.floor
    # NOTE(review): the width formula reuses pad_h, exactly as the original
    # did; with pads forced to zero upstream this never matters, but it looks
    # like a latent bug worth confirming.
    pooled_h = int(rounder((height + 2 * pad_h - kernel_h) / stride_h)) + 1
    pooled_w = int(rounder((width + 2 * pad_h - kernel_w) / stride_w)) + 1
    if pad_h != 0 or pad_w != 0:
        # Drop windows that would start entirely inside the padding.
        if (pooled_h - 1) * stride_h >= height + pad_h:
            pooled_h -= 1
        if (pooled_w - 1) * stride_w >= width + pad_w:
            pooled_w -= 1
    shape = [batch, channel, pooled_h, pooled_w]
    if with_indices:
        return [shape, list(shape)]
    return [shape]
def pooling_type(layer):
    """Map caffe (pool, global_pooling) onto the ONNX operator name.

    pool 0 = MAX, pool 1 = AVE; any other combination aborts the program.
    """
    pool = layer.pooling_param.pool
    is_global = layer.pooling_param.global_pooling
    if is_global is True:
        if pool == 0:
            return 'GlobalMaxPool'
        if pool == 1:
            return 'GlobalAveragePool'
    elif is_global is False:
        if pool == 0:
            return 'MaxPool'
        if pool == 1:
            return 'AveragePool'
    print("unsupport pooling!")
    exit(-1)
# Build the ONNX node for a caffe pooling layer.
def create_pooling_node(layer, nodename, inname, outname, input_shape):
    """Translate a caffe pooling layer into the corresponding ONNX node.

    Two output names signal MaxPool-with-Indices; the global variants carry
    no attributes.  (pool == 2, stochastic pooling, is already rejected by
    `pooling_type`.)
    """
    pool_type = pooling_type(layer)
    attributes = get_pool_attributes(layer, pool_type, input_shape)
    with_indices = len(outname) == 2
    output_shape = get_pooling_output_shape(input_shape, layer, attributes,
                                            with_indices=with_indices)
    windowed = pool_type in ('MaxPool', 'AveragePool')
    node = Node.c2oNode(layer, nodename, pool_type, inname, outname,
                        input_shape, output_shape,
                        dict=attributes if windowed else {})
    assert (node is not None)
    return node
| true |
d9049f681d71aebc220bde95041be187d4b5fa45 | Python | amin-sorkhei/PythonProjects | /BuildingMachineLearningSystemsWithPython-master/ch10/simple_classification.py | UTF-8 | 2,224 | 3.015625 | 3 | [
"MIT"
] | permissive | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import mahotas as mh
from sklearn import cross_validation
from sklearn.linear_model.logistic import LogisticRegression
import numpy as np
from glob import glob
from features import texture, edginess_sobel
basedir = '../SimpleImageDataset/'
# Accumulators: one Haralick texture vector, one sobel-edginess value and one
# class label per image.
haralicks = []
sobels = []
labels = []
print('This script will test (with cross-validation) classification of the simple 3 class dataset')
print('Computing features...')
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
# We sort the images to ensure that they are always processed in the same order
# Otherwise, this would introduce some variation just based on the random
# ordering that the filesystem uses
for fname in sorted(images):
    im = mh.imread(fname, as_grey=True)
    haralicks.append(texture(im))
    sobels.append(edginess_sobel(im))
    # Files are named like building00.jpg, scene23.jpg...
    labels.append(fname[:-len('xx.jpg')])
print('Finished computing features.')
haralicks = np.array(haralicks)
sobels = np.array(sobels)
labels = np.array(labels)
# We use logistic regression because it is very fast.
# Feel free to experiment with other classifiers
# NOTE(review): sklearn.cross_validation was removed in modern scikit-learn;
# use sklearn.model_selection.cross_val_score there.
scores = cross_validation.cross_val_score(
    LogisticRegression(), haralicks, labels, cv=5)
print('Accuracy (5 fold x-val) with Logistic Regression [std features]: {}%'.format(
    0.1 * round(1000 * scores.mean())))
# Append the sobel value as one extra feature column.
haralick_plus_sobel = np.hstack([np.atleast_2d(sobels).T, haralicks])
scores = cross_validation.cross_val_score(
    LogisticRegression(), haralick_plus_sobel, labels, cv=5).mean()
print('Accuracy (5 fold x-val) with Logistic Regression [std features + sobel]: {}%'.format(
    0.1 * round(1000 * scores.mean())))
# We can try to just use the sobel feature. The result is almost completely
# random.
scores = cross_validation.cross_val_score(
    LogisticRegression(), np.atleast_2d(sobels).T, labels, cv=5).mean()
print('Accuracy (5 fold x-val) with Logistic Regression [only using sobel feature]: {}%'.format(
    0.1 * round(1000 * scores.mean())))
| true |
482c08b2fc7faab8f6555182ff4f01d45ecf0177 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_155/2958.py | UTF-8 | 531 | 2.953125 | 3 | [] | no_license | import sys
# Solve every test case read from stdin.  Each case line holds Smax and a
# digit string where digit i is the number of audience members whose
# threshold is i (a Standing-Ovation-style problem, judging by the code).
lines = sys.stdin.readlines()
n_tests = int(lines[0])  # number of cases announced on the first line
for j in range(1,len(lines)):
    line = lines[j]
    if not line.strip():
        continue
    nums = line.split(" ")
    smax = int(nums[0])
    counts = [int(x) for x in nums[1].strip()]
    # invites = extra friends needed; standing = people standing so far.
    invites = 0
    standing = 0
    for i in range(len(counts)):
        if counts[i] == 0:
            continue
        if standing < i:
            # Not enough people standing to trigger threshold i:
            # invite the difference.
            invites += (i - standing)
            standing = i
        standing += counts[i]
    print("Case #%d: %d" % (j, invites))
| true |
caf8e0a10c785221f8207a5c69e5efd46fa29d27 | Python | Swastik-Saha/Python | /Print_List_of_Even_Numbers.py | UTF-8 | 355 | 4.3125 | 4 | [
"MIT"
] | permissive | # INPUT NUMBER OF EVEN NUMBERS
# Read a non-negative limit and print every even number from 0 up to it.
n = int(input("Enter the limit : "))
if n < 0:
    print("Invalid number, please enter a Non-negative number!")
else:
    # range(..., step=2) enumerates 0, 2, 4, ..., n.
    even_list = list(range(0, n + 1, 2))
    print(even_list)
| true |
9a4698ed416370f07c1d78e3173084974d3972ef | Python | saksham0309/GUI-Music-Player-using-Tkinter-and-Pygame | /MusicPlayer.py | UTF-8 | 2,328 | 3.546875 | 4 | [] | no_license | #Importing Necessary Modules
import pygame
import tkinter as tkr
from tkinter.filedialog import askdirectory
import os
# Build the main window for the player.
musicplayer = tkr.Tk()
musicplayer.title("Music Player")
# Fixed window size (width x height, in pixels).
musicplayer.geometry('450x350')
# Ask the user for the directory that holds the music files ...
directory = askdirectory()
# ... and make it the working directory so tracks can be loaded by name.
os.chdir(directory)
songlist = os.listdir()
# The playlist widget; SINGLE allows exactly one selected track at a time.
playlist = tkr.Listbox(musicplayer, font = "Arial 14 bold", bg = "cyan2", selectmode=tkr.SINGLE)
# Fill the playlist in directory order.  BUG FIX: the insertion index was
# reset to 0 on every iteration, so the songs were displayed in reverse
# order (the dead `pos = pos+1` shows the sequential intent).
pos = 0
for item in songlist:
    playlist.insert(pos, item)
    pos = pos + 1
# Initialise pygame and its mixer for audio playback.
pygame.init()
pygame.mixer.init()
# Callback for the "Play Music" button.
def play():
    """Load the track selected in the playlist, show its name, start playback."""
    selection = playlist.get(tkr.ACTIVE)
    pygame.mixer.music.load(selection)
    var.set(selection)
    pygame.mixer.music.play()
# Callback for the "Stop Music" button.
def ExitMusicPlayer():
    """Stop music playback."""
    pygame.mixer.music.stop()
# Callback for the "Pause Music" button.
def pause():
    """Pause the currently playing track."""
    pygame.mixer.music.pause()
# Callback for the "Resume Music" button.
def resume():
    """Resume a paused track."""
    pygame.mixer.music.unpause()
# Create the four control buttons, each wired to a callback defined above.
Button1 = tkr.Button(musicplayer, width=5, height=1, font="Arial 20 bold", text="Play Music", command=play, bg="green", fg="black")
Button2 = tkr.Button(musicplayer, width=5, height=1, font="Arial 20 bold", text="Stop Music", command=ExitMusicPlayer, bg="red", fg="black")
Button3 = tkr.Button(musicplayer, width=5, height=1, font="Arial 16 bold", text="Pause Music", command=pause, bg="yellow", fg="black")
Button4 = tkr.Button(musicplayer, width=5, height=1, font="Arial 16 bold", text="Resume Music", command=resume, bg="skyblue", fg="black")
# `var` holds the title of the track being played; shown in the label below.
var = tkr.StringVar()
songtitle = tkr.Label(musicplayer, font="Arial 12 bold", textvariable=var)
songtitle.pack()
Button1.pack(fill="x")
Button2.pack(fill="x")
Button3.pack(fill="x")
Button4.pack(fill="x")
playlist.pack(fill="both", expand="yes")
# Enter the Tk event loop (blocks until the window is closed).
musicplayer.mainloop()
9750a57f98f60e192b3530c22876a6db24b56047 | Python | SonjaGrusche/LPTHW | /EX06/ex6.py | UTF-8 | 1,301 | 4.6875 | 5 | [] | no_license | # the variable for x gets set, it consists of a string with a format character and its variable at the end
# LPTHW exercise 6 (Python 2 syntax): strings and format characters.
x = "There are %d types of people." % 10
# the variable for binary is binary
binary = "binary"
# the variable for do_not is don't
do_not = "don't"
# the variable for y is a string that includes two format characters and its variables at the end
y = "Those who know %s and those who %s." % (binary, do_not) # two strings inside a string
# lines 11 and 12 print what's inside the variables for x and y
print x
print y
# lines 15 and 16 print what's inside the quotes using the variables at the end to fill in the format characters
print "I said %r." % x # string inside string
print "I also said: '%s'." % y # one string that already includes two strings is put inside string
# lines 19 and 20 define two more variables
hilarious = False
joke_evaluation = "Isn't that joke so funny?! %r"
# line 23 prints the variable for joke_evaluation and fills in the variable for hilarious for the format character
print joke_evaluation % hilarious
# lines 26 and 27 define two variables
w = "This is the left side of..."
e = "a string with a right side."
# variables from lines 26 and 27 get printed
print w + e
# Study Drill
# 4. the + concatenates the two strings (not functioning as a math operator)
| true |
beecfeba880731495bc60a7c49d3829707b9dac8 | Python | tasusu/ProjectEuler | /problem28.py | UTF-8 | 235 | 3.1875 | 3 | [] | no_license | '''
Problem 28
https://projecteuler.net/problem=28
'''
def spiral_diagonal_sum(size=1001):
    """Sum of the numbers on both diagonals of a size x size clockwise number
    spiral starting with 1 at the centre (Project Euler problem 28).

    `size` must be an odd positive integer; the default reproduces the
    original script's 1001x1001 grid.
    """
    total = 1       # the centre cell
    corner = 1
    for ring in range(1, size // 2 + 1):
        step = 2 * ring            # the gap between corners grows by 2 per ring
        for _ in range(4):         # four corners per ring
            corner += step
            total += corner
    return total


if __name__ == '__main__':
    # Same output as the original script: the 1001x1001 answer.
    print(spiral_diagonal_sum())
da036ad944b3bcf7bfc5fe9a1e742d5df41c67ce | Python | PELTECH/raspberry-pi-gmail-alarm | /GmailAlarm.py | UTF-8 | 3,401 | 3.25 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
#-------------------------------------------------------------------------------
# GmailAlarm.py
#-------------------------------------------------------------------------------
# Description:
# A Python script written for the Raspberry Pi which checks your Gmail inbox for
# an alarming subject and sounds an alarm if it finds it.
# How to use:
# (1) Make sure your Raspberry Pi has a reliable Internet connection.
# (2) Change the following constants as needed.
# - GMAIL_IMAP_URL = the URL to Gmail's IMAP service
# - GMAIL_ADDRESS = the Gmail address
# - GMAIL_PASSWORD = the Gmail password
# - ALARMING_SUBJECT = If the subject contains this text, sound the alarm.
# - ALARM_COMMAND = If the alarm is sounded, this is the Linux command
# that will get run.
# The default is to run mpg321 against alarm.mp3
# in the present working directory.
# (3) If you are using the default ALARM_COMMAND, find an alarming MP3 file,
# name it alarm.mp3, and drop it into the present working directory.
# Otherwise, do what you need to do get your custom ALARM_COMMAND working.
# (4) Add a cron job which runs this python script as often as you like.
# The easiest way to do this is to run "crontab -e" and
# add a line to the crontab file.
#-------------------------------------------------------------------------------
import imaplib, os, sys
# Keep track of the present working directory.
PWD = os.path.dirname(os.path.realpath(sys.argv[0]))
# Change these constants as needed.
# SECURITY NOTE: the credentials are stored in plain text here; restrict the
# file's permissions (and consider using a Gmail app password).
GMAIL_IMAP_URL = 'imap.gmail.com'
GMAIL_ADDRESS = 'username@gmail.com'
GMAIL_PASSWORD = 'password'
ALARMING_SUBJECT = 'subject'
# Shell command run when the alarm fires (plays alarm.mp3 in the background).
ALARM_COMMAND = '/usr/bin/mpg321 ' + PWD + '/alarm.mp3 &'
def checkGmail():
    """Log in, sound the alarm if an alarming subject is in the inbox, log out."""
    # Create the Gmail IMAP4 SSL connection.
    gmail = createGmail(GMAIL_IMAP_URL, GMAIL_ADDRESS, GMAIL_PASSWORD)
    # If any of the inbox email subjects are alarming, sound the alarm.
    if inboxEmailSubjectsAreAlarming(gmail):
        alarm()
    else:
        print 'don\'t panic'
    # Destroy the gmail IMAP4 SSL connection.
    destroyGmail(gmail)
def createGmail(gmailImapUrl, gmailAddress, gmailPassword):
    """Open and log in an IMAP4-over-SSL connection to Gmail; return it."""
    # Create the Gmail IMAP4 SSL connection.
    print 'logging into gmail'
    gmail = imaplib.IMAP4_SSL(gmailImapUrl)
    gmail.login(gmailAddress, gmailPassword)
    return gmail
def inboxEmailSubjectsAreAlarming(gmail):
    """Return True when at least one inbox mail subject contains ALARMING_SUBJECT."""
    # Search the inbox email subjects for the alarming subject.
    print 'searching inbox email subjects for "' + ALARMING_SUBJECT +'"'
    gmail.select('inbox')
    result, data = gmail.uid('search', None, '(SUBJECT "' + ALARMING_SUBJECT +'")')
    # Anything alarming?
    if result != 'OK':
        print 'search failed'
        print 'result: ' + result
        return False
    else:
        # data[0] is a space-separated string of matching message UIDs.
        uidsString = data[0];
        hasUids = len(uidsString) > 0
        if hasUids:
            print 'found ' + str(len(uidsString.split(' '))) + ' email UID(s): ' + str(uidsString)
            return True
        else:
            print 'found 0 email UIDs'
            return False
def alarm():
    """Run ALARM_COMMAND through the shell to sound the alarm."""
    # Sound the alarm!
    print 'ALARM!!!'
    print ALARM_COMMAND
    os.system(ALARM_COMMAND)
def destroyGmail(gmail):
    """Close the selected mailbox and log out of the IMAP connection."""
    # Destroy the Gmail IMAP4 SSL connection.
    print 'logging out of gmail'
    gmail.close()
    gmail.logout()
# Entry point: perform one inbox check (intended to be scheduled from cron).
checkGmail()
| true |
5b7078e29a5e1e59fdf62d0d8fc71fa4e650381c | Python | ryudox/yehg-core-lab-misc | /dll-hijack-helper/dll-hijack-helper.py | UTF-8 | 5,251 | 2.59375 | 3 | [] | no_license | # DLL Hijacking Helper
# Myo Soe, http://yehg.net/
# 2013-12-08
# platform: Python 2.x @ Windows
import csv
import shutil
import hashlib
import os
import sys
import re
def md5sum(filename):
    """Return the hex MD5 digest of `filename`.

    Exits the whole script with a hint message when the file is missing,
    since the helper must be run from its own directory.
    """
    md5 = hashlib.md5()
    if os.path.exists(filename) == True:
        with open(filename,'rb') as f:
            # Hash in block-sized chunks so large files are not read into RAM.
            for chunk in iter(lambda: f.read(128*md5.block_size), b''):
                md5.update(chunk)
        return md5.hexdigest()
    else:
        print filename + " does not exist. \nYou should run dll-hijack-helper from its directory not your tested application directory."
        sys.exit()
# default ProcMon Output
file = 'Logfile.CSV'
if os.path.exists(file) == False:
    print "\nLogfile.CSV was not found. Read README.txt.\n"
    sys.exit()
# command to kill tested application process
kill_cmd = 'taskkill /f /im target.exe && taskkill /f /im calc.exe'
# Payload binaries copied over hijackable paths; their MD5s let the scan
# recognise when a file on disk is already our own payload.
hijacker_dll = 'hijacker.dll'
hijacker_dll_md5 = md5sum('hijacker.dll')
hijacker_exe = 'hijacker.exe'
hijacker_exe_md5 = md5sum('hijacker.exe')
print "\nRunning DLL Hijack Helper - by Myo Soe , http://yehg.net"
print "\n\nhijacker_dll_m5: " + hijacker_dll_md5
print "hijacker_exe_m5: " + hijacker_exe_md5 + "\n\n----------------------------------\n"
# Walk the ProcMon CSV: column 4 is the path the monitored process tried to
# load.  For every missing (or replaceable) .exe/.dll path, copy the hijacker
# payload there, ask the tester whether it got executed, then clean up.
vuln_dlls = []
scanned_dlls = []
header = 0  # the first CSV row is the header; skipped via this flag
with open(file,'rb') as csvfile:
    line_reader = csv.reader(csvfile,delimiter=",",quotechar='"')
    for row in line_reader:
        if header != 0:
            target_file = str(row[4])
            if re.search('exe$',target_file,re.M|re.I) and not target_file in scanned_dlls:
                if not os.path.exists(target_file):
                    # Missing EXE: plant the payload and let the tester check.
                    shutil.copyfile(hijacker_exe,target_file)
                    print "\nCreated EXE -> " + target_file + "\n"
                    is_vuln = raw_input("\n[!] Launch the application to test it.\n[!] Enter 'y' key if it works, other key to continue\n_")
                    if re.search('y',is_vuln,re.M|re.I):
                        vuln_dlls.append(target_file)
                        print "\nKilling the process ...\n"
                        os.system(kill_cmd)
                    os.remove(target_file)
                    scanned_dlls.append(target_file)
                else:
                    # Existing EXE: replace it only if it is not already our payload.
                    target_file_md5 = md5sum(target_file)
                    if (target_file_md5!=hijacker_exe_md5):
                        os.remove(target_file)
                        shutil.copyfile(hijacker_exe,target_file)
                        print "\nCreated EXE -> " + target_file + "\n"
                        is_vuln = raw_input("\n[!] Launch the application to test it.\n[!] Enter 'y' key if it works, other key to continue.\n_")
                        if re.search('y',is_vuln,re.M|re.I):
                            vuln_dlls.append(target_file)
                            print "\nKilling the process ...\n"
                            os.system(kill_cmd)
                        os.remove(target_file)
                        scanned_dlls.append(target_file)
                    else:
                        print "Hijacker EXE already exists. \nSkipped creating -> " + target_file + "\n"
            elif re.search('dll$',target_file,re.M|re.I) and not target_file in scanned_dlls:
                if not os.path.exists(target_file):
                    # Missing DLL: same drill with the DLL payload.
                    shutil.copyfile(hijacker_dll,target_file)
                    print "\nCreated DLL -> " + target_file + "\n"
                    is_vuln = raw_input("\n[!] Launch the application to test it.\n[!] Enter 'y' key if it works, other key to continue.\n_")
                    if re.search('y',is_vuln,re.M|re.I):
                        vuln_dlls.append(target_file)
                        print "\nKilling the process ...\n"
                        os.system(kill_cmd)
                    os.remove(target_file)
                    scanned_dlls.append(target_file)
                else:
                    target_file_md5 = md5sum(target_file)
                    if (target_file_md5!=hijacker_dll_md5):
                        os.remove(target_file)
                        shutil.copyfile(hijacker_dll,target_file)
                        print "\nCreated DLL -> " + target_file + "\n"
                        is_vuln = raw_input("\n[!] Launch the application to test it. \n[!] Enter 'y' key if it works, other key to continue.")
                        if re.search('y',is_vuln,re.M|re.I):
                            vuln_dlls.append(target_file)
                            print "\nKilling the process ...\n"
                            os.system(kill_cmd)
                        os.remove(target_file)
                        scanned_dlls.append(target_file)
                    else:
                        print "Hijacker DLL already exists. \nSkipped creating -> " + target_file + "\n"
        else:
            header = 1

# Report every path that was confirmed exploitable.
if len(vuln_dlls)>0:
    print "\n------------------------------\nVulnerable DLL/EXEs: \n\n - " + "\n - ".join(vuln_dlls)
8581195d63c35c9962bc8d7496e920e04fe743a5 | Python | Alwayswithme/LeetCode | /Python/007-reversed-integer.py | UTF-8 | 658 | 3.75 | 4 | [] | no_license | #!/bin/python
#
# Author : Ye Jinchang
# Date : 2015-04-27 23:06:18
# Title : 7 reversed integer.py
# Reverse digits of an integer.
#
# Example1: x = 123, return 321
# Example2: x = -123, return -321
class Solution(object):
    def reverse(self, x):
        """Reverse the decimal digits of x; return 0 on signed-32-bit overflow.

        :type x: int
        :rtype: int
        """
        negative = not x > 0          # matches the original: 0 takes the negative path
        magnitude = abs(x)
        limit = 1 << 31
        reversed_num = 0
        while magnitude > 0:
            magnitude, digit = divmod(magnitude, 10)
            # Bail out before the next accumulation step would overflow.
            if reversed_num > (limit - digit) / 10:
                return 0
            reversed_num = reversed_num * 10 + digit
        return -reversed_num if negative else reversed_num
| true |
e78ba6354ac6d5367370c94dfc5c5b9bc441adb9 | Python | fela/triangles | /test_triangles.py | UTF-8 | 1,671 | 2.796875 | 3 | [] | no_license | import unittest
from triangles import subsets, list_of_set_to_set_of_set
class TestListOfSetToSetOfSet(unittest.TestCase):
    """Unit tests for triangles.list_of_set_to_set_of_set.

    The function is expected to turn a list of sets into a set of
    frozensets (plain sets are unhashable, hence the frozenset wrapping).
    """
    def test_two(self):
        # A non-empty and an empty set both survive the conversion.
        inp = [{1}, set()]
        output = {
            frozenset({1}),
            frozenset(set())
        }
        self.assertEqual(list_of_set_to_set_of_set(inp), output)
    def test_empty(self):
        # An empty list maps to the empty set.
        inp = []
        output = set()
        self.assertEqual(list_of_set_to_set_of_set(inp), output)
    def test_one(self):
        inp = [{123}]
        output = {frozenset({123})}
        self.assertEqual(list_of_set_to_set_of_set(inp), output)
class TestSubset(unittest.TestCase):
    """Unit tests for triangles.subsets (the power set of the input)."""
    def test_empty(self):
        # NOTE(review): `{}` is an empty dict, not a set; iteration over it is
        # empty either way, so the test still exercises the empty case.
        inp = {}
        output = {frozenset({})}
        self.assertEqual(subsets(inp), output)
    def test_one(self):
        inp = {1}
        output = {frozenset({1}), frozenset({})}
        self.assertEqual(subsets(inp), output)
    def test_two(self):
        inp = {1, 2}
        output = {
            frozenset({}),
            frozenset({1}),
            frozenset({2}),
            frozenset({1, 2})
        }
        self.assertEqual(subsets(inp), output)
    def test_four(self):
        """
        I'm using list_of_set_to_set_of_set so if that function does not
        run this test will be broken
        """
        inp = subsets({1, 2, 3, 4})
        output = list_of_set_to_set_of_set([
            {},
            {1}, {2}, {3}, {4},
            {1, 2}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 4},
            {1, 2, 3}, {1, 2, 4}, {1, 3, 4}, {2, 3, 4},
            {1, 2, 3, 4}
        ])
        self.assertEqual(inp, output)
# Allow running the suite directly: `python test_triangles.py`.
if __name__ == '__main__':
    unittest.main()
6e08ca834437f12829c02c1267b234e7c0b859ad | Python | tsvikas/hanabi | /players/humanlike.py | UTF-8 | 12,294 | 2.59375 | 3 | [] | no_license | from collections import namedtuple
from game import Clue, Play, Discard, ResolvedClue
# Per-card clue knowledge: `positive` holds facts learned from clues that hit
# the card; `negative` holds per-value possibility flags (True = still
# possible) learned from clues that missed it.  Each side is an Info pair.
CardInfo = namedtuple('CardInfo', 'positive negative')
Info = namedtuple('Info', 'suit rank')
# A candidate clue the player is considering: target player, card, clue type.
PossibleClue = namedtuple('PossibleClue', 'player card type')
def humanlike_player(state, log, hands, rules, tokens, slots, discard_pile):
    """
    Ofer's humanlike player.

    A Hanabi strategy callback.  `state` maps card ids to CardInfo
    (positive facts from clues that hit a card, per-value negative
    possibility flags from clues that missed it) and is threaded through
    turns so knowledge accumulates.  Returns a tuple of
    (updated state, chosen move, human-readable reason).

    Priorities: play a card known to be playable; otherwise give a clue
    that makes another player play correctly (or stops a bad play);
    otherwise discard the safest card; as a last resort play blindly.
    """
    def add_card_to_state(given_state, card_id):
        # Ensure `card_id` has an entry: no positive facts yet, every
        # suit/rank still possible.
        if card_id not in given_state:
            given_state[card_id] = CardInfo(
                Info(None, None),
                Info([True for _ in range(rules.suits)], [True for _ in rules.ranks])
            )
    def update_cards(cards, player_id=None, clue=None):
        # Fold the clues of the last round (plus an optional hypothetical
        # `clue`) into `cards` for `player_id`.  Returns the updated mapping
        # and the set of card ids that were directly hinted.
        if player_id is None:
            player_id = my_id
        hinted_cards = set()
        _log = log[-len(hands):]
        if clue is not None:
            _log.append(clue)
        for move in _log:
            if isinstance(move, ResolvedClue):
                if move.player == player_id:
                    for card in move.cards:
                        hinted_cards.add(card.id)
                    card_ids_in_hint = set()
                    # Cards touched by the clue learn a positive fact...
                    for card in move.cards:
                        add_card_to_state(cards, card.id)
                        card_ids_in_hint.add(card.id)
                        cards[card.id] = cards[card.id]._replace(
                            positive=cards[card.id].positive._replace(**{move.type: move.param}))
                    # ...and every untouched card in that hand rules the
                    # clued value out.
                    for card in hands[player_id]:
                        if card.id not in card_ids_in_hint:
                            add_card_to_state(cards, card.id)
                            new_negative = getattr(cards[card.id].negative, move.type)
                            new_negative[move.param] = False
                            cards[card.id] = cards[card.id]._replace(
                                negative=cards[card.id].negative._replace(**{move.type: new_negative}))
        # Consolidate negatives in hand
        # NOTE(review): the two `_replace` calls below discard their result
        # (namedtuple._replace returns a new tuple), index with `card.id`
        # (variable leaked from the loops above) instead of `card_id`, and
        # would assign a *list* where positive.suit/rank is an int elsewhere.
        # As written this consolidation is a no-op -- confirm intent before
        # fixing.
        for card_id in hinted_cards:
            if cards[card_id].negative.suit.count(True) == 1:
                cards[card.id]._replace(positive=cards[card.id].positive._replace(
                    suit=[i for i, v in enumerate(cards[card_id].negative.suit) if v]))
            if cards[card_id].negative.rank.count(True) == 1:
                cards[card.id]._replace(positive=cards[card.id].positive._replace(
                    rank=[i for i, v in enumerate(cards[card_id].negative.rank) if v]))
        return cards, hinted_cards
    def get_max_rank_in_suit(suit, _slots, _discard_pile):
        # First unplayed rank in `suit` whose copies are all discarded; ranks
        # at or above it can never be played in this suit.
        max_rank_in_suit = None
        for rank in range(len(rules.ranks)):
            left_in_rank = rules.ranks[rank] - _discard_pile[suit][rank]
            if rank >= _slots[suit] and left_in_rank == 0:
                max_rank_in_suit = rank
                break
        return max_rank_in_suit
    def is_playable_suit(suit, _slots, _discard_pile):
        # A suit is worth playing on if it is not finished and no rank still
        # needed has been fully discarded.
        if _slots[suit] > len(rules.ranks):
            return False
        max_rank_in_suit = get_max_rank_in_suit(suit, _slots, _discard_pile)
        if max_rank_in_suit is not None and max_rank_in_suit < _slots[suit]:
            return False
        return True
    def should_play_card(cards, cards_in_hand, hinted_cards, _slots=None, _discard_pile=None):
        # Pick a card id from `cards_in_hand` to play, or None.
        # Certain plays (known suit AND rank, legal now) beat guesses based
        # on a single hinted attribute.
        if _slots is None:
            _slots = slots
        if _discard_pile is None:
            _discard_pile = discard_pile
        hinted_cards = hinted_cards.intersection(cards_in_hand)
        definate_cards_to_play = set()
        cards_to_play = set()
        for card_id in cards_in_hand:
            add_card_to_state(cards, card_id)
            if cards[card_id].positive.suit is not None and cards[card_id].positive.rank is not None:
                if is_play_legal(cards[card_id].positive.suit, cards[card_id].positive.rank, _slots):
                    definate_cards_to_play.add(card_id)
            if card_id in sorted(hinted_cards):
                # Rank known only: playable if some live suit needs exactly
                # that rank next.
                if cards[card_id].positive.rank is not None and cards[card_id].positive.suit is None and any(
                        [is_playable_suit(suit, _slots, _discard_pile) and cards[card_id].positive.rank == _slots[suit]
                         for suit in range(len(_slots))]):
                    cards_to_play.add(card_id)
                # Suit known only: gamble on it if the suit is still live.
                if cards[card_id].positive.suit is not None and cards[card_id].positive.rank is None \
                        and is_playable_suit(cards[card_id].positive.suit, _slots, _discard_pile):
                    cards_to_play.add(card_id)
        if definate_cards_to_play: # its better to go up than go sideways!
            highest_rank = 0
            cards_in_highest_rank = set()
            for card_id in definate_cards_to_play:
                if cards[card_id].positive.rank > highest_rank:
                    highest_rank = cards[card_id].positive.rank
                    cards_in_highest_rank = set()
                if cards[card_id].positive.rank == highest_rank:
                    cards_in_highest_rank.add(card_id)
            return sorted(cards_in_highest_rank)[-1] # play newest card
        if cards_to_play:
            return sorted(cards_to_play)[-1] # play newest card
        return None
    def what_will_player_play(cards, hand, player_id, clue, _slots, _discard_pile):
        # Simulate what `player_id` would play after (optionally) receiving
        # `clue`.  Returns (updated cards, whether that play is legal, card
        # id) -- (cards, None, None) if the player would not play at all.
        cards, _hinted_cards = update_cards(cards, player_id, clue)
        card_id = should_play_card(cards, [card.id for card in hand], _hinted_cards, _slots, _discard_pile)
        if card_id is not None:
            card = [card for card in hand if card.id == card_id][0]
            legal = is_play_legal(card.data.suit, card.data.rank, _slots)
            return cards, legal, card_id
        else:
            return cards, None, None
    def is_play_legal(suit, rank, _slots):
        # A card is playable iff its rank is exactly the next slot of its suit.
        return _slots[suit] == rank
    def create_clue(my_id, _player, clue_type, param):
        # Build a hypothetical ResolvedClue, splitting `_player`'s hand into
        # cards the clue touches and cards it misses.
        cards = [card for card in hands[_player] if getattr(card.data, clue_type) == param]
        cards_neg = [card for card in hands[_player] if getattr(card.data, clue_type) != param]
        return ResolvedClue.create(my_id, _player, clue_type, param, cards, cards_neg)
    # Start
    if state is None:
        state = {}
    my_id = len(log) % len(hands)
    state, state_actions = update_cards(state)
    my_card_ids = [card.id for card in hands[my_id]]
    card_to_play = should_play_card(state, my_card_ids, state_actions, slots, discard_pile)
    if card_to_play is not None: # Its better to play than hint
        return state, Play.create(card_to_play), 'Played card'
    if tokens.clues > 0: # Its better to hint than discard
        # Walk the other players in turn order, simulating what each would
        # do, and look for a clue that either prevents a bad play or causes
        # a good one.
        foreseen_slots = list(slots)
        foreseen_state = dict(state)
        for i in range(len(hands) - 1):
            player = (my_id + i + 1) % len(hands)
            foreseen_state, is_legal, play = what_will_player_play(
                foreseen_state, hands[player], player, None, foreseen_slots, discard_pile)
            player_state, player_hinted = update_cards(foreseen_state, player)
            # NOTE(review): this recomputes the player's play from the outer
            # `state` (not `player_state`/`foreseen_state` computed just
            # above), and `is_legal`/`play` from what_will_player_play are
            # unused on this path -- confirm which state was intended.
            player_play = should_play_card(state, [card.id for card in hands[player]], player_hinted)
            if player_play is not None:
                card = [card for card in hands[player] if card.id == player_play][0]
                if is_play_legal(card.data.suit, card.data.rank, slots):
                    # The player is about to make a correct play; account for
                    # it and keep looking further around the table.
                    foreseen_slots[card.data.suit] = card.data.rank
                    continue
                else: # try and rectify stupidity
                    # Search for any suit/rank clue after which the player
                    # either plays legally or stops playing.
                    for card in hands[player]:
                        suit_clue = create_clue(my_id, player, 'suit', card.data.suit)
                        _, is_legal, play = what_will_player_play(
                            dict(foreseen_state), hands[player], player, suit_clue, foreseen_slots, discard_pile)
                        if is_legal or play is None:
                            return state, Clue.create(player, 'suit', card.data.suit), 'Gave hint against stupid play'
                        rank_clue = create_clue(my_id, player, 'rank', card.data.rank)
                        _, is_legal, play = what_will_player_play(
                            dict(foreseen_state), hands[player], player, rank_clue, foreseen_slots, discard_pile)
                        if is_legal or play is None:
                            return state, Clue.create(player, 'rank', card.data.rank), 'Gave hint against stupid play'
            # Collect clues that make this player correctly play a currently
            # playable card.
            good_clues = set()
            for card in hands[player]:
                if slots[card.data.suit] == card.data.rank:
                    suit_clue = create_clue(my_id, player, 'suit', card.data.suit)
                    _, is_legal, play = what_will_player_play(
                        dict(foreseen_state), hands[player], player, suit_clue, foreseen_slots, discard_pile)
                    if is_legal and play == card.id:
                        good_clues.add(PossibleClue(player=player, card=card, type='suit'))
                    rank_clue = create_clue(my_id, player, 'rank', card.data.rank)
                    _, is_legal, play = what_will_player_play(
                        dict(foreseen_state), hands[player], player, rank_clue, foreseen_slots, discard_pile)
                    if is_legal and play == card.id:
                        good_clues.add(PossibleClue(player=player, card=card, type='rank'))
            if good_clues:
                # make sure highest card possible is played
                highest_rank = 0
                given_clue = None
                for clue in good_clues:
                    if given_clue is None:
                        given_clue = clue
                    if clue.card.data.rank > highest_rank:
                        highest_rank = clue.card.data.rank
                        given_clue = clue
                return state, Clue.create(given_clue.player, given_clue.type,
                                          getattr(given_clue.card.data, given_clue.type)), 'Gave actionable clue'
    if tokens.clues < rules.max_tokens.clues: # Its better to discard then playing like an idiot
        protected_cards = set()
        for card_id in my_card_ids:
            # Throw away useless cards
            if state[card_id].positive.suit is not None and not is_playable_suit(state[card_id].positive.suit, slots,
                                                                                discard_pile):
                return state, Discard.create(card_id), 'Discarded unplayable suit'
            if state[card_id].positive.rank is not None and all(
                    [slot < state[card_id].positive.rank for slot in slots]):
                return state, Discard.create(card_id), 'Discarded Unplayable rank'
            if state[card_id].positive.suit is not None and state[card_id].positive.rank is not None:
                if slots[state[card_id].positive.suit] < state[card_id].positive.rank:
                    return state, Discard.create(card_id), 'Discarded unplayable known card'
                # Don't throw away lone copies
                available_copies = rules.ranks[state[card_id].positive.rank]
                discarded_copies = discard_pile[state[card_id].positive.suit][state[card_id].positive.rank]
                if available_copies - discarded_copies == 1:
                    protected_cards.add(card_id)
            # Don't throw away 5s
            if state[card_id].positive.rank is not None:
                available_copies = rules.ranks[state[card_id].positive.rank]
                if available_copies == 1:
                    protected_cards.add(card_id)
        throwaways = set(my_card_ids) - protected_cards
        if throwaways:
            return state, Discard.create(min(throwaways)), 'Discarded unprotected card'
        return state, Discard.create(min(my_card_ids)), 'Discarded oldest card'
    if tokens.clues > 0:
        # give random clue to the player playing before you so the other players may fix it
        player = (my_id - 1) % len(hands)
        if hands[player]:
            highest_rank_in_hand = sorted([card.data.rank for card in hands[player]])[-1]
            return state, Clue.create(player, 'rank', highest_rank_in_hand), 'Gave random clue'
    # If all else fails, play like an idiot
    return state, Play.create(max(my_card_ids)), 'Played random card'
| true |
6fad53e1c2ccb6a6dd2bed74d02262f587452cf7 | Python | MirekPz/WSB | /csv-2-excel_pasek_postepu.py | UTF-8 | 890 | 3.625 | 4 | [] | no_license | # Konwersja wielu plików CSV do formatu Excela
# Convert every CSV file in the "Dane" folder to an Excel workbook.
# Only *.csv files are picked up, so the folder may now safely contain
# other files as well (the original required it to hold CSVs only and
# would crash otherwise).
import os
import pandas as pd
from tqdm import tqdm
import time

print(os.getcwd())
# Filter to CSV files up front instead of assuming the folder is clean.
files_list = [f for f in os.listdir("Dane") if f.lower().endswith(".csv")]
print("\nZawartość katalogu przed konwersją plików:\n", files_list)
print()
print(os.getcwd())
liczba_plikow_csv = len(files_list)
print("Liczba plików CSV:", liczba_plikow_csv)
os.chdir("Dane")
for i, plik in enumerate(tqdm(files_list)):
    print(i)
    print(plik)
    df = pd.read_csv(plik, sep=';')
    # plik[:-4] strips the trailing ".csv" before adding ".xlsx".
    df.to_excel(plik[:-4] + '.xlsx', index=None, header=True)
    time.sleep(0.9)  # artificial delay so the progress bar is visible; remove if unwanted
new_files_list = os.listdir()
print("\nZawartość katalogu po konwersji plików:\n", new_files_list)
os.chdir("..")
print(os.getcwd())
| true |
e7cceae7fb33e63f72018334db4e805b979a5d93 | Python | Thunor12/cours-soutient-telecom | /Python/SourcesP/06-control-exception.py | UTF-8 | 441 | 3.578125 | 4 | [] | no_license | # =============== Gestion d'exception, sans "2nde chance"
# Exception handling WITHOUT a "second chance": an invalid input is reported
# once and execution simply continues.
try:
    i = int(input(" (Saisie securise): Donner un entier SVP: "))
except ValueError:
    print(" Il faut donner un ENTIER!")
# NOTE(review): if the input above was invalid, `i` was never bound and the
# next line raises NameError -- confirm whether that is intended in this demo.
print(i)
# =============== Exception handling WITH a "second chance": keep prompting
# until the input parses as an integer.
while True:
    try:
        i = int(input(" (Saisie securise): Donner un entier SVP: "))
        break
    except ValueError:
        print(" Il faut donner un ENTIER!")
56290d0ac7b0e4ff2c8a0ba7e2ab71f7e78fd8ff | Python | sun1218/SuperProjects | /Super tkinter/file_read.py | UTF-8 | 4,499 | 3.234375 | 3 | [
"MIT"
] | permissive | # 导入模块
import tkinter
import os
import time
from tkinter import ttk
from tkinter import filedialog
from PIL import Image, ImageTk
# 定义类
class Application():
    """A small Tk file browser: choose a folder, walk its subdirectory tree,
    and list the files of the selected subfolder with timestamps and sizes."""
    def __init__(self):
        # Root window setup.
        self.root = tkinter.Tk()
        self.root.title('文件预览') # window title
        self.entryvar = tkinter.StringVar() # holds the currently chosen folder path
        self.entryvar.set(os.sep) # initial value: the path separator
        # Two stacked frames: controls on top, tree + detail list below.
        self.topframe = tkinter.Frame(self.root)
        self.treeframe = tkinter.Frame(self.root)
        # Top-row widgets: label, path entry, folder-chooser button.
        self.label1 = tkinter.Label(self.topframe, text='当前文件夹:')
        self.entry1 = tkinter.Entry(self.topframe, textvariable=self.entryvar)
        self.button1 = tkinter.Button(self.topframe, text="choose", command=self.catch_file)
        # Detail list: name / created / modified / size / is-directory columns.
        self.list = ttk.Treeview(self.treeframe, columns=('文件名', '创建日期', '修改日期', 'Size', '是否属于文件夹(TRUE \ FALSE)'),
                                 show="headings")
        # self.image = ImageTk.PhotoImage(Image.open('folder.png'))
        self.list.column('文件名', width=100, anchor='center')
        self.list.column('创建日期', width=100, anchor='center')
        self.list.column('修改日期', width=100, anchor='center')
        self.list.column('Size', width=100, anchor='center')
        self.list.column('是否属于文件夹(TRUE \ FALSE)', width=100, anchor='center')
        self.list.heading('文件名', text='文件名')
        self.list.heading('创建日期', text='创建日期')
        self.list.heading('修改日期', text='修改日期')
        self.list.heading('Size', text='Size')
        self.list.heading('是否属于文件夹(TRUE \ FALSE)', text='是否属于文件夹(TRUE \ FALSE)')
        # Folder tree; selecting a node fills the detail list via the handler.
        self.tree = ttk.Treeview(self.treeframe)
        self.tree.bind('<<TreeviewSelect>>', self.onTreeviewSelect)
        self.createWidgets()
    def createWidgets(self):
        """
        Lay out all widgets (frames packed, controls gridded inside the top frame).
        """
        self.topframe.pack(fill=tkinter.X)
        self.treeframe.pack(fill=tkinter.X)
        self.label1.grid(row=0, column=0)
        self.entry1.grid(row=1, column=1)
        self.button1.grid(row=1, column=2)
        self.tree.pack(fill=tkinter.X)
        self.list.pack(fill=tkinter.X)
    def catch_file(self):
        """
        Ask the user for a folder and (re)build the subdirectory tree from it.
        """
        self.tree.delete(*self.tree.get_children())
        self.start_dir = filedialog.askdirectory() # chosen root folder
        self.entryvar.set(self.start_dir)
        self.myid = self.tree.insert('', 0, text=str(self.start_dir))
        def get_dir(dir, tree, myid):
            # Recursively insert every non-hidden subdirectory of `dir`.
            # NOTE(review): relies on os.chdir into/out of each directory --
            # the process working directory changes while this runs.
            os.chdir(str(dir))
            num = 0
            for each_file in os.listdir(os.curdir):
                if os.path.isdir(each_file) and each_file[0] != '.':
                    temp = tree.insert(myid, num, text=str(dir) + os.sep + each_file)
                    num += 1
                    # Recurse into the subdirectory.
                    get_dir(dir + os.sep + each_file, tree, temp)
                    os.chdir(os.pardir)
        get_dir(self.start_dir, self.tree, self.myid)
        # Refresh the widgets that changed.
        self.tree.update()
        self.entry1.update()
    def onTreeviewSelect(self, event):
        """
        Fill the detail list with the files of the selected tree node.
        """
        self.list.delete(*self.list.get_children())
        sels = event.widget.selection()
        for idx in sels:
            filestr = self.tree.item(idx)['text']
            num = 0
            if os.path.isdir(filestr):
                os.chdir(filestr)
                for each_file in os.listdir(os.curdir):
                    if each_file[0] != '.':
                        num += 1
                        mtime = time.ctime(os.path.getmtime(filestr + os.sep + each_file))
                        ctime = time.ctime(os.path.getctime(filestr + os.sep + each_file))
                        size = os.path.getsize(filestr + os.sep + each_file)
                        isdir = os.path.isdir(filestr + os.sep + each_file)
                        self.list.insert('', num,
                                         values=(str(each_file), str(ctime), str(mtime), str(size), str(isdir)))
        self.list.update()
    def onListSelect(self, event):
        # NOTE(review): defined but never bound to self.list anywhere in this
        # class -- confirm whether a bind() call was intended.
        sels = event.widget.selection()
        for idx in sels:
            print(self.tree.item(idx)['text'])
    def run(self):
        # Enter the Tk main loop (blocks until the window is closed).
        self.root.mainloop()
if __name__ == '__main__':
    # Build the GUI and enter the Tk main loop.
    Application().run()
| true |
2642f56d74b35360dbb3a12b27ffbef740ef085d | Python | LalithK90/LearningPython | /privious_learning_code/OS_Handling/os.close() Method.py | UTF-8 | 501 | 3.96875 | 4 | [] | no_license | # Description
#
# The method close() closes the associated with file descriptor fd.
# Syntax
#
# Following is the syntax for close() method −
#
# os.close(fd);
#
# Parameters
#
# fd − This is the file descriptor of the file.
#
# Return Value
#
# This method does not return any value.
# Example
import os, sys

# Open (creating it if necessary) a file for reading and writing.
fd = os.open("foo.txt", os.O_RDWR | os.O_CREAT)
# Write one string.  os.write() requires a bytes object in Python 3 --
# passing a str (as the original did) raises TypeError.
os.write(fd, b"This is test")
# Close the opened file descriptor.
os.close(fd)
print("Closed the file successfully!!")
| true |
def shell_sort(t):
    """Sort list *t* in place with Shellsort and return it.

    Gaps follow the Knuth sequence (1, 4, 13, 40, ...): the sequence is
    grown until it reaches len(t), backed off two steps, then shrunk by a
    factor of 3 between passes.  Each pass is a gapped insertion sort that
    sifts elements rightwards.
    """
    n = len(t)
    # Grow the Knuth sequence until it reaches n, then back off two steps.
    h = 1
    while True:
        h = 3 * h + 1
        if h >= n:
            h = h // 9
            break
    # Guarantee at least one plain insertion-sort pass (h == 1).
    if h == 0:
        h = 1
    while h > 0:
        # One h-sorting pass over the whole list, right to left.
        for j in range(n - h - 1, -1, -1):
            x = t[j]
            i = j + h
            while i <= n - 1 and x > t[i]:
                t[i - h] = t[i]
                i = i + h
            t[i - h] = x
        h = h // 3
    return t
import random
# Ad-hoc manual check: sort a random list whose length is read from stdin.
n = int(input("podaj dlugosc listy t"))
t = [random.randint(0,10000) for i in range(n)]
print(shell_sort(t))
| true |
35f1767c59ad4ffd6e83a30cb8b81aa7966b37cb | Python | mikaelgba/PythonDSA | /cap5/Metodos_Especiais.py | UTF-8 | 1,290 | 3.75 | 4 | [] | no_license | #Classe Livro
# Special ("dunder") methods are methods defined inside an object's class
# that let the object work with various built-in functions.
class Livro:
    """A book with a name, an author and a page count.

    Demonstrates special ("dunder") methods: __str__ below is what the
    built-in str()/print() machinery uses for this object.
    """

    def __init__(self, nome, autor, paginas):
        self.nome = nome
        self.autor = autor
        self.paginas = paginas

    def __str__(self):
        # Same text print(livro) shows; there is no need to define this
        # by hand when an existing built-in hook already does the job.
        return "Nome: %s - Autor: %s - Paginas: %d" % (self.nome, self.autor, self.paginas)

    # Accessors kept for compatibility with existing callers; plain
    # attribute access would be the idiomatic Python alternative.
    def getNome(self):
        return self.nome

    def setNome(self, novo_nome):
        self.nome = novo_nome

    def getAutor(self):
        return self.autor

    def setAutor(self, novo_autor):
        self.autor = novo_autor

    def getPaginas(self):
        return self.paginas

    def setPaginas(self, novas_paginas):
        self.paginas = novas_paginas
# Notebook-export demo of the class above ("# In[ ]:" marks cell boundaries).
# In[ ]:
livr = Livro("O Senhor do Anéis, O retorno do rei", "J. R. R. Tolkien", 380)
print(str(livr),"\n")
print(livr.getAutor(),"\n")
# In[ ]:
print(livr,"\n")
# In[ ]:
# Delete the attribute so hasattr() below reports False.
del livr.paginas
# In[ ]:
print(hasattr(livr, "paginas"),"\n")
livr.setPaginas(380)
print(livr.getPaginas())
| true |
d98bf7fb05ff220efcab393e7ea7acd7cc7871b6 | Python | Aasthaengg/IBMdataset | /Python_codes/p02863/s590627710.py | UTF-8 | 811 | 2.75 | 3 | [] | no_license | import sys
readline = sys.stdin.readline  # faster input reader for competitive programming
MOD = 10 ** 9 + 7  # contest-template constant (unused in this solution)
INF = float('INF')  # contest-template constant (unused in this solution)
sys.setrecursionlimit(10 ** 5)
def main():
    """Solve AtCoder ABC 145 E-style dish knapsack from stdin.

    n dishes, t minutes; dish i takes a_i minutes to eat and gives b_i
    satisfaction.  Orders must be placed strictly before time t, so the
    last dish ordered may finish after the deadline.  Prints the maximum
    total satisfaction.
    """
    from operator import itemgetter
    n, t = list(map(int, readline().split()))
    mat = [list(map(int, readline().split())) for _ in range(n)]
    # Sort by eating time so the dish allowed to overhang the deadline can
    # be treated as the last (longest) one considered.
    mat.sort(key=itemgetter(0))
    # dp[i][time] = best satisfaction from a subset of the first i dishes
    # whose eating all finishes by `time` (time < t).
    dp = [[0] * t for _ in range(n + 1)]
    ans = 0
    for dish, vec in enumerate(mat):
        a = vec[0]
        b = vec[1]
        for time in range(t):
            # Skip this dish.
            dp[dish + 1][time] = max(dp[dish + 1][time], dp[dish][time])
            if time + a >= t:
                # Order at `time` (< t): the dish finishes after the
                # deadline, so it can only be the final dish -- close out.
                ans = max(ans, dp[dish][time] + b)
            else:
                # Dish fits entirely before t; extend the schedule.
                dp[dish + 1][time + a] = max(dp[dish + 1][time + a], dp[dish][time] + b)
    ans = max(max(dp[n]), ans)
    print(ans)
if __name__ == '__main__':
    # Read the problem input from stdin and print the answer.
    main()
| true |
72d1820040c883e8c5182b67beab0719732f53f1 | Python | michael-far/neuron-classfication | /learn_ephys_feats.py | UTF-8 | 3,756 | 2.515625 | 3 | [] | no_license | import pandas as pd
import numpy as np
from keras.layers import Dense, Dropout
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from model import Model
from helper_func import calc_metrics, plot_confusion_matrix
class FeatureLearner(Model):
    """Dense-network classifier of neuron dendrite type from tabular
    electrophysiology features.  Builds a Keras MLP and evaluates it with
    5-fold stratified cross-validation."""
    def __init__(self, db: pd.DataFrame, num_layers: int, num_nodes: int, batch_size: int = 64, epochs: int = 100,
                 files_root: str = '', segment_length: float = 3.0):
        # Keep only rows recorded with the requested segment length, then
        # drop all-empty columns and rows with any missing value.
        db = db[db['segment_length'] == segment_length]
        db = db.dropna(axis=1, how='all')
        db = db.dropna(axis=0)
        # Columns that are identifiers/metadata rather than input features.
        irrelevant_columns = [c for c in db.columns if c.endswith('_i')] + \
                             [c for c in db.columns if c.endswith('index')] +\
                             ['layer', 'mean_clipped', 'structure_area_abbrev', 'sampling_rate', 'segment_length']
        db = db.drop([x for x in irrelevant_columns if x in db.columns], axis=1)
        self.scaler = StandardScaler()  # re-fitted on each training fold
        super(FeatureLearner, self).__init__(db, num_layers=num_layers,
                                             num_nodes=num_nodes,
                                             batch_size=batch_size,
                                             epochs=epochs,
                                             files_root=files_root, segment_length=segment_length)
    def _create_model(self):
        """Build the Keras MLP: input layer + `num_layers` hidden blocks
        (Dense/ReLU + Dropout 0.5) + a single sigmoid output for binary
        classification."""
        n_feats = len(self._db.columns)-1 # minus 1 because we removed the label column
        model = Sequential()
        model.add(Dense(self._num_nodes, activation='relu', input_dim=n_feats))
        model.add(Dropout(0.5))
        for _ in range(self._num_layers):
            model.add(Dense(self._num_nodes, activation='relu'))
            model.add(Dropout(0.5))
        # model.add(Dense(y_train.shape[1], activation='softmax'))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
        # model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
        return model
    def test(self, x: np.ndarray):
        """Scale `x` with the already-fitted scaler and return the model's
        class predictions."""
        x = self.scaler.transform(x)
        pred = self.model.predict_classes(x)
        return pred
    def train_and_test(self, previous_accuracy: float = 0.0):
        """Run 5-fold stratified cross-validation on the prepared table,
        save the model when mean accuracy beats `previous_accuracy`, log
        params/metrics via self._save_results, and return mean accuracy.
        NOTE(review): `df = self._db` aliases (does not copy) the stored
        frame, so the pop/encode below mutate self._db in place."""
        df = self._db
        df['dendrite_type'] = pd.Categorical(df['dendrite_type'])
        df['dendrite_type'] = df['dendrite_type'].cat.codes
        y = df.pop('dendrite_type')
        y = y.values.astype(float)
        x = df.values
        # NOTE(review): newer scikit-learn raises when random_state is set
        # without shuffle=True -- confirm the intended sklearn version.
        kf = StratifiedKFold(n_splits=5, random_state=42)
        stats = []
        for train_index, test_index in kf.split(x, y):
            x_train = self.scaler.fit_transform(x[train_index])
            y_train = y[train_index]
            x_test = self.scaler.transform(x[test_index])
            y_test = y[test_index]
            self.model.fit(x_train, y_train, epochs=self._epochs)
            pred = self.model.predict_classes(x_test)
            results = calc_metrics(y_test, pred)  # expected: (f1, accuracy, confusion matrix)
            stats.append(results)
        mean_f1 = np.asarray([x[0] for x in stats]).mean()
        mean_accuracy = np.asarray([x[1] for x in stats]).mean()
        if mean_accuracy > previous_accuracy:
            # Saves the model from the *last* fold only.
            self.model.save('data/models/ephys_dnn')
        sum_cm = np.asarray([x[2] for x in stats]).sum(axis=0)
        # NOTE(review): np.float is deprecated/removed in modern NumPy, and
        # dividing by a 1-D sum(axis=1) broadcasts over columns, not rows --
        # confirm this normalises the confusion matrix as intended.
        sum_cm = sum_cm / sum_cm.astype(np.float).sum(axis=1)
        params = {'num_layers': self._num_layers, 'num_nodes': self._num_nodes,
                  'batch_size': self._batch_size, 'epochs': self._epochs}
        res = {'mean_accuracy': mean_accuracy, 'mean_f1': mean_f1}
        self._save_results(params, res, sum_cm, 'DNN')
        return mean_accuracy
| true |
de1dd609dbe7e74269c6e0485f8a8a65639ff4ba | Python | destinyddx/HelloWorld | /PCA.py | UTF-8 | 2,158 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
class PCA:
    """Principal component analysis fitted by gradient ascent.

    Components are extracted one at a time: the first maximises the
    variance of the projected (demeaned) data, and each subsequent one is
    found after subtracting everything already explained (deflation).
    """

    def __init__(self, n_components):
        """Remember how many components to extract; fit() does the work."""
        assert n_components >= 1, "n_components must be valid"
        self.n_components = n_components
        self.components_ = None  # (n_components, n_features) after fit()

    def fit(self, X, eta=0.01, n_iters=1e4):
        """Learn the first n_components principal components of X.

        :param X: data matrix of shape (n_samples, n_features)
        :param eta: gradient-ascent learning rate
        :param n_iters: maximum iterations per component
        """
        assert self.n_components <= X.shape[1], \
            "n_components must not be greater than the feature number of X"

        def demean(X):
            # Centre each feature so the variance objective is well defined.
            return X - np.mean(X, axis=0)

        def f(w, X):
            # Objective: variance of the data projected onto direction w.
            return np.sum((X.dot(w) ** 2)) / len(X)

        def df(w, X):
            # Gradient of f with respect to w.
            return X.T.dot(X.dot(w)) * 2 / len(X)

        def direction(w):
            # Normalise w to unit length.
            return w / np.linalg.norm(w)

        def first_component(X, initial_w, eta, n_iters=1e4, epsilon=1e-8):
            # Gradient *ascent* (we maximise the projected variance),
            # re-normalising w to a unit vector at every step.
            cur_iter = 0
            w = direction(initial_w)
            while cur_iter < n_iters:
                gradient = df(w, X)
                last_w = w
                w = w + eta * gradient
                w = direction(w)
                if abs(f(w, X) - f(last_w, X)) < epsilon:
                    break
                cur_iter += 1
            return w

        X_pca = demean(X)
        self.components_ = np.empty(shape=(self.n_components, X.shape[1]))
        for i in range(self.n_components):
            initial_w = np.random.random(X_pca.shape[1])
            w = first_component(X_pca, initial_w, eta, n_iters)
            self.components_[i, :] = w
            # Deflate: remove the projection onto w before the next pass.
            X_pca = X_pca - X_pca.dot(w).reshape(-1, 1) * w
        return self

    def transform(self, X):
        """Project X onto the fitted components -> (n_samples, n_components)."""
        assert X.shape[1] == self.components_.shape[1], \
            "X must have the same feature number as the fitted data"
        return X.dot(self.components_.T)

    def inverse_transform(self, X):
        """Map component-space data back to the original feature space."""
        assert X.shape[1] == self.components_.shape[0], \
            "X must have n_components columns"
        return X.dot(self.components_)

    def __repr__(self):
        return "PCA(n_components = %d)" % self.n_components
1150edd260176d4f517c1eae995b8f08d2d750fe | Python | LzWaiting/03.PythonProcess | /code/example/process_lock.py | UTF-8 | 444 | 2.90625 | 3 | [] | no_license | from multiprocessing import Process,Lock
import sys
from time import sleep
def writer1():
    """Write 20 lines to stdout while holding the shared module lock."""
    with lock:  # same acquire/release pair as before, via the context manager
        for _ in range(20):
            sys.stdout.write('writer1 我想先向终端写入\n')
def writer2():
    """Write 20 lines to stdout while holding the shared module lock."""
    with lock:  # same acquire/release pair as before, via the context manager
        for _ in range(20):
            sys.stdout.write('writer2 我想先向终端写入\n')
# Shared lock that serialises the two writers' access to stdout, so their
# output blocks do not interleave.
lock = Lock()
w1 = Process(target=writer1)
w2 = Process(target=writer2)
w1.start()
w2.start()
# Wait for both child processes to finish.  (The original source ended with
# a corrupted "w2.join() | true" line, which raised NameError.)
w1.join()
w2.join()