blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8a4d78753f8eff58504daca0eea1a2e98920bc2b | Python | strutt/seaveyMeasurements | /readS11s.py | UTF-8 | 8,279 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python2
from matplotlib import pyplot as plt
import math
import numpy as np
from glob import glob
"""
Simple script to read and draw the S11 data.
"""
def main():
    """Plot per-antenna S11 magnitude/phase for VPol and HPol and build
    bin-by-bin mean/RMS/max/min summary plots across all antennas.

    Fix over the original: the HPol summary figure is now assigned to ``fig``
    before saving, so ``savePlots`` writes the HPol figure instead of saving
    the VPol summary twice.
    """
    filesV = glob('seaveyDataPalestine2014/S11s/*V.csv')
    filesH = glob('seaveyDataPalestine2014/S11s/*H.csv')
    # Put the glob results into antenna-number order (P1, P2, ...).
    filesV = orderGlobResults(filesV, 'V')
    filesH = orderGlobResults(filesH, 'H')

    nAnts = len(filesV)
    nAntsPerAxis = 1

    # Bin-by-bin accumulators, sized once the first antenna is read.
    mean_vpol_s11_dB = []
    rms_vpol_s11_dB = []
    max_vpol_s11_dB = []
    min_vpol_s11_dB = []
    mean_vpol_phase = []
    rms_vpol_phase = []
    max_vpol_phase = []
    min_vpol_phase = []

    mean_hpol_s11_dB = []
    rms_hpol_s11_dB = []
    max_hpol_s11_dB = []
    min_hpol_s11_dB = []
    mean_hpol_phase = []
    rms_hpol_phase = []
    max_hpol_phase = []
    min_hpol_phase = []

    savePlots = False
    counter = 0
    for antInd, (fV, fH) in enumerate(zip(filesV, filesH)):
        # Start a fresh magnitude/phase axis pair every nAntsPerAxis antennas.
        if counter % nAntsPerAxis == 0:
            fig, axes = plt.subplots(2)
            plt.suptitle('Antenna P' + str(antInd+1) + ' S11')
            for ax in axes:
                ax.grid(b=True, which='major', color='black', linestyle='--')

        # ----- VPol -----
        mags, phases, freqs = readS11(fV)
        mags_dB = [10*math.log10(m) for m in mags]
        #phases = [p*180./math.pi for p in phases]
        axes[0].plot(freqs, mags_dB, label='VPol')
        axes[1].plot(freqs, phases, label='VPol')

        if antInd == 0:
            # First antenna fixes the number of frequency bins.
            mean_vpol_s11_dB = [0 for g in mags_dB]
            rms_vpol_s11_dB = [0 for g in mags_dB]
            max_vpol_s11_dB = [-100000 for g in mags_dB]
            min_vpol_s11_dB = [1000000 for g in mags_dB]
            mean_vpol_phase = [0 for g in phases]
            rms_vpol_phase = [0 for g in phases]
            max_vpol_phase = [-100000 for g in phases]
            min_vpol_phase = [1000000 for g in phases]
        # Accumulate sum, sum of squares, and running extrema per bin.
        mean_vpol_s11_dB = [m + g for m, g in zip(mean_vpol_s11_dB, mags_dB)]
        rms_vpol_s11_dB = [r + g**2 for r, g in zip(rms_vpol_s11_dB, mags_dB)]
        max_vpol_s11_dB = [g if g > maxG else maxG for g, maxG in zip(mags_dB, max_vpol_s11_dB)]
        min_vpol_s11_dB = [g if g < minG else minG for g, minG in zip(mags_dB, min_vpol_s11_dB)]
        mean_vpol_phase = [m + g for m, g in zip(mean_vpol_phase, phases)]
        rms_vpol_phase = [r + g**2 for r, g in zip(rms_vpol_phase, phases)]
        max_vpol_phase = [g if g > maxG else maxG for g, maxG in zip(phases, max_vpol_phase)]
        min_vpol_phase = [g if g < minG else minG for g, minG in zip(phases, min_vpol_phase)]

        # ----- HPol -----
        mags, phases, freqs = readS11(fH)
        mags_dB = [10*math.log10(m) for m in mags]
        #phases = [p*180./math.pi for p in phases]
        axes[0].plot(freqs, mags_dB, label='HPol')
        axes[1].plot(freqs, phases, label='HPol')

        if antInd == 0:
            mean_hpol_s11_dB = [0 for g in mags_dB]
            rms_hpol_s11_dB = [0 for g in mags_dB]
            max_hpol_s11_dB = [-100000 for g in mags_dB]
            min_hpol_s11_dB = [1000000 for g in mags_dB]
            mean_hpol_phase = [0 for g in phases]
            rms_hpol_phase = [0 for g in phases]
            max_hpol_phase = [-100000 for g in phases]
            min_hpol_phase = [1000000 for g in phases]
        mean_hpol_s11_dB = [m + g for m, g in zip(mean_hpol_s11_dB, mags_dB)]
        rms_hpol_s11_dB = [r + g**2 for r, g in zip(rms_hpol_s11_dB, mags_dB)]
        max_hpol_s11_dB = [g if g > maxG else maxG for g, maxG in zip(mags_dB, max_hpol_s11_dB)]
        min_hpol_s11_dB = [g if g < minG else minG for g, minG in zip(mags_dB, min_hpol_s11_dB)]
        mean_hpol_phase = [m + g for m, g in zip(mean_hpol_phase, phases)]
        rms_hpol_phase = [r + g**2 for r, g in zip(rms_hpol_phase, phases)]
        max_hpol_phase = [g if g > maxG else maxG for g, maxG in zip(phases, max_hpol_phase)]
        min_hpol_phase = [g if g < minG else minG for g, minG in zip(phases, min_hpol_phase)]

        counter += 1
        if counter % nAntsPerAxis == 0 or counter == nAnts - 1:
            axes[0].legend(loc='lower right', fancybox = True)
            axes[1].legend(loc='lower right', fancybox = True)
            for ax in axes:
                ax.set_xlabel('Frequency (MHz)')
                ax.set_xlim([0, 1500])
            axes[0].set_ylabel('Power (dB)')
            axes[1].set_ylabel('Group delay (ns)')
            #axes[1].set_ylim([-180, 180])
            if savePlots == True:
                antNum = antInd + 1
                # Zero-pad single-digit antenna numbers: s11p01 ... s11p09.
                fileName = ''
                if antNum <= 9:
                    fileName = 's11p0' + str(antNum)
                else:
                    fileName = 's11p' + str(antNum)
                fig.savefig('measurementSummaryDocs/'+fileName+'.png',dpi=100)

    n = counter
    # Finalize general calc: sums -> means, sums of squares -> standard deviations.
    mean_vpol_s11_dB = [m/n for m in mean_vpol_s11_dB]
    rms_vpol_s11_dB = [math.sqrt(r/n-m**2) for r, m in zip(rms_vpol_s11_dB, mean_vpol_s11_dB) ]
    mean_hpol_s11_dB = [m/n for m in mean_hpol_s11_dB]
    rms_hpol_s11_dB = [math.sqrt(r/n-m**2) for r, m in zip(rms_hpol_s11_dB, mean_hpol_s11_dB) ]
    mean_vpol_phase = [m/n for m in mean_vpol_phase]
    rms_vpol_phase = [math.sqrt(r/n-m**2) for r, m in zip(rms_vpol_phase, mean_vpol_phase) ]
    mean_hpol_phase = [m/n for m in mean_hpol_phase]
    rms_hpol_phase = [math.sqrt(r/n-m**2) for r, m in zip(rms_hpol_phase, mean_hpol_phase) ]

    # VPol summary figure.
    fig = plt.figure()
    plt.title('Vertical Polarization Antenna S11')
    plt.plot(freqs, mean_vpol_s11_dB, label = 'Mean')
    plt.plot(freqs, [m+r for r, m in zip(rms_vpol_s11_dB, mean_vpol_s11_dB)], label = 'Mean + RMS')
    plt.plot(freqs, [m-r for r, m in zip(rms_vpol_s11_dB, mean_vpol_s11_dB)], label = 'Mean - RMS')
    plt.plot(freqs, [m for m in max_vpol_s11_dB], label = 'Bin-by-bin maximum')
    plt.plot(freqs, [m for m in min_vpol_s11_dB], label = 'Bin-by-bin minimum')
    plt.grid(b=True, which='major', color='black', linestyle='--')
    plt.legend(loc='lower right', fancybox=True)
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('S11 (dB)')
    if savePlots == True:
        fig.savefig('measurementSummaryDocs/s11VPolSummary.png',dpi=100)

    # HPol summary figure.
    # BUG FIX: assign the new figure to ``fig`` so the savefig below writes
    # this figure (the original left ``fig`` pointing at the VPol summary).
    fig = plt.figure()
    plt.title('Horizontal Polarization Antenna S11')
    plt.plot(freqs, mean_hpol_s11_dB, label = 'Mean')
    plt.plot(freqs, [m+r for r, m in zip(rms_hpol_s11_dB, mean_hpol_s11_dB)], label = 'Mean + RMS')
    plt.plot(freqs, [m-r for r, m in zip(rms_hpol_s11_dB, mean_hpol_s11_dB)], label = 'Mean - RMS')
    plt.plot(freqs, [m for m in max_hpol_s11_dB], label = 'Bin-by-bin maximum')
    plt.plot(freqs, [m for m in min_hpol_s11_dB], label = 'Bin-by-bin minimum')
    plt.grid(b=True, which='major', color='black', linestyle='--')
    plt.legend(loc='lower right', fancybox=True)
    plt.xlabel('Frequency (MHz)')
    plt.ylabel('S11 (dB)')
    if savePlots == True:
        fig.savefig('measurementSummaryDocs/s11HPolSummary.png',dpi=100)

    plt.show()
def orderGlobResults(fileList, polChar):
    """Return *fileList* sorted by antenna number.

    The antenna number is the text between the last 'P' in the path and the
    polarisation character (e.g. '.../P12V.csv' with polChar 'V' -> 12).

    Fix over the original: the old implementation placed each file at index
    ``antNumber - 1``, which crashed (IndexError) or silently overwrote
    entries whenever antenna numbers were not exactly 1..len(fileList).
    Sorting by the extracted number gives the same order for contiguous
    numbering and is robust to gaps.
    """
    def antennaNumber(path):
        # '.../P12V.csv'.split('P')[-1] -> '12V.csv'; split on polChar -> '12'
        return int(path.split('P')[-1].split(polChar)[0])

    return sorted(fileList, key=antennaNumber)
def readS11(fileName):
    """Parse one S11 CSV export.

    The first 18 lines are treated as header and skipped; data rows are
    "frequency_Hz,real,imag" and parsing stops at the first line containing
    'END'.

    Returns a tuple (mags, phases, freqs):
      mags   -- |S11|^2 (re^2 + im^2) per frequency bin
      phases -- unwrapped phase in radians (numpy array from np.unwrap)
      freqs  -- frequency in MHz

    Fixes over the original: the Python-2-only ``file()`` builtin is replaced
    with ``open()`` inside a context manager (the old code leaked the file
    handle), and the unused local ``dw`` (which only fed the commented-out
    group-delay conversion) was dropped.
    """
    freqs = []
    reals = []
    imags = []
    with open(fileName) as s11File:
        for lineInd, line in enumerate(s11File):
            #print lineInd, line
            if 'END' in line:
                break
            if lineInd < 18:
                continue
            vals = line.split(',')
            freqs.append(1e-6*float(vals[0]))
            reals.append(float(vals[1]))
            imags.append(float(vals[2]))

    mags = [re*re+im*im for re, im in zip(reals, imags)]
    #phases = [math.asin(im/math.sqrt(mag)) for re, im, mag in zip(reals, imags, mags)]
    phases = [math.atan2(im, re) for im, re in zip(imags, reals)]
    phases = np.unwrap(phases)
    # Diagnostic print kept from the original: report any frequency where the
    # unwrapped phase increased relative to the previous bin.
    for i, (p, f) in enumerate(zip(phases, freqs)):
        if i > 0 and p > phases[i-1]:
            print(f)
    #phases = [-p/dw for p in phases]
    return mags, phases, freqs
if __name__ == '__main__':
    # Script entry point: read, plot and summarise all S11 measurements.
    main()
| true |
38ece02182fed05201718de8f5d8e8d21bd846e6 | Python | SauloRicardo/trab_rec_paa | /trab_rec.py | UTF-8 | 7,074 | 2.671875 | 3 | [] | no_license | import networkx as nx
import copy
import random
import time
def rcsp(node_beg_f, node_end_f, qtd_max_res_f, k_paths, graph=nx.DiGraph()):
    """Randomised heuristic for the resource-constrained shortest path.

    Builds, per node, a successor list with the cheapest edge kept at the
    front, removes the edges used by the already-known paths in *k_paths*,
    then walks from node_beg_f towards node_end_f picking random successors
    and backtracking when a node runs out of candidates.

    NOTE(review): the mutable default ``graph=nx.DiGraph()`` is never mutated
    here (only deep-copied and read), but it remains a risky idiom.
    """
    graph_aux = copy.deepcopy(graph)
    # possible_paths[node] -> candidate successors; the lowest-weight
    # successor seen so far is kept at the front of the list.
    possible_paths = {}
    for nodes in range(1, graph_aux.number_of_nodes()+1):
        possible_paths_aux = []
        it = 0
        for node_iter in graph_aux.successors(nodes):
            if it == 0:
                lower = graph_aux.get_edge_data(nodes, node_iter)['weight']
                it = 1
                possible_paths_aux.append(node_iter)
            elif graph_aux.get_edge_data(nodes, node_iter)['weight'] <= lower:
                lower = graph_aux.get_edge_data(nodes, node_iter)['weight']
                possible_paths_aux.insert(0, node_iter)
            else:
                possible_paths_aux.append(node_iter)
        possible_paths[nodes] = possible_paths_aux

    # Forbid re-using the edges of the previously found paths.
    for j in k_paths:
        ant_aux = node_beg_f
        for i in j:
            if i != node_beg_f:
                possible_paths[ant_aux].remove(i)
                ant_aux = i

    path = []
    node_at = node_beg_f
    path.append(node_at)
    resources_consumed = [0]*len(qtd_max_res_f)
    while node_at != node_end_f:
        # print(path)
        possibilities = len(possible_paths[node_at])
        if possibilities > 0:
            possible_node = possible_paths[node_at].pop(random.randrange(0, possibilities)) # Aleatório
            # possible_node = possible_paths[node_at].pop(0) # Guloso
            resources_consumed_aux = copy.deepcopy(resources_consumed)
            cont_aux = 0
            for resources in graph.get_edge_data(node_at, possible_node)['resource_consumed']:
                resources_consumed_aux[cont_aux] += resources
                cont_aux += 1
            # NOTE(review): `over` becomes True if ANY resource is still below
            # its cap; a strict RCSP feasibility test would require ALL of
            # them to be below cap — confirm the intended semantics.
            over = False
            for verify in range(0, len(resources_consumed_aux)):
                if resources_consumed_aux[verify] < qtd_max_res_f[verify]:
                    over = True
            if over:
                node_at = possible_node
                resources_consumed = resources_consumed_aux
                path.append(node_at)
        else:
            # Dead end: backtrack one node and refund that edge's resources.
            node_return = path.pop()
            if len(path) == 0:
                break
            else:
                node_at = path[len(path) - 1]
                cont_aux = 0
                for resources in graph.get_edge_data(node_at, node_return)['resource_consumed']:
                    resources_consumed[cont_aux] -= resources
                    cont_aux += 1
    # print(path)
    return path
def make_tests():
    """Run the rcsp heuristic over the benchmark instance files and append a
    summary (best path, cost, wall time) for each one to the global ``logs``
    file.

    Instance format (whitespace-separated; the first and last tokens of each
    line are discarded):
      line 1: n_nodes n_edges n_resources
      line 2: minimum resource amounts
      line 3: maximum resource amounts
      next n_nodes lines: one node each
      remaining lines: origin destination weight resource_consumption...

    NOTE(review): ``path_final`` is only bound when at least one iteration
    returns a non-empty path; otherwise the logging below raises NameError.
    """
    archives = ['rcsp1.txt', 'rcsp3.txt', 'rcsp6.txt', 'rcsp7.txt', 'rcsp9.txt',
                'rcsp11.txt', 'rcsp13.txt', 'rcsp15.txt', 'rcsp19.txt', 'rcsp21.txt']
    for arch in archives:
        arq = arch
        inp_txt = open(arq, 'r')

        cont = 1
        qtd_nodes = 0
        qtd_edges = 0
        qtd_resources = 0
        qtd_min_res = []
        qtd_max_res = []
        G = nx.DiGraph()

        for line in inp_txt:
            line_aux = line.split(' ')
            # Drop the leading tag token and the trailing newline token.
            line_aux.pop(0)
            line_aux.pop()

            if cont == 1:
                qtd_nodes, qtd_edges, qtd_resources = line_aux
                qtd_nodes, qtd_edges, qtd_resources = int(qtd_nodes), int(qtd_edges), int(qtd_resources)
            elif cont == 2:
                qtd_min_res = line_aux
                for x in range(0, len(qtd_min_res)):
                    qtd_min_res[x] = int(qtd_min_res[x])
            elif cont == 3:
                qtd_max_res = line_aux
                for x in range(0, len(qtd_max_res)):
                    qtd_max_res[x] = int(qtd_max_res[x])
            elif cont <= (qtd_nodes + 3):
                # Node lines: node ids are assigned 1..n_nodes in file order.
                G.add_node(cont - 3)
            else:
                node_beg = int(line_aux.pop(0))
                node_end = int(line_aux.pop(0))
                weight_aux = int(line_aux.pop(0))
                res_cons_aux = line_aux
                for x in range(0, len(res_cons_aux)):
                    res_cons_aux[x] = int(res_cons_aux[x])
                G.add_edge(node_beg, node_end, weight=weight_aux, resource_consumed=res_cons_aux)
            cont += 1

        iterations = 10
        total_final = 99999999
        known_paths = []

        time_tot = time.time()
        for x in range(0, iterations):
            # Node 1 is the source and node qtd_nodes the sink; paths found in
            # earlier iterations are excluded via known_paths.
            path = rcsp(1, qtd_nodes, qtd_max_res, known_paths, G)
            # print(path)
            known_paths.append(path)
            if len(path) > 0:
                ant = path[0]
                total = 0
                for y in path:
                    if y != path[0]:
                        total += G.get_edge_data(ant, y)['weight']
                        ant = y
                if total < total_final:
                    total_final = total
                    path_final = path
        time_tot = time.time() - time_tot

        logs.write(arch)
        logs.write("\nCaminho encontrado : " + str(path_final))
        logs.write("\nCusto do caminho : " + str(total_final))
        logs.write("\nTempo Utilizado :" + str(time_tot))
        logs.write("\n\n")
        # print(arch)
        # print("Caminho encontrado : ", path_final)
        # print("Custo do caminho : ", total_final)
        # print("Tempo Utilizado :", time_tot)
        # print()
# print()
# Open the log file written by make_tests() and run the whole benchmark suite.
logs = open('log_rand.txt', 'w')
make_tests()
# arq = 'rcsp21.txt'
# inp_txt = open(arq, 'r')
#
# cont = 1
# qtd_nodes = 0
# qtd_edges = 0
# qtd_resources = 0
# qtd_min_res = []
# qtd_max_res = []
# G = nx.DiGraph()
#
# for line in inp_txt:
# line_aux = line.split(' ')
# line_aux.pop(0)
# line_aux.pop()
#
# if cont == 1:
# qtd_nodes, qtd_edges, qtd_resources = line_aux
# qtd_nodes, qtd_edges, qtd_resources = int(qtd_nodes), int(qtd_edges), int(qtd_resources)
#
# elif cont == 2:
# qtd_min_res = line_aux
# for x in range(0, len(qtd_min_res)):
# qtd_min_res[x] = int(qtd_min_res[x])
#
# elif cont == 3:
# qtd_max_res = line_aux
# for x in range(0, len(qtd_max_res)):
# qtd_max_res[x] = int(qtd_max_res[x])
#
# elif cont <= (qtd_nodes + 3):
# G.add_node(cont - 3)
#
# else:
# node_beg = int(line_aux.pop(0))
# node_end = int(line_aux.pop(0))
# weight_aux = int(line_aux.pop(0))
# res_cons_aux = line_aux
# for x in range(0, len(res_cons_aux)):
# res_cons_aux[x] = int(res_cons_aux[x])
# G.add_edge(node_beg, node_end, weight=weight_aux, resource_consumed=res_cons_aux)
# cont += 1
#
# iterations = 10
# total_final = 99999999
# known_paths = []
#
# for x in range(0, iterations):
# path = rcsp(1, 500, qtd_max_res, known_paths, G)
# # print(path)
# known_paths.append(path)
# if len(path) > 0:
# ant = path.pop(0)
# total = 0
# for y in path:
# total += G.get_edge_data(ant, y)['weight']
# ant = y
#
# if total < total_final:
# total_final = total
# path_final = path
#
# print(total_final)
| true |
dd00c0462afec232f206ddf2b6d2ad20a7b877be | Python | rhuckleberry/DL_Phylo | /Recombination/recombination_networks/recombination_ResNet.py | UTF-8 | 6,875 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
"""Quartet tree classification
* Model: Convolutional neural network with basic residual connections
and batch normalization.
* Training data:
* 100000 pre-simulated trees using training1.
* Each epoch uses randomly sampled 2000 trees.
* The batch size is 16.
* Validation data: 2000 pre-simulated trees using training1.
* Optimizer: Adam with an initial learning rate of 0.001.
"""
print("imports")
import visdom
import pathlib
import pickle
import random
import numpy as np
import torch.autograd
import torch.nn
import torch.optim
import torch.utils.data
import datetime
# Run/configuration labels: visdom plot title and window id, dataset tag used
# to locate the .npy files, and a model id used in checkpoint filenames.
graph_title = "ResNet Recombination Model test1_fact5_sl10000"
graph_win = "test1_fact5_sl10000" #"recomb2"
data_test = "test1_fact5_sl10000"
model_number = "0"
class _Model(torch.nn.Module):
    """A neural network model to predict phylogenetic trees."""

    def __init__(self):
        """Create a neural network model."""
        print("making model")
        super().__init__()
        # 1x1 convolutions with residual blocks; AvgPool1d(2) halves the
        # sequence length three times before the adaptive pool reduces each
        # channel to a single value.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(16, 80, 1, groups=4),
            torch.nn.BatchNorm1d(80),
            torch.nn.ReLU(),
            torch.nn.Conv1d(80, 32, 1),
            torch.nn.BatchNorm1d(32),
            torch.nn.ReLU(),
            _ResidueModule(32),
            _ResidueModule(32),
            torch.nn.AvgPool1d(2),
            _ResidueModule(32),
            _ResidueModule(32),
            torch.nn.AvgPool1d(2),
            _ResidueModule(32),
            _ResidueModule(32),
            torch.nn.AvgPool1d(2),
            _ResidueModule(32),
            _ResidueModule(32),
            torch.nn.AdaptiveAvgPool1d(1),
        )
        # Linear head mapping the 32 pooled channels to 3 output classes.
        self.classifier = torch.nn.Linear(32, 3)

    def forward(self, x):
        """Predict phylogenetic trees for the given sequences.

        Parameters
        ----------
        x : torch.Tensor
            One-hot encoded sequences.

        Returns
        -------
        torch.Tensor
            The predicted adjacency trees.
        """
        #print("forward")
        # Flatten the one-hot encoding into 16 channels over sequence length.
        x = x.view(x.size()[0], 16, -1)
        x = self.conv(x).squeeze(dim=2)
        return self.classifier(x)
class _ResidueModule(torch.nn.Module):
def __init__(self, channel_count):
#print("making resnet")
super().__init__()
self.layers = torch.nn.Sequential(
torch.nn.Conv1d(channel_count, channel_count, 1),
torch.nn.BatchNorm1d(channel_count),
torch.nn.ReLU(),
torch.nn.Conv1d(channel_count, channel_count, 1),
torch.nn.BatchNorm1d(channel_count),
torch.nn.ReLU(),
)
def forward(self, x):
#print("forward resnet")
return x + self.layers(x)
# ---------------------------------------------------------------------------
# Training / validation driver (runs at import time, as in the original).
# ---------------------------------------------------------------------------
training_data = np.load(f"/Users/rhuck/Downloads/DL_Phylo/Recombination/data_generation/test_data/{data_test}_train.npy", allow_pickle = True)
dev_data = np.load(f"/Users/rhuck/Downloads/DL_Phylo/Recombination/data_generation/test_data/{data_test}_dev.npy", allow_pickle = True)
train_data = training_data.tolist()
validation_data = dev_data.tolist()
print("Train Set Size: ", len(train_data))
print("Development Set Size: ", len(validation_data))

#plotting
vis = visdom.Visdom()

#model Hyperparameters
model = _Model()
# #Load Model
# load_path = f"/Users/rhuck/Downloads/DL_Phylo/Recombination/models/{data_test}." + str(epoch)
# model = torch.load(load_path)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
#weight initialization...
loss_function = torch.nn.CrossEntropyLoss(reduction='sum')

BATCH_SIZE = 16
TRAIN_SIZE = 2000#len(train_data)#3600
epoch = 1

#Train
while epoch < 300:
    #TRAIN
    model.train()
    train_start_time = datetime.datetime.now()
    #randomly sample TRAIN_SIZE number of datapoints
    epoch_train = random.sample(train_data, TRAIN_SIZE)
    sample_count, correct, score = 0, 0, 0.0
    for i in range(TRAIN_SIZE // BATCH_SIZE):
        data = epoch_train[i * BATCH_SIZE : (i+1) * BATCH_SIZE]
        x_list = []
        y_list = []
        for datapoint in data: #transformed_data:
            sequences = datapoint[0]
            label = datapoint[1]
            x_list.append(sequences)
            y_list.append(label)
        x = torch.tensor(x_list, dtype=torch.float)
        x = x.view(BATCH_SIZE, 4, 4, -1)
        y = torch.tensor(y_list)
        sample_count += x.size()[0]
        optimizer.zero_grad()
        output = model(x)
        loss = loss_function(output, y)
        loss.backward()
        optimizer.step()
        score += float(loss)
        _, predicted = torch.max(output.data, 1)
        correct += (predicted == y).sum().item()
        print("\n", predicted, y, "\n")
    score /= sample_count
    accuracy = correct / sample_count
    # BUG FIX: the original referenced the undefined name `epoch_start_time`
    # here, which raised NameError at the end of the first training epoch; the
    # timer variable set above is `train_start_time`.
    train_execution_time = datetime.datetime.now() - train_start_time
    print("\n\n", "Epoch: \n", epoch, "Train Acc: ", accuracy, "Train Score: ", score,
          "Training Execution Time: ", train_execution_time)
    vis.line(
        X = [epoch],
        Y = [accuracy],
        opts= dict(title=graph_title,
                   xlabel="Epochs",
                   showlegend=True),
        win= graph_win,
        name = "Train Accuracy",
        update="append"
    )
    vis.line(
        X = [epoch],
        Y = [score],
        win= graph_win,
        name = "Train Score",
        update="append"
    )

    ##VALIDATE
    optimizer.zero_grad()
    model.train(False)
    dev_start_time = datetime.datetime.now()
    sample_count, correct, score = 0, 0, 0.0
    tree_0_len, tree_1_len, tree_2_len = 0, 0, 0
    guess_0, guess_1, guess_2 = 0,0,0
    real_0, real_1, real_2 = 0,0,0
    #NO PERMUTE -- batch size of 1
    for x, y in validation_data:
        x = torch.tensor(x, dtype=torch.float)
        x = x.view(1, 4, 4, -1)
        y = torch.tensor([y])
        sample_count += x.size()[0]
        output = model(x)
        loss = loss_function(output, y)
        score += float(loss)
        _, predicted = torch.max(output.data, 1)
        correct += (predicted == y).sum().item()
        print("\n", predicted, y, "\n")
    score /= sample_count
    accuracy = correct / sample_count
    dev_execution_time = datetime.datetime.now() - dev_start_time
    print("\n", "Val Acc: ", accuracy, "Val Score: ", score, "Dev Execution Time: ", dev_execution_time)
    vis.line(
        X = [epoch],
        Y = [accuracy],
        win= graph_win,
        name = "Dev Accuracy",
        update="append"
    )
    vis.line(
        X = [epoch],
        Y = [score],
        win= graph_win,
        name = "Dev Score",
        update="append"
    )

    #save MODEL
    save_path = f"/Users/rhuck/Downloads/DL_Phylo/Recombination/recombination_networks/models/{data_test}_{model_number}." + str(epoch)
    torch.save(model.state_dict(), save_path)
    epoch += 1
| true |
c383ae7ff856ff3caed544b17456ef275daab90e | Python | frapa/tbcnn | /prepare_input.py | UTF-8 | 1,574 | 3.109375 | 3 | [
"MIT"
] | permissive | import os
import numpy as np
import imageio
def prepare(inDir, outFile):
    """Prepare input: convert to float with unit variance and zero mean,
    extract labels and pack everything into a big numpy array to be used for
    training.

    outFile => path without extension (more than one file will be created)

    Filenames are expected to split on '_' into at least three pieces, with
    pieces[1] the patient name and pieces[2] the label (1 tbc, 0 nothing).

    Changes from the original: the early-exit no longer uses the awkward
    ``return print(...)`` form, and the unused local ``num`` was dropped.
    """
    if os.path.exists(outFile + '.npy'):
        print("Input was already prepared")
        return

    files = sorted(os.listdir(inDir))

    name_list = []
    label_list = []
    image_list = []
    for f in files:
        in_path = os.path.join(inDir, f)

        filename = os.path.splitext(f)[0]
        pieces = filename.split('_')
        name = pieces[1]
        label = int(pieces[2])  # 1 tbc, 0 nothing

        img = imageio.imread(in_path)

        # Convert to float
        img_float = img.astype(np.float32)

        label_list.append(label)
        name_list.append(name)
        image_list.append(img_float)

    # Now we have all images in a list; stack into single ndarrays.
    images = np.stack(image_list)
    labels = np.array(label_list, dtype=np.int32)

    # Input normalization: remove mean, divide by standard deviation.
    images -= np.mean(images)
    images /= np.std(images)

    # Add dummy channel layer
    images = images.reshape((images.shape[0], images.shape[1], images.shape[2], 1))

    # Write data
    np.save(outFile + '.npy', images)
    np.save(outFile + '_labels.npy', labels)
    np.save(outFile + '_patients.npy', name_list)
| true |
ff19908937173367f24f3e2d29d18c4836f5e328 | Python | matt-drayton/fake-news-sentiment-analysis | /extract_training_data.py | UTF-8 | 1,475 | 3.03125 | 3 | [] | no_license | import json
from tqdm import tqdm
import pandas as pd
from utils import lemmatize_and_strip, preprocess_bulk, log
def load_training_data():
    """Parse the training data from its CSV file

    Returns:
        Tuple containing the pre-processed positive and negative tweets
    """
    # NOTE(review): read_csv consumes the file's first row as a header before
    # the columns are renamed below; if the file has no header row the first
    # tweet is silently lost — consider header=None. Confirm against the file.
    csv = pd.read_csv("training.1600000.processed.noemoticon.csv")
    csv.columns = ["target", "id", "date", "query", "user", "text"]
    # Rows with target 4 are treated as positive, target 0 as negative.
    positives = csv.loc[csv["target"] == 4]["text"].tolist()
    negatives = csv.loc[csv["target"] == 0]["text"].tolist()
    return (
        # preprocess_bulk comes from the project's utils module.
        preprocess_bulk(positives),
        preprocess_bulk(negatives),
    )
if __name__ == "__main__":
    log("Loading training data")
    (
        positive_training_tweets,
        negative_training_tweets,
    ) = load_training_data()

    # Clean up training / raw data
    log("Preprocessing positive and negative training tweets")
    positive_training_tweets = [
        lemmatize_and_strip(tweet) for tweet in tqdm(positive_training_tweets)
    ]
    negative_training_tweets = [
        lemmatize_and_strip(tweet) for tweet in tqdm(negative_training_tweets)
    ]

    # Persist the cleaned corpora as pretty-printed JSON.
    log("Saving to files")
    with open("training_positive.json", "w") as file:
        dump = json.dumps(positive_training_tweets, indent=4)
        file.write(dump)
    with open("training_negative.json", "w") as file:
        dump = json.dumps(negative_training_tweets, indent=4)
        file.write(dump)

    log("Execution complete")
| true |
8bd2fb8b587cfc5ed0d729d6560351d3084731ae | Python | shivamnegi1705/Competitive-Programming | /Leetcode/Weekly Contest 220/1696. Jump Game VI.py | UTF-8 | 469 | 2.78125 | 3 | [] | no_license | # Question Link:- https://leetcode.com/problems/jump-game-vi/
class Solution:
    """Monotonic-deque DP for LeetCode 1696 (Jump Game VI): from index i you
    may jump up to k steps forward; maximise the sum of visited values."""

    def maxResult(self, arr: List[int], k: int) -> int:
        # window holds (index, best_score) pairs; scores are kept in
        # non-increasing order so the front is always the window maximum.
        window = deque([(0, arr[0])])
        for idx in range(1, len(arr)):
            # Drop entries that fall outside the reachable range [idx-k, idx-1].
            while idx - window[0][0] > k:
                window.popleft()
            best_here = arr[idx] + window[0][1]
            # Pop dominated entries so the deque stays monotonic.
            while window and window[-1][1] <= best_here:
                window.pop()
            window.append((idx, best_here))
        return window[-1][1]
| true |
37db058bfac47ebd109ffa1dfaab18b22560e88d | Python | JoinNova/hackerrank | /ps_Lisa's Workbook.py | UTF-8 | 689 | 2.984375 | 3 | [] | no_license | #Lisa's Workbook
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the workbook function below.
def workbook(n, k, arr):
    """Count 'special' problems: a problem is special when its number inside
    its chapter equals the page it is printed on. Pages hold at most *k*
    problems and every chapter starts on a new page (HackerRank "Lisa's
    Workbook"). *n* (chapter count) is unused, kept for the caller's format.
    """
    special = 0
    page = 1
    for problems in arr:
        # This chapter's pages cover problem ranges [start, start+k-1],
        # clipped to the chapter length.
        for start in range(1, problems + 1, k):
            end = min(start + k - 1, problems)
            if start <= page <= end:
                special += 1
            page += 1
    return special
if __name__ == '__main__':
    #fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # Read "n k" and then the chapter sizes from stdin (HackerRank format).
    nk = input().split()
    n = int(nk[0])
    k = int(nk[1])
    arr = list(map(int, input().rstrip().split()))
    result = workbook(n, k, arr)
    print(result)
    #fptr.write(str(result) + '\n')
    #fptr.close()
| true |
372ac5748d75cba514a7b6ef6331cc3735d5f566 | Python | yeonghoey/hew | /hew/stt.py | UTF-8 | 1,618 | 2.53125 | 3 | [
"MIT"
] | permissive | import wave
from google.cloud import speech_v1
from moviepy.editor import AudioFileClip
from hew.util import Scheme, tempfile_path
# Scheme instance used as the registration decorator for the factory below.
scheme = Scheme()
@scheme
def recognize_speech():
    """Factory registered on the module-level Scheme; returns a function that
    sends an audio file to Google Cloud Speech-to-Text and returns the
    composed transcript."""
    def f(source_path, language_code='en-US'):
        # Convert whatever the source is to WAV first, then probe its rate.
        wav_path = convert_to_wav(source_path)
        client = speech_v1.SpeechClient()
        config = {
            'language_code': language_code,
            'sample_rate_hertz': sample_rate_hertz(wav_path),
        }
        audio = {
            'content': content(wav_path),
        }
        response = client.recognize(config, audio)
        return compose_transcript(response)
    return f
def convert_to_wav(source_path):
    """Extract the audio of *source_path* into a temporary WAV file (forced
    to mono) and return the new file's path."""
    clip = AudioFileClip(source_path)
    wav_path = tempfile_path('.wav')
    # NOTE: -ac stands for 'audio channels'
    # Force mono since Google Cloud STT only accepts mono audio
    ffmpeg_params = ['-ac', '1']
    clip.write_audiofile(wav_path, ffmpeg_params=ffmpeg_params)
    return wav_path
def sample_rate_hertz(wav_path):
    """Return the sampling rate (frames per second) of the WAV at *wav_path*."""
    reader = wave.open(wav_path, 'rb')
    try:
        return reader.getframerate()
    finally:
        reader.close()
def content(wav_path):
    """Read and return the raw bytes of the file at *wav_path*."""
    with open(wav_path, 'rb') as audio_file:
        data = audio_file.read()
    return data
def compose_transcript(response):
    """Join the top transcript of every recognition result with spaces."""
    pieces = []
    for result in response.results:
        # NOTE: According to `SpeechRecognitionResult` reference, the
        # `alternatives` contain at least one result, ordered by accuracy,
        # so index 0 is the most probable transcript.
        # SEE: https://cloud.google.com/speech-to-text/docs/reference/rpc/google.cloud.speech.v1
        pieces.append(result.alternatives[0].transcript)
    return ' '.join(pieces)
| true |
8483a7fd5cb0ccde3f6ccc5c0b9d71b95e48b099 | Python | mljes/WiTS-TextPet | /screen_functions.py | UTF-8 | 1,101 | 2.890625 | 3 | [] | no_license | from symbols import SYMBOLS
from colors import FG_COL, BG_COL, RESET
#############################################
# SCREEN FIELDS
# Fixed text-screen dimensions used by the drawing helpers below.
max_width = 44
max_height = 13
#############################################
def printOptions(type):
    """Print a framed menu of the entries in SYMBOLS[type], skipping "NONE"."""
    printFrame("TOP", max_width-2)
    for item in SYMBOLS[type]:
        if item != "NONE":
            # One row: left frame edge, the symbol glyph, and the item name.
            option = SYMBOLS["LINE"]["V"] + SYMBOLS[type][item] + " " + item
            # NOTE(review): short symbols get one extra padding column —
            # presumably compensating for glyph width; confirm against SYMBOLS.
            if len(SYMBOLS[type][item]) < 10:
                opt_size = len(option) + 1
            else:
                opt_size = len(option)
            print(option, end="")
            # Pad out to the right-hand frame edge.
            for i in range(max_width - opt_size + 8):
                print(" ", end="")
            print(SYMBOLS["LINE"]["V"])
    printFrame("BOTTOM", max_width-2)
def printFrame(top_bottom, width):
    """Print one horizontal frame edge ("top"/"bottom", case-insensitive) of
    the given inner width, using the corner and line glyphs from SYMBOLS."""
    edge = top_bottom.upper()
    line = (SYMBOLS["CORNER"][edge + "_LEFT"]
            + SYMBOLS["LINE"]["H"] * width
            + SYMBOLS["CORNER"][edge + "_RIGHT"])
    print(line)
def setForeColor(color, text):
    """Wrap *text* in the ANSI foreground colour named *color*, then reset.

    NOTE(review): unlike setBackColor below, the colour key is NOT upper-cased
    here — confirm the intended FG_COL key casing."""
    prefix = FG_COL[color]
    return prefix + text + RESET
def setBackColor(color, text):
    """Wrap *text* in the ANSI background colour named *color* (the key is
    upper-cased before lookup), then reset."""
    key = color.upper()
    return BG_COL[key] + text + RESET
def clearScreen():
    """Emit three terminal-clearing sequences (CSI-style clear plus two full
    resets) — belt-and-braces clearing across terminal emulators."""
    for sequence in (chr(27) + '[2j', '\033c', '\x1bc'):
        print(sequence)
9587d90d986fa0f5928f10474721ee1843ec3b43 | Python | sedhha/URC2019 | /Focal_length_calculation/fl_C.py | UTF-8 | 466 | 2.546875 | 3 | [] | no_license | import cv2
import numpy as np
# Capture frames from the default camera, threshold dark pixels, and print the
# area of any contour larger than 200 until 'q' is pressed.
cam=cv2.VideoCapture(0)
while True:
    k=cv2.waitKey(1)
    # BUG FIX: '==' binds tighter than '&', so the original evaluated
    # k & (0xFF == ord('q')), which is always 0 and never broke the loop.
    if (k & 0xFF) == ord('q'):
        break
    _,frame=cam.read()
    # Keep pixels whose channel values lie in [0,0,0]..[180,255,30].
    # NOTE(review): these bounds look like HSV limits, but `frame` is never
    # converted with cvtColor — confirm whether a BGR->HSV step is missing.
    img=cv2.inRange(frame,np.array([0,0,0]),np.array([180,255,30]))
    im2,contours,hierarchy = cv2.findContours(img, 1, 2)
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if area>200:
            print(area)
    cv2.imshow('Image',img)
| true |
b3e99a4141f733ec4e94ec86557d0d15e1299a89 | Python | Fixdq/python-learn | /weekday0520/client/core/src.py | UTF-8 | 428 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 18-5-20 下午3:09
# @Author : fixdq
# @File : src.py
# @Software: PyCharm
from core import admin,user
# Top-level menu text and the dispatch table mapping a choice to its view.
menu ="""
1.用户
2.管理员
"""
menu_dic = {
    '1':user.view,
    '2':admin.view
}
def run():
    """Main loop: print the menu and dispatch to the chosen view.

    'q' quits; any input not in menu_dic just re-prints the menu."""
    while True:
        print(menu)
        ch = input('>>>:').strip()
        if ch =='q':break
        if ch not in menu_dic:continue
        menu_dic[ch]()
e9b6b2a66a01a6af19468e32710cc22ce7566fb3 | Python | redmage123/intermediatepython | /examples/circuitous7.py | UTF-8 | 1,237 | 4.125 | 4 | [] | no_license | ''' Circuitous, LLC -
An advanced circle analytics company
'''
from math import pi,sqrt # Added sqrt import for bbd_to_radius function. sq
class Circle:
    """An advanced circle analytics toolkit.

    ``version`` is a class variable shared by every instance."""

    version = '0.3'

    def __init__(self, radius):
        """Store the circle's radius."""
        self.radius = radius

    def area(self):
        """Return the enclosed area (math.pi keeps the constant consistent)."""
        return pi * self.radius ** 2

    def perimeter(self):
        """Return the circumference."""
        return 2 * pi * self.radius
# Next customer is a national graphics company. Note that they have many shape vendors, not just your company.
# I.e. they have spheres, rectangles, triangles, etc.
# They'd really like to be able to create a circle without having to convert it from a radius to
# a bounding box diagonal.
# They don't really want to do this...
#Customer supplies the bbd function.
#Customer supplies the bbd function.
def bbd_to_radius(bbd):
    """Convert a bounding-box diagonal to the radius of the inscribed circle.

    BUG FIX: the original computed the radius but never returned it, so
    callers received None (and Circle(None).area() then crashed).
    """
    radius = bbd / 2.0 / sqrt(2.0)
    return radius
c = Circle(bbd_to_radius(bbd)) #This is awkward. Why does the customer have to exert effort to create a circle with a bbd?
print ('A circle with a bbd of 25.1 ')
print (' has a radius of ', c.radius)
print (' and an area of ',c.area()) | true |
40eabf33e4f885fd907c8dfaec662e921c61360a | Python | popravich/hiku | /hiku/compat.py | UTF-8 | 2,616 | 2.59375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import sys
import ast as _ast
import inspect
# Interpreter version flags used to pick compatible implementations below.
PY3 = sys.version_info[0] == 3
PY35 = sys.version_info >= (3, 5)
PY36 = sys.version_info >= (3, 6)
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.  When the caller's class is created from the
    # temporary base returned below, metaclass.__new__ runs and builds the
    # real class with `meta` and the originally requested `bases`.
    class metaclass(meta):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
class _AST(object):
    """Facade over the stdlib ``_ast`` module that papers over signature
    differences between Python versions; anything not overridden below is
    delegated to ``_ast`` unchanged via __getattr__."""

    def __getattr__(self, name):
        return getattr(_ast, name)

    if PY3:
        @staticmethod
        def arguments(args, vararg, kwarg, defaults):
            # Python 3's ast.arguments takes extra keyword-only slots; pass
            # empty lists for them so the Python-2-style call keeps working.
            # NOTE(review): newer CPython added posonlyargs as well — confirm
            # on the targeted interpreter versions.
            return _ast.arguments(args, vararg, [], [], kwarg, defaults)

        @staticmethod
        def arg(arg):
            return _ast.arg(arg, None)
    else:
        # Python 2 lacks ast.arg/NameConstant; emulate them with Name nodes.
        @staticmethod
        def Name(id, ctx):
            return _ast.Name(str(id), ctx)

        @staticmethod
        def NameConstant(id):
            return _ast.Name(str(id), _ast.Load())

        @staticmethod
        def Attribute(value, attr, ctx):
            return _ast.Attribute(value, str(attr), ctx)

        @staticmethod
        def arguments(args, vararg, kwarg, defaults):
            return _ast.arguments(args, vararg, kwarg, defaults)

        @staticmethod
        def arg(arg):
            return _ast.Name(str(arg), _ast.Param())

    if PY35:
        @staticmethod
        def Call(func, args, keywords, starargs, kwargs):
            # 3.5 dropped the starargs/kwargs slots; accept and ignore them.
            return _ast.Call(func, args, keywords)
    else:
        Call = _ast.Call

    if PY36:
        @staticmethod
        def comprehension(target, iter, ifs, is_async=0):
            # 3.6 added the is_async slot; default it for older-style calls.
            return _ast.comprehension(target, iter, ifs, is_async)
    else:
        comprehension = _ast.comprehension
if PY3:
text_type = str
string_types = str,
integer_types = int,
def qualname(fn):
if inspect.ismethod(fn):
return fn.__func__.__qualname__
else:
return fn.__qualname__
else:
text_type = unicode # noqa
string_types = basestring, # noqa
integer_types = int, long # noqa
def qualname(fn):
if inspect.ismethod(fn):
return '{}.{}'.format(fn.im_class.__name__, fn.im_func.__name__)
else:
return fn.__name__
if PY35:
from ._compat import async_wrapper
async_wrapper = async_wrapper
else:
def async_wrapper(func):
raise RuntimeError('Can not use async/await in Python < 3.5')
| true |
53b43a41321f5039785df5b05d9e38442d8b7a0e | Python | lyqtiffany/learngit | /caiNiao/sanShiSiShi/siShiQiExchange.py | UTF-8 | 182 | 3.59375 | 4 | [] | no_license | def exchange(a, b):
a, b = b, a
return a, b
if __name__ == '__main__':
x = 12
y = 65
x, y = exchange(x, y)
print('x value is %d'%x, 'y value change to %d'%y) | true |
33ae7c9f65c1462e20cf31b50507a4e2a51c791e | Python | webclinic017/fa-absa-py3 | /Extensions/Advanced Corporate Actions/FPythonCode/FCorpActionPayoutViewer.py | UTF-8 | 2,739 | 2.546875 | 3 | [] | no_license | """ Compiled: 2020-09-18 10:38:49 """
#__src_file__ = "extensions/advanced_corporate_actions/./etc/FCorpActionPayoutViewer.py"
import acm
import FUxCore
def SelectFirstItem(objList, itemList):
    """If *objList* is non-empty, make its first element the current
    selection of the UI list control *itemList*; otherwise do nothing."""
    if not objList:
        return
    itemList.SetData(objList[0])
def RemoveItem(objList, itemList, item):
    """Remove *item* from both the backing list and the UI list control, then
    select a neighbour: the element now at the removed index, or the new last
    element when the removed item was last. No selection when the list
    becomes empty or the neighbour is falsy."""
    position = objList.index(item)
    del objList[position]
    itemList.RemoveItem(item)
    if not objList:
        return
    if position >= len(objList):
        position -= 1
    successor = objList[position]
    if successor:
        itemList.SetData(successor)
def OnDeleteClicked(self, cd):
    """Delete-button callback: delete the selected payout entity in ACM and
    drop it from the dialog's list (selecting a neighbour)."""
    val = self.m_values.GetData()
    if val:
        acm.FCorporateActionPayout[val].Delete()
        RemoveItem(self.valList, self.m_values, val)
def OnValDoubleClicked(self, cd):
    """List double-click callback: open the selected payout with the ACM
    'Modify' run-script."""
    val = self.m_values.GetData()
    if val:
        acm.StartRunScript(acm.FCorporateActionPayout[val], 'Modify')
class PayoutsListCustomDialog(FUxCore.LayoutDialog):
    """FUxCore dialog listing corporate-action payouts; the user can delete
    entries or double-click to edit one."""

    # Control ids shared between CreateLayout and HandleCreate.
    LIST_VALUES = 'listValues'
    BTN_DELETE = 'btnDelete'

    def __init__(self, params):
        self.choices = params['choices']
        self.selected = params['selected']
        self.caption = 'Payouts List'
        self.valLabel = 'Payouts'
        self.valList = []
        self.selectList = []

    def HandleApply(self):
        # Hand the (possibly pruned) payout list back to the caller.
        resultDic = acm.FDictionary()
        resultDic.AtPut('result', self.valList)
        return resultDic

    def SetControlData(self):
        SelectFirstItem(self.valList, self.m_values)

    def HandleCreate(self, dlg, layout):
        # Wire up controls and callbacks once the dialog is instantiated.
        self.m_fuxDlg = dlg
        self.m_fuxDlg.Caption(self.caption)
        self.m_values = layout.GetControl(self.LIST_VALUES)
        self.m_values.AddCallback('DefaultAction', OnValDoubleClicked, self)
        self.m_btnDelete = layout.GetControl(self.BTN_DELETE)
        self.m_btnDelete.AddCallback('Activate', OnDeleteClicked, self)
        self.PopulateControls()
        self.SetControlData()

    def CreateLayout(self):
        # Vertical box: labelled payout list on top, Delete/Close buttons below.
        b = acm.FUxLayoutBuilder()
        b.BeginVertBox()
        b.BeginHorzBox()
        b.AddSpace(3)
        b.BeginVertBox()
        b.AddLabel("lblValues", self.valLabel)
        b.AddList(self.LIST_VALUES, 10, -1, 15, -1)
        b.EndBox()
        b.AddSpace(3)
        b.EndBox()
        b.AddSpace(5)
        b.BeginHorzBox()
        b.AddFill()
        b.AddButton(self.BTN_DELETE, "Delete")
        b.AddButton('ok', 'Close')
        b.AddSpace(3)
        b.EndBox()
        b.EndBox()
        return b

    def PopulateControls(self):
        # Show the initially-selected payouts, sorted, and select the first.
        self.valList = [s for s in self.selected]
        self.valList.sort()
        self.m_values.Populate(self.valList)
        if self.valList:
            self.m_values.SetData(self.valList[0])
9e6008d1e9c926ccb5f9d8c85af83a9bfb7bce3b | Python | Timpryor91/tims_shoes | /classes/event_class.py | UTF-8 | 19,776 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: timpr
"""
from datetime import datetime, timedelta
import random as rand
from classes.customer_class import Customer
class EventsTable(object):
    """
    An event table class, corresponding to a MySQL table used to store events on Tim's Shoes website.

    Constructing an instance simulates site traffic day by day from start_date
    up to today, logging clickthrough/purchase events and customer records to
    the database via the supplied cursor.
    """
    def __init__(self, mycursor, start_date):
        """
        Initialize an EventsTable object and run the full traffic simulation.

        Parameters:
            mycursor (MySQL Cursor): a cursor to perform database operations from Python
            start_date (datetime): the date the website went live
        """
        self.mycursor = mycursor
        self.date = start_date
        self.end_date = datetime.today()
        self.customer_list = []
        self.customer_id_allocation = 1
        self.camp_days_rem = 0
        self.item_ids = self.get_item_ids()
        # NOTE(review): "asus" appears twice under "computer", doubling its
        # selection weight -- confirm this is intentional.
        self.device_dict = {"computer": ["dell", "hp", "apple", "lenovo",
                                         "microsoft", "asus", "asus", "other"],
                            "phone": ["apple", "google", "huawei", "samsung", "htc",
                                      "nokia", "motorola", "other"],
                            "tablet": ["apple", "amazon", "microsoft", "other"]
                            }
        self.shoe_club_join_prob = 0.25
        self.control_conversion_prob = 0.7
        # Base query to add events to the MySQL table
        self.event_sql = '''INSERT INTO events (event_date, event_time, event_type, customer_id,
                                                product_id, device_type, device_info, order_number, ab_test_notes)
                                                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)'''
        self.day_counter = 0
        # Randomly create events for each day
        while self.date < self.end_date:
            self.day_counter += 1
            # Run a new A/B test every 2 weeks
            if (self.day_counter - 1) % 14 == 0:
                self.test_conversion_prob, self.test_label = self.initiate_ab_test(self.day_counter)
            # Instigate bugs on randomly selected days
            self.impacted_device_type, self.impacted_device_info = self.instigate_bug()
            # Run a shoe club growth campaign once per year
            if self.day_counter % 365 == 0:
                self.shoe_club_join_prob, self.camp_days_rem = self.initiate_shoe_club_growth_campaign()
            elif self.camp_days_rem > 1:
                self.camp_days_rem -= 1
            elif self.camp_days_rem == 1:
                # Campaign finished: restore the baseline join probability.
                self.shoe_club_join_prob = 0.25
                self.camp_days_rem -= 1
            # Randomly generate new customers making their first purchase on the day
            self.num_current_customers = len(self.customer_list)
            self.new_customers = self.generate_new_customers(self.num_current_customers, self.date)
            for new_cust in self.new_customers:
                # Randomly simulate a view and purchase event for the new customer
                new_cust.set_customer_id(self.customer_id_allocation)
                self.viewed_product = self.generate_viewed_product()
                self.click_time = self.generate_click_time()
                self.device_type, self.device_info = self.generate_user_device()
                self.purchase_time = self.generate_purchase_time(self.click_time)
                # Log events to the database
                self.log_event_to_db(
                    self.date,
                    self.click_time,
                    "clickthrough",
                    self.customer_id_allocation,
                    self.viewed_product,
                    self.device_type,
                    self.device_info,
                    "0",
                    ""
                )
                self.log_event_to_db(
                    self.date,
                    self.purchase_time,
                    "purchase",
                    self.customer_id_allocation,
                    self.viewed_product,
                    self.device_type,
                    self.device_info,
                    "0",
                    ""
                )
                # Randomly select some new customers to sign up for the shoe club.
                # FIX: allocate_shoe_club_membership mutates the customer in
                # place and returns None; the old code rebound new_cust to None.
                if self.join_shoe_club(self.shoe_club_join_prob) == True:
                    self.allocate_shoe_club_membership(new_cust, self.date)
                # Increment id allocation to ensure each customer is assigned a unique id
                self.customer_id_allocation += 1
            # Select a subset of the existing customers to view an item on the day
            if(self.num_current_customers > 0):
                self.ret_indexes = self.generate_returning_customer_index_list(self.num_current_customers)
                for i in self.ret_indexes:
                    # Simulate clickthroughs for each returning customer
                    self.viewed_product = self.generate_viewed_product()
                    self.click_time = self.generate_click_time()
                    self.device_type, self.device_info = self.generate_user_device()
                    # Check for bug impacts: impacted devices generate no events today
                    if (self.device_type == self.impacted_device_type and
                            self.device_info in self.impacted_device_info):
                        continue
                    self.ret_cust_id = self.customer_list[i].get_customer_id()
                    # Select some customers to be in the A/B test control group for conversion
                    if self.assign_test_group() == True:
                        self.ret_cust_return_prob = self.test_conversion_prob
                        self.ret_cust_test_note = self.test_label + "_test"
                    else:
                        self.ret_cust_return_prob = self.control_conversion_prob
                        self.ret_cust_test_note = self.test_label + "_control"
                    self.log_event_to_db(
                        self.date,
                        self.click_time,
                        "clickthrough",
                        self.ret_cust_id,
                        self.viewed_product,
                        self.device_type,
                        self.device_info,
                        "0",
                        self.ret_cust_test_note
                    )
                    if(self.makes_purchase(self.ret_cust_return_prob) == True):
                        self.purchase_time = self.generate_purchase_time(self.click_time)
                        self.customer_list[i].set_last_purchase_date(self.date)
                        self.log_event_to_db(
                            self.date,
                            self.purchase_time,
                            "purchase",
                            self.ret_cust_id,
                            self.viewed_product,
                            self.device_type,
                            self.device_info,
                            "0",
                            self.ret_cust_test_note
                        )
                    # Randomly select some returning customers to sign up for or churn from the shoe club
                    if self.customer_list[i].get_shoe_club_status() == "Inactive":
                        if self.join_shoe_club(self.shoe_club_join_prob) == True:
                            self.allocate_shoe_club_membership(self.customer_list[i], self.date)
                    else:
                        self.leave_shoe_club(self.customer_list[i])
            # FIX: use the list returned by generate_new_customers instead of
            # relying on the side-effect attribute self.daily_new_customers
            # (same list object, but the dependency is now explicit).
            self.customer_list.extend(self.new_customers)
            self.date += timedelta(days = 1)
        # Add all the customer data to the database
        for cust in self.customer_list:
            cust.log_customer_to_db(self.mycursor)

    def get_item_ids(self):
        """
        Create a list of all the ids of items available in the shop.

        Returns:
            item_ids (List<tuple>): one single-element tuple per in-stock item id
        """
        self.mycursor.execute('''SELECT
                                    item_id
                                 FROM
                                    items
                                 WHERE
                                    inventory > 0
                                 ''')
        self.item_ids = self.mycursor.fetchall()
        return(self.item_ids)

    def generate_new_customers(self, num_current_customers, current_date):
        """
        Randomly creates a list of new customers, the length being proportionate to the number
        of existing customers.

        Parameters:
            num_current_customers (int): the current number of customers registered to the site
            current_date (datetime): the current date

        Returns:
            daily_new_customers (List<Customer>): a list of new customer objects
        """
        self.new_customers = int((10 + num_current_customers/200)*rand.random())
        self.daily_new_customers = []
        for i in range(self.new_customers):
            self.daily_new_customers.append(Customer(current_date))
        return(self.daily_new_customers)

    def generate_viewed_product(self):
        """
        Randomly selects an item for a customer to view.

        Returns:
            viewed_product (string): the id of the viewed product
        """
        self.viewed_product = rand.choice(self.item_ids)[0]
        return(self.viewed_product)

    def generate_user_device(self):
        """
        Randomly selects a user device for an event.

        Returns:
            device_type (string): the type of device the user is using e.g. computer
            device_info (string): the make of the device e.g. apple
        """
        self.device_type = rand.choice(["computer", "phone", "tablet"])
        self.device_info = rand.choice(self.device_dict[self.device_type])
        return (self.device_type, self.device_info)

    def generate_click_time(self):
        """
        Generates a random time for a clickthrough event.

        Returns:
            event_time (string): An event time string in the form HH:MM:SS
        """
        # Offset "now" by a random number of hours/minutes to pick a day time
        self.event_datetime = datetime.today() + timedelta(hours = rand.randint(0,24)) +\
                              timedelta(minutes = rand.randint(0,60))
        self.event_time = datetime.strftime(self.event_datetime,"%H:%M:%S")
        return(self.event_time)

    def generate_purchase_time(self, click_time):
        """
        Generates a random time for an item purchase, given the time the item was clicked into.

        Parameters:
            click_time (string): the time (HH:MM:SS) that the item was clicked into.

        Returns:
            event_time (string): An event time string in the form HH:MM:SS
        """
        self.click_time = click_time
        # Add on a small amount of time (up to an hour) after the click event
        self.event_datetime = datetime.strptime(self.click_time, "%H:%M:%S") +\
                              timedelta(minutes = rand.randint(0,60))
        self.event_time = datetime.strftime(self.event_datetime, "%H:%M:%S")
        return(self.event_time)

    def generate_returning_customer_index_list(self, num_current_customers):
        """
        Creates a list of indexes relating to returning customers from the customer list.

        Parameters:
            num_current_customers (int): current number of registered customers

        Returns:
            returning_customer_indexes (List<int>): unique indexes into customer_list
        """
        # Up to 5% of the customer base returns on a given day.
        self.num_ret_custs = int(rand.random()*0.05*self.num_current_customers)
        # FIX: sample over the full index range; range(0, N - 1) could never
        # select the most recently added customer.
        self.returning_customer_indexes = rand.sample(range(self.num_current_customers),
                                                      self.num_ret_custs)
        return (self.returning_customer_indexes)

    def log_event_to_db(self,
                        event_date,
                        event_time,
                        event_type,
                        customer_id,
                        product_id,
                        device_type,
                        device_info,
                        order_number,
                        ab_test_note):
        """
        Inserts an event into the Events database table.

        Parameters:
            event_date (datetime): the day of the event
            event_time (string): the time of the event in HH:MM:SS format
            event_type (string): the type of event (clickthrough or purchase)
            customer_id (int): the unique id of the customer making the event
            product_id (int): the unique id of the product involved in the event
            device_type (string): the type of device the customer is using
            device_info (string): the make of the device
            order_number (string): the order number of the customers purchase
            ab_test_note (string): an identifier tag linking events to tests

        Returns:
            None
        """
        self.event = (
            event_date,
            event_time,
            event_type,
            customer_id,
            product_id,
            device_type,
            device_info,
            order_number,
            ab_test_note
        )
        self.mycursor.execute(self.event_sql, self.event)
        return

    def join_shoe_club(self, prob):
        """
        Randomly decides if a customer will join the shoe club on the day.

        Parameters:
            prob (float): the probability of the customer joining the shoe club, e.g. 0.7

        Returns:
            joins_club (boolean): true if they join the club, false otherwise
        """
        if rand.random() < prob:
            return (True)
        else:
            return (False)

    def allocate_shoe_club_membership(self, customer, current_date):
        """
        Assign shoe club membership parameters to a customer joining the shoe club.
        Mutates the input customer object.

        Parameters:
            customer (Customer): a customer who is joining the shoe club
            current_date (datetime): the current date

        Returns:
            None
        """
        # FIX: the original assigned the id string to the *method attribute*
        # (customer.set_shoe_club_id = ...) instead of calling the setter,
        # so no club id was ever stored.
        customer.set_shoe_club_id(customer.get_last_name() + str(int(1000*rand.random())))
        customer.set_shoe_club_signup_date(current_date)
        customer.set_shoe_club_status("Active")
        return

    def leave_shoe_club(self, customer):
        """
        Checks to see if a user churns from the shoe club. Mutates the input customer object.

        Parameters:
            customer (Customer): a shoe club member who may churn

        Returns:
            None
        """
        # 0.5% daily churn probability
        if rand.random() < 0.005:
            customer.set_shoe_club_status("Inactive")
        return

    def makes_purchase(self, prob):
        """
        Randomly decides if a customer will purchase an item they have clicked into.

        Parameters:
            prob (float): the probability of the customer making a purchase, e.g. 0.7

        Returns:
            makes_purchase (boolean): true if they make a purchase, false otherwise
        """
        if rand.random() < prob:
            return (True)
        else:
            return (False)

    def initiate_ab_test(self, day_counter):
        """
        Generate conversion rates for A/B test group.

        Parameters:
            day_counter (int): the day number, counted from the stores opening day

        Returns:
            test_conversion_prob (float): the purchase probability for the test group
            test_label (string): a label used to identify users who have been exposed to a test
        """
        self.test_conversion_prob = rand.choice([0.67,0.68,0.69,0.70,0.71,0.72,0.73,0.75,0.80])
        self.test_label = "Test_" + str(int((day_counter-1)/14 + 1))
        return(self.test_conversion_prob, self.test_label)

    def assign_test_group(self):
        """
        Allocate customers into the control or test group (50/50 split).

        Returns:
            test_group (boolean): true if they are in the test group, false if in the control
        """
        if rand.random() < 0.5:
            return (True)
        else:
            return (False)

    def instigate_bug(self):
        """
        With a low probability, creates a bug that prevents clicks or sales on certain
        devices. Bugs are assumed to be resolved within a day.

        Returns:
            impacted_device_type (string): the device type that is impacted by the bug
            impacted_device_info (List<string>): the particular devices that are impacted
        """
        # 1% chance of a bug on any given day
        if rand.random() < 0.01:
            self.impacted_device_type = rand.choice(["computer", "phone", "tablet"])
            # Randomly select the device type variants that will be impacted
            self.variants_impacted = rand.randint(1, len(self.device_dict[self.impacted_device_type]))
            self.impacted_device_info = rand.sample(self.device_dict[self.impacted_device_type],
                                                    self.variants_impacted)
        else:
            self.impacted_device_type = ""
            self.impacted_device_info = []
        return (self.impacted_device_type, self.impacted_device_info)

    def initiate_shoe_club_growth_campaign(self):
        """
        Generate sign up rate for growth campaign period.

        Returns:
            campaign_shoe_club_join_prob (float): the join probability for users during campaign
            campaign_length (int): the length (days) of the shoe club growth campaign
        """
        self.campaign_shoe_club_join_prob = rand.choice([0.25,0.26,0.30,0.35,0.40])
        self.campaign_length = 30
        return(self.campaign_shoe_club_join_prob, self.campaign_length)
| true |
095a5cec3d6dc8109af29d10ebc708b4f149fdd2 | Python | Grace-JingXiao/AVEC_2017_DDS_CNN_Research | /Pretreatment/Text/Step3_Digital.py | UTF-8 | 1,623 | 2.5625 | 3 | [] | no_license | import os
import numpy
if __name__ == '__main__':
loadpath = 'D:/PythonProjects_Data/AVEC2017_Data/Text_Step1_RawText/'
savepath = 'D:/PythonProjects_Data/AVEC2017_Data/Text_Step3_Digital/'
dictionaryData = numpy.genfromtxt(fname='Dictionary.csv', dtype=str, delimiter=',')
dictionary = {}
for sample in dictionaryData:
dictionary[sample[0]] = int(sample[1])
print(dictionary)
for foldname in os.listdir(loadpath):
os.makedirs(os.path.join(savepath, foldname))
for filename in os.listdir(os.path.join(loadpath, foldname)):
with open(os.path.join(loadpath, foldname, filename), 'r') as file:
data = file.readlines()
with open(os.path.join(savepath, foldname, filename), 'w') as file:
for sample in data:
sample = sample.replace('[', '')
sample = sample.replace(']', '')
sample = sample.replace('_', '')
sample = sample.replace('<', '')
sample = sample.replace('>', '')
sample = sample.replace(' ', ' ')
sample = sample.replace(' ', ' ')
sample = sample.replace(' ', ' ')
sample = sample.replace(' ', ' ')
sample = sample.replace(' ', ' ')
for words in sample[0:-1].split(' '):
if words in dictionary.keys():
file.write(str(dictionary[words]) + ',')
file.write('\n')
# print(data)
# exit()
| true |
30364047be721bc5c400ff68515627e9c0a142cd | Python | melexis/xunit2rst | /tests/prefix_test.py | UTF-8 | 6,000 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
''' Test suite for functions that set the prefix and prefix_set variables '''
import unittest
from pathlib import Path
from mlx.xunit2rst import build_prefix_and_set, parse_xunit_root, verify_prefix_set, ITEST, UTEST, QTEST
TEST_IN_DIR = Path(__file__).parent / 'test_in'
class TestPrefix(unittest.TestCase):
    """Tests for prefix / prefix_set resolution in mlx.xunit2rst.

    The expected values are tied to the xUnit fixture files under
    tests/test_in; UTEST/ITEST/QTEST are the three prefix-set constants.
    """
    def test_build_prefix_and_set_utest_default(self):
        ''' Use default prefix for unit test reports '''
        test_suites, initial_prefix_set, _ = parse_xunit_root(TEST_IN_DIR / 'utest_my_lib_no_prefix_report.xml')
        self.assertEqual(initial_prefix_set, UTEST)
        prefix_set, prefix = build_prefix_and_set(test_suites, initial_prefix_set, '', True, None)
        self.assertEqual(prefix_set, initial_prefix_set)
        self.assertEqual(prefix, 'UTEST-')
    def test_build_prefix_and_set_itest_default(self):
        ''' Use default prefix for integration test reports '''
        test_suites, initial_prefix_set, _ = parse_xunit_root(TEST_IN_DIR / 'itest_report.xml')
        self.assertEqual(initial_prefix_set, ITEST)
        prefix_set, prefix = build_prefix_and_set(test_suites, initial_prefix_set, '', True, None)
        self.assertEqual(prefix_set, initial_prefix_set)
        self.assertEqual(prefix, 'ITEST-')
    def test_build_prefix_and_set_from_name(self):
        ''' Get prefix from element name '''
        test_suites, initial_prefix_set, _ = parse_xunit_root(TEST_IN_DIR / 'utest_my_lib_report.xml')
        self.assertEqual(initial_prefix_set, UTEST)
        prefix_set, prefix = build_prefix_and_set(test_suites, initial_prefix_set, '', True, None)
        self.assertEqual(prefix_set, initial_prefix_set)
        self.assertEqual(prefix, 'UTEST_MY_LIB-')
    def test_build_prefix_and_set_from_arg(self):
        ''' Get prefix from input argument `--prefix` and trim suffix of prefix '''
        test_suites, initial_prefix_set, _ = parse_xunit_root(TEST_IN_DIR / 'utest_my_lib_report.xml')
        self.assertEqual(initial_prefix_set, UTEST)
        # trim_suffix=True: the trailing '_' before '-' is removed
        prefix_set, prefix = build_prefix_and_set(test_suites, initial_prefix_set, 'TEST_MY_LIB_-', True, None)
        self.assertEqual(prefix_set, initial_prefix_set)
        self.assertEqual(prefix, 'TEST_MY_LIB-')
    def test_build_prefix_and_set_from_arg_swap_set(self):
        '''
        Get prefix from input argument `--prefix` and base prefix_set on its first letter.
        Don't trim suffix of prefix.
        '''
        test_suites, initial_prefix_set, _ = parse_xunit_root(TEST_IN_DIR / 'itest_report.xml')
        self.assertEqual(initial_prefix_set, ITEST)
        prefix_set, prefix = build_prefix_and_set(test_suites, initial_prefix_set, 'UTEST_MY_LIB_-', False, None)
        self.assertNotEqual(prefix_set, initial_prefix_set)
        self.assertEqual(prefix_set, UTEST)
        self.assertEqual(prefix, 'UTEST_MY_LIB_-')
    def test_build_prefix_and_set_priority(self):
        ''' Argument --type must have the highest priority for determining the correct prefix_set. '''
        test_suites, initial_prefix_set, _ = parse_xunit_root(TEST_IN_DIR / 'utest_my_lib_report.xml')
        self.assertEqual(initial_prefix_set, UTEST)
        # --type 'i' wins over both the report type and the 'UTEST_HOWDY-' prefix
        prefix_set, prefix = build_prefix_and_set(test_suites, initial_prefix_set, 'UTEST_HOWDY-', False, 'i')
        self.assertNotEqual(prefix_set, initial_prefix_set)
        self.assertEqual(prefix_set, ITEST)
        self.assertEqual(prefix, 'UTEST_HOWDY-')
    def test_content_files(self):
        ''' Test the extraction of the content file path '''
        _, _, content_files = parse_xunit_root(TEST_IN_DIR / 'qtest_my_lib_report.xml')
        self.assertEqual(content_files, {3: Path("../../doc/source/extra_content.yml")})
    def test_content_files_no_root(self):
        ''' Test the extraction of the content file path when the XML has no valid root element '''
        _, _, content_files = parse_xunit_root(TEST_IN_DIR / 'itest_report.xml')
        self.assertEqual(content_files, {0: Path('./extra_content1.yml')})
    def test_verify_prefix_set(self):
        '''
        Tests verify_prefix_set function. The --type argument should have the highest priority and must
        start with u/i/q (case-insensitive). The prefix argument has the second highest priority. A last resort is to
        keep the input prefix_set.
        '''
        self.assertEqual(verify_prefix_set(UTEST, 'UTEST-', 'Itest'), ITEST)
        self.assertEqual(verify_prefix_set(UTEST, 'UTEST-', 'i'), ITEST)
        self.assertEqual(verify_prefix_set(UTEST, '', 'i'), ITEST)
        self.assertEqual(verify_prefix_set(ITEST, 'ITEST-', 'Utest'), UTEST)
        self.assertEqual(verify_prefix_set(UTEST, 'ITEST-', 'u'), UTEST)
        self.assertEqual(verify_prefix_set(UTEST, 'UTEST-', 'u'), UTEST)
        self.assertEqual(verify_prefix_set(UTEST, 'BLAH-', None), UTEST)
        self.assertEqual(verify_prefix_set(ITEST, 'BLAH-', None), ITEST)
        self.assertEqual(verify_prefix_set(UTEST, 'ITEST-', None), ITEST)
        self.assertEqual(verify_prefix_set(ITEST, 'UTEST-', None), UTEST)
        self.assertEqual(verify_prefix_set(ITEST, 'QTEST-', None), QTEST)
        self.assertEqual(verify_prefix_set(ITEST, 'ITEST-', 'q'), QTEST)
        self.assertEqual(verify_prefix_set(UTEST, 'UTEST-', 'Qtest'), QTEST)
        self.assertEqual(verify_prefix_set(QTEST, '', 'u'), UTEST)
        self.assertEqual(verify_prefix_set(QTEST, '', 'i'), ITEST)
        self.assertEqual(verify_prefix_set(QTEST, 'UTEST', 'i'), ITEST)
        self.assertEqual(verify_prefix_set(QTEST, 'FOO-', None), QTEST)
        # A --type value not starting with u/i/q is rejected.
        with self.assertRaises(ValueError):
            verify_prefix_set(UTEST, 'UTEST-', 't')
        with self.assertRaises(ValueError):
            verify_prefix_set(ITEST, 'ITEST', 't')
        with self.assertRaises(ValueError):
            verify_prefix_set(ITEST, '', '')
        with self.assertRaises(ValueError):
            verify_prefix_set(QTEST, '', 't')
# Allow running this suite directly: python prefix_test.py
if __name__ == '__main__':
    unittest.main()
| true |
a1b20a6b219678546700b96074b57f6c623cebe9 | Python | josiah-wolf-oberholtzer/tloen | /tloen/textui/__main__.py | UTF-8 | 511 | 2.71875 | 3 | [
"MIT"
] | permissive | import urwid
def on_a_press(*args, **kwargs):
    """'add more' callback: append another button row to the module-level walker."""
    list_walker.extend([button_row])
def on_b_press(*args, **kwargs):
    """'remove one' callback: drop every row after the first two
    (NOTE: despite the label, this can remove several rows at once)."""
    del list_walker[2:]
# A bordered row holding the two control buttons. The same widget instance
# is re-appended to the walker for every "add more" press.
button_row = urwid.LineBox(
    urwid.Columns(
        [
            urwid.Button("add more", on_press=on_a_press),
            urwid.Button("remove one", on_press=on_b_press),
        ]
    )
)
# Walker backing the ListBox; the button callbacks mutate it in place and
# urwid redraws automatically.
list_walker = urwid.SimpleListWalker([button_row])
list_box = urwid.ListBox(list_walker)
if __name__ == "__main__":
    loop = urwid.MainLoop(list_box)
    loop.run()
| true |
ec0710fb25faee72d6b18bb03b5ef720e71540fe | Python | TINY-KE/floorplan-MapGeneralization | /misc/plot_graph_with_labels.py | UTF-8 | 1,468 | 2.765625 | 3 | [] | no_license | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.figure import figaspect
import matplotlib
matplotlib.use('TKAgg')
import os
import argparse
#GRAPH_PATH = r"C:\Users\Chrips\Aalborg Universitet\Frederik Myrup Thiesson - data\data_for_paper\Public\Musikkenshus\anno\MSP1-HoM-MA-XX+4-ET_w_annotations.gpickle"
def show_graph(GRAPH_PATH):
    """Load an annotated graph pickle and draw it, coloring nodes by label
    (blue for 0.0, red for 1.0)."""
    graph = nx.read_gpickle(GRAPH_PATH)
    label_dict = nx.get_node_attributes(graph, 'label')
    labels = list(label_dict.values())
    print(graph.number_of_nodes())
    # Labels other than 0.0/1.0 contribute no color entry (as before).
    node_color = ['blue' if lab == 0.0 else 'red'
                  for lab in labels if lab in (0.0, 1.0)]
    pos = nx.get_node_attributes(graph, 'pos')
    w, h = figaspect(5 / 3)
    fig, ax = plt.subplots(figsize=(w, h))
    nx.draw(graph, pos, node_color=node_color, node_size=20, ax=ax)
    plt.show()
plt.show()
if __name__ == "__main__":
    # Command:
    #   -p path/to/.gpickle
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", type=str,
                        default='../data/Public/AU/anno/A1325PE_1_w_annotations.gpickle',
                        help="path to gpickle")
    args = vars(parser.parse_args())
    show_graph(args["path"])
| true |
83b8558b6e1c5f86fa9bb857061d8cb34cc3752f | Python | jankeromnes/Python-Calc | /algeb/AbsVal.py | UTF-8 | 1,284 | 3.078125 | 3 | [
"MIT"
] | permissive | import re
def absVal():
main = {
}
main1 = {
}
while True:
def r():
print('desired syntax: |(equ here)|')
str1 = input('what is the equ?: ')
equ = re.findall(r'\|.*\|', str1)
equp2 = re.findall(r'.*', str1)
str(equp2)
#this takes the syntax of the equasion ie. |1 + 1| so thay it can be put through the actual equ
z = len(equ) # this part is to verify that the syntax was correct and if it is correct updates the dict accordingly
if z > 0:
main.update({"equ":equ})
main1.update({"equp2":equp2})
pass
else:
print('')
print('')
print("SyntaxError")
print('')
print('')
print('')
print('')
r()
break
r()
def rc():
# Removes excess characters
string = main.get("equ")
string = str(string)
string1 = string
string2 = string1
char1 = "["
char2 = "]"
char3 = "|"
char4 = "'"
for i in char1:
string = string.replace(char1, '')
for i in char2:
string1 = string.replace(char2, '')
for i in char3:
string2 = string1.replace(char3, '')
for i in char4:
string3 = string2.replace(char4, '')
# string3 = float(string3)
main1.update({"equ1":string3})
x = main1.get("equ1")
c = eval(x)
z = abs(c)
print(z)
rc()
# NOTE(review): this module-level call runs the interactive prompt even when
# the file is merely imported; the __main__ guard below is effectively unused.
absVal()
if __name__ == "__main__":
    pass
58f1c8e1ebbe8656f2fdaa4518f2746f523a0625 | Python | cassebas/run-co-runners | /experiment/notebook_mälardalen-bsort-pi3.py | UTF-8 | 4,985 | 2.6875 | 3 | [] | no_license | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats import mannwhitneyu
import matplotlib.pyplot as plt
# Shared boxplot styling. NOTE(review): these appear unused below -- the
# plots are restyled via set_boxplot_linewidth() instead; confirm or remove.
boxprops = dict(linestyle='-', linewidth=3, color='k')
medianprops = dict(linestyle='-', linewidth=3, color='k')
def set_boxplot_linewidth(bp, linewidth=3):
    """Apply `linewidth` to every artist of a boxplot dict.

    Parameters:
        bp (dict): mapping of artist-group name (e.g. 'boxes', 'medians')
            to a list of artists, as returned by boxplot(return_type='dict').
        linewidth (int): width passed to each artist's set_linewidth().
    """
    # The original repeated the exact same comprehension six times
    # (apparently a copy/paste slip); a single pass is equivalent.
    for artists in bp.values():
        for item in artists:
            item.set_linewidth(linewidth)
# +
# Cell: compare bsort-2000 cycle counts, core 0, isolation vs 4-core co-runners.
from matplotlib import rcParams
labelsize = 32
rcParams['xtick.labelsize'] = labelsize
rcParams['ytick.labelsize'] = labelsize
bsort1_2000 = pd.read_csv('hypothesis1/cyclesdata-BENCH1_2000-core1-configseries2-configbench1-offset0.csv', sep=' ')
bsort1444_2000 = pd.read_csv('hypothesis1/cyclesdata-BENCH1444_2000-core4-configseries2111-configbench1222-offset0.csv', sep=' ')
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(10,10))
xlab1 = ax[0].set_xlabel('run in isolation', fontsize=32, color='green')
xlab2 = ax[1].set_xlabel('with co-runners', fontsize=32, color='red')
maximum = [0, 0]
median = [0, 0]
for i, df in enumerate([bsort1_2000, bsort1444_2000]):
    # Only measurements from core 0 (the observed benchmark core)
    df = df.loc[df['core'] == 0]
    maximum[i] = df['cycles'].max()
    median[i] = df['cycles'].median()
    boxplot = df.boxplot(column=['cycles'], ax=ax[i], return_type='dict')
    set_boxplot_linewidth(boxplot, linewidth=4)
plt.savefig('/home/caspar/git/RTS-thesis/report/img/bsort2000-boxplot.png', bbox_inches='tight')
print(f'Median 1 core is {median[0]}')
print(f'Median 4 cores is {median[1]}')
print(f'WCET 1 core is {maximum[0]}')
print(f'WCET 4 cores is {maximum[1]}')
print('The median of running 4 cores compared to 1 core is {} times slower'.format(median[1]/median[0]))
print('The WCET of running 4 cores compared to 1 core is {} times slower'.format(maximum[1]/maximum[0]))
# -
# Calculate the Mann-Whitney U test
a = bsort1_2000['cycles']
b = bsort1444_2000.loc[bsort1444_2000['core'] == 0]['cycles']
# NOTE(review): the Welch t-test result below is immediately overwritten by
# the Mann-Whitney result (stat, p) and never reported -- remove or report it.
t, p = ttest_ind(b, a, equal_var=False)
stat, p = mannwhitneyu(a, b)
alpha = 0.05
print('The calculated statistic is {}.'.format(stat))
print('The calculated p-value is {}.'.format(p))
if p < alpha:
    # enough evidence to reject H0
    print('Based on the Mann-Whitney U test with stat={} and p={}, we can reject the Null-hypothesis'.format(stat, p))
else:
    print('Based on the Mann-Whitney U test with stat={} and p={}, we cannot reject the Null-hypothesis'.format(stat, p))
# +
# Cell: same comparison for the bsort-8000 input size.
from matplotlib import rcParams
labelsize = 32
rcParams['xtick.labelsize'] = labelsize
rcParams['ytick.labelsize'] = labelsize
bsort1_8000 = pd.read_csv('hypothesis1/cyclesdata-BENCH1_8000-core1-configseries2-configbench1-offset0.csv', sep=' ')
bsort1444_8000 = pd.read_csv('hypothesis1/cyclesdata-BENCH1444_8000-core4-configseries2111-configbench1222-offset0.csv', sep=' ')
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(10,10))
xlab1 = ax[0].set_xlabel('run in isolation ', fontsize=32, color='green')
xlab2 = ax[1].set_xlabel(' with co-runners', fontsize=32, color='red')
maximum = [0, 0]
median = [0, 0]
for i, df in enumerate([bsort1_8000, bsort1444_8000]):
    df = df.loc[df['core'] == 0]
    maximum[i] = df['cycles'].max()
    median[i] = df['cycles'].median()
    boxplot = df.boxplot(column=['cycles'], ax=ax[i], return_type='dict')
    set_boxplot_linewidth(boxplot, linewidth=4)
plt.savefig('/home/caspar/git/RTS-thesis/report/img/bsort8000-boxplot.png', bbox_inches='tight')
print(f'Median 1 core is {median[0]}')
print(f'Median 4 cores is {median[1]}')
print(f'WCET 1 core is {maximum[0]}')
print(f'WCET 4 cores is {maximum[1]}')
print('The median of running 4 cores compared to 1 core is {} times slower'.format(median[1]/median[0]))
print('The WCET of running 4 cores compared to 1 core is {} times slower'.format(maximum[1]/maximum[0]))
# -
# Calculate the Mann-Whitney U test (the original comment said "student
# t-test", but mannwhitneyu is what is actually computed here)
a = bsort1_8000['cycles']
b = bsort1444_8000.loc[bsort1444_8000['core'] == 0]['cycles']
stat, p = mannwhitneyu(a, b)
alpha = 0.05
print('The calculated statistic is {}.'.format(stat))
print('The calculated p-value is {}.'.format(p))
if p < alpha:
    # enough evidence to reject H0
    print('Based on the Mann-Whitney U test with stat={} and p={}, we can reject the Null-hypothesis'.format(stat, p))
else:
    print('Based on the Mann-Whitney U test with stat={} and p={}, we cannot reject the Null-hypothesis'.format(stat, p))
| true |
a8aaf450fe18797fc1787c3df8f08011167f1caa | Python | durden/spygit | /spygitapp/pep8.py | UTF-8 | 2,884 | 2.71875 | 3 | [] | no_license | from django.shortcuts import render_to_response
import os
import string
import tempfile
from fnmatch import fnmatch
from models import Error, Run, File, RunError, Line
def add_file_to_set(myset, dirname, fnames):
    """os.path.walk callback: add the full path of every *.py entry in
    `fnames` (relative to `dirname`) to `myset`."""
    for fname in fnames:
        path = dirname + '/' + fname
        if fnmatch(path, '*.py'):
            myset.add(path)
def parse_pep8(run, git_path, output):
    """Parse the pep8 output, store the results.

    run: the Run model instance this analysis belongs to.
    git_path: root of the checked-out working tree; stored filenames are
        made relative to it.
    output: file-like object yielding pep8 lines of the form
        "path:lineno:errnum:errtext".

    NOTE: Python 2 only -- relies on os.path.walk and string.split, both
    removed in Python 3.
    """
    errfiles_set = set()
    errortype_set = set()
    lineno_set = set()
    # Add all files in the project to the db
    allfiles = set()
    os.path.walk(git_path, add_file_to_set, allfiles)
    for filename in allfiles:
        filename = filename.replace(git_path + '/', '', 1)
        runfile = File(filename=filename, run=run)
        runfile.save()
    # Generate a set of error types, error files, and lines
    for line in output.readlines():
        filename, lineno, errnum, errtext = string.split(line, ':', 3)
        lineno = int(lineno)
        filename = filename.replace(git_path + '/', '', 1)
        # Create sets to remove duplicates
        errfiles_set.add(filename)
        # Add new err types to the db (only once per (code, text) pair seen,
        # and only if the code is not already stored)
        if (errnum, errtext) not in errortype_set:
            errortype_set.add((errnum, errtext))
            if not Error.objects.filter(error_type=errnum):
                err = Error(error_type=errnum, short_descr=errtext)
                err.save()
        # Create a set of line numbers for each file: keep a context window
        # of 3 lines on either side of the offending line
        for ln in range(max(1, lineno - 3), lineno + 4):
            lineno_set.add((filename, ln))
        # Add err instances to the db
        runfile = File.objects.get(run=run, filename=filename)
        errtype = Error.objects.get(error_type=errnum)
        runerr = RunError(error=errtype, file=runfile, line_number=lineno,
                          error_descr=errtext)
        runerr.save()
    # Add lines to the db (only the context-window lines collected above)
    for filename in errfiles_set:
        runfile = File.objects.get(run=run, filename=filename)
        f = open(git_path + '/' + filename, 'r')
        lineno = 1
        for line in f:
            if (filename, lineno) in lineno_set:
                linetext = Line(file=runfile, line_number=lineno, text=line)
                linetext.save()
            lineno = lineno + 1
        f.close()
f.close()
def run_pep8(git_url, path, name, rev):
    """Check out the git project, run pep8, store the results.

    Skips the whole run when this project/revision pair was already
    recorded, then removes the checkout directory either way.
    """
    # Do not allow duplicates of the same project/rev
    old_run = Run.objects.filter(project_name=name, git_revision=rev)
    if not old_run:
        run = Run(project_name=name, project_url=git_url, git_revision=rev)
        run.save()
        # NOTE(review): `path` is interpolated into shell commands here and
        # below — a malicious path allows shell injection; consider
        # subprocess with an argument list instead.
        pep8_pipe = os.popen("pep8 -r %s" % path)
        parse_pep8(run, path, pep8_pipe)
        pep8_pipe.close()
    # Clean up the checkout's parent directory regardless of outcome.
    os.system("rm -rf %s" % os.path.dirname(path))
| true |
4434de27b97c07a22797f04745a9dd747ebd7942 | Python | ProjectLSD/Noise-Reduction-Playground | /ops/mixer.py | UTF-8 | 702 | 2.765625 | 3 | [] | no_license | from pydub import AudioSegment
import random
def generate_rand_frequency():
    """Pitch-shift basictone.wav by a random amount and write it to test.wav."""
    base = AudioSegment.from_wav("basictone.wav")  # import in the file with a tone of 440 Hz
    shift = random.randint(-30, 30) / 10  # random shift in [-3, 3] octaves
    shifted_rate = int(base.frame_rate * (2.0 ** shift))
    # Re-spawn the raw samples at the new frame rate to change the pitch.
    pitched = base._spawn(base.raw_data, overrides={'frame_rate': shifted_rate})
    pitched.export("test.wav", format="wav")
def combine_sounds(file1, file2, outputfile):
    """Overlay two audio files and write the mix to *outputfile* as WAV."""
    first = AudioSegment.from_file(file1)
    second = AudioSegment.from_file(file2)
    first.overlay(second).export(outputfile, format='wav')
19cc618422c842ef13cab81b4504229ab99bcbee | Python | lzx3x3/Algorithm-CSE6140 | /CSE6140_HW4/Hongsup_OH_CSE6140_programming/Plot_divide_dynamic.py | UTF-8 | 621 | 3.390625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Benchmark sizes (number of k) used for both algorithms.
k = [10,1000,2000,3000,4000,5000,6000,7000,8000,9000,10000]
# Measured runtimes for Divide and Conquer (milliseconds).
dc = [0.168,43.55,68.7,103.64,132,159,200,224,269.1,275.8,328]
# Measured runtimes for Dynamic Programming (milliseconds).
dp = [0.048,10.554,18.889,29.50,32.329,36.6,38.2,46.3,52.4,54.6,71]
# Plot both series on one chart for comparison.
plt.plot(k,dc,label = 'Divide and Conquer')
plt.plot(k,dp,label = 'Dynamic Programming')
plt.legend()
plt.xlabel('The number of k',fontsize = 20)
plt.ylabel('Time (millisecons)',fontsize = 20)
plt.title('Divide and Conquer VS Dynamic Programming',fontsize = 20)
plt.grid()
plt.show()
| true |
18f8757d2b722e4870ee9241c7017434e82df5f4 | Python | jvanvari/LeetCode | /python-solutions/trees/BFS/994_rotting_oranges.py | UTF-8 | 1,357 | 3.515625 | 4 | [] | no_license | from typing import List
# problem of graph traversal and zigzag order traversal where you keep track of levels
class Solution:
    """LeetCode 994: multi-source BFS — rot spreads one cell per minute."""

    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return minutes until every orange rots, or -1 if some never do.

        Each queue entry carries the minute its orange became rotten, so the
        last entry popped holds the total elapsed time (zigzag-level idea).
        """
        from collections import deque

        rows = len(grid)
        cols = len(grid[0])
        queue = deque()
        minutes = 0
        # Seed the BFS with every orange already rotten at time zero.
        for i in range(rows):
            for j in range(cols):
                if grid[i][j] == 2:
                    queue.append((i, j, 0))
        while queue:
            r, c, minutes = queue.popleft()
            # BUGFIX: the original appended (r-1, c) for three of the four
            # directions; iterate all neighbours uniformly instead.
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                    grid[nr][nc] = 2
                    queue.append((nr, nc, minutes + 1))
        # Any fresh orange left over was unreachable.
        for row in grid:
            if 1 in row:
                return -1
        return minutes
| true |
7c72cfbacf81d4709a42760afb949fb1d13d0958 | Python | Jagrmi-C/jagrmitest | /coursera_/queue4.py | UTF-8 | 400 | 3.21875 | 3 | [
"MIT"
] | permissive | from queue import Queue
from threading import Thread
def worker(q, n):
    """Consume items from q until a None sentinel arrives, printing each one."""
    # iter(callable, sentinel) keeps calling q.get() until it returns None.
    for item in iter(q.get, None):
        print("process data:", n, item)
# Bounded queue: producers block once 5 items are waiting (backpressure).
q = Queue(5)
th1 = Thread(target=worker, args=(q, 1))
th2 = Thread(target=worker, args=(q, 2))
th1.start(); th2.start()
# Produce 50 items; q.put blocks whenever the queue is full.
for i in range(50):
    q.put(i)
# One None sentinel per worker so both loops terminate.
q.put(None); q.put(None)
th1.join(); th2.join()
| true |
af042215ab562f2d98bab8e8ffbc617607601c10 | Python | alainaahuja/face_recognition | /face_matching.py | UTF-8 | 2,322 | 3.15625 | 3 | [] | no_license | #importing desired modules/libraries
from tkinter import *
from tkinter import filedialog
import face_recognition
#function command for first variable; gives path for first choice
def browsingPIC1():
    """Ask the user for the first image and cache its face encoding in `enc1`."""
    fileVarPIC1=filedialog.askopenfilename( filetypes=(("Alaina's stuff",".gif"),("All files","*.*")))
    global enc1
    # Display at the top-left corner (0, 0) and keep the encoding globally.
    enc1=pictureDisplay(fileVarPIC1,0,0)
    # NOTE(review): this local is never used afterwards — dead assignment.
    x=enc1
#function command for second variable; gives path for second choice
def browsingPIC2():
    """Ask the user for the second image and cache its face encoding in `enc2`."""
    fileVarPIC2=filedialog.askopenfilename( filetypes=(("Alaina's stuff",".gif"),("All files","*.*")))
    global enc2
    # Display at x=700 so it sits next to the first picture.
    enc2 = pictureDisplay(fileVarPIC2,700,0)
    # NOTE(review): this local is never used afterwards — dead assignment.
    y=enc2
#displaying picture; providing face encodings
def pictureDisplay(fileVarPIC,x,y):
    """Show the image on the canvas at (x, y) and return its first face encoding.

    Raises IndexError when no face is detected in the image (the [0] below).
    """
    pic=face_recognition.load_image_file(fileVarPIC)
    tkpic=PhotoImage(file=fileVarPIC)
    contestant1=canvas.create_image((x,y),image=tkpic,anchor='nw')
    # Keep a reference on a widget so Tk doesn't garbage-collect the image.
    label=Label()
    label.image=tkpic
    label.pack()
    contestant_face_encodings=face_recognition.face_encodings(pic)
    return contestant_face_encodings[0]
#comparing both choices
def compare_choices():
    """Compare the two cached encodings and draw the verdict on the canvas."""
    # 0.5 is the match tolerance passed to compare_faces — presumably stricter
    # than the library default; confirm against face_recognition docs.
    result=face_recognition.compare_faces([enc1],enc2,0.5)
    if result==[True]:
        canvas.create_text(400,400,text='The images are of the same person!!!', fill='green',font=('Helvetica',30))
    else:
        canvas.create_text(400,400,text='The images are not of the same person!!!', fill='red',font=('Helvetica',30))
def clear():
    """Remove everything drawn on the canvas (Reset button handler)."""
    canvas.delete("all")
#customizing tkinter window
window=Tk()
window.title('face_matcher')
# Canvas that both displays the pictures and shows the verdict text.
canvas=Canvas(window,width=1280,height=680)
canvas.pack()
# Browse button for the first picture.
browse=Button(window, text='Browse',command=browsingPIC1)
browse.pack()
# Browse button for the second picture.
browse2=Button(window, text='Browse2',command=browsingPIC2)
browse2.pack()
# Button that runs the face comparison.
compare=Button(window, text='Compare',command=compare_choices)
compare.pack()
# Button that clears the canvas.
reset=Button(window,text='Reset',command=clear)
reset.pack()
# Empty label reserved as a display area.
display_area=Label(window, text="")
display_area.pack()
# Application title drawn directly on the canvas.
title=canvas.create_text(200,200, text='The face matcher',fill='blue',font=('Helvetica',30))
# Enter the Tk event loop (blocks until the window closes).
window.mainloop()
| true |
def anagram(string):
    """Print the (sorted-letters, count) pair of the largest anagram group."""
    # Two words are anagrams iff their sorted letters are equal, so count
    # occurrences of each sorted "signature".
    signatures = [''.join(sorted(word)) for word in string.split()]
    counts = {}
    for sig in signatures:
        counts[sig] = counts.get(sig, 0) + 1
    # max with a count key returns the first-inserted signature on ties,
    # matching the stable sorted/items behaviour.
    print(max(counts.items(), key=lambda kv: kv[1]))
def main():
    """Run the anagram demo on a fixed sample sentence."""
    string = "ant magenta magnate tan gnamate"
    anagram(string)
# Executed on import as well as when run as a script.
main()
| true |
082d4e850468c3d5ea20431a1b59c062c6bbd81c | Python | AprajitaChhawi/365DaysOfCode.MAY | /Day 24 strong number.py | UTF-8 | 679 | 3.546875 | 4 | [] | no_license | #User function Template for python3
class Solution:
    """Strong-number check: N is strong when the sum of the factorials of
    its digits equals N itself (e.g. 145 = 1! + 4! + 5!)."""

    def fact(self, n):
        """Return n! iteratively (fact(0) == fact(1) == 1)."""
        fac = 1
        for i in range(2, n + 1):
            fac = fac * i
        return fac

    def isStrong(self, N):
        """Return 1 if N is a strong number, otherwise 0."""
        total = 0
        num = N
        while num != 0:
            digit = num % 10
            # BUGFIX: the original called `ob.fact(...)`, silently relying on
            # the driver's global instance being named `ob`; use self instead.
            total = total + self.fact(digit)
            num = int(num / 10)
        if N == total:
            return 1
        else:
            return 0
#{
# Driver Code Starts
#Initial Template for Python 3
import math
if __name__ == '__main__':
    # GeeksforGeeks-style driver: first line is the number of test cases,
    # each following line is one N to classify.
    t = int (input ())
    for _ in range (t):
        N=int(input())
        ob = Solution()
        print(ob.isStrong(N))
# } Driver Code Ends
| true |
bef3d1abd584e898424c12ea699eca28bc807f0c | Python | robinhouston/maze-experiments | /pylib-mazer/mazer/generate.py | UTF-8 | 2,103 | 3.671875 | 4 | [] | no_license | """
Maze generation algorithms.
"""
from mazer.maze import (Maze, DIRECTIONS, RELATIVE_DIRECTIONS, N, E, S, W, random_direction)
import random
def recursive_backtracking(maze):
    """Complete the maze using the recursive backtracking algorithm.

    Shuffles the relative directions, then recurses into each still-empty
    neighbour after carving a passage to it.
    """
    directions = [ d for d in RELATIVE_DIRECTIONS ]
    random.shuffle(directions)
    # NOTE(review): maze.branch appears to yield directions while managing
    # the maze cursor (and restoring it between branches) — confirm against
    # the Maze API before refactoring this loop.
    for d in maze.branch(directions):
        if maze.cell_is_empty(d):
            maze.carve(d)
            recursive_backtracking(maze)
    return maze
def kruskal(maze):
    """Complete the maze with randomised Kruskal's algorithm: visit the walls
    in random order and knock one down whenever it still separates two
    disconnected regions.
    """
    wall_list = list(maze.walls())
    random.shuffle(wall_list)
    for wall in wall_list:
        wx, wy, wdir = wall
        # Carving an already-connected pair would create a loop, so skip it.
        if not maze.is_connected(wx, wy, wdir):
            maze.carve(wx, wy, wdir)
    return maze
def aldous_broder(maze):
    """Complete the maze using the Aldous-Broder algorithm: random-walk the
    cursor, carving whenever the walk steps into an untouched cell.

    Starting from a passage-free maze this produces every possible maze
    with equal probability.
    """
    while maze.has_empty_cells():
        step = random_direction()
        if not maze.cell_is_empty(step):
            maze.move(step)
        else:
            maze.carve(step)
    return maze
def _lerw(maze, c, stopping_set):
    """Perform a loop-erased random walk from starting position c
    until an element of stopping_set is hit.

    Returns the loop-free path as a list of cells.  path_indices_by_cell
    maps each cell to the stack of indices where it currently appears in
    `path`, so a revisit can be detected and the loop erased in O(loop).
    """
    path = [c]
    path_indices_by_cell = {c: [0]}
    maze.move(*c)
    while maze.cursor_cell() not in stopping_set:
        # maze.move returns truthy only when the step stayed on the grid.
        if maze.move(random_direction()):
            c = maze.cursor_cell()
            if c in path_indices_by_cell and path_indices_by_cell[c]:
                # Revisited a cell: erase the whole loop formed since the
                # previous visit, popping index records for each erased cell.
                prev_index = path_indices_by_cell[c][-1]
                for d in path[(prev_index + 1):]:
                    path_indices_by_cell[d].pop()
                path = path[:(prev_index + 1)]
            else:
                # New cell: record its position and extend the path.
                path_indices_by_cell.setdefault(c, []).append(len(path))
                path.append(c)
    return path
def wilson(maze):
    """Complete the maze using Wilson's algorithm.

    Repeatedly performs a loop-erased random walk from an unvisited cell
    until it reaches the growing maze, then carves that walk in.  This
    samples every possible completion with equal probability.
    """
    visited = set([(0, 0)])
    for cell in maze.cells():
        if cell in visited:
            continue
        walk = _lerw(maze, cell, visited)
        maze.carve_path(walk)
        visited.update(walk)
    return maze
| true |
c171bf574918a08a6ce6089fe0e60f9aeb36c179 | Python | akingyin1987/pytest | /mypy14.py | UTF-8 | 833 | 3.8125 | 4 | [] | no_license | # 工厂模式
class CarFactory:
    """Factory pattern: map a brand name onto the matching car class."""

    def create_car(self, brand):
        # Dispatch table replaces the if/elif chain; the classes are looked
        # up at call time, so their definitions below this class are fine.
        builders = {"奔驰": Benz, "宝马": BMW, "比亚迪": BYD}
        builder = builders.get(brand)
        if builder is None:
            return "无品牌无法创建"
        return builder()
# Placeholder product classes; CarFactory.create_car instantiates one of these.
class Benz:
    pass
class BMW:
    pass
class BYD:
    pass
# Demo: request a BMW ("宝马") from the factory.
c = CarFactory().create_car("宝马")
# 单例模式
class MySingleton:
    """Classic singleton: every construction returns the same instance,
    and initialisation runs only once."""

    __obj = None        # cached singleton instance
    __init_flag = True  # True until the first (and only) initialisation

    # Override __new__ so repeated construction reuses the cached instance.
    def __new__(cls, *args, **kwargs):
        if cls.__obj is None:
            cls.__obj = object.__new__(cls)
        return cls.__obj

    # BUGFIX: the original defined __int__ (a typo), so this never ran,
    # `name` was never stored, and reading `.name` raised AttributeError.
    def __init__(self, name):
        if MySingleton.__init_flag:
            self.name = name
            # Clear the flag so later constructions don't re-initialise.
            MySingleton.__init_flag = False
# Both names refer to the single shared instance returned by __new__.
a = MySingleton("a")
b = MySingleton("b")
print(dir(a))
# Prints the stored name (requires __init__ to have run and set it).
print(a.name)
print(b)
| true |
4f0e8029dfb7ac16369555191937bfe22633404f | Python | BorisDundakov/Python---Fundamentals | /07. Dictionaries - Lab/keys itteration.py | UTF-8 | 639 | 3.859375 | 4 | [] | no_license | # # # squares = {1: 1, 2: 4, 3: 9}
# # #
# # # for each_key in squares.keys():
# # # print(each_key, end=" ")
# #
# #
# # squares = {1: "Ferrari", 2: "Lamborghini", 3: "McLaren"}
# #
# # for (key, value) in squares.items():
# # print(f"Key: {key}, Value: {value}")
#
#
# # my_dict = {1: "Hello", 2: "World"}
# #
# # new_dict = my_dict.copy()
# #
# # new_dict.clear()
# # print(my_dict)
# # print(new_dict)
#
#
# pop_dict = {1: "Hello", 7: "World"}
# pop_dict.pop(7)
# print(pop_dict)
# Drop every entry whose value is the empty string.  A dict comprehension
# replaces the original delete-while-iterating loop, and renaming the
# variable avoids shadowing the builtin `dict`.
my_dict = {1: 'a', 2: '', 3: 'b', 4: '', 5: '', 6: 'c'}
my_dict = {key: value for key, value in my_dict.items() if value != ''}
print(my_dict)
# Read space-separated integers from stdin and print the list of their
# negations.  A comprehension replaces the original range(len(...)) loop.
numbers = input().split()
opposite_numbers = [-int(token) for token in numbers]
print(opposite_numbers)
a770f07b5d65290639f203c9dd8ab69b7ee2495f | Python | yishuen/python-flask-app-HTML-templates-lab-nyc-career-ds-102218 | /test/index_test.py | UTF-8 | 2,298 | 2.6875 | 3 | [] | no_license | import unittest, sys
sys.path.insert(0, '..')
from app import app
class HTMLTemplateTestCase(unittest.TestCase):
    """Integration tests for the Flask app's routes and rendered templates."""

    # Shared Flask test client, created once at class-definition time.
    testy = app.test_client()
    def test_index_status_code(self):
        # The index route must answer with HTTP 200.
        response = self.testy.get('/')
        self.assertEqual(response.status_code, 200)
    def test_index_html(self):
        response = self.testy.get('/')
        raw = response.data
        result = raw.decode("utf-8")
        # Split on the closing h1 so heading and paragraph are checked apart.
        h1, p_tag = result.split("</h1>")
        self.assertTrue('<h1>Welcome to my flask app' in h1)
        self.assertTrue('<p>be careful, it\'s still under construction...</p>' in p_tag)
    def test_user_profile_status_code(self):
        response = self.testy.get('/profile/frank')
        self.assertEqual(response.status_code, 200)
    def test_user_profile_html(self):
        response = self.testy.get('/profile/frank')
        raw = response.data
        result = raw.decode("utf-8")
        # The route should capitalise the user name in the heading.
        self.assertTrue('<h1>Welcome to Frank\'s profile</h1>' in result)
    def test_hello_world_template_status_code(self):
        response = self.testy.get('/hello-world-template')
        self.assertEqual(response.status_code, 200)
    def test_hello_world_template_html(self):
        response = self.testy.get('/hello-world-template')
        raw = response.data
        result = raw.decode("utf-8")
        self.assertTrue('<h1>Hello again, World! This is a template!</h1>' in result)
    def test_profile_page_template_status_code(self):
        response = self.testy.get('/profile/frank/31/foisting_toasters/jackson_hole,wy')
        self.assertEqual(response.status_code, 200)
    def test_profile_page_template_html(self):
        # Full profile template: name, age, hobby, and hometown rendering.
        response = self.testy.get('/profile/frank/31/foisting_toasters/jackson_hole,wy')
        raw = response.data
        result = raw.decode("utf-8")
        self.assertTrue('<h1>Welcome to Frank\'s profile!</h1>' in result)
        self.assertTrue('<h3>About Frank:</h3>' in result)
        self.assertTrue('<strong>Age:</strong>'in result)
        self.assertTrue('<li>31</li>' in result)
        self.assertTrue('<strong>Favorite Hobby:</strong>' in result)
        self.assertTrue('<li>Foisting_Toasters</li>' in result)
        self.assertTrue('<strong>Hometown:</strong>' in result)
        self.assertTrue('<li>Jackson Hole, WY</li>' in result)
| true |
5e42cdd6de7c95d8b0690093fa841b7243c31f26 | Python | Annajose23/pythonLearning | /mostFrequent.py | UTF-8 | 326 | 3.421875 | 3 | [] | no_license | def mostFrequent(arr):
arr_freq = {}
for item in arr:
if item in arr_freq:
arr_freq[item] = arr_freq[item] + 1
else:
arr_freq[item] = 1
arr_sorted = sorted(arr_freq.items(), key=lambda kv: kv[1], reverse=True)
print(arr_sorted[0][0])
mostFrequent([4,1,3,3,2,1,4,1]) | true |
685e9e377faccb765bd47ca768790ca8d6dce019 | Python | MivlaM/AIVirtualAssistant | /Mivbot.py | UTF-8 | 9,949 | 2.984375 | 3 | [] | no_license |
import pyttsx3 # pip install pyttsx3
import datetime
import speech_recognition as sr #Pip install speech recognition
import wikipedia #pip install wikipedia
import webbrowser as wb
import psutil #pip install psutil
import pyjokes #pip install pyjokes
import os
import pyautogui #pip install pyautogui
import random
import json
import requests
from urllib.request import urlopen
import wolframalpha #pip install wolframalpha
import time
# Register a Chrome controller for webbrowser using its Windows install path.
wb.register('chrome', None, wb.BackgroundBrowser("C:\Program Files (x86)\Google\Chrome\Application/chrome.exe"))
# Text-to-speech engine shared by every speak() call below.
engine = pyttsx3.init()
# WolframAlpha application id used by the "calculate"/"what is" commands.
wolframalpha_app_id = "WLW5AT-82T4ALK6GV"
def speak(audio):
    """Say the given text out loud, blocking until playback finishes."""
    engine.say(audio)
    engine.runAndWait()
speak ("Olá usuário")
def time_():
    """Speak the current wall-clock time."""
    Time=datetime.datetime.now().strftime("%I:%M:%S") # NOTE(review): %I is 12-hour format; the original comment claimed 24-hour (%H)
    speak("Atualmente são: ")
    speak(Time)
def date_():
    """Speak today's day, month, and year as separate utterances."""
    year = datetime.datetime.now().year
    month = datetime.datetime.now().month
    date = datetime.datetime.now().day
    speak("Hoje é dia ")
    # NOTE(review): these pass ints to speak(); confirm pyttsx3 accepts
    # non-string arguments on the target platform.
    speak(date)
    speak("Do mês")
    speak(month)
    speak("Do ano")
    speak(year)
def wishme():
    """Greet the user: announce time and date, then a time-of-day salutation."""
    speak("Saudações Garots!")
    time_()
    date_()
    # Pick the greeting by hour of day (the else covers 0-5 a.m.).
    hour = datetime.datetime.now().hour
    if hour >= 6 and hour <12:
        speak("Bom dia camarada!")
    elif hour >=12 and hour <18:
        speak("Boa tarde camarada!")
    elif hour >=18 and hour <24:
        speak("Boa noite camarada!")
    else:
        speak("Boa noite camarada!")
    speak("Estou a seu serviço. Por favor, diga-me como ajudá-lo hoje?")
def TakeCommand():
    """Listen on the microphone and return the recognised text.

    Returns the literal string "None" when recognition fails, so callers
    can still lowercase/scan the result without a type check.
    """
    r=sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening.....")
        # Seconds of silence that mark the end of a phrase.
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        # NOTE(review): recognition language is en-US while the prompts are
        # Portuguese — confirm the intended language.
        query = r.recognize_google(audio,language="en-US")
        print(query)
    except Exception as e:
        print(e)
        print("Por favor repita... ")
        return "None"
    return query
def cpu():
    """Speak the current CPU utilisation and battery percentage."""
    usage = str(psutil.cpu_percent())
    speak("Cpu is at"+usage)
    # NOTE(review): sensors_battery() returns None on desktops without a
    # battery, which would make battery.percent raise AttributeError.
    battery = psutil.sensors_battery()
    speak("Battery is at")
    speak(battery.percent)
def screenshot():
    """Capture the screen and save it to image1.png in the working directory."""
    img = pyautogui.screenshot()
    img.save("image1.png")
    print ("Screenshot taken!")
def joke():
    """Speak a random joke fetched from the pyjokes library."""
    speak(pyjokes.get_joke())
if __name__ == "__main__":
    wishme()
    # Main command loop: listen, lowercase, and dispatch on keywords.
    while True:
        query = TakeCommand().lower()
        #All commands will be stored in lower case in query
        #For easy recognition
        if "time" in query: # Tell us the time when asked
            time_()
        if "date" in query: #Tell us the date when asked
            date_()
        elif "wikipedia" in query:
            speak("Procurando....")
            # NOTE(review): query is already lowercased, so "Wikipedia"
            # (capital W) never matches and this replace is a no-op.
            query=query.replace("Wikipedia","")
            result = wikipedia.summary(query,sentences=5)
            speak("Segundo a Wikipedia...")
            print(result)
            speak(result)
        elif 'search in chrome' in query:
            speak('O que você gostaria de pesquisar?')
            search = TakeCommand().lower()
            wb.get('chrome').open_new_tab(search + '.com')
        elif "search youtube" in query:
            speak("O que eu devo pesquisar no YouTube?")
            Search_Term = TakeCommand().lower()
            speak("Lá vamos nós para o YouTube")
            wb.open("https://www.youtube.com/results?search_query="+Search_Term)
        elif "search google" in query:
            speak("O que eu devo pesquisar?")
            Search_Term2 = TakeCommand().lower()
            speak("Aperte os cintos, vamos decolar rumo ao Google")
            wb.open("https://www.google.com/search?q="+Search_Term2) ##Só funfa com palavras em inglês
        elif "cpu" in query:
            cpu()
        elif "joke" in query:
            joke()
        elif "go offline" in query:
            speak("Estou de saída, camarada!")
            quit()
        elif "microsoft word" in query:
            speak("Abrindo o word....")
            ms_word = r"C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Microsoft Office 2016\Word 2016.exe"
            os.startfile(ms_word) #Only open the folders
        # NOTE(review): unreachable — "microsoft word" above matches any
        # query containing "microsoft word premium" first.
        elif "microsoft word premium" in query:
            speak("Abrindo o word....")
            ms_word =r"E:/Office/Office16/WINWORD.EXE"
            os.startfile(ms_word) #You need the paid version
        elif "write a note" in query:
            speak("O que eu deveria escrever?")
            notes = TakeCommand()
            file = open("notes.txt","w")
            speak("Devo incluir data e hora?")
            ans = TakeCommand()
            # NOTE(review): `"yes" or "sure" in ans` is always truthy (the
            # non-empty string "yes" short-circuits); the else branch below
            # can never run.  Likely intent: "yes" in ans or "sure" in ans.
            if "yes" or "sure" in ans:
                strTime = datetime.datetime.now().strftime("%H:%M:%S")
                file.write(strTime)
                file.write(":-")
                file.write(notes)
                speak("Done Taking notes")
            else:
                file.write(notes)
        elif "show notes" in query:
            speak("mostrando anotações")
            file = open("notes.txt", "r")
            # NOTE(review): the second read() returns "" because the first
            # read() already consumed the file, so nothing is spoken.
            print(file.read())
            speak(file.read())
        elif "screenshot" in query:
            screenshot()
        elif "play music" in query:
            if "play music" in query:
                audio_1 = "E:\cr"
                audio_2 = "E:\dr"
                speak ("Qual devo escolher, potato ou tomato")
                ans = (TakeCommand().lower())
                while(ans!= "potato" and ans!= "tomato"):
                    speak("Tente de novo!")
                    ans = (TakeCommand().lower())
                if "potato" in ans:
                    songs_dir = audio_1
                    songs = os.listdir(songs_dir)
                    print(songs)
                elif "tomato" in ans:
                    songs_dir = audio_2
                    songs = os.listdir(songs_dir)
                    print(songs)
                speak("Select a random number")
                rand = (TakeCommand().lower())
                while("number" not in rand and rand !="random"):
                    speak("Não conseguir entender, por favor repita")
                    rand =(TakeCommand().lower())
                if "number" in rand:
                    rand = int(rand.replace("number ",""))
                    os.startfile(os.path.join(songs_dir,songs[rand]))
                    continue
                elif "random" in rand:
                    rand = random.randint(1,11)
                    os.startfile(os.path.join(songs_dir,songs[rand]))
                    continue
        elif "play song" in query:
            music_dir = "E:\dr"
            music = os.listdir(music_dir)
            speak("O que devo escolher?")
            ans = TakeCommand().lower()
            no = int(ans.replace("number", ""))
            os.startfile(os.path.join(music_dir,music[no]))
        elif "nice job" in query:
            speak("Eu vos agradeço de todo coração pelo elogio!")
            TakeCommand().lower()
        elif "good job" in query:
            speak("Eu agradeço a você desde o fundo de meu coração")
            TakeCommand().lower()
        # NOTE(review): dead branch — query is lowercased, so the
        # capitalised phrase "Who is your creator" can never match.
        elif "Who is your creator" in query:
            speak("O meu criador é um exemplo de pessoa, não posso te dizer muito a respeito dele, ele gosta de se manter discreto")
            TakeCommand().lower()
        elif "remember that" in query:
            speak("O que eu devo me lembrar")
            memory = TakeCommand()
            speak("Você me pediu para me lembrar"+memory)
            remember = open("memory.txt","w")
            remember.write(memory)
            remember.close()
        elif "do you remember anything" in query:
            remember = open("memory.txt","r")
            speak("Você me mandou me lembrar disso"+remember.read())
        elif "where is" in query:
            query = query.replace("where is","")
            location = query
            speak("Você me pediu para localizar"+location)
            wb.open_new_tab("https://www.google.com/maps/place/"+location)
        elif "news" in query:
            try:
                # NOTE(review): API key is hard-coded in the URL — move it
                # to configuration and rotate the exposed key.
                jsonObj = urlopen("https://newsapi.org/v2/top-headlines?country=us&category=entertainment&apiKey=445fc78a0d7a41cf9c848a78ef0b8ddb")
                data = json.load(jsonObj)
                i = 1
                speak("Aqui estão as novas notícias sobre entretenimento")
                print("========= Top Headlines ===========")
                for item in data["articles"]:
                    print(str(i)+". "+item["title"]+"\n")
                    print(item["description"]+"\n")
                    speak(item["title"])
                    i += 1
            except Exception as e:
                print(str(e))
        elif "calculate" in query:
            client = wolframalpha.Client(wolframalpha_app_id)
            # Send everything after the word "calculate" to WolframAlpha.
            indx = query.lower().split().index("calculate")
            query = query.split()[indx + 1:]
            res = client.query("".join(query))
            answer = next(res.results).text
            print("The answer is : "+answer)
            speak("A resposta é "+answer)
        elif "what is" in query or "who is" in query:
            client = wolframalpha.Client(wolframalpha_app_id)
            res = client.query(query)
            try:
                print(next(res.results).text)
                speak(next(res.results).text)
            except StopIteration:
                print("No results")
        elif "stop listening" in query:
            speak("Por quantos segundos você deseja que eu pare de executar vossos comandos?")
            ans = int(TakeCommand())
            time.sleep(ans)
            print(ans)
        elif "log out" in query:
            os.system("shutdown -1")
        elif "restart" in query:
            os.system("shutdown /r /t 1")
        elif "shutdown" in query:
            os.system("shutdown /s /t 1")
0524578214dd63e46f6496fff74c5b6050142c79 | Python | geekhub-python/akvarium-2-0-valiknet18 | /aquarium_test.py | UTF-8 | 2,603 | 2.703125 | 3 | [] | no_license | import unittest
import occupants
from random import choice
from aquarium import Aquarium, AquariumFilter, FishCreator
class TestAquarium(unittest.TestCase):
    """Checks that a fresh Aquarium is populated within the expected bounds."""

    # Exclusive upper/lower bounds on the occupant count of a new aquarium.
    OCCUPANTS_MAX_COUNT = 200
    OCCUPANTS_MIN_COUNT = 20
    def setUp(self):
        self.aquarium = Aquarium()
    def test_occupants_length(self):
        # Occupant count must lie strictly between the two bounds.
        self.assertGreater(self.OCCUPANTS_MAX_COUNT, len(self.aquarium.occupants))
        self.assertLess(self.OCCUPANTS_MIN_COUNT, len(self.aquarium.occupants))
class TestFishCreator(unittest.TestCase):
    """For each occupant type: count stays within the cap and a random
    sample is an instance of the matching occupants class."""

    def test_create_predators(self):
        predators = FishCreator.get_occupants(FishCreator.PREDATOR_TYPE)
        self.assertGreaterEqual(FishCreator.MAX_PREDATORS_COUNT, len(predators))
        self.assertIsInstance(choice(predators), occupants.Predator)
    def test_create_seaweed(self):
        seaweeds = FishCreator.get_occupants(FishCreator.SEAWEED_TYPE)
        self.assertGreaterEqual(FishCreator.MAX_SEAWEED_COUNT, len(seaweeds))
        self.assertIsInstance(choice(seaweeds), occupants.Seaweed)
    def test_create_herbivores(self):
        herbivores = FishCreator.get_occupants(FishCreator.HERBIVORES_TYPE)
        self.assertGreaterEqual(FishCreator.MAX_HERBIVORES_COUNT, len(herbivores))
        self.assertIsInstance(choice(herbivores), occupants.Herbivores)
    def test_create_snail(self):
        snails = FishCreator.get_occupants(FishCreator.SNAIL_TYPE)
        self.assertGreaterEqual(FishCreator.MAX_SNAILS_COUNT, len(snails))
        self.assertIsInstance(choice(snails), occupants.Snail)
class TestAquariumFilter(unittest.TestCase):
    """Filtering a populated aquarium yields non-empty, correctly-typed lists."""

    def setUp(self):
        self.aquarium = Aquarium()
        self.aquarium_filter = AquariumFilter(self.aquarium)
    def test_is_predator(self):
        predators = self.aquarium_filter.get_predators()
        self.assertLess(0, len(predators))
        self.assertIsInstance(choice(predators), occupants.Predator)
    def test_is_snail(self):
        snails = self.aquarium_filter.get_snails()
        self.assertLess(0, len(snails))
        self.assertIsInstance(choice(snails), occupants.Snail)
    def test_is_seaweed(self):
        # Seaweed/herbivores go through the generic class-name filter.
        seaweeds = self.aquarium_filter.filter_by_class_name(occupants.Seaweed)
        self.assertLess(0, len(seaweeds))
        self.assertIsInstance(choice(seaweeds), occupants.Seaweed)
    def test_is_herbivores(self):
        herbivores = self.aquarium_filter.filter_by_class_name(occupants.Herbivores)
        self.assertLess(0, len(herbivores))
        self.assertIsInstance(choice(herbivores), occupants.Herbivores)
# Run the whole suite when executed directly.
if __name__ == '__main__':
    unittest.main()
9a8fe880ff7121eb5078b1bfa24f7f6d594677db | Python | ehsanik/dogTorch | /models/resnet18_image2imu.py | UTF-8 | 3,540 | 2.609375 | 3 | [
"MIT"
] | permissive | """
=================
This is a non-recurrent model for inferring the dog's action with a ResNet-18 network: given two input images, it infers the IMU changes corresponding to those two frames.
=================
"""
import torch
import torch.nn as nn
import pdb
from torchvision.models import resnet18 as torchvision_resnet18
from extensions.multi_label_cross_entropy import MultiLabelCrossEntropyLoss
from training import metrics
from .basemodel import BaseModel
class ResNet18Image2IMU(BaseModel):
    """Non-recurrent ResNet-18 model: two input frames -> one IMU prediction.

    Each frame passes through a shared ResNet-18 trunk; the concatenated
    features feed one linear classifier per IMU.
    """

    # Evaluation metrics applied to this model's outputs.
    metric = [
        metrics.AllAtOnce, metrics.AngleEvaluationMetric,
        metrics.SequenceMultiClassMetric
    ]
    def __init__(self, args):
        super(ResNet18Image2IMU, self).__init__()
        # This architecture only supports 2 input frames and 1 output step.
        assert args.sequence_length == 2, "ResNet18Image2IMU supports seq-len=2"
        assert args.input_length == 2, "input length not currect"
        assert args.output_length == 1, "output length not currect"
        # Per-class loss weights restricted to the selected IMUs.
        self.class_weights = args.dataset.CLASS_WEIGHTS[torch.LongTensor(
            args.imus)]
        self.output_length = args.output_length
        self.lr = args.lrm
        self.step_size = args.step_size
        self.decay = args.weight_decay
        resnet_model = torchvision_resnet18(pretrained=args.pretrain)
        # Remove the last fully connected layer.
        del resnet_model.fc
        self.resnet = resnet_model
        # 512 = ResNet-18's feature width after global average pooling.
        num_features = 512
        num_frames = 2
        num_classes = args.num_classes
        # One linear head per IMU, registered as attributes imu<i>.
        self.imus = args.imus
        for i in self.imus:
            setattr(self, 'imu{}'.format(i),
                    nn.Linear(num_frames * num_features, num_classes))
        self.dropout = nn.Dropout(p=args.dropout_ratio)
    def resnet_features(self, x):
        # Manual forward through the truncated ResNet (fc layer removed),
        # followed by flattening and dropout.
        x = self.resnet.conv1(x)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)
        x = self.resnet.layer1(x)
        x = self.resnet.layer2(x)
        x = self.resnet.layer3(x)
        x = self.resnet.layer4(x)
        x = self.resnet.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout(x)
        return x
    def feats(self, x):
        # Input stacks both frames along the channel axis: the first three
        # channels are frame 1, the rest frame 2.
        frame1 = x[:, :3, :, :]
        frame2 = x[:, 3:, :, :]
        return torch.cat(
            [self.resnet_features(frame1),
             self.resnet_features(frame2)], 1)
    def forward(self, input, target):
        features = self.feats(input)
        # Indices of the final output_length steps of the target sequence.
        output_indices = list(
            range(target.size(1) - self.output_length, target.size(1)))
        # Iterate over fully connecteds for each imu, perform forward pass and
        # record the output.
        imu_out = []
        for i in self.imus:
            imu_i = getattr(self, 'imu{}'.format(i))
            imu_out.append(imu_i(features))
        # Add a singleton dim at 1 for sequence length, which is always 1 in
        # this model.
        return torch.stack(
            imu_out,
            dim=1).unsqueeze(1), target, torch.LongTensor(output_indices)
    def loss(self):
        # Weighted multi-label cross entropy over the selected IMUs.
        return MultiLabelCrossEntropyLoss(self.class_weights)
    def learning_rate(self, epoch):
        # Step schedule: multiply by decay_rate every `step` epochs,
        # capped at three decays.
        base_lr = self.lr
        decay_rate = self.decay
        step = self.step_size
        assert 1 <= epoch
        if 1 <= epoch <= step:
            return base_lr
        elif step <= epoch <= step * 2:
            return base_lr * decay_rate
        elif step * 2 <= epoch <= step * 3:
            return base_lr * decay_rate * decay_rate
        else:
            return base_lr * decay_rate * decay_rate * decay_rate
| true |
c946ca5c86aa1be91d1ba845d0fd4c9b27da199f | Python | Hdwig/Math | /Lesson_3_1.py | UTF-8 | 215 | 3.171875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# x values and three straight lines: y = 5x + 3, y = 7x + 4, y = 7.
x = np.arange(-10, 10)
ys = 5 * x + 3
ym = 7 * x + 4
yl = 0 * x + 7
plt.xlabel("x")
plt.ylabel("y")
plt.grid(True)
plt.plot(x, ys)
plt.plot(x, ym)
# NOTE(review): there is no plt.show() call, so nothing is displayed
# outside interactive backends — confirm whether that is intentional.
plt.plot(x, yl)
| true |
15eeb2aa75d6332fa2dd280a8fa84718d15667e3 | Python | Ansore/super_resolution | /images_to_tfrecord.py | UTF-8 | 1,451 | 2.59375 | 3 | [
"MIT"
] | permissive | from configs import *
import os
import numpy as np
from PIL import Image
import tensorflow as tf
def scan_file(path):
    """Return the '/'-joined paths of all .png files directly inside *path*."""
    return [path + '/' + entry
            for entry in os.listdir(path)
            if entry.endswith('.png')]
def image_to_example(image_path):
    """Wrap the raw bytes of an image in a tf.train.Example for TFRecord output."""
    img = Image.open(image_path, 'r')
    # Convert the image to its raw binary representation.
    img_raw = img.tobytes()
    example = tf.train.Example(
        features=tf.train.Features(feature={
            'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw]))
        }))
    return example
def main():
    """Serialize all training/testing PNGs into train/test TFRecord files.

    Paths (TFRECORD_PATH, TRAINING_DATA_PATH, TESTING_DATA_PATH) come from
    the star-import of `configs` at the top of the file.
    """
    train_tfrecord_path = TFRECORD_PATH + '/train.tfrecord'
    test_tfrecord_path = TFRECORD_PATH + '/test.tfrecord'
    train_writer = tf.python_io.TFRecordWriter(train_tfrecord_path)
    test_writer = tf.python_io.TFRecordWriter(test_tfrecord_path)
    train_file_list = scan_file(TRAINING_DATA_PATH)
    test_file_list = scan_file(TESTING_DATA_PATH)
    for train_file in train_file_list:
        example = image_to_example(train_file)
        # Serialize the Example to a string before writing.
        train_writer.write(example.SerializeToString())
    train_writer.close()
    for test_file in test_file_list:
        example = image_to_example(test_file)
        # Serialize the Example to a string before writing.
        test_writer.write(example.SerializeToString())
    test_writer.close()
# Only convert when executed directly (not on import).
if __name__ == '__main__':
    main()
edf7256452ef397cd8f46fccabab0e04182eed80 | Python | merissab44/superHero | /ability.py | UTF-8 | 330 | 3.578125 | 4 | [] | no_license | import random
class Ability:
def __init__(self, name, max_damage):
# initialized values that will be passed when
# we call the class
self.name = name
self.max_damage = max_damage
def attack(self):
random_value = random.randint(0, int(self.max_damage))
return random_value
| true |
06e89ce84ac1b87267f827031f050104b8497e32 | Python | robertdavidwest/forexpricecomparison | /app/db_create.py | UTF-8 | 877 | 2.828125 | 3 | [] | no_license | # db_create.py
from datetime import datetime
from views import db
from models import FXQuotes
if __name__ == '__main__':
    # create the database and the db table
    db.create_all()
    # add one row of sample data - this is actual data
    quote_time = datetime(year=2015, month=9, day=30,
                          hour=21, minute=36, second=50)
    # NOTE(review): 'https:/www.transferwise.com' is missing a slash
    # after the scheme — likely meant 'https://www.transferwise.com'.
    row_obj = FXQuotes(provider='TransferWise',
                       provider_href='https:/www.transferwise.com',
                       details='Transfer from UK to US bank account',
                       quote_time=quote_time,
                       source_currency='GBP',
                       target_currency='USD',
                       fee=1.0,
                       source_value=100.0,
                       target_value=149.76)
    db.session.add(row_obj)
    # commit the changes
    db.session.commit()
| true |
92244fdcac2ac00ce7f02270a6548ee6344d130c | Python | edimaudo/Python-projects | /visualization/scatterplot.py | UTF-8 | 445 | 3.390625 | 3 | [] | no_license | import matplotlib.pyplot as plt
# X: cups of iced coffee sold; Y: temperature in Fahrenheit (per the labels).
X = [590,540,740,130,810,300,320,230,470,620,770,250]
Y = [32,36,39,52,61,72,77,75,68,57,48,48]
# scatter plot: red triangles, size 60
plt.scatter(X, Y, s=60, c='red', marker='^')
#change axes ranges
plt.xlim(0,1000)
plt.ylim(0,100)
#add title
plt.title('Relationship Between Temperature and Iced Coffee Sales')
#add x and y labels
plt.xlabel('Cups of Iced Coffee Sold')
plt.ylabel('Temperature in Fahrenheit')
#show plot
plt.show()
a21574955b9b8042dfb5d6a3ae95b982880acfc6 | Python | isseikz/Multicopter | /sub_system/logger.py | UTF-8 | 3,440 | 2.609375 | 3 | [
"MIT"
] | permissive | """Logger"""
import numpy as np
import matplotlib.pyplot as plt
import math
class Logger(object):
    """Collects per-timestep multicopter state and plots it in a 3x3 grid."""
    def __init__(self):
        super(Logger, self).__init__()
        # Parallel time-series lists, one entry per add_data() call:
        # time, position, velocity, acceleration, euler angles [deg],
        # angular velocity, angular acceleration, per-rotor forces,
        # per-rotor torques, and combined CoG force/moment.
        self.log_tim = []
        self.log_pos = []
        self.log_vel = []
        self.log_acc = []
        self.log_ang = []
        self.log_avl = []
        self.log_aac = []
        self.log_rof = []
        self.log_rom = []
        self.log_fm = []
    def add_data(self, model):
        """Snapshot the model's current state into the log lists."""
        self.log_tim.append(model.dynamics.get_time())
        self.log_pos.append(model.get_position())
        self.log_vel.append(model.get_velocity())
        self.log_acc.append(model.get_acceleration())
        # Euler angles are stored in degrees (converted from radians here).
        self.log_ang.append([angle * 180 / math.pi for angle in model.get_euler_angle()]) #self.log_ang.append(model.get_quartanion())#
        self.log_avl.append(model.get_angular_velocity())
        self.log_aac.append(model.get_angular_acceleration())
        self.log_rof.append([rotor.get_total_force() for rotor in model.r])
        self.log_rom.append([rotor.get_total_torque() for rotor in model.r])
        self.log_fm.append(np.hstack((model.get_force(), model.get_torque())))
    def visualize_data(self, save=False, show=True, filename=''):
        """Plot all logged series in a 3x3 grid and save it as a PNG.

        NOTE(review): the `save` flag is never read — plt.savefig below
        always runs; only `show` controls plt.show().
        """
        # Visulize datas
        fig, ax = plt.subplots(3,3, sharex='col', figsize=(12,9))
        lineobj = ax[0,0].plot(self.log_tim, self.log_pos)
        ax[0,0].legend(iter(lineobj), ['x','y','z'])
        ax[0,0].set_title('CoG position [m]')
        ax[0,0].grid()
        lineobj = ax[1,0].plot(self.log_tim, self.log_vel)
        ax[1,0].legend(iter(lineobj), ['dxdt','dydt','dzdt'])
        ax[1,0].set_title('CoG velocity [m/s]')
        ax[1,0].grid()
        lineobj = ax[2,0].plot(self.log_tim, self.log_acc)
        ax[2,0].legend(iter(lineobj), ['d2xdt2','dy2dt2','dz2dt2'])
        ax[2,0].set_title('CoG acceleration [m/s2]')
        ax[2,0].set_xlabel('time [s]')
        ax[2,0].grid()
        lineobj = ax[0,1].plot(self.log_tim, self.log_ang)
        ax[0,1].legend(iter(lineobj), ['roll','pitch','yaw'])
        ax[0,1].set_title('Attitude angle [deg]')
        ax[0,1].grid()
        lineobj = ax[1,1].plot(self.log_tim, self.log_avl)
        ax[1,1].legend(iter(lineobj), ['p','q','r'])
        ax[1,1].set_title('Anguler velocity [rad/s]')
        ax[1,1].grid()
        lineobj = ax[2,1].plot(self.log_tim, self.log_aac)
        ax[2,1].legend(iter(lineobj), ['dpdt','dqdt','drdt'])
        ax[2,1].set_title('Angular acceleration [rad/s2]')
        ax[2,1].set_xlabel('time [s]')
        ax[2,1].grid()
        # Right column: per-rotor thrust (z force), torque, and CoG wrench.
        lineobj = ax[0,2].plot(self.log_tim, [[log[2] for log in log_rotor] for log_rotor in self.log_rof])
        ax[0,2].legend(iter(lineobj), ['rotor1','rotor2','rotor3','rotor4'])
        ax[0,2].set_title('propeller force [N]')
        ax[0,2].grid()
        lineobj = ax[1,2].plot(self.log_tim, [[log[2] for log in log_rotor] for log_rotor in self.log_rom])
        ax[1,2].legend(iter(lineobj), ['rotor1','rotor2','rotor3','rotor4'])
        ax[1,2].set_title('propeller torque [NM]')
        ax[1,2].grid()
        lineobj = ax[2,2].plot(self.log_tim, self.log_fm)
        ax[2,2].legend(iter(lineobj), ['fx','fy','fz','mx','my','mz'])
        ax[2,2].set_title('CoG Force / Moment')
        ax[2,2].set_xlabel('time [s]')
        ax[2,2].grid()
        plt.savefig('./model/'+filename+'_log.png')
        if show:
            plt.show()
| true |
0438493823e3ec18b7afc63351293e25a25349b6 | Python | priyalorha/jesse | /jesse/models/FuturesExchange.py | UTF-8 | 7,369 | 2.6875 | 3 | [
"MIT"
] | permissive | import jesse.helpers as jh
import jesse.services.logger as logger
from jesse.exceptions import NegativeBalance, InsufficientMargin
from jesse.models import Order
from jesse.enums import sides, order_types
from jesse.libs import DynamicNumpyArray
import numpy as np
from jesse.services import selectors
from .Exchange import Exchange
class FuturesExchange(Exchange):
    """Simulated futures exchange: tracks balances, leverage/margin, and the
    worst-case value of open orders per asset.

    NOTE(review): the four dicts below are *class* attributes, so they would
    be shared by every FuturesExchange instance; this is only safe if the
    framework creates a single instance per process -- confirm.
    """
    # current holding assets
    assets = {}
    # current available assets (dynamically changes based on active orders)
    available_assets = {}
    # open order book per base asset: DynamicNumpyArray rows of [qty, price]
    buy_orders = {}
    sell_orders = {}
    def __init__(
            self,
            name: str,
            starting_assets: list,
            fee_rate: float,
            settlement_currency: str,
            futures_leverage_mode: str,
            futures_leverage: int
    ):
        super().__init__(name, starting_assets, fee_rate, 'futures')
        self.futures_leverage_mode = futures_leverage_mode
        self.futures_leverage = futures_leverage
        for item in starting_assets:
            self.buy_orders[item['asset']] = DynamicNumpyArray((10, 2))
            self.sell_orders[item['asset']] = DynamicNumpyArray((10, 2))
        # make sure trading routes exist in starting_assets
        from jesse.routes import router
        for r in router.routes:
            base = jh.base_asset(r.symbol)
            if base not in self.assets:
                self.assets[base] = 0
            if base not in self.buy_orders:
                self.buy_orders[base] = DynamicNumpyArray((10, 2))
            if base not in self.sell_orders:
                self.sell_orders[base] = DynamicNumpyArray((10, 2))
        self.starting_assets = self.assets.copy()
        self.available_assets = self.assets.copy()
        # start from 0 balance for self.available_assets which acts as a temp variable
        for k in self.available_assets:
            self.available_assets[k] = 0
        self.settlement_currency = settlement_currency.upper()
    def wallet_balance(self, symbol=''):
        """Total balance held in the settlement currency (`symbol` ignored)."""
        return self.assets[self.settlement_currency]
    def available_margin(self, symbol=''):
        """Margin still available for new orders.

        Starts from balance x leverage, adds unrealized PNL of open
        positions, and subtracts the worst-case (larger side) exposure of
        open buy/sell orders for every traded asset.
        """
        temp_credit = self.assets[self.settlement_currency] * self.futures_leverage
        # we need to consider buy and sell orders of ALL pairs
        # also, consider the value of all open positions
        for asset in self.assets:
            if asset == self.settlement_currency:
                continue
            position = selectors.get_position(self.name, asset + "-" + self.settlement_currency)
            if position is None:
                continue
            if position.is_open:
                # add unrealized PNL
                temp_credit += position.pnl
            # subtract worst scenario orders' used margin
            sum_buy_orders = (self.buy_orders[asset][:][:, 0] * self.buy_orders[asset][:][:, 1]).sum()
            sum_sell_orders = (self.sell_orders[asset][:][:, 0] * self.sell_orders[asset][:][:, 1]).sum()
            if position.is_open:
                if position.type == 'long':
                    sum_buy_orders += position.value
                else:
                    sum_sell_orders -= abs(position.value)
            temp_credit -= max(abs(sum_buy_orders), abs(sum_sell_orders))
        return temp_credit
    def charge_fee(self, amount):
        """Deduct `fee_rate * |amount|` from the settlement-currency balance."""
        fee_amount = abs(amount) * self.fee_rate
        new_balance = self.assets[self.settlement_currency] - fee_amount
        logger.info(
            'Charged {} as fee. Balance for {} on {} changed from {} to {}'.format(
                round(fee_amount, 2), self.settlement_currency, self.name,
                round(self.assets[self.settlement_currency], 2),
                round(new_balance, 2),
            )
        )
        self.assets[self.settlement_currency] = new_balance
    def add_realized_pnl(self, realized_pnl: float):
        """Credit (or debit, if negative) realized PNL to the balance."""
        new_balance = self.assets[self.settlement_currency] + realized_pnl
        logger.info('Added realized PNL of {}. Balance for {} on {} changed from {} to {}'.format(
            round(realized_pnl, 2),
            self.settlement_currency, self.name,
            round(self.assets[self.settlement_currency], 2),
        ))
        self.assets[self.settlement_currency] = new_balance
    def on_order_submission(self, order: Order, skip_market_order=True):
        """Validate margin for a new order and record it in the order book.

        Market orders are skipped on submission (price unknown) and recorded
        later by on_order_execution with skip_market_order=False.
        """
        base_asset = jh.base_asset(order.symbol)
        # make sure we don't spend more than we're allowed considering current allowed leverage
        if order.type != order_types.MARKET or skip_market_order:
            if not order.is_reduce_only:
                order_size = abs(order.qty * order.price)
                remaining_margin = self.available_margin()
                if order_size > remaining_margin:
                    raise InsufficientMargin('You cannot submit an order for ${} when your margin balance is ${}'.format(
                        round(order_size), round(remaining_margin)
                    ))
        # skip market order at the time of submission because we don't have
        # the exact order.price. Instead, we call on_order_submission() one
        # more time at time of execution without "skip_market_order=False".
        if order.type == order_types.MARKET and skip_market_order:
            return
        self.available_assets[base_asset] += order.qty
        if order.side == sides.BUY:
            self.buy_orders[base_asset].append(np.array([order.qty, order.price]))
        else:
            self.sell_orders[base_asset].append(np.array([order.qty, order.price]))
    def on_order_execution(self, order: Order):
        """Remove an executed order from the book (zeroing its [qty, price] row)."""
        base_asset = jh.base_asset(order.symbol)
        if order.type == order_types.MARKET:
            # Market orders were skipped at submission; record them now.
            self.on_order_submission(order, skip_market_order=False)
        if order.side == sides.BUY:
            # find and set order to [0, 0] (same as removing it)
            for index, item in enumerate(self.buy_orders[base_asset]):
                if item[0] == order.qty and item[1] == order.price:
                    self.buy_orders[base_asset][index] = np.array([0, 0])
                    break
        else:
            # find and set order to [0, 0] (same as removing it)
            for index, item in enumerate(self.sell_orders[base_asset]):
                if item[0] == order.qty and item[1] == order.price:
                    self.sell_orders[base_asset][index] = np.array([0, 0])
                    break
        return
    def on_order_cancellation(self, order: Order):
        """Release the cancelled order's quantity and drop it from the book."""
        base_asset = jh.base_asset(order.symbol)
        self.available_assets[base_asset] -= order.qty
        # self.available_assets[quote_asset] += order.qty * order.price
        if order.side == sides.BUY:
            # find and set order to [0, 0] (same as removing it)
            for index, item in enumerate(self.buy_orders[base_asset]):
                if item[0] == order.qty and item[1] == order.price:
                    self.buy_orders[base_asset][index] = np.array([0, 0])
                    break
        else:
            # find and set order to [0, 0] (same as removing it)
            for index, item in enumerate(self.sell_orders[base_asset]):
                if item[0] == order.qty and item[1] == order.price:
                    self.sell_orders[base_asset][index] = np.array([0, 0])
                    break
return | true |
53a878a603e3afb0010cab1057849b1fb2f490dc | Python | DarthGecko/orthometer | /intronitator.py | UTF-8 | 11,832 | 2.609375 | 3 | [] | no_license | # Nick Weiner 2017
# introninator.py
# Getting introns from FASTA files made by phytozomedler
import re
from Bio import SeqIO
from Bio.Seq import Seq
from Bio import motifs
don_len = 5
acc_len = 8
# Things to work out:
# How are we storing the data on the introns? In RAM or ROM?
# Make a FASTA file of introns?
# Set parameters of the record using title2ids
# Reference pages:
# http://biopython.org/DIST/docs/api/Bio.SeqIO-module.html
# http://biopython.org/DIST/docs/api/Bio.SeqIO.FastaIO-module.html
# Negative strands??!
def analyze_intron(intron_seq):
from Bio.SeqUtils import GC
gc = '{0:.2f}'.format(GC(intron_seq))
ambiguous = ('W' or 'S' or 'M' or 'K' or 'R' or 'Y' or 'B' or 'D' or 'H' or 'V'
or 'N' or '-') in intron_seq.upper()
if ambiguous:
ambig = 'ambig'
else:
ambig = 'unamb'
len(intron_seq)
return [gc, ambig, ]
def score_site(seq, model):
assert isinstance(model, motifs.Motif)
assert isinstance(seq, Seq)
pssm = model.counts.normalize(pseudocounts=0.5).log_odds()
# p = 1
# for i in range(0, len(seq)):
# nt = seq[i]
# print(model.counts[nt, i])
# p *= model.counts[nt, i]
# '{0:.2f} compared to {}'.format(log(p / 0.25 ** len(seq)),
# pssm.calculate(seq))
return '{0:.2f}'.format(pssm.calculate(seq))
def dinucs(seq):
seq = str(seq)
if len(seq) < (don_len + acc_len + 2):
return []
else:
return [seq[i:(i + 2)] for i in range(don_len, len(seq) - acc_len - 2)]
def score_dinucleotides(seq, model):
# sub score_2mer
# {
# my($seq, $model) = @_;
# my $score = 0;
# for (my $i = 0; $i < length($seq) -1; $i++) {
# my $di = substr($seq, $i, 2);
# $score += log($model->{$di} / 0.0625); # ha, because I'm lazy
# }
# return sprintf "%.1f", 100 * $score;
# }
from math import log
score = 0
for di in dinucs(seq):
score += log(model[di] / 0.0625)
return '{0:.2f}'.format(score * 100)
def pseudo_score_site2(seq, model):
from random import randrange
return randrange(2)
def get_exon_id(header): # Gives each record.name the exon coords septed by |
return (re.match('.*transcript_name1="([^"]+)"', header).group(1),
'|'.join(re.findall('exon_chrom_[star|end]+="([\d;]+)"', header)),
header)
def get_pep_id(header):
return re.match('.*peptide_name="([^"]+)"', header).group(1)
def strip_introns(fasta, verb=None, test=False, min_intron_len=35,
max_intron_len=10000, multi_species=False, peptide=''):
# want the chrom (refers to coordinates)
intron_file = '{}_introns_1.FASTA'.format(fasta[:-6])
p_head = ''
if peptide != '':
peptide_dict = SeqIO.index(peptide, "fasta", key_function=get_pep_id)
p_head = ' pep'
headline = '# id chr beg end str n/m len gc ambig?{} seq\n'.format(p_head)
enough_introns = False
don_motif = {}
acc_motif = {}
dinuc_motif = {}
dinuc_dist = {}
with open(fasta) as handle:
o = open(intron_file, 'w')
o.write(headline)
example = 0
don = {}
acc = {}
dinuc = {}
for seq_record in SeqIO.FastaIO.FastaIterator(handle,
title2ids=get_exon_id):
if verb:
print ("Seq Record: " + seq_record.name)
chrom = re.match('.+chr_name1="([^"]+)"',
seq_record.description).group(1)
if 'scaffold' in chrom:
if verb:
print ('Scaffolding skipped!')
continue
exon_positions = {}
pos = ['beg', 'end']
r = seq_record.name.split('|')
for i in range(2):
exon_positions[pos[i]] = [int(x) for x in r[i].split(';')]
strand = int(re.match('.+gene_chrom_strand="([^"]+)"',
seq_record.description).group(1))
species = re.match('.+organism_name="([^"]+)"',
seq_record.description).group(1)
if verb:
print ('strand: ', strand)
start = int(re.match('.+transcript_chrom_start="([^"]+)"',
seq_record.description).group(1))
intron_count = len(exon_positions['beg']) - 1 # Is this right?
if verb:
print ('Exons:')
for i in range(0, intron_count + 1):
print (
'{} - b: {} e: {}'.format(i + 1, exon_positions['beg'][i],
exon_positions['end'][i]))
# print ('There should be {} introns.'.format(intron_count))
intron_positions = {'beg': [], 'end': []}
if verb:
print ('Introns: ')
for i in range(1, intron_count+1): # Strand represented by 1 or -1
# if strand > 0:
intron_positions['beg'].append(exon_positions['end'][i-1]+1)
intron_positions['end'].append(exon_positions['beg'][i] - 1)
# else:
# intron_positions['beg'].append(exon_positions['end'][i] + 1)
# intron_positions['end'].append(exon_positions['beg'][i-1]-1)
if verb:
for i in range(0, intron_count):
print ('{} - b: {} e: {}'.format(i+1, intron_positions['beg'][i],
intron_positions['end'][i]))
# return intron_positions # Is this all I want? Won't work with
# per transcript loop
introns = []
for i in range(0, intron_count):
# intron = ''
if strand > 0:
intron = seq_record.seq[intron_positions['beg'][i] -
start:intron_positions['end'][i] -
start]
else:
intron = seq_record.seq[intron_positions['beg'][i] -
start:intron_positions['end'][i] -
start]
# intron = seq_record.seq[intron_positions['end'][i] -
# start:intron_positions['beg'][i] -
# start]
intron = intron.reverse_complement()
introns.append(intron)
if verb:
print ('The introns of {} are '.format(seq_record.id))
for x in introns:
print (str(x))
# Gather further info for output
strand = int(re.match('.+gene_chrom_strand="([^"]+)"',
seq_record.description).group(1))
if strand > 0:
strand_sym = '+'
else:
strand_sym = '-'
# Output
s = 1
if species not in don:
don[species] = []
acc[species] = []
dinuc[species] = []
dinuc_motif[species] = []
for x in introns:
# If intron is not anomalous...
if not (len(x) > max_intron_len or len(x) < min_intron_len):
# Setting up donor and acceptor tables
# upper is good???
don[species].append(x.upper()[:don_len])
acc[species].append(x.upper()[-acc_len:])
dinuc[species].extend(dinucs(x))
beg = intron_positions['beg'][s-1]
end = intron_positions['end'][s-1]
l = abs(end - beg)
intron_set = '{}/{}'.format(s, intron_count)
order = [seq_record.id, species, chrom, str(beg), str(end),
strand_sym, intron_set, str(l)]
order.extend(analyze_intron(x))
if peptide != '':
pep_id = get_pep_id(seq_record.description)
order.append(str(len(peptide_dict[pep_id])))
order.append(str(x))
o.write('\t'.join(order)+'\n')
s += 1
example += 1
if example > 4 and test:
break
# delete output file if not enough_introns?
o.close()
for species in don:
don_motif[species] = motifs.create(don[species])
acc_motif[species] = motifs.create(acc[species])
# dinuc_motif[species] = motifs.create(dinuc[species])
dinuc_dist[species] = {}
for di in dinuc[species]:
try:
dinuc_dist[species][di] += 1
except KeyError:
dinuc_dist[species][di] = 1
with open(intron_file) as out1:
intron_file_2 = '{}_introns_2.FASTA'.format(fasta[:-6])
out2 = open(intron_file_2, 'w')
if peptide != '':
headline = '# id chr beg end str n/m len gc' +\
' ambig? pep don acc 2mer seq\n'
else:
headline = '# id chr beg end str n/m len gc' + \
' ambig? don acc 2mer seq\n'
out2.write(headline)
lines = out1.readlines()
good_ones = 0
for line in lines[1:]:
intron = line.split()[-1]
if len(intron) > max_intron_len or len(intron) < min_intron_len:
continue
species = line.split()[1]
good_ones += 1
d = score_site(Seq(intron[:don_len],
don_motif[species].alphabet),
don_motif[species])
a = score_site(Seq(intron[-acc_len:],
acc_motif[species].alphabet),
acc_motif[species])
di_score = score_dinucleotides(intron, dinuc_dist[species])
order = ('\t'.join(line.split()[:-1]), d, a, di_score, intron)
out2.write('\t'.join(order)+'\n')
out2.close()
if len(lines) == 0:
print ('Requires Python 3 for additional processing')
else:
print ('Processed {} good introns out of {}'.format(good_ones,
len(lines) - 1))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="""Make intron fasta files""")
parser.add_argument("file_name",
help="fasta file input")
parser.add_argument("p_file", nargs="?",
help="Optional peptide file", default="")
parser.add_argument('--verbose', '-v', action='count', default=0,
help='Multiple flags increase verbosity')
parser.add_argument('-test', '-t', action='store_true',
help='Do not store the data')
# parser.add_argument('-ms', '-multispecies', action='store_true',
# help='Account for multiple species in the FASTA')
args = parser.parse_args()
# interp input
strip_introns(args.file_name, args.verbose, args.test, peptide=args.p_file)
# gene_name1="AT2G32350" transcript_name1="AT2G32350.1" organism_name="Athaliana_Araport11" chr_name1="Chr2" gene_chrom_start="13734945" gene_chrom_end="13735788" gene_chrom_strand="1" transcript_id="37375937" transcript_chrom_start="13734945" transcript_chrom_end="13735788" peptide_name="37375937_peptide" exon_chrom_start="13734945;13735345" exon_chrom_end="13735263;13735788" exon_cds_start="13734979;13735345" exon_cds_end="13735263;13735788" 5_utr_start="13734945" 5_utr_end="13734978" 3_utr_start="" 3_utr_end="" | true |
2c557903a396e2830cc1f890eb008ed2045ed46d | Python | vijaykanth1729/FLASK-REST-FRAMEWORK | /app2.py | UTF-8 | 2,240 | 2.59375 | 3 | [] | no_license | from flask import Flask, jsonify, request
from flask_restful import Resource,Api
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
import os
import datetime
basedir = os.path.abspath(os.path.dirname(__file__))  # folder containing this file
app = Flask(__name__)
api = Api(app)  # NOTE(review): created but no Resource is ever registered on it
# SQLite database file lives next to this module.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'myflask.db')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)  # Marshmallow serializer bound to the Flask app
class Product(db.Model):
    """SQLAlchemy model for a product row: id, name, description."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    description = db.Column(db.String(100))
    def __init__(self, name, description):
        self.name = name
        self.description = description
    def __str__(self):
        return f"{self.name}"
class ProductSchema(ma.Schema):
    """Marshmallow schema selecting which Product fields are serialized."""
    class Meta:
        fields = ('id', 'name', 'description')
product_schema = ProductSchema()  # serializer for a single Product
products_schema = ProductSchema(many=True)  # serializer for a list of Products
@app.route('/')
def home():
    """Root endpoint: return a static HTML greeting."""
    page = "<h1>Welcome to Flask</h1>"
    return page
@app.route('/products/',methods=['GET','POST'])
def products_list():
    """List all products (GET) or create one from a JSON body (POST).

    POST expects {"name": ..., "description": ...} and echoes the created
    row back as JSON.
    """
    if request.method == "GET":
        products = Product.query.all()
        return products_schema.jsonify(products)
    if request.method == "POST":
        # NOTE(review): assumes the JSON body always contains 'name' and
        # 'description'; a missing key raises and surfaces as a 500 -- confirm.
        name = request.json['name']
        description = request.json['description']
        new_data = Product(name,description)
        db.session.add(new_data)
        # commit so the new row gets its generated id before serializing
        db.session.commit()
        return product_schema.jsonify(new_data)
@app.route('/products/<id>/',methods=['GET','PUT', 'DELETE'])
def products_detail(id):
    """Fetch (GET), update (PUT), or delete (DELETE) one product by id.

    NOTE(review): Product.query.get(id) returns None for an unknown id, so
    PUT/DELETE on a missing row raises (500) -- consider get_or_404.
    """
    if request.method == "GET":
        product = Product.query.get(id)
        return product_schema.jsonify(product)
    if request.method == "PUT":
        # assumes the JSON body has both 'name' and 'description' -- TODO confirm
        name = request.json['name']
        description = request.json['description']
        product = Product.query.get(id)
        product.name = name
        product.description = description
        db.session.commit()
        return product_schema.jsonify(product)
    if request.method == "DELETE":
        product = Product.query.get(id)
        db.session.delete(product)
        db.session.commit()
        # echoes the deleted row back to the client
        return product_schema.jsonify(product)
if __name__ == '__main__':
    app.run(debug=True)  # development server only; disable debug in production
| true |
809860db3fb709c0e3f5f875f384de5ff9c8bd17 | Python | daodao10/rq-bak | /dao_adjust_position_impl.py | UTF-8 | 5,145 | 2.546875 | 3 | [] | no_license | '''=============调仓制器============='''
from dao_strategy_base import *
import dao_strategy_util as dutil
'''---------------再平衡规则--------------'''
class Rebalance(Adjust_position):
    """Rebalancing adjuster.

    Sells holdings that dropped out of the candidate list, then opens (and,
    optionally, re-targets) equal-weight positions in the best candidates,
    holding at most `max_position_count` names at once.
    """

    def __init__(self, params):
        self.max_position_count = params.get('max_position_count', 3)
        self.max_weight_per_position = params.get('max_weight_per_position', 0.5)
        # True: keep only names in the FULL candidate list;
        # False: keep only names in the top `max_position_count` candidates.
        self.unlimited_selling = params.get('unlimited_selling', False)
        # True: also re-submit orders for names already held, pulling
        # position weights back toward equal.
        self.rebalance_enabled = params.get('rebalance_enabled', True)

    def update_params(self, context, params):
        """Refresh tunables from `params`, keeping current values as defaults.

        Bug fix: the original updated `self.candidates_enabled`, an attribute
        never initialized in __init__ (so evaluating the .get() default raised
        AttributeError), and `unlimited_selling` could never be updated.
        """
        self.max_position_count = params.get('max_position_count', self.max_position_count)
        self.max_weight_per_position = params.get('max_weight_per_position', self.max_weight_per_position)
        self.unlimited_selling = params.get('unlimited_selling', self.unlimited_selling)
        self.rebalance_enabled = params.get('rebalance_enabled', self.rebalance_enabled)

    def adjust(self, context, bar_dict, buy_stocks):
        """Compute sell/buy lists from `buy_stocks` (ranked best-first) and
        submit the orders."""
        to_sell = []
        to_buy = []
        count = 0        # names that will remain open after this pass
        to_adjust = []   # names we hold AND keep
        # Names we are allowed to keep holding.
        temp_list = buy_stocks if self.unlimited_selling else buy_stocks[0:self.max_position_count]
        for s in context.portfolio.positions:
            if context.portfolio.positions[s].quantity == 0:
                continue
            if s not in temp_list:
                # Only sell when the order could actually fill
                # (not suspended, not pinned at limit-down).
                if not(is_suspended(s)) and self.is_effective_order(bar_dict[s], SIDE.SELL):
                    to_sell.append(s)
                else:
                    count += 1
                    logger.debug('{0} cannot sell [suspended|limit_down]', s)
            else:
                count += 1
                to_adjust.append(s)
        for s in buy_stocks:
            if count >= self.max_position_count:
                break
            if s not in to_adjust:
                if self.is_effective_order(bar_dict[s], SIDE.BUY):
                    to_buy.append(s)
                    count += 1
                else:
                    logger.debug('{0} cannot buy [suspended|limit_up]', s)
        if self.rebalance_enabled:
            # Re-target existing holdings as well so weights equalize.
            to_buy.extend(to_adjust)
        # Place the orders: sells first to free cash for the buys.
        for s in to_sell:
            self.close_position(context, bar_dict[s])
        if len(to_buy) > 0:
            weight = dutil.get_avg_order_weight(count)
            weight = weight if weight <= self.max_weight_per_position else self.max_weight_per_position
            for s in to_buy:
                self.open_position(context, bar_dict[s], weight)

    def is_effective_order(self, bar, side = SIDE.BUY):
        """True when an order on `bar` could fill: last price is not pinned at
        the (2-decimal truncated) limit price in the order's direction."""
        if side == SIDE.BUY:
            return bar.last < dutil.trunc(bar.limit_up, 2)
        else:
            return bar.last > dutil.trunc(bar.limit_down, 2)

    def __str__(self):
        return '再平衡调仓法: 卖出不在<股票池>的股票, 平均%%买入<股票池>里的股票, [持仓股票数目: %d], [单股票仓位 <= %.1f%%], [非限制性卖出: %s], [再平衡: %s]' % (self.max_position_count, self.max_weight_per_position * 100, self.unlimited_selling, self.rebalance_enabled)
'''---------------卖出股票规则--------------'''
class Sell_stocks(Adjust_position):
    """Adjustment rule: sell every held stock that left the buy list."""

    def adjust(self, context, bar_dict, buy_stocks):
        """Close positions not present in `buy_stocks`.

        Stocks that cannot be sold (e.g. suspended) are simply kept, since
        close_position is best-effort.
        (Fix: removed an unused `position` local lookup that had no effect.)
        """
        for stock in context.portfolio.positions.keys():
            if stock not in buy_stocks:
                self.log_debug("stock [%s] won't in buy_stocks" % (stock))
                self.close_position(context, bar_dict[stock])
            else:
                self.log_debug("stock [%s] is still in new position" % (stock))

    def __str__(self):
        return '股票调仓规则: 卖出不在<股票池>的股票'
'''---------------买入股票规则--------------'''
class Buy_stocks(Adjust_position):
    """Adjustment rule: buy from the candidate list until
    `max_position_count` positions are held."""

    def __init__(self, params):
        self.max_position_count = params.get('max_position_count', 3)

    def update_params(self, context, params):
        self.max_position_count = params.get('max_position_count', self.max_position_count)

    def adjust(self, context, bar_dict, buy_stocks):
        # Buy stocks: always try to keep the number of open positions at
        # max_position_count. Cash is split evenly by the TARGET position
        # count, so individual positions are not guaranteed equal weights.
        position_count = dutil.count_positions(context.portfolio.positions)
        if self.max_position_count > position_count:
            weight = dutil.get_avg_order_weight(self.max_position_count)
            for stock in buy_stocks:
                if self.open_position(context, bar_dict[stock], weight):
                    position_count += 1
                    if position_count == self.max_position_count:
                        break

    def __str__(self):
        return '股票调仓买入规则: 现金平分式买入<股票池>的股票'
| true |
4b9d3f8a00e359d2ce669576ddabe7bad530f4da | Python | minuso/leetcode | /0543/diameterOfBinaryTree.py | UTF-8 | 364 | 3.125 | 3 | [] | no_license | def diameterOfBinaryTree(self, root: TreeNode) -> int:
    # LeetCode 543: diameter = longest path between any two nodes, in edges.
    self.res = 0  # best diameter found so far
    def height(root):
        """Return the height of this subtree while updating self.res."""
        if not root: return 0  # empty subtree has height 0 (a leaf gets 1 from the +1 below)
        hl, hr = height(root.left), height(root.right)
        self.res = max(self.res, hl + hr)  # longest path passing through this node, in edges
        return max(hl, hr) + 1
    height(root)
    return self.res
8117da39ffa71af50f87b0617a43ae22db72ad0c | Python | mukul1729/Settings_Preferences | /python/Euler/euler2.py | UTF-8 | 215 | 3.375 | 3 | [] | no_license | first = 1
# Project Euler #2: sum the even-valued Fibonacci terms whose *values* do not
# exceed four million.
#
# Fix: the original capped on the running total (`sum <= 4000000`) inside a
# fixed 100-iteration loop -- which produced the right answer only by
# coincidence -- and it shadowed the builtin `sum`.
first = 1
second = 1
total = 0
while second <= 4000000:
    if second % 2 == 0:  # only even-valued terms count
        total += second
    first, second = second, first + second
print(total)
| true |
0395a187108233cf9af5c410e0a4d056c323b3c5 | Python | suguke/gait-control-direct-id-paper | /src/example_mean_gain_plots.py | UTF-8 | 3,044 | 2.578125 | 3 | [
"CC-BY-4.0",
"CC0-1.0"
] | permissive | #!/usr/bin/env python
"""This script plots the mean of the identified joint isolated gains from
all valid trials. The gains must be precomputed. It currently does not
include trials from Subject 9 because it has odd ankle joint torques."""
# builtin
import os
# external
import numpy as np
from scipy.constants import golden
import matplotlib.pyplot as plt
# local
import utils
PATHS = utils.config_paths()  # project directory layout (figures_dir, ...)

# matplotlib rcParams for publication-quality PS output with LaTeX text.
params = {'backend': 'ps',
          'axes.labelsize': 8,
          'axes.titlesize': 8,
          'font.size': 10,
          'legend.fontsize': 6,
          'xtick.labelsize': 6,
          'ytick.labelsize': 6,
          'text.usetex': True,
          'font.family': 'serif',
          'font.serif': ['Computer Modern'],
          'figure.figsize': (6.0, 6.0 / golden),
          }
plt.rcParams.update(params)

event = 'Longitudinal Perturbation'
structure = 'joint isolated'
# File-name-safe variants: lowercase/space-free slugs used in data lookups.
file_name_safe_event = '-'.join(event.lower().split(' '))
file_name_safe_structure = '-'.join(structure.split(' '))

plot_dir = utils.mkdir(PATHS['figures_dir'])

# Do not include subject 9 in the means because of the odd ankle joint
# torques.
similar_trials = utils.build_similar_trials_dict(bad_subjects=[9])

print('Generating mean gain plots for each speed.')
# One figure per belt speed; also cache the mean gains for the combined plot.
mean_gains_per_speed = {}
for speed, trial_numbers in similar_trials.items():
    mean_gains, var_gains = utils.mean_gains(
        trial_numbers, utils.Trial.sensors, utils.Trial.controls,
        utils.Trial.num_cycle_samples, file_name_safe_event,
        file_name_safe_structure, scale_by_mass=True)
    mean_gains_per_speed[speed] = mean_gains
    fig, axes = utils.plot_joint_isolated_gains(
        utils.Trial.sensors, utils.Trial.controls, mean_gains,
        gains_std=np.sqrt(var_gains), mass=1.0)  # masses are already scaled
    fig.savefig(os.path.join(plot_dir, 'example-mean-gains-' +
                             speed.replace('.', '-') + '.pdf'))
    plt.close(fig)

print('Generating mean gain plot for all speeds.')
# Overlay all three speeds on one 2x3 grid, one linestyle per speed.
fig, axes = plt.subplots(2, 3, sharex=True)
linestyles = ['-', '--', ':']
speeds = ['0.8', '1.2', '1.6']
for speed, linestyle in zip(speeds, linestyles):
    fig, axes = utils.plot_joint_isolated_gains(utils.Trial.sensors,
                                                utils.Trial.controls,
                                                mean_gains_per_speed[speed],
                                                gains_std=np.sqrt(var_gains),
                                                mass=1.0,
                                                axes=axes,
                                                linestyle=linestyle)
axes[0, 0].legend().set_visible(False)
# Build an interleaved right/left legend: Right 0.8, Left 0.8, Right 1.2, ...
right_labels = ['Right ' + speed + ' [m/s]' for speed in speeds]
left_labels = ['Left ' + speed + ' [m/s]' for speed in speeds]
leg = axes[0, 1].legend(list(sum(zip(right_labels, left_labels), ())),
                        loc='best', fancybox=True)
leg.get_frame().set_alpha(0.75)
fig.savefig(os.path.join(plot_dir, 'example-mean-gains-vs-speed.pdf'))
plt.close(fig)
| true |
c8bb2555489806df3f9c710abc4d05af7d4d3b9e | Python | vwarner1411/openrvdas-1 | /logger/readers/network_reader.py | UTF-8 | 2,823 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import logging
import socket
import sys
sys.path.append('.')
from logger.utils.formats import Text
from logger.readers.reader import Reader
BUFFER_SIZE = 4096  # default recv() size in bytes; records larger than this are truncated
################################################################################
# Read to the specified file. If filename is empty, read to stdout.
class NetworkReader(Reader):
  """
  Read text records from a network socket.

  NOTE: tcp is nominally implemented, but DOES NOT WORK!

  TODO: code won't handle records that are larger than 4K right now,
  which, if we start getting into Toby Martin's Total Metadata Ingestion
  (TMI), may not be enough. We'll need to implement something that will
  aggregate recv()'s and know when it's got an entire record?
  """
  ############################
  def __init__(self, network, buffer_size=BUFFER_SIZE):
    """
    network      Network address to read, in host:port format (e.g.
                 'rvdas:6202'). If host is omitted (e.g. ':6202'),
                 read via UDP on specified port.
    """
    super().__init__(output_format=Text)
    self.network = network
    self.buffer_size = buffer_size  # max bytes returned per read()
    if network.find(':') == -1:
      raise ValueError('NetworkReader network argument must be in '
                       '\'host:port\' or \':port\' format. Found "%s"', network)
    (host, port) = network.split(':')
    port = int(port)

    # TCP if host is specified
    if host:
      self.socket = socket.socket(family=socket.AF_INET,
                                  type=socket.SOCK_STREAM,
                                  proto=socket.IPPROTO_TCP)
      # Should this be bind()?
      self.socket.connect((host, port))
    # UDP broadcast if no host specified. Note that there's some
    # dodginess I don't understand about networks: if '<broadcast>' is
    # specified, socket tries to send on *all* interfaces. if '' is
    # specified, it tries to send on *any* interface.
    else:
      host = ''  # special code for broadcast
      self.socket = socket.socket(family=socket.AF_INET,
                                  type=socket.SOCK_DGRAM,
                                  proto=socket.IPPROTO_UDP)
      self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
      try: # Raspbian doesn't recognize SO_REUSEPORT
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, True)
      except AttributeError:
        logging.warning('Unable to set socket REUSEPORT; system may not support it.')
      self.socket.bind((host, port))

  ############################
  def read(self):
    """
    Read the next network packet and return it decoded as a UTF-8 string.
    """
    record = self.socket.recv(self.buffer_size)
    logging.debug('NetworkReader.read() received %d bytes', len(record))
    # NOTE(review): an empty payload (e.g. closed TCP peer) is returned
    # as raw b'' rather than '' -- confirm callers handle both.
    if record:
      record = record.decode('utf-8')
    return record
| true |
5cbd645273a1d8e5be4fe983267a9c6ee60bc209 | Python | H-Cong/LeetCode | /152_MaximumProductSubarray/152_MaximumProductSubarray_2.py | UTF-8 | 1,168 | 3.421875 | 3 | [] | no_license | class Solution:
def maxProduct(self, nums: List[int]) -> int:
if not nums: return None
curr_max = nums[0]
curr_min = nums[0]
ans = curr_max
for i in range(1, len(nums)):
temp_max = max(nums[i], nums[i]*curr_max, nums[i]*curr_min)
curr_min = min(nums[i], nums[i]*curr_max, nums[i]*curr_min)
curr_max = temp_max # you have to use a temp max here because curr_min
# is calculated based on curr_max, you dont want it
# to be changed before curr_min is calculated
ans = max(ans, curr_max)
return ans
# TC: O(n)
# where n is the size of nums. The algorithm achieves linear runtime since we are going through nums only once
# SC: O(1)
# since no additional space is consumed other than the variables that keep track of the maximum product so far,
# the minimum product so far, the current value, a temporary, and a placeholder for the result.
# ref: https://leetcode.com/problems/maximum-product-subarray/solution/
| true |
a8a2093fee616ea5a5fc8629de0a5eaf2af80898 | Python | SeveralCamper/USE-2020-2021 | /16 задание/РекурсФункцСТекст_011.py | UTF-8 | 459 | 3.984375 | 4 | [] | no_license | # Задание 16 № 11347
# Below, a recursive algorithm F is given (in five programming languages in
# the original task). What is the sum of the numbers printed on screen when
# F(10) is called?
count = 0  # running sum of every number F prints


def F(n):
    """Print n and recurse on n-3 and n-4 while n > 2, accumulating into `count`."""
    global count
    if n > 2:
        count += n
        print(n)
        F(n - 3)
        F(n - 4)


print(F(10), count)
# Answer: 33
19abe0a590230023d4e7ad61587796d573f023f9 | Python | lufeng0614/leetcode | /leetcode-349.py | UTF-8 | 773 | 3.8125 | 4 | [] | no_license |
给定两个数组,编写一个函数来计算它们的交集。
示例 1:
输入: nums1 = [1,2,2,1], nums2 = [2,2]
输出: [2,2]
示例 2:
输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
输出: [4,9]
说明:
输出结果中每个元素出现的次数,应与元素在两个数组中出现的次数一致。
我们可以不考虑输出结果的顺序。
========================================================================
class Solution(object):
    def intersect(self, nums1, nums2):
        """Return the multiset intersection of nums1 and nums2.

        Each common element appears as many times as it occurs in both
        lists (the minimum of its two counts); order is unspecified.
        """
        result = []
        for value in set(nums1) & set(nums2):
            occurrences = min(nums1.count(value), nums2.count(value))
            result.extend([value] * occurrences)
        return result
3ef7a9af6293aec84092afa67ff607757ccf3c67 | Python | jdenoc/meta-search | /cgi/engine_searcher.py | UTF-8 | 3,253 | 3.46875 | 3 | [] | no_license | #!/usr/bin/python
##
## Filename: engine_searcher.py
## Version: 6.5.4
## This file opens search engine webpages and takes the links from them.
## This file contains functions that:
## opens sites, retrieves site code, closes site & then returns site code
## recieves the strings found in the sites & then trims them down to just the urls
## searches through DuckDuckGo, Bing, Yahoo! sites for usable url links
## searches HTML code of DuckDuckGo, Bing, Yahoo! for a link to the next page of results
##
import urllib
import re
def open_doc(link):
    """Fetch `link` and return the raw page source as a string.

    Fix: the connection is now closed even if read() raises (the original
    leaked the handle on a read error).
    """
    site = urllib.urlopen(link)  # opens url to search through
    try:
        read_file = site.read()  # reads url (file) text
    finally:
        site.close()  # done with url (file), so close it
    return read_file
# END open_doc
def link_trimmer(site_links):
# recieves the strings found in the sites & then trims them down to just the urls
trimmed_links = []
for link in site_links:
url = link[1]
title = link[3]
trimmed_links.append((url, title)) # appends a tuple containing the site address & title to a list
return trimmed_links
# END link_trimmer
################### individual search engine link finders ###################
link_criterion = "[+',\s$=;@?!%&:\w./_()#-]+"
def link_finder_ddgo(code):
# searches through DuckDuckGo site for usable url links
url_match_ddgo = re.findall(r'(<a rel="nofollow" class="large" href=")('+link_criterion+')(">)(.+)(</a>)', code)
ddgo_links = link_trimmer(url_match_ddgo)
return ddgo_links
# END link_finder_ddgo
def link_finder_bing(code):
# searches through Bing site for usable url links
url_match_bing = re.findall(r'(<h3><a href=")('+link_criterion+')(" h=".+?">)(.+?)(</a>)', code) # (.+?) This indicates 1 or more of any character, but in a non-greedy form
if url_match_bing:
bing_links = link_trimmer(url_match_bing)
return bing_links
# END link_finder_bing
def link_finder_yahoo(code):
# searches through Yahoo site for usable links
url_match_yahoo = re.findall(r'(<h3><a id=".+?" class="yschttl spt" href=")('+link_criterion+')(.+?>)(.+?)(</a>)', code)
if url_match_yahoo:
yahoo_links = link_trimmer(url_match_yahoo)
return yahoo_links
# END link_finder_yahoo
def next_page_ddgo(code):
# searches HTML code of DuckDuckGo site for a link to next page of results
url_match_next = re.search(r'(<!-- <a rel="next" href=".html.+q=)('+link_criterion+')(">Next Page ></a> //-->)', code)
if url_match_next:
next_page = 'http://duckduckgo.com/html/?q=' + url_match_next.group(2)
return next_page
# END next_page_ddgo
def next_page_bing(search, page_num):
# produces links for next page of results in Bing
num = str(page_num+1)
next_page = 'http://www.bing.com/search?q='+search+'&first='+num+'1&FORM=PORE'
return next_page
# END next_page_bing
def next_page_yahoo(code):
# searches HTML code of Yahoo site for a link to next page of results
url_match_next = re.search(r'(<a id="pg-next" href=")('+link_criterion+')(">Next ></a>)', code)
if url_match_next:
next_page = url_match_next.group(2)
return next_page
# END next_page_yahoo
################### END of individual search engine link finders ###################
| true |
4330303575a8508180fa48e049ae71d9ca1ac46e | Python | michaelb/point-clustering | /generation_test.py | UTF-8 | 1,432 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Module de création de fichiers d'ensembles de test
"""
from math import floor, sqrt
from random import random
import numpy as np
def generation_fichier(nb_points, distance_liaison, chemin="qg2 > gq1.pts"):
with open(chemin, "w") as fichier:
fichier.write(str(distance_liaison) + "\n")
for _ in range(nb_points//2):
fichier.write("{}, {}\n".format(str(random()),str(random())))
for _ in range(nb_points//4):
fichier.write("{}, {}\n".format(str(random()/2+0.25),str(random()/10+0.6)))
for _ in range(nb_points//4):
fichier.write("{}, {}\n".format(str((random()>0.5)/2+0.15+random()/10),str(random()/10+0.3)))
def generation_ensemble_de_test():
for n in [1,0.5,0.25,0.1,0.05,0.4,0.3,0.2,0.1,0.09,0.8,0.08,0.07,0.06,0.05,0.04,0.03,0.02,0.01,0.009,0.008,0.007,0.006,0.004,0.005,0.003,0.002,0.001]:
generation_fichier(10000,n, "test_{}.pts".format(str(n)))
def generation_quadrillage(distance):
with open("test_quadrillage", "w") as fichier:
fichier.write(str(distance) + "\n")
distance -= 0.000000001
x = 0
while x < 1:
y = 0
while y < 1:
fichier.write("{},{}\n".format(str(x),str(y)))
y += distance
x += distance
#generation_quadrillage(0.01)
#generation_ensemble_de_test()
generation_fichier(10000,0.01)
| true |
2cf0d370c351d4e2c1831fb6e05d3cd582b070b9 | Python | gordian-biotechnology/arboreto | /scripts/run_diff_seeds_dream5_standardized.py | UTF-8 | 2,918 | 2.625 | 3 | [
"BSD-3-Clause"
] | permissive | """
Python script for running Arboreto multiple times on the DREAM5 dataset,
initialized with a different random seed for each run.
The objective is to assess the stability of the inference quality of GRNBoost2
compared with GENIE3/Arboreto and the GENIE3 results as reported in the Dream5 paper.
"""
import pandas as pd
import time
from arboreto.algo import genie3, grnboost2
from arboreto.utils import load_tf_names
from distributed import Client
from sklearn.preprocessing import StandardScaler
DEFAULT_N_RUNS = 100
wd = '../resources/dream5/'
net1_expression = wd + 'net1/net1_expression_data.tsv'
net3_expression = wd + 'net3/net3_expression_data.tsv'
net4_expression = wd + 'net4/net4_expression_data.tsv'
net1_tfs = wd + 'net1/net1_transcription_factors.tsv'
net3_tfs = wd + 'net3/net3_transcription_factors.tsv'
net4_tfs = wd + 'net4/net4_transcription_factors.tsv'
datasets = [('net1', net1_expression, net1_tfs),
('net3', net3_expression, net3_tfs),
('net4', net4_expression, net4_tfs)]
# algo = 'grnboost2'
algo = 'genie3'
out_dir = '../output/dream5/{}.std/'.format(algo)
seeds = [seed * 100 for seed in range(0, 100)]
# seeds = [seed * 100 for seed in range(0, 1)]
# dry_run = True
dry_run = False
def run_algo(client, algo_name, seed_value):
if algo_name == 'genie3':
inf_algo = genie3
elif algo_name == 'grnboost2':
inf_algo = grnboost2
else:
raise ValueError('Houston, we have a problem between desk and chair.. ({})'.format(algo_name))
scaler = StandardScaler()
for network_name, exp_path, tfs_path in datasets:
start_time = time.time()
print('inferring {0} with seed {1}'.format(network_name, seed))
exp_matrix = pd.read_csv(exp_path, sep='\t')
scaled_values = scaler.fit_transform(exp_matrix)
exp_matrix_scaled = pd.DataFrame(scaled_values, columns=exp_matrix.columns)
tf_names = load_tf_names(tfs_path)
network_df = inf_algo(client_or_address=client,
expression_data=exp_matrix_scaled,
tf_names=tf_names,
seed=seed_value,
limit=100000)
inf_time = time.time()
delta_time = inf_time - start_time
print('inferred {0} with seed {1} in {2} seconds'.format(network_name, seed, str(delta_time)))
network_out_path = '{0}{1}.seed_{2}.csv'.format(out_dir, network_name, seed)
network_df.to_csv(network_out_path, sep='\t', index=None, header=None)
print('{0} with seed {1} written to {2}'.format(network_name, seed, network_out_path))
if __name__ == '__main__':
client = Client()
print(str(client))
for seed in seeds[:3]:
print('running {0} with seed {1}'.format(algo, seed))
if not dry_run:
run_algo(client, algo, seed)
client.shutdown()
| true |
7cf04d1c5245584073def0da55639f4bb8ed396d | Python | alex-rt/DistributedSystem | /processing.py | UTF-8 | 2,244 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
#MASSIVE DATA MANAGEMENT
#PARCIAL 2
#ALEJANDRO RODRIGUEZ TRILLO
#ANDRESS ARANA BEJAR
import pymongo
import numpy as np
import pandas as pd
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["Parcial1"]
mycol = mydb["alejandro_rodriguez_covid"]
# In[2]:
result = mycol.aggregate([
{ "$match": {"CLASIFICACION_FINAL": "3" }},
{ "$group": {
"_id":
"$ENTIDAD_RES"
,
"count": { "$sum": 1 }
}}
])
# In[3]:
datos = (list(result))
df = pd.DataFrame(datos)
df = df.apply(pd.to_numeric)
# In[4]:
newdf = df.sort_values(by= "_id")
newnew = newdf.reset_index(drop=True)
# In[5]:
newnew
# In[6]:
estados = [["Aguascalientes"],
["Baja California"],
["Baja California Sur"],
["Campeche"],
["Coahuila de Zaragoza"],
["Colima"],
["Chiapas"],
["Chihuahua"],
["Ciudad de México"],
["Durango"],
["Guanajuato"],
["Guerrero"],
["Hidalgo"],
["Jalisco"],
["Estado de México"],
["Michoacán de Ocampo"],
["Morelos"],
["Nayarit"],
["Nuevo León"],
["Oaxaca"],
["Puebla"],
["Querétaro"],
["Quintana Roo"],
["San Luis Potosí"],
["Sinaloa"],
["Sonora"],
["Tabasco"],
["Tamaulipas"],
["Tlaxcala"],
["Veracruz de Ignacio de la Llave"],
["Yucatán"],
["Zacatecas"]]
# In[9]:
df_new = pd.concat([newnew, pd.DataFrame(estados)], axis=1)
df_new.columns = ["ID", "CASOS CONFIRMADOS", "ESTADO"]
df_new = df_new[["ID", "ESTADO", "CASOS CONFIRMADOS"]]
# In[10]:
df_new
# In[21]:
result2 = mycol.aggregate([
{ "$match": { "$and": [ {"CLASIFICACION_FINAL": "3" }, { "FECHA_DEF": { "$ne": '9999-99-99'}} ]}},
{ "$group": {
"_id":
"$ENTIDAD_RES"
,
"count": { "$sum": 1 }
}}
])
# In[22]:
datos2 = (list(result2))
df2 = pd.DataFrame(datos2)
df2 = df2.apply(pd.to_numeric)
# In[23]:
newdf2 = df2.sort_values(by= "_id")
newnew2 = newdf2.reset_index(drop=True)
# In[24]:
newnew2
# In[25]:
newnew2.columns = ["ID", "FALLECIMIENTOS"]
del newnew2["ID"]
# In[26]:
newnew2
# In[27]:
resultado = pd.concat([df_new, newnew2], axis=1)
# In[28]:
resultado.columns = ["ID", "ESTADO", "CASOS CONFIRMADOS", "FALLECIMIENTOS"]
# In[29]:
resultado
# In[36]:
resultado.to_csv('output_covid.csv', encoding='utf-8')
# In[ ]:
| true |
af6fcee9518d537c001aeae5bd62f9fa7c240130 | Python | mohmah7/osajana_kehitaminen-korkeakoulu | /harij08/L08T05.py | UTF-8 | 699 | 3.90625 | 4 | [] | no_license | first_number = input("Please input number :")
if first_number > 0:
first_number = first_number
else:
first_number = 0;
second_number = input("Please input number :")
if second_number > 0:
second_number = second_number
else:
second_number =0
third_number = input("Please input number :")
if third_number > 0:
third_number = third_number
else:
third_number=0
fourth_number = input("Please input number :")
if fourth_number > 0:
fourth_number = fourth_number
else:
fourth_number = 0
fifth_number = input("Please input number :")
if fifth_number > 0 :
fifth_number = fifth_number
else:
fifth_number =0
print("Sum is", first_number+second_number+third_number+fourth_number+fifth_number) | true |
06308053fe2bdf77f62ec4cda964e54e8b668a0a | Python | CCedricYoung/bitesofpy | /333/test_metropolis_hastings.py | UTF-8 | 2,020 | 3.234375 | 3 | [] | no_license | import math
import numpy as np
import pytest
from metropolis_hastings import metropolis_hastings
np.random.seed(42)
def norm_dist(x, mean, std):
"""Gaussian normal probability distribution."""
return np.exp(-0.5 * (x - mean) ** 2 / std ** 2)
def standard_norm_dist(x):
"""Gaussian normal standard probability distribution."""
return norm_dist(x, mean=0, std=1)
def custom_norm_dist(x):
"""Gaussian normal probability distribution with mean of 1 and standard deviation of two."""
return norm_dist(x, mean=1, std=2)
def exp_dist(x, lambda_):
"""Exponential probability distribution."""
return lambda_ * np.exp(-lambda_ * x) if x >= 0 else 0
def custom_exp_dist(x):
return exp_dist(x, lambda_=10)
def test_correctness():
samples = metropolis_hastings(standard_norm_dist, 0, 100)
assert len(samples) == 100
assert isinstance(samples, np.ndarray)
assert np.issubdtype(samples.dtype, np.floating)
def test_raise_exception_for_non_functions():
with pytest.raises(TypeError):
samples = metropolis_hastings(sum([]))
@pytest.mark.parametrize("n_samples", [-1, -10, -100, 0,])
def test_raise_exception_for_wrong_n_samples(n_samples):
with pytest.raises(ValueError):
samples = metropolis_hastings(standard_norm_dist, 0, n_samples)
@pytest.mark.parametrize(
"f, x_0, expected_mean, expected_std",
[
(standard_norm_dist, 0, 0, 1),
(standard_norm_dist, 1, 0, 1),
(standard_norm_dist, -1, 0, 1),
(custom_norm_dist, 0, 1, 2),
(custom_norm_dist, 1, 1, 2),
(custom_norm_dist, -1, 1, 2),
(custom_exp_dist, 0, 1 / 10, math.sqrt(1 / 10 ** 2)),
],
)
def test_univariate_functions(f, x_0, expected_mean, expected_std):
samples = metropolis_hastings(f, x_0)
np.testing.assert_almost_equal(samples.mean(), expected_mean, decimal=1)
np.testing.assert_almost_equal(samples.std(), expected_std, decimal=1) | true |
4fd4f9708b5e36d88a486f99d728710a26307ed4 | Python | ripsj/logic | /DeterminadorDireccion.py | UTF-8 | 811 | 4.03125 | 4 | [] | no_license |
def esPar(num):
if (num % 2) == 0:
return True
else:
return False
T = int(input("How many matrices would you like to test?"))
arregloPares = []
for x in range(T):
arregloPares.append(input("Enter 'N' and 'M' values for matrix number " + str(x+1) + ", spaced by a whitespace: "))
for i in range(len(arregloPares)):
arregloSeparado = arregloPares[i].split()
fila = int(arregloSeparado[0])
columna = int(arregloSeparado[1])
if fila == columna:
if esPar(fila):
print("L")
else:
print("R")
elif fila < columna:
if esPar(fila):
print("L")
else:
print("R")
elif fila > columna:
if esPar(columna):
print("U")
else:
print("D")
| true |
87155d69da09ab877704089ee80120d5858ed152 | Python | SamLR/MuSIC5_offline_analysis | /MuSIC5_offline_analysis/scripts/rates_from_count.py | WINDOWS-1252 | 4,396 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Calculate simulated muon rates based on truth count of number of muons
of each type.
"""
from ValueWithError import ValueWithError
from root_utilities import make_canvas
from ROOT import TGraphErrors
from array import array
from time import sleep
# g4bl = True
g4bl = False
n_protons_g4bl = 9e8
n_mu_pos_g4bl = 86710
n_mu_neg_g4bl = 9009
n_mu_pos_per_p_g4bl = n_mu_pos_g4bl/n_protons_g4bl
if g4bl:
n_protons = n_protons_g4bl
else:
n_protons = 5e5/n_mu_pos_per_p_g4bl
n_mu_neg_to_mu_pos = float(n_mu_neg_g4bl)/n_mu_pos_g4bl
class SimInfo(object):
"""docstring for SimInfo"""
def __init__(self, deg_dz, n_cu, n_f, n_protons=n_protons):
super(SimInfo, self).__init__()
self.deg_dz = deg_dz
self.n_cu = n_cu
self.n_f = n_f
self.n_p = n_protons
def __str__(self):
print_fmt = "Degrader: {}, cu: {}, free:{}"
deg = self.degrader.replace("_", " ")
return print_fmt.format(deg, self.n_cu, self.n_f)
def __repr__(self):
print_fmt = "{}: <cu:{} f:{}>"
return print_fmt.format(self.degrader, self.n_cu, self.n_f)
def get_rate(self, target):
if target=="f":
return self.n_f/self.n_p
elif target=="cu":
return self.n_cu/self.n_p
def get_counts(file_name, g4bl):
"""
Read the count values from the txt output of analyse_simulation.py
and return them
"""
with open(file_name) as in_file:
# throw away the first two lines of header
in_file.readline()
in_file.readline()
res = {}
for line in in_file:
# format: Ai | 5 | 88130 | 11722108 | 96562 | 11613220 | 758/1243
mat, dz, cu, f, junk, junk2, junk3 = map(str.strip, line.split("|"))
f = f.split("+/-")
cu = cu.split("+/-")
key = float(dz) if mat == "Al" else 0.0
if key not in res: res[key] = {'n_cu':0.0, 'n_f':0.0}
if cu[0] == "na":
# it's a mu+ measurement
res[key]["n_cu"] += 0.0
res[key]["n_f"] += ValueWithError(*f)
elif g4bl:
res[key]["n_cu"] += ValueWithError(*cu)
res[key]["n_f"] += ValueWithError(*f)
else:
# it's a mu- measurement so scale it
res[key]["n_cu"] += ValueWithError(*cu) * n_mu_neg_to_mu_pos
res[key]["n_f"] += ValueWithError(*f) * n_mu_neg_to_mu_pos
for key, counts in res.items():
res[key] = SimInfo(deg_dz=key, **counts)
return res
def set_graph_values(graph, title, x_title, y_title):
graph.SetTitle(title)
graph.GetXaxis().SetTitle(x_title)
graph.GetXaxis().SetRangeUser(-1.0, 6)
graph.GetYaxis().SetTitle(y_title)
def make_plots(data, img_name):
"""
make_plot(rates)
"""
x_order = (0, 0.5, 1, 5)
x = array('f', x_order)
x_er = array('f', [k*0.01 for k in x_order]) # assumed errors
cu = array('f', [data[k].get_rate("cu").value for k in x_order])
cu_er = array('f', [data[k].get_rate("cu").error for k in x_order])
f = array('f', [data[k].get_rate("f").value for k in x_order])
f_er = array('f', [data[k].get_rate("f").error for k in x_order])
cu_graph = TGraphErrors(len(x), x, cu, x_er, cu_er)
set_graph_values(cu_graph, "Simulated copper muon rates", "degrader thickness (mm)", "Muons per proton")
f_graph = TGraphErrors(len(x), x, f, x_er, f_er)
set_graph_values(f_graph, "Simulated free muon rates", "degrader thickness (mm)", "Muons per proton")
canvas = make_canvas("Simulated muon rates (from direct counts)", n_x=2, n_y=1, resize=True)
canvas.cd(1)
cu_graph.Draw("ALP")
canvas.cd(2)
f_graph.Draw("ALP")
canvas.Update()
canvas.SaveAs(img_name+".svg")
canvas.SaveAs(img_name+".png")
sleep (5)
def run(file_name, g4bl):
counts = get_counts(file_name, g4bl)
print "dz | {:^20s} | {:^20s}".format("cu", "f")
for key in (0.0, 0.5, 1.0, 5.0):
print "{} | {} | {}".format(key, counts[key].n_cu,counts[key].n_f)
if g4bl:
img_name = "images/g4bl_simulated_muon_rates_from_counts"
else:
img_name = "images/simulated_muon_rates_from_counts"
make_plots(counts, img_name)
def main():
if g4bl:
file_name = "output_txt/g4bl_simulation_counts_and_integrals_loose_f_16ns_bins.txt"
else:
file_name = "output_txt/simulation_counts_and_integrals_loose_f_16ns_bins.txt"
run(file_name, g4bl)
if __name__=="__main__":
main() | true |
974a98368a93e7d04ced6cf06c706dcdeda7d442 | Python | GuangyuanZhao/ee239as | /packages/geomag/emm.py | UTF-8 | 7,882 | 2.65625 | 3 | [] | no_license | # encoding: utf-8
# module geomag.emm
# from C:\Users\qinli\AppData\Local\Programs\Python\Python36\lib\site-packages\geomag\emm.cp36-win_amd64.pyd
# by generator 1.145
# no doc
# imports
import builtins as __builtins__ # <module 'builtins' (built-in)>
import ntpath as path # C:\Users\qinli\AppData\Local\Programs\Python\Python36\lib\ntpath.py
from time import gmtime
# Variables with simple values
__path__ = None
# no functions
# classes
class EMMBase(object):
# no doc
def bibtex(self, *args, **kwargs): # real signature unknown
pass
def citation(self, *args, **kwargs): # real signature unknown
pass
def decimal_year(self, t): # real signature unknown; restored from __doc__
"""
decimal_year(t)
convert a time.struct_time, datetime.date, datetime.datetime, or UTC timestamp into a decimal year
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
__pyx_vtable__ = None # (!) real value is ''
class EMMMesh(EMMBase):
"""
EMMMesh(str mesh_fname, str svmesh_fname, bool delay_load=False)
mesh_fname: filename of EMM static mesh
secmesh_fname: filename of EMM secular variation mesh
delay_load: if True, meshes will not be loaded until load() or a function requiring them is called
This class wraps NOAA's Enhanced Magnetic Model (EMM) Mesh routines.
These routines use less CPU time than EMMSph, but have a larger memory footprint.
"""
def compute_field(self, lat, lon, height, year, geodetic=True, compute_change=False): # real signature unknown; restored from __doc__
"""
compute_field(lat, lon, height, year, geodetic = True, compute_change = False)
Parameters:
lat - latitude, in degrees
lon - longitude, in degrees
height - height above EGM96 mean sea level, or WGS-84 ellipsoid if geodetic = False
year - date, in decimal years
geodetic - if true, use EGM96 mean sea level as reference for height, otherwise use WGS-84 ellipsoid
compute_change - if true, compute secular variation of magnetic field (rate of chage per year)
Returns a GeoMagneticElements object with the results.
"""
pass
def declination(self, lat, lon, height, year, geodetic=True): # real signature unknown; restored from __doc__
"""
declination(lat, lon, height, year, geodetic = True)
Angle (deg) between the magnetic field vector and true north, positive east.
"""
pass
def is_loaded(self): # real signature unknown; restored from __doc__
"""
is_loaded()
Return True if mesh files have been loaded
"""
pass
def load(self, mesh=None, secmesh=None): # real signature unknown; restored from __doc__
"""
load(mesh=None,secmesh=None)
Load the specified mesh files, or if delay_load=True was specified in the constructor, load those files.
Paramters:
mesh, secmesh - filenames for mesh or secular variation mesh. Both are optional
No return value.
"""
pass
def magnetic_to_true(self, head, lat, lon, height, year, geodetic=True): # real signature unknown; restored from __doc__
"""
magnetic_to_true(head, lat, lon, height, year, geodetic = True)
Convert a heading (deg) from magnetic north to true north (add declination)
"""
pass
def true_to_magnetic(self, head, lat, lon, height, year, geodetic=True): # real signature unknown; restored from __doc__
"""
true_to_magnetic(head, lat, lon, height, year, geodetic = True)
Convert a heading (deg) from true north to magnetic north (subtract declination)
"""
pass
def __init__(self, str_mesh_fname, str_svmesh_fname, bool_delay_load=False): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
__pyx_vtable__ = None # (!) real value is ''
class GeoMagneticElements(object):
"""
GeoMagneticElements encapsulates the geomagnetic field paramters at a location.
members:
Decl = Angle between the magnetic field vector and true north, positive east.
Incl = Angle between the magnetic field vector and the horizontal plane, positive down.
F = Magnetic field strength.
H = Horizontal magnetic field strength.
X = Northern component of the magnetic field vector.
Y = Eastern component of the magnetic field vector.
Z = Downward component of the magnetic field vector.
GV = The grid variation.
Decldot, Incldot, Fdot, Hdot, Xdot, Ydot, Zdot, and GVdot: the change per year of each of the above.
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
Decl = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Decldot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
F = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Fdot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
GV = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
GVdot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
H = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Hdot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Incl = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Incldot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
X = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Xdot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Y = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Ydot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Z = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
Zdot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__pyx_vtable__ = None # (!) real value is ''
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
__test__ = {}
| true |
09404fe514830fd880f6eecd28b8c272e893d8d7 | Python | zchauvin/tictactoe | /tester.py | UTF-8 | 3,314 | 3.75 | 4 | [] | no_license | from Computer import Computer
from Board import Board
import unittest
from typing import List
N = 3
class TestComputer(unittest.TestCase):
def test_x_computer_never_loses(self):
board = Board(N)
c = Computer('x', 'o')
self.helper(board, c, True)
def test_o_computer_never_loses(self):
board = Board(N)
c = Computer('o', 'x')
self.helper(board, c, False)
# test to fuzz every possible human move after the computer takes the optimal move
def helper(self, board: Board, c: Computer, is_computer_turn: bool) -> None:
if is_computer_turn:
self.assertFalse(
board.has_player_won(c.opponent_character),
'The computer\'s opponent must not have won'
)
# base case: tie game
if board.is_full():
return
# take the optimal computer move and recurse
new_board = board.copy()
new_board.set(c.get_move(board), c.character)
self.helper(new_board, c, not is_computer_turn)
else:
# base case: computer win or tie game
if board.has_player_won(c.character) or board.is_full():
return
# recurse on the human taking every possible move
for cell in board.get_available_cells():
new_board = board.copy()
new_board.set(cell, c.opponent_character)
self.helper(new_board, c, not is_computer_turn)
class TestBoard(unittest.TestCase):
def test_player_has_not_won(self):
self.has_player_won_helper([[], [0], [0, 1]], False)
def test_player_has_won(self):
self.has_player_won_helper([[0, 1, 2], [0, 3, 6], [0, 4, 8]], True)
def has_player_won_helper(self, cases: List[List[int]], has_player_won: bool) -> None:
player = 'x'
for cells in cases:
b = Board(N)
b.multi_set(cells, player)
self.assertEqual(b.has_player_won(player), has_player_won)
def test_get_lines(self):
self.assertEqual(
Board(N).get_lines(),
[
[0, 1, 2],
[0, 3, 6],
[3, 4, 5],
[1, 4, 7],
[6, 7, 8],
[2, 5, 8],
[0, 4, 8],
[2, 4, 6]
]
)
def test_is_invalid_move(self):
b = Board(N)
occupied_cell = 0
b.set(occupied_cell, 'x')
for invalid_move in [-1, N * N, occupied_cell]:
self.assertFalse(b.is_valid_move(invalid_move))
def test_is_valid_move(self):
b = Board(N)
b.set(0, 'x')
self.assertTrue(b.is_valid_move(1))
def test_get_available_cells(self):
b = Board(N)
expected = list(range(N * N))
self.assertEqual(b.get_available_cells(), expected)
b.set(0, 'x')
b.set(1, 'o')
self.assertEqual(b.get_available_cells(), expected[2:])
def test_is_full(self):
b = Board(N)
self.assertFalse(b.is_full())
b.set(0, 'x')
self.assertFalse(b.is_full())
b.multi_set(range(1, N * N), 'x')
self.assertTrue(b.is_full())
if __name__ == '__main__':
unittest.main()
| true |
9203cd2ba10e9780887779818eb9b5e0f3b146e3 | Python | mdilauro39/ifdytn210 | /practicaIIII/funcionmax.py | UTF-8 | 218 | 3.859375 | 4 | [] | no_license | n1= int(raw_input("ingrese primer numero"))
n2= int(raw_input("ingrese segundo numero"))
def numgrande(a,b):
if a > b:
return a
if b > a:
return b
if a == b:
return "a y b iguales"
print numgrande(n1,n2,n3)
| true |
8fa525cfee21cae9a8d89c94633008a249954b51 | Python | jeffdeng1314/CS16x | /cs160_series/python_cs1/integral_calculator.py | UTF-8 | 3,289 | 4.21875 | 4 | [] | no_license | import math
def f1(x):
return 5 * math.pow(x, 4) + 3 * math.pow(x, 3) - 10 * x + 2
def f2(x):
return math.pow(x, 2) - 10
def f3(x):
return 40 * x + 5
def f4(x):
return math.pow(x, 3)
def f5(x):
return 20 * math.pow(x, 2) + 10 * x - 2
"""
ft: function type
mt: method type: 1 - rectangle, 2 - trapezoid, 3 - both
n: number of trapezoids
a: starting point
b: ending point
"""
def calculateArea(ft, mt, n, a, b):
w = (b-a) / n
if mt == 1 or mt == 3:
# Using rectangle
cauclateAreaUsingRectangle(ft, w, n, a, b)
if mt == 2 or mt == 3:
# Using trapezoids
cauclateAreaUsingTrapezoid(ft, w, n, a, b)
def cauclateAreaUsingRectangle(ft, w, n, a, b):
area = 0
if ft == 1:
# Use f1(x)
for i in range(0, n):
area += w * f1(a + i * w)
print("[Rectangle] The area under 5x^4 + 3x^3 - 10x + 2 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 2:
# Use f2(x)
for i in range(0, n):
area += w * f2(a + i * w)
print("[Rectangle] The area under x^2 10 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 3:
# Use f3(x)
for i in range(0, n):
area += w * f3(a + i * w)
print("[Rectangle] The area under 40x + 5 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 4:
# Use f4(x)
for i in range(0, n):
area += w * f4(a + i * w)
print("[Rectangle] The area under x^3 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 5:
# Use f5(x)
for i in range(0, n):
area += w * f5(a + i * w)
print("[Rectangle] The area under 20x^2 + 10x - 2 between " + str(a) + " and " + str(b) + " is " + str(area))
def cauclateAreaUsingTrapezoid(ft, w, n, a, b):
area = 0
if ft == 1:
# Use f1(x)
for i in range(0, n):
area += w * (f1(a + i * w) + f1(a + (i + 1) * w)) / 2
print("[Trapezoid] The area under 5x^4 + 3x^3 - 10x + 2 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 2:
# Use f2(x)
for i in range(0, n):
area += w * (f2(a + i * w) + f2(a + (i + 1) * w)) / 2
print("[Trapezoid] The area under x^2 10 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 3:
# Use f3(x)
for i in range(0, n):
area += w * (f3(a + i * w) + f3(a + (i + 1) * w)) / 2
print("[Trapezoid] The area under 40x + 5 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 4:
# Use f4(x)
for i in range(0, n):
area += w * (f4(a + i * w) + f4(a + (i + 1) * w)) / 2
print("[Trapezoid] The area under x^3 between " + str(a) + " and " + str(b) + " is " + str(area))
elif ft == 5:
# Use f5(x)
for i in range(0, n):
area += w * (f5(a + i * w) + f5(a + (i + 1) * w)) / 2
print("[Trapezoid] The area under 20x^2 + 10x - 2 between " + str(a) + " and " + str(b) + " is " + str(area))
def main():
while True:
functionType = int(input("\nChoose a function(1, 2, 3, 4, 5, other(quit)):"))
if functionType < 1 or functionType > 5:
break
methodType = int(input("Would you like to calculate the area using the rectangle, trapezoid, or both (1, 2, 3):"))
n = int(input("How many trapezoids do you want?"))
a = int(input("Please select a starting point, a = "))
b = int(input("Please select an ending point, b = "))
calculateArea(functionType, methodType, n, a, b)
main()
| true |
466cca9ddd2ec1746e4a00a0ce59f0ce24d007b7 | Python | JiwoonKim/Book-review-website | /application.py | UTF-8 | 8,683 | 2.890625 | 3 | [] | no_license | import os
from flask import Flask, session, render_template, request, jsonify
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from helpers import *
import requests
# Flask application instance; __name__ lets Flask locate templates and static files
app = Flask(__name__)
# Check for environment variable
# Fail fast at startup if the database connection string is missing
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")
# Configure session to use filesystem
# Server-side sessions are stored on disk instead of in signed cookies;
# SESSION_PERMANENT=False makes them expire when the browser closes
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
# scoped_session provides a thread-local session so concurrent requests
# do not share a single SQLAlchemy session object
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
@app.route("/", methods=["GET", "POST"])
def index():
""" Default page for Website
User is able to search for a specific book using the isbn, title, author's name
The results of matching books will be displayed and is clickable for more details """
# Ensure user is logged in first
if session.get("user_id") is None:
return redirect("/login")
# User reached route via POST (submitted a form)
if request.method == "POST":
# Ensure book is submitted
if not request.form.get("book"):
return error("must sumbit info of book for search")
# Search for book in database
search = request.form.get("book").lower()
book = "%" + search + "%"
books = db.execute(
"SELECT * FROM books WHERE (LOWER(isbn) LIKE :book) OR (LOWER(title) LIKE :book) OR (LOWER(author) LIKE :book) LIMIT 15", {"book": book}).fetchall()
# if no matching results, show error message
if len(books) == 0:
return error("No matching results")
# if matching results, show lists of books
else:
return render_template("results.html", search=search, books=books)
# User reached route via GET
else:
return render_template("index.html")
@app.route("/api/<isbn>")
def api(isbn):
    """ API access to the Website
        User may make a GET request via this route with the ISBN number
        A JSON response of the book's details will be returned (404 for an
        unknown ISBN). """
    # Query from books table in database
    book = db.execute("SELECT * FROM books WHERE isbn=:isbn", {"isbn": isbn}).fetchone()
    # If the requested ISBN number doesn't exist in database, return error
    if book is None:
        return jsonify({"error": "invalid ISBN number"}), 404
    else:
        # Query for book ratings from Goodreads API.
        # SECURITY NOTE(review): the API key is hardcoded in source; it
        # should come from an environment variable like DATABASE_URL does.
        res = requests.get("https://www.goodreads.com/book/review_counts.json",
                           params={"key": "lV6JRoF2Xl75SN1f9SgOmQ", "isbns": isbn})
        if res.status_code != 200:
            raise Exception("ERROR: API request unsuccessful.")
        data = res.json()
        ratings = data["books"][0]
        # create JSON response and return it
        api = jsonify({
            "title": book.title,
            "author": book.author,
            "year": book.year,
            "isbn": book.isbn,
            "review_count": ratings["work_ratings_count"],
            "average_score": ratings["average_rating"]
        })
        return api
@app.route("/book/<isbn>", methods=["GET", "POST"])
def book(isbn):
    """ Page for further details of the book clicked from the search results from the / route
        User may write one review for each book and can view the reviews and rankings """
    # Ensure user is logged in first
    if session.get("user_id") is None:
        return redirect("/login")
    # Query for book data
    book = db.execute("SELECT * FROM books WHERE isbn=:isbn", {"isbn": isbn}).fetchone()
    if book is None:
        return error("Error in retrieving data")
    # Query for book ratings from Goodreads API (key hardcoded; see api()).
    res = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "lV6JRoF2Xl75SN1f9SgOmQ", "isbns": isbn})
    if res.status_code != 200:
        raise Exception("ERROR: API request unsuccessful.")
    data = res.json()
    ratings = data["books"][0]
    # Join reviews with users so the template can show the reviewer's name.
    reviews = db.execute("SELECT username, rating, review FROM reviews JOIN users ON reviews.user_id=users.id WHERE isbn=:isbn", {
                         "isbn": isbn}).fetchall()
    return render_template("book.html", book=book, ratings=ratings, reviews=reviews)
@app.route("/login", methods=["GET", "POST"])
def login():
    """ User is logged in"""
    # Forget any user_id
    session.clear()
    # User reached route via POST (submitting a form for logging in)
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return error("must submit username")
        # Ensure password was sumbitted
        elif not request.form.get("password"):
            return error("must submit password")
        # SECURITY NOTE(review): passwords are compared as plaintext in SQL;
        # they should be hashed (e.g. werkzeug.security) -- schema change needed.
        username = request.form.get("username")
        password = request.form.get("password")
        user_id = db.execute("SELECT id FROM users WHERE (username=:username AND password=:password)",
                             {"username": username, "password": password}).fetchone()
        # Ensure username exists and password matches
        if user_id is None:
            return error("user does not exist or password does not match")
        # Stores the whole result row; review() later unwraps it with [0].
        session["user_id"] = user_id
        return redirect("/")
    # User reached route via GET: just render the login form.
    else:
        return render_template("login.html")
@app.route("/logout")
def logout():
    """Log the current user out and return to the home page."""
    # Dropping all session state (including user_id) forces a fresh login.
    session.clear()
    return redirect("/")
@app.route("/register", methods=["GET", "POST"])
def register():
    """ User is able to register their own credentials
        and is logged in automatically """
    # User reached route via POST
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return error("must submit username")
        # Ensure password was sumbitted
        elif not request.form.get("password"):
            return error("must submit password")
        # Ensure confirmation was submitted.
        # NOTE(review): the field name is spelled "comfirmation" -- it must
        # match the input name in register.html; verify against the template.
        elif not request.form.get("comfirmation"):
            return error("must confirm password")
        # Ensure confirmation and password match
        elif not request.form.get("comfirmation") == request.form.get("password"):
            return error("password not matching")
        # Register username and password into database (w/o hashing).
        # SECURITY NOTE(review): plaintext password storage; should be hashed.
        username = request.form.get("username")
        password = request.form.get("password")
        db.execute("INSERT INTO users(username, password) VALUES(:username, :password)",
                   {"username": username, "password": password})
        db.commit()
        # Log in credentials automatically by re-querying the new row's id.
        user_id = db.execute("SELECT id FROM users WHERE (username=:username AND password=:password)",
                             {"username": username, "password": password}).fetchone()
        if user_id is None:
            return error("not registered")
        session["user_id"] = user_id
        # redirect user to the home page (already logged in)
        return redirect("/")
    # User reached route via GET
    else:
        return render_template("register.html")
@app.route("/review", methods=["POST"])
def review():
    """ Inserts the review data given from the /book route into the database
        Redirects the user back to the /book route """
    # Ensure user is logged in first
    if session.get("user_id") is None:
        return redirect("/login")
    # login() stored the whole row; element 0 is the numeric id.
    user_id = session.get("user_id")[0]
    # Ensure user has not already written a review for this book
    isbn = request.form.get("isbn")
    if db.execute("SELECT * FROM reviews WHERE isbn=:isbn AND user_id=:user_id", {"isbn": isbn, "user_id": user_id}).rowcount > 0:
        return error("Already wrote a review!")
    # Ensure rating, review text and isbn were all submitted
    if not request.form.get("rating"):
        return error("must submit rating")
    elif not request.form.get("review"):
        return error("must submit review")
    elif not request.form.get("isbn"):
        return error("must submit isbn")
    # Insert new review into database (parameterized -- no SQL injection)
    rating = request.form.get("rating")
    review = request.form.get("review")
    db.execute("INSERT INTO reviews(isbn, user_id, rating, review) VALUES(:isbn, :user_id, :rating, :review)",
               {"isbn": isbn, "user_id": user_id, "rating": rating, "review": review})
    db.commit()
    # Display the book page again
    book_route = "/book/" + isbn
    return redirect(book_route)
5dbf7e9a9610d3299b8b3e43d1bd10ab60d93d71 | Python | DobrovolskayaValentinaAI182/homework | /first.py | UTF-8 | 1,856 | 3.578125 | 4 | [] | no_license | from random import randrange
import time
from datetime import datetime
# Bubble sort, written inline over a list of 10 random ints in [0, 15).
my_list = [ randrange(0, 15) for i in range(10) ]
max_list = len( my_list )
i = 0
while i < max_list:
    j = 0
    # After pass i, the last i elements are already in their final place.
    while j < max_list-i-1:
        if my_list[ j ] > my_list[ j + 1 ]:
            my_list[ j ], my_list[ j + 1] = my_list[ j + 1], my_list[ j ]
        j+=1
    i += 1
print( my_list )
# Wall-clock timing template (datetime is already imported at the top of
# this file; this re-import is redundant but harmless).
from datetime import datetime
start_time = datetime.now()
# do your work here
end_time = datetime.now()
print('Duration: {}'.format(end_time - start_time))
# Добровольская Валентина
# АИ-182
# Практика 1
def bubbleSort(array):
    """Sort *array* in place with bubble sort and return the same list."""
    size = len(array)
    # Each pass bubbles the largest remaining value to position `last`.
    for last in range(size - 1, 0, -1):
        for idx in range(last):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
    return array
def insertionSort(array):
    """Sort *array* in place with insertion sort and return the same list."""
    for pos in range(1, len(array)):
        current = array[pos]
        hole = pos
        # Shift larger elements one slot right until the insertion point.
        while hole > 0 and array[hole - 1] > current:
            array[hole] = array[hole - 1]
            hole -= 1
        array[hole] = current
    return array
def selectionSort(array):
    """Sort *array* in place with selection sort and return the same list."""
    size = len(array)
    for start in range(size - 1):
        # Index of the smallest remaining element (first one on ties,
        # matching the strict `<` comparison of a manual scan).
        smallest = min(range(start, size), key=array.__getitem__)
        if smallest != start:
            array[start], array[smallest] = array[smallest], array[start]
    return array
# Exercise each sort on its own fixed 15-element input.
arrayForBubbleSort = [ 659, 83, 250, 390, 909, 23, 769, 238, 323, 589, 527, 497, 130, 428, 515 ]
print(" Bubble sort: ", bubbleSort(arrayForBubbleSort), "\n")
arrayForInsertionSort = [ 484, 310, 976, 364, 244, 715, 353, 871, 994, 4, 419, 415, 384, 760, 97 ]
print(" Insertion sort: ", insertionSort(arrayForInsertionSort), "\n")
arrayForSelectionSort = [ 809, 52, 503, 139, 34, 960, 237, 135, 865, 654, 408, 467, 828, 139, 741]
print(" Selection sort: ", selectionSort(arrayForSelectionSort), "\n")
| true |
9c495135ab9a2398c6a1e14de03486c6e9b2f260 | Python | Justin-mario/python_excercises | /Self_Check_Excercises/wind_chill_temperature.py | UTF-8 | 329 | 3.71875 | 4 | [] | no_license |
temperature = eval(input("Input Temperature Between -58F and 41f: "))
velocity_of_wind = eval(input("Input velocity: "))
wind_chill_temperature = 35.74 + (0.6215 * temperature) - 35.75 * (velocity_of_wind ** 0.16) \
+ 0.4275 * temperature * (velocity_of_wind ** 0.16)
print(f'{wind_chill_temperature}')
| true |
c75b50b21770c8d46f850ca03ad9ed775a8029b8 | Python | pntgoswami18/tutBotKhamen | /bot.py | UTF-8 | 7,203 | 3.09375 | 3 | [] | no_license | import os
import discord
import random
from dotenv import load_dotenv
from discord.ext import commands
from discord import DMChannel
# Pull credentials from the .env file.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = discord.Client()
# NOTE(review): `bot` is created but never referenced below -- confirm
# whether the commands extension is actually used before removing.
bot = commands.Bot(command_prefix='!')
@client.event
async def on_ready():
    """Log connection details once the bot has finished logging in."""
    print(f'{client.user} has connected to Discord!')
    # A bot user can belong to several guilds; locate the configured one
    # by name. (discord.utils.get(client.guilds, name=GUILD) works too.)
    guild = discord.utils.find(lambda g: g.name == GUILD, client.guilds)
    # Report the bot name plus the guild's name and id.
    print(f'{client.user} is connected to the following guild:\n'
          f'{guild.name}(id: {guild.id})')
    # List every member of the guild, one per line.
    member_names = '\n - '.join(member.name for member in guild.members)
    print(f'Guild Members:\n - {member_names}')
# Greets each new member of the guild over a private message.
@client.event
async def on_member_join(member):
    """Send a welcome DM to a newly joined member.

    Arguments:
        member {object} -- Discord member object
    """
    # A DM channel must be created before a private message can be sent.
    await member.create_dm()
    await member.dm_channel.send(f'Hi {member.name}! Welcome to siestaria.')
# handling custom messages being posted in the guild
@client.event
async def on_message(message):
    """Reply to messages: canned DM replies, keyword triggers in guild
    channels, and random heckling of one specific user.

    Arguments:
        message {discord.Message} -- incoming message (author/content/channel
            attributes are used below)

    Raises:
        discord.DiscordException: deliberately, when the content is
            'raise-exception'
    """
    dm_replies = [
        'I\'m street so look both ways before you cross me',
        'Speak your fucking mind',
        'This is what you came for ?',
        'I think that\'s his ass. Oh hells naw!',
        'I\'m your fool, and you\'re my nizzle. And dat\'s fo\' real!',
        'Get your retarded-looking saggy-ass pants out of my nose!',
        'Yo, give me your scrilla before I shank your retarded-looking balls.',
        "Don't try to slide into my DMs nigga",
    ]
    # don't want my bot to reply to my own messages
    # or its own messages in the guild
    print(f'author is {message.author}')
    print(f'message is {message.content}')
    if message.author == client.user:
        return
    # reply to DMs with a random canned line, then stop processing
    if isinstance(message.channel, DMChannel):
        response = random.choice(dm_replies)
        await message.author.send(response)
        return
    # To-Do: move these texts into files and read from there
    brooklyn_99_quotes = [
        'I\'m the human form of the 💯 emoji.',
        'Bingpot!',
        (
            'Cool. Cool cool cool cool cool cool cool, '
            'no doubt no doubt no doubt no doubt.'
        ),
        'Sarge, with all due respect, I am gonna completely ignore everything you just said.💁',
        '''The English language can not fully capture the depth and complexity of my thoughts,
         so I’m incorporating emojis into my speech to better express myself. 😉''',
        'A place where everybody knows your name is hell. You’re describing hell.',
        'If I die, turn my tweets into a book.',
        'I asked them if they wanted to embarrass you, and they instantly said yes.',
        'Great, I’d like your $8-est bottle of wine, please.',
        'Captain Wuntch. Good to see you. But if you’re here, who’s guarding Hades?',
        'I’m playing Kwazy Cupcakes, I’m hydrated as hell, and I’m listening to Sheryl Crow. I’ve got my own party going on.',
        'Anyone over the age of six celebrating a birthday should go to hell.',
        'Captain, turn your greatest weakness into your greatest strength. Like Paris Hilton RE: her sex tape.',
        'Jake, piece of advice: just give up. It’s the Boyle way. It’s why our family crest is a white flag.'
    ]
    # NOTE(review): these are matched as substrings below, so e.g. 'mc'
    # also fires inside unrelated words -- word-boundary matching would
    # be stricter; confirm intent before changing.
    hindi_expletives_triggers = [
        'bc', 'bencho', 'mc', 'madarchod', 'bsdk', 'bhosdike'
    ]
    # add some flavor here
    hindi_expletives = [
        'mera lawda',
        'bde hi ajeeb chutiye ho yaar tum',
        'gaand m chattri daal k khol dunga aage bola to',
        'ha tera baap hai, gandu',
        'chup madarchod!',
        'chup kar bhosdike',
        'aisa marunga, parle-g kaali chai m dubona b naseeb nhi hoga',
        'kya bawaseer hai re ye?!',
        'gaali kaun diya bc?!'
    ]
    saanp_replies = [
        'abu hai asli saanp mc',
        'insaan hi insaan ko dass raha hai, saanp side m baith k hass raha h',
        'ye jo saath m haste hain, baad m saanp ban k daste hain :snake:',
    ]
    welcome_salutations = [
        'hi', 'hello', 'how are you', 'hey'
    ]
    welcome_replies = [
        f'kaun laya is {message.author} ko?!',
        f'aao {message.author}. Guruji ka prasad leke jaoge.',
        f'aham brahmasmi',
    ]
    if message.content == '99!':
        print('99! triggered')
        response = random.choice(brooklyn_99_quotes)
        await message.channel.send(response)
    if 'happy birthday' in message.content.lower():
        print('HDB triggered')
        await message.channel.send('Happy Birthday! 🎈🎉')
    # deliberately raise exception (exercises the on_error handler)
    if message.content == 'raise-exception':
        raise discord.DiscordException
    if 'sun be' in message.content.lower():
        await message.channel.send('chup madarchod! bilkul chup!')
    if 'ssup' in message.content.lower():
        await message.channel.send('bol bhadwe, teri baari ab')
    if 'saanp' in message.content.lower():
        response = random.choice(saanp_replies)
        await message.channel.send(response)
    if 'koi' in message.content.lower():
        await message.channel.send('ha tera baap hai, gandu')
    if 'kaun' in message.content.lower():
        await message.channel.send('register m dekh insaan h ya bhagwaan')
    # One reply per matched trigger -- a message containing several
    # triggers produces several replies.
    for x in hindi_expletives_triggers:
        if x in message.content.lower():
            response = random.choice(hindi_expletives)
            await message.channel.send(response)
    # random replies to welcome salutions (substring matching; 'hi' also
    # fires inside words like 'this' -- see NOTE above)
    for x in welcome_salutations:
        if x in message.content.lower():
            response = random.choice(welcome_replies)
            await message.channel.send(response)
    # randomly send messages (50/50) when this specific user posts
    if str(message.author) == 'abutaha#2650':
        send_message = random.choice([True, False])
        if (send_message):
            response = random.choice(hindi_expletives)
            await message.channel.send(response)
    print(message.channel.type)
# error handler
async def on_error(event, *a, **k):
"""Error handler and logger for on_message raised errors
Arguments:
event {object} -- Event for which the exception is raised
"""
with open('err.log', a) as f:
if event is 'on_message':
f.write(f'Unhandled message: {a[0]}\n')
else:
raise discord.DiscordException
client.run(TOKEN)
| true |
0d54c25260e20edad7bc33f085b00fd7dfc21684 | Python | toxicthunder69/Iteration | /Starter Exercises/ContinuousMessage.py | UTF-8 | 189 | 3.546875 | 4 | [] | no_license | #Joseph Everden
#09/10/14
#Continuous message
num = 0
while num >= 20 or num <= 10:
num = int(input("Enter a number between 10 and 20: "))
print("You entered {0} which is valid!".format(num))
| true |
190004cd644c6757c46c3ec1918c5d9206ac80f2 | Python | Cristopher-12/AplicacionesWebOrientadasaServicios | /google_books/index.py | UTF-8 | 1,484 | 2.8125 | 3 | [] | no_license | import web
import requests
import json
render = web.template.render("google_books/")
class Index():
    def GET(self):
        """Render the search page with no result data."""
        datos = None
        return render.index(datos)

    def POST(self):
        """Look up the submitted book on the Google Books API and render
        its title, author(s), cover image and info link.

        Fix: the original iterated the authors list and overwrote `author`
        on every pass, so only the LAST author was ever shown; all authors
        are now joined with ", ".
        """
        form = web.input()
        book_name = form.book_name
        result = requests.get("https://www.googleapis.com/books/v1/volumes?q="+book_name)
        book = result.json()
        items = book["items"]
        encoded = json.dumps(items)
        decoded = json.loads(encoded)
        volume = decoded[0]["volumeInfo"]  # first matching volume
        title = "Título: " + volume["title"]  # book title
        try:
            # Join every author instead of keeping only the last one.
            author = "Autor: " + ", ".join(volume["authors"])
        except KeyError:
            author = "No hay autor disponible."
        try:
            image = volume["imageLinks"]["smallThumbnail"]  # cover image
        except KeyError:
            # Google's stock "no cover" thumbnail as a fallback.
            image = "http://books.google.com.mx/googlebooks/images/no_cover_thumb.gif"
        url = volume["infoLink"]  # link to the Google Books page
        titulo = "<h1 class='text-center'>"+title+"<h1>"
        autor = "<h2 class='text-center'>"+author+"<h2>"
        imagen = "<img src=\""+image+"\" align='center' width='300' height='350'>"
        link = "<a target='blank' class='text-center text-normal' href='"+url+"'>Visualizar en Google Books</a>"
        datos = {
            "titulo": titulo,
            "autores": autor,
            "imagen": imagen,
            "url": link
        }
        return render.index(datos)
89ffcc60bae2a7717793a4fd72ca281e9bd25d6d | Python | alfir777/skillbox_course_python-basic | /lesson_005/painting/wall.py | UTF-8 | 1,957 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# (цикл for)
import time
import simple_draw as sd
def wall(coord_x=300, coord_y=100, height=201):
    """Draw a brick-wall pattern of 30x15 rectangles starting at
    (coord_x, coord_y); `height` bounds both row and column counts."""
    x = 30   # brick width
    y = 15   # brick height / row step
    step_y = coord_y
    for _ in range(0, height, y):
        step_x = coord_x
        # Alternate row styles: (step_y / 5) % 2 flips truthiness every
        # other row because step_y advances by 15 (a multiple of 5).
        if (step_y / 5) % 2:
            for _ in range(0, height, x):
                point_x = sd.get_point(step_x, step_y)
                point_y = sd.get_point(x+step_x, y+step_y)
                sd.rectangle(point_x, point_y, color=sd.COLOR_ORANGE, width=3)
                step_x += x
                time.sleep(0.01)
        else:
            # Offset rows are shifted right by y and stop 50px early.
            for i in range(0, height, x):
                if i > (height - 50):
                    break
                point_x = sd.get_point(step_x+y, step_y)
                point_y = sd.get_point(x+step_x+y, y+step_y)
                sd.rectangle(point_x, point_y, color=sd.COLOR_ORANGE, width=3)
                step_x += x
                time.sleep(0.01)
        step_y += y
def draw_figure(start_point, side_count, angle, length, color):
    """Draw a closed regular figure with `side_count` sides starting at
    `start_point`; each side turns by 360/side_count degrees."""
    vector = start_point
    angle_step = 360 / side_count
    step = angle_step
    for side in range(side_count):
        if side == 0:
            # First side: drawn from the start point (length + 3 -- the
            # extra 3px presumably closes visual gaps; TODO confirm).
            vector = sd.get_vector(start_point=vector, angle=angle, length=length + 3, width=3)
        elif side == side_count - 1:
            # Last side: close the figure with a straight line back to
            # the origin instead of another vector, then stop.
            sd.line(vector.end_point, start_point, color=color, width=3)
            break
        else:
            # Middle sides: continue from the previous end point, turning
            # by an accumulating multiple of angle_step.
            vector = sd.get_vector(start_point=vector.end_point, angle=angle + step, length=length, width=3)
            step += angle_step
        vector.draw(color=color)
def triangle(start_point, angle=0, length=0, color=sd.COLOR_ORANGE):
    """Draw an equilateral triangle via the generic figure routine."""
    draw_figure(start_point=start_point, side_count=3, angle=angle, length=length, color=color)
def square(start_point, angle=0, length=0, color=sd.COLOR_ORANGE):
    """Draw a square via the generic figure routine."""
    draw_figure(start_point=start_point, side_count=4, angle=angle, length=length, color=color)
478f4e3f7fb3ce9e04ab0b0160b699e39bb649d7 | Python | serenafr/linklinkgo | /boards.py | UTF-8 | 1,870 | 3.53125 | 4 | [] | no_license | import random
# NOTE: Python 2 code (xrange, print statements).
class Board(object):
    # Tile board for a matching ("link-link") game: kinds * pairsPerKind * 2
    # tiles; two equal tiles may be erased when closer than `interval`.
    def __init__(self, kinds, pairsPerKind, interval):
        self.interval = interval          # max index distance for a match
        self.kinds = kinds                # number of distinct tile values
        self.pairsPerKind = pairsPerKind  # pairs generated per value
        self.tiles = None
        self.GenTiles()
    def GenTiles(self):
        'generate a new game: 2 * pairsPerKind tiles of each kind, shuffled'
        self.tiles = []
        for i in xrange(self.kinds):
            self.tiles.extend([i] * self.pairsPerKind * 2)
        self.Shuffle()
    def Shuffle(self):
        random.shuffle(self.tiles)
    def Size(self):
        return len(self.tiles)
    def Find(self):
        '''find two tiles that are the same with each other
        (within `interval` positions); if not found, return two -1'''
        for i in xrange(self.Size() - 1):
            for j in xrange(i + 1, min(self.Size(), i + self.interval)):
                if self.tiles[i] == self.tiles[j]:
                    return i, j
        return -1, -1
    def NeedShuffle(self):
        'return True if the board needs shuffle (no erasable pair remains)'
        return self.Find() == (-1, -1)
    def GetTile(self, pos):
        return self.tiles[pos]
    def CanErase(self, pos1, pos2):
        'return True if can erase, otherwise return False'
        # Out-of-range positions can never be erased.
        if pos1 >= self.Size() or pos2 >= self.Size() or pos1 < 0 or pos2 < 0:
            return False
        # Distinct positions, close enough, holding equal tiles.
        return 0 < abs(pos1 - pos2) < self.interval and \
            self.GetTile(pos1) == self.GetTile(pos2)
    def ManualErase(self, num1, num2):
        '''Erase 2 tiles mannually
        Raise a RuntinmeError if pic[num1] and pic[num2] cannot be erased
        '''
        if self.CanErase(num1, num2):
            # Pop the lower index first; the higher index shifts down by 1.
            self.tiles.pop(min(num1, num2))
            self.tiles.pop(max(num2, num1) - 1)
        else:
            raise RuntimeError('Not match!')
    def Print(self):
        'Print board (tiles on one line, their indices on the next)'
        print PrettyPrint(self.tiles)
        print PrettyPrint(range(self.Size()))
def PrettyPrint(pic):
    """Right-align every element to width 2 and join them with spaces."""
    return ' '.join(str(item).rjust(2) for item in pic)
| true |
34424b93f79daebbae1f47d6079aea120a69e562 | Python | yangmyongho/3_Python | /chap03_DataStr_exams/exam04.py | UTF-8 | 602 | 3.78125 | 4 | [] | no_license | '''
step05 문제
문) 다음 movie 객체를 대상으로 평점 8이상인 영화 제목과 누적관객수를 출력하시오.
<출력 결과>
영화제목 : 광해
영화제목 : 관상
누적 관객수 = 2,100
'''
# movie name -> [rating, cumulative audience (in units of 1,000)]
movie = {'광해' : [9.24, 1200], '공작' : [7.86, 500], '관상' : [8.01, 900]}
# Attempt 1: print every movie with its [rating, audience] pair.
for i in movie :
    print(i, end=' : ')
    print(movie[i])
# Attempt 2: titles rated above 8 plus their summed audience.
# NOTE(review): the exercise says "rating 8 or above" but this uses a
# strict `> 8`; identical output here since no rating equals exactly 8.
su = 0
for i in movie.keys() :
    if movie[i][0] > 8 :
        print('영화제목 :', i)
        su += movie[i][1]
print('누적 관객수 =', format(su, '3,d'))
| true |
143ef18ac271b8ba2533fae646aabfd7f599674d | Python | CLeDoPbIT/telegram_bot_face_recognition | /project/bot_self_check.py | UTF-8 | 1,377 | 2.609375 | 3 | [] | no_license | import apiai, json
import photo_proc
txt_request = apiai.ApiAI('733a0aad1eb0479c88c862e1ec1a26e7').text_request()  # Dialogflow API token (SECURITY NOTE(review): hardcoded in source)
txt_request.lang = 'ru'  # language of the request
txt_request.session_id = 'Comparator3000'  # dialog session ID (lets the bot be trained later)
txt_request.query = 'Ты здесь?'  # send the user's message to the AI
txt_responseJson = json.loads(txt_request.getresponse().read().decode('utf-8'))
txt_response = txt_responseJson['result']['fulfillment']['speech']  # parse the JSON and extract the answer
# If the bot answered, report readiness; otherwise it did not understand / no connection.
print ('Проверяем работает ли бот; Спросим: Ты здесь?')
if txt_response:
    print('Бот готов; его ответ: ' + txt_response)
else:
    print('Подключение отсутствует, бот не отвечает')
# Smoke-test the photo pipeline on a sample image.
photo_request = '/test_photo.jpg'
try:
    photo_response = photo_proc.class_detector(photo_request)
    print('Обработка фото функционирует нормально')
# NOTE(review): bare except hides the actual failure reason; consider
# catching Exception and logging it.
except:
    print('Что-то пошло не так при обработке фотографии')
4e9cac388fa214eb1856275e9e02d0540e12280e | Python | petterin/amaer | /amaer.py | UTF-8 | 4,162 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# AMAer ("Adjacency Matrix-er") turns a CSV list into an adjacency matrix.
# Usage instructions: python amaer.py --help
#
# https://github.com/petterin/amaer
#
# Copyright (c) 2013 Petteri Noponen (Licensed under the MIT License.)
import argparse
import csv
import os
import sys
import time
# NOTE: Python 2 (print statements; time.clock was removed in Python 3.8).
def main():
    # Initialize command line arguments
    arg_parser = init_argparse()
    args = arg_parser.parse_args()
    # Only chat on stderr-like progress when not writing the CSV to stdout.
    verbose = (args.outputFile.name != '<stdout>')
    # Read the input CSV and prepare data table
    csv_reader = csv.reader(args.inputFile, delimiter=';')
    data = table_from_reader(csv_reader, args.col_offset, args.row_offset)
    # Store unique values
    uniques = find_uniques_values(data);
    # Prepare the result matrix
    size = len(uniques)
    matrix = [ [-1]*size for _ in range(size) ] # table (size x size) full of neg values
    start = time.clock()
    if verbose: print '\nCalculating... (This may take a minute.)'
    # Count connections (Goes through the table several times. Quite inefficient, but works...)
    for i in range(size):
        for j in range(size):
            if i == j:
                # Diagonal (value vs itself) is left blank in the output.
                matrix[i][j] = ''
            else:
                matrix[i][j] = count_connections(uniques[i], uniques[j], data)
    end = time.clock()
    if verbose: print 'Calculating done in %.2g seconds.' % (end-start)
    add_table_headers(uniques, matrix)
    # Output the result CSV table
    csv_writer = csv.writer(args.outputFile, delimiter=';')
    for row in matrix:
        csv_writer.writerow(row)
    if verbose: print '\nOutput matrix saved to %s.' % args.outputFile.name
def init_argparse():
    """Build the command-line parser; with no arguments, print help and exit."""
    parser = argparse.ArgumentParser(
        description='AMAer ("Adjacency Matrix-er") turns an Excel CSV list of nodes into an adjacency matrix.')
    parser.add_argument('-c', dest='col_offset', metavar='X', type=int, default=1,
                        help="Ignore the first X columns (default: 1)")
    parser.add_argument('-r', dest='row_offset', metavar='Y', type=int, default=0,
                        help="Ignore the first Y rows (default: 0)")
    parser.add_argument('inputFile', metavar='INPUT',
                        type=argparse.FileType('rU'),
                        help='Input CSV file path')
    parser.add_argument('outputFile', metavar='OUTPUT', nargs='?',
                        type=argparse.FileType('w'),  # writable file
                        default=sys.stdout,
                        help='Output CSV file path (if omitted, uses stdout)')
    # Invoked with no arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser
def table_from_reader(reader, start_with_column=0, start_with_row=0):
    """Build a table from a CSV reader.

    Skips the first `start_with_row` rows, drops the first
    `start_with_column` columns, strips each cell, and keeps only the
    non-empty values of every remaining row.
    """
    for _ in range(start_with_row):
        next(reader)
    table = []
    for row in reader:
        trimmed = (cell.strip() for cell in row[start_with_column:])
        table.append([value for value in trimmed if value])
    return table
def find_uniques_values(table):
    """Return the distinct cell values of *table* in first-seen order.

    Uses an auxiliary set for membership tests: the original did
    `cell not in uniques` against a list, which is O(n) per cell and
    quadratic overall for large inputs. Output order is unchanged.
    """
    uniques = []
    seen = set()
    for row in table:
        for cell in row:
            if cell not in seen:
                seen.add(cell)
                uniques.append(cell)
    return uniques
def count_connections(str1, str2, table):
    """Count the rows of *table* that contain both *str1* and *str2*."""
    return sum(1 for row in table if str1 in row and str2 in row)
def add_table_headers(header_list, table):
    """Prepend row labels and a leading header row to *table* in place.

    Raises:
        Exception: if the number of headers differs from the row count.
    """
    if len(table) != len(header_list):
        raise Exception("Header list's size doesn't match result matrix's size.")
    # Row headers: put each label at the front of its row.
    for label, row in zip(header_list, table):
        row.insert(0, label)
    # Column headers: a fresh copy with a blank corner cell in front.
    table.insert(0, [''] + list(header_list))
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| true |
a3c1401a7cb47247fd98f91f1c0e683a7c982760 | Python | Serasar/leverx_homework_2 | /task_2.py | UTF-8 | 2,679 | 3.015625 | 3 | [] | no_license | class Version:
    def __init__(self, version):
        """Parse *version* (e.g. "1.2.0-alpha.1") into a comparable score."""
        self.version = version
        self.__version_int = 0  # numeric score computed by version_to_int()
        # Weight for each alphabetic marker found in patch/suffix parts
        # ("rc" is rewritten to "r" before lookup).
        self.STRING_WORTH = {"a": 1, "b": 2, "alpha": -2, "beta": -1, "r": -5}
        self.MAJOR_WORTH = 100   # weight of the major component
        self.MINOR_WORTH = 10    # weight of the minor component
        self.PATCH_WORTH = 0.1   # weight of patch/suffix components
        self.version_to_int()
    def version_to_int(self):
        """Fold the version string into the private numeric score.

        Score = major*100 + minor*10 (minor added raw when >= 10)
        + 0.1 * (patch digits + weighted alphabetic markers from the
        patch and any "-suffix" parts).
        """
        main_part, suffix = "", ""
        if "-" in self.version:
            main_part, suffix = self.version.split("-")
            # Normalize release candidates so "rc" maps to the "r" weight.
            suffix = suffix.replace("rc", "r")
        else:
            main_part = self.version
        main_part, suffix = main_part.split("."), suffix.split(".")
        # Process main part
        # Process major version
        self.__version_int += int(main_part[0]) * self.MAJOR_WORTH
        # Process minor version (minors >= 10 are added unweighted so they
        # never outrank a major bump)
        if int(main_part[1]) < 10:
            self.__version_int += int(main_part[1]) * self.MINOR_WORTH
        else:
            self.__version_int += int(main_part[1])
        # Process patch version; a trailing letter (e.g. "1b") contributes
        # its STRING_WORTH weight before the digits are added.
        if main_part[2][-1].isalpha():
            self.__version_int += self.STRING_WORTH[main_part[2][-1]] * self.PATCH_WORTH
            main_part[2] = main_part[2][:-1]
            self.__version_int += int(main_part[2]) * self.PATCH_WORTH
        else:
            self.__version_int += int(main_part[2]) * self.PATCH_WORTH
        # Process suffix part ([""] means no "-suffix" was present).
        if suffix != [""]:
            for symbol in suffix:
                if symbol.isalpha():
                    self.__version_int += self.STRING_WORTH[symbol] * self.PATCH_WORTH
                else:
                    self.__version_int += int(symbol) * self.PATCH_WORTH
def __eq__(self, other):
return self.__version_int == other.__version_int
def __ne__(self, other):
return self.__version_int != other.__version_int
def __le__(self, other):
return self.__version_int <= other.__version_int
def __ge__(self, other):
return self.__version_int >= other.__version_int
def __lt__(self, other):
return self.__version_int < other.__version_int
def __gt__(self, other):
return self.__version_int > other.__version_int
def main():
    """Smoke-test Version ordering on known (lower, higher) pairs."""
    cases = [
        ("1.0.0", "2.0.0"),
        ("1.0.0", "1.42.0"),
        ("1.2.0", "1.2.42"),
        ("1.1.0-alpha", "1.2.0-alpha.1"),
        ("1.0.1b", "1.0.10-alpha.beta"),
        ("1.0.0-rc.1", "1.0.0"),
    ]
    for lower, higher in cases:
        assert Version(lower) < Version(higher), "le failed"
        assert Version(higher) > Version(lower), "ge failed"
        assert Version(higher) != Version(lower), "neq failed"
# Run the self-tests only when executed as a script.
if __name__ == "__main__":
    main()
| true |
763b0b494a5bcf723a05b3bb7e0110ded4535132 | Python | Aasthaengg/IBMdataset | /Python_codes/p03816/s466111035.py | UTF-8 | 99 | 2.71875 | 3 | [] | no_license | N = input()
# NOTE: Python 2 (raw_input, print statement). Competitive-programming
# one-liner: answer is the number of distinct card values, reduced by one
# when that count is even (so an odd number of cards remains).
A = map(int, raw_input().split())
ans = len(set(A))
if ans%2==0:
    ans -= 1
print ans
| true |
8937656571224085957c9f315d2d269865b0d300 | Python | MuqadirHussain/python-instagram-bot | /Bot.py | UTF-8 | 2,191 | 2.625 | 3 | [] | no_license | from selenium import webdriver
import os
import csv
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.chrome.options import Options
def read_data(path):
    """Read hashtags from a CSV file, one tag in column 0 per row.

    The leading '#' of each tag is stripped. Always returns a list
    (possibly empty). Fix: the original returned None for a missing
    file -- the os.path.exists() guard meant the FileNotFoundError
    branch never fired -- which crashed the caller's len() check.
    """
    tags = []
    try:
        if os.path.exists(path):
            with open(path, 'r') as csvfile:
                for row in csv.reader(csvfile):
                    if len(row) > 0:
                        tags.append(row[0][1:])  # drop the leading '#'
    except (OSError, csv.Error):
        print("no data")
    return tags
def write_data(file, tag, no_of_post):
    """Append a ("#tag", post count) row to *file*, writing the CSV
    header first if the file does not exist yet. File errors are
    silently ignored, as in the original."""
    try:
        if not os.path.exists(file):
            # First use: create the file with its header row.
            with open(file, 'w', newline='') as csvfile:
                csv.writer(csvfile).writerow(['TAGS', 'NO OF POST'])
        with open(file, 'a', newline='') as csvfile:
            csv.writer(csvfile).writerow(["#" + tag, no_of_post])
    except FileNotFoundError:
        pass
# Launch Chrome with a persistent profile so Instagram stays logged in.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("user-data-dir="+os.getcwd()+"\\"+"user1")
#if you have google chrome portable then use this
#chrome_options.binary_location=r'GoogleChromePortable\\App\\Chrome-bin\\chrome.exe'
chrome_options.add_argument('--disable-infobars')
browser=webdriver.Chrome(chrome_options=chrome_options)
browser.maximize_window()
file="tags.csv"
tag_data=read_data(file)
if(len(tag_data)>0):
    for rows_tag in tag_data:
        # Open the explore page for this hashtag and wait for the DOM.
        browser.get(r"https://www.instagram.com/explore/tags/"+rows_tag)
        try:
            WebDriverWait(browser, 15).until(lambda browser:
                          browser.execute_script('return document.readyState'))
            # 'g47SY ' is Instagram's (fragile) class for the post counter.
            postnumber=browser.find_element_by_class_name('g47SY ')
            hash_tag_count=postnumber.text
            write_data("data.csv",rows_tag,hash_tag_count)
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # catching selenium's exceptions explicitly would be safer.
        except :
            write_data("data.csv",rows_tag,"page not found")
else:
    print("no data found")
| true |
557fcfb2b2fe40732519990f2f59cfdaf8e24177 | Python | RunTimeError2/Hand-on-ML-with-sklearn-and-TF | /ex02_chap3/multi_output.py | UTF-8 | 2,178 | 3.03125 | 3 | [] | no_license | import numpy as np
from sklearn.datasets import fetch_mldata
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
from sklearn.metrics import roc_curve, roc_auc_score, f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.preprocessing import StandardScaler
from PIL import Image
from sklearn.neighbors import KNeighborsClassifier
# Fetching the MNIST dataset.
# NOTE(review): fetch_mldata relies on the retired mldata.org service and
# was removed from modern scikit-learn (fetch_openml replaces it) -- this
# script targets an older sklearn version.
mnist = fetch_mldata('MNIST original')
print(mnist)
# Display the shape of each list of element
X, y = mnist['data'], mnist['target']
print('Shape of X is ', X.shape)
print('Shape of y is ', y.shape)
# There are 70,000 images in total and each image is 28*28
# Split the dataset into training set and test set
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# Shuffle the training set (same permutation for data and labels)
shuffle_index = np.random.permutation(60000)
X_train, y_train = X_train[shuffle_index], y_train[shuffle_index]
# Adding uniform integer noise in [0, 100) to every pixel
noise = np.random.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise
noise = np.random.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
# Denoising setup: noisy images are inputs, clean images are targets
y_train_mod = X_train
y_test_mod = X_test
# Pick a image and plot it
Image_index = 36000
some_image = X_train_mod[Image_index]
plt.imshow(some_image.reshape(28, 28), cmap=matplotlib.cm.binary,
           interpolation='nearest')
plt.axis('off')
plt.show()
def plot_digit(digit, width=28, height=28):
    """Display a flattened pixel vector as a width x height grayscale digit."""
    image = digit.reshape(width, height)
    plt.imshow(image, cmap=matplotlib.cm.binary, interpolation='nearest')
    plt.axis('off')
    plt.show()
# Denoise with KNN: fit noisy images against their clean counterparts,
# then predict (reconstruct) one noisy test digit and show both.
print('Using KNeiborsClassifier')
Image_index = 2333
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[Image_index]])
plot_digit(X_test_mod[Image_index])
plot_digit(clean_digit)
| true |
95b1746ee696f3c6de0ecf9469a8d82f141daa18 | Python | daniellopes04/uva-py-solutions | /list4/10541 - Stripe.py | UTF-8 | 669 | 3.53125 | 4 | [] | no_license | # -*- coding: UTF-8 -*-
def rectangles(n, k):
    """Return the binomial coefficient C(n, k) (number of ways to choose
    k items from n), or 0 when k > n.

    The coefficient is computed as (d+1)*...*n // q! with d = max(k, n-k)
    and q = min(k, n-k), which keeps the intermediate products small.
    (The previous version rebound the loop variables over ``n`` and ``d``
    mid-loop, shadowing the function's own names.)
    """
    if k > n:
        return 0
    if k == n:
        return 1
    larger = max(k, n - k)
    smaller = min(k, n - k)
    numerator = 1
    for factor in range(larger + 1, n + 1):
        numerator *= factor
    denominator = 1
    for factor in range(1, smaller + 1):
        denominator *= factor
    # The division is always exact for binomial coefficients.
    return numerator // denominator
def main():
    """Read stripe test cases from stdin and print the answer for each.

    Each case line holds: total length n, number of stripes k, then the
    stripe widths. The answer is C(n - used + k, k) where ``used`` counts
    the stripe widths plus the k-1 mandatory gaps.
    """
    num_cases = int(input())
    for _ in range(num_cases):
        values = [int(token) for token in input().split()]
        total_length = values[0]
        num_stripes = values[1]
        used = sum(values[2:]) + num_stripes - 1
        print(rectangles(total_length - used + num_stripes, num_stripes))
if __name__ == '__main__':
main() | true |
60f6efc871a2b70f1bd7bc032eade4c664124878 | Python | gruiick/openclassrooms-py | /01-Apprenez/2.01.py | UTF-8 | 820 | 3.46875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python3
# coding: utf-8
# $Id: 2.01.py 1.2 $
# SPDX-License-Identifier: BSD-2-Clause
# Tutorial script demonstrating basic string methods and str.format.
chaine = str() # empty string, same result as chaine = ""
#while chaine.lower() != 'q':
    #print("hit 'Q' to quit")
    #chaine = input()
print('Thx')
print('Thx'.upper())
print('majuscules'.upper())
print(' sans espaces '.strip())
print(' sans à espaces gauche '.lstrip())
print('title'.upper().center(15))
prenom = "Paul"
nom = "Dupont"
age = 21
# Positional format fields may be reused ({0} appears twice).
print( \
"Je m'appelle {0} {1} ({3} {0} pour l'administration) et j'ai {2} " \
"ans.".format(prenom, nom, age, nom.upper()))
# formatting an address with named format fields
adresse = """
{no_rue}, {nom_rue}
{code_postal} {nom_ville} ({pays})
""".format(no_rue=5, nom_rue="rue des Postes", code_postal=75003, nom_ville="Paris", pays="France")
print(adresse)
| true |
277afb49b1cb9a729e8f0852b2777936f934a22c | Python | empracine/test-epcath | /TimePeriod.py | UTF-8 | 30,741 | 2.515625 | 3 | [] | no_license | import copy
import random
from Schedule import *
from operator import itemgetter
############################################################################################################################################################################################################
######################################### BEGIN TIME PERIOD DATA TYPE ######################################################################################################################################
############################################################################################################################################################################################################
class TimePeriod:
'''
Class to model a given time period of scheduling.
'''
    def __init__(self, params):
        """Create empty room/shift/holding-bay schedules for the whole period
        plus all statistical counters.

        :param params: configuration object with room counts, lab IDs,
            schedule resolution, lab opening hours and days in the period.
        """
        # One Schedule per (day, labID, roomIndex) triple, for each lab type.
        CathRooms = {(d,params.cathID,i):Schedule(int(params.resolution),params.labStartTime,params.labEndTime) for i in range(params.numCathRooms) for d in range(params.daysInPeriod)}
        EPRooms = {(d,params.epID,i):Schedule(int(params.resolution),params.labStartTime,params.labEndTime) for i in range(params.numEPRooms) for d in range(params.daysInPeriod)}
        MiddleRooms = {(d,params.middleID,i):Schedule(int(params.resolution),params.labStartTime,params.labEndTime) for i in range(params.numMiddleRooms) for d in range(params.daysInPeriod)}
        # One provider/shift schedule per day.
        DayShifts = {d: ShiftSchedule(params.numCathRooms,params.numEPRooms,params.secondShiftStart) for i in range(params.numMiddleRooms) for d in range(params.daysInPeriod)} if False else {d: ShiftSchedule(params.numCathRooms,params.numEPRooms,params.secondShiftStart) for d in range(params.daysInPeriod)}
        rooms = dict(list(CathRooms.items()) + list(EPRooms.items()) + list(MiddleRooms.items()))
        overflow = {d:[] for d in range(params.daysInPeriod)}
        # Schedule slots per hour (e.g. resolution 30 -> 2 slots per hour).
        multiple = 60.0/params.resolution
        # Holding-bay occupancy counter per (day, slot index), up to HB close.
        holdingBays = {(d,1.0*i):0 for i in range(0,int(params.HBCloseTime*multiple)) for d in range(params.daysInPeriod)}
        # bins[0]=room schedules, bins[1]=overflow lists, bins[2]=holding bays,
        # bins[3]=per-day shift schedules.
        self.bins = [copy.deepcopy(rooms),copy.deepcopy(overflow),copy.deepcopy(holdingBays), copy.deepcopy(DayShifts)]
        self.numCathRooms = params.numCathRooms #Now equivalent to numRooms[cathID]
        self.numEPRooms = params.numEPRooms #Now equivalent to numRooms[epID]
        self.numMiddleRooms = params.numMiddleRooms #Now equivalent to numRooms[middleID]   self.numCathRooms = numCathRooms
        self.numRooms = {params.cathID: params.numCathRooms, params.epID:params.numEPRooms, params.middleID:params.numMiddleRooms} #Added to call as a variable
        self.numRestrictedCath = params.numRestrictedCath
        self.numRestrictedEP = params.numRestrictedEP
        self.numDays = params.daysInPeriod
        self.numWeeks = params.daysInPeriod/5
        self.labStartTime = params.labStartTime
        # statistical counters
        self.numTotalProcs = 0
        self.numSameDays = 0
        self.numSameWeeks = 0
        self.numEmergencies = 0
        self.numTotalShifts = 0
        self.numFullShifts = 0
        self.numHalfShifts = 0
        self.numQuarterShifts = 0
        self.procsPlaced = 0
        self.procsPlacedData = []
        self.primeTimeProcs = 0
        self.primeTimeProcsData = []
        self.crossOverProcs = 0
        self.cathToEP = 0 # procedures historically done in Cath that are scheduled in an EP room
        self.epToCath = 0 # procedures historically done in EP that are scheduled in a Cath room
        self.overflowCath = 0
        self.overflowEP = 0
        self.overflowMiddle = 0
        self.overflowQuarter = 0
        self.overflowHalf = 0
        self.overflowFull = 0
        self.overflowDays = []
######################################################################################################
###################################### PHASE ONE: SHIFT SCHEDULING ###################################
######################################################################################################
################################### SHIFT SCHEDULING FOR ##################################
#################################### WHOLE TIME PERIOD ####################################
    def packShifts(self,shifts, params):
        '''
        Schedules shifts for the entire time period.
        Input: shifts (a list of cleaned shift data for the period)
               params (configuration holding column indices and options)
        Returns: none
        '''
        allShifts = copy.deepcopy(shifts)
        # make any room value changes requested via configuration
        for change in params.roomValueChanges:
            shiftType, newRoomValue = change
            for shift in allShifts:
                if shift[params.iShiftType] == shiftType:
                    shift[params.iRoomS] = newRoomValue
        # break shifts up by block size horizon (1.0 / 0.5 / 0.25 of a day)
        FullShifts = [p for p in allShifts if p[params.iShiftType] == params.fullShiftID]
        HalfShifts = [p for p in allShifts if p[params.iShiftType] == params.halfShiftID]
        QuarterShifts = [p for p in allShifts if p[params.iShiftType] == params.quarterShiftID]
        # summary counters; shifts not in any of the three groups are not counted
        self.numTotalShifts = len(FullShifts)+len(HalfShifts)+len(QuarterShifts)
        self.numFullShifts = len(FullShifts)
        self.numHalfShifts = len(HalfShifts)
        self.numQuarterShifts = len(QuarterShifts)
        # schedule day by day; shifts are placed longest-first (see sortShifts)
        for d in range(1,self.numDays+1):
            daysShifts = [shift for shift in allShifts if shift[params.iDayS]==d]
            # shifts sorted in descending order
            daysSortedShifts = self.sortShifts(daysShifts, params)
            self.packShiftsForDay(d-1,daysSortedShifts, params)
###################################### DAY BY DAY SHIFT ##################################
################################# SCHEDULING FOR BOTH LABS ################################
    def packShiftsForDay(self,day,daysShifts, params):
        '''
        Schedules shifts during a given day. Keeps track of overflow shifts
        (that couldn't fit into prime time that day).
        Input: day (integer day of time period to be scheduled, indexed from 0)
               daysShifts (a list of shift data for a given day)
        Returns: none
        '''
        shifts = copy.deepcopy(daysShifts)
        shiftsCrossOver = []
        shiftsOverflow = []
        # Pass 1: try each shift in its own lab first.
        for shiftToPlace in shifts:
            if not self.tryPlaceShift(shiftToPlace,day, params):
                if params.restrictRooms:
                    # Room value 2.0 appears to mark a flexible shift that may
                    # cross labs — TODO confirm against the data dictionary.
                    if shiftToPlace[params.iRoomS] == 2.0:
                        shiftsCrossOver.append(shiftToPlace)
                    else:
                        shiftsOverflow.append(shiftToPlace)
                else:
                    shiftsCrossOver.append(shiftToPlace)
        # Pass 2: try crossover candidates in the other lab.
        for shiftToPlace in shiftsCrossOver:
            if not self.tryPlaceShiftInOtherLab(shiftToPlace,day, params):
                shiftsOverflow.append(shiftToPlace)
        # Pass 3: remaining shifts are forced into their own lab
        # (0.0 = Cath, 1.0 = EP — hard-coded here rather than params IDs).
        cathOverflow = [shift for shift in shiftsOverflow if shift[params.iLabS] == 0.0]
        epOverflow = [shift for shift in shiftsOverflow if shift[params.iLabS] == 1.0]
        for cathShift in cathOverflow:
            self.placeShiftInLab(cathShift,day, params)
        for epShift in epOverflow:
            self.placeShiftInLab(epShift,day, params)
    def tryPlaceShift(self,shift,day,params):
        '''
        Tries to pack the shift into one of the room days of the given day.
        When packing calls nextOpenRoomInLab so that each shift will be placed
        in the lowest numbered room that has space for it.
        Input: the day, the shift to be scheduled.
        Notice, the shifts will be passed to this in the order of the labs
        (Cath, EP) and in decreasing shift length (1 full, 0.5 half,
        0.25 quarter).
        Returns: True if the shift was placed, otherwise False
        '''
        allShifts = self.bins[3]
        ### STEP 1: get shift information ###
        toPlace = copy.deepcopy(shift)
        labTo = shift[params.iLabS]
        shiftType = shift[params.iShiftType]
        ### STEP 2: place shift in its own lab in the lowest room number possible if there is room for it ###
        # maxOpenRoomInLab gives the largest remaining capacity in any room.
        if allShifts[day].maxOpenRoomInLab(labTo) >= shiftType:
            firstOpen = allShifts[day].nextOpenRoomInLab(labTo, shiftType)
            allShifts[day].placeProvider(
                labTo,firstOpen,toPlace[params.iProviderS],toPlace[params.iShiftType],toPlace[params.iShiftLength],toPlace[params.iLabS]
            )
            return True
        return False
    def tryPlaceShiftInOtherLab(self,shift,day, params):
        '''
        Tries to pack the shift into one of the room days of the OTHER lab on
        the given day. Only the first cathCrossOverRooms/epCrossOverRooms
        rooms of the target lab accept crossover shifts.
        Input: the day, the shift to be scheduled.
        Returns: True if the shift was placed, otherwise False
        '''
        allShifts = self.bins[3]
        ### STEP 1: get shift information ###
        toPlace = copy.deepcopy(shift)
        originalLab = shift[params.iLabS]
        # Target lab is simply the opposite of the shift's own lab.
        labTo = params.cathID if originalLab==params.epID else params.epID
        shiftType = shift[params.iShiftType]
        # Highest room index that is allowed to take crossover shifts.
        maxRoomAllowed = params.cathCrossOverRooms-1 if labTo==params.cathID else params.epCrossOverRooms-1
        ### STEP 2: Place shift in the other lab in the lowest room number possible if there is room for it ###
        if allShifts[day].maxOpenRoomInLab(labTo) >= shiftType:
            firstOpen = allShifts[day].nextOpenRoomInLab(labTo, shiftType)
            # The first open room may lie beyond the crossover limit.
            if firstOpen > maxRoomAllowed:
                return False
            allShifts[day].placeProvider(
                labTo, firstOpen,
                toPlace[params.iProviderS],
                toPlace[params.iShiftType],
                toPlace[params.iShiftLength],
                toPlace[params.iLabS]
            )
            return True
        return False
    def placeShiftInLab(self,shift,day, params):
        '''
        Packs the shift into the room of its own lab which has the earliest
        end time on the given day (unconditional placement — used for
        overflow shifts that did not fit anywhere in prime time).
        Input: the day, the shift to be scheduled.
        Returns: None
        '''
        allShifts = self.bins[3]
        daysShifts = allShifts[day]
        ### STEP 1: get shift information ###
        labTo = shift[params.iLabS]
        # findEarliestRoom returns (end time, room index); we need the index.
        roomTo = daysShifts.findEarliestRoom(labTo)[1]
        ### STEP 2: Place shift in the room in its lab which has the earliest end time ###
        daysShifts.placeProvider(
            labTo,
            roomTo,
            shift[params.iProviderS],
            shift[params.iShiftType],
            shift[params.iShiftLength],
            shift[params.iLabS]
        )
######################################## HELPER FUNCTIONS ########################################
####################################### (SHIFT BIN PACKING) ######################################
#def cmp(x, y): return (x > y) - (x < y)
def sortShifts(self,shifts, params):
'''
Sorts shifts in decreasing order by type (1, 0.5, 0.25)
Input: shifts
Returns: shifts sorted
'''
shifts = copy.deepcopy(shifts)
#print("before" shifts[1:10])
sorted(shifts, key=itemgetter(params.iShiftType), reverse=True)
#shifts.sort(lambda x,y: cmp(x[params.iShiftType],y[params.iShiftType]),reverse=True)
#print("after" shifts[1:10])
return shifts
######################################################################################################
######################################## PHASE TWO: PROC PACKING #####################################
######################################################################################################
##################################### PROC PACKING FOR ####################################
#################################### WHOLE TIME PERIOD ####################################
def packProcedures(self,procedures, params):
'''
Packs procedures into the appropriate shifts during the time period. Assumes
that the shifts have already been scheduled.
Input: procedures (a list of cleaned procedure data for a given period of time)
Returns: none
'''
allProcs = procedures[:]
###### STEP 0: MODIFY PROCEDURE DATA ACCORDING TO SPECS ######
# add procedure ID's
for i in range(len(allProcs)):
proc = procedures[i]
proc.append(i)
if not params.schedMRinHB:
# change all MR holding bay times to 0
for proc in allProcs:
proc[params.iPreTime] = 0.0 if proc[params.iRoom]==3.0 else proc[params.iPreTime]
proc[params.iPostTime] = 0.0 if proc[params.iRoom]==3.0 else proc[params.iPostTime]
if params.middleRoomPreRandom and params.schedMRinHB:
for proc in allProcs:
preTime = random.gauss(desiredPreMeanMR, desiredPreStDevMR)
proc[params.iPreTime] = preTime if proc[params.iRoom]==3.0 else proc[params.iPreTime]
if params.middleRoomPostRandom and params.schedMRinHB:
for proc in allProcs:
postTime = random.gauss(params.desiredPostMeanMR, params.desiredPostStDevMR)
proc[params.iPostTime] = postTime if proc[params.iRoom]==3.0 else proc[params.iPostTime]
if params.preProcHBCleanTimeRandom:
for proc in allProcs:
cleanTime = random.gauss(params.desiredPreCleanMean, params.desiredPreCleanStDev)
proc[params.iPreProcHBCleanTime] = cleanTime
if params.postProcHBCleanTimeRandom:
for proc in allProcs:
cleanTime = random.gauss(params.desiredPostCleanMean, params.desiredPostCleanStDev)
proc[params.iPostProcHBCleanTime] = cleanTime
if params.postProcRandom:
# change the post procedure time to a random value from a distribution with a given mean/standard deviation
for proc in allProcs:
postTime = random.gauss(params.desiredMean, params.desiredStDev)
proc[params.iPostTime] = postTime if proc[params.iRoom]!=3.0 else proc[params.iPostTime]
if params.multPostProcTime:
for proc in allProcs:
postTime = PostProcMult*proc[params.iPostTime]
proc[params.iPostTime] = postTime
if params.ConvertPreProcToHours:
# Convert the pre procedure time to hours
for proc in allProcs:
proc[params.iPreTime] = proc[params.iPreTime]/60
if params.CapHBPreProc:
# Cap the pre procedure time to be be no more than 3 hours
for proc in allProcs:
PreTime = min(proc[params.iPreTime],params.HBPreProcCap)
proc[params.iPreTime] = PreTime
if params.ChangeProviderDays:
#Make all changes for each tuple
for proc in allProcs:
if proc[params.iProvider] in list(params.providerChanges.keys()):
procDOW = (proc[params.iDay]-1)%5
change = providerChanges[proc[params.iProvider]]
fromDay = change[0]
toDay = change[1]
proc[params.iDay] += (toDay-fromDay) if procDOW==change[0] else 0
if params.SwapProviderDays:
#Make all swap for each tuple
providerDict = {k:[[],[]] for k in list(params.providerSwaps.keys())}
for proc in allProcs:
if proc[params.iProvider] in list(params.providerSwaps.keys()):
procDOW = (proc[params.iDay]-1)%5
fromDay = providerSwaps[proc[params.iProvider]][0]
toDay = providerSwaps[proc[params.iProvider]][1]
if procDOW in (fromDay,toDay):
providerDict[proc[params.iProvider]][0].append(proc) if procDOW == fromDay else providerDict[proc[params.iProvider]][1].append(proc)
for provider in list(providerSwaps.keys()):
swap = providerSwaps[provider]
fromDay = swap[0]
toDay = swap[1]
for proc in providerDict[provider][0]:
proc[params.iDay] += (toDay-fromDay)
for proc in providerDict[provider][1]:
proc[params.iDay] += (fromDay-toDay)
###### STEP 1: PACK PROCS INTO SHIFTS DAY BY DAY ######
# update summary stats
self.numTotalProcs = len(allProcs)
self.numSameDays = len([x for x in allProcs if x[params.iSchedHorizon]==1.0])
self.numSameWeeks = len([x for x in allProcs if x[params.iSchedHorizon]==3.0])
self.numEmergencies = len([x for x in allProcs if x[params.iSchedHorizon]==0.0])
#for d in range(1,timePeriod.numDays+1):
for d in range(1,self.numDays+1):
daysProcs = [proc for proc in allProcs if proc[params.iDay]==d]
if params.sortProcs:
# sort procedures based on parameters
daysProcs = self.sortProcedures(daysProcs, params.sortIndex ,params.sortDescend, params.iProvider)
self.placeDaysProcs(d-1,daysProcs, params)
################################## DAY BY DAY PROC PACKING ###################################
################################## INTO APPROPRIATE SHIFTS ###################################
def placeDaysProcs(self,day,daysProcs, params):
'''
Iterates through the day's procedures, determines which shift each one belongs in,
and calls self.placeProcInShift(proc) to place
Input: a list of the days procedures
Returns: None
use getProviderRoomAssignment which returns
(lab, room number, shift length, and the length of shifts scheduled BEFORE the provider)
'''
procsToPlace = copy.deepcopy(daysProcs)
allShifts = self.bins[3]
allSchedules =self.bins[0]
daysShifts = allShifts[day]
for toPlace in procsToPlace:
# middle room procedures: not part of shift schedule
if toPlace[params.iRoom] == params.middleID:
middle1,middle2 = (allSchedules[(day,params.middleID,0)], allSchedules[(day,params.middleID,1)])
middle1Open,middle2Open = (middle1.getNextOpenTimeSlot(params.labStartTime), middle2.getNextOpenTimeSlot(params.labStartTime))
# decide which middle room to place the procedure in and when
if isLater(middle1Open,params.labEndTime) and isLater(middle2Open,params.labEndTime):
room = middle1 if isEarlier(middle1Open,middle2Open) else middle1
else:
room = middle2 if isLater(middle1Open,params.labEndTime) else middle1
when = middle1Open if room == middle1 else middle2Open
room.scheduleProcedure(toPlace,toPlace[params.iProcTime],when)
# update summary stats
self.updateHoldingBays(toPlace,day,when, params)
self.updateProcsPlacedStats(toPlace)
if isEarlier(when,params.labEndTime):
self.updatePrimeTimeProcsStats(toPlace)
# normal procedures: part of shifts
else:
providerToPlace = toPlace[params.iProvider]
shiftInfo = daysShifts.getProviderRoomAssignment(providerToPlace)
labToPlace = shiftInfo[0]
roomToPlace = shiftInfo[1]
# determine when the procedure's corresponding shift starts
providerStart = daysShifts.getProviderStartTime(labToPlace,roomToPlace,providerToPlace)
daysSchedule = allSchedules[(day,labToPlace,roomToPlace)]
# determine when the procedure should start
whenToPlace = daysSchedule.getNextOpenTimeSlot(providerStart)
daysSchedule.scheduleProcedure(toPlace,toPlace[params.iProcTime],whenToPlace)
# update summary stats
self.updateHoldingBays(toPlace,day,whenToPlace, params)
self.updateProcsPlacedStats(toPlace)
self.updateCrossoverStats(toPlace,labToPlace, params)
if isLater(whenToPlace,params.labEndTime):
self.updateOverflowStats(toPlace,shiftInfo[2],day, params)
else:
self.updatePrimeTimeProcsStats(toPlace)
######################################## HELPER FUNCTIONS ########################################
######################################### (PROC PACKING) #########################################
def maxTime(self,time1,time2):
'''
Determines the later time.
Input: time1 and time2 to compare, in the form (hours,minutes)
Returns: the later time. If they are the same time, returns it.
'''
hour1,min1 = time1
hour2,min2 = time2
if hour1 < hour2:
return time2
elif hour1 > hour2:
return time1
else:
if min1 < min2:
return time2
else:
return time1
def sortProcedures(self,procs,index,descending,iProvider):
'''
Sort the procedures based on their procedure time, in either ascending or descending order.
Input: procs (list of procedure data to sort)
index (the index of the procedure attribute to sort based on)
descending (bool value indicating whether the sorted order
should be descending, i.e. longest first, or not)
Returns: a sorted copy of the procs list
'''
procsCopy = procs[:]
procsCopy.sort(key=lambda x:(x[iProvider],x[index]),reverse=descending)
return procsCopy
    def updateHoldingBays(self,procedure,day,procStartTime, params):
        '''
        Increment the holding-bay occupancy counters (bins[2]) for one placed
        procedure: one pre-procedure interval and one post-procedure interval,
        each rounded to whole schedule-resolution slots.
        Input: procedure (procedure record), day (0-indexed),
               procStartTime (formatted start time), params (configuration)
        Returns: None
        '''
        # add counters to holding bay
        # Computes the start time of each procedure as number of hours into the day.
        # Patients who spent less than 15 minutes recovering do not go to the holding bay since we assume this is flawed data
        if procedure[params.iPostTime]> params.MinHBTime:
            procStartTime = minutesFromTimeFormatted(procStartTime)/60.0
            # All interval endpoints below are in hours from day start.
            preHoldingStart = procStartTime - procedure[params.iPreTime]
            # NOTE(review): pre-bay end is start + clean time — presumably the
            # clean-up happens after the patient leaves for the room; confirm.
            preProcClean = procStartTime + procedure[params.iPreProcHBCleanTime]
            postHoldingStart = procStartTime + procedure[params.iProcTimeMinusTO]/60.0
            postHoldingEnd = postHoldingStart + procedure[params.iPostTime]
            postHoldingClean = postHoldingEnd + procedure[params.iPostProcHBCleanTime]
            # multipliers to round up/down to nearest resolution
            preHoldingStartRound = ((60*preHoldingStart)//params.resolution)
            preHoldingEndRound = ((60*preProcClean)//params.resolution)
            postHoldingStartRound = ((60*postHoldingStart)//params.resolution)
            postHoldingEndRound = ((60*postHoldingClean)//params.resolution)
            numPreSlots = preHoldingEndRound-preHoldingStartRound
            numPostSlots = postHoldingEndRound-postHoldingStartRound
            # One increment per occupied slot in each interval.
            for i in range(int(numPreSlots)):
                self.bins[2][(day,preHoldingStartRound+i)] += 1
            for j in range(int(numPostSlots)):
                self.bins[2][(day,postHoldingStartRound+j)] += 1
def updateOverflowStats(self,procOverflow,shiftType,day, params):
if procOverflow[params.iRoom] == 3.0:
self.overflowMiddle += 1
elif procOverflow[params.iLab] == cathID:
self.overflowCath += 1
elif procOverflow[params.iLab] == epID:
self.overflowEP += 1
if shiftType == 0.25:
self.overflowQuarter += 1
elif shiftType == 0.5:
self.overflowHalf += 1
elif shiftType == 1.0:
self.overflowFull += 1
self.overflowDays.append(day) if day not in self.overflowDays else None
def updateProcsPlacedStats(self,procedure):
self.procsPlaced += 1
self.procsPlacedData.append(procedure)
def updatePrimeTimeProcsStats(self,procedure):
self.primeTimeProcs += 1
self.primeTimeProcsData.append(procedure)
def updateCrossoverStats(self,procedure,placedLabID, params):
originalLab = procedure[params.iLab]
if originalLab != placedLabID:
self.crossOverProcs += 1
if originalLab == params.cathID:
self.cathToEP += 1
else:
self.epToCath += 1
######################################################################################################
###################################### PHASE THREE: SUMMARY STATS ####################################
######################################################################################################
def getUtilizationStatistics(self, params):
'''
'''
CathRooms = {(d,params.cathID,i):[] for i in range(params.numCathRooms) for d in range(self.numDays)}
EPRooms = {(d,params.epID,i):[] for i in range(params.numEPRooms) for d in range(self.numDays)}
roomsUtil = dict(list(CathRooms.items()) + list(EPRooms.items()))
for day in range(self.numDays):
for c in range(params.numCathRooms):
roomMinutes = self.bins[0][(day,params.cathID,c)].getTotalPrimeTimeMinutes()
util = roomMinutes / params.totalTimeRoom
roomsUtil[(day,params.cathID,c)] = util
for e in range(params.numEPRooms):
roomMinutes = self.bins[0][(day,params.epID,e)].getTotalPrimeTimeMinutes()
util = roomMinutes / params.totalTimeRoom
roomsUtil[(day,epID,e)] = util
avgDays = self.getAverageUtilizationByDay(roomsUtil, params)
avgsCath = [avgDays[x] for x in list(avgDays.keys()) if x[1]==params.cathID]
avgsEP = [avgDays[x] for x in list(avgDays.keys()) if x[1]==params.epID]
cathAverage = sum(avgsCath)/self.numDays
epAverage = sum(avgsEP)/self.numDays
avgDaysCombined = [[avgDays[(d,params.cathID)],avgDays[(d,params.epID)]] for d in range(self.numDays)]
avgWeeks = self.getAverageUtilizationByWeek(avgDaysCombined)
return (cathAverage,epAverage,avgDaysCombined,avgWeeks,roomsUtil)
def getAverageUtilizationByDay(self,daysUtil, params):
'''
'''
daysUtilCopy = copy.deepcopy(daysUtil)
daysAverageUtil = {}
for d in range(self.numDays):
cathDayTotal = 0
epDayTotal = 0
for c in range(self.numCathRooms):
cathDayTotal += daysUtilCopy[(d,params.cathID,c)]
for e in range(self.numEPRooms):
epDayTotal += daysUtilCopy[(d,params.epID,e)]
daysAverageUtil[(d,cathID)] = (cathDayTotal/self.numCathRooms)
daysAverageUtil[(d,epID)] = (epDayTotal/self.numEPRooms)
return daysAverageUtil
def getAverageUtilizationByWeek(self,avgDays):
'''
'''
avgDaysCopy = copy.deepcopy(avgDays)
weeksUtil = [[avgDaysCopy[i],avgDaysCopy[i+1],avgDaysCopy[i+2],avgDaysCopy[i+3],avgDaysCopy[i+4]] for i in range(0,self.numDays-4,5)]
weeksAverageUtil = [[] for i in range(int(self.numWeeks))] #Cindie Edit/Check this
#weeksAverageUtil = [[] for i in range(self.numWeeks)] #orig
#for w in range(self.numWeeks): #orig
for w in range(int(self.numWeeks)):
cathWeekTotal = 0
epWeekTotal = 0
week = weeksUtil[w]
for d in range(5):
cathWeekTotal += week[d][0]
epWeekTotal += week[d][1]
weeksAverageUtil[w].append(cathWeekTotal/5)
weeksAverageUtil[w].append(epWeekTotal/5)
return weeksAverageUtil
def getProcsByMinuteVolume(self,allProcs, params):
'''
'''
emergencies = [x for x in allProcs if x[params.iSchedHorizon]==1.0]
sameDay = [x for x in allProcs if x[params.iSchedHorizon]==2.0]
sameWeek = [x for x in allProcs if x[params.iSchedHorizon]==3.0]
emergFlex = [x for x in emergencies if x[params.iRoom]==2.0]
emergFlexMin = sum(x[params.iProcTime] for x in emergFlex)
emergInflex = [x for x in emergencies if x[params.iRoom]!=2.0]
emergInflexMin = sum(x[params.iProcTime] for x in emergInflex)
sameDayFlex = [x for x in sameDay if x[params.iRoom]==2.0]
sameDayFlexMin = sum(x[params.iProcTime] for x in sameDayFlex)
sameDayInflex = [x for x in sameDay if x[params.iRoom]!=2.0]
sameDayInflexMin = sum(x[params.iProcTime] for x in sameDayInflex)
sameWeekFlex = [x for x in sameWeek if x[params.iRoom]==2.0]
sameWeekFlexMin = sum(x[params.iProcTime] for x in sameWeekFlex)
sameWeekInflex = [x for x in sameWeek if x[params.iRoom]!=2.0]
sameWeekInflexMin = sum(x[params.iProcTime] for x in sameWeekInflex)
return [emergFlexMin,emergInflexMin,sameDayFlexMin,sameDayInflexMin,sameWeekFlexMin,sameWeekInflexMin]
#####################################################################################################
##################################### END TIME PERIOD DATA TYPE #####################################
#####################################################################################################
| true |
25a6e6a306f09b7ed5cdeb0fa86013e4bd727793 | Python | Min-h-96/BST-calc | /BST-calc-1.py | UTF-8 | 3,238 | 3.953125 | 4 | [] | no_license | import sys
def postfix(list):
    """Reorder single-character infix tokens into this program's
    operand/operator interleaving: [op0, op1, sign0, op2, sign1, ...].

    Tokens with ord >= 48 (digits '0'-'9') are operands; '+', '-', '*', '/'
    (all ord < 48) are operators. Assumes one more operand than operators.

    Fix: the previous version re-ran the partition loop len(list) times
    (accidental O(n^2)) and reused the loop variable name; a single pass
    produces the same result.

    NOTE: the parameter name shadows the builtin ``list``; kept for
    backward compatibility with existing callers.
    """
    operands = []
    operator = []
    for token in list:
        if ord(token) >= 48:
            operands.append(token)
        else:
            operator.append(token)
    # First operand, then alternating (operand, operator) pairs.
    lst_postfix = [operands[0]]
    for i in range(len(operator)):
        lst_postfix.append(operands[i + 1])
        lst_postfix.append(operator[i])
    return lst_postfix
class Node(object):
    """A binary-tree node holding one token (operand or operator string)."""
    def __init__(self, data):
        self.data = data    # token string
        self.left = None    # left child, attached later by the tree
        self.right = None   # right child, attached later by the tree
class BinarySearchTree(object):
    """Expression tree used as a left-to-right calculator.

    Tokens are inserted in reverse "postfix" order (see ``postfix``):
    operands go to the right child when free, operators and overflow
    operands descend down the left spine. ``calculate`` repeatedly folds
    the deepest (left, right) pair with its parent operator until only a
    number remains at the root.
    """
    def __init__(self):
        self.root = None
    def insert(self, data):
        """Insert one token; create the root if the tree is empty."""
        # If there is no root node yet, create it.
        if self.root is None:
            self.root = Node(data)
        else:
            self.insertNode(self.root, data)
    def insertNode(self, currentNode, data):
        """Recursively place a token below currentNode.

        NOTE(review): operands are detected with ord(data) >= 49, i.e.
        '1'-'9' — the token '0' would be treated as an operator here while
        calculateNode uses >= 48; confirm whether '0' input is possible.
        """
        # When an operand arrives:
        if ord(data) >= 49:
            # Right child already holds a number:
            if currentNode.right:
                if currentNode.left: # left child holds an operator: descend
                    self.insertNode(currentNode.left, data)
                else: # left child free: store the operand there
                    currentNode.left = Node(data)
            # Right child free: store the operand there.
            else:
                currentNode.right = Node(data)
        # When an operator arrives:
        else:
            if currentNode.left:
                self.insertNode(currentNode.left, data)
            else:
                currentNode.left = Node(data)
    def calculate(self):
        """Evaluate the whole tree and return the result as a string."""
        return self.calculateNode(self.root)
    def calculateNode(self, currentNode):
        """Fold one (operand, operand, operator) triple per recursion step.

        Division results are rounded to 1 decimal place; all intermediate
        results are stored back as strings on the nodes.
        """
        # When the root value has become an operand, return it.
        if ord(self.root.data[0]) >= 48:
            return self.root.data
        # When the left child holds an operand, apply this node's operator.
        if ord(currentNode.left.data[0]) >= 49:
            if currentNode.data == '+':
                currentNode.data = str(
                    float(currentNode.left.data) + float(currentNode.right.data))
            elif currentNode.data == '-':
                currentNode.data = str(
                    float(currentNode.left.data) - float(currentNode.right.data))
            elif currentNode.data == '*':
                currentNode.data = str(
                    float(currentNode.left.data) * float(currentNode.right.data))
            elif currentNode.data == '/':
                currentNode.data = str(round(float(currentNode.left.data) / float(currentNode.right.data), 1))
            # Restart from the root: the tree shrank by one level.
            return self.calculateNode(self.root)
        # Left child still holds an operator: recurse deeper first.
        else:
            return self.calculateNode(currentNode.left)
if __name__ == "__main__":
    # Read one whitespace-separated expression line from stdin,
    # e.g. "2 + 7 * 5 - 7" (single-character tokens only).
    prob = list(map(str, sys.stdin.readline().split()))
    # arr = ['2', '+', '7', '*', '5', '-', '7']
    postfix_form = postfix(prob)
    bst = BinarySearchTree()
    # Tokens are inserted in reverse so the tree folds left-to-right.
    for i in reversed(postfix_form):
        bst.insert(i)
    print(postfix_form)
    print(bst.calculate())
| true |
3456a0f06147b3344aee6baddf14dfdba710e16f | Python | ittoyou/2-1807 | /16day/3-生成器.py | UTF-8 | 233 | 3.03125 | 3 | [] | no_license | '''
def test():
a,b = 0,1
for i in range(10):
a,b = b,a+b
yield b
t = test()
for i in t:
print(i)
'''
def test():
a,b = 0,1
for i in range(10):
a,b = b,a+b
yield b
t = test()
for i in t:
print(i)
| true |
34d30a3c5b544a0d58d2b569f9ddad9312dcf14b | Python | ChJL/LeetCode | /easy/599. Minimum Index Sum of Two Lists.py | UTF-8 | 545 | 3.046875 | 3 | [] | no_license | class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
d1 = {}
d2 = {}
count = 0
for i in list1:
d1[i] = count
count += 1
count = 0
for i in list2:
if i in d1:
d2[i] = count + d1[i]
count +=1
temp = min(d2.values())
res = [key for key in d2 if d2[key] == temp]
# res.append(min(d2.keys(), key=lambda k: d2[k]) <- this could only take one item of min
return res | true |
9a2bf64c4a3530f6c97b00aae02ee3d35d733835 | Python | Leader0721/Python_Spider | /douyu/douyu.py | UTF-8 | 1,286 | 3.03125 | 3 | [] | no_license | # coding=utf-8
# 爬取斗鱼颜值妹子图片
import os
import re
from urllib import request
import time
from bs4 import BeautifulSoup
# 定义为方法
def getHTML(url):
    """Fetch *url* and return the raw response body as bytes.

    Uses a context manager so the HTTP response is always closed
    (the previous version leaked the open response object).
    """
    with request.urlopen(url) as response:
        return response.read()
# Directory where downloaded images are saved
FOLDER_SAVE = r'E:/Sprider/douyu'
# 开始根据链接爬图片保存数据
def getImage(html):
    """Parse the Douyu listing page HTML and download every streamer
    thumbnail into FOLDER_SAVE, one file per streamer title."""
    if not os.path.exists(FOLDER_SAVE):
        os.makedirs(FOLDER_SAVE)
    # Create a soup object from the raw page data.
    soup1 = BeautifulSoup(html)
    soupL = soup1.select('#live-list-contentbox')
    print(str(soupL))
    strone = str(soupL)
    # Re-parse the selected fragment to iterate its <li> entries.
    soup2 = BeautifulSoup(strone)
    soupLi = soup2.select('li')
    for soupLione in soupLi:
        # Extract title and image URL from a single <li> tag.
        soupone = BeautifulSoup(str(soupLione))
        name = soupone.a['title']
        print('开始下载:%s' % name)
        url = soupone.img['data-original']
        try:
            # Download the thumbnail; failures are logged, not fatal.
            request.urlretrieve(url, FOLDER_SAVE + '/%s.jpg' % name)
            print(url)
        except OSError:
            print('出现异常,地址为:%s' % url)
        finally:
            # Throttle requests a little between downloads.
            time.sleep(0.5)
# Entry point: fetch the listing page, then download all thumbnails.
fileimg = getHTML('https://www.douyu.com/directory/game/yz')
getImage(fileimg)
# Douyu "beauty" category URL: https://www.douyu.com/directory/game/yz
| true |
b58b2870f853c351a1a275d6321619a4414b958c | Python | enabhishek94/SeleniumFramework_Python | /Dummy/BaseTest.py | UTF-8 | 648 | 2.5625 | 3 | [] | no_license | import unittest
from Library import *
import pytest
class BaseTest(unittest.TestCase):
    """Shared Selenium fixture: opens the site before each test and
    captures a screenshot before closing the browser after each test."""
    def setUp(self):
        """Start Chrome and navigate to the site under test."""
        print("\n Running setup")
        # NOTE(review): hard-coded local driver path — consider configuring
        # via environment/config instead.
        driver_path = "C:\\Users\\abc\\Desktop\\Python P\\chromedriver.exe"
        self.driver = webdriver.Chrome(executable_path=driver_path)
        self.driver.get("https://thetestingworld.com/testings")
        self.driver.maximize_window()
    def tearDown(self):
        """Take a screenshot (best effort), then always close the browser."""
        print('\n running tear down')
        sw = SeleniumWrapper(self.driver)
        try:
            sw.capture_screen_shot()
        # Broad catch: a failed screenshot must not block browser cleanup.
        except Exception as e:
            print(e)
        finally:
            self.driver.close()
| true |
3399bb41b056c3881890b343e3cc725cc12a7fe9 | Python | mcauser/CircuitPython_TerminalPixels | /circuitpython_terminalpixels.py | UTF-8 | 1,319 | 3.1875 | 3 | [
"MIT"
] | permissive |
from math import ceil
import sys
class FakePixel:
    """Emulates a NeoPixel-style strip by painting colored cells in an
    ANSI-capable terminal using 24-bit background-color escape codes.

    Pixels are stored as (r, g, b) tuples; integer colors 0xRRGGBB are
    accepted on assignment and unpacked automatically.
    """
    def __init__(self, n=80, x=1, y=1, rows=1, auto_write=True):
        """Create *n* black pixels laid out over *rows* terminal rows,
        anchored at terminal position (x, y)."""
        self.n = n
        self.bpp = 3                        # bytes per pixel (RGB)
        self._x = x
        self._y = y
        self._rows = rows
        self._chars_per_row = n // rows
        self.pixels = [(0, 0, 0)] * n
        self.auto_write = auto_write
        # Clear the whole terminal screen (ESC [2J).
        sys.stdout.write('%c[2J' % 27)

    def show(self):
        """Render every pixel row as colored space characters."""
        out = sys.stdout
        width = self._chars_per_row
        # Save the cursor and jump to the strip's origin.
        out.write('%c[s%c[%d;%dH' % (27, 27, self._x, self._y))
        for row in range(self._rows):
            out.write('%c[%d;%dH' % (27, self._y + row, self._x))
            row_pixels = self.pixels[row * width:(row + 1) * width]
            out.write(''.join('%c[48;2;%d;%d;%dm ' % (27, px[0], px[1], px[2])
                              for px in row_pixels))
        # Restore the saved cursor position.
        out.write('%c[u' % 27)
        out.flush()

    def __setitem__(self, key, value):
        """Set pixel *key*; accepts an (r, g, b) tuple or a 0xRRGGBB int."""
        if isinstance(value, int):
            value = ((value >> 16) & 255, (value >> 8) & 255, value & 255)
        self.pixels[key] = value
        if self.auto_write:
            self.show()

    def __getitem__(self, item):
        """Return the stored color tuple for pixel *item*."""
        return self.pixels[item]

    def __len__(self):
        """Number of pixels in the strip."""
        return len(self.pixels)

    def fill(self, color):
        """Set every pixel to *color*, honoring auto_write per pixel."""
        for index in range(self.n):
            self[index] = color
| true |
b889584694fd98ac354136c81b576ae1cae796ee | Python | borescht/HoQuNM | /hoqunm/data_tools/analysis.py | UTF-8 | 51,708 | 2.515625 | 3 | [
"MIT"
] | permissive | """Provide methodologies for analysing the provided data."""
import json
import logging
from collections import Counter
from datetime import date, datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dataenforce import Dataset, validate
from sklearn import tree
from hoqunm.data_tools.base import (
BEGIN, CART_COLUMNS, CART_COLUMNS_TRANSLATION, CAT_COLUMNS, CURRENT_CLASS,
CURRENT_WARD, END, EXTERNAL, INTER_ARRIVAL, INTERNAL, MAX, MEAN, MIN,
MIN_HEAD, OCCUPANCY, OUTPUT_DIR, PATIENT, POST_CLASS, POST_WARD, PRE_CLASS,
PRE_WARD, SERVICE, SIGN, STD, TIME, WEEK, Min_Head_Name, and_query,
column_query, drop_week_arrival, get_data, make_week, or_query)
from hoqunm.data_tools.preprocessing import Preprocessor
from hoqunm.simulation.evaluators import Evaluator
from hoqunm.simulation.hospital import HospitalSpecs
from hoqunm.utils.utils import annotate_heatmap, get_logger, heatmap
# Silence pandas' SettingWithCopyWarning: the analyser intentionally assigns
# into query()-derived sub-frames and writes the results back via .loc
# (e.g. in _make_ward_occupancy and set_preclass/set_postclass).
pd.set_option('mode.chained_assignment', None)

# pylint: disable=too-many-lines
class CartSpecs:
    """Holding specifications for CART analysis.

    :param wards: Wards to consider for CART.
    :param feature_columns: Columns to consider for analysis.
        Defaults to CART_COLUMNS.
    :param cat_columns: Categorical columns; must be a subset of
        ``feature_columns``. Defaults to CAT_COLUMNS.
    :param max_depth: Maximal tree depth.
    :param min_samples_leaf: Minimal samples per leaf.

    :raises ValueError: If ``cat_columns`` is not a subset of
        ``feature_columns``.
    """
    def __init__(self,
                 wards: List[str],
                 feature_columns: Optional[List[str]] = None,
                 cat_columns: Optional[List[str]] = None,
                 max_depth: int = 4,
                 min_samples_leaf: int = 200):
        self.wards = wards
        self.feature_columns = feature_columns if feature_columns is not None else CART_COLUMNS
        self.cat_columns = cat_columns if cat_columns is not None else CAT_COLUMNS
        self.max_depth = max_depth
        self.min_samples_leaf = min_samples_leaf

        # Categorical columns are encoded/decoded downstream, so they must
        # all be part of the feature set.
        if not all(c in self.feature_columns for c in self.cat_columns):
            # Fixed typo in the error message ("catgerocial").
            raise ValueError("Not all categorical columns in feature columns.")

    @staticmethod
    def load_dict(arguments: Dict[str, Any]) -> "CartSpecs":
        """Create class from Dict with arguments and values in it.

        :param arguments: The dict containing the parameter-argument pairs.

        :return: Class instance.
        """
        return CartSpecs(**arguments)
class Analyser:
"""A class for analysing the data of the hospital. The data should come in
the right format (possibly preprocessed with Preprocessor).
:param filepath: The excel file, where the data is contained.
:param sep: The separator with which the entries of the data file are seperated.
"""
@validate
def __init__(self,
data: Min_Head_Name = pd.DataFrame(columns=MIN_HEAD),
filepath: Path = Path(),
sep: str = ";",
startdate: datetime = datetime(2019, 1, 1),
enddate: datetime = datetime(2020, 1, 1),
datescale: timedelta = timedelta(1),
logger: Optional[logging.Logger] = None,
output_dir: Optional[Path] = None,
**kwargs: Any) -> None:
self.datescale = datescale
if data.empty and filepath != Path():
self.data, self.startdate, self.enddate = get_data(
filepath, sep, **kwargs)
elif data.empty and filepath == Path():
raise ValueError("Empty DataFrame and empty filepath given")
else:
self.data = data.copy()
self.startdate = startdate
self.enddate = enddate
self.output_dir = output_dir if output_dir is not None else OUTPUT_DIR
self.logger = logger if logger is not None else get_logger(
"modeller", self.output_dir.joinpath("modeller.log"))
assert all(column in self.data.columns for column in MIN_HEAD)
self.wards, self.wards_map, self.wards_map_inv = self._make_wards()
self.ward_occupancy = self._make_ward_occupancy()
self.occupancy, self.occupancy_week, self.occupancy_weekend = self._make_occupancy(
)
self.capacities = self._read_capacities()
self.hospital_specs = HospitalSpecs(
capacities=np.array(self.capacities))
self.hospital_specs.ward_map = self.wards_map_inv
self.classes = self._make_classes()
self.hospital_specs.set_U(len(self.classes))
self.class_tree = tree.DecisionTreeRegressor()
self.cart_code_map: Dict[Any, Dict[int, Any]] = dict()
self.cart_graphs: List[Tuple[Any, graphviz.Source]] = []
self.datescale = datescale
self.make_week_column()
def copy(self) -> "Analyser":
"""Copy self into a fresh object.
:return: New instance of Analyser.
"""
other = Analyser(data=self.data.copy())
for key, value in self.__dict__.items():
if not callable(getattr(self, key)):
try:
setattr(other, key, value.copy())
except AttributeError:
setattr(other, key, value)
return other
@staticmethod
def from_preprocessor(
preprocessor: Preprocessor,
output_dir: Optional[Path] = None,
logger: Optional[logging.Logger] = None) -> "Analyser":
"""Make an instance of self from Preprocessor.
:param preprocessor: The preprocessor to use.
:param output_dir: Output dir for plot saving.
:param logger: Logger to use.
:return: Analyser instance.
"""
return Analyser(preprocessor.data,
startdate=preprocessor.startdate,
enddate=preprocessor.enddate,
datescale=preprocessor.datescale,
output_dir=output_dir,
logger=logger)
    def set_capacities(self, capacities: Dict[str, int]) -> None:
        """Set new capacities.

        This is mainly thought for reducing capacities. It is the
        general case, that the occupancies show much more beds than are
        practically available. To have a good distributions comparison,
        we want to adjust the occupancy distributions. Note that we do
        not adjust the ward-wise flows from which the occupancy
        distributions result! We address this by filtering with >= instead of
        ==.

        :param capacities: Mapping for capacities to use.
        """
        for ward, capacity in capacities.items():
            self.capacities.loc[ward] = capacity
        self.hospital_specs.capacities = np.array(self.capacities)
        # squash occupancies
        # Probability mass above the new capacity is folded into the
        # capacity state itself (see _squash_occupancy).
        self.occupancy = self._squash_occupancy(self.occupancy)
        self.occupancy_week = self._squash_occupancy(self.occupancy_week)
        self.occupancy_weekend = self._squash_occupancy(self.occupancy_weekend)
    def _squash_occupancy(self, occupancy: Dataset) -> Dataset:
        """Fold occupancy mass above each ward's capacity into the capacity
        state.

        For every ward column the probability of all states >= capacity is
        summed into the capacity row; rows above it are zeroed out.

        :param occupancy: Occupancy distribution (index: bed count,
            columns: wards). Mutated in place and also returned.
        :return: The squashed occupancy distribution.
        """
        for ward, ser in occupancy.items():
            occupancy.loc[self.capacities[ward],
                          ward] = ser.loc[self.capacities[ward]:].sum()
            occupancy.loc[self.capacities[ward] + 1:, ward] = 0
        return occupancy
    @property
    def occupancies(self) -> Tuple[Dataset, Dataset, Dataset]:
        """Return all different occupancies.

        :return: Overall occupancy, week-day occupancy and weekend
            occupancy distributions.
        """
        return self.occupancy, self.occupancy_week, self.occupancy_weekend
    def make_occupancy(self) -> None:
        """Make the occupancies from the given data.

        Recomputes the ward flows, occupancy distributions, capacities and
        class information, and syncs them into ``self.hospital_specs``.
        Assume datetimes to be float!
        """
        self.ward_occupancy = self._make_ward_occupancy()
        self.occupancy, self.occupancy_week, self.occupancy_weekend = self._make_occupancy(
        )
        self.capacities = self._read_capacities()
        self.hospital_specs.capacities = np.array(self.capacities)
        self.hospital_specs.ward_map = self.wards_map_inv
        self.set_preclass()
        self.set_postclass()
        self.make_classes()
    def _make_ward_occupancy(self) -> Dict[Any, Dataset]:
        """Make the occupancy ward-wise.

        For each ward an event table of arrivals (+1) and departures (-1) is
        built, sorted by time; cumulating the signs yields the number of
        occupied beds over time.

        :return: Mapping ward -> flow DataFrame with columns BEGIN, SIGN,
            OCCUPANCY, TIME, END and week information.
        """
        ward_occupancy = {ward: pd.DataFrame() for ward in self.wards}
        end = (self.enddate - self.startdate) / self.datescale
        for ward in ward_occupancy:
            qry = column_query(CURRENT_WARD, ward)
            ward_data = self.data.query(qry)
            # Stays reaching past the observation end are treated as
            # censored: their departure event is dropped (set to NaN).
            end_qry = column_query(END, end, ">=")
            end_index = ward_data.query(end_qry).index
            ward_data.loc[end_index, END] = float("NaN")
            # One row per event: arrivals count +1, departures -1.
            ward_flow = pd.DataFrame({
                BEGIN:
                list(ward_data[BEGIN]) + list(ward_data[END]),
                SIGN: [1] * len(ward_data) + [-1] * len(ward_data)
            })
            ward_flow = ward_flow.dropna()
            ward_flow = ward_flow.sort_values(by=BEGIN,
                                              axis=0).reset_index(drop=True)
            # Running sum of signs = occupied beds after each event.
            ward_flow.loc[:, OCCUPANCY] = ward_flow[SIGN].cumsum()
            # Holding time of each occupancy level = gap to the next event;
            # the last event has no successor and is dropped.
            time = np.array(ward_flow[BEGIN])[1:] - np.array(
                ward_flow[BEGIN])[:-1]
            ward_flow = ward_flow.iloc[:-1]
            ward_flow.loc[:, TIME] = time
            ward_flow.loc[:, END] = ward_flow[BEGIN] + ward_flow[TIME]
            timeqry = column_query(BEGIN, 0, ">=")
            ward_flow = ward_flow.query(timeqry)
            begin = self.startdate.weekday()
            ward_flow = make_week(ward_flow, begin, BEGIN)
            ward_occupancy[ward] = ward_flow
        return ward_occupancy
    def _make_occupancy(self) -> Tuple[Dataset, Dataset, Dataset]:
        """Make occupancy.

        Aggregates the per-ward event flows into occupancy distributions:
        for every occupancy level the total holding time is summed (overall,
        week days only and weekend days only) and normalised column-wise to
        a probability distribution per ward.

        :return: Overall, week and weekend occupancy distributions
            (index: bed count, columns: wards).
        """
        # Index spans 0..max observed occupancy over all wards.
        occupancy = pd.DataFrame(
            0,
            columns=self.wards,
            index=range(
                max([
                    ward_flow[OCCUPANCY].max()
                    for ward, ward_flow in self.ward_occupancy.items()
                ]) + 1),
            dtype="float")
        occupancy_week = occupancy.copy()
        occupancy_weekend = occupancy.copy()
        for ward, ser in occupancy.items():
            for index in ser.index:
                # Total time the ward spent at exactly `index` occupied beds
                # (WEEK == 0 rows feed the week distribution, WEEK == 1 the
                # weekend distribution).
                all_qry = column_query(OCCUPANCY, index)
                occupancy.loc[index, ward] = self.ward_occupancy[ward].query(
                    all_qry)[TIME].sum()
                week_qry = and_query(column_query(OCCUPANCY, index),
                                     column_query(WEEK, 0))
                occupancy_week.loc[index,
                                   ward] = self.ward_occupancy[ward].query(
                                       week_qry)[TIME].sum()
                weekend_qry = and_query(column_query(OCCUPANCY, index),
                                        column_query(WEEK, 1))
                occupancy_weekend.loc[index,
                                      ward] = self.ward_occupancy[ward].query(
                                          weekend_qry)[TIME].sum()
        # finished
        # Normalise so each ward column sums to 1.
        occupancy = occupancy.divide(occupancy.sum(axis=0), axis=1)
        occupancy_week = occupancy_week.divide(occupancy_week.sum(axis=0),
                                               axis=1)
        occupancy_weekend = occupancy_weekend.divide(
            occupancy_weekend.sum(axis=0), axis=1)
        return occupancy, occupancy_week, occupancy_weekend
    def adjust_occupancy_pacu(self) -> None:
        """Adjust the occupancy for PACU.

        Removes a fixed probability mass (9/28) from the empty state and
        rescales the remaining distribution by 19/28.
        NOTE(review): the constants 9/28 and 19/28 presumably encode the
        fraction of time the PACU ward is closed/open - confirm with the
        data owners.
        """
        if "PACU" in self.occupancy.columns:
            self.occupancy.loc[0,
                               "PACU"] = self.occupancy.loc[0, "PACU"] - 9 / 28
            self.occupancy.loc[:,
                               "PACU"] = self.occupancy.loc[:,
                                                            "PACU"] / (19 / 28)
    def regain_occupancy_pacu(self) -> None:
        """Revert the change made by adjust_occupancy_PACU.

        Exact inverse: rescale by 19/28 and add the 9/28 mass back to the
        empty state.
        """
        if "PACU" in self.occupancy.columns:
            self.occupancy.loc[:,
                               "PACU"] = self.occupancy.loc[:,
                                                            "PACU"] * (19 / 28)
            self.occupancy.loc[0,
                               "PACU"] = self.occupancy.loc[0, "PACU"] + 9 / 28
def _read_capacities(self) -> pd.Series:
"""Read capacities from self.occupancy."""
capacities = pd.Series(0, index=self.occupancy.columns)
for column, item in self.occupancy.iteritems():
capacities.loc[column] = item[item != 0].index[-1]
return capacities
def make_week_column(self) -> None:
"""Make a boolean column, if the arriving day is a weekday or a weekend
day.
Also make a column, indicating which day of the week the arrival
column is.
"""
begin = self.startdate.weekday()
self.data = make_week(self.data, begin, BEGIN)
    def plot_flow(self, capacities: pd.Series, squash: bool = False) -> None:
        """Plot the observed occupancy.

        Draws one figure per ward (one subplot per observation year) showing
        the occupancy over time with min/max/mean summaries and the ward
        capacity; each figure is saved as a PDF in ``self.output_dir``.

        :param capacities: Capacities of individual wards as should be.
        :param squash: If true, squash the flow to given capacities.
        """
        plot_begin = self.startdate
        for ward, ward_flow in self.ward_occupancy.items():
            max_time = ward_flow[END].max(skipna=True)
            # One subplot per 365-day year of observation.
            plot_num = int(np.ceil(max_time / 365))
            plot_height = 0.3 * plot_num * (ward_flow[OCCUPANCY].max() -
                                            ward_flow[OCCUPANCY].min())
            fig = plt.figure(figsize=(12, plot_height))
            for j in range(int(np.ceil(max_time / 365))):
                ax = fig.add_subplot(plot_num, 1, j + 1)
                timeqry = and_query(column_query(BEGIN, j * 365, ">="),
                                    column_query(BEGIN, (j + 1) * 365, "<"))
                data = ward_flow.query(timeqry)
                if squash:
                    # Cap the displayed occupancy at the ward capacity.
                    capacity = capacities[ward]
                    qry_squash = column_query(OCCUPANCY, capacity, ">")
                    index_squash = data.query(qry_squash).index
                    data.loc[index_squash, OCCUPANCY] = capacity
                # Each occupancy level is drawn as a thin horizontal bar
                # spanning its holding time.
                ax.bar(data[BEGIN],
                       0.1,
                       align="edge",
                       bottom=data[OCCUPANCY] - 0.05,
                       width=data[TIME],
                       label=OCCUPANCY,
                       color="b")
                Analyser.plot_min_max_mean_flow(data, j * 365, (j + 1) * 365,
                                                1, ax)
                # Horizontal line marking the nominal ward capacity.
                ax.plot([
                    max(j * 365, data[BEGIN].min()),
                    min((j + 1) * 365, data[BEGIN].max())
                ], [capacities[ward]] * 2,
                        linewidth=5,
                        color="black",
                        label="Ward capacity")
                ax.set_title(
                    "Occupancy for ward: {}, year: {}, squashed: {}.".format(
                        ward, j + 1, squash))
                ax.set_yticks(list(set(data[OCCUPANCY])))
                # Major x ticks at the first of every month, starting with
                # the observation start month.
                x_ticklabels = [
                    date(plot_begin.year + j, m, 1)
                    for m in range(1, 13) if m >= plot_begin.month
                ] + [
                    date(plot_begin.year + j + 1, m, 1)
                    for m in range(1, 13) if m < plot_begin.month
                ]
                x_tick_datetimes = [
                    datetime(plot_begin.year + j, m, 1)
                    for m in range(1, 13) if m >= plot_begin.month
                ] + [
                    datetime(plot_begin.year + j + 1, m, 1)
                    for m in range(1, 13) if m < plot_begin.month
                ]
                x_ticks = [(label - plot_begin) / timedelta(1)
                           for label in x_tick_datetimes]
                ax.set_xticks(x_ticks)
                ax.set_xticklabels(x_ticklabels, rotation=45)
                x_ticks_minor = list(range(j * 365, (j + 1) * 365))
                ax.set_xticks(x_ticks_minor, minor=True)
                ax.set_xlabel("Months")
                ax.set_ylabel("Patients")
                ax.legend()
                ax.grid()
            fig.tight_layout()
            if not squash:
                filename = f"occupancy - ward[{ward}].pdf"
                plt.savefig(self.output_dir.joinpath(filename))
                plt.close()
            else:
                filename = f"occupancy_squashed - ward[{ward}].pdf"
                plt.savefig(self.output_dir.joinpath(filename))
                plt.close()
    @staticmethod
    def plot_min_max_mean_flow(data: Dataset, begin: int, end: int, step: int,
                               ax: plt.axis) -> None:
        """Plot the minimal, maximal and mean occupancies for the given steps
        (intervals).

        For every interval [i, i + step) the minimum, maximum and the
        time-weighted mean occupancy are computed; intervals without events
        carry the last known occupancy forward.

        :param data: The data to use.
        :param begin: The begin time.
        :param end: The last time to use.
        :param step: The windows/step size to use.
        :param ax: The axis to plot to.
        """
        last_occupancy = data.iloc[0][OCCUPANCY]
        occupancy_df = pd.DataFrame([],
                                    columns=[MIN, MAX, MEAN, "Last"],
                                    index=range(begin, end, step))
        begin = int(max(data[BEGIN].min(), begin))
        end = int(min(data[BEGIN].max(), end))
        for i in range(begin, end, step):
            rangeqry = and_query(column_query(BEGIN, i, ">="),
                                 column_query(BEGIN, i + step, "<="))
            range_data = data.query(rangeqry)[[BEGIN, TIME, OCCUPANCY]]
            if not range_data.empty:
                # Clip the last event's holding time to the interval end and
                # prepend the carried-over occupancy at the interval start.
                range_data.loc[range_data.index[-1],
                               TIME] = i + step - range_data.iloc[-1][BEGIN]
                start_time = range_data.iloc[0][BEGIN] - i
                begin_ser = pd.Series([i, start_time, last_occupancy],
                                      index=[BEGIN, TIME, OCCUPANCY])
                last_occupancy = range_data.loc[range_data.index[-1],
                                                OCCUPANCY]
                range_data = range_data.append(begin_ser, ignore_index=True)
            else:
                # No events: the whole interval stays at the last occupancy.
                range_data = pd.DataFrame([[i, step, last_occupancy]],
                                          columns=[BEGIN, TIME, OCCUPANCY],
                                          index=[0])
            min_occupancy = range_data[OCCUPANCY].min()
            max_occupancy = range_data[OCCUPANCY].max()
            # Time-weighted mean, bounded below by the interval minimum.
            mean_occupancy = max(min_occupancy,
                                 (range_data[OCCUPANCY] * range_data[TIME] /
                                  step).sum())
            occupancy_df.loc[i] = [
                min_occupancy, max_occupancy, mean_occupancy, last_occupancy
            ]
        ax.plot(occupancy_df[MIN], linewidth=1, label=MIN, color="y")
        ax.plot(occupancy_df[MAX], linewidth=1, label=MAX, color="g")
        ax.plot(occupancy_df[MEAN], linewidth=1, label=MEAN, color="r")
def plot_occupancy(self) -> None:
"""Plot the observed occupancy distributions."""
ob_ev_all = Evaluator(self.hospital_specs)
ob_ev_all.distributions.occupancy = np.array(self.occupancy).T
ob_ev_all.name = "Whole observation"
ob_ev_week = Evaluator(self.hospital_specs)
ob_ev_week.distributions.occupancy = np.array(self.occupancy_week).T
ob_ev_week.name = "Week observation"
ob_ev_weekend = Evaluator(self.hospital_specs)
ob_ev_weekend.distributions.occupancy = np.array(
self.occupancy_weekend).T
ob_ev_weekend.name = "Weekend observation"
ob_ev_all.plot_against([ob_ev_week, ob_ev_weekend])
    def feature_correlation(self,
                            cart_specs: Optional[CartSpecs] = None) -> None:
        """Compute the correlation of given features.

        Categorical feature columns are converted to integer codes first;
        one correlation heatmap per ward is drawn over the feature columns
        plus the service time.

        :param cart_specs: Specifications for cart analysis to use.
        """
        if SERVICE not in self.data.columns:
            self.make_service()
        data = self.data.copy()
        if cart_specs is None:
            cart_specs = CartSpecs(wards=self.wards)
        hist_columns = cart_specs.feature_columns + [SERVICE]
        # Encode categorical columns as integer codes so .corr() can use them.
        data.loc[:, cart_specs.cat_columns] = data.loc[:, cart_specs.
                                                       cat_columns].astype(
                                                           "category")
        for column in cart_specs.cat_columns:
            data.loc[:, column] = data.loc[:, column].cat.codes
        num_wards = len(self.wards)
        # Two heatmaps per figure row.
        fig = plt.figure(figsize=(12, 4 * int(np.ceil(num_wards / 2))))
        for i, ward in enumerate(self.wards):
            qry = column_query(CURRENT_WARD, ward)
            data_ = data.query(qry)[hist_columns]
            ax = fig.add_subplot(int(np.ceil(num_wards / 2)), 2, i + 1)
            ax.set_title("Ward: {}, number of patients: {}".format(
                ward, data_.shape[0]))
            im, _ = heatmap(data_.corr(),
                            data_.columns,
                            data_.columns,
                            ax=ax,
                            cmap="YlGn",
                            cbarlabel="correlation")
            annotate_heatmap(im, valfmt="{x:.3f}", threshold=0)
        fig.tight_layout()
    def day_arrival(self) -> None:
        """Plot the cumulated arrivals per weekday.

        For every ward one bar chart is drawn: total arrivals per weekday,
        with the arrivals coming from EXTERNAL overlaid as a second series.
        """
        self.make_week_column()
        fig = plt.figure(figsize=(12, 3 * len(self.wards)))
        for i, ward in enumerate(self.wards):
            qry = column_query(CURRENT_WARD, ward)
            data = self.data.query(qry)
            ax = fig.add_subplot(len(self.wards), 1, i + 1)
            ax.set_title("Ward: {}, number of patients: {}".format(
                ward, data.shape[0]))
            # All arrivals counted per weekday, sorted by weekday index.
            pairs: List[Tuple[Any, Any]] = list(
                Counter(data["Weekday"].astype("int")).items())
            pairs.sort(key=lambda x: x[0])
            pairs_ = list(zip(*pairs))
            x_ax = pairs_[0]
            y_ax = pairs_[1]
            ax.bar(x_ax, y_ax)
            # External arrivals only, overlaid on the totals.
            qry_out = column_query(PRE_WARD, EXTERNAL)
            pairs_out: List[Tuple[Any, Any]] = list(
                Counter(data.query(qry_out)["Weekday"].astype("int")).items())
            pairs_out.sort(key=lambda x: x[0])
            pairs_out_ = list(zip(*pairs_out))
            x_ax_o = pairs_out_[0]
            y_ax_o = pairs_out_[1]
            ax.bar(x_ax_o, y_ax_o)
            ax.set_xticks(x_ax)
            ax.set_xticklabels(["Mo", "Tu", "We", "Thu", "Fr", "Sa", "Su"])
        fig.tight_layout()
    @validate
    def rolling_arrival(
            self,
            window: float = 365.,
            pre_ward: Optional[List[str]] = None
    ) -> Dict[Tuple[str, Any], Dataset[MEAN, STD]]:
        """Compute the rolling arrival mean and variance.

        Rolling inter-arrival statistics are computed per (ward, class)
        pair and plotted on a shared figure (one subplot each).

        :param window: The window size to use.
        :param pre_ward: The pre_wards for which the rolling arrival
            shall be computed. Defaults to external arrivals only.
        :return: The rolling_arrival for the respective wards.
        """
        if pre_ward is None:
            pre_ward = [EXTERNAL]
        rolling_arrival_dict = {
            (ward, class_): self.rolling_arrival_ward_class(ward=ward,
                                                            class_=class_,
                                                            pre_ward=pre_ward,
                                                            window=window)
            for class_ in self.classes for ward in self.wards
        }
        # One subplot per (ward, class) combination.
        plot_num = len(self.wards) * len(self.classes)
        fig = plt.figure(figsize=(12, 3 * plot_num))
        for i, key_value in enumerate(rolling_arrival_dict.items()):
            ward_class, rolling_arrival_ = key_value
            ward, class_ = ward_class
            ax = fig.add_subplot(plot_num, 1, i + 1)
            ax.plot(rolling_arrival_.loc[:, MEAN], label=MEAN)
            ax.plot(rolling_arrival_.loc[:, STD], label=STD)
            ax.legend()
            ax.set_title(f"Rolling inter-arrival for ward {ward}\n"
                         f"with window size {window}")
            ax.set_xlabel("Starting time")
            ax.set_ylabel("Days")
            ax.grid(axis="y")
        fig.tight_layout()
        return rolling_arrival_dict
    @validate
    def rolling_arrival_ward_class(self,
                                   ward: Any,
                                   class_: Any,
                                   pre_ward: Optional[List[Any]] = None,
                                   window: float = 365.) -> Dataset[MEAN, STD]:
        """Compute the rolling arrival mean and variance for the given ward and
        class.

        :param ward: The ward under consideration.
        :param class_: The class under consideration.
        :param pre_ward: The pre_wards for which the rolling arrival
            shall be computed. Defaults to external arrivals only.
        :param window: The window size to use.
        :return: The rolling_arrival for the respective ward and class
            (columns MEAN/STD, index: window start time).
        """
        if pre_ward is None:
            pre_ward = [EXTERNAL]
        ward_class_qry = and_query(column_query(CURRENT_WARD, ward),
                                   column_query(CURRENT_CLASS, class_))
        ward_class_data = self.data.query(ward_class_qry).dropna(
            subset=[BEGIN, END])
        # Inter-arrival times between consecutive arrivals from pre_ward.
        ward_class_data.loc[:, "Arrival"] = self.make_inter_arrival(
            ward_class_data, pre_ward=pre_ward)
        ward_class_data = ward_class_data.dropna(subset=["Arrival"])
        last_time = int(max(ward_class_data[BEGIN].dropna().max() - window, 1))
        df = pd.DataFrame(columns=[MEAN, STD], index=range(last_time))
        if ward == "PACU":
            # NOTE(review): week arrivals are dropped for PACU - presumably
            # because PACU follows a special weekly admission pattern;
            # confirm the intended semantics of drop_week_arrival.
            ward_class_data = drop_week_arrival(ward_class_data, week=True)
        for i in range(last_time):
            qry = and_query(column_query(BEGIN, i, ">="),
                            column_query(BEGIN, i + window, "<="))
            data = ward_class_data.query(qry)
            df.loc[i, MEAN] = data["Arrival"].mean()
            df.loc[i, STD] = data["Arrival"].std()
        return df
    @validate
    def rolling_arrival_ratio(
            self,
            window: float = 365.) -> Dict[Tuple[str, Any], Dataset[MEAN, STD]]:
        """Compute the rolling arrival ratio for the given ward and class.

        The ratio of the inter-arrival statistics for all arrivals
        (internal + external) to those for internal-only arrivals is
        computed per (ward, class) pair and plotted.

        :param window: The window size to use.
        :return: The rolling_arrival for the respective ward and class.
        """
        rolling_arrival_ratio_dict = {
            (ward, class_):
            self.rolling_arrival_ward_class(ward=ward,
                                            class_=class_,
                                            pre_ward=[INTERNAL, EXTERNAL],
                                            window=window) /
            self.rolling_arrival_ward_class(
                ward=ward, class_=class_, pre_ward=[INTERNAL], window=window)
            for class_ in self.classes for ward in self.wards
        }
        plot_num = len(self.wards) * len(self.classes)
        fig = plt.figure(figsize=(12, 4 * plot_num))
        for i, key_value in enumerate(rolling_arrival_ratio_dict.items()):
            ward_class, rolling_arrival_ratio_ = key_value
            ward, class_ = ward_class
            ax = fig.add_subplot(plot_num, 1, i + 1)
            ax.plot(rolling_arrival_ratio_[MEAN],
                    label="INTERNAL/(INTERNAL+EXTERNAL)")
            ax.legend()
            rec_qry = and_query(column_query(CURRENT_WARD, ward),
                                column_query(CURRENT_CLASS, class_))
            records = len(self.data.query(rec_qry))
            ax.set_title(
                "Rolling arrival for ward {}, class {} and as "
                "INTERNAL/(INTERNAL+EXTERNAL),\nfor #records: {}, with window size {}"
                .format(ward, class_, records, window))
            ax.set_xlabel("Starting time")
        fig.tight_layout()
        return rolling_arrival_ratio_dict
    @validate
    def rolling_service(
            self,
            window: float = 365.) -> Dict[Tuple[str, Any], Dataset[MEAN, STD]]:
        """Compute the rolling service mean and variance.

        Statistics are computed per (ward, class) pair and plotted on a
        shared figure (one subplot each).

        :param window: The window size to use.
        :return: The rolling_service for the respective wards.
        """
        rolling_service_dict = {(ward, clas):
                                self.rolling_service_ward_class(ward=ward,
                                                                class_=clas,
                                                                window=window)
                                for clas in self.classes
                                for ward in self.wards}
        plot_num = len(self.wards) * len(self.classes)
        fig = plt.figure(figsize=(12, 4 * plot_num))
        for i, key_value in enumerate(rolling_service_dict.items()):
            ward_class, rolling_service_ = key_value
            ward, clas = ward_class
            ax = fig.add_subplot(plot_num, 1, i + 1)
            ax.plot(rolling_service_[MEAN], label=MEAN)
            ax.plot(rolling_service_[STD], label=STD)
            ax.legend()
            ax.set_title("Rolling service for ward {}\n"
                         "with window size {}".format(ward, window))
            ax.set_xlabel("Starting time")
            ax.set_ylabel("Days")
            ax.grid(axis="y")
        fig.tight_layout()
        return rolling_service_dict
    @validate
    def rolling_service_ward_class(self,
                                   ward: Any,
                                   class_: Any,
                                   window: float = 365.) -> Dataset[MEAN, STD]:
        """Compute the rolling service mean and variance for the given ward and
        class.

        :param ward: The ward under consideration.
        :param class_: The class under consideration.
        :param window: The window size to use.
        :return: The rolling_service for the respective ward and class
            (columns MEAN/STD, index: window start time).
        """
        # Fills the SERVICE column (defined elsewhere in this module).
        self.make_service()
        ward_class_qry = and_query(column_query(CURRENT_WARD, ward),
                                   column_query(CURRENT_CLASS, class_))
        ward_class_data = self.data.query(ward_class_qry)
        ward_class_data = ward_class_data.dropna(subset=[BEGIN, END])
        last_time = int(max(ward_class_data[BEGIN].max() - window, 1))
        df = pd.DataFrame(columns=[MEAN, STD], index=range(last_time))
        for i in range(last_time):
            qry = and_query(column_query(BEGIN, i, ">="),
                            column_query(BEGIN, i + window, "<="))
            data = ward_class_data.query(qry)
            df.loc[i, MEAN] = data[SERVICE].mean()
            df.loc[i, STD] = data[SERVICE].std()
        return df
def rolling_routing(self,
window: float = 365.,
incoming: bool = False) -> Dict[str, Dataset]:
"""Compute the rolling routings for a given ward assuming just one
class.
:param window: The window size to use.
:param incoming: Wether to consider incoming or outgoing patients.
:return: The rolling_routing for the respective wards.
"""
rolling_routing_dict = {
ward: self.rolling_routing_ward(ward=ward,
window=window,
incoming=incoming)
for ward in self.wards
}
plot_num = len(self.wards)
fig = plt.figure(figsize=(12, 4 * plot_num))
for i, key_value in enumerate(rolling_routing_dict.items()):
ward, rolling_routing_ = key_value
ax = fig.add_subplot(plot_num, 1, i + 1)
self.plot_rolling_routing(ward, rolling_routing_, window, ax)
fig.tight_layout()
return rolling_routing_dict
@staticmethod
def plot_rolling_routing(ward: Any, rolling_routing_: Dataset,
window: float, ax: plt.axis) -> None:
"""Plot the computed rolling routing.
:param ward: The ward to plot it for.
:param rolling_routing_: The computed rolling routing DataFrame.
:param window: The window size.
:param ax: The axis to plot to.
"""
for ward2, rolling_routing_ward in rolling_routing_.iteritems():
ax.plot(rolling_routing_ward, label=ward2)
ax.legend()
ax.set_title("Rolling routing for ward {} \n"
"with window size {}".format(ward, window))
ax.set_xlabel("Starting time")
ax.set_ylabel("Routing probability")
ax.grid(axis="y")
    def rolling_routing_ward(self,
                             ward: Any,
                             window: float = 365.,
                             incoming: bool = False) -> Dataset:
        """Compute the rolling routing for a given ward.

        All patient classes are collapsed into a single class (0); for each
        window start the routing probability towards every other ward is
        estimated, with EXTERNAL taking the remaining probability mass.

        :param ward: The ward under consideration.
        :param window: The window size to use.
        :param incoming: Analyse incoming or outgoing.
        :return: The rolling_routing for the respective ward
            (columns: target wards, index: window start time).
        """
        data = self.data.copy()
        if POST_CLASS not in data.columns:
            data[POST_CLASS] = 0
        # Collapse classes: routing is estimated class-independently.
        data.loc[:, [CURRENT_CLASS, POST_CLASS]] = 0
        last_time = int(max(data[BEGIN].max() - window, 1))
        post_wards = list(data[POST_WARD].dropna().unique())
        post_wards.remove(INTERNAL)
        if incoming:
            post_wards.append(INTERNAL)
        df = pd.DataFrame(columns=post_wards, index=range(last_time))
        # EXTERNAL is not estimated directly; it is set as complement below.
        post_wards.remove(EXTERNAL)
        for i in range(last_time):
            qry = and_query(column_query(BEGIN, i, ">="),
                            column_query(BEGIN, i + window, "<="))
            data_ = data.query(qry)
            for ward2 in post_wards:
                r = self.compute_routing(data_,
                                         ward,
                                         ward2,
                                         0,
                                         0,
                                         incoming=incoming)
                df.loc[i, ward2] = r
            df.loc[i, EXTERNAL] = 1 - df.loc[i].sum()
        return df
    def rolling_occupancy(self,
                          window: float = 365.,
                          step: float = 183.) -> Dict[str, Dataset]:
        """Compute the rolling occupancy distributions.

        :param window: The window size to use.
        :param step: The stepsize. We do not want to have it rolling over all days,
            so only step_wise.
        :return: The rolling_occupancy for the respective wards
        """
        rolling_occupancy_dict = {
            ward: self.rolling_occupancy_ward(ward, window=window, step=step)
            for ward in self.wards
        }
        plot_num = len(self.wards)
        fig = plt.figure(figsize=(12, 4 * plot_num))
        for i, key_value in enumerate(rolling_occupancy_dict.items()):
            ward, rolling_occupancy_ = key_value
            ax = fig.add_subplot(plot_num, 1, i + 1)
            # Dummy capacities (= max observed occupancy) so Evaluator can
            # render the distributions.
            capacities = np.array([rolling_occupancy_.index[-1]] *
                                  len(rolling_occupancy_.columns))
            evaluator = Evaluator(HospitalSpecs(capacities))
            # squash occupancy to max capacity
            evaluator.distributions.occupancy = np.array(rolling_occupancy_).T
            evaluator.plot_occupancy(ax=ax)
            ax.legend(rolling_occupancy_.columns)
            rec_qry = column_query(CURRENT_WARD, ward)
            records = len(self.data.query(rec_qry))
            ax.set_title(
                "Rolling occupancy for ward {}\n"
                "for #records: {}, with window size {} and step size {}".
                format(ward, records, window, step))
        fig.tight_layout()
        return rolling_occupancy_dict
    def rolling_occupancy_ward(self,
                               ward: Any,
                               window: float = 365.,
                               step: float = 183.) -> Dataset:
        """Compute the rolling occupancy distributions for a given ward.

        :param ward: The ward under consideration.
        :param window: The window size to use.
        :param step: The stepsize. We do not want to have it rolling over all days,
            so only step_wise.
        :return: The rolling_occupancy for the respective ward
            (columns: window start times, index: bed count).
        """
        ward_flow = self.ward_occupancy[ward]
        # Number of windows of size `window`, advanced by `step` each time.
        steps = max(int(np.ceil(
            (ward_flow[BEGIN].max() - window) / step)), 1) + 1
        occupancy = pd.DataFrame(0,
                                 columns=[step * i for i in range(steps)],
                                 index=range(ward_flow[OCCUPANCY].max() + 1),
                                 dtype="float")
        for i in range(steps):
            for index in occupancy.index:
                # Total holding time at `index` beds within this window.
                qry = and_query(column_query(OCCUPANCY, index),
                                column_query(BEGIN, i * step, ">"),
                                column_query(BEGIN, i * step + window, "<"))
                occupancy.loc[index,
                              i * step] = ward_flow.query(qry)[TIME].sum()
        # finished
        # Normalise each window column to a probability distribution.
        occupancy = occupancy.divide(occupancy.sum(axis=0), axis=1)
        return occupancy
    def cart_classes(
            self,
            cart_specs: Optional[CartSpecs] = None
    ) -> Tuple[Dict[Any, Dict[int, Any]], List[Tuple[Any, graphviz.Source]]]:
        """Decide on classes using CART; assume values in features to be
        sortable and already numerical.

        Per ward a regression tree is fitted on the feature columns against
        END - BEGIN; the tree's leaf ids are written to the data as the
        patient classes.

        :param cart_specs: Specifications to use.
        :return: Mapping of categorical codes back to their original values
            and the graphviz source files holding the decision trees.
        """
        if cart_specs is None:
            cart_specs = CartSpecs(wards=self.wards)
        # convert values to categories first
        data = self.data.copy()
        data[cart_specs.feature_columns] = data[
            cart_specs.feature_columns].astype("category")
        # Remember code -> original value so the trees stay interpretable.
        code_map = {
            feature: dict(enumerate(data[feature].cat.categories))
            for feature in cart_specs.cat_columns
        }
        for feature in cart_specs.cat_columns:
            data[feature] = data[feature].cat.codes
        graphs = []
        for ward in cart_specs.wards:
            qry = column_query(CURRENT_WARD, ward)
            # Fit only on completed stays (BEGIN and END present).
            data_ = data.query(qry).dropna(subset=[BEGIN, END])
            X = np.array(data_.loc[:, cart_specs.feature_columns])
            y = np.array(data_.loc[:, END] - data_.loc[:, BEGIN])
            clf = tree.DecisionTreeRegressor(
                max_depth=cart_specs.max_depth,
                min_samples_leaf=cart_specs.min_samples_leaf)
            clf_fit = clf.fit(X, y)
            self.class_tree = clf_fit
            # Apply to all stays of the ward; each stay gets its leaf id
            # as CURRENT_CLASS.
            data_apply = data.query(qry)
            X_apply = np.array(data_apply.loc[:, cart_specs.feature_columns])
            leaf_id = clf.apply(X_apply)
            self.data.loc[data_apply.index, CURRENT_CLASS] = leaf_id
            self.set_preclass()
            self.set_postclass()
            self.make_classes()
            # Translate feature names for the rendered tree where available.
            feature_names = cart_specs.feature_columns.copy(
            ) if cart_specs.feature_columns is not None else []
            for i, name in enumerate(feature_names):
                if CART_COLUMNS_TRANSLATION.get(name, None) is not None:
                    feature_names[i] = CART_COLUMNS_TRANSLATION[name]
            dot_data = tree.export_graphviz(clf,
                                            out_file=None,
                                            feature_names=feature_names,
                                            filled=True,
                                            rounded=True,
                                            special_characters=True)
            graph = graphviz.Source(dot_data)
            graphs.append((ward, graph))
        self.cart_code_map = code_map
        self.cart_graphs = graphs
        return code_map, graphs
    def set_preclass(self) -> None:
        """Analyze the data and set the preclasses from given current
        classes.

        For every patient the stays are sorted chronologically and each
        stay's PRE_CLASS becomes the CURRENT_CLASS of the previous stay;
        the first stay keeps its own CURRENT_CLASS as PRE_CLASS.
        """
        self.data[PRE_CLASS] = float("NaN")
        for patient in set(self.data[PATIENT]):
            patient_data = self.data.query(column_query(
                PATIENT, patient)).sort_values(by=BEGIN)
            # Shift CURRENT_CLASS one stay forward in time via re-indexing.
            class_data = patient_data.iloc[:-1][CURRENT_CLASS]
            class_data.index = patient_data.index[1:]
            patient_data[PRE_CLASS] = class_data
            patient_data.loc[patient_data.index[0],
                             PRE_CLASS] = patient_data.loc[
                                 patient_data.index[0], CURRENT_CLASS]
            self.data.loc[patient_data.index] = patient_data
    def set_postclass(self) -> None:
        """Analyze the data and set the postclasses from given current
        classes.

        For every patient the stays are sorted chronologically and each
        stay's POST_CLASS becomes the CURRENT_CLASS of the following stay;
        the last stay keeps its own CURRENT_CLASS as POST_CLASS.
        """
        self.data[POST_CLASS] = float("NaN")
        for patient in set(self.data[PATIENT]):
            patient_data = self.data.query(column_query(
                PATIENT, patient)).sort_values(by=BEGIN)
            # Shift CURRENT_CLASS one stay backward in time via re-indexing.
            class_data = patient_data.iloc[1:][CURRENT_CLASS]
            class_data.index = patient_data.index[:-1]
            patient_data[POST_CLASS] = class_data
            patient_data.loc[patient_data.index[-1],
                             POST_CLASS] = patient_data.loc[
                                 patient_data.index[-1], CURRENT_CLASS]
            self.data.loc[patient_data.index] = patient_data
def make_classes(self) -> None:
"""Read classes from data."""
self.classes = self._make_classes()
self.hospital_specs.set_U(len(self.classes))
def _make_classes(self) -> List[Any]:
"""Read classes form data."""
classes = list(self.data[CURRENT_CLASS].unique())
classes.sort()
return classes
def make_wards(self) -> None:
"""Read wards from data."""
self.wards, self.wards_map, self.wards_map_inv = self._make_wards()
def _make_wards(self) -> Tuple[List[Any], Dict[Any, int], Dict[int, Any]]:
"""Make the ward list with index mapping from the data."""
wards: List[Any] = list(self.data[CURRENT_WARD].unique())
wards.sort()
wards_map = {ward: i for i, ward in enumerate(wards)}
wards_map_inv = dict(enumerate(wards))
return wards, wards_map, wards_map_inv
@staticmethod
def compute_routing(data: Dataset,
                    ward1: Any,
                    ward2: Any,
                    class1: Any,
                    class2: Any,
                    incoming: bool = False) -> float:
    """Compute the routing probability for 2 given wards and 2 given
    classes.

    :param data: The data under consideration.
    :param ward1: The preceding ward.
    :param ward2: The current ward.
    :param class1: The preceding class.
    :param class2: The current class.
    :param incoming: Whether to consider incoming or outgoing patients.
    :return: The probability.
    """
    # Direction decides which flow columns describe the transition.
    flow_ward, flow_class = ((PRE_WARD, PRE_CLASS) if incoming
                             else (POST_WARD, POST_CLASS))
    # All stays in (ward1, class1) -> denominator of the ratio.
    base_qry = and_query(column_query(CURRENT_WARD, ward1),
                         column_query(CURRENT_CLASS, class1),
                         column_query(BEGIN, 0, ">="))
    # Stays in (ward1, class1) that flow to (ward2, class2).
    flow_qry = and_query(column_query(flow_ward, ward2),
                         column_query(CURRENT_WARD, ward1),
                         column_query(flow_class, class2),
                         column_query(CURRENT_CLASS, class1),
                         column_query(BEGIN, 0, ">="))
    denominator = data.query(base_qry).dropna(subset=[BEGIN, END])
    numerator = data.query(flow_qry).dropna(subset=[BEGIN, END])
    # No observed stays at all means probability 0 by convention.
    return len(numerator) / len(denominator) if len(denominator) > 0 else 0
@staticmethod
def make_inter_arrival(data: Dataset,
                       pre_ward: Optional[List[Any]] = None) -> pd.Series:
    """Iterate over arrivals and subtract the last arrival from the
    current arrival. Assume data for a given ward and class.

    :param data: The data under investigation (for a special ward/class).
    :param pre_ward: The pre_wards to consider.
    :return: The inter-arrival times for the given data.
    """
    # Default: consider arrivals from both external and internal sources.
    if pre_ward is None:
        pre_ward = [EXTERNAL, INTERNAL]
    qry = and_query(
        or_query(
            *[column_query(PRE_WARD, pre_ward_)
              for pre_ward_ in pre_ward]), column_query(BEGIN, 0, ">="))
    data = data.query(qry)[BEGIN]
    data = data.sort_values()
    if not data.empty:
        # Differences of consecutive arrival times; indices are dropped
        # first so the subtraction aligns positionally (row i+1 - row i).
        arrival_data = (data.iloc[1:].reset_index(drop=True) -
                        data.iloc[:-1].reset_index(drop=True))
    else:
        arrival_data = data
    # Re-attach the original labels of the later arrival of each pair
    # (an empty slice when there are fewer than two arrivals).
    arrival_data.index = data.index[1:]
    arrival_data.name = INTER_ARRIVAL
    arrival_data = arrival_data.dropna()
    # Simultaneous arrivals (zero gaps) are excluded.
    arrival_data = arrival_data[arrival_data != 0]
    return arrival_data
def make_service(self, classes: Optional[List[int]] = None) -> None:
    """Fill the SERVICE column with per-stay service times (END - BEGIN)
    for every ward/class combination.

    :param classes: The list of class indices under consideration.
    """
    if classes is None:
        # Fall back to the classes derived from the data, or a single
        # dummy class when none have been made yet.
        classes = getattr(self, "classes", [0])
    self.data.loc[:, SERVICE] = float("NaN")
    # Query a snapshot so the writes below cannot disturb the lookups.
    snapshot = self.data.copy()
    for ward in self.wards:
        for class_ in classes:
            qry = and_query(column_query(CURRENT_WARD, ward),
                            column_query(CURRENT_CLASS, class_),
                            column_query(BEGIN, 0, ">="))
            stays = snapshot.query(qry).dropna(subset=[BEGIN, END])
            if stays.empty:
                continue
            durations = stays[END] - stays[BEGIN]
            self.data.loc[durations.index, SERVICE] = durations
    self.data.loc[:, SERVICE] = self.data[SERVICE].astype("float")
def analyse(preprocessor: Preprocessor,
            wards: List[str],
            capacities: List[int],
            adjust_pacu_occupancy: bool = True,
            output_dir: Path = OUTPUT_DIR,
            logger: Optional[logging.Logger] = None) -> Analyser:
    """Analyse data from preprocessor.

    :param preprocessor: Preprocessor to use.
    :param wards: Wards to consider.
    :param capacities: Capacity per ward to consider.
    :param adjust_pacu_occupancy: Adjust pacu occupancy because of weekends.
    :param output_dir: Directory for plot saving.
    :param logger: Logger to use for logging.
    :return: Analyser instance with analysed data.
    """
    if not output_dir.is_dir():
        output_dir.mkdir()
    if logger is None:
        logger = get_logger(__file__,
                            file_path=output_dir.joinpath(
                                f"{Path(__file__).resolve().stem}.log"))
    analyser = Analyser.from_preprocessor(preprocessor=preprocessor,
                                          output_dir=output_dir,
                                          logger=logger)
    # Lazy %-style arguments: the message is only rendered when the log
    # level is enabled (and renders identically to the old .format calls).
    analyser.logger.info("Number of entries in data: %s.",
                         len(analyser.data))
    analyser.logger.info("Entries per ward: %s",
                         Counter(analyser.data[CURRENT_WARD]))
    analyser.logger.info("Make occupancy.")
    analyser.make_occupancy()
    analyser.logger.info("Compute flow and make plots.")
    ward_capacity = pd.Series(dict(zip(wards, capacities)))
    analyser.plot_flow(capacities=ward_capacity, squash=False)
    analyser.plot_flow(capacities=ward_capacity, squash=True)
    analyser.plot_occupancy()
    s = ", ".join(wards)
    filename = f"distributions - wards[{s}].pdf"
    plt.savefig(output_dir.joinpath(filename))
    plt.close()
    if "PACU" in wards and adjust_pacu_occupancy:
        analyser.logger.info("Adjust PACU occupancy.")
        analyser.adjust_occupancy_pacu()
    # Mutate a copy so the returned analyser keeps its original state.
    analyser_ = analyser.copy()
    analyser_.set_capacities(capacities=ward_capacity)
    ob_ev_all = Evaluator(analyser_.hospital_specs)
    ob_ev_all.distributions.occupancy = np.array(analyser_.occupancy).T
    Evaluator.plot_many(evaluation_results=[ob_ev_all],
                        colors=["r"],
                        markers=["*"],
                        labels=["Real observation"])
    # Plain string: there are no placeholders, so no f-prefix is needed.
    filename = "real_observation.pdf"
    plt.savefig(output_dir.joinpath(filename))
    plt.close()
    return analyser
def advanced_analysis(preprocessor: Preprocessor,
                      wards: List[str],
                      capacities: List[int],
                      cart_specs: CartSpecs,
                      output_dir: Path,
                      window_size: float = 90.,
                      adjust_pacu_occupancy: bool = True,
                      logger: Optional[logging.Logger] = None) -> Analyser:
    """Undertake some advanced analysis on existing Analyser class.

    :param preprocessor: Preprocessor to use.
    :param wards: Wards to consider.
    :param capacities: Capacity per ward to consider.
    :param cart_specs: Specifications for cart to use.
    :param output_dir: Output dir for plot saving.
    :param window_size: Window_size for rolling plots.
    :param adjust_pacu_occupancy: Adjust pacu occupancy because of weekends.
    :param logger: Logger to use for logging.
    :return: Analyser instance with analysed data.
    """
    if logger is None:
        logger = get_logger(__file__,
                            file_path=output_dir.joinpath(
                                f"{Path(__file__).resolve().stem}.log"))
    analyser = analyse(preprocessor=preprocessor,
                       wards=wards,
                       capacities=capacities,
                       adjust_pacu_occupancy=adjust_pacu_occupancy,
                       output_dir=output_dir,
                       logger=logger)
    analyser.feature_correlation(cart_specs=cart_specs)
    # Plain strings below: no placeholders, so no f-prefix is needed.
    plt.savefig(output_dir.joinpath("Feature correlation.pdf"))
    plt.close()
    for pre_ward in [[EXTERNAL], [INTERNAL], [EXTERNAL, INTERNAL]]:
        analyser.rolling_arrival(pre_ward=pre_ward, window=window_size)
        key = ",".join(pre_ward)
        plt.savefig(output_dir.joinpath(f"Rolling arrival - {key}.pdf"))
        plt.close()
    analyser.rolling_service(window=window_size)
    plt.savefig(output_dir.joinpath("Rolling service.pdf"))
    plt.close()
    for incoming in [True, False]:
        analyser.rolling_routing(incoming=incoming, window=window_size)
        key = "incoming" if incoming else "outgoing"
        # NOTE(review): the filename glues key and "pre_ward" together
        # (e.g. "... - incomingpre_ward=[external].pdf"); kept unchanged
        # to preserve existing output names -- confirm whether a
        # separator was intended.
        plt.savefig(
            output_dir.joinpath(
                f"Rolling routing - {key}pre_ward=[external].pdf"))
        plt.close()
    code_map, graphs = analyser.cart_classes(cart_specs=cart_specs)
    with open(output_dir.joinpath("Decision tree - code map.json"), "w") as f:
        json.dump(code_map, f)
    # pylint: disable=broad-except
    for ward, graph in graphs:
        try:
            graph.render(
                output_dir.joinpath(f"Decision tree for ward {ward}.gv"))
        except Exception as e:
            # Best effort: rendering may fail (e.g. missing graphviz
            # binary); log and continue with the remaining wards.
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # still propagate (matches the broad-except pylint disable).
            logger.warning(
                "Could not save decision tree data. Received error %s.", e)
    return analyser
| true |
220782e6d68217ed4677ddf90ae3d0964c3bb3ae | Python | IKyriem/Ejercicios-procesamiento-de-imagenes | /tarea.py | UTF-8 | 276 | 2.625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as pyplot
import cv2
#Carga de imagen a color
img = cv2.imread('2C1.jpg', 1)
[B, G, R] = cv2.split(img)
cv2.imshow ('imageR', R)
cv2.imshow ('imageG', G)
cv2.imshow ('imageB',B)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
5ea8278a46f6919fd0e35af636b65fe11522791b | Python | zctao/GLIB_Aurora | /PyChips_1_4_1/scripts/glib_icap_jump_to_image.py | UTF-8 | 4,249 | 2.59375 | 3 | [] | no_license | ##===================================================================================================##
##==================================== Script Information ===========================================##
##===================================================================================================##
##
## Company: CERN (PH-ESE-BE)
## Engineer: Manoel Barros Marin (manoel.barros.marin@cern.ch) (m.barros@ieee.org)
##
## Create Date: 14/03/2013
## Script Name: glib_icap_jump_to_image
## Python Version: 2.7.3
##
## Revision: 1.0
##
## Additional Comments:
## * This script configures and triggers the ICAP primitive for jumping to User or Golden images.
##
##===================================================================================================##
##===================================================================================================##
## Python modules:
import sys
from time import sleep
## Import the PyChips code - PYTHONPATH must be set to the PyChips installation src folder!
from PyChipsUser import *
##===================================================================================================##
##======================================== Script Body ==============================================##
##===================================================================================================##
# Python 2 script body: exactly one user argument (the target image)
# is expected on the command line.
args = sys.argv
arg_length = len(args)
if arg_length == 2:
    #######################################################################################################
    ## PYCHIPS
    #######################################################################################################
    ##################################################################################################
    ### Uncomment one of the following two lines to turn on verbose or very-verbose debug modes. ###
    ### These debug modes allow you to see the packets being sent and received. ###
    ##################################################################################################
    #chipsLog.setLevel(logging.DEBUG) # Verbose logging (see packets being sent and received)

    # Read in an address table by creating an AddressTable object (Note the forward slashes, not backslashes!)
    glibAddrTable = AddressTable("./glibAddrTable.dat")

    # The board's IP address is read from a one-line file.
    f = open('./ipaddr.dat', 'r')
    ipaddr = f.readline()
    f.close()

    # UDP connection to the GLIB board on port 50001.
    glib = ChipsBusUdp(glibAddrTable, ipaddr, 50001)
    print
    print "--=======================================--"
    print " Opening GLIB with IP", ipaddr
    print "--=======================================--"
    #######################################################################################################
    ## Main
    #######################################################################################################
    # Map the requested image name to its ICAP flash page.
    arg_error = 0
    if args[1] == "user":
        page = 2 ## 2 -> 0x400000 (User image)
    elif args[1] == "golden":
        page = 0 ## 0 -> 0x000000 (Golden image)
    else:
        print
        print "-> Error!!! Invalid argument."
        print "-> "
        print "-> Syntax:"
        print "-> "
        print "-> glib_icap_jump_to_image.py <user|golden>"
        print "-> "
        print "-> <user|golden> - Either user OR golden:"
        print "-> - user : Jumps to User image"
        print "-> - golden : Jumps to Golden image"
        arg_error = 1
    if arg_error == 0:
        print
        print "-> Jumping to %s image..." % args[1]
        print
        # Select the flash page, then trigger the ICAP reconfiguration.
        glib.write("icap_page", page)
        glib.write("icap_trigg", 1)
else:
    print
    print "-> Error!!! One argument is required."
    print "-> "
    print "-> Syntax:"
    print "-> "
    print "-> glib_icap_jump_to_image.py <user|golden>"
    print "-> "
    print "-> <user|golden> - Either user OR golden:"
    print "-> - user : Jumps to User image"
    print "-> - golden : Jumps to Golden image"
##===================================================================================================##
##===================================================================================================##
52ab0375a54ef8c2e04d83fcc2634d11cf192590 | Python | Juan8bits/low_level_programming | /0x1C-makefiles/5-island_perimeter.py | UTF-8 | 1,613 | 3.890625 | 4 | [] | no_license | #!/usr/bin/python3
""" Islander perimeter """
def island_perimeter(grid):
""" function that returns the perimeter of the
island described in grid.
Conditions:
- Grid is a list of list of integers.
- 0 represents a water zone.
- 1 represents a land zone
- One cell is a square with side length 1
- Grid cells are connected horizontally/vertically
(not diagonally).
- Grid is rectangular, width and height dont
exceed 100
- Grid is completely surrounded by water, and there
is one island (or nothing).
- The island doesnt have lakes (water inside that
isnt connected to the water around the island).
"""
if len(grid) < 1:
return 0
ex_y = len(grid) - 1
ex_x = len(grid[0]) - 1
perimeter = 0
for y in range(ex_y + 1):
for x in range(ex_x + 1):
if grid[y][x] == 1:
'#up'
if x == 0:
perimeter += 1
elif grid[y][x - 1] == 0:
perimeter += 1
'#down'
if x == ex_x:
perimeter += 1
elif grid[y][x + 1] == 0:
perimeter += 1
'#left'
if y == 0:
perimeter += 1
elif grid[y - 1][x] == 0:
perimeter += 1
'#right'
if y == ex_y:
perimeter += 1
elif grid[y + 1][x] == 0:
perimeter += 1
return perimeter
| true |
494bd38d3f3d370098e8c11e672901c7f619dd73 | Python | a1742861031/faceRecognition | /flask-face-recognition-manage-system/controller/api_1_0/passport.py | UTF-8 | 3,493 | 2.546875 | 3 | [] | no_license | from . import api
from flask import current_app, jsonify, make_response, request, session
from .. import redis_store
from .. import models, db
from sqlalchemy.exc import IntegrityError
from werkzeug.security import generate_password_hash, check_password_hash
@api.route('/users', methods=["POST"])
def register():
    """Register a new user.

    Request parameters (JSON body): user id, password (pwd), mobile
    number, display name and image-captcha fields (codeid/code).
    """
    req_dict = request.get_json()
    # Validate the image captcha first.
    codeid = req_dict.get("codeid")
    code = req_dict.get("code")
    mobile = req_dict.get("mobile")
    id = req_dict.get("id")
    pwd = req_dict.get("pwd")
    name = req_dict.get("name")
    # Wrong image captcha.
    # NOTE(review): str(...)[2:6] relies on the repr "b'xxxx'" of a
    # 4-character bytes value coming back from redis -- fragile; verify
    # redis returns bytes and that codes are always 4 characters.
    if (str(redis_store.get("image_code_%s" % codeid))[2:6]).lower() != code.lower():
        return jsonify(code=4001, msg="图片验证码错误")
    # Correct image captcha.
    else:
        user = models.User(id=id, mobile=mobile, state=0 , Name=name, isSuperAdmin=0)
        user.password = pwd  # The property setter hashes the password automatically.
        try:
            db.session.add(user)
            db.session.commit()
        except IntegrityError as e:
            # Roll back the failed database transaction.
            db.session.rollback()
            # Duplicate user id.
            return jsonify(code=4001, msg="用户id已经存在")
        except Exception as e:
            print(e)
            return jsonify(code=4001, msg="数据库异常")
        # Persist the login state in the session.
        session["id"] = id
        session["mobile"] = mobile
        return jsonify(code=201, msg="注册成功")
@api.route("/sessions", methods=["POST"])
def login():
"""
用户登录
参数:用户名 密码 验证码
:return:
"""
req_dict = request.get_json()
codeid = req_dict.get("codeid")
code = req_dict.get("code")
id = req_dict.get("id")
pwd = req_dict.get("pwd")
# 图片验证码错误
if (str(redis_store.get("image_code_%s" % codeid))[2:6]).lower() != code.lower():
return jsonify(code=4001, msg="图片验证码错误")
# 图片验证码正确
else:
user = models.User.query.filter_by(id=id).first()
# 未查找到
if user is None:
return jsonify(code=4001, msg="用户名或密码错误")
else:
if user.check_password(passwd=pwd):
# 保存登录状态
session["id"] = user.id
return jsonify(code=200, msg="登录成功", data={'id': user.id, 'stata': user.state, 'mobile': user.mobile})
else:
return jsonify(code=4001, msg="用户名或密码错误")
@api.route("/sessions", methods=["GET"])
def check_Login():
"""
检查登录状态
"""
"""获取session的数据"""
id = session.get("id")
print(id)
if id is not None:
return jsonify(code=200, msg="用户已经登录", userid=id)
else:
return jsonify(code=4001, msg="用户未登录")
# Try to obtain the user id from the session
# (reading the id from the session goes through the session id).
# id = session.get("id")
# name = request.cookies.get('session')
# print(name)
# print(id)
# # If the id already exists in the session data
@api.route("/sessions", methods=["DELETE"])
def logout():
"""登出"""
session.clear()
return jsonify(code=200, msg="成功登出")
| true |
d276cd96b0a93aca3693bbcedd06664254ef5b85 | Python | tshidhore/solver-codes | /ME614/homeworkHPC/code/p3.py | UTF-8 | 1,828 | 2.546875 | 3 | [] | no_license | import os
import sys
import numpy as np # library to handle arrays like Matlab
import scipy.sparse as scysparse
from matplotlib import pyplot as plt
from mpi4py import MPI
import pickle
# MPI context: every rank executes this same script.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size1 = comm.Get_size()  # total number of ranks
# Number of grid points, kept as a 1-element float128 array.
N = ((10**5)+1)*np.ones(1,dtype=np.float128)
# Truncation-error buffer, filled on rank 0 after the reduction.
e_tr = np.zeros(1,dtype=np.float128)
def function(x):
    """Integrand tanh(x) * exp(x), evaluated in extended precision."""
    return np.tanh(x, dtype=np.float128) * np.exp(x, dtype=np.float128)
# Uniform panel width over the integration domain [-10, 10].
# (N is a 1-element array, so dx is array-valued as well.)
dx = np.float128(20.0/(N-1))
#Try and avoid defining variables in terms of one another!!!!
p_integral = np.zeros(1,dtype=np.float128)  # this rank's partial sum
integral = np.zeros(1,dtype=np.float128)    # reduced result (rank 0 only)
# Split the panel indices evenly across ranks; the last rank absorbs
# the remainder so its interval ends exactly at x = 10.
index_start = int(rank*np.floor(N/size1))
start = -10 + (index_start)*dx
if rank == size1 - 1:
    index_end = int(N - 1)
    end = 10
else:
    index_end = int(index_start + np.floor(N/size1))
    end = -10 + index_end*dx
# Left edges of this rank's panels (end itself is excluded by arange).
x_stencil = np.arange(start, end, dx)
# Simpson's rule on each panel [x, x+dx] with midpoint x+dx/2.
for i,x in enumerate(x_stencil):
    p_integral = p_integral + (dx*(function(x) + (4*function(x+dx/2)) + function(x+dx))/6)
# Sum all partial integrals onto rank 0.
comm.Reduce(p_integral, integral, op=MPI.SUM, root=0)
if rank == 0 :
    # Closed-form value of the integral of tanh(x)*exp(x) over [-10, 10].
    integral_a = (-1/np.exp(10,dtype=np.float128)) + (np.exp(10,dtype=np.float128)) + (2*np.arctan(1/np.exp(10,dtype=np.float128),dtype=np.float128)) - (2*np.arctan(np.exp(10,dtype=np.float128),dtype=np.float128))
    e_tr = np.abs(integral_a - integral)
    print "The numerical value of the integral is %2.11f" % (integral)
    print "The analytical value of the integral is %2.11f" % (integral_a)
    # NOTE(review): "absoulte" is a typo inside a runtime string; left
    # unchanged so the program output stays byte-identical.
    print "The absoulte truncation error is %2.11f" % (e_tr)
    print "The number of processors used is %d" % (size1)
comm.Barrier()
if rank == 0:
    # Persist the truncation error, keyed by processor count.
    f1 = open("Truncation_Error_partc_Np=%d.p" % (size1), "wb")
    pickle.dump(e_tr, f1)
    f1.close()
comm.Barrier()
print "Node %d exiting\n" % (rank)
sys.exit()
b07aa6e4556561a02878255178fa1bb52d023c3b | Python | shivatharun/NLP | /Functional_API_Keras/5_Shared_Input_layer.py | UTF-8 | 852 | 2.609375 | 3 | [] | no_license | # Shared Input Layer model
from keras.models import Model
from keras.layers import Input, Dense, Flatten
from keras.utils import plot_model
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
# Two parallel convolutional "towers" share the same input layer.
visible = Input(shape=(64,64,1))
# Tower 1: 32 filters with a 4x4 kernel.
conv1 = Conv2D(32, kernel_size = 4, activation = 'relu')(visible)
pool1 = MaxPooling2D(pool_size = (2,2))(conv1)
flat1 = Flatten()(pool1)
# Tower 2: 16 filters with an 8x8 kernel, fed from the same input.
conv2 = Conv2D(16, kernel_size = 8, activation = 'relu')(visible)
pool2 = MaxPooling2D(pool_size = (2,2))(conv2)
flat2 = Flatten()(pool2)
# Concatenate both flattened towers into a single feature vector.
merge = concatenate([flat1, flat2])
hidden1 = Dense(10, activation = 'relu')(merge)
# Single sigmoid unit: binary classification output.
output = Dense(1, activation = 'sigmoid')(hidden1)
model = Model(inputs = visible, outputs = output)
print model.summary()
plot_model(model, to_file = '5_Shared_input_layer.png')
| true |
1cf8687770a7c3c62d1edb55d6ebefccd3c413bd | Python | TenzinJhopee/GamestonkTerminal | /gamestonk_terminal/cryptocurrency/onchain/ethgasstation_model.py | UTF-8 | 1,549 | 3 | 3 | [
"MIT"
] | permissive | import pandas as pd
import requests
def get_gwei_fees() -> pd.DataFrame:
    """Returns the most recent Ethereum gas fees in gwei

    [Source: https://ethgasstation.info]

    Parameters
    ----------

    Returns
    -------
    pd.DataFrame
        four gas fees and durations
        (fees for slow, average, fast and
        fastest transactions in gwei and
        its average durations in seconds)
    """
    try:
        # The request must live inside the try block: previously it sat
        # outside, so the RequestException handler below could never
        # catch connection/DNS/timeout errors. A timeout is set so the
        # call cannot hang indefinitely.
        r = requests.get(
            "https://ethgasstation.info/json/ethgasAPI.json", timeout=10
        )
        if r.status_code == 200:
            api_data = r.json()
            # The API reports fees multiplied by 10, hence the division.
            return pd.DataFrame(
                data=[
                    [
                        "Fastest",
                        int(api_data["fastest"] / 10),
                        round(api_data["fastestWait"], 1),
                    ],
                    ["Fast", int(api_data["fast"] / 10), round(api_data["fastWait"], 1)],
                    [
                        "Average",
                        int(api_data["average"] / 10),
                        round(api_data["avgWait"], 1),
                    ],
                    [
                        "Slow",
                        int(api_data["safeLow"] / 10),
                        round(api_data["safeLowWait"], 1),
                    ],
                ],
                columns=["Tx Type", "Fee (gwei)", "Duration (min)"],
            )
        # Non-200 responses yield an empty frame rather than raising.
        return pd.DataFrame()
    except TypeError:
        return pd.DataFrame()
    except requests.exceptions.RequestException:
        return pd.DataFrame()
| true |