blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0380a9e419eaefe49786d5c67803050794ab55f5 | Python | Aj588/StatsGroupProject | /MathOperations/multiplication.py | UTF-8 | 125 | 2.5625 | 3 | [] | no_license | class Multiplication:
@staticmethod
def product(multiplier, multiplicand):
return multiplier * multiplicand
| true |
937ba82e576a3f44ac37d91f078d4afc939f02ac | Python | BedaSBa6koi/Homework-14.03.20 | /task12v2.py | UTF-8 | 465 | 3.6875 | 4 | [] | no_license | fahr = print('Enter the desired number and F if you want to convert Fahrenheit to Celsius\n')
cels = print('Enter the desired number and C if you want to convert Celsius to Fahrenheit\n')
t = (input('Enter: \n'))
sign = t[-1]
t = int(t[0:-1])
def calc(t):
    """Convert the temperature *t* and print the result.

    Uses the module-level ``sign`` (last character of the user's input)
    to pick the direction: 'C'/'c' converts Celsius -> Fahrenheit,
    'F'/'f' converts Fahrenheit -> Celsius; any other sign prints
    nothing.  The converted value is truncated to an int before printing.
    """
    if sign == 'C' or sign == 'c':
        t = int(t * (9/5) + 32)
        print(str(t) + 'F')
    elif sign == 'F' or sign == 'f':
        t = int((t - 32) * (5/9))
        print(str(t) + 'C')
# Run the conversion once (the surrounding parentheses are redundant).
(calc(t))
14666225bc5ae653e830cac95091478ddd479680 | Python | AllanPS98/Compilador | /main.py | UTF-8 | 932 | 3.1875 | 3 | [] | no_license | '''
Versão Python: 3.8
'''
import AnalisadorLexico
import AnalisadorSintatico
import Arquivos
import os
# Lexical analyser instance shared by all input files.
lexan = AnalisadorLexico.AnalisadorLexico()
# Every file found in ./input is compiled in turn.
arquivosEntrada = os.listdir("input")
print(arquivosEntrada)
leitor = Arquivos.Arquivos()
texto = ""
contadorArquivo = 1  # numbers the output files (saida1.txt, saida2.txt, ...)
for arquivo in arquivosEntrada:
    caminho = "input/" + arquivo
    texto = leitor.ler(caminho)
    print(texto)
    # Lexical pass: produces the token listing and an error flag.
    resposta, teve_erro_lexico = lexan.analisar(texto)
    caminho_saida_lexico = f"output/saida{contadorArquivo}.txt"
    leitor.escrever(caminho_saida_lexico, resposta)
    if not teve_erro_lexico:
        # Syntactic pass re-reads the lexical output file and then
        # overwrites that same file with its own result.
        caminho_entrada_sintatico = caminho_saida_lexico
        texto_sintatico = leitor.ler(caminho_entrada_sintatico)
        sinan = AnalisadorSintatico.AnalisadorSintatico(resposta)
        resposta = sinan.analisar(texto_sintatico)
        leitor.escrever(caminho_saida_lexico, resposta)
    contadorArquivo += 1
| true |
1a9644e1315d0be02064c24a7f3163908d60fb44 | Python | ofkarakus/python-assignments | /Python Basics/3-Control Flow Statements/Assignment-7/Assignment - 7 (FizzBuzz).py | UTF-8 | 726 | 4.71875 | 5 | [] | no_license | # Task : Print the FizzBuzz numbers.
# FizzBuzz is a famous code challenge used in interviews to test basic programming
# skills. It's time to write your own implementation.
# Print numbers from 1 to 100 inclusively following these instructions:
# if a number is multiple of 3, print "Fizz" instead of this number,
# if a number is multiple of 5, print "Buzz" instead of this number,
# for numbers that are multiples of both 3 and 5, print "FizzBuzz",
# print the rest of the numbers unchanged.
# Output each value on a separate line.
# Classic FizzBuzz over 1..100: "Fizz" for multiples of 3, "Buzz" for
# multiples of 5, "FizzBuzz" for both, the number itself otherwise.
for number in range(1, 101):
    label = ""
    if number % 3 == 0:
        label += "Fizz"
    if number % 5 == 0:
        label += "Buzz"
    print(label or number)
ac369ed2adaddd2290ac100bb82c8423ba5b87d8 | Python | MrFrezza/Keras-exercises-repository | /tfcheck.py | UTF-8 | 226 | 2.546875 | 3 | [] | no_license | import tensorflow as tf
import os
# Silence TensorFlow's C++ INFO/WARNING log messages before TF initialises.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
print("Tensorflow version is " + str(tf.__version__))
hello = tf.constant('Hello from Tensorflow')
# NOTE(review): tf.Session exists only in TensorFlow 1.x (or via
# tf.compat.v1) — confirm the installed TF version matches.
sess = tf.Session()
print(sess.run(hello))
8f2c272773a219495ac64a9934030b9c939f8a0f | Python | RafalSl/Python-bootcamp-excercises | /zagadka.py | UTF-8 | 2,830 | 3.859375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
#P76 średnia ocen - wersja z błędem - podwójny Enter na zakończenie wpisywania
class SrOcen:
    """Grade-average register (the buggy version kept for comparison).

    Known defect: ``srednia`` calls ``wpisywanie`` itself, so every call
    (from ``__init__``, ``__str__`` or ``__add__``) re-prompts the user
    for the whole grade list.
    """
    # Accepted grade strings (Polish 1..5 scale in half steps).
    l_ocen = ['1', '1.5', '2', '2.5', '3', '3.5', '4', '4.5', '5']
    def __init__(self, imie, nazwisko):
        self.imie = imie
        self.nazwisko = nazwisko
        self.oceny = []
        self.srednia()
    def __add__(self, other):
        # Average of the two students' averages (re-prompts — see class doc).
        return (self.srednia() + other.srednia())/2
    def __str__(self):
        if len(self.oceny) > 0:
            # Strip the surrounding brackets from the list repr.
            temp = str(self.oceny)
            return '\n\n' + self.imie + ' ' + self.nazwisko + '\n' + 'Lista ocen: ' + temp[1:len(temp)-1] + '\n' + 'Średnia ocen: ' + str(self.srednia())
        else:
            return '\n\n' + self.imie + ' ' + self.nazwisko + '\n' + 'Lista ocen: brak\n Średnia ocen: brak ocen'
    def wpisywanie(self):
        # Prompt for grades until an empty line; invalid entries are retried.
        while True:
            ocena = input('Podaj ocenę (sam "Enter" = koniec): ')
            if len(ocena) == 0:
                break
            elif ocena not in self.l_ocen:
                print('Pomyłka')
                continue
            else:
                self.oceny.append(float(ocena))
        return self.oceny
    def srednia(self):
        # BUG (intentional, per file header): prompting happens inside the
        # average computation, so averaging re-reads user input.
        self.wpisywanie()
        suma = 0
        for i in self.oceny:
            suma += i
        if len(self.oceny) > 0:
            sr_ocen = round(suma/len(self.oceny),2)
            return sr_ocen
s1 = SrOcen('Adam', 'Kowalski')
print(s1)
#Wersja OK
class SrOcen:
    """Grade-average register (corrected version).

    Grades are entered once in ``__init__``; ``srednia`` only computes.
    NOTE: this re-definition shadows the class of the same name above.
    """
    # Accepted grade strings (Polish 1..5 scale in half steps).
    l_ocen = ['1', '1.5', '2', '2.5', '3', '3.5', '4', '4.5', '5']
    def __init__(self, imie, nazwisko):
        self.imie = imie
        self.nazwisko = nazwisko
        self.oceny = []
        self.wpisywanie()
        self.srednia()
    def __add__(self, other):
        # Average of the two students' averages.
        return (self.srednia() + other.srednia())/2
    def __str__(self):
        if len(self.oceny) > 0:
            # Strip the surrounding brackets from the list repr.
            temp = str(self.oceny)
            return '\n\n' + self.imie + ' ' + self.nazwisko + '\n' + 'Lista ocen: ' + temp[1:len(temp)-1] + '\n' + 'Średnia ocen: ' + str(self.srednia())
        else:
            return '\n\n' + self.imie + ' ' + self.nazwisko + '\n' + 'Lista ocen: brak\n Średnia ocen: brak ocen'
    def wpisywanie(self):
        # Prompt for grades until an empty line; invalid entries are retried.
        while True:
            ocena = input('Podaj ocenę (sam "Enter" = koniec): ')
            if len(ocena) == 0:
                break
            elif ocena not in self.l_ocen:
                print('Pomyłka')
                continue
            else:
                self.oceny.append(float(ocena))
        return self.oceny
    def srednia(self):
        # Pure computation now — no prompting here.
        suma = 0
        for i in self.oceny:
            suma += i
        if len(self.oceny) > 0:
            sr_ocen = round(suma/len(self.oceny),2)
            return sr_ocen
s1 = SrOcen('Adam', 'Kowalski')
print(s1)
8ac44d2681ca8d5246b759b634ab9f924727692d | Python | Zhaokugua/MOOC_1261_Eamples | /4-5 Python之while循环.py | UTF-8 | 90 | 3.375 | 3 | [] | no_license | #请求出1~10的乘积。
# Multiply the integers 1..10 together and print the product (10! = 3628800).
factor = 0
product = 1
while factor < 10:
    factor += 1
    product *= factor
print(product)
| true |
f3d2dcc976276815a4441e72e1e71138c5be915f | Python | mmmare/Python1 | /stat.py | UTF-8 | 750 | 4.03125 | 4 | [] | no_license |
while True:
try:
numbers = input('please enter values seperated by a space ').split()
addval = sum([int(number)for number in numbers])
addval = int(addval)
average = addval/len(numbers)
intva = int(numbers)
median = sorted(intva)
print("The mean is",average)
if len(numbers)%2 == 0:
evenval = int(len(numbers)/2)
evenRange = int(median[evenval])
lowevenRange = int(median[evenval]) -1
print("The median is",(lowevenRange + evenRange)/2)
else:
oddvalueval = ((len(numbers))/2)
print(median)
print(median)
oddRange = oddvalueval-.5
intoddrange = int(oddRange)
oddmedian = int(median[intoddrange])
print("The median is", oddmedian)
break
except ValueError:
print("please enter integers")
| true |
75958e549b0a3393e8ad5f1d7146a27271a9f569 | Python | hyejinHong0602/BOJ | /bronze3/[WEEK6] 5073 - 삼각형과 세 변.py | UTF-8 | 637 | 3.328125 | 3 | [] | no_license | a=1
# Classify triangles from stdin: each line gives three integer side
# lengths; the loop stops after reading "0 0 0".  (``a`` is initialised
# just above this block.)
b=1
c=1
while a!=0 or b!=0 or c!=0:
    a, b, c = map(int, input().split())
    nums=[a,b,c]
    sortedNum=sorted(nums)
    # Triangle inequality: the longest side must be strictly shorter than
    # the sum of the other two, otherwise the sides are invalid.
    if sortedNum[2] >= sortedNum[1]+sortedNum[0]:
        if a == 0 and b == 0 and c == 0:
            pass  # sentinel line — terminate silently without printing
        else:
            print('Invalid')
    else:
        # Count equal-side pairs: 3 equal -> equilateral, 2 -> isosceles,
        # none -> scalene.
        if a == b:
            if b == c:
                print('Equilateral')
            else:
                print('Isosceles')
        else:
            if b == c:
                print('Isosceles')
            else:
                if a == c:
                    print('Isosceles')
                else:
                    print('Scalene')
| true |
f3d5154da0a3a51fb9864148b22375471b9e878e | Python | Quatroctus/CS362-ICA-PY-Unit | /word_count.py | UTF-8 | 120 | 3.078125 | 3 | [] | no_license |
def word_count(sentence: str) -> int:
    """Return the number of whitespace-separated tokens in *sentence*.

    Hyphenated compounds ("well-known") count as a single word, exactly
    as in the original implementation.
    """
    return len(sentence.split())
| true |
1e1646630faedcbae936c36387f66cf97a4138ac | Python | NinaHerrmann/ACO-Results | /Scripts/Graphs/TSP/bar_graph_big_problem_routekernel_runtime_comparison.py | UTF-8 | 1,792 | 2.828125 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# NOTE(review): stray no-op string literal — likely a leftover path fragment.
'../../.'
# Averaged runtime tables: row 0 holds the ant counts, rows 1..4 the
# runtimes for 1/2/4/8 (units per row below); column 9 onward is used.
my_data = pd.read_csv('../../../data_aggregation/TSP/HighLevel/Musket_route_kernel_average.csv', delimiter=',', header=None)
breno_data = pd.read_csv('../../../data_aggregation/TSP/LowLevel/Lowlevel_route_kernel_average.csv', delimiter=',', header=None)
#define size
#fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, figsize=(15, 10))
# X-Achsis
many_years = my_data.iloc[0]
years = [str(int(year)) for year in many_years][9:]
ind = np.arange(len(years)) # the x locations for the groups
width = 0.35 # the width of the bars
# Data from Musket
Musket1 = my_data.iloc[1].astype(float)[9:]
Musket2 = my_data.iloc[2].astype(float)[9:]
Musket4 = my_data.iloc[3].astype(float)[9:]
Musket8 = my_data.iloc[4].astype(float)[9:]
# Column 9 of each row = the pr2392 problem instance.
bars1 = [Musket1[9],Musket2[9],Musket4[9],Musket8[9]]
# Data Breno
Breno1 = breno_data.iloc[1].astype(float)[9:]
Breno2 = breno_data.iloc[2].astype(float)[9:]
Breno4 = breno_data.iloc[3].astype(float)[9:]
Breno8 = breno_data.iloc[4].astype(float)[9:]
bars2 = [Breno1[9],Breno2[9],Breno4[9],Breno8[9]]
# NOTE(review): this axes is superseded by plt.figure() below, so the
# grid/axisbelow settings apply to a figure that is never drawn on.
fig, ax = plt.subplots()
ax.set_axisbelow(True)
ax.grid()
# width of the bars
barWidth = 0.3
# The x position of bars
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
plt.figure(figsize=(9, 3))
# Create blue bars
plt.bar(r1, bars1, width = barWidth, color = '#A60628', edgecolor = 'black', capsize=7, label='Musket')
plt.bar(r2, bars2, width = barWidth, color = '#348ABD', edgecolor = 'black', capsize=7, label='Low-level')
# general layout
plt.xticks([r + barWidth/2 for r in range(len(bars1))], ['1024', '2048', '4096', '8192'])
plt.title('Route Kernel Runtime Comparison - pr2392')
plt.ylabel('seconds')
plt.xlabel('ants')
plt.legend()
plt.show()
81e39c69c0bbfa31bbab73efba156a3bd0839cfd | Python | shawn-stover/LeetCode-Python | /largest-number-at-least-twice-of-others/largest-number-at-least-twice-of-others.py | UTF-8 | 1,156 | 3.734375 | 4 | [] | no_license | class Solution:
def dominantIndex(self, nums: List[int]) -> int:
"""
Trivial cases
- Array only has 1 element
the return must be 0
- An array of length 2
- [3, 6]
[3, 6, 1, 0]
enumerate(nums)
[(0, 3), (1, 6), (2, 1), (3, 0)]
- Do max to find the max, remove it, then do max again and compare them
- Largest starts at the beginning
- Iterate through the array, comparing the curent element to the largest
if its larger, assign largest to it
"""
largest = -1
second_largest = -1
largest_index = -1
for i, x in enumerate(nums):
if x > largest:
second_largest = largest
largest = x
largest_index = i
elif x > second_largest:
second_largest = x
if largest < second_largest*2:
return -1
return largest_index | true |
df87933633744972644ae149faec3a3be7a41a49 | Python | cphyc/MHD_simulation | /python/simul.py | UTF-8 | 9,256 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/env python3
try:
import numpypy as np
except:
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
#import ipdb
## Tri Diagonal Matrix Algorithm(a.k.a Thomas algorithm) solver
def TDMAsolver(a, b, c, d):
    '''
    Solve a tridiagonal linear system with the Thomas algorithm.

    a, b, c, d -- sub-diagonal, diagonal, super-diagonal and right-hand
    side; NumPy arrays or Python lists.  a[0] and c[-1] are unused.
    Pass floats: integer arrays would silently truncate the divisions.
    Returns the solution vector.

    refer to http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
    '''
    nf = len(a)  # number of equations
    ac, bc, cc, dc = map(np.array, (a, b, c, d))  # copy the arrays
    # Forward elimination: zero out the sub-diagonal.
    # (range() — the original used Python-2 xrange, a NameError under
    # the python3 shebang of this file.)
    for it in range(1, nf):
        mc = ac[it]/bc[it-1]
        bc[it] = bc[it] - mc*cc[it-1]
        dc[it] = dc[it] - mc*dc[it-1]
    # Back substitution, reusing the copied sub-diagonal as storage.
    xc = ac
    xc[-1] = dc[-1]/bc[-1]
    for il in range(nf-2, -1, -1):
        xc[il] = (dc[il]-cc[il]*xc[il+1])/bc[il]
    del bc, cc, dc  # delete variables from memory
    return xc
class Vector(object):
    """Base class for a spectral field (temperature/vorticity/stream).

    Stores the field as a (Nz, NFourier) array plus the derivative
    buffer G used by the 2nd-order Adams-Bashforth update in ``step``.
    ``self.field`` is only created by ``initial`` (or by a subclass),
    so that must be called before stepping.
    """
    def __init__(self, parent):
        # save the pointer to the parent (dynamical)
        self.p = parent
        # initial G = 0, G[k,n]
        self.G = np.zeros((self.p.Nz, self.p.NFourier), dtype="float64")
        # access via G[k][n]
    def step(self):
        """Advance one time step (Adams-Bashforth 2) in the interior rows."""
        # save the old G
        self.G_old = self.G.copy()
        # compute the new one
        self.compute_G()
        # new += dt/2*(3G-G_old)
        self.field[1:-1] = (self.field[1:-1]
                            + self.p.dt/2*(3*self.G[1:-1] - self.G_old[1:-1])
                            )
        # conditions at top and bottom : null
        self.field[0 ,:] = 0
        self.field[-1,:] = 0
    def compute_G(self):
        # Subclasses must override this; NotImplementedError would be the
        # conventional exception, kept as Exception for compatibility.
        raise Exception("Vector class is a base class, not supposed to be "+
                        "used like that")
    def initial(self, init_cond):
        """Initialise the field: 'null' -> zeros, 'T' -> the T_0 profile."""
        if init_cond == 'null':
            self.field = np.zeros((self.p.Nz, self.p.NFourier))
        elif init_cond == "T":
            self.field = np.array([[T_0(n,k,self.p) for n in range(self.p.NFourier)]
                                   for k in range(self.p.Nz)])
        else:
            raise Exception("init_cond must be either `null` or `T`")
class Temp(Vector):
    """Temperature field: G holds the diffusion terms per Fourier mode."""
    name = "T"
    def compute_G(self):
        # compute G except for k = 0, Nz-1 and n = 0
        for n in range(1, self.p.NFourier):
            # Central second difference in z minus the horizontal
            # wavenumber term (n*pi/a)^2 for Fourier mode n.
            self.G[1:-1,n] = ((self.field[:-2,n]-2*self.field[1:-1,n]+self.field[2:,n])
                              * self.p.oodz2
                              - (n*self.p.pi/self.p.a)**2
                              * self.field[1:-1,n] )
class Vort(Vector):
    """Vorticity field, updated by solving a per-mode tridiagonal system.

    ``compute_wk`` precomputes LU-style factors of the tridiagonal
    operator; ``step`` overrides Vector.step entirely (no Adams-Bashforth
    here) and solves with the stream function as right-hand side.
    """
    name = "ω"
    def __init__(self, parent):
        super().__init__(parent)
        self.compute_wk()
    def compute_wk(self):
        """Precompute the forward-elimination factors for each mode n."""
        # init. the arrays:
        self.wk1 = np.zeros((self.p.Nz, self.p.NFourier))
        self.wk2 = np.zeros((self.p.Nz, self.p.NFourier))
        self.sub = np.zeros((self.p.Nz, self.p.NFourier))
        for n in range(1,self.p.NFourier):
            # save some usefull functions
            # Boundary rows (k = 0 and Nz-1) reduce to the identity.
            sub_f = lambda k : -self.p.oodz2 if k<self.p.Nz-1 else 1
            dia = lambda k : (n*self.p.pi/self.p.a)**2 + 2*self.p.oodz2 if 0<k<self.p.Nz-1 else 1
            sup = lambda k : -self.p.oodz2 if k>0 else 1
            # tridiag. solver
            self.wk1[0,n] = 1/dia(0)
            self.wk2[0,n] = sup(0) * self.wk1[0,n]
            for k in range(1, self.p.Nz-1):
                self.wk1[k,n] = 1 /(dia(k)-sub_f(k)*self.wk2[k-1,n])
                self.wk2[k,n] = sup(k)*self.wk1[k,n]
            self.wk1[-1,n] = 1/(dia(self.p.Nz-1)-sub_f(self.p.Nz-1)*self.wk2[-2,n])
            self.sub[:,n] = [sub_f(k) for k in range(self.p.Nz)]
    def step(self):
        """Solve the tridiagonal system with ψ as the right-hand side."""
        rhs = self.p.psi.field.copy()
        # boundary conditions k=0, Nz-1 : psi = 0
        rhs[0, :] = 0
        rhs[-1,:] = 0
        for n in range(1,self.p.NFourier):
            # tridiag. solver
            self.field[0,n] = rhs[0,n]*self.wk1[0,n]
            # NOTE(review): the standard Thomas forward pass multiplies the
            # whole (rhs - sub*prev) difference by wk1; here wk1 only scales
            # the sub*prev term — confirm this parenthesisation is intended.
            for k in range(1, self.p.Nz):
                self.field[k,n] = (rhs[k,n] - self.sub[k,n]*self.field[k-1,n]*self.wk1[k,n])
            for k in range(self.p.Nz-2, 0, -1):
                self.field[k,n] = self.field[k,n]-self.wk2[k,n]*self.field[k+1,n]
class Stream(Vector):
    """Stream-function field ψ: G couples buoyancy (Ra·T) and diffusion."""
    name = "ψ"
    def compute_G(self):
        # compute G except for k=0, Nz-1 and n=0
        for n in range(1, self.p.NFourier):
            # a: buoyancy forcing from the temperature mode,
            # b: vertical second difference, c: horizontal wavenumber term.
            a = self.p.Ra*n*self.p.pi/self.p.a*self.p.T.field[1:-1,n]
            b = (self.field[:-2,n] - 2*self.field[1:-1,n] + self.field[2:,n])*self.p.oodz2
            c = (n*self.p.pi/self.p.a)**2*self.field[1:-1,n]
            self.G[1:-1,n] = self.p.Pr*( a + b - c)
class Simulation(object):
    """Owns the three fields (T, ω, ψ), the grid constants and the clock.

    Keyword arguments override the defaults in ``param_list``; unknown
    names raise.  ``step`` advances the bookkeeping (CFL time step,
    iteration count, periodic output) and returns False once ``maxiter``
    is exceeded — the field updates themselves are driven by the caller.
    """
    param_list = {'Re': 1, 'Pr': 1, 'Ra': 1, 'a' : 1, 'Nz': 100,
                  'NFourier': 50, 'dt_security': 0.9,
                  'maxiter': 100, 'freq_output': 10,
                  'freq_critical_Ra':50, 'verbose': False}
    def __init__(self, *args, **kargs):
        # save the default parameters
        for param, value in self.param_list.items():
            setattr(self, param, value)
        # override if necessary
        for param, value in kargs.items():
            if param not in self.param_list:
                raise Exception("`%s' not recognized" % param)
            else:
                setattr(self, param, value)
        # set the initial values
        self.t = 0
        self.niter = 0
        self.dz = 1/(self.Nz-1)
        # some usefull quantities
        self.oodz2 = 1/self.dz**2
        self.pi = np.pi
        # create the inner fields
        self.T = Temp(self)
        self.omega = Vort(self)
        self.psi = Stream(self)
        # previous fields for critical Ra number
        self.T_old = np.zeros((self.NFourier,))
        self.omega_old = np.zeros((self.NFourier,))
        self.psi_old = np.zeros((self.NFourier,))
    def __del__(self):
        pass
    def growth(self):
        ''' Calculate the log-growth rate and return a string containing
        all the growth rate'''
        # amp: log-amplitude of a sample value (0 is mapped to 0 to avoid
        # log(0)); gr: growth of one mode between two output instants,
        # sampled at the fixed row Nz//3.
        amp = lambda v: np.log(abs(v)) if v != 0 else 0
        gr = lambda new,old,n: str(amp(new.field[self.Nz//3,n])
                                   - amp(abs(old[n])))
        out = "".join([ gr(self.T, self.T_old,n) + "\t" +
                        gr(self.omega, self.omega_old,n) + "\t" +
                        gr(self.psi, self.psi_old,n) + "\t"
                        for n in range(self.NFourier) ])
        # save the arrays for next output
        self.T_old = self.T.field[self.Nz//3,:].copy()
        self.omega_old = self.omega.field[self.Nz//3,:].copy()
        self.psi_old = self.psi.field[self.Nz//3,:].copy()
        return out+"\n"
    def step(self):
        """Advance bookkeeping one iteration; False when maxiter exceeded."""
        # eventually output
        if self.verbose and self.niter % self.freq_output == 0:
            self.dump()
        # eventually calculate the d-ln term for the critical Ra
        if self.verbose and self.niter % self.freq_critical_Ra == 0 :
            output = "# growth : \t"
            output+= "".join([
                "{T.name}_{n}\t{w.name}_{n}\t{psi.name}_{n}\t".format(T=self.T,
                                                                      w=self.omega,
                                                                      psi=self.psi,
                                                                      n=n)
                for n in range(self.NFourier)])
            output+= "\n"
            output+= "# growth : \t"
            output+= self.growth()
            print(output)
        # get the max timestep
        self.CFL()
        # increase the time, the iteration
        self.t += self.dt
        self.niter += 1
        # check that the end is not reached
        if self.niter > self.maxiter:
            return False
        else:
            return True
    def dump(self):
        """Print a tab-separated table of all field values per grid row."""
        output = "#k\t"
        for n in range(self.NFourier):
            o = "{T}_{n}\t{w}_{n}\t{psi}_{n}\t".format(T=self.T.name,
                                                       w=self.omega.name,
                                                       psi=self.psi.name,
                                                       n=n)
            output += o
        output += "\n"
        for k in range(self.Nz):
            output += str(k) + "\t"
            for n in range(self.NFourier):
                l = "{T}\t{w}\t{psi}\t".format(T=self.T.field[k,n],
                                               w=self.omega.field[k,n],
                                               psi=self.psi.field[k,n])
                output += l
            output += "\n"
        print(output)
    def CFL(self):
        # dt < (dz)^2/4 or (dz)^2/(4Pr) if Pr > 1
        self.dt = self.dt_security * self.dz**2/(4*max(1,self.Pr))
def T_0(n, k, s):
    """Initial temperature profile for Fourier mode *n* at grid row *k*.

    Mode 0 carries the linear background profile 1 - z; every other mode
    is seeded with a half-sine in z.  *s* supplies ``pi`` and ``dz``.
    """
    z = k * s.dz
    return np.sin(s.pi * z) if n > 0 else 1 - z
if __name__ == '__main__':
    # create a new simulation
    s = Simulation(Re=5)
    # initial conditions psi(0) = 0, Omega(0) = 0
    s.psi.initial("null")
    s.omega.initial("null")
    # T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz
    # Vector.initial only accepts the strings 'null' and 'T'; passing a
    # lambda (as the original did) hit its "init_cond must be ..." raise
    # before the loop ever ran.  The 'T' branch applies exactly the
    # T_0(n, k, s) profile intended here.
    s.T.initial("T")
    # main loop over time
    while s.step():
        s.T.step()
        s.psi.step()
        s.omega.step()
    del s
| true |
cf6714f9203b10b285b97f0c00803aa7e22582d7 | Python | lenguyen1605/FlaskApp | /app.py | UTF-8 | 753 | 2.71875 | 3 | [] | no_license | from flask import *
from flask import url_for
import argparse
import requests
import os
app = Flask(__name__)
@app.route('/number-cats/<int:number>')
def main(number):
    """Download *number* cat images into ./static and return their URLs.

    Every request re-fetches images from cataas.com (network I/O) and
    overwrites cat0.jpg .. catN.jpg; the response body is the str() of
    the list of external URLs.
    """
    urls = []
    directory = "static"
    # NOTE(review): machine-specific absolute path — breaks on any other host.
    parent_dir = '/Users/lenguyen/Desktop/ProjectFlask'
    path = os.path.join(parent_dir, directory)
    if not os.path.exists(path):
        os.mkdir(path)
    for i in range(number):
        location = os.path.join(path, 'cat' + str(i) + '.jpg')
        r = requests.get('https://cataas.com/cat')
        with open(location, 'wb') as f:
            f.write(r.content)
        urls.append(url_for('static', filename='cat' + str(i) + '.jpg', _external=True))
    return str(urls)
if __name__ == "__main__":
app.run()
| true |
14983229caa60aa80df9d9d467fd3f6fc18104da | Python | mgeiger/beehive | /beehive/database/database.py | UTF-8 | 2,927 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python3
import sqlite3 as lite
import logging
import sys
table_name = 'sensor_values'
col_date_time = 'date_time'
col_temperature = 'temperature'
col_temp2 = 'temperature2'
col_pressure = 'pressure'
col_altitude = 'altitude'
col_humidity = 'humidity'
col_light = 'light'
create = "CREATE TABLE IF NOT EXISTS {}({} DATETIME PRIMARY KEY NOT NULL, {} REAL, {} REAL, {} REAL);".format(table_name, col_date_time, col_temperature, col_humidity, col_light)
database_file = '/home/pi/beehive/beehive.db'
create_sql = """
CREATE TABLE IF NOT EXISTS {}({} DATETIME PRIMARY KEY NOT NULL,
{} REAL, {} REAL, {} REAL,
{} REAL, {} REAL, {} REAL);""".format(table_name, col_date_time,
col_temperature, col_temp2, col_humidity,
col_pressure, col_altitude, col_light)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)
def create_database():
    """Create the sensor_values table in the SQLite database if missing."""
    con = lite.connect(database_file)
    try:
        logging.debug('Acquired database')
        cur = con.cursor()
        cur.execute(create_sql)
        con.commit()
    except lite.Error as e:
        # Bind the exception instance directly; the original used
        # sys.exc_info()[0], which is the exception *class*, so its
        # `e.args[0]` raised AttributeError inside the handler.
        logging.error(e.args[0])
    finally:
        if con:
            con.close()
def insert_database(temperature=None, temperature2=None, humidity=None, pressure=None, altitude=None, light=None):
    """Insert one timestamped row of sensor readings.

    Readings left as None are stored as SQL NULL.  Values are bound as
    query parameters: the original str.format interpolation produced
    invalid SQL whenever a reading was None (the literal token `None`).
    """
    insert = "INSERT INTO {} VALUES(datetime('now'), ?, ?, ?, ?, ?, ?);".format(table_name)
    con = lite.connect(database_file)
    try:
        logging.debug('Inserting into database')
        cur = con.cursor()
        cur.execute(insert, (temperature, temperature2, humidity, pressure, altitude, light))
        con.commit()
    except lite.Error as e:
        # Bind the instance; sys.exc_info()[0] is the exception class and
        # has no usable .args (see create_database).
        logging.error(e.args[0])
    finally:
        if con:
            con.close()
def get_averages():
    """Return 15-minute averages of the last day's readings.

    Result is a dict of parallel lists keyed by column name, or None when
    the query failed or returned no rows.
    """
    query = "SELECT DATETIME((STRFTIME('%s', date_time) / 900) * 900, 'unixepoch', 'localtime') interval, avg(temperature) temperature, avg(temperature2) temperature2, avg(humidity) humidity, avg(pressure) pressure, avg(altitude) altitude, avg(light) light from sensor_values where date_time > datetime('now', '-1 day') group by interval order by interval;"
    con = lite.connect(database_file)
    data = None  # stays None when the query raises (the original left
                 # `data` unbound on that path, causing a NameError below)
    try:
        cur = con.cursor()
        cur.execute(query)
        data = cur.fetchall()
    except lite.Error as e:
        # Bind the instance; sys.exc_info()[0] is the exception class.
        logging.error(e.args[0])
    finally:
        if con:
            con.close()
    # Now do something with the data...
    if not data:
        return None
    return {'time': [x[0] for x in data],
            'temperature': [x[1] for x in data],
            'temperature2': [x[2] for x in data],
            'humidity': [x[3] for x in data],
            'pressure': [x[4] for x in data],
            'altitude': [x[5] for x in data],
            'light': [x[6] for x in data]}
if __name__ == "__main__":
print(get_averages())
| true |
932ff16a4baafeaab6b672f4ebdf649b87c5a79e | Python | JoneCoder/Python_basic | /Project-02/addition.py | UTF-8 | 331 | 3.609375 | 4 | [] | no_license | result = 0
# Count 50 increments starting from the module-level `result` (0) -> 50.
for _ in range(50):
    result += 1
print(result)
# Sum 1 + 2 + ... + 50 -> 1275.
result2 = sum(range(1, 51))
print(result2)
# Sum 0 + 1 + ... + 49 -> 1225.
result3 = sum(range(50))
print(result3)
# Sum 1 + 2 + ... + 50 again via an explicit inclusive upper bound.
result4 = sum(range(1, 51))
print(result4)
202ffa2acde884466a422385762a776b211e06bc | Python | KrishnaSindhuReddyDodda/NLP-POS-tags | /Task1.py | UTF-8 | 2,885 | 3.1875 | 3 | [] | no_license | import nltk
import stanza
import a1
import accu #accu is a file with accuracy().
from nltk.corpus import brown
# For each Brown-corpus genre (adventure, news, lore): tag the pre-tokenised
# sentences with Stanza, extract the gold universal POS tags from NLTK, and
# print the tagging accuracy computed by the local `accu` helper.
print(brown.categories()) #This retrieve categories in genre of brown corpus
sent_pos_adventure = nltk.corpus.brown.tagged_sents(categories = "adventure",tagset="universal")
adv=[]
correct_adv_pos=[]
sents=nltk.corpus.brown.sents(categories="adventure")
print("sentences in adventure category...",len(sents)) #gives length of adventure category
for i in range(len(sents)):
    adv.append(sents[i])
# Stanza pipeline over the already-tokenised sentences.
pos_tagger=stanza.Pipeline(processors="tokenize,pos",tokenize_pretokenized=True)
output1=pos_tagger(adv)
pos1= a1.get_pos_from_stanza_output(output1)
print(pos1)
# Gold-standard tags from the NLTK tagged sentences.
correct_pos1=a1.get_pos_from_nltk_tagged_sents(sent_pos_adventure[0:len(sent_pos_adventure)])
for i in range(len(sent_pos_adventure)):
    correct_adv_pos.append(correct_pos1[i])
print()
print()
print("correct pos for adventure catregory ...")
print(correct_adv_pos)
print()
print("accuarcay for adventure category....")
print(accu.accuracy(correct_adv_pos,pos1))
print()
print()
# --- Same pipeline for the "news" category ---
sent_pos_news = nltk.corpus.brown.tagged_sents(categories = "news",tagset="universal")
new=[]
correct_new_pos=[]
sents=nltk.corpus.brown.sents(categories="news")
print("sentences in news category.....",len(sents)) #gives length of news category
for i in range(len(sents)):
    new.append(sents[i])
pos_tagger=stanza.Pipeline(processors="tokenize,pos",tokenize_pretokenized=True)
output2=pos_tagger(new)
pos2= a1.get_pos_from_stanza_output(output2)
print(pos2)
correct_pos2=a1.get_pos_from_nltk_tagged_sents(sent_pos_news[0:len(sent_pos_news)])
for i in range(len(sent_pos_news)):
    correct_new_pos.append(correct_pos2[i])
print()
print()
print("correct pos for news catregory ...")
print(correct_new_pos)
print()
print("accuarcay for news category....")
print(accu.accuracy(correct_new_pos,pos2))
print()
print()
# --- Same pipeline for the "lore" category ---
sent_pos_lore = nltk.corpus.brown.tagged_sents(categories = "lore",tagset="universal")
lore=[]
correct_lore_pos=[]
sents=nltk.corpus.brown.sents(categories="lore")
print("sentences in lore category....",len(sents)) #gives length of lore category
for i in range(len(sents)):
    lore.append(sents[i])
pos_tagger=stanza.Pipeline(processors="tokenize,pos",tokenize_pretokenized=True)
output3=pos_tagger(lore)
pos3= a1.get_pos_from_stanza_output(output3)
print(pos3)
correct_pos3=a1.get_pos_from_nltk_tagged_sents(sent_pos_lore[0:len(sent_pos_lore)])
for i in range(len(sent_pos_lore)):
    correct_lore_pos.append(correct_pos3[i])
print()
print()
print("correct pos for lore catregory ...")
print(correct_lore_pos)
print()
print("accuarcay for lore category....")
print(accu.accuracy(correct_lore_pos,pos3)) #Gives performance of accuracy for each brown corpus categories
| true |
deeecbe4ba43a9f7960d37e48ffff6a3fae3ed6e | Python | thijskruithof/sqrmelon | /SqrMelon/animationgraph/curvedata.py | UTF-8 | 10,286 | 3.03125 | 3 | [
"MIT"
] | permissive | from pycompat import *
from mathutil import Vec2
class Key(object):
    """
    A single key in a curve.

    Holds a (time, value) point plus in/out tangent vectors and the mode
    used to recompute them.  Currently tangent X values, tangentBroken and
    the TANGENT_USER mode are unused.
    """
    TYPE_MANUAL, TYPE_LINEAR, TYPE_FLAT = range(3)
    TANGENT_AUTO, TANGENT_SPLINE, TANGENT_LINEAR, TANGENT_FLAT, TANGENT_STEPPED, TANGENT_USER = range(6)

    def __init__(self, time, value, parent):
        self.__point = Vec2(time, value)
        self.__parent = parent
        # note that tangent X values have been deprecated and is not exported;
        # they were for cubic bezier curves that never got made
        self.inTangent = Vec2(0.0, 0.0)
        self.outTangent = Vec2(0.0, 0.0)
        self.__inTangentType = Key.TYPE_LINEAR
        self.__outTangentType = Key.TYPE_LINEAR
        self.__tangentBroken = False
        self.__tangentMode = Key.TANGENT_AUTO

    def clone(self, parent):
        """Return a copy of this key attached to *parent*."""
        k = self.__class__(self.time(), self.value(), parent)
        # Assign the *public* tangent attributes.  The original wrote to
        # k.__inTangent / k.__outTangent, which name-mangle to unused
        # _Key__inTangent attributes and silently dropped the tangents.
        k.inTangent = Vec2(self.inTangent)
        k.outTangent = Vec2(self.outTangent)
        # These two mangle onto the real private attributes, deliberately
        # bypassing the property setters (no updateTangents during clone).
        k.__tangentBroken = self.tangentBroken
        k.__tangentMode = self.tangentMode
        return k

    # TODO: refactor to use getters/setters instead of properties
    @property
    def tangentBroken(self):
        return self.__tangentBroken

    @tangentBroken.setter
    def tangentBroken(self, tangentBroken):
        self.__tangentBroken = tangentBroken
        self.updateTangents()

    @property
    def tangentMode(self):
        return self.__tangentMode

    @tangentMode.setter
    def tangentMode(self, tangentMode):
        self.__tangentMode = tangentMode
        self.updateTangents()

    def updateTangents(self):
        """Recompute in/out tangents according to the current mode."""
        if self.__tangentMode == Key.TANGENT_USER:
            return
        if self.__tangentMode == Key.TANGENT_STEPPED:
            # this leave the input tangent as is, so you can go set e.g.
            # "linear" to get the input, then back to "stepped"
            # TODO: have "output is stepped" as separate state ("in tangent" with "stepped output" control is tedious)
            self.outTangent = Vec2(0.0, float('inf'))
            return
        if self.__tangentMode == Key.TANGENT_FLAT:
            self.inTangent = Vec2(0.0, 0.0)
            self.outTangent = Vec2(0.0, 0.0)
        else:
            # AUTO/SPLINE/LINEAR need the neighbouring keys, so the curve
            # computes them.
            self.__parent.updateTangents(self, self.__tangentMode)

    def time(self):
        return self.__point.x

    def setTime(self, time):
        self.__point.x = time
        self.__parent.sortKeys()

    def value(self):
        return self.__point.y

    def setValue(self, value):
        self.__point.y = value
        self.__parent.keyChanged(self)

    def point(self):
        """Return a copy of the (time, value) point."""
        return Vec2(self.__point)

    def setPoint(self, point):
        self.__point = Vec2(point)
        self.__parent.sortKeys()
        self.__parent.keyChanged(self)

    def delete(self):
        """Remove this key from its parent curve."""
        self.__parent.deleteKey(self)

    def reInsert(self):
        """Re-add this key to its parent curve (undo of delete)."""
        self.__parent.reInsert(self)

    def parentCurve(self):
        return self.__parent
class Curve(object):
    """
    Animation data with Cubic Hermite Spline interpolation.

    Keys are kept sorted by time; tangent vectors are refreshed whenever
    keys are added, moved or removed.
    """
    def __init__(self):
        self.__keys = []
        self.sortKeys()

    def clone(self):
        """Deep-copy the curve (each key is cloned onto the new curve)."""
        curve = Curve()
        for key in self.__keys:
            k = key.clone(curve)
            curve.__keys.append(k)
        curve.sortKeys()
        return curve

    def keyAt(self, time):
        """Return the key at exactly *time*, or None."""
        for key in self.__keys:
            if key.time() == time:
                return key

    def deleteKey(self, key):
        """Remove *key* and refresh the tangents of its ex-neighbours."""
        idx = self.__keys.index(key)
        self.__keys.pop(idx)
        # NOTE(review): `idx != 1` looks like it was meant to be
        # `idx != 0` (guarding against updating __keys[-1] when the first
        # key is removed) — confirm before relying on this branch.
        if idx != 1 and len(self.__keys):
            self.__keys[idx - 1].updateTangents()
        if idx != len(self.__keys):
            self.__keys[idx].updateTangents()

    def addKeyWithTangents(self,
                           inTangentX, inTangentY,
                           time, value,
                           outTangentX, outTangentY,
                           tangentBroken, tangentMode):
        """Insert a fully specified key and return it."""
        k = Key(time, value, self)
        self.__keys.append(k)
        self.sortKeys()
        k.inTangent = Vec2(inTangentX, inTangentY)
        k.outTangent = Vec2(outTangentX, outTangentY)
        k.tangentBroken = tangentBroken
        k.tangentMode = tangentMode
        return k

    def reInsert(self, key):
        """Put a previously deleted key back into the curve."""
        self.__keys.append(key)
        self.sortKeys()

    def keyChanged(self, key):
        """Refresh the tangents of *key* and of its direct neighbours."""
        idx = self.__keys.index(key)
        first = idx == 0
        last = idx == len(self.__keys) - 1
        if not first:
            self.__keys[idx - 1].updateTangents()
        key.updateTangents()
        if not last:
            self.__keys[idx + 1].updateTangents()

    def updateTangents(self, key, mode):
        """Compute *key*'s tangents from its neighbours for the given mode."""
        idx = self.__keys.index(key)
        first = idx == 0
        last = idx == len(self.__keys) - 1
        if first and last:
            return
        # Unit direction from key a towards key b (x forced positive).
        def keyDirection(a, b):
            keyDifference = b.point() - a.point()
            try:
                keyDifference.normalize()
            except ZeroDivisionError:
                return Vec2(0.0, 0.0)
            keyDifference.x = abs(keyDifference.x)
            return keyDifference
        # Rescale the tangents so their x spans the neighbour time gap.
        def finalize():
            if not first and key.inTangent.length() != 0:
                pd = self.__keys[idx].time() - self.__keys[idx - 1].time()
                try:
                    key.inTangent *= pd / key.inTangent.x
                except ZeroDivisionError:
                    pass
            if not last and key.outTangent.length() != 0:
                nd = self.__keys[idx + 1].time() - self.__keys[idx].time()
                try:
                    key.outTangent *= nd / key.outTangent.x
                except ZeroDivisionError:
                    pass
        if mode == Key.TANGENT_LINEAR:
            # Each tangent points straight at the corresponding neighbour.
            if first:
                key.inTangent = Vec2(0.0, 0.0)
            else:
                key.inTangent = keyDirection(self.__keys[idx], self.__keys[idx - 1])
                key.inTangent.x = -key.inTangent.x
            if last:
                key.outTangent = Vec2(0.0, 0.0)
            else:
                key.outTangent = keyDirection(self.__keys[idx], self.__keys[idx + 1])
            finalize()
            return
        elif mode == Key.TANGENT_SPLINE:
            # Smooth: tangents follow the chord between the two neighbours.
            if first:
                key.outTangent = keyDirection(self.__keys[idx], self.__keys[idx + 1])
                key.inTangent = key.outTangent
            elif last:
                key.inTangent = keyDirection(self.__keys[idx], self.__keys[idx - 1])
                key.inTangent.x = -key.inTangent.x
                key.outTangent = -key.inTangent
            else:
                key.outTangent = keyDirection(self.__keys[idx - 1], self.__keys[idx + 1])
                key.inTangent = -key.outTangent
            finalize()
            return
        elif mode == Key.TANGENT_AUTO:
            # Flatten at local extremes, spline-like otherwise.
            # NOTE(review): sgn compares against 1, not 0 — looks like a
            # sign-function typo; confirm the intended behaviour.
            def sgn(x):
                return -1 if x < 1 else 1 if x > 1 else 0
            if first or last or sgn(self.__keys[idx - 1].value() - key.value()) == sgn(
                    self.__keys[idx + 1].value() - key.value()):
                key.inTangent = Vec2(0.0, 0.0)
                key.outTangent = Vec2(0.0, 0.0)
            else:
                key.outTangent = keyDirection(self.__keys[idx - 1], self.__keys[idx + 1])
                key.inTangent = -key.outTangent
            finalize()
            return
        elif mode in (Key.TANGENT_USER, Key.TANGENT_STEPPED):
            return
        assert False, 'Invalid tangent mode for key.'

    def sortKeys(self):
        """Re-sort keys by time and refresh every key's tangents."""
        # TODO: optimize in any way?
        self.__keys.sort(key=lambda k: k.time())
        for key in self.__keys:
            key.updateTangents()

    def __iter__(self):
        for key in self.__keys:
            yield key

    def __getitem__(self, index):
        return self.__keys[index]

    def __setitem__(self, index, pos):
        self.__keys[index] = pos

    def __len__(self):
        return len(self.__keys)

    def scale(self, speed):
        """
        Speed up the animation by the given multiplier.
        """
        # reverse to avoid auto-sorting messing up anything
        for key in reversed(self.__keys):
            key.setTime(key.time() / speed)

    def move(self, deltaTime):
        """
        Move the animation by the given addition.
        """
        # shifting to the right, reverse application order to avoid auto-sorting messing up anything
        if deltaTime > 0.0:
            for key in reversed(self.__keys):
                key.setTime(key.time() + deltaTime)
        else:
            for key in self.__keys:
                key.setTime(key.time() + deltaTime)

    def trim(self, start, end):
        """
        Delete keys outside of the given time range.
        """
        assert start <= end
        # Keep one key just before `start` and one just after `end` so the
        # curve shape inside the range is preserved.
        startIdx = -1
        endIdx = len(self.__keys)
        for i, key in enumerate(self.__keys):
            if startIdx < 0 and key.time() > start:
                startIdx = i - 1
            if key.time() >= end:
                endIdx = i + 1
                break
        self.__keys = self.__keys[max(startIdx, 0):min(endIdx, len(self.__keys))]

    def evaluate(self, time):
        """
        Hermite spline interpolation at the given time.
        Times outside the bounds are just clamped to the endpoints.
        """
        if not self.__keys:
            return 0.0
        if time <= self.__keys[0].time():
            return self.__keys[0].value()
        for i in range(1, len(self.__keys)):
            if self.__keys[i].time() > time:
                # Cubic Hermite segment between keys i-1 and i, using the
                # tangents' y components (x has been normalised away).
                p0 = self.__keys[i - 1].point()
                p1 = self.__keys[i - 1].outTangent.y
                # stepped tangents
                if p1 == float('inf'):
                    return p0.y
                p2 = self.__keys[i].inTangent.y
                p3 = self.__keys[i].point()
                dx = p3.x - p0.x
                dy = p3.y - p0.y
                c0 = (p1 + p2 - dy - dy)
                c1 = (dy + dy + dy - p1 - p1 - p2)
                c2 = p1
                c3 = p0.y
                t = (time - p0.x) / dx
                return t * (t * (t * c0 + c1) + c2) + c3
        return self.__keys[-1].value()
| true |
81dd10d7da9fcf5fdcb90452428e2b66f1ce55c7 | Python | osk7462/app_store | /customer.py | UTF-8 | 1,979 | 3.765625 | 4 | [] | no_license | from apps import AppStore
class Customer:
    """A shopper holding a cart of apps and their running total.

    Attributes:
        cart: list of "Name--$price" strings currently in the cart.
        total: combined price (float) of everything in the cart.
    """

    def __init__(self):
        self.cart = []
        self.total = float(0)

    def __show_cart(self):
        # Pretty-print the current cart contents.
        print("*" * 11 + "Your Cart" + "*" * 11 + "\n")
        print("Name--price")
        for entry in self.cart:
            print(entry)
        print("\n")

    def add_to_cart(self, app_name):
        """Look app_name up in the store (case-insensitive) and add it."""
        wanted = str.lower(app_name)
        for listing in AppStore.app_list:
            app, price = listing.split(':')
            if str.lower(app) == wanted:
                self.cart.append("{}--{}".format(app, price))
                self.total += float(price[1:])  # drop the leading '$'
                print("{} is added to cart\n".format(app))
                self.__show_cart()
                return
        print("no such item available in app store")

    def delete_from_cart(self, app_name):
        """Remove the first matching app (case-insensitive) from the cart."""
        wanted = str.lower(app_name)
        for position, entry in enumerate(self.cart):
            app, price = entry.split('--')
            if str.lower(app) == wanted:
                self.total -= float(price[1:])
                self.cart.pop(position)
                print("{} is deleted from cart".format(app))
                self.__show_cart()
                return
        print("no such item is in your cart")
        self.__show_cart()

    def checkout(self):
        """Print every cart entry and the grand total."""
        print("\nYour cart:")
        for entry in self.cart:
            app, price = entry.split('--')
            print("{}--{}".format(app, price[1:]))
        print("Your Total: {:.2f}\n".format(self.total))
| true |
cf32d1bde5563f9033ac246c7e8da8f0de7a2299 | Python | NBCisae/RLchallenge | /simonet/trainning.py | UTF-8 | 3,125 | 2.765625 | 3 | [] | no_license | import numpy as np
import pickle
from ple import PLE
from ple.games.flappybird import FlappyBird
from state import new_state
#Retourner l'action en fonction du argmax (0 ou 1)
def get_action(a):
    """Map the argmax index (0 or 1) onto PLE's action code.

    0 -> 0 (do nothing), 1 -> 119 (flap; the pygame keycode PLE expects).
    """
    return 119 if a else 0
#Def epsilon greedy
def epsilon_greedy(Q, new_state, epsilon, state):
    """Greedy action from Q with an epsilon-scaled heuristic override.

    With probability <= epsilon, and a further nested 0.5*epsilon coin flip,
    the greedy choice is replaced by a hand-coded rule: flap (1) when the
    bird is within 50 px of the next pipe's bottom opening, else glide (0).
    """
    x, y, v = new_state[0], new_state[1], new_state[2]
    chosen = np.argmax(Q[x, y, v])
    if np.random.rand() <= epsilon:
        if np.random.rand() <= 0.5 * epsilon:
            chosen = 1 if state['next_pipe_bottom_y'] - state['player_y'] < 50 else 0
    return chosen
# --- hyper-parameters ---
gamma = 0.95
alpha = 0.9
epsilon = 0.1
nb_games = 15000
# size of the discretised state space (x-distance, y-distance, velocity)
X = 18
Y = 30
V = 21
# Q-table: one (x, y, v) cell per discretised state, two actions
Q = np.zeros((X, Y, V, 2))
# create the game, headless and at maximum speed for training
game = FlappyBird(graphics="fancy")
p = PLE(game, fps=30, frame_skip=1, num_steps=1, force_fps=True, display_screen=False)
# running reward totals over the last 100 / 1000 games
last_100 = 0
last_1000 = 0
for g in range(1, nb_games):
    # start a fresh game
    p.init()
    p.reset_game()
    state = game.getGameState()
    reward = training_reward = 0
    s = new_state(state)
    action = epsilon_greedy(Q, s, epsilon, state)
    while not p.game_over():
        # play the chosen action
        reward = p.act(get_action(action))
        # shaped training reward: big penalty on death, small survival bonus
        if reward == -5:
            training_reward = -100
        else:
            training_reward = 1
        # observe the next state and pick the next action
        state_ = game.getGameState()
        s_ = new_state(state_)
        # BUGFIX: the heuristic inside epsilon_greedy must see the *new* raw
        # state (previously `state`, i.e. always the state at game start)
        action_ = epsilon_greedy(Q, s_, epsilon, state_)
        # SARSA update
        delta = (training_reward + gamma * Q[s_[0], s_[1], s_[2]][action_] - Q[s[0], s[1], s[2]][action])
        Q[s[0], s[1], s[2]][action] = Q[s[0], s[1], s[2]][action] + alpha * delta
        # step forward
        s = s_
        state = state_
        action = action_
        # accumulate the true (unshaped) reward, skipping the death penalty
        if reward + 5:
            last_100 += reward
            last_1000 += reward
    # progress reporting and learning-rate decay
    if g % 100 == 0:
        print('Moyenne des 100 derniers essais : %.2f' % (last_100 / 100))
        last_100 = 0
    if g % 1000 == 0:
        # decay alpha down to 0.1 once per 1000 games
        while alpha > 0.1:
            alpha /= 1.01
        print('Moyenne des 1000 derniers essais : %2f' % (last_1000 / 1000))
        # early stop once the 1000-game average clears 50
        if last_1000 / 1000 > 50:
            break
        last_1000 = 0
# persist the learned Q-table (pickle; marshal could not serialise it)
with open('Qtrained', 'wb') as f:
    pickle.dump(Q, f)
2ba6bbd1e9ebdd6aa86859a8f32ec55887181528 | Python | skyxyz-lang/CS_Note | /leetcode/tree/code/54.py | UTF-8 | 827 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
@author: skyxyz-lang
@file: 54.py
@time: 2020/11/21 09:46
@desc:https://leetcode-cn.com/problems/er-cha-sou-suo-shu-de-di-kda-jie-dian-lcof/
二叉搜索树的第k大节点
"""
from tree_node import TreeNode
class Solution(object):
    """Offer 54: find the k-th largest value in a binary search tree."""

    def __init__(self):
        self.index = 0  # unused; kept for backwards compatibility
        # in-order traversal buffer; rebuilt on every kthLargest() call
        self.val = []

    def kthLargest(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: int
        """
        # reset the buffer so repeated calls on the same instance stay
        # correct and do not grow without bound
        self.val = []
        self.travel(root, k)
        # in-order traversal of a BST is ascending, so the k-th largest
        # value sits k elements from the end
        return self.val[len(self.val) - k]

    def travel(self, root, k):
        """In-order traversal appending every node value to self.val.

        :param root: current subtree root (or None)
        :param k: unused here; kept to match the original signature
        """
        if not root:
            return None
        self.travel(root.left, k)
        self.val.append(root.val)
        self.travel(root.right, k)
| true |
3df032d90ceab58651241481b1cfa04caddb9132 | Python | kmwalsh1/fem | /post/create_pointloads_vtk.py | UTF-8 | 15,356 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/env python
"""
create_pointloads_vtk.py
Creates .vts file, which can be viewed in Paraview, from node and point loads
files.
Here is one solution I found for viewing the loads on the inside of the mesh:
1. Load the mesh into Paraview.
2. Press the "Calculator" button on the top left side of Paraview. The
calculator filter should open up in the sidebar on the left. Next, in the text
box between "Result Array Name" and above all of the calculator buttons, type
"mag(loads)" without the quotation marks. Next, change the "Result Array Name"
from "Result" to something like "load magnitude". Now, hit the Apply button.
This part needs to be done because when the .vts file was created, the loads
data were represented as vectors with non-zero values for the z-components only.
Taking the magnitude of the loads vectors converts them all into scalar values.
3. Now that we have the loads data in scalar form, we can apply a Threshold
filter to visualize only the nodes with non-zero load values. The Threshold
filter can be found on the top left, several buttons to the left of the
"Calculator" button. Before applying the Threshold filter, make sure that you
are filtering by "load magnitude" and that the lower threshold is a small
non-zero value, like 0.000001. You should now only see the nodes with non-zero
load values.
4. In order to visualize these nodes within the context of the mesh, you should
hit the eye-shaped button next to the .vts file in the side bar to allow the
entire mesh to appear in the scene. Next, select the .vts file, scroll down to
Opacity in the Properties tab of the sidebar, and change the opacity to around
0.5. You should now be able to see the loads that were previously hidden inside
the mesh.
EXAMPLE
=======
python create_pointloads_vtk.py --nodefile nodes.dyn
--loadfile PointLoads.dyn
--loadout loadout.vts
=======
"""
def main():
    """Entry point: parse the CLI and write a VTK file from the mesh data."""
    import sys
    if sys.version_info[:2] < (2, 7):
        sys.exit("ERROR: Requires Python >= 2.7")
    args = parse_cli()
    print("Extracting data . . .")
    if args.elefile is None:
        # no element file -> structured grid (.vts) from nodes/loads only
        create_vts(args)
    else:
        # element file given -> unstructured grid (.vtu) carrying part IDs
        create_vtu(args)
def parse_cli():
    """Build the argument parser and return the parsed CLI namespace."""
    import argparse
    cli = argparse.ArgumentParser(
        description="Generate .vts file from nodes and PointLoads files.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument("--nodefile",
                     default="nodes.dyn",
                     help="name of ASCII file containing node IDs and positions.")
    cli.add_argument("--elefile",
                     default=None,
                     help="name of ASCII file containing element IDs, part IDs, "
                          "and node components. If this argument is given, then "
                          "an unstructured grid VTK file (.vtu) will be created "
                          "instead of a structured grid VTK file (.vts).")
    cli.add_argument("--loadfile",
                     default=None,
                     help="name of PointLoads file. Loads will "
                          "not be written to VTK file if load file is not given.")
    cli.add_argument("--nonlinear",
                     default=None, action='store_true',
                     help="use this flag if mesh is nonlinear ")
    cli.add_argument("--loadout",
                     default="nodeLoads.vts",
                     help="name of output .vts file.")
    return cli.parse_args()
def create_vts(args):
    """Write a StructuredGrid (.vts) VTK file from the node and load files.

    StructuredGrid format assumes a linear mesh; a nonlinear mesh should be
    processed with an elements file instead (see create_vtu).
    """
    # normalise the output extension to .vts
    if '.' in args.loadout and not args.loadout.endswith('.vts'):
        args.loadout = args.loadout[:args.loadout.find('.')] + '.vts'
    out = open(args.loadout, 'w')
    # file header
    out.write('<VTKFile type="StructuredGrid" version="0.1" byte_order="LittleEndian">\n')
    # opening tags plus node position data
    numNodes, numElems = writeNodePositions(out, args, 'vts')
    # point data: node IDs and loads
    out.write('\t\t\t<PointData>\n')
    writeNodeIDs(out, args, numNodes)
    writePointLoads(out, args, numNodes)
    out.write('\t\t\t</PointData>\n')
    # closing tags
    out.write('\t\t</Piece>\n')
    out.write('\t</StructuredGrid>\n')
    out.write('</VTKFile>')
    out.close()
def create_vtu(args):
    """Write an UnstructuredGrid (.vtu) VTK file with cells and part IDs."""
    # normalise the output extension to .vtu
    if '.' in args.loadout and not args.loadout.endswith('.vtu'):
        args.loadout = args.loadout[:args.loadout.find('.')] + '.vtu'
    out = open(args.loadout, 'w')
    # file header
    out.write('<VTKFile type="UnstructuredGrid" version="0.1" byte_order="LittleEndian">\n')
    # opening tags plus node position data
    numNodes, numElems = writeNodePositions(out, args, 'vtu')
    # point data: node IDs plus loads when a load file was supplied
    out.write('\t\t\t<PointData>\n')
    writeNodeIDs(out, args, numNodes)
    if args.loadfile is not None:
        writePointLoads(out, args, numNodes)
    out.write('\t\t\t</PointData>\n')
    # cells and per-cell part IDs come from the element file
    writeCells(out, args)
    out.write('\t\t\t<CellData>\n')
    writeCellData(out, args)
    out.write('\t\t\t</CellData>\n')
    # closing tags
    out.write('\t\t</Piece>\n')
    out.write('\t</UnstructuredGrid>\n')
    out.write('</VTKFile>')
    out.close()
def writeNodePositions(loadout, args, filetype):
    '''
    writes opening tags as well as node positions to
    loadout file. returns array containing number of
    nodes (index = 0) and number of elements (index = 1).
    '''
    print 'Writing node positions'
    nodes = open(args.nodefile, 'r')
    headerWritten = False
    for line in nodes:
        # if nonlinear flag not given, then check nodes header for
        # nonlinearity (a blank first line is taken to mean nonlinear)
        if args.nonlinear == None:
            if line.startswith('\n'):
                args.nonlinear = True
            else:
                args.nonlinear = False
        # getting number of elements in x, y, z dimensions
        # as well as total number of nodes (for node ID)
        # when number of elements are defined in node file header.
        # cannot get dimension data from nodefile header or nodes are nonlinear
        if not headerWritten:
            if args.nonlinear:
                # get max node ID and coordinates of padding node
                numNodes = 0
                nodeCount = open(args.nodefile, 'r')
                for line in nodeCount:
                    if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
                        raw_data = line.split(',')
                        numNodes = int(raw_data[0])
                        # padding node is just coordinates of the last read node. it will be used
                        # to pad unlisted nodes so that no new nodes will be introduced while the
                        # node indices of the VTK file still match up with the indices necessary
                        # for the cell definitions.
                        paddingNodePos = raw_data[1:]
                nodeCount.close()
                # count number of elements
                numElems = 0
                elemCount = open(args.elefile, 'r')
                for line in elemCount:
                    if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
                        numElems += 1
                # NOTE(review): the two counting loops above rebind `line`, so
                # the position-writing block at the bottom of this iteration
                # sees the counting files' last line, not the outer file's
                # current line — verify this is intended.
                # initialize currentNode variable, which will be
                # used for node position padding
                currentNode = 1
                loadout.write('\t<UnstructuredGrid>\n')
                loadout.write('\t\t<Piece NumberOfPoints="%d" NumberOfCells="%d">\n' \
                    % (numNodes, numElems))
                loadout.write('\t\t\t<Points>\n')
                loadout.write('\t\t\t\t<DataArray type="Float32" Name="Array" NumberOfComponents="3" format="ascii">\n')
                headerWritten = True
            else:
                if 'numElem=' in line:
                    # parse dimensions from node file header, e.g. "[nx, ny, nz]"
                    dimensionsStart = line.find('[')
                    dimensionsEnd = line.find(']')
                    dimensions = line[dimensionsStart+1:dimensionsEnd].split(', ')
                    dimensions = [int(x) for x in dimensions]
                    numNodes = (dimensions[0]+1)*(dimensions[1]+1)*(dimensions[2]+1)
                    numElems = dimensions[0]*dimensions[1]*dimensions[2]
                    # writing volume dimensions to .vts file, and finishing up header
                    # NOTE(review): `filetype is 'vts'` compares identity, not
                    # equality; it happens to work for short interned literals
                    # but `== 'vts'` would be the correct test.
                    if filetype is 'vts':
                        loadout.write('\t<StructuredGrid WholeExtent="0 %d 0 %d 0 %d">\n' \
                            % (dimensions[0], dimensions[1], dimensions[2]))
                        loadout.write('\t\t<Piece Extent="0 %d 0 %d 0 %d">\n' \
                            % (dimensions[0], dimensions[1], dimensions[2]))
                    if filetype is 'vtu':
                        loadout.write('\t<UnstructuredGrid>\n')
                        loadout.write('\t\t<Piece NumberOfPoints="%d" NumberOfCells="%d">\n' \
                            % (numNodes, numElems))
                        loadout.write('\t\t\t<Points>\n')
                        loadout.write('\t\t\t\t<DataArray type="Float32" Name="Array" NumberOfComponents="3" format="ascii">\n')
                    headerWritten = True
        # reading node position data from nodefile
        if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
            raw_data = line.split(',')
            if args.nonlinear:
                # pad any gap in node IDs with copies of the padding node
                while currentNode < int(raw_data[0]):
                    loadout.write('\t\t\t\t\t%s %s %s' \
                        % (paddingNodePos[0], paddingNodePos[1], paddingNodePos[2]))
                    currentNode += 1
            loadout.write('\t\t\t\t\t%s %s %s' \
                % (raw_data[1], raw_data[2], raw_data[3]))
            if args.nonlinear:
                currentNode += 1
    # done writing node position data
    loadout.write('\t\t\t\t</DataArray>\n')
    loadout.write('\t\t\t</Points>\n')
    nodes.close()
    return numNodes, numElems
def writeNodeIDs(loadout, args, numNodes):
    """Write one float node ID per node (1..numNodes) as a PointData array.

    :param loadout: open, writable file-like object for the VTK output
    :param args: parsed CLI namespace (unused here; kept for a uniform signature)
    :param numNodes: total number of nodes in the mesh
    """
    # print() call form works under both Python 2 and 3 for a single argument
    print('Writing node IDs')
    loadout.write('\t\t\t\t<DataArray type="Float32" Name="node_id" format="ascii">\n')
    for i in range(1, numNodes + 1):
        loadout.write('\t\t\t\t\t%.1f\n' % i)
    loadout.write('\t\t\t\t</DataArray>\n')
def writePointLoads(loadout, args, numNodes):
    """Write a 3-component load vector per node as a PointData array.

    The PointLoads file lists only nodes with non-zero loads, so every
    unlisted node is written with a zero vector to keep the array aligned
    with the node IDs. Loads act along z only (components x, y are 0.0).

    :param loadout: open, writable file-like object for the VTK output
    :param args: parsed CLI namespace; args.loadfile is the PointLoads path
    :param numNodes: total number of nodes in the mesh
    """
    print('Writing point loads')
    loadout.write('\t\t\t\t<DataArray NumberOfComponents="3" type="Float32" Name="loads" format="ascii">\n')
    currentNode = 1
    loads = open(args.loadfile, 'r')
    for line in loads:
        if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
            raw_data = line.split(',')
            # pad skipped node IDs with zero load vectors
            while currentNode < int(raw_data[0]):
                loadout.write('\t\t\t\t\t0.0 0.0 0.0\n')
                currentNode += 1
            loadout.write('\t\t\t\t\t0.0 0.0 %f\n' % float(raw_data[3]))
            currentNode += 1
    loads.close()  # the original leaked this file handle
    # finish writing zero-load nodes after the last listed load
    while currentNode <= numNodes:
        loadout.write('\t\t\t\t\t0.0 0.0 0.0\n')
        currentNode += 1
    loadout.write('\t\t\t\t</DataArray>\n')
def writeCells(loadout, args):
    """Write the <Cells> section (connectivity, offsets, types).

    The element file is read three separate times because each sub-array
    must be written contiguously in the XML VTK format.

    :param loadout: open, writable file-like object for the VTK output
    :param args: parsed CLI namespace; args.elefile is the element file path
    """
    print('Writing cells')
    loadout.write('\t\t\t<Cells>\n')
    # --- connectivity: zero-based node indices per element ---
    loadout.write('\t\t\t\t<DataArray type="Int32" Name="connectivity" Format="ascii">\n')
    elems = open(args.elefile, 'r')
    for line in elems:
        if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
            raw_data = line.split(',')
            loadout.write('\t\t\t\t\t')
            for nodeID in raw_data[2:]:
                # convert the 1-based dyn node IDs to 0-based VTK indices
                node = int(nodeID) - 1
                loadout.write('%d ' % node)
            loadout.write('\n')
    elems.close()
    loadout.write('\t\t\t\t</DataArray>\n')
    # --- offsets: running vertex count at the end of each cell ---
    loadout.write('\t\t\t\t<DataArray type="Int32" Name="offsets" Format="ascii">\n')
    elems = open(args.elefile, 'r')
    offset = 0
    for line in elems:
        if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
            raw_data = line.split(',')
            offset += len(raw_data[2:])
            loadout.write('\t\t\t\t\t %d\n' % offset)
    elems.close()
    loadout.write('\t\t\t\t</DataArray>\n')
    # --- types: VTK cell type codes (12 == VTK_HEXAHEDRON) ---
    # reference figures 2+3 on pages 9-10 for more info on types:
    # http://www.vtk.org/VTK/img/file-formats.pdf
    loadout.write('\t\t\t\t<DataArray type="Int32" Name="types" Format="ascii">\n')
    elems = open(args.elefile, 'r')
    for line in elems:
        if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
            raw_data = line.split(',')
            numVertices = len(raw_data[2:])
            if numVertices == 8:
                cellType = 12
            # NOTE(review): only 8-node hexahedra are recognised; any other
            # vertex count raises NameError (or reuses a stale cellType).
            loadout.write('\t\t\t\t\t %d\n' % cellType)
    elems.close()
    loadout.write('\t\t\t\t</DataArray>\n')
    loadout.write('\t\t\t</Cells>\n')
def writeCellData(loadout, args):
    """Write per-element part IDs as a CellData array.

    :param loadout: open, writable file-like object for the VTK output
    :param args: parsed CLI namespace; args.elefile is the element file path
    """
    # print() call form works under both Python 2 and 3 for a single argument
    print('Writing cell data')
    loadout.write('\t\t\t\t<DataArray type="Int32" Name="part id" Format="ascii">\n')
    elems = open(args.elefile, 'r')
    for line in elems:
        if not line.startswith('$') and not line.startswith('*') and not line.startswith('\n'):
            raw_data = line.split(',')
            # column 1 of each element line is its part ID
            loadout.write('\t\t\t\t\t%s\n' % raw_data[1])
    elems.close()
    loadout.write('\t\t\t\t</DataArray>\n')
if __name__ == "__main__":
main()
| true |
325cee2e7e0907b9cb736f36fbedb5eddf2cd7f2 | Python | bamblebam/text-summarizer-thing | /webapp/views.py | UTF-8 | 1,053 | 2.515625 | 3 | [] | no_license | from django.shortcuts import render, redirect
from .text_summarizer_v2 import generate_summary
from django.contrib import messages
# Create your views here.
def home(request):
    """Render the summariser form; on POST, summarise and redirect to the answer page."""
    summarized_text = ''
    if request.method == 'POST':
        stuff = request.POST.get('stuff')
        num_of_lines = int(request.POST.get('num_of_lines'))
        try:
            summarized_text = generate_summary(stuff, num_of_lines)
        except IndexError:
            # requested more sentences than the source text contains
            error = 'Number of sentences of summarized text cannot be greater than that of original text'
            messages.error(request, error)
            return redirect('home')
        # stash the result in the session for the answer view to pick up
        request.session['summarized_text'] = summarized_text
        return redirect('answer')
    return render(request, 'webapp/home.html', {'summarized_text': summarized_text})
def answer(request):
    """Show the summary previously stored in the session by the home view."""
    return render(request, 'webapp/answer.html',
                  {'summarized_text': request.session['summarized_text']})
| true |
714334ec59c3963710fea9606742a65ead5a0e46 | Python | gary-butler/Learning_Deep_Learning | /cartpole5.py | UTF-8 | 3,405 | 2.65625 | 3 | [] | no_license | import gym
import numpy as np
import tensorflow as tf
def policy_gradient():
    """Build the policy-network graph: softmax over a 4x2 linear layer.

    Returns (action_probabilities, state_placeholder).

    NOTE(review): the loss and Adam optimizer ops are created here but not
    returned, so the policy cannot be trained through these handles.
    """
    theta = tf.get_variable("policy_parameters", [4, 2])
    state_ph = tf.placeholder("float", [None, 4])
    actions_ph = tf.placeholder("float", [None, 2])
    advantages_ph = tf.placeholder("float", [None, 1])
    probabilities = tf.nn.softmax(tf.matmul(state_ph, theta))
    # probability assigned to the action actually taken, per sample
    chosen_probs = tf.reduce_sum(tf.multiply(probabilities, actions_ph), reduction_indices=[1])
    # policy-gradient surrogate: log pi(a|s) weighted by the advantage
    eligibility = tf.log(chosen_probs) * advantages_ph
    loss = -tf.reduce_sum(eligibility)
    tf.train.AdamOptimizer(0.01).minimize(loss)
    return probabilities, state_ph
def value_gradient():
    """Build the value-network graph: 4 -> 10 (ReLU) -> 1 regression.

    Returns (predicted_value, state_placeholder, target_placeholder, optimizer_op).
    """
    state_ph = tf.placeholder("float", [None, 4])
    w1 = tf.get_variable("w1", [4, 10])
    b1 = tf.get_variable("b1", [10])
    hidden = tf.nn.relu(tf.matmul(state_ph, w1) + b1)
    w2 = tf.get_variable("w2", [10, 1])
    b2 = tf.get_variable("b2", [1])
    calculated = tf.matmul(hidden, w2) + b2
    targets_ph = tf.placeholder("float", [None, 1])
    # L2 regression loss against the observed discounted returns
    loss = tf.nn.l2_loss(calculated - targets_ph)
    optimizer = tf.train.AdamOptimizer(0.1).minimize(loss)
    return calculated, state_ph, targets_ph, optimizer
def run_episode(env, parameters):
    """Play one episode (at most 200 steps) with a fixed linear policy.

    The action is 0 when parameters . observation is negative, else 1.
    Returns the accumulated episode reward.
    """
    observation = env.reset()
    episode_reward = 0
    for _ in range(200):
        env.render()
        action = 0 if np.matmul(parameters, observation) < 0 else 1
        observation, reward, done, info = env.step(action)
        episode_reward += reward
        if done:
            break
    return episode_reward
# build the environment, session, and policy graph
env = gym.make('CartPole-v0')
sess = tf.Session()
# NOTE(review): variables are initialised *before* policy_gradient() creates
# them, so the policy variables start uninitialised — confirm the order.
sess.run(tf.global_variables_initializer())
pl_probabilities, pl_state = policy_gradient()
# roll out one episode, recording (state, action, reward) transitions
observation = env.reset()
actions = []
transitions = []
# NOTE(review): `states`, `totalreward` and the `random` module are used
# below but never defined or imported in this file.
for _ in range(200):
    env.render()
    obs_vector = np.expand_dims(observation, axis=0)
    probs = sess.run(pl_probabilities,feed_dict={pl_state: obs_vector})
    # sample the action from the policy's probability for action 0
    action = 0 if random.uniform(0,1) < probs[0][0] else 1
    states.append(observation)
    # one-hot encode the chosen action
    actionblank = np.zeros(2)
    actionblank[action] = 1
    actions.append(actionblank)
    old_observation = observation
    observation, reward, done, info = env.step(action)
    transitions.append((old_observation, action, reward))
    totalreward += reward
    if done:
        break
vl_calculated, vl_state, vl_newvals, vl_optimizer = value_gradient()
# discounted return from each step onwards (discount factor 0.97)
update_vals = []
for index, trans in enumerate(transitions):
    obs, action, reward = trans
    future_reward = 0
    future_transitions = len(transitions) - index
    decrease = 1
    # NOTE(review): `xrange` is Python 2 only; use range() under Python 3.
    for index2 in xrange(future_transitions):
        future_reward += transitions[(index2) + index][2] * decrease
        decrease = decrease * 0.97
    update_vals.append(future_reward)
# fit the value network towards the observed returns
update_vals_vector = np.expand_dims(update_vals, axis=1)
sess.run(vl_optimizer, feed_dict={vl_state: states, vl_newvals: update_vals_vector})
# advantage of each step = observed return - predicted value
for index, trans in enumerate(transitions):
    obs, action, reward = trans
    obs_vector = np.expand_dims(obs, axis=0)
    currentval = sess.run(vl_calculated,feed_dict={vl_state: obs_vector})[0][0]
    # NOTE(review): `advantages` is never initialised, and `future_reward`
    # here is the stale value from the last iteration of the previous loop.
    advantages.append(future_reward - currentval)
# one policy-gradient update step
advantages_vector = np.expand_dims(advantages, axis=1)
# NOTE(review): pl_optimizer, pl_advantages and pl_actions are not returned
# by policy_gradient(), so every one of these names is undefined here.
sess.run(pl_optimizer, feed_dict={pl_state: states, pl_advantages: advantages_vector, pl_actions: actions})
c9191a6886826502ee2a159bd4fbe6040ddbce65 | Python | srinaveendesu/Programs | /PythonScripts/pattern_command.py | UTF-8 | 2,252 | 4.03125 | 4 | [] | no_license | #Behavioral pattern
# The idea of a Command pattern is to decouple the object that invokes the operation from the
# one that knows how to perform it.
class Screen(object):
def __init__(self, text=''):
self.text = text
self.clip_board = ''
def cut(self, start=0, end=0):
self.clip_board = self.text[start:end]
self.text = self.text[:start] + self.text[end:]
def paste(self, offset=0):
self.text = self.text[:offset] + self.clip_board + self.text[offset:]
def clear_clipboard(self):
self.clip_board = ''
def length(self):
return len(self.text)
def __str__(self):
return self.text
# Screen command interface
class ScreenCommand:
def __init__(self, screen):
self.screen = screen
self.previous_state = screen.text
def execute(self):
pass
def undo(self):
pass
class CutCommand(ScreenCommand):
def __init__(self, screen, start=0, end=0):
super().__init__(screen)
self.start = start
self.end = end
def execute(self):
self.screen.cut(start=self.start, end=self.end)
def undo(self):
self.screen.clear_clipboard()
self.screen.text = self.previous_state
class PasteCommand(ScreenCommand):
def __init__(self, screen, offset=0):
super().__init__(screen)
self.offset = offset
def execute(self):
self.screen.paste(offset=self.offset)
def undo(self):
self.screen.clear_clipboard()
self.screen.text = self.previous_state
class ScreenInvoker:
def __init__(self):
self.history = []
def store_and_execute(self, command):
command.execute()
self.history.append(command)
def undo_last(self):
if self.history:
self.history.pop().undo()
screen = Screen('Hello world')
print(screen.__str__())
cut = CutCommand(screen, start=5, end=11)
client = ScreenInvoker()
client.store_and_execute(cut)
print(screen.__str__())
paste = PasteCommand(screen, offset=0)
client.store_and_execute(paste)
print(screen.__str__())
client.undo_last()
print(screen.__str__())
client.undo_last()
print(screen.__str__())
# Hello world
# Hello
# worldHello
# Hello
# Hello world
| true |
6ab37db3189cfdd355913c44cfc240122397a967 | Python | jorgearoce2102/basic-neural-networks | /tests/fit_tests.py | UTF-8 | 1,996 | 3.03125 | 3 | [] | no_license | from nose.tools import *
import NeuralNetwork as NN
import numpy as np
import random
def with_sgd_test():
"""Stocastic gradient descent backpropagation test"""
#create dataset object
filename = "dataset/iris.data"
dataset = NN.Dataset(filename)
#training and testing datasets
train_ratio = 0.75
train = dataset.exemples[:int(train_ratio*dataset.total_ex),:]
train_labels = dataset.labels[:int(train_ratio*dataset.total_ex)]
test = dataset.exemples[int(train_ratio*dataset.total_ex):,:]
test_labels = dataset.labels[int(train_ratio*dataset.total_ex):]
#get number of classes
NbClasses = dataset.NbClasses
expected = dataset.expected
NbArguments = dataset.NbArguments
#create model
model = NN.model
#add input layer
model.Layer("input", NbArguments)
#add hidden layer with 4 neurons
model.Layer("hidden", 6, model.sigmoid)
#add hidden layer with 4 neurons
model.Layer("hidden", 4, model.sigmoid)
#create output layer
NbNeurons = NbClasses
model.Layer("output", NbNeurons, model.sigmoid)
model.fit(train, train_labels, expected)
return model, test, test_labels, expected
def confusion_matrix_test():
"get the confusion matrix of the model after training"
#get model and test info
model, test, test_labels, expected = with_sgd_test()
#initialize confution matrix
classes = len(np.unique(test_labels))
confusion_matrix = np.zeros((classes,classes), int)
error = 0
for exemple, label in zip(test, test_labels):
#get output from feedforward, flatten it to round it, then make it integer integer
output = (np.round(model.feedforward(exemple).flatten())).astype(int)
#get error
error += np.sum(output - expected[label])
try:
confusion_matrix[np.where(output==1)[0][0]][np.where(expected[label]==1)[0][0]] += 1
except:
print("undefined class")
print(confusion_matrix)
| true |
cc21e78820522dd40f9783288eca179158a76423 | Python | streamr/marvin | /marvin/tests/fixtures/__init__.py | UTF-8 | 1,143 | 2.84375 | 3 | [
"MIT"
] | permissive | """
marvin.tests.fixtures
~~~~~~~~~~~~~~~~~~~~~
This package contains fixtures that can be used for testing or quickly firing up a test
instance with some test data.
"""
from . import complete as COMPLETE
from marvin import db
import re
#: The regex to check whether a module level variable should be added or not. Ignores everything that starts with
#: capital letters, as that's usually the model objects (like User, Movie, Stream, etc)
_FIXTURE_ITEM_FILTER = re.compile('[a-z][a-z0-9_]*')
def load(app, module):
""" Loads all the given items into the database used by app.
:param app: The app to use.
:param items: A list of items to use. Many predefined lists exists in the `marvin.tests.fixtures` package.
"""
items = _get_items_in_module(module)
with app.test_request_context():
db.session.add_all(items)
db.session.commit()
def _get_items_in_module(module):
# Make sure we get fresh objects, since SQLAlchemy doesn't add the same objects twice:
reload(module)
return [item for item_name, item in vars(module).items() if _FIXTURE_ITEM_FILTER.match(item_name)]
| true |
95e3fd21779da713c31b9d08b9b3729b3626d153 | Python | viviancui59/Compressing-Genomic-Sequences | /read_data.py | UTF-8 | 2,160 | 2.5625 | 3 | [] | no_license | # -*- encoding: utf-8 -*-
import os
import os.path
import random
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
# filename ='SRR642636_1.fq'# downloaded multi-fasta file from MITOMAP database
reads = []
# , "rU"
for i in range(2672): #2672 is the number of fasta
filename = './fish_xlt/' + str(i) + '.fasta' # downloaded multi-fasta file from MITOMAP database
with open(filename, "rU") as handle:
for record in SeqIO.parse(handle, "fasta") :
reads.append(record)
print(len(reads))
train_set_size=int(len(reads)*0.7)
valid_set_size=int(len(reads)*0.2)
test_set_size=int(len(reads)*0.1)
items = [x for x in range(len(reads))]
random.shuffle(items)
train = items[0:train_set_size]
valid = items[train_set_size:train_set_size+valid_set_size]
test = items[train_set_size+valid_set_size:]
train_record = []
valid_record = []
test_record = []
# train_set, valid_set,test_set = split_train_test(reads,0.2,0.1)
def check(read):
record = read
my_dna = str(read.seq.upper())
for i, base in enumerate(my_dna):
if base not in 'ACGTN':
my_dna = my_dna.replace(base,'N')
record.seq= Seq(my_dna, generic_dna)
for i, base in enumerate(record.seq):
if base not in 'ACGTN':
print(record.seq[i])
return record
for i in train:
read = check(reads[i])
train_record.append(read)
for i in valid:
read = check(reads[i])
valid_record.append(read)
for i in test:
read = check(reads[i])
test_record.append(read)
print(len(train_record), "train +", len(test_record), "test")
#save the data
SeqIO.write(train_record, "data/train.fasta", "fasta")
SeqIO.write(valid_record, "data/valid.fasta", "fasta")
SeqIO.write(test_record, "data/test.fasta", "fasta")
def read_fasta(data_path):
records = list(SeqIO.parse(data_path, "fasta"))
text = ""
for i,record in enumerate(records):
# text += str(record.seq)
print("No."+str(i)+": "+record.seq)
#return text
#read_fasta("fq_valid.fasta")
| true |
0ead2d2f0d60269535f024b1ebb93590016b7f30 | Python | zhouf1234/untitled8 | /正则表达式-demo练习7.py | UTF-8 | 3,319 | 2.90625 | 3 | [] | no_license | import requests
import re
import json
from lxml import etree
import os
#不写user-agent,不换ip地址,不一步步爬取,会被网站发现是爬虫
#先取得所有分页面的url保存成json文件
#读取json文件的所有url,更换ip,爬取所有章节内容并保存为json文件
#最后把章节内容的json文件保存为120个txt文件。
# proxy = {
# "http":"223.93.145.186:8060", #使用89网获取的这个可用ip地址
# }
# header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
# shouyao = requests.get('http://book.zongheng.com/showchapter/477645.html',headers=header,proxies=proxy)
# sh = shouyao.text
#
# s = re.compile('<div.*?class="volume-list">.*?<div>.*?<ul.*?>(.*?)</ul>.*?</div>.*?</div>',re.S)
# title = re.findall(s,sh)
# for tit in title:
# s2 = re.compile('<a.*?href="(.*?)".*?>')
# title2 = re.findall(s2,tit)
# # print(title2[:2]) #所有章节url
# jsonurl2 = []
# for ju in title2:
# jsonurl = {'url':ju}
# jsonurl2.append(jsonurl)
# print(jsonurl2)
# # 将jsonurl2的json文本以json序列化的方法写进,下载所有分页面的url存进json文件先
# with open('jsonurl.json', 'w')as file:
# file.write(json.dumps(jsonurl2, indent=2))
#读取jsonurl这个json文件的所有url
url_list = []
with open('jsonurl.json','r')as file:
str2 = file.read()
dat = json.loads(str2)
for u in dat:
url_list.append(u['url'])
# print(url_list)
# writes = []
# for url in url_list:
# # print(url)
# proxy2 = {
# "http": "58.56.108.226:58690", # 使用89网获取的这个可用ip地址
# }
# header = {
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
# shouyao1 = requests.get(url, headers=header, proxies=proxy2)
# sh1 = shouyao1.text
#
# # s3 = re.compile('<div.*?class="title">.*?<div.*?class="title_txtbox">(.*?)</div>',re.S)
# # biaoti = re.findall(s3,sh1)
# # print(biaoti) #章节标题,并不需要,注释掉
#
# html = etree.HTML(sh1)
# write = html.xpath('//div[@class="content"]//p//text()')
# # print(write) #章节内容
# write2 = ','.join(write)
# # print(type(write2))
# write3 = {'nr': write2}
# writes.append(write3)
# 存储章节内容,json格式
# with open('nrws.json', 'w')as file:
# file.write(json.dumps(writes, indent=2,ensure_ascii=False))
#读取json文件并保存为txt文件
with open('nrws.json','r')as file:
str2 = file.read()
dat = json.loads(str2)
# print(len(dat)) #长度120,说明内容都爬取下来了
for nr in range(len(dat)):
# print(nr)
write4 = dat[nr]['nr']
write5 = write4.split(',') #字符串分割为列表,使保存时不粘连,一行一行存
for wr in write5:
# print(wr)
filepath = './shouyao'
if not os.path.exists(filepath): # 如果文件夹不存在就创建
os.mkdir(filepath)
p2 = filepath + '/%s.txt' % nr
# print(p2)
# with open(p2, "a")as f:
# f.write(wr + '\n') #保存为120个txt文件 | true |
c2326f7cd572c6216de220d9826680f737ea3944 | Python | kg-0805/Trimester-9-Lab-Assignments | /Artificial Intelligence/Tic-Tac-Toe/Assignment2.py | UTF-8 | 6,322 | 4.125 | 4 | [] | no_license | #Name : Kartik Gupta
#PRN : 1032170673
#Subject : Artificial Intelligance
#Assignment 2
#Roll No. : PB-40
from time import time
class Game:
    """Interactive tic-tac-toe: the human plays 'X', the AI plays 'O' via minimax."""
    def __init__(self):
        #initialized the empty tic tac toe board ('.' marks an empty cell)
        self.current_state = [
            ['.', '.', '.'],
            ['.', '.', '.'],
            ['.', '.', '.']
        ]
        # Final result of the game: None while running, 'X'/'O'/'T' when over.
        self.result = None
        # The human ('X') always moves first.
        self.player_turn = 'X'
    #this functions prints the current state of the board/game
    def print_board(self):
        """Print the 3x3 board, tab-separated, one blank line between rows."""
        for i in range(3):
            for j in range(3):
                print(self.current_state[i][j], end="\t")
            print(end="\n\n")
    def winner_is(self):
        """Return 'X'/'O' for a winner, 'T' for a tie, None if the game continues."""
        #for loop returns player if any player won either horizontally or vertically
        for i in range(3):
            if self.current_state[i] == ['X', 'X', 'X']:
                return 'X'
            elif self.current_state[i] == ['O', 'O', 'O']:
                return 'O'
            elif self.current_state[0][i] != '.' and self.current_state[0][i] == self.current_state[1][i] == self.current_state[2][i]:
                return self.current_state[0][i]
        #this if condition returns the player if that player won diagonally
        if self.current_state[1][1] != '.':
            if self.current_state[0][0] == self.current_state[1][1] == self.current_state[2][2] or self.current_state[0][2] == self.current_state[1][1] == self.current_state[2][0]:
                return self.current_state[1][1]
        #this for loop returns None if game is not over yet (i.e if there are still empty places on board)
        for i in range(3):
            for j in range(3):
                if self.current_state[i][j] == '.':
                    return None
        #if no one wins and board is also full, then we return T for Tie
        return 'T'
    def max(self):
        """Minimax maximizing step for 'O'; returns (score, move_x, move_y).

        Scores: -10 if 'X' wins, +10 if 'O' wins, 0 for a tie.
        """
        max_value = -10
        move_x = None
        move_y = None
        winner = self.winner_is()
        if winner == 'X':
            return -10, 0, 0
        elif winner == 'O':
            return 10, 0, 0
        elif winner == 'T':
            return 0, 0, 0
        elif winner == None:
            for i in range(3):
                for j in range(3):
                    if self.current_state[i][j] == '.':
                        # Try 'O' here, recurse, then undo the move.
                        self.current_state[i][j] = 'O'
                        m, min_i, min_j = self.min()  # min_i/min_j unused
                        if m > max_value:
                            max_value, move_x, move_y = m, i, j
                        self.current_state[i][j] = '.'
        return max_value, move_x, move_y
    def min(self):
        """Minimax minimizing step for 'X'; returns (score, move_x, move_y)."""
        min_value = 10
        move_x = None
        move_y = None
        winner = self.winner_is()
        if winner == 'X':
            return -10, 0, 0
        elif winner == 'O':
            return 10, 0, 0
        elif winner == 'T':
            return 0, 0, 0
        elif winner == None:
            for i in range(3):
                for j in range(3):
                    if self.current_state[i][j] == '.':
                        # Try 'X' here, recurse, then undo the move.
                        self.current_state[i][j] = 'X'
                        m, max_i, max_j = self.max()  # max_i/max_j unused
                        if m < min_value:
                            min_value, move_x, move_y = m, i, j
                        self.current_state[i][j] = '.'
        return min_value, move_x, move_y
    def play(self):
        """Run the interactive game loop until a win or tie is reached."""
        while True:
            self.print_board()
            self.result = self.winner_is()
            if self.result != None:
                if self.result == 'T':
                    print('\n*** Game Tied ***\n')
                elif self.result == 'X':
                    print('\n*** You Won... :( ***\n')
                elif self.result == 'O':
                    print('*** I won... Now you have to take me to park :) ***')
                return
            if self.player_turn == 'X':
                # Time the recommendation so the player sees how long minimax took.
                start_time = time()
                m, move_x, move_y = self.min()
                print(f'Your Turn Hooman.. Don\'t boop\nRecommended move : X = {move_x}, Y = {move_y}\t(Calculated in {round(time()-start_time, 10)} seconds)')
                while True:
                    user_move_x = int(input('Enter X : '))
                    user_move_y = int(input('Enter y : '))
                    if self.current_state[user_move_x][user_move_y] == '.':
                        self.current_state[user_move_x][user_move_y] = 'X'
                        self.player_turn = 'O'
                        break
                    else:
                        print('Invalid Move... Use your eyes hooman')
            else:
                print('My turn now... let\'s think beep beep boop boop')
                m, move_x, move_y = self.max()
                self.current_state[move_x][move_y] = 'O'
                print(f'I put my O on ({move_x}, {move_y})')
                self.player_turn = 'X'
def main():
    """Entry point: greet the player and run one interactive game."""
    print('\nhey... hey hooman... let\'s play tic tac toe...')
    print('You are X\n')
    game = Game()
    game.play()


if __name__ == "__main__":
    # Guard added so importing this module does not immediately start a game.
    main()
#Sample Input/Output
#hey... hey hooman... let's play tic tac toe...
#You are X
#
#. . .
#
#. . .
#
#. . .
#
#Your Turn Hooman.. Don't boop
#Recommended move : X = 0, Y = 0 (Calculated in 5.0717909336 seconds)
#Enter X : 1
#Enter y : 1
#. . .
#
#. X .
#
#. . .
#
#My turn now... let's think beep beep boop boop
#I put my O on (0, 0)
#O . .
#
#. X .
#
#. . .
#
#Your Turn Hooman.. Don't boop
#Recommended move : X = 0, Y = 1 (Calculated in 0.0569653511 seconds)
#Enter X : 0
#Enter y : 2
#O . X
#
#. X .
#
#. . .
#
#My turn now... let's think beep beep boop boop
#I put my O on (2, 0)
#O . X
#
#. X .
#
#O . .
#
#Your Turn Hooman.. Don't boop
#Recommended move : X = 1, Y = 0 (Calculated in 0.0010316372 seconds)
#Enter X : 1
#Enter y : 0
#O . X
#
#X X .
#
#O . .
#
#My turn now... let's think beep beep boop boop
#I put my O on (1, 2)
#O . X
#
#X X O
#
#O . .
#
#Your Turn Hooman.. Don't boop
#Recommended move : X = 0, Y = 1 (Calculated in 0.0 seconds)
#Enter X : 0
#Enter y : 1
#O X X
#
#X X O
#
#O . .
#
#My turn now... let's think beep beep boop boop
#I put my O on (2, 1)
#O X X
#
#X X O
#
#O O .
#
#Your Turn Hooman.. Don't boop
#Recommended move : X = 2, Y = 2 (Calculated in 0.0 seconds)
#Enter X : 2
#Enter y : 2
#O X X
#
#X X O
#
#O O X
#
#
#*** Game Tied ***
| true |
6b1363c8694efed48f6878cc7509f4411fa1b645 | Python | reritom/Esvi | /test/test_models/test_objects/car.py | UTF-8 | 484 | 3.078125 | 3 | [] | no_license | class Car():
def __init__(self, colour=None, size=None, speed=None):
self.colour = colour
self.size = size
self.speed = speed
def serialise(self):
return {'colour': self.colour,
'size': self.size,
'speed': self.speed}
def deserialise(self, core: dict):
print("Deserialising {}".format(core))
self.colour = core['colour']
self.size = core['size']
self.speed = core['speed']
| true |
159a50cdb9941a7ba3b7a41d76a9f5d1e4352349 | Python | antoniojkim/Orbis-Challenge | /PlayerAI.py | UTF-8 | 2,658 | 3.375 | 3 | [] | no_license | from PythonClientAPI.game.PointUtils import *
from PythonClientAPI.game.Entities import FriendlyUnit, EnemyUnit, Tile
from PythonClientAPI.game.Enums import Team
from PythonClientAPI.game.World import World
from PythonClientAPI.game.TileUtils import TileUtils
from random import choice as choose_random_from
class PlayerAI:
    """Simple random-walk AI: pick a random neighbouring tile, avoiding bad tiles."""
    def __init__(self):
        ''' Initialize! '''
        self.turn_count = 0     # game turn count
        self.target = None      # target to send unit to!
        self.outbound = True    # is the unit leaving, or returning?
    def do_move(self, world, friendly_unit, enemy_units):
        '''
        This method is called every turn by the game engine.
        Make sure you call friendly_unit.move(target) somewhere here!
        Below, you'll find a very rudimentary strategy to get you started.
        Feel free to use, or delete any part of the provided code - Good luck!
        :param world: world object (more information on the documentation)
            - world: contains information about the game map.
            - world.path: contains various pathfinding helper methods.
            - world.util: contains various tile-finding helper methods.
            - world.fill: contains various flood-filling helper methods.
        :param friendly_unit: FriendlyUnit object
        :param enemy_units: list of EnemyUnit objects
        '''
        # increment turn count
        self.turn_count += 1
        # if unit is dead, stop making moves.
        if friendly_unit.status == 'DISABLED':
            print("Turn {0}: Disabled - skipping move.".format(str(self.turn_count)))
            self.target = None
            self.outbound = True
            return
        def viable(tile):
            # A tile is safe unless it is a wall, part of our own body,
            # or currently occupied by some unit's head.
            if tile.is_wall or tile.body == friendly_unit.team or tile.head is not None:
                return False
            return True
        # The four orthogonal neighbours of the unit's current position.
        # NOTE(review): assumes all four neighbours exist in
        # world.position_to_tile_map (i.e. the map is border-walled) -- confirm.
        x, y = friendly_unit.position
        possibleMoves = [
            world.position_to_tile_map[(x + 1, y)],
            world.position_to_tile_map[(x, y + 1)],
            world.position_to_tile_map[(x - 1, y)],
            world.position_to_tile_map[(x, y - 1)]
        ]
        viableMoves = [move for move in possibleMoves if viable(move)]
        # If boxed in, move anywhere (even unsafely); otherwise pick a safe tile.
        if not viableMoves:
            next_move = choose_random_from(possibleMoves).position
        else:
            next_move = choose_random_from(viableMoves).position
        friendly_unit.move(next_move)
        print("MyAI Turn {0}: currently at {1}, making move to {2}.".format(
            str(self.turn_count),
            str(friendly_unit.position),
            next_move
        ))
| true |
831f86912eab253e770de6c7415b841954a9a9df | Python | DKU-STUDY/Algorithm | /BOJ/solved.ac_class/Class03/9095.1, 2, 3 더하기/sAp00n.py | UTF-8 | 1,285 | 3.671875 | 4 | [] | no_license | # https://www.acmicpc.net/problem/9095
"""
시간 제한 메모리 제한 제출 정답 맞은 사람 정답 비율
1 초 128 MB 50501 32144 21313 61.645%
문제
정수 4를 1, 2, 3의 합으로 나타내는 방법은 총 7가지가 있다. 합을 나타낼 때는 수를 1개 이상 사용해야 한다.
1+1+1+1
1+1+2
1+2+1
2+1+1
2+2
1+3
3+1
정수 n이 주어졌을 때, n을 1, 2, 3의 합으로 나타내는 방법의 수를 구하는 프로그램을 작성하시오.
입력
첫째 줄에 테스트 케이스의 개수 T가 주어진다. 각 테스트 케이스는 한 줄로 이루어져 있고, 정수 n이 주어진다. n은 양수이며 11보다 작다.
출력
각 테스트 케이스마다, n을 1, 2, 3의 합으로 나타내는 방법의 수를 출력한다.
"""
# Simple to implement recursively; in hindsight this turned out to be a DP problem.
from sys import stdin
def compute(n):
    """Return the number of ways to write n as an ordered sum of 1, 2 and 3.

    Uses the recurrence ways(k) = ways(k-1) + ways(k-2) + ways(k-3) with
    base cases ways(1..3) = 1, 2, 4.  The iterative DP replaces the
    original triple recursion, which was exponential in n.
    """
    # a, b, c hold ways(k-3), ways(k-2), ways(k-1) as k advances to n.
    a, b, c = 1, 2, 4
    if n == 1:
        return a
    if n == 2:
        return b
    if n == 3:
        return c
    for _ in range(n - 3):
        a, b, c = b, c, a + b + c
    return c
def sol():
    """Read one integer from stdin and print the number of 1/2/3 sums."""
    print(compute(int(stdin.readline())))
if __name__ == "__main__":
    # Guard added so importing this solution does not block reading stdin.
    T = int(stdin.readline())
    for _ in range(T):
        sol()
| true |
d96e269f9d7a92b640bdba8ed959b44e668dfe75 | Python | zhou952368/Python_One | /pycharm workpance/9.20.py | UTF-8 | 734 | 4.25 | 4 | [] | no_license | # 2. 使用函数式编程,获得1970~2018所有的闰年
# filter() keeps exactly the years that satisfy the leap-year rule
print(list(filter(lambda n: n % 4 == 0 and n % 100 != 0 or n % 400 == 0, range(1970, 2019))))
"""
1. Use map() in a functional style to implement the following:
   merge [1,2,3,4,5] and ['a','b','c','d','e'] into
   {[(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e')]}
"""
# map() pairs the two sequences element-wise
nums = [1, 2, 3, 4, 5]
letters = ['a', 'b', 'c', 'd', 'e']
pairs = list(map(lambda x, y: (x, y), nums, letters))
print("{" + str(pairs) + "}")
# NOTE: renamed from `dict` -- the original name shadowed the builtin type.
person = {"name": "zhou", "age": 23, "sex": "男"}
# Iterate over all keys of the dictionary
for i in person.keys():
    print(i)
# Iterate over all values of the dictionary
for j in person.values():
    print(j)
# Iterate over all key/value pairs of the dictionary
for key, value in person.items():
    print(key, value)
c6b7134d427fc8b1dc4dc4c9522dbe33f92260c9 | Python | wootfish/cryptopals | /challenge_44.py | UTF-8 | 2,202 | 2.78125 | 3 | [] | no_license | from hashlib import sha1
from typing import Dict, Any
from challenge_39 import invmod, InvModException
from challenge_43 import DSA, recover_x, BadKError
# Public key y from the challenge data, and the SHA-1 fingerprint of the
# private key we are trying to recover (used as the success check below).
y = int("2d026f4bf30195ede3a088da85e398ef869611d0f68f07"
        "13d51c9c1a3a26c95105d915e2d8cdf26d056b86b8a7b8"
        "5519b1c23cc3ecdc6062650462e3063bd179c2a6581519"
        "f674a61f1d89a1fff27171ebc1b93d4dc57bceb7ae2430"
        "f98a6a4d83d8279ee65d71c1203d2c96d65ebbf7cce9d3"
        "2971c3de5084cce04a2e147821", base=16)
target = "ca8f6f7c66fa362d40760d135b763eb8527d3d52"
def recover_k(m1: int, m2: int, s1: int, s2: int) -> int:
    """Recover the nonce k shared by two DSA signatures.

    k = (H(m1) - H(m2)) / (s1 - s2) mod q.  (The original local names
    denom/numer were swapped relative to their roles.)
    """
    q = DSA.q
    hash_diff = (m1 - m2) % q
    sig_diff = (s1 - s2) % q
    return hash_diff * invmod(sig_diff, q) % q
if __name__ == "__main__":
    with open("data/44.txt", "r") as f:
        lines = f.readlines()

    # Parse the file as repeating (msg, s, r, m) records.
    msgs = []
    while lines:
        msg = {}  # type: Dict[str, Any]
        msg['msg'] = lines.pop(0)[5:-1].encode("ascii")
        msg['s'] = int(lines.pop(0)[3:-1])
        msg['r'] = int(lines.pop(0)[3:-1])
        msg['m'] = int(lines.pop(0)[3:-1], base=16)
        msgs.append(msg)

    # For every pair of signatures, assume they share k and try to recover x.
    for i in range(len(msgs) - 1):
        for j in range(i+1, len(msgs)):
            msg1 = msgs[i]
            msg2 = msgs[j]
            try:
                k = recover_k(msg1['m'], msg2['m'], msg1['s'], msg2['s'])
                x1 = recover_x(msg1['r'], msg1['s'], k, msg1['m'])
                # BUG FIX: x2 was previously computed from msg1 again, which
                # made the x1 == x2 cross-check vacuous; use msg2's signature.
                x2 = recover_x(msg2['r'], msg2['s'], k, msg2['m'])
                if x1 != x2:
                    continue
                if DSA(x=x1).sign(msg1['msg'], k=k) != (msg1['r'], msg1['s']):
                    continue
            except (InvModException, BadKError):
                continue
            x_hex = hex(x1)[2:].encode("ascii")
            print("k reuse detected for messages", i, "and", j)
            print("k =", k)
            print("x =", x1)
            print("sha1(hex(x)) =", sha1(x_hex).hexdigest())
            assert sha1(x_hex).hexdigest() == target
            print("Validity assertion passed.")
            break
        else:
            continue
        break  # break out of this loop iff we broke out of the inner loop
    else:
        print("No k reuse detected.")
| true |
48378e1e72ecbbece17837f3219787f0ee6d5913 | Python | ldakir/Machine-Learning | /lab06/random_forest.py | UTF-8 | 3,647 | 3.34375 | 3 | [] | no_license | """
Implements Random Forests with decision stumps.
Authors: Lamiaa Dakir
Date: 10/27/2019
"""
import util
from random import randrange
from math import sqrt
from Partition import *
from DecisionStump import *
import numpy as np
def random_forest_train_data(train_partition,T):
    """
    Train a random forest of T decision stumps.

    For each stump: draw a bootstrap sample of the training data (with
    replacement) and a random sqrt(|F|)-sized subset of the features
    (without replacement), then fit a DecisionStump on them.
    Returns the list of fitted stumps (the ensemble).
    """
    # training the data
    ensemble = []
    # NOTE(review): the loop variable `i` is reused by the inner loops below;
    # harmless here, but easy to trip over when modifying this function.
    for i in range(int(T)):
        # create a bootstrap training dataset by randomly sampling from the original training data with replacement
        bootstrapped_data = []
        n = len(train_partition.data)
        for i in range(n):
            bootstrapped_data.append(train_partition.data[randrange(n)])
        # select a random subset of features without replacement
        num_features = int(round(sqrt(len(train_partition.F)),0))
        features = list(train_partition.F.keys())
        features_subset = {}
        for i in range(num_features):
            element = features[randrange(len(features))]
            features_subset[element] = train_partition.F[element]
            features.remove(element)
        # using the bootstrap sample and the subset of features, create a decision stump
        new_partition = Partition(bootstrapped_data,features_subset)
        decision_stump = DecisionStump(new_partition)
        ensemble.append(decision_stump)
    return ensemble
def random_forest_test_data(test_partition,ensemble,threshold):
    """
    Classify the test data by majority vote over the ensemble.

    Each stump votes -1/+1 per example; the sign of the vote sum is the
    prediction (ties go to -1).  Returns (confusion_matrix, FPR, TPR),
    where confusion_matrix is indexed [actual][predicted] with
    0 <-> label -1 and 1 <-> label +1.
    """
    # testing test example by running it through each classifier in the ensemble
    prediction = []
    for x in test_partition.data:
        result = []
        for d in ensemble:
            label = d.classify(x.features,threshold)
            result.append(label)
        y = sum(result)
        if y > 0:
            prediction.append(1)
        else:
            prediction.append(-1)
    #construst the confusion matrix
    confusion_matrix = np.zeros((2,2))
    # NOTE(review): `accuracy` is tallied but never returned or used.
    accuracy =0
    for i in range(len(prediction)):
        if prediction[i] == -1 and test_partition.data[i].label == -1:
            confusion_matrix[0][0] +=1
            accuracy +=1
        elif prediction[i] == 1 and test_partition.data[i].label == 1:
            confusion_matrix[1][1] +=1
            accuracy +=1
        elif prediction[i] == -1 and test_partition.data[i].label == 1:
            confusion_matrix[1][0] +=1
        elif prediction[i] == 1 and test_partition.data[i].label == -1:
            confusion_matrix[0][1] +=1
    FPR = util.FPR(confusion_matrix)
    TPR = util.TPR(confusion_matrix)
    return confusion_matrix,FPR,TPR
def main():
    """Parse CLI args, train a random forest, and print the confusion matrix."""
    # read in data (y in {-1,1})
    opts = util.parse_args('Random forests')
    train_partition = util.read_arff(opts.train_filename)
    test_partition = util.read_arff(opts.test_filename)
    T = opts.classifier_nums
    threshold = opts.thresh
    # training the data
    ensemble = random_forest_train_data(train_partition,T)
    # testing the data
    confusion_matrix,FPR,TPR = random_forest_test_data(test_partition,ensemble,threshold)
    print('T: '+ str(T) +' , thresh ' + str(threshold))
    print('\n')
    # Rows are the actual label, columns the predicted label.
    print('   prediction')
    print('   -1  1')
    print('   -----')
    print('-1| '+ str(int(confusion_matrix[0][0])) + '  ' + str(int(confusion_matrix[0][1])))
    print(' 1| '+ str(int(confusion_matrix[1][0])) + '  ' + str(int(confusion_matrix[1][1])))
    print('\n')
    # calculating the false positive rate and the true positive rate
    print('false positive: '+ str(FPR))
    print('true positive: '+ str(TPR))
if __name__ == "__main__":
    main()
| true |
c35b584a55a4a2b7dc7f94bb16863b740adea0de | Python | nagi930/coding_test | /cluster.py | UTF-8 | 4,035 | 2.859375 | 3 | [] | no_license | import random
from collections import deque
from copy import deepcopy
import turtle
def under60p(board):
    """Return True while fewer than 60% of the cells are filled ('X' or 'V').

    The board size is derived from the board itself (it is square), so the
    function no longer depends on the module-level constant A.
    """
    size = len(board)
    filled = sum(row.count('X') + row.count('V') for row in board)
    return filled / size ** 2 < 0.6
def draw(board):
    """Redraw the whole board by stamping one turtle per cell type.

    Uses the module-level turtles X (green), V (purple), N (red) and
    O (white); each cell is a 10x10-pixel stamp anchored at (left, top).
    """
    X.clear()
    V.clear()
    N.clear()
    O.clear()
    top = 520
    left = -800
    for row in range(len(board)):
        for col in range(len(board[0])):
            if board[row][col] == 'X':
                x = left + (col * 10)
                y = top - (row * 10)
                X.goto(x, y)
                X.stamp()
            elif board[row][col] == 'V':
                x = left + (col * 10)
                y = top - (row * 10)
                V.goto(x, y)
                V.stamp()
            elif board[row][col] == 'N':
                x = left + (col * 10)
                y = top - (row * 10)
                N.goto(x, y)
                N.stamp()
            else:
                x = left + (col * 10)
                y = top - (row * 10)
                O.goto(x, y)
                O.stamp()
if __name__ == '__main__':
    # --- turtle/screen setup: four stamping turtles, one per cell colour ---
    turtle.delay(0)
    turtle.ht()
    turtle.tracer(0, 0)
    X = turtle.Turtle()
    X.shapesize(0.3, 0.3, 1)
    X.hideturtle()
    X.penup()
    X.shape('square')
    X.color('green')
    X.speed(0)
    X.setundobuffer(None)
    N = turtle.Turtle()
    N.shapesize(0.3, 0.3, 1)
    N.hideturtle()
    N.penup()
    N.shape('square')
    N.color('red')
    N.speed(0)
    N.setundobuffer(None)
    V = turtle.Turtle()
    V.shapesize(0.3, 0.3, 1)
    V.hideturtle()
    V.penup()
    V.shape('square')
    V.color('purple')
    V.speed(0)
    V.setundobuffer(None)
    O = turtle.Turtle()
    O.shapesize(0.3, 0.3, 1)
    O.hideturtle()
    O.penup()
    O.shape('square')
    O.color('white')
    O.speed(0)
    O.setundobuffer(None)
    screen = turtle.Screen()
    screen.tracer(False)
    screen.bgcolor('black')
    screen.setup(width=1.0, height=1.0)
    # A = board side length, B = new cells placed per round.
    A, B = 100, 100
    board = [['O'] * A for _ in range(A)]
    check = [['O'] * A for _ in range(A)]
    # 4-neighbour offsets used by the BFS flood fill below.
    dx = [0, 1, 0, -1]
    dy = [-1, 0, 1, 0]
    total = 0
    neutral = 0
    draw(board)
    # Keep seeding cells until at least 60% of the board is filled.
    while under60p(board):
        # Previous round's new cells ('N') become permanent ('X').
        for i in range(A):
            for j in range(A):
                if board[i][j] == 'N':
                    board[i][j] = 'X'
        screen.update()
        # Place B new cells on random empty positions.
        cnt = B
        while cnt > 0:
            row = random.randint(0, A-1)
            col = random.randint(0, A-1)
            if board[row][col] == 'O':
                board[row][col] = 'N'
                cnt -= 1
                total += 1
        draw(board)
        # BFS flood-fill a working copy to find all connected empty regions.
        check = deepcopy(board)
        zero_count = []
        queue = deque()
        for i in range(A):
            for j in range(A):
                if check[i][j] == 'O':
                    # NOTE(review): `all` shadows the builtin of the same name.
                    all = []
                    check[i][j] = 'X'
                    queue.append((i, j))
                    all.append((i, j))
                    while queue:
                        temp = queue.popleft()
                        for d in range(4):
                            x = temp[1] + dx[d]
                            y = temp[0] + dy[d]
                            if 0<= x <= A-1 and 0<= y <= A-1 and check[y][x] == 'O':
                                check[y][x] = 'X'
                                queue.append((y, x))
                                all.append((y, x))
                    zero_count.append(all)
        # All empty regions except the largest become neutralized ('V').
        if len(zero_count) < 2:
            continue
        else:
            zero_count.sort(key=len)
            # NOTE(review): both branches below do the same thing; the
            # len(zeros) == 1 special case is redundant.
            for zeros in zero_count[:-1]:
                if len(zeros) == 1:
                    for row, col in zeros:
                        board[row][col] = 'V'
                        neutral += 1
                else:
                    for row, col in zeros:
                        board[row][col] = 'V'
                        neutral += 1
        draw(board)
    screen.mainloop()
| true |
d0fd2e713deb1708dca3f06f676b1b3118ce1ab6 | Python | Aasthaengg/IBMdataset | /Python_codes/p02675/s027389381.py | UTF-8 | 120 | 3.140625 | 3 | [] | no_license | N = str(input())
# Japanese counter-word reading depends on the last digit of N.
if int(N[-1])==3:
    print('bon')
elif int(N[-1]) in [0, 1, 6, 8]:
    print('pon')
else:
    # remaining last digits (2, 4, 5, 7, 9)
    print('hon')
| true |
903197eba593aaee6a1b3c2062b80462c4311fbc | Python | daniel-reich/ubiquitous-fiesta | /HNjRjrNPueF5vRh9S_0.py | UTF-8 | 150 | 3.03125 | 3 | [] | no_license |
def hamming_code(message):
    """Encode *message* by tripling every bit of each character's 8-bit code.

    Each character is rendered as an 8-bit binary string and every bit is
    repeated three times (a simple repetition code).  Built with join()
    instead of the original quadratic `+=` string accumulation.
    """
    return "".join(bit * 3
                   for char in message
                   for bit in format(ord(char), "08b"))
| true |
9801c49ad6fcb189fb834bd621f79f579e01f07c | Python | piantado/LOTlib3 | /Hypotheses/Lexicon/SimpleLexicon.py | UTF-8 | 4,507 | 3.34375 | 3 | [] | no_license | from copy import copy
from LOTlib3.Miscellaneous import flip, qq, attrmem
from LOTlib3.Hypotheses.Hypothesis import Hypothesis
from LOTlib3.Hypotheses.FunctionHypothesis import FunctionHypothesis
from LOTlib3.Hypotheses.Proposers import ProposalFailedException
from LOTlib3.Hypotheses.LOTHypothesis import LOTHypothesis
class SimpleLexicon(Hypothesis):
    """
    A class for mapping words to hypotheses.
    This defaultly assumes that the data comes from sampling with probability alpha from
    the true utteranecs
    """
    def __init__(self, value=None, propose_p=0.5, **kwargs):
        """
        value -- dict mapping words to Hypothesis objects (None starts empty)
        propose_p -- the probability of proposing to each word
        """
        if value is None:
            value = dict()
        else:
            # BUG FIX: previously asserted isinstance(self.value, dict), but
            # self.value does not exist until Hypothesis.__init__ runs below,
            # so any non-None value raised AttributeError.  Check the argument.
            assert isinstance(value, dict)
        Hypothesis.__init__(self, value=value, **kwargs)
        self.propose_p = propose_p
    def __copy__(self):
        thecopy = type(self)() # Empty initializer
        # copy over all the relevant attributes and things.
        # Note objects like Grammar are not given new copies
        thecopy.__dict__.update(self.__dict__)
        # and copy the self.value (each word hypothesis is copied individually)
        thecopy.value = dict()
        for k,v in list(self.value.items()):
            thecopy.set_word(k, copy(v))
        return thecopy
    def __call__(self, word, *args):
        """
        Just a wrapper so we can call like SimpleLexicon('hi', 4)
        """
        return self.value[word](*args)
    # this sets the word and automatically compute its function
    def set_word(self, w, v):
        """
        This sets word w to value v. v must be a Hypothesis.
        """
        assert isinstance(v, Hypothesis)
        self.value[w] = v
    def get_word(self, w):
        """Return the hypothesis currently associated with word w."""
        return self.value[w]
    def all_words(self):
        """Return the list of words in this lexicon."""
        return list(self.value.keys())
    def __str__(self):
        """
        This defaultly puts a \0 at the end so that we can sort -z if we want (e.g. if we print out a posterior first)
        """
        return '\n'+'\n'.join(["%-15s: %s" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\0'
    def __hash__(self):
        return hash(str(self))
    def __eq__(self, other):
        return (str(self) == str(other)) # simple but there are probably better ways
    def force_function(self, w, f):
        """
        Force word w's hypothesis to compute function f.
        """
        # If this does not exist, make a function hypothesis from scratch with nothing in it.
        if w not in self.value:
            self.value[w] = FunctionHypothesis(value=None, args=None)
        self.value[w].force_function(f)
    def pack_ascii(self):
        """ Packing function for more concise representations """
        out = ''
        for w in sorted(self.all_words()):
            assert isinstance(self.value[w], LOTHypothesis), "*** Can only pack Lexicons with FunctionNode values"
            out += "%s:%s;" % (w, self.value[w].grammar.pack_ascii(self.value[w].value) )
        return out
    ###################################################################################
    ## MH stuff
    ###################################################################################
    def propose(self):
        """
        Propose to the lexicon by flipping a coin for each word and proposing to it.
        This permits ProposalFailExceptions on individual words, but does not return a lexicon
        unless we can propose to something.
        """
        # NOTE: if the lexicon has no words, this loop never terminates.
        fb = 0.0
        changed_any = False
        while not changed_any:
            new = copy(self) ## Now we just copy the whole thing
            for w in self.all_words():
                if flip(self.propose_p):
                    try:
                        xp, xfb = self.get_word(w).propose()
                        changed_any = True
                        new.set_word(w, xp)
                        fb += xfb
                    except ProposalFailedException:
                        pass
        return new, fb
    @attrmem('prior')
    def compute_prior(self):
        """Prior is the (temperature-scaled) sum of the word hypotheses' priors."""
        return sum([x.compute_prior() for x in list(self.value.values())]) / self.prior_temperature
| true |
11d3ba3947741130c63aef69bf716e7ef86a69db | Python | dpernes/dirsvm | /utils.py | UTF-8 | 919 | 2.8125 | 3 | [] | no_license | import csv
import os
import numpy as np
def read_csv(path, filename, delimiter=','):
    """Read <path>/<filename> as CSV and return its rows as lists of strings."""
    # newline='' is the documented way to open files for the csv module, and
    # the context manager guarantees the file handle is closed.
    with open(os.path.join(path, filename), newline='') as f:
        return list(csv.reader(f, delimiter=delimiter))
def write_csv(samples, labels, types, path):
    """Write samples+labels to <path>/data.csv and types to <path>/types.data.

    Each data.csv row is the sample's values followed by its label,
    comma-separated, one row per line.
    """
    # BUG FIX: on Python 3 map() returns an iterator, so the original
    # `map(str, s) + [str(l)]` raised TypeError; materialize the map first.
    rows = [','.join(list(map(str, s)) + [str(l)]) for (s, l) in zip(samples, labels)]
    with open(os.path.join(path, 'data.csv'), 'w') as f:
        f.write('\n'.join(rows))
    with open(os.path.join(path, 'types.data'), 'w') as f:
        f.write('\n'.join(types))
def read_dataset(path, filename):
    """Load a CSV dataset; return (samples, labels) as numpy arrays.

    Every column except the last is parsed as a float feature; the last
    column is kept as the (string) label.
    """
    rows = read_csv(path, filename)
    samples = [[float(value) for value in row[:-1]] for row in rows]
    labels = [row[-1] for row in rows]
    return np.asarray(samples), np.asarray(labels)
def read_types(path, filename):
    """Return the whitespace-stripped lines of <path>/<filename> as a list."""
    # Context manager replaces the manual open/close pair.
    with open(os.path.join(path, filename)) as f:
        return [line.strip() for line in f]
| true |
fd79140b2d09437eff43b36085ad0e390e12f29f | Python | DongGeun974/Practice_gongsu | /20191105.py | UTF-8 | 1,169 | 4.0625 | 4 | [] | no_license | #컬렉션자료형
"""
리스트-느리다, 딕셔너리, 튜블
넘파이 : 컬렉션자료형의 단점을 보안
"""
import numpy as np
a = np.array([0,1,2,3])
print(a)
print(type(a))
b = np.array((0,1,2,3))
print(b)
print(type(b))
#항목별로 비교연산
c = a == b
print(c.dtype)
print(b.dtype)
d = np.array([True, True, False], dtype=int)
print(d)
"""
#넘파이는 같은자료형?? 장점은 빠른 처리 속도
import time
start1 = time.process_time()
a_list = range(0,10000000,2)
a_list_square = [i**2 for i in a_list]
end1 = time.process_time()
print("시간1은 ", end1 - start1)
start2 = time.process_time()
an_array = np.arange(0,10000000,2)
square = an_array**2
end2 = time.process_time()
print("시간2는", end2 - start2)
"""
#array 생성방법
a_1dim = np.array([0,1,2,3])
print(a_1dim)
print("차원",a_1dim.ndim)#n차원확인
print("shape",a_1dim.shape)
a_2dim = np.array([[1,2],[3,4],[5,6]])
print(a_2dim)
print("차원",a_2dim.ndim)#n차원확인
print("shape",a_2dim.shape)
print(len(a_2dim))
print(len(a_2dim.shape))
print(np.shape(a_2dim))
print(np.shape(a_2dim)[0] == len(a_2dim))
print(len(np.shape(a_2dim)) == a_2dim.ndim)
| true |
58632730325488eada6ee3f2353135e96e3add26 | Python | bstempi/pyswf | /pyswfaws/exceptions.py | UTF-8 | 1,040 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | class ActivityTaskException(Exception):
"""
Exception that is thrown when an activity task fails
"""
def __init__(self, task_name, task_version, task_id, failure_reason, failure_status):
self.task_name = task_name
self.task_version = task_version
self.task_id = task_id
self.failure_reason = failure_reason
self.failure_stats = failure_status
def __str__(self):
return 'Task id {} ({}: {}) failed. Reason: {} Status: {}'.format(self.task_id, self.task_name,
self.task_version, self.failure_reason,
self.failure_stats)
class UnfulfilledPromiseException(Exception):
    """
    An exception that gets thrown when a promise is called upon for a result, but it is unable to fulfill it.
    This is for INTERNAL USE ONLY.
    """
    # The previous __init__ only forwarded its arguments to
    # Exception.__init__, which the inherited constructor already does,
    # so it was removed.
| true |
e7971fb58cc622bd24998ef9d1f25f051f5a32c7 | Python | diogojapinto/computer-vision | /2nd_part/01_stereo/stereo_1.py | UTF-8 | 720 | 2.90625 | 3 | [] | no_license | import cv2
from matplotlib import pyplot as plt
import numpy as np
# Load both images as single-channel greyscale (StereoBM requires 8-bit grey).
img_left = cv2.imread('left.png', cv2.IMREAD_GRAYSCALE)
img_right = cv2.imread('right.png', cv2.IMREAD_GRAYSCALE)
# obtain the disparity matrix via block matching
stereo = cv2.StereoBM_create(numDisparities=80, blockSize=21)
disparity = stereo.compute(img_left, img_right)
# StereoBM outputs 16-bit disparities; rescale into 0..255 for display.
disparity_scaled = cv2.convertScaleAbs(disparity, alpha=(np.iinfo(np.uint8).max/np.iinfo(np.uint16).max))
# plot everything: left/right inputs and the computed disparity map
plt.subplot(221)
plt.title("Left")
plt.imshow(img_left, cmap='gray')
plt.subplot(222)
plt.title("Right")
plt.imshow(img_right, cmap='gray')
plt.subplot(223)
plt.title("Disparity")
plt.imshow(disparity_scaled, cmap='gray')
plt.tight_layout()
plt.show()
| true |
065aa1f787c1ba602f3af37acd3fb85897618f17 | Python | Evertcolombia/AirBnB_clone_alone | /web_flask/2-c_route.py | UTF-8 | 442 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
from flask import Flask
app = Flask(__name__)
@app.route("/", strict_slashes=False)
def home():
    """Root route: return a plain-text greeting."""
    return "Hello"
@app.route("/hbnb", strict_slashes=False)
def hbnb():
return "HBNB"
@app.route("/c/<text>", strict_slashes=False)
def c(text=None):
if (text):
text = text.replace('_', ' ')
return "C {}".format(text)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=5000)
| true |
7d31b978803e94db9091ebf7c98f110ac493d500 | Python | St-Ren/high_school_project | /web/web/paragraph/background/paragraph/3/check.py | UTF-8 | 387 | 2.703125 | 3 | [] | no_license | for num in range(1,127):
    # Read the whole file, then rewrite it in place (latin1 preserves raw bytes).
    f=open('%d.t'%num,'r',encoding='latin1')
    lines=f.readlines()
    f.close()
    f=open('%d.t'%num,'w',encoding='latin1')
    for line in lines:
        # Strip noise characters and collapse double spaces.
        line=line.replace('*','')
        line=line.replace('  ',' ')
        line=line.replace('??','')
        s=line.strip()
        if len(s)<10:
            # Lines shorter than 10 chars are treated as noise: report only.
            # NOTE(review): bare except silently hides any print failure.
            try:
                print(f)
                print(line)
            except:
                print('false')
        else:
            f.write(s+'\n')
| true |
ce99966c7536666d60b394facc1ed36017e4cc86 | Python | LesGameDevToolsMagique/Messenger | /test/server/pyServ.py | UTF-8 | 1,399 | 2.640625 | 3 | [
"MIT"
] | permissive | import SocketServer
HOST = "localhost"
PORT = 12321
# this server uses ThreadingMixIn - one thread per connection
# replace with ForkMixIn to spawn a new process per connection
class EchoServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP echo server (Python 2) that handles each connection in its own thread."""
    # no need to override anything - default behavior is just fine
    pass
class EchoRequestHandler(SocketServer.StreamRequestHandler):
    """Reads one chunk from the client and echoes it back (Python 2 code)."""
    def handle(self):
        print "connection from %s" % self.client_address[0]
        # Single recv, not a loop: only the first 1024 bytes are echoed.
        self.data = self.request.recv(1024).strip()
        print "{} wrote:".format(self.client_address[0])
        print self.data
        self.request.sendall(self.data)
        print "%s disconnected" % self.client_address[0]
# Create the server (blocks forever once serve_forever is called)
server = EchoServer((HOST, PORT), EchoRequestHandler)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
print "server listening on %s:%s" % server.server_address
server.serve_forever()
| true |
df94eed12d0c978d232c19689e51e00e25905a16 | Python | HR-027/Lawnmowing | /Alevel_sprites.py | UTF-8 | 6,670 | 3.21875 | 3 | [] | no_license | import pygame
from Alevel_settings import *
vec = pygame.math.Vector2
# For collisions between a sprite and the walls
def collide_with_walls(sprite, group, direction):
    """Resolve a collision of *sprite* against *group* along one axis.

    On a hit the sprite is pushed back to the wall edge and its velocity on
    that axis is zeroed.  NOTE(review): the sprite is positioned by its
    center, so offsetting by the full rect width/height (rather than half)
    looks off by half a sprite -- confirm against in-game behaviour.
    """
    # In the horizontal direction
    if direction == 'x':
        # Checks for hits between a sprite and the walls
        hits = pygame.sprite.spritecollide(sprite, group, False)
        if hits:
            # When the velocity is positive then the sprite was moving to the right
            if sprite.vel.x > 0:
                # The sprite needs to be put against the left side of the wall
                sprite.pos.x = hits[0].rect.left - sprite.rect.width
            # When the velocity is negative then the sprite was moving to the left
            if sprite.vel.x < 0:
                # The sprite needs to be put against the right side of the wall
                sprite.pos.x = hits[0].rect.right + sprite.rect.width
            # To stop moving
            sprite.vel.x = 0
            sprite.rect.centerx = sprite.pos.x
    # In the vertical direction
    if direction == 'y':
        hits = pygame.sprite.spritecollide(sprite, group, False)
        if hits:
            # When the velocity is positive then the sprite was moving down
            if sprite.vel.y > 0:
                # The sprite needs to be put against the upper side of the wall
                sprite.pos.y = hits[0].rect.top - sprite.rect.height
            # When the velocity is negative then the sprite was moving up
            if sprite.vel.y < 0:
                # The sprite needs to be put against the lower side of the wall
                sprite.pos.y = hits[0].rect.bottom + sprite.rect.height
            # To stop moving
            sprite.vel.y = 0
            sprite.rect.centery = sprite.pos.y
# Player class to control things to do with the player
class Player(pygame.sprite.Sprite):
    """The player-controlled mower: arrow keys rotate and drive it."""
    def __init__(self, game, x, y):
        # Initialises the player class
        self.groups = game.all_sprites
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        # Gets the image of the player and sets the rect
        self.image = game.player_img
        self.rect = self.image.get_rect()
        # Velocity of the player initially zero
        self.vel = vec(0, 0)
        # Spawn in the position set
        self.pos = vec(x, y)
        # Sets the initial rotation as zero
        self.rot = 0
        # Sets the player health
        self.health = PLAYER_HEALTH
        # Sets the points to zero
        self.points = 0
        # Player lives
        self.lives = 3
        # keeps the player hidden
        self.hidden = False
    def get_keys(self):
        """Translate the currently pressed arrow keys into velocity/rotation."""
        # Sets the velocity and rotating speed to 0 unless a key is pressed
        self.vel = vec(0, 0)
        self.rot_speed = 0
        # which key is pressed
        keys = pygame.key.get_pressed()
        if keys[pygame.K_LEFT]:
            self.rot_speed = PLAYER_ROT_SPEED
        if keys[pygame.K_RIGHT]:
            self.rot_speed = -PLAYER_ROT_SPEED
        if keys[pygame.K_UP]:
            self.vel = vec(PLAYER_SPEED, 0).rotate(-self.rot)
        if keys[pygame.K_DOWN]:
            # NOTE(review): forward uses rotate(-self.rot); reverse uses
            # rotate(self.rot) -- verify reverse moves straight backwards.
            self.vel = vec(-PLAYER_SPEED, 0).rotate(self.rot)
    # Function to add points to the score
    def add_point(self):
        self.points += 1
    # function used to hide the player temporarily
    def hide(self):
        self.hidden = True
        self.rect.center = (WIDTH/2, HEIGHT+400)
    def update(self):
        """Per-frame update: respawn if hidden, read keys, move, resolve walls."""
        # If the player is hidden, that ends and the player respawns at the spawn point
        if self.hidden:
            self.hidden = False
            self.rect.center = self.pos
            self.pos = vec(776, 616)
        self.get_keys()
        # Rotates according to the rotating speed and the frame rate
        # so that rotation is not instant
        self.rot = (self.rot + self.rot_speed * self.game.dt)
        # Rotates the image
        self.image = pygame.transform.rotate(self.game.player_img, self.rot)
        # Sets the position of the rect and sprite
        self.rect.center = self.pos
        self.pos += self.vel * self.game.dt
        # Calls for a check in collisions
        self.rect.centerx = self.pos.x
        collide_with_walls(self, self.game.walls, "x")
        self.rect.centery = self.pos.y
        collide_with_walls(self, self.game.walls, "y")
class Mob(pygame.sprite.Sprite):
    """An enemy that steers toward the player each frame."""
    # Initialises the Mob
    def __init__(self, game, x, y):
        self.groups = game.all_sprites, game.mobs
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        # Sets the image and the rect
        self.image = game.mob_img
        self.rect = self.image.get_rect()
        # Sets the position to where it was spawned
        self.pos = vec(x, y)
        # Velocity and acceleration start of at zero
        self.vel = vec(0, 0)
        self.acc = vec(0, 0)
        # rect center set
        self.rect.center = self.pos
        # rotation set
        self.rot = 0
    def update(self):
        """Per-frame update: face the player, accelerate toward them, resolve walls."""
        # Subtract the player vector from the mob vector to get the direction
        self.rot = (self.game.player.pos - self.pos).angle_to(vec(1, 0))
        # Rotating accordingly
        self.image = pygame.transform.rotate(self.game.mob_img, self.rot)
        # Rect created and set
        self.rect = self.image.get_rect()
        self.rect.center = self.pos
        # Acceleration set
        self.acc = vec(MOB_SPEED, 0).rotate(-self.rot)
        # Reducing the acceleration to make it less floaty
        # (friction term proportional to velocity)
        self.acc += self.vel * -5
        self.vel += self.acc * self.game.dt
        self.pos += self.vel * self.game.dt
        # Calls the check for collisions
        self.rect.centerx = self.pos.x
        collide_with_walls(self, self.game.walls, 'x')
        self.rect.centery = self.pos.y
        collide_with_walls(self, self.game.walls, 'y')
# For any walls to spawn on the map
class Obstacle(pygame.sprite.Sprite):
    """Invisible rectangular wall used only for collision checks."""

    def __init__(self, game, x, y, w, h):
        self.game = game
        self.groups = game.walls
        pygame.sprite.Sprite.__init__(self, self.groups)
        # Remember the corner and build the collision rect there.
        self.x, self.y = x, y
        self.rect = pygame.Rect(x, y, w, h)
        self.rect.x, self.rect.y = x, y
# For the grass tiles to spawn
class Grass_tile(pygame.sprite.Sprite):
# Initialises the grass tile class
def __init__(self, game, pos):
self.groups = game.all_sprites, game.grass_tiles
pygame.sprite.Sprite.__init__(self, self.groups)
self.game = game
self.image = game.grass_img
self.rect = self.image.get_rect()
self.rect.center = pos
| true |
493c17baed29a8161d172a722061f387786c140e | Python | CSR-Group/Story-Cloze-Test | /postab.py | UTF-8 | 1,847 | 2.96875 | 3 | [] | no_license | from ingest import *
import nltk
from nltk.corpus import wordnet as wn
def getPosTags(sentence):
return nltk.pos_tag(sentence)
def getEntities(sentence):
nouns = set()
entity_type = {'NNPS', 'NNS', 'NNP', 'PRP', 'NN', 'PRP$'}
taggedWords = getPosTags(sentence)
for (x,y) in taggedWords:
if(y in entity_type):
nouns.add(x)
return nouns
def getAllEntiries(sentences):
nouns = set()
for sentence in sentences:
nouns.update(getEntities(sentence))
return nouns
def getTags(sentence):
entity_type = {'NNPS', 'NNS', 'NNP', 'PRP', 'NN', 'PRP$'}
verb_type = {'VB','VBD','VBG','VBN','VBP','VBZ'}
adv_type = {'RB','RBR','RBS'}
adj_type = {'JJ', 'JJR', 'JJS'}
taggedWords = getPosTags(sentence)
nouns = set()
verbs = set()
adj = set()
adv = set()
for (x,y) in taggedWords:
if(y in entity_type):
nouns.add(x)
if(y in verb_type):
verbs.add(x)
if(y in adv_type):
adv.add(x)
if(y in adj_type):
adj.add(x)
return {'N':nouns,'V':verbs, 'ADV':adv, 'ADJ':adj}
if __name__ == "__main__":
data = getTrainData()
for datapoint in data[5:6]:
print(datapoint.inputSentences)
print(datapoint.nextSentence1)
print(datapoint.nextSentence2)
print(getPosTags(datapoint.inputSentences[0]))
print(getPosTags(datapoint.inputSentences[1]))
print(getPosTags(datapoint.inputSentences[2]))
print(getPosTags(datapoint.inputSentences[3]))
print(getPosTags(datapoint.nextSentence1))
print(getPosTags(datapoint.nextSentence2))
# print(str(getAllEntiries(datapoint.inputSentences)) + " - " + str(getEntities(datapoint.nextSentence1)) + " - " + str(getEntities(datapoint.nextSentence1)))
| true |
1bd3b97e6f3650eebc712c104d73e613f95f5131 | Python | 981377660LMT/algorithm-study | /7_graph/bfs求无权图的最短路径/bfs保持搜索顺序的性质/不连续字符串-dfs生成器搜索字典序.py | UTF-8 | 1,190 | 3.765625 | 4 | [] | no_license | from itertools import islice
from typing import Generator, List
# 2^n
class Solution:
def solve(self, n: int, k: int) -> str:
"""
返回'0''1''2'组成的长为n的字典序的第k个字符串 相邻字符不能相同
用dfs搜,搜出来直接就是字典序,并且用生成器可以节省空间,加速
如果用bfs搜,搜出来是实际大小排序
"""
def bt(index: int, pre: int, path: List[int]) -> Generator[str, None, None]:
if index == n:
yield ''.join(map(str, path))
return
for next in range(3):
if next == pre:
continue
path.append(next)
yield from bt(index + 1, next, path)
path.pop()
iter = bt(0, -1, [])
return next(islice(iter, k, None), '')
print(Solution().solve(n=2, k=0))
print(Solution().solve(n=2, k=1))
print(Solution().solve(n=2, k=2))
print(Solution().solve(n=2, k=3))
print(Solution().solve(n=2, k=4))
print(Solution().solve(n=2, k=5))
print(Solution().solve(n=2, k=6))
print(Solution().solve(n=2, k=7))
| true |
d436d10ca7cca870683e4e77729d47039ecdb9ed | Python | Tusharsampang/All_LabProjects | /venv/Lab3/Question3.py | UTF-8 | 259 | 3 | 3 | [] | no_license | '''
3. Write a function calledshowNumbersthat takes a parameter calledlimit.
It should print all the numbers between 0 and limit with a label to identify the even and odd numbers.
For example, if the limit is 3, it should print:0 EVEN1 ODD2 EVEN
''' | true |
0fadc0b782afcfe19fde8b27254a7dfadc2b2280 | Python | jcpince/algorithms | /leetcode/rotatedDigits.py | UTF-8 | 6,883 | 3.78125 | 4 | [
"MIT"
] | permissive | #! /usr/bin/python3
# 788. Rotated Digits
# Easy
#
# X is a good number if after rotating each digit individually by 180 degrees, we get a valid number that is different from X. Each digit must be rotated - we cannot choose to leave it alone.
#
# A number is valid if each digit remains a digit after rotation. 0, 1, and 8 rotate to themselves; 2 and 5 rotate to each other; 6 and 9 rotate to each other, and the rest of the numbers do not rotate to any other number and become invalid.
#
# Now given a positive number N, how many numbers X from 1 to N are good?
#
# Example:
# Input: 10
# Output: 4
# Explanation:
# There are four good numbers in the range [1, 10] : 2, 5, 6, 9.
# Note that 1 and 10 are not good numbers, since they remain unchanged after rotating.
#
# Note:
#
# N will be in range [1, 10000].
#
class Solution:
up_to_10_same = 3
up_to_10_rotate = 4
up_to_100_same = 3*up_to_10_same # 3 symetric numbers for 0x, 1x and 8x
up_to_100_rotate = 4*(up_to_10_same+up_to_10_rotate) + 3*up_to_10_rotate # 4 good numbers for 0x, 1x, 2x, 5x, 6x, 8x, 9x + 3 good for 2x, 5x, 6x, 9x
up_to_1000_same = 3*up_to_100_same
up_to_1000_rotate = 4*(up_to_100_same+up_to_100_rotate) + 3*up_to_100_rotate
up_to_10000_same = 3*up_to_1000_same
up_to_10000_rotate = 4*(up_to_1000_same+up_to_1000_rotate) + 3*up_to_1000_rotate
cache = { 10: [up_to_10_same, up_to_10_rotate],
100: [up_to_100_same, up_to_100_rotate],
1000: [up_to_1000_same, up_to_1000_rotate]}
def get_rotators(self, digit):
if digit > 8:
return 4
elif digit > 5:
return 3
elif digit > 4:
return 2
elif digit > 1:
return 1
return 0
def get_symetrics(self, digit):
if digit > 7:
return 3
elif digit > 0:
return 2
else:
return 1
def is_rotator(self, digit):
return digit in [2,5,6,9]
def is_symetric(self, digit):
return digit in [0,1,8]
def is_invalid(self, digit):
return digit in [3,4,7]
def rotatedDigits(self, N: int) -> int:
N0 = N
result = 0
global_rotate = False
divisor = 1000
digit = N // 1000
if digit > 0:
N -= 1000 * digit
rot = self.get_rotators(digit - 1)
sym = self.get_symetrics(digit - 1)
result += rot * (self.up_to_1000_same+self.up_to_1000_rotate) + sym * self.up_to_1000_rotate
if self.is_invalid(digit): return result
if self.is_rotator(digit): global_rotate = True
divisor = 100
digit = N // 100
#print("%d 100s" % digit)
if digit > 0:
N -= 100 * digit
rot = self.get_rotators(digit - 1)
sym = self.get_symetrics(digit - 1)
if global_rotate:
result += (rot+sym) * (self.up_to_100_same+self.up_to_100_rotate)
else: result += rot * (self.up_to_100_same+self.up_to_100_rotate) + sym * self.up_to_100_rotate
if self.is_invalid(digit): return result
if self.is_rotator(digit): global_rotate = True
print("N(%d) -> 100s result: %d -- global_rotate %s" % (N0, result, global_rotate))
digit = N // 10
#print("%d 10s" % digit)
if digit > 0:
N -= 10 * digit
rot = self.get_rotators(digit - 1)
sym = self.get_symetrics(digit - 1)
#print("result += %d*%d + %d*%d" % (rot,self.up_to_10_same+self.up_to_10_rotate,sym,self.up_to_10_rotate))
if global_rotate:
result += (rot+sym) * (self.up_to_10_same+self.up_to_10_rotate)
else: result += rot * (self.up_to_10_same+self.up_to_10_rotate) + sym * self.up_to_10_rotate
if self.is_invalid(digit): return result
if self.is_rotator(digit): global_rotate = True
print("N(%d) -> 10s result: %d -- global_rotate %s" % (N0, result, global_rotate))
if global_rotate:
result += self.get_symetrics(N)
result += self.get_rotators(N)
print("N(%d) -> %d" % (N0, result))
return result
def rotatedDigits(self, N: int) -> int:
N0 = N
result = 0
global_rotate = False
divisor = 1000
while N and divisor != 1:
digit = N // divisor
if digit > 0:
N -= divisor * digit
rot = self.get_rotators(digit - 1)
sym = self.get_symetrics(digit - 1)
if global_rotate:
# print("result += (%d + %d) * (%d + %d)" % (rot, sym, self.cache[divisor][0],
# self.cache[divisor][1]))
result += (rot + sym) * (self.cache[divisor][0] + self.cache[divisor][1])
else:
# print("result += %d * (%d + %d) + %d * %d" % (rot, self.cache[divisor][0],
# self.cache[divisor][1], sym, sym * self.cache[divisor][0]))
result += rot * (self.cache[divisor][0] + self.cache[divisor][1]) + \
sym * self.cache[divisor][1]
if self.is_invalid(digit): return result
if self.is_rotator(digit): global_rotate = True
divisor /= 10
if global_rotate:
result += self.get_symetrics(N)
result += self.get_rotators(N)
# print("N(%d) -> %d" % (N0, result))
return result
def check_solution():
s = Solution()
assert(s.rotatedDigits(1) == 0)
assert(s.rotatedDigits(2) == 1)
assert(s.rotatedDigits(3) == 1)
assert(s.rotatedDigits(4) == 1)
assert(s.rotatedDigits(5) == 2)
assert(s.rotatedDigits(6) == 3)
assert(s.rotatedDigits(7) == 3)
assert(s.rotatedDigits(8) == 3)
assert(s.rotatedDigits(9) == 4)
assert(s.rotatedDigits(10) == 4)
assert(s.rotatedDigits(11) == 4)
assert(s.rotatedDigits(19) == 8)
assert(s.rotatedDigits(20) == 9)
assert(s.rotatedDigits(25) == 12)
assert(s.rotatedDigits(29) == 15)
assert(s.rotatedDigits(100) == 40)
assert(s.rotatedDigits(101) == 40)
assert(s.rotatedDigits(199) == 80)
assert(s.rotatedDigits(200) == 81)
assert(s.rotatedDigits(201) == 82)
assert(s.rotatedDigits(250) == 102)
assert(s.rotatedDigits(299) == 129)
assert(s.rotatedDigits(300) == 129)
assert(s.rotatedDigits(320) == 129)
assert(s.rotatedDigits(390) == 129)
assert(s.rotatedDigits(399) == 129)
assert(s.rotatedDigits(799) == 227)
assert(s.rotatedDigits(800) == 227)
assert(s.rotatedDigits(1799) == 543)
assert(s.rotatedDigits(2999) == 975)
assert(s.rotatedDigits(3999) == 975)
assert(s.rotatedDigits(4999) == 975)
print("All tests passed successfully!!")
check_solution()
| true |
515cb83e3095d0afab7892d717bdc0309f6e401b | Python | HengjieXu/FYP-NLP | /Data Acquisition/guardian.py | UTF-8 | 3,634 | 2.5625 | 3 | [] | no_license | #api key: 99b71d35-7fbf-4fb1-b58b-b0c25a18775a
import json
import urllib2
from bs4 import BeautifulSoup
class Guardian:
BASEURL = 'http://content.guardianapis.com/search?q='
def __init__(self, company, start, end, pn):
self.company = company
self.start = start
self.end = end
self.pn = pn
def get_link(self, company, start, end, pn):
subject = self.BASEURL + company.replace(" ", "+")
pn = 'page=' + str(self.pn)
str_date = 'from-date=' + str(start)
end_date = 'to-date=' + str(end)
pageSize = 'page-size=10'
blocks = 'format=json&show-blocks=all'
key = 'api-key=99b71d35-7fbf-4fb1-b58b-b0c25a18775a'
link = [subject, str_date, end_date, pn, pageSize, blocks, key]
url = '&'.join(link)
print url
return url
def get_news(self):
fuzzy_name = self.company.split()
if len(fuzzy_name) == 1:
headline_name = fuzzy_name[0]
else:
headline_name = fuzzy_name[1]
link = self.get_link(self.company, self.start, self.end, self.pn)
request = urllib2.urlopen(link)
response = request.read()
js = json.loads(response)
num = js['response']['total']
print int(num/10) + 1
seqs = range(int(num/10) + 1)
dict ={}
list = []
success = 0
text_path = '/Users/HENGJIE/Desktop/text repo/Guardian/' + self.company + '.txt'
f = open(text_path, 'w+')
for seq in seqs:
pn = 'page=' + str(seq+1)
url = self.get_link(self.company, self.start, self.end, pn)
req = urllib2.urlopen(url).read()
res = req.strip('()')
js = json.loads(res)
results = js['response']['results']
for result in results:
if result['type'] == 'article':
value = True
headline = result['webTitle']
for item in list:
if item['title'] == headline:
value = False
break
if value:
if headline.lower().find(headline_name.lower()) >= 0:
dict['title'] = headline
dict['url'] = result['webUrl']
request = urllib2.Request(result['webUrl'])
html = urllib2.urlopen(request)
if html:
news = ""
soup = BeautifulSoup(html, 'html.parser')
content = soup.select('div.content__article-body > p')
for article in content:
if article.find(class_='element-rich-link'):
continue
text = article.get_text()
if isinstance(text, basestring):
news = news + text.encode('utf8') + "\n"
else:
news = news + unicode(text).encode("utf8") + "\n"
dict['content'] = news
list.append(dict.copy())
success += 1
print ("success {}".format(success))
if success == 10:
break
if success == 10:
break
json.dump(list, f, indent=4)
return list, text_path
| true |
421baec48f3e497d836741ab5669002bcbc1288d | Python | adityataksande/virtualinterview | /speech.py | UTF-8 | 1,391 | 3.296875 | 3 | [] | no_license | import speech_recognition as sr
# get audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print(" What are the basic data types associated with C?")
print("Say Your Answer:")
audio = r.listen(source)
def convert(usr):
return (usr[0].split())
def convert(ans):
return(ans[0].split())
def intersection(ans,usr):
lst3 = [value for value in ans if value in usr]
return lst3
try:
print("You said =" + r.recognize_google(audio))
usr = [r.recognize_google(audio)]
print( convert(usr))
ans =["Int Float Double Charecter void"]
#print( convert(ans))
#print(intersection(convert(usr),convert(ans) ))
l= len(intersection(convert(usr),convert(ans) ));
if l<=2:
print("Your Answer is Partially Correct")
elif l==0 :
print("Your Answer is Wrong")
else:
print("Your Answer is Correct")
except sr.UnknownValueError:
print("Could not understand audio")
except sr.RequestError as e:
print("Could not request results; {0}".format(e))
| true |
0800419fce5377c80c359b56d3ba0d1a9221c05b | Python | elizabethguy86/Traffic_Pullovers_WA | /Police_Data_Pandas.py | UTF-8 | 4,310 | 2.796875 | 3 | [] | no_license |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
import statsmodels.discrete.discrete_model as sm
data_raw = pd.read_csv('/home/ec2-user/WA-clean.csv.gz', compression='gzip')
df = data_raw.drop(['state', 'stop_time', 'location_raw', 'county_fips', 'police_department', 'driver_age_raw', 'driver_race_raw',
'violation_raw','search_type_raw', 'is_arrested'], axis=1)
df['stop_date'] = pd.to_datetime(df.stop_date) #make stopdate a datetime object
df.driver_age.fillna(df.driver_age.mean(), inplace=True) #fill missing driver ages with mean
#Dummy coding:
df['driver_gender'] = pd.Series(np.where(df.driver_gender.values == 'F', 1, 0),
df.index)
df['officer_gender'] = pd.Series(np.where(df.officer_gender.values == 'F', 1, 0),
df.index)
race_dummies = pd.get_dummies(df.driver_race)
officer_race = pd.get_dummies(df.officer_race)
officer_race.columns = ['O_Asian', 'O_Black', 'O_Hispanic', 'O_Other', 'O_White']
merged = df.merge(race_dummies, left_index=True, right_index=True)
merged = merged.merge(officer_race, left_index=True, right_index=True)
merged['drugs_related_stop'] = pd.Series(np.where(merged.drugs_related_stop.values == False, 0, 1),
merged.index)
#was a search conducted --> This is the outcome variable
merged['search_conducted'] = pd.Series(np.where(merged.search_conducted.values == False, 0, 1),
merged.index)
merged['White_White'] = merged.White * merged.O_White#White driver White officer
merged['Black_White'] = merged.Black * merged.O_White#Black driver White officer
merged['Asian_White'] = merged.Asian * merged.O_White#Asian driver White officer
merged['Hispanic_White'] = merged.Hispanic * merged.O_White#Hispanic driver White officer
merged['White_Black'] = merged.White * merged.O_Black#White driver Black officer
merged['Black_Black'] = merged.Black * merged.O_Black #Black driver Black officer
X = merged.loc[:, ['driver_gender', 'driver_age', 'officer_gender', 'drugs_related_stop', 'Asian', 'Black', 'Hispanic',
'Other', 'White', 'O_Asian', 'O_Black', 'O_Hispanic', 'O_Other',
'O_White', 'White_White', 'Black_White', 'Asian_White',
'Hispanic_White', 'White_Black', 'Black_Black']]
y = merged.loc[:, ['search_conducted']]
y = y.values.reshape(8624032,)
fitted_X_train, fitted_X_test, fitted_y_train, fitted_y_test = train_test_split(X, y)
log_model = LogisticRegression()
log_model.fit(fitted_X_train, fitted_y_train)
preds = log_model.predict_proba(fitted_X_test)
log_loss(fitted_y_test, preds)
#log_loss = 0.091707177832494116
#Outputs/Unregularized log model:
'''[('driver_gender', -0.45287198553218022),
('driver_age', -0.022968236778229147),
('officer_gender', -0.010071773580308693),
('drugs_related_stop', 5.3455525078345518),
('Asian', 4.4184342895010786),
('Black', 5.0817284617090754),
('Hispanic', 4.9672820609552959),
('Other', 5.6290894859918872),
('White', 4.51373788931362),
('O_Asian', -1.02323305170735),
('O_Black', -1.3383925344273988),
('O_Hispanic', -1.2568753572088545),
('O_Other', -1.2627456694534289),
('O_White', -1.2557085389982203),
('White_White', 0.15454493742075387),
('Black_White', 0.28540365322816114),
('Asian_White', 0.0098635316063967592),
('Hispanic_White', 0.16441330211105804),
('White_Black', 0.0030278565889577521),
('Black_Black', 0.23015004098785694)]'''
#Outputs/Regularized log model:
'''[('driver_gender', -0.3935240677185543),
('driver_age', -0.026125974607706486),
('officer_gender', -0.18398255317748974),
('drugs_related_stop', 5.344023308753422),
('Asian', -0.897088274439007),
('Black', -0.4200507938800719),
('Hispanic', -0.33117288482073703),
('White', -1.0801687160251703),
('O_Asian', 0.09729142178973262),
('O_Black', 0.24368222941758863),
('O_Hispanic', 0.24624028011210103),
('O_White', 0.6638878949044615),
('White_White', -0.42025884032732197),
('Black_White', -0.35444857105573435),
('Asian_White', -0.6968193355182569),
('Hispanic_White', -0.7312850205495123),
('White_Black', -0.6475790081350615),
('Black_Black', 0.05709114369361723)]''' | true |
057f6321f6842390b45181839d9487b08c581cbd | Python | mingles/shape-recognition | /featuriser.py | UTF-8 | 4,389 | 3.046875 | 3 | [] | no_license | __author__ = 'Sam Davies and Mingles'
import cv2
import numpy as np
from countour_finder import ContourFinder
class FeaturiserSimple(object):
def __init__(self, img):
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
contours_sorted, _, _ = self.img_to_contours(gray_img)
min_area = 110
max_area = cv2.contourArea(contours_sorted[0])/25
relevant_contours = self.find_relevant_contours(contours_sorted, min_area, max_area)
self.feature_vectors = [self.get_feature_vector(cnt, img, gray_img) for cnt in relevant_contours]
@staticmethod
def get_feature_vector(cnt, img, gray_img):
"""
Extract the feature vector of the given contour
:param cnt: the contour to extract from
:return: the feature vector extracted
"""
moments = cv2.moments(cnt)
area = cv2.contourArea(cnt)
perimeter = cv2.arcLength(cnt, True)
hull = cv2.convexHull(cnt)
hull_area = cv2.contourArea(hull)
solidity = float(area)/hull_area
x, y, w, h = cv2.boundingRect(cnt)
rect_area = w*h
extent = float(area)/rect_area
mask = np.zeros(gray_img.shape, np.uint8)
cv2.drawContours(mask, [cnt], 0, 255, -1)
mean_val = cv2.mean(img, mask=mask)
hu_moment = cv2.HuMoments(moments)
#print hu_moment
compactness = perimeter * perimeter / (4 * np.pi * area)
feature_vector = [compactness, extent, mean_val[2]]
return feature_vector
def img_to_contours(self, gray_img):
"""
Get a list of all contours in this image sorted by area descending
:param gray_img: the image to get contours from
:return: contours sorted by area descending
"""
# turn the image into binary (black and white, no grey)
blur = cv2.GaussianBlur(gray_img, (1, 1), 1000)
ret, thresh = cv2.threshold(blur, 129, 255, cv2.THRESH_BINARY)
# find all the contours in the image, all areas of joint white/black
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
card_cnt_index, card_cnt = self.max_contour_area_index(contours)
# removed all non childs of the card
good_cnts = [card_cnt]
for n in range(0, len(contours)):
# make sure that the contours parent is the card
if hierarchy[0][n][3] == card_cnt_index:
good_cnts.append(contours[n])
# figure out the largest contour areas
return sorted(good_cnts, key=cv2.contourArea, reverse=True), contours, hierarchy
@staticmethod
def max_contour_area_index(contours, excluding=[]):
max_area = 0
max_area_index = 0
for b in range(0, len(contours)):
if cv2.contourArea(contours[b]) > max_area and b not in excluding:
max_area = cv2.contourArea(contours[b])
max_area_index = b
return max_area_index, contours[max_area_index]
@staticmethod
def find_relevant_contours(contours_sorted, min_area, max_area):
"""
Using a heuristic, find the meaningful contours from a list of contours
:param contours_sorted: the full list of contours
:return: only the meaningful contours
"""
if contours_sorted:
# draw all the contours who's area is between 2 thresholds
relevant_contours = []
# print "max area {0}".format(max_area)
for cnt in contours_sorted[1:]:
area = cv2.contourArea(cnt)
if min_area < area < max_area:
relevant_contours.append(cnt)
else:
if min_area > area:
break
return relevant_contours
else:
return []
class FeaturiserAdaptive(ContourFinder):
def __init__(self, img):
super(FeaturiserAdaptive, self).__init__(img)
self.feature_vectors = [self.get_feature_vector(cnt, img, self.grey_image) for cnt in self.symbol_contours]
@staticmethod
def get_feature_vector(cnt, img, gray_img):
return FeaturiserSimple.get_feature_vector(cnt, img, gray_img)
# class Featuriser(FeaturiserSimple):
class Featuriser(FeaturiserAdaptive):
def __init__(self, img):
super(Featuriser, self).__init__(img)
| true |
3af9f8a3713bd0981e688da88c71f53fdfb74fac | Python | ChaeMyungSeock/Study | /DB/ex01.py | UTF-8 | 2,942 | 3.5 | 4 | [] | no_license | '''
select * from member
/* => 주석처리
from 이후에는 내가 생성한 db 테이블 이름 F5를 눌러서 실행하면
테이블에서 생성한 데이터를 보여줌
*/
--데이터 베이스 구축하기
--데이터 정의어(DDL) : 데이터베이스 만들기
create database Test02;
/*
create database <database명>
위의 쿼리문은 데이터 정의어(DDL) 중의 하나인 create문을 이용하는 쿼리입니다.
위의 쿼리문을 실행시키기 위해서 해당 쿼리문을 블록처리하고 F5를 눌러 실행시킵니다.
그리고 좌측의 개체탐색기 > 데이터베이스를 확인하면 Test02 라는 데이터베이스가 새로 생긴것을 확인할 수 있습니다.
이제 우리가 방금 생성한 Test02 라는 데이터베이스 내에 새로운 테이블을 생성하고 데이터를 추가해야 합니다.
하지만 우리가 처음 시작할 때 master 로 설정하고 시작한 것을 기억하시나요?
이 상태에서 테이블을 생성하거나 데이터를 입력하려고 하면 우리가 원하는대로, Test02 라는 데이터베이스에 데이터가 기록되지 않고 시스템 데이터베이스에 기록되게 됩니다.
따라서 우리가 앞으로 Test02에서 작업하겠다고 컴퓨터에게 알려주어야 합니다.
이를 위해서 아래와 같은 쿼리를 입력합니다.
use Test02;
위의 쿼리문을 실행하면 아래와 같이 master로 선택되어 있었던 것이 Test02로 바뀜
'''
'''
create table member(
id int constraint pk_code primary key,
name char(10),
email char(10)
);
/*
쿼리를 실행시킬 때는 실행시키고자 하는 부분만 블록으로 감싸 F5를 눌러야한다.
그렇지 않고 F5를 누르게되면 해당 쿼리창의 시작부터 끝까지 모든 쿼리가 다시 실행되므로 에러가 발생할 수 있다.
id 칼럼은 contraint pk_code primary key 라고 붙어있는데, 여기서 constraint는 해당 칼럼에 특정 제약조건을 주겠다라는 의미이고 그 제약조건의 내용이 뒤에 따라서 붙습니다
여기서 pk_code primary key 라는 제약조건이 붙었는데, 이는 pk_code 라는 이름의 primary key로 설정하겠다라는 의미입니다.
즉, member 테이블에서의 primary key, 기본키는 id컬럼이며 해당 기본키의 이름은 pk_code이다
*/
-- 데이터 조작어(DML) : INSERT, SELECT
insert into member values(10, '홍범우', 'hong@eamil');
/*
위의 쿼리는, member 라는 테이블에 데이터를 insert 할 것이다라는 의미
입력되는 데이터의 내용은 values(~~~) 내부에 입력
그리고 입력한 데이터가 잘 저장되었나 확인하기 위해 아래 쿼리를 입력
select * from member; 이게 확인하기 위한 쿼리
* : *는 모든 칼럼을 의미 배경이되는 테이블은 from ~~
*/
select * from member
'''
| true |
c875976a2f1dea4fcec2b823ac65156277e6d8f1 | Python | n-schilling/datadog-synthetic-scheduler | /index.py | UTF-8 | 3,325 | 2.515625 | 3 | [
"MIT"
] | permissive | import json
import logging
import os
import sys
import boto3
import urllib3
urllib3.disable_warnings()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
http_pool = urllib3.PoolManager()
secretsmanager_client = boto3.client('secretsmanager')
def changeSyntheticStatus(new_status):
logger.info(f"Start changing Datadog Synthetic status to {new_status}")
try:
datadog_secret_name = os.getenv('datadogSecretName', 'Datadog_API_Key')
except:
logger.error("One of the environmet variable is missing")
raise
try:
get_secret_value_response = secretsmanager_client.get_secret_value(
SecretId=datadog_secret_name
)
if 'SecretString' in get_secret_value_response:
secret_value_str = get_secret_value_response['SecretString']
else:
logger.error(
f"Could not extract secret {datadog_secret_name} from Secrets Manager")
raise
secret_value = json.loads(secret_value_str)
dd_api_key = secret_value['datadog']['api_key']
dd_app_key = secret_value['datadog']['app_key']
except:
logger.error(
"There was an error while getting the parameter from the parameter store")
raise
synthetic_public_id = os.getenv('syntheticPublicId')
datadog_api_endpoint = os.getenv('datadogApiEndpoint')
datadog_endpoint_url = datadog_api_endpoint + \
'synthetics/tests/' + synthetic_public_id + '/status'
logger.info(
f"Changing status to {new_status} for Datadog Synthetic with ID {synthetic_public_id} against endpoint {datadog_endpoint_url}")
body_json = json.dumps({
"new_status": new_status,
})
put_response = http_pool.request('PUT', datadog_endpoint_url,
headers={
'Content-Type': 'application/json',
'DD-API-KEY': dd_api_key,
'DD-APPLICATION-KEY': dd_app_key
},
body=body_json)
if (put_response.status) != 200:
logger.error(
f"HTTP Call to change the status of Datadog Synthetic {synthetic_public_id} to {new_status} failed.")
logger.error(f"HTTP status is {put_response.status}")
raise
else:
decoded_response = json.loads(put_response.data.decode('utf-8'))
if decoded_response: # HTTP response is either true or false
logger.info(
f"Status of Datadog Synthetic {synthetic_public_id} was successfully changed to {new_status}")
else:
logger.error(
f"HTTP Call was successfull but the status of Datadog Synthetic {synthetic_public_id} was NOT changed to {new_status}. Response was {decoded_response}")
raise
def handler(event, context):
logger.info("Start with Datadog Synthetic Scheduler")
try:
synthetic_set_status = event['syntheticSetStatus']
except:
logger.error("Could not extract Synthetic destination status from event")
raise
changeSyntheticStatus(synthetic_set_status)
logger.info("End of Datadog Synthetic Scheduler")
if __name__ == "__main__":
handler(0, 0)
| true |
a9cb6f90f2f6f38df11577b697b4b9139a0c5c3f | Python | SeanPlusPlus/algorithms | /isPalindrome.py | UTF-8 | 365 | 3.203125 | 3 | [] | no_license | def isPal(li):
if len(li) <= 1:
return True
back = li.pop()
front = li.pop(0)
if back != front:
return False
return isPal(li)
def main():
s1 = 'racecar'
print isPal([c for c in s1])
s2 = 'hello'
print isPal([c for c in s2])
s3 = 'a'
print isPal([c for c in s3])
if __name__ == '__main__':
main()
| true |
bec2a75ae59c2d3c539514c4b48cc3845d73e797 | Python | qualiaa/aoc | /2022/16/a.py | UTF-8 | 2,241 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
import sys
from typing import Iterable, Iterator, Tuple
from itertools import chain, combinations
Path = frozenset[str]
def main(lines: Iterable[str]):
adjacency = {}
rates = {}
for line in map(str.split, lines):
key, rate, rest = line[1], line[4], line[9:]
adjacency[key] = [r.rstrip(",") for r in rest]
rates[key] = int(rate[5:-1])
rates = {k: v for k, v in rates.items() if v > 0}
all_pairs = transitive_closure(adjacency)
print(max(score for _, score in all_paths(all_pairs, rates, "AA")))
print(maximum_pair(all_paths(all_pairs, rates, "AA", max_cost=26, early_exit=True))[1])
def maximum_pair(paths: Iterable[Tuple[Path, int]]) -> Tuple[Tuple[Path, Path], int]:
maxed_paths = {}
for path, score in paths:
maxed_paths[path] = max(maxed_paths.get(path, score), score)
path_pairs = sorted((((p1, p2), s1+s2) for (p1, s1), (p2, s2) in combinations(maxed_paths.items(), 2)),
key=lambda pair: -pair[1])
return next(filter(lambda k: not frozenset.intersection(*k[0]), path_pairs))
def all_paths(costs, rates, start_node, max_cost=30, early_exit=False) -> Iterator[Tuple[Path, int]]:
def go(current_node, visited, score, remaining_cost):
next_cost = lambda n: remaining_cost - costs[current_node][n] - 1
next_nodes = [(k, next_cost(k)) for k in rates if k not in visited and next_cost(k) >= 0]
yield from chain.from_iterable(
(go(k, visited | {k}, score + cost*rates[k], cost)) for k, cost in next_nodes)
if early_exit or not next_nodes:
yield (visited, score)
yield from go(start_node, frozenset(), 0, max_cost)
def transitive_closure(adjacency: dict[str, list[str]]) -> dict[str, dict[str, int]]:
def go(working_set: set[str], costs, cost=0):
if working_set:
costs |= {node: cost for node in working_set}
return go(set(chain.from_iterable(
{n for n in adjacency[current_node] if n not in costs}
for current_node in working_set
)), costs, cost+1)
return costs
return {k: go({k}, {}) for k in adjacency}
if __name__ == "__main__":
main(sys.stdin)
| true |
3e4cd2e04cf87b704b95b55dc47f89c63943b12f | Python | alicank/Translation-Augmented-LibriSpeech-Corpus | /TA-LibriSpeech.py | UTF-8 | 11,066 | 2.890625 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] | permissive | # -*- coding: utf-8 -*-
import os,sys
import argparse
import sqlite3,math
from shutil import copyfile
from collections import OrderedDict
import re
class TA_LibriSpeech:
    """Accessor for the TA-LibriSpeech alignment database.

    Exposes the list of manually evaluated sentences and a helper that
    materialises a dataset (file lists, metadata, transcriptions and
    translations, optionally the audio files themselves) into a folder.
    """

    # Usage text for the CLI argument parser.  The string pieces are
    # concatenated without separators, reproducing the original message
    # byte-for-byte.
    parser_message = "Script developed to interact with the database" \
                     "to extract information easily:" \
                     "Example use: python3 TA-LibriSpeech.py train ./folder_output_train --size 1200 --verbose" \
                     "sort CNG --maxSegDuration 35.0 --minSegDuration 3.0 --extract "

    # Path of the SQLite database shipped with the corpus.  Kept as a
    # class attribute so tools/tests can repoint it before instantiation.
    db = "./TA-LibriSpeechCorpus.db"

    def __init__(self):
        # audio_filename identifiers of the manually evaluated sentences
        self.evaluated_list = self.getEvaluated()
        # Filled later by the CLI driver with identifiers already used for
        # the training split, so dev/test extraction can exclude them.
        self.train_dataset = []

    def getEvaluated(self):
        """Query the database for the ``audio_filename`` (unique
        identifier) of the sentences that were manually evaluated.

        Fifty sentences are taken from each of four fixed chapters
        (200 in total when the database is complete).

        :return: list of ``audio_filename`` strings
        """
        chapters = [51758, 123443, 127083, 163375]
        books = []
        db_connection = sqlite3.connect(TA_LibriSpeech.db)
        try:
            for chapter in chapters:
                # Parameterised query; the original concatenated the
                # chapter id into the SQL string.
                cursor = db_connection.execute(
                    "SELECT audio_filename FROM alignments "
                    "WHERE chapter_id = ? LIMIT 50", (chapter,))
                books.extend(row[0] for row in cursor.fetchall())
                cursor.close()
        finally:
            # The original never closed this connection (leak).
            db_connection.close()
        return books

    @staticmethod
    def write_data(outputFolder, data):
        """Write the dataset described by *data* into *outputFolder*.

        Creates ``filepaths.txt``, ``metadata.meta``, ``transcription.txt``
        and ``translation.txt``; when ``args.extract`` is set, the audio
        files are additionally copied into the folder.

        NOTE(review): depends on the module-level ``args`` namespace built
        in ``__main__`` (reads ``args.extract`` / ``args.verbose`` and
        mutates ``args.output``) -- confirm callers set it up first.

        :param outputFolder: destination directory (created if missing)
        :param data: mapping audio filename stem -> dict with keys
            ``book_id``, ``chapter_id``, ``sentNo``, ``transcription``,
            ``translation``, ``score`` and ``segment_duration``
        """
        args.output = outputFolder
        if not os.path.exists(args.output):
            os.mkdir(args.output)
        # Context managers guarantee every handle is closed (the original
        # never closed metadata.meta).
        with open(args.output + "/filepaths.txt", "w", encoding="utf8") as filepath_fh, \
                open(args.output + "/metadata.meta", "w", encoding="utf8") as metadata_fh, \
                open(args.output + "/transcription.txt", "w", encoding="utf8") as transcription_fh, \
                open(args.output + "/translation.txt", "w", encoding="utf8") as translation_fh:
            for k, v in data.items():
                filepath_fh.write('{}/{}/{}/{}\n'.format('./audio_files',
                                                         str(v['book_id']), str(v['chapter_id']), k + ".wav"))
                if args.extract:
                    # Copy the referenced audio file next to the text files.
                    if args.verbose:
                        print("Copying file\t" + k + ".wav to destination " + args.output + "/{}".format(k) + ".wav")
                    copyfile('{}/{}/{}/{}'.format('./audio_files',
                                                  str(v['book_id']), str(v['chapter_id']), k + ".wav"),
                             args.output + "/{}".format(k) + ".wav")
                # One tab-separated metadata line per utterance (note the
                # trailing tab before the newline, kept for compatibility).
                metadata_fh.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t\n".format(k, str(v['book_id']), str(v['chapter_id']),
                                                                              str(v['sentNo']), v['transcription'],
                                                                              v['translation'],
                                                                              str(v['score']), str(v['segment_duration'])))
                transcription_fh.write("{}\n".format(v['transcription']))
                translation_fh.write("{}\n".format(v['translation']))
if __name__ == '__main__':
database = TA_LibriSpeech()
parser = argparse.ArgumentParser(description=TA_LibriSpeech.parser_message,
formatter_class=argparse.RawDescriptionHelpFormatter)
#Required arguments
parser.add_argument('action', help='1)train 2)dev 3)test')
parser.add_argument('output', help='Destination to output folder')
parser.add_argument('--size', help="Size of the corpus to be extracted: (minutes)"
"Default: 100 hours = 6000 minutes ", default=6000)
## Arguments for dev/test
parser.add_argument("--listTrain", help="Path to the filepaths.txt that contains the filepaths used in training dataset"
"in order to exclude them in dev and test sets")
parser.add_argument("--useEvaluated", help="For test/dev datasets, this clause gives privilege to "
"200 sentences that are manually evaluated", default = True)
#Optional arguments
parser.add_argument('--sort', help='Sorts the corpus using scores before extracting'
'None: Do not sort'
'hunAlign: Using hunAlign scores'
'CNG: Using CL-CNG & CL-CTS scores ', default='None',
choices=('None', 'hunAlign', 'CNG', 'LM'))
parser.add_argument('-v', '--verbose', help='verbose mode',
action='store_true', default= True)
parser.add_argument("--maxSegDuration",help="Maximum length of an audio file (segment) in seconds"
"Default: 30 seconds", default=30.0, type=float)
parser.add_argument("--minSegDuration",help="Minimum length of an audio file", default = 0.0, type=float)
parser.add_argument("--extract", help="Copies the sound files to output folder instead of"
"copying only the audio filenames along with transcription and translation files",
action="store_true", default = False)
args = parser.parse_args()
sorttype = args.sort
if args.sort == "None":
args.sort = "ORDER BY book_id"
elif args.sort == "hunAlign":
args.sort = "ORDER BY alignment_score DESC"
elif args.sort == "CNG":
args.sort = "ORDER BY cng_score DESC"
elif args.sort == "LM":
args.sort = "ORDER BY lm_score"
dev_test = False
if args.listTrain != "" and (args.action == "dev" or args.action == "test"):
dev_test =True
with open(args.listTrain,"r",encoding="utf8") as fh:
train_files = fh.readlines()
for train_file in train_files:
searchObject = re.search(r"(\d+-\d+-\d+)",train_file.strip(),re.I)
identifier = searchObject.group(1)
database.train_dataset.append(identifier)
db_connection = sqlite3.connect(TA_LibriSpeech.db)
maxLimit = args.size
if maxLimit != "max":
maxLimit = float(maxLimit) * 60 # Seconds
else:
maxLimit = math.inf
if dev_test and args.useEvaluated:
time_counter = 0.0
data = OrderedDict()
for id in database.evaluated_list:
query = "SELECT * FROM alignments WHERE audio_filename = \"" + id + "\""
cursor = db_connection.execute(query)
row = cursor.fetchone()
cursor.close()
#Get the human evaluation score (AVG) and append
query = "SELECT AVG(alignment_eval) FROM alignments_evaluations WHERE chapter_id = 123443 AND sent_id = 0"
row_evaluation = db_connection.execute(query).fetchone()
if row_evaluation[0] >= 3.0:
if time_counter <= maxLimit:
print(row_evaluation)
sys.exit("STOP ")
# Query for segment duration
(audio_filename,
book_id, chapter_id,
sentno, transcrpt, transl, exclusion) = row[1], row[2], row[3], row[4], row[5], row[7], row[11]
cursor = db_connection.execute(
"SELECT * FROM alignments_audio WHERE audio_filename = \"" + audio_filename + "\"")
audio_row = cursor.fetchone()
segment_duration = audio_row[-1]
cursor.close()
if segment_duration >= args.minSegDuration \
and segment_duration <= args.maxSegDuration:
time_counter += segment_duration
if args.verbose:
print(row)
# Alignment scores
score = row_evaluation[0]
bookItem = {}
bookItem['book_id'] = book_id
bookItem['chapter_id'] = chapter_id
bookItem['segment_duration'] = segment_duration
bookItem['score'] = score
bookItem['sentNo'] = sentno
bookItem['transcription'] = transcrpt
bookItem['translation'] = transl
data[audio_filename] = bookItem
#Do not continue with the rest of the pipeline if its dev/test
write_data(args.output,data)
sys.exit("")
# Query for DB
query = " SELECT * FROM alignments " \
"JOIN (alignments_excluded JOIN alignments_scores " \
"USING(audio_filename)) USING (audio_filename) WHERE excluded != \"True\" " + args.sort
cursor = db_connection.execute(query)
if args.verbose:
print("Starting query")
query_results = cursor.fetchall()
time_counter = 0.0
data = OrderedDict()
for row in query_results:
if time_counter <= maxLimit:
(audio_filename,
book_id,chapter_id,
sentno,transcrpt,transl,exclusion) = row[1],row[2],row[3],row[4],row[5],row[7],row[11]
# print(audio_filename,book_id,chapter_id,sentno,transcrpt,transl,exclusion)
#Keep manually evaluted audio files to test & dev
if args.action == "train" and audio_filename in database.evaluated_list:
continue
#Alignment scores
scores = {}
scores['CNG'] = row[-2]
scores['hunAlign'] = row[8]
scores['None'] = row[-2]
scores['LM'] = row[-1]
score = scores[sorttype]
#Query for segment duration
cursor = db_connection.execute("SELECT * FROM alignments_audio WHERE audio_filename = \"" + audio_filename+"\"")
audio_row = cursor.fetchone()
segment_duration = audio_row[-1]
cursor.close()
if segment_duration >= args.minSegDuration\
and segment_duration <= args.maxSegDuration:
time_counter += segment_duration
if args.verbose:
print(row)
bookItem = {}
bookItem['book_id'] = book_id
bookItem['chapter_id'] = chapter_id
bookItem['segment_duration'] = segment_duration
bookItem['score'] = score
bookItem['sentNo'] = sentno
bookItem['transcription'] = transcrpt
bookItem['translation'] = transl
data[audio_filename] = bookItem
cursor.close()
write_data(args.output,data)
"""
SELECT * FROM alignments JOIN (alignments_excluded JOIN alignments_scores USING(audio_filename) ) USING ( audio_filename ) WHERE excluded != "True" ORDER BY alignment_score DESC
"""
| true |
3143fe62537119feecd982970e3e6bac0d24c236 | Python | iaakanksha/Basic | /simple calculator.py | UTF-8 | 3,037 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
from tkinter import *

# Root window and the single entry widget shared by all button handlers below.
root = Tk()
root.title("Simple calculator")

# `e` is read/cleared/written by every handler (module-level by design).
e = Entry(root,width=35,borderwidth=5)
e.grid(row=0,column=0,columnspan=3,padx=10,pady=10)
def button_click(number):
    """Append the pressed digit to whatever is already shown in the entry."""
    shown = e.get()
    updated = str(shown) + str(number)
    e.delete(0, END)
    e.insert(0, updated)
def button_clear():
    """Clear the calculator entry."""
    e.delete(0,END)
def _start_operation(op_name):
    """Shared handler for the +, - and * buttons.

    Remembers the first operand in the global `f_num`, records the pending
    operation tag in the global `math`, and clears the entry so the user can
    type the second operand.  The three public handlers below were copy-paste
    duplicates differing only in the tag they stored.
    """
    global f_num
    global math
    math = op_name
    f_num = int(e.get())
    e.delete(0, END)


def button_add():
    """'+' button: start an addition."""
    _start_operation("addition")


def button_subtract():
    """'-' button: start a subtraction."""
    _start_operation("subtraction")


def button_multiply():
    """'*' button: start a multiplication."""
    _start_operation("multiplication")
def button_equal():
    """'=' button: apply the pending operation to the stored first operand.

    Reads the second operand from the entry, clears it, then shows the
    result of the operation recorded in the global `math` tag.
    """
    second_number = e.get()
    e.delete(0, END)
    ops = {
        'addition': lambda a, b: a + b,
        'subtraction': lambda a, b: a - b,
        'multiplication': lambda a, b: a * b,
    }
    if math in ops:
        e.insert(0, ops[math](f_num, int(second_number)))
button_1 = Button(root, text='1', padx=40, pady=20, command=lambda: button_click(1))
button_2 = Button(root, text='2', padx=40, pady=20, command=lambda: button_click(2))
button_3 = Button(root, text='3', padx=40, pady=20, command=lambda: button_click(3))
button_4 = Button(root, text='4', padx=40, pady=20, command=lambda: button_click(4))
button_5 = Button(root, text='5', padx=40, pady=20, command=lambda: button_click(5))
button_6 = Button(root, text='6', padx=40, pady=20, command=lambda: button_click(6))
button_7 = Button(root, text='7', padx=40, pady=20, command=lambda: button_click(7))
button_8 = Button(root, text='8', padx=40, pady=20, command=lambda: button_click(8))
button_9 = Button(root, text='9', padx=40, pady=20, command=lambda: button_click(9))
button_0 = Button(root, text='0', padx=40, pady=20, command=lambda: button_click(0))
button_add =Button(root,text='+',padx=30,pady=40,command=button_add)
button_subtract =Button(root,text='-',padx=30,pady=20,command=button_subtract)
button_multiply =Button(root,text='*',padx=30,pady=20,command=button_multiply)
button_equal =Button(root,text='=',padx=91,pady=20,command=button_equal)
button_clear =Button(root,text='clear',padx=79,pady=20,command=button_clear)
#put buttons on the screen
button_1.grid(row=3,column=0)
button_2.grid(row=3,column=1)
button_3.grid(row=3,column=2)
button_4.grid(row=2,column=0)
button_5.grid(row=2,column=1)
button_6.grid(row=2,column=2)
button_7.grid(row=1,column=0)
button_8.grid(row=1,column=1)
button_9.grid(row=1,column=2)
button_0.grid(row=4,column=0)
button_add.grid(row=5,column=0)
button_clear.grid(row=4,column=1)
button_subtract.grid(row=5,column=1)
button_multiply.grid(row=5,column=2)
button_equal.grid(row=4,column=2)
root.mainloop()
| true |
5787133d7827b6ae933689e4c60f8108971f1ba1 | Python | xCiaraG/Kattis | /vauvau.py | UTF-8 | 433 | 3.203125 | 3 | [] | no_license | dog_times = list(map(int, input().strip().split()))
def barking_dogs(t):
    """Return how many of the two dogs are barking at minute `t`.

    Uses the module-level `dog_times` = [a_bark, a_rest, b_bark, b_rest];
    a dog barks during minutes 1..bark of each (bark + rest) cycle, i.e.
    iff 0 < t mod (bark + rest) <= bark.
    """
    first = 0 < t % (dog_times[0] + dog_times[1]) <= dog_times[0]
    second = 0 < t % (dog_times[2] + dog_times[3]) <= dog_times[2]
    return int(first) + int(second)


times = list(map(int, input().strip().split()))
for t in times:
    # Each barking condition is now evaluated once per dog; the old code
    # re-evaluated both long expressions a second time for the elif branch.
    count = barking_dogs(t)
    if count == 2:
        print("both")
    elif count == 1:
        print("one")
    else:
        print("none")
| true |
c5c0dba79592ea0219eb51d5b9f06097d86ff773 | Python | gsantam/competitive-programming | /leetcode/easy/is-graph-bipartite.py | UTF-8 | 1,408 | 3 | 3 | [] | no_license | from collections import deque
class Solution:
    """LeetCode 785: decide whether an undirected graph is bipartite."""

    def isBipartite(self, graph):
        """Return True iff `graph` can be 2-coloured with no edge joining
        two same-coloured nodes.

        `graph` is an adjacency list: graph[u] is the list of neighbours
        of node u.  Runs a BFS 2-colouring from every yet-uncoloured node,
        so disconnected graphs are handled.  O(V + E) time, O(V) space.

        Fixes vs. the previous version: the `List[List[int]]` annotation
        raised NameError at class-definition time (typing was never
        imported), and an unused edge dictionary built from a misreading
        of the input format has been removed.
        """
        color = {}
        for start in range(len(graph)):
            if start in color:
                continue
            color[start] = 0
            queue = deque([start])
            while queue:
                u = queue.popleft()
                for v in graph[u]:
                    if v not in color:
                        # Neighbour gets the opposite colour.
                        color[v] = color[u] ^ 1
                        queue.append(v)
                    elif color[v] == color[u]:
                        return False
        return True
| true |
125fa061cd53a27a0312cb1a1c3052c6a5e9573b | Python | matthewmichihara/project-euler | /python/p045.py | UTF-8 | 223 | 3.3125 | 3 | [] | no_license | #! /usr/bin/python
def solve(limit=100000):
    """Project Euler 45: largest number that is simultaneously triangular,
    pentagonal and hexagonal, over indices n in [1, limit).

    Builds the three sets and intersects them.  Uses // so the values stay
    exact integers on both Python 2 and 3 (the old code used the Python 2
    print statement and / division, and shadowed the builtin hex()).
    """
    tri = {n * (n + 1) // 2 for n in range(1, limit)}
    pen = {n * (3 * n - 1) // 2 for n in range(1, limit)}
    hexa = {n * (2 * n - 1) for n in range(1, limit)}
    return max(tri & pen & hexa)


if __name__ == "__main__":
    print(solve())
| true |
60a7eca536906ab8f21ac8f953561453c90a0379 | Python | statropy/aoc-2020 | /day20.py | UTF-8 | 7,000 | 2.71875 | 3 | [] | no_license | #day20.py
import math
import re
def getedges(tile):
for line in tile:
size = len(tile)
top = int(tile[0],2)
right, bottom, left = 0,0,0
for i,line in enumerate(tile):
if line[-1] == '1':
right |= (1 << (size-i-1))
if line[0] == '1':
left |= (1 << i)
if tile[-1][i] == '1':
bottom |= (1 << i)
return [top, right, bottom, left]
#chenage to return new list
def rev(tile):
revtile = []
for line in tile:
x = list(line)
x.reverse()
revtile.append(''.join(x))
return revtile
tilemap = {}
with open('input20.txt', 'r') as f:
tileid = 0
for line in f:
if line[0] == 'T':
tileid = int(line[5:-2])
tilemap[tileid] = []
elif len(line) > 1:
tilemap[tileid].append(line.strip().replace('.','0').replace('#','1'))
size = len(next(iter(tilemap.values())))
mask = (1<<size)-1
width = int(math.sqrt(len(tilemap)))
edges = {}
edgecount = {}
for tileid,tile in tilemap.items():
front = getedges(tile)
back = getedges(rev(tile))
edges[tileid] = [front, back]
for edge in front+back:
edgecount.setdefault(edge, [])
edgecount[edge].append(tileid)
def other_edges(edges, tile):
    """Map each edge value to the ids of tiles (excluding `tile`) that use it.

    `edges` maps tile id -> (front_edges, back_edges).
    """
    counts = {}
    for other_id, (front, back) in edges.items():
        if other_id == tile:
            continue
        for value in front + back:
            counts.setdefault(value, []).append(other_id)
    return counts
corners = set()
sides = set()
middles = set()
puzzlemap = {}
for tileid,(front, back) in edges.items():
front_matches, back_matches = 0,0
puzzlemap[tileid] = [None, None, None, None]
for i,edge in enumerate(front):
matches = [e for e in edgecount[edge] if e != tileid]
if len(matches) == 1:
puzzlemap[tileid][i] = matches[0]
front_matches += 1
elif len(matches) > 0:
print('Too many', tileid, matches)
if front_matches == 2:
corners.add(tileid)
if front_matches == 3:
sides.add(tileid)
else:
middles.add(tileid)
def rotate(tileid, pmap, tmap):
    """Rotate tile `tileid` 90 degrees clockwise, in place in both maps.

    `pmap` holds the four neighbour slots (cycled by one position);
    `tmap` holds the tile's rows, replaced by the rotated grid.
    """
    slots = pmap[tileid]
    slots.insert(0, slots.pop())
    tmap[tileid] = [''.join(col) for col in zip(*reversed(tmap[tileid]))]
def shrink(oldpiece):
    """Return the tile with its outer border (first/last row and column) removed."""
    return [row[1:-1] for row in oldpiece[1:-1]]
def findnext(puzzlemap, tilemap, puzzlegrid, imagegrid, current, bottom=True, puzrow=None, gridrow=None):
    """Find the neighbour tile that fits below (bottom=True) or to the right
    of `current`, rotating/flipping the candidate until its facing edge
    matches.

    On success: the oriented tile's rows are appended into `puzzlegrid`
    (full tile) and `imagegrid` (border stripped via shrink()) — appended as
    new rows when puzrow/gridrow is None, otherwise concatenated onto the
    existing rows starting at that index — the orientation is written back to
    `tilemap`, the used link is cleared on both sides of `puzzlemap`, and the
    neighbour's id is returned.  Returns None implicitly if nothing fits.
    """
    # Edge of `current` that the neighbour must match: its bottom row, or
    # its right-hand column joined into a string.
    current_line = ''
    if bottom:
        #print('finding bottom of', current)
        current_line = tilemap[current][-1]
    else:
        #print('finding right of', current)
        current_line = ''.join([line[-1] for line in tilemap[current]])
    for tile in [x for x in puzzlemap[current] if x is not None]:
        tile_text = tilemap[tile]
        found = False
        rotations = 0
        # 8 orientations: 4 clockwise rotations, a horizontal flip, 4 more.
        for rotations in range(8):
            # Facing edge of the candidate: its top row or left column.
            tile_line = ''
            if bottom:
                tile_line = tile_text[0]
            else:
                tile_line = ''.join([line[0] for line in tile_text])
            if tile_line == current_line:
                found = True
                break
            else:
                if rotations == 3:
                    #print('flip')
                    # Mirror every row in place after the first four rotations.
                    for i,line in enumerate(tile_text):
                        line = list(line)
                        line.reverse()
                        tile_text[i] = ''.join(line)
                else:
                    # Rotate 90 degrees clockwise.
                    tile_text = [''.join(x) for x in zip(*tile_text[::-1])]
        if found:
            # Full-resolution grid keeps the borders...
            for r,line in enumerate(tile_text):
                if puzrow is None:
                    puzzlegrid.append(line)
                else:
                    puzzlegrid[puzrow+r] += line
            # ...the final image grid drops them.
            for r,line in enumerate(shrink(tile_text)):
                if gridrow is None:
                    imagegrid.append(line)
                else:
                    imagegrid[gridrow+r] += line
            tilemap[tile] = tile_text
            # Consume the link in both directions so it is not matched again.
            puzzlemap[current][puzzlemap[current].index(tile)] = None
            puzzlemap[tile][puzzlemap[tile].index(current)] = None
            return tile
topleft = next(iter(corners))
r = 0
while (puzzlemap[topleft][0] is not None) or (puzzlemap[topleft][3] is not None):
rotate(topleft, puzzlemap, tilemap)
r += 1
puzzlegrid = []
imagegrid = []
for line in tilemap[topleft]:
puzzlegrid.append(line)
for line in shrink(tilemap[topleft]):
imagegrid.append(line)
current = topleft
topofrow = topleft
for line in tilemap[current]:
puzzlegrid.append(line)
#first column
for i in range(1, width):
current = findnext(puzzlemap, tilemap, puzzlegrid, imagegrid, current)
for col in range(1, width):
#top of colum
topofrow = findnext(puzzlemap, tilemap, puzzlegrid, imagegrid, topofrow, False, 0, 0)
current = topofrow
#rest of column
for row in range(1, width):
current = findnext(puzzlemap, tilemap, puzzlegrid, imagegrid, current, True, size*row, (size-2)*row)
midline = re.compile(r'1....11....11....111')
bottomline = re.compile(r'1..1..1..1..1..1')
for i,line in enumerate(imagegrid):
imagegrid[i] = line.replace('0',' ')
matches = 0
finalgrid = None
middlelist = [0,5,6,11,12,17,18,19]
bottomlist = [1,4,7,10,13,16]
for rotations in range(8):
for i,line in enumerate(imagegrid):
if i == 0 or i == len(imagegrid)-1: continue
for m in midline.findall(line):
idx = line.index(m)
if imagegrid[i-1][idx+18] == '1':
bottommatch = sum([1 for x in bottomlist if imagegrid[i+1][x+idx] == '1'])
if bottommatch == len(bottomlist):
matches += 1
if finalgrid is None:
finalgrid = [x for x in imagegrid]
fgl = list(finalgrid[i-1])
fgl[idx+18] = 'O'
finalgrid[i-1] = ''.join(fgl)
fgl = list(finalgrid[i])
for z in middlelist:
fgl[idx+z] = 'O'
finalgrid[i] = ''.join(fgl)
fgl = list(finalgrid[i+1])
for z in bottomlist:
fgl[idx+z]= 'O'
finalgrid[i+1] = ''.join(fgl)
if matches == 0:
if rotations == 3:
#print('flip')
for i,line in enumerate(imagegrid):
line = list(line)
line.reverse()
imagegrid[i] = ''.join(line)
else:
imagegrid = [''.join(x) for x in zip(*imagegrid[::-1])]
else:
break
total = 0
for line in imagegrid:
total += line.count('1')
final = 0
for line in finalgrid:
final += line.count('1')
#part 1 answer
prod = 1
for c in corners:
prod *= c
print(prod)
print(final)
| true |
e0aede753622c191d61464e60cddaba307fb08cd | Python | dlwire/repo_metrics | /src/test_filtering.py | UTF-8 | 461 | 2.875 | 3 | [] | no_license | from fickle import apply_filters
import unittest
class FilterTest(unittest.TestCase):
def setUp(self):
self.collection = range(1,21)
self.filters = [
lambda x: x % 3 == 0,
lambda x: x % 2 == 0 ]
def test_applies_all_filters_to_collection(self):
result = apply_filters(self.collection, self.filters)
self.assertEqual(result, [6, 12, 18])
if __name__ == '__main__':
unittest.main()
| true |
1ef8c7477a71e5b6484ef426b6a9505e7a62c121 | Python | MITMotorsports/Telemetry_GUI | /CAN_Spec_Paser.py | UTF-8 | 3,577 | 2.671875 | 3 | [] | no_license | from collections import OrderedDict
if __name__ == '__main__':
with open('../MY17_Can_Library/can_validator/fsae_can_spec.txt', 'r') as in_file:
with open('CAN_SPEC.py', 'w') as out_file:
out_file.write('from collections import OrderedDict\n\n')
#parse for CAN IDs and data
ID_Dict = {}
Data_Pos_Dict = {}
cur_msg_name = ''
is_little_endian = {}
current_endian = 0
for line in in_file:
if 'MESSAGE_NAME' in line:
tmp = line.split(' ')
cur_msg_name = tmp[0][13:]
print(cur_msg_name)
ID = int(tmp[1][3:], 16)
print(tmp[1][3:])
ID_Dict[ID] = cur_msg_name
is_little_endian[cur_msg_name] = current_endian
if 'ENDIAN=' in line:
tmp = line.split('=')
tmp_endian = tmp[-1][:-1]
print(tmp_endian)
if tmp_endian == 'BIG':
current_endian = 0
else:
current_endian = 1
if 'DATA_NAME' in line:
tmp = line.split(' ')
print(tmp)
data_name = tmp[-2][10:]
print(data_name)
data_pos = tmp[-1][9:]
print(data_pos)
data_pos = data_pos.split(':')
print(data_pos[0])
print(data_pos[1])
data_pos = (int(data_pos[0]),int(data_pos[1]))
if cur_msg_name in Data_Pos_Dict.keys():
Data_Pos_Dict[cur_msg_name][data_name] = data_pos
else:
Data_Pos_Dict[cur_msg_name] = OrderedDict({data_name:data_pos})
#Write ID Dictionary in file
# print(Data_Pos_Dict)
out_file.write('ID_Dict = {')
first = 1
for k,v in ID_Dict.items():
if first:
first = 0
else:
out_file.write(',\n')
out_file.write(str(k)+':')
out_file.write('\'' + v + '\'')
out_file.write('}\n\n')
#Write data position Dictionary in file
out_file.write('Data_Pos_Dict = {')
first = 1
for k,v in Data_Pos_Dict.items():
if first:
first = 0
else:
out_file.write(',\n')
out_file.write('\'' + k + '\': ')
out_file.write('OrderedDict([')
first_v = 1
for k_v, v_v in v.items():
if first_v:
first_v = 0
else:
out_file.write(', ')
out_file.write('(\'' + k_v + '\',(' + str(v_v[0]) + ',' + str(v_v[1]) + '))')
out_file.write('])')
out_file.write('}\n\n')
#Write Endianess Dictionary in file
out_file.write('is_little_endian = {')
first = 1
for k,v in is_little_endian.items():
if first:
first = 0
else:
out_file.write(',\n')
out_file.write('\'' + k + '\': ')
out_file.write(str(v))
out_file.write('}\n\n')
out_file.close()
in_file.close()
| true |
73f00f81c5386ea69f29c26f42c41f82ab0c8fe7 | Python | costapt/linear_regression | /linear_regression.py | UTF-8 | 3,391 | 3.34375 | 3 | [] | no_license | import theano
import numpy as np
import theano.tensor as T
from theano import function
import matplotlib.pyplot as plt
from theano.tensor.shared_randomstreams import RandomStreams
def add_bias(X):
    """Prepend a constant 1 (bias) column to every row of X."""
    ones = np.ones((X.shape[0], 1), dtype=X.dtype)
    return np.concatenate((ones, X), axis=1)
def add_features(X):
    """Append the square of every original feature as extra columns.

    The result has the original columns first, then their squares in the
    same order, exactly as the old insert-one-column-at-a-time loop built.
    """
    return np.hstack((X, np.square(X)))
class LinearRegression:
def __init__(self,add_features=True,learning_rate=0.001,threshold=0.001,debug=False):
self.add_features = add_features
self.debug = debug
self.learning_rate = learning_rate
self.threshold = threshold
def train(self,X_train,Y_train):
# Add new features
if self.add_features:
X_train = add_features(X_train)
# Add bias to training examples in order to make the mathematics more elegant
X_train = add_bias(X_train)
(num_points, num_features) = X_train.shape
# Define theano variables to represent the training examples (X) and
# respective values (Y)
X = T.dmatrix('X')
Y = T.dvector('Y')
# Define the weight of the model
w = theano.shared(np.asarray([0]*num_features,dtype=theano.config.floatX))
# Prediction
y = T.dot(w,X.T)
# Cost function: Square the difference between the real label and the
# predicted label. Then take the mean of all these differences
cost = T.mean(T.sqr(Y-y))
grad = T.grad(cost=cost, wrt=w)
updates = [[w, w - grad*self.learning_rate]]
# Train function: Inputs X and Y. Computes the cost function.
# X is a matrix of dimensions [num_points,num_features]
# Y is a vector of dimension [num_points]
# cost is a function that returns how well the model fits the reality
# everytime train is called, w is updated
train = function([X,Y],outputs=cost,updates=updates,allow_input_downcast=True)
# Run the training function untill the reduction on the error is small enough
error_before, iterations = 0, 0
while True:
error = train(X_train,Y_train)
if self.debug:
iterations = iterations + 1
print("[iteration {0}] error = {1}".format(iterations,error))
if abs(error_before - error) < self.threshold:
break
error_before = error
self.w = w.get_value()
# return the learned weights
return w.get_value()
def predict(self,X):
if self.add_features:
X = add_features(X)
X = add_bias(X)
return np.dot(self.w,np.transpose(X))
def random_noise_generator(num_points,seed=234):
    """Return a length-`num_points` vector of standard-normal noise drawn
    via Theano's shared random stream (deterministic for a given seed)."""
    srng = RandomStreams(seed)
    rv_n = srng.normal((num_points,))
    f = function([],rv_n)
    return f()
def points_generator(f,num_points,start=0,end=4):
    """Sample f on `num_points` evenly spaced x in [start, end] and return
    (x, f(x) + Gaussian noise)."""
    x = np.linspace(start,end,num_points)
    return x,f(x)+random_noise_generator(num_points)
def main():
num_points = 150
x = T.dvector('x')
y = x**2
f = function([x],y)
X,y = points_generator(f,num_points,start=-5,end=5)
X = np.array([[xi] for xi in X])
model = LinearRegression()
w = model.train(X,y)
y_pred = model.predict(X)
plt.plot(X,y,'ro')
plt.plot(X,y_pred)
plt.show()
if __name__ == '__main__':
main()
| true |
aab2996e9c0b8a1fcc7a20aca4724773385bb894 | Python | hcjun-dev/Python_College | /src/file0306.py | UTF-8 | 1,212 | 4 | 4 | [] | no_license | ##
# Hyungchol Jun
# 2014-03-01
# p6.4 hij
def hfunc(mainlist):
    """Return True iff the list is sorted ascending or descending."""
    ascending = sorted(mainlist)
    descending = ascending[::-1]
    return mainlist == ascending or mainlist == descending
def ifunc(mainlist):
    """Return True iff some element equals its immediate neighbour.

    Bug fix: the old loop ran over range(len - 2) and never compared the
    final adjacent pair, so e.g. [1, 2, 2] was wrongly reported False.
    """
    for i in range(len(mainlist) - 1):
        if mainlist[i] == mainlist[i + 1]:
            return True
    return False
def jfunc(mainlist):
    """Return True iff the list contains duplicate values anywhere.

    Bug fix: the old inner loop ran to len - 1 and so never compared
    anything against the last element; duplicates involving it
    (e.g. [1, 2, 1]) were missed.
    """
    for i in range(len(mainlist) - 1):
        for j in range(i + 1, len(mainlist)):
            if mainlist[i] == mainlist[j]:
                return True
    return False
def func0306():
    """Read strings until a blank line, then report the hfunc/ifunc/jfunc
    results for the collected list.

    NOTE(review): the entries are kept as strings, so hfunc's order check
    compares lexicographically (e.g. "10" < "9") — confirm this is intended.
    """
    # start of main function
    mainlist = []
    inputv = input("Please input numbers: ") # Getting input
    while inputv != "":
        mainlist.append(inputv)
        inputv = input("Please input numbers: ")
    # Printing area.
    if hfunc(mainlist):
        print("h function was True")
    else:
        print("h was FALSE")
    if ifunc(mainlist):
        print("i function was True")
    else:
        print("i was FALSE")
    if jfunc(mainlist):
        print("j function was True")
    else:
        print("j was FALSE")
| true |
8cfe6ecd5c4b2dac68b974f4178ad2dd9a6b7d0e | Python | joanaalvoeiro/LN-MP1 | /unused_functions.py | UTF-8 | 3,659 | 2.71875 | 3 | [] | no_license | import numpy as np
import nltk
from nltk.corpus import wordnet
from nltk import WordNetLemmatizer
from nltk.stem import PorterStemmer
def cosine_similarity(a1, a2):
    """Cosine of the angle between two vectors: dot product over norms."""
    denom = np.linalg.norm(a1) * np.linalg.norm(a2)
    return np.dot(a1, np.transpose(a2)) / denom
def tf_idf(test_questions, known_questions):
questions_list = [test_questions, [q.question for q in known_questions]]
tfs = [[], []]
doc_terms = [{}, {}]
terms = set([])
for qn in range(len(questions_list)):
questions = questions_list[qn]
n_questions = len(questions)
for i in range(n_questions):
tfs[qn].append({})
for word in questions[i]:
terms.add(word)
if(word in tfs[qn][i]):
tfs[qn][i][word] += 1/len(questions[i])
else:
tfs[qn][i][word] = 1/len(questions[i])
if(word in doc_terms[qn]):
doc_terms[qn][word] += 1
else:
doc_terms[qn][word] = 1
terms = list(terms)
n_terms = len(terms)
test_tf_idf = np.zeros((len(questions_list[0]), n_terms))
known_tf_idf = np.zeros((len(questions_list[1]), n_terms))
tf_idf = [test_tf_idf, known_tf_idf]
for qn in range(len(questions_list)):
n_questions = len(questions_list[qn])
for i in range(n_questions):
for j in range(len(terms)):
if(terms[j] in tfs[qn][i]):
df = doc_terms[qn][terms[j]]
tf_idf[qn][i][j] = tfs[qn][i][terms[j]] * np.log(n_questions/df + 1)
return tf_idf[0], tf_idf[1]
def predict_labels_tf_idf(test_questions, known_questions, coarseness):
    """Label each test question with the label of its most similar known
    question under tf-idf cosine similarity; None when nothing beats 0.

    NOTE(review): relies on a get_label() helper that is not defined in
    this module — confirm it is imported wherever this function is used.
    """
    test_tf_idf, known_tf_idf = tf_idf(test_questions, known_questions)
    labels = []
    n_known_questions = len(known_questions)
    for i in range(len(test_questions)):
        best_similarity = 0
        closest_question = None
        for j in range(n_known_questions):
            similarity = cosine_similarity(test_tf_idf[i], known_tf_idf[j])
            if(similarity > best_similarity):
                best_similarity = similarity
                closest_question = known_questions[j]
        if(closest_question != None):
            labels.append(get_label(closest_question, coarseness))
        else:
            labels.append(None)
    return labels
def get_pos_tag(word):
    """Map a Penn Treebank POS tag to a WordNet POS constant (default ADV).

    NOTE(review): dead code — immediately redefined below by a second
    get_pos_tag that returns '' for unknown tags instead of ADV.
    """
    tag = nltk.pos_tag(word)[0][1].upper()
    if tag.startswith('J'):
        return wordnet.ADJ
    elif tag.startswith('V'):
        return wordnet.VERB
    elif tag.startswith('N'):
        return wordnet.NOUN
    else:
        return wordnet.ADV
def get_pos_tag(word):
    """Map a Penn Treebank POS tag to a WordNet POS constant.

    Returns '' when the tag is not adjective/verb/noun/adverb; this
    definition overrides the near-identical one above.

    NOTE(review): nltk.pos_tag expects a sequence of tokens — called with
    a plain string it tags individual characters; confirm callers' intent.
    """
    tag = nltk.pos_tag(word)[0][1].upper()
    if tag.startswith('J'):
        return wordnet.ADJ
    elif tag.startswith('V'):
        return wordnet.VERB
    elif tag.startswith('N'):
        return wordnet.NOUN
    elif tag.startswith('R'):
        return wordnet.ADV
    else:
        return ''
def lemma(question):
    """Lemmatise every token of `question` with WordNet, POS-aware when
    get_pos_tag returns a non-empty tag."""
    # Local name deliberately shadows this function inside its own body.
    lemma = WordNetLemmatizer()
    lemmatized = []
    for word in question:
        tag = get_pos_tag(word)
        if len(tag) > 0:
            lemmatized.append(lemma.lemmatize(word, tag))
        else:
            lemmatized.append(lemma.lemmatize(word))
    return lemmatized
def token_stemm_Porter(questions):
    """Stem every token with NLTK's Porter stemmer and return the list."""
    stemmer = nltk.stem.PorterStemmer()
    return [stemmer.stem(token) for token in questions]
def token_stemm_RSLP(questions):
    """Stem every token with NLTK's RSLP (Portuguese) stemmer.

    Bug fix: the stemmed form `w` was computed but the ORIGINAL token was
    appended, so the function returned its input unchanged.
    """
    stemmer = nltk.stem.RSLPStemmer()
    q = []
    for word in questions:
        w = stemmer.stem(word)
        q.append(w)
    return q
f7b5f1c1d6a8c71ad1c68bf459f2c7d7d27d7ff7 | Python | srush/tf-fork | /node_and_hyperedge.py | UTF-8 | 17,160 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
''' Two classes that constitute a hypergraph (forest): Node and Hyperedge
On top of these, there is a separate Forest class in forest.py which collects the nodes,
and deals with the loading and dumping of forests.
implementation details:
1. node has a local score "node_score" and hyperedge "edge_score".
2. and they both have "beta" \'s.
this design is quite different from the original, where only edge-prob is present.
'''
import sys, os, re
import math
import copy
#sys.path.append(os.environ["NEWCODE"])
#import mycode
import heapq
logs = sys.stderr
from tree import Tree
from svector import Vector
from utility import symbol, desymbol
print_duplicates = False
import gflags as flags
FLAGS=flags.FLAGS
flags.DEFINE_boolean("bp", True, "use BP rules")
class Node(Tree):
''' Node is based on Tree so that it inherits various functions like binned_len and is_terminal. '''
def copy(self):
return copy.deepcopy(self)
def __init__(self, iden, labelspan, size, fvector, sent):
# NP [0-3]
self.iden = iden
label, span = labelspan.split()
self.span = tuple(map(int, span[1:-1].split("-")))
if label[-1] == "*":
label = label[:-1]
self._spurious = True
else:
self._spurious = False
self.label = "TOP" if label == "S1" else label
self.label = symbol(self.label)
self.edges = []
#new features
self.frags = []
#self.tfedges = []
#new feature: subtree str created for bp rules, NP(NN 'ch') -> lhs(bp) ### feats
self.subtree = ''
## N.B.: parse forest node can be termllinal
word = sent[self.span[0]] if (size == 0) else None
## now in MT forest, nodes are always non-final. hyperedges can be final (terminal).
## in tree.py
self.prepare_stuff(label, word)
self.fvector = fvector
self._root = False
self._bin_len = None
# surface string
self.surface = '%s' % ''.join(sent[self.span[0]:self.span[1]])
self._hash = hash(self.iden)
def __hash__(self):
return self._hash
def psubtree(self):
if self.subtree != '':
return self.subtree
else:
if self.is_terminal():
self.subtree = '%s("%s")' % (self.label, self.word)
return self.subtree
else:
self.subtree = '%s(%s)' % \
(self.label, ' '.join(sub.psubtree() for sub in self.edges[0].subs))
return self.subtree
def prepare_kbest(self):
self.klist = []
self.kset = set()
self.fixed = False
# if self.is_terminal(): --- WHY??/
if self.is_terminal():
self.klist = [self.bestres]
self.kset.add(tuple(self.besttree))
self.fixed = True
## self.bestedge = None ## N.B.: WHY??
self.cand = None
def mapped_span(self, mapping):
return (mapping[self.span[0]], mapping[self.span[1]])
def labelspan(self, separator=":", include_id=True, space=" "):
ss = "%s%s " % (self.iden, separator) if include_id else ""
lbspn = "%s%s[%d-%d]" % (self.label + ("*" if self.is_spurious() else ""),
space,
self.span[0], self.span[1])
return ss + lbspn
__str__ = labelspan
__repr__ = __str__
def is_spurious(self):
return self._spurious
def sp_terminal(self):
return self.is_spurious() and self.edges[0].subs[0].is_terminal()
def add_edge(self, hyperedge):
self.edges.append(hyperedge)
hyperedge.node = self ## important backpointer!
def number_edges(self, counter, newedgeorder):
retcount = counter
for edge in self.edges:
edge.position_id = retcount
newedgeorder.append(edge)
retcount += 1
return retcount
def assemble(self, subtrees):
'''this is nice. to be used by k-best tree generation.'''
## t = Tree(self.label, self.span, subs=subtrees, sym=False) if not self._spurious else subtrees[0]
## now done in hyperedge, not here in node
assert False, "shoudn't be called here, see Hyperedge."
assert t is not None, (self.label, self.span, subtrees, self._spurious)
# if self._root:
# ## notice that, roots are spurious! so...
# t.set_root(True)
return t
def this_tree(self):
## very careful: sym=False! do not symbolize again
return Tree(self.label, self.span, wrd=self.word, sym=False)
    def sumparse(self, weights=Vector("gt_prob=1")):
        """Inside (sum-product) score of this node under `weights`.

        betasum(node) = node_score + sum over hyperedges of
        edge_score * product of betasum(child); memoised in self.betasum,
        with node_score / edge_score dot products cached as side effects.

        NOTE(review): the default Vector argument is a shared mutable
        default, and node_score is ADDED (not multiplied) into the sum —
        confirm both are intended.
        """
        # memoize
        if hasattr(self,"betasum"):
            return self.betasum
        self.node_score = (self.fvector.dot(weights))
        score = 0
        for i, edge in enumerate(self.edges):
            #if FLAGS.bp or not edge.rule.is_bp():
            edge.edge_score = (edge.fvector.dot(weights)) # TODO
            edge.betasum = 1.0
            for sub in edge.subs:
                sub_beta = sub.sumparse(weights)
                edge.betasum *= sub_beta
            edge.betasum *= edge.edge_score
            score += edge.betasum
        self.betasum = score + self.node_score
        return self.betasum
# def sumparse(self, weights=Vector("gt_prob=1")):
# # memoize
# if hasattr(self,"betasum"):
# return (self.insidepaths, self.betasum)
# self.node_score = self.fvector.dot(weights)
# insidepaths = 0
# score = 0
# # not sure the name of this semiring,
# # but it computes + ((a + b),(a' + b')) and * ((a * b), (b * a' + a * b'))
# # gives the sum of all derivations
# for i, edge in enumerate(self.edges):
# #if FLAGS.bp or not edge.rule.is_bp():
# edge.edge_score = edge.fvector.dot(weights) # TODO
# edge.betasum = 0.0
# edge.insidepaths = 1
# for sub in edge.subs:
# sub_insidepaths, sub_beta = sub.sumparse(weights)
# edge.betasum = edge.insidepaths * sub_beta + sub_insidepaths * edge.betasum
# edge.insidepaths *= sub_insidepaths
# edge.betasum += edge.edge_score * edge.insidepaths
# score += edge.betasum
# insidepaths += edge.insidepaths
# self.insidepaths = insidepaths
# self.betasum = score + self.node_score
# return (self.insidepaths, self.betasum)
def bestparse(self, weights, use_min, dep=0):
'''now returns a triple (score, tree, fvector) '''
if self.bestres is not None:
return self.bestres
self.node_score = self.fvector.dot(weights)
if self._terminal:
self.beta = self.node_score
self.besttree = self.this_tree()
self.bestres = (self.node_score, self.besttree, self.fvector.__copy__()) ## caution copy TODO
else:
self.bestedge = None
for edge in self.edges:
if FLAGS.bp or not edge.rule.is_bp():
## weights are attached to the forest, shared by all nodes and hyperedges
score = edge.edge_score = edge.fvector.dot(weights) # TODO
fvector = edge.fvector.__copy__() ## N.B.! copy! TODO
subtrees = []
for sub in edge.subs:
sc, tr, fv = sub.bestparse(weights, use_min, dep+1)
score += sc
fvector += fv
subtrees.append(tr)
tree = edge.assemble(subtrees)
edge.beta = score
if self.bestedge is None or (score < self.bestedge.beta if use_min else score > self.bestedge.beta):
## print >> logs, self, edge
self.bestedge = edge
self.besttree = tree
best_fvector = fvector
self.beta = self.bestedge.beta + self.node_score
best_fvector += self.fvector ## nodefvector
self.bestres = (self.beta, self.besttree, best_fvector)
return self.bestres
def print_derivation(self, dep=0):
if not self.is_terminal():
print " " * dep, self.labelspan()
for sub in self.bestedge.subs:
sub.print_derivation(dep+1)
print
def getcandidates(self, dep=0):
self.cand = []
for edge in self.edges:
vecone = edge.vecone()
edge.oldvecs = set([vecone])
res = edge.getres(vecone, dep)
assert res, "bad at candidates"
self.cand.append( (res, edge, vecone) )
heapq.heapify (self.cand)
    def lazykbest(self, k, dep=0):
        """Lazily extend self.klist to (up to) k unique derivations.

        Pops candidates off the self.cand heap, skipping duplicate trees
        (tracked in self.kset); sets self.fixed once the heap is exhausted.
        self.last_edge_vecj remembers a popped candidate whose successors
        have not been pushed yet, so a later call resumes where this one
        stopped.  Python 2 code (print-chevron below).
        """
        now = len(self.klist)
        ## print >> logs, self, k, now
        if self.fixed or now >= k:
            return
        if self.cand is None:
            # first call on this node: build the initial candidate heap
            self.getcandidates(dep)
            self.last_edge_vecj = None
        if self.last_edge_vecj is not None:
            # push successors deferred by the previous call
            edge, vecj = self.last_edge_vecj
            edge.lazynext(vecj, self.cand, dep+1)
        while now < k:
            if self.cand == []:
                self.fixed = True
                return
            (score, tree, fvector), edge, vecj = heapq.heappop(self.cand)
            if tuple(tree) not in self.kset:
                ## assemble dynamically
                self.klist.append ((score, tree, fvector))
                self.kset.add(tuple(tree))
                now += 1
            else:
                if print_duplicates:
                    print >> logs, "*** duplicate %s: \"%s\", @%d(k=%d)" % (self, tree, now, k) #labespan
            if now < k: ## don't do extra work if you are done!
                edge.lazynext(vecj, self.cand, dep+1)
                self.last_edge_vecj = None
            else:
                self.last_edge_vecj = (edge, vecj)
def get_oracle_edgelist(self):
assert hasattr(self, "oracle_edge"), self
edge = self.oracle_edge
es = [edge]
for sub in edge.subs:
es += sub.get_oracle_edgelist()
return es
    def compute_oracle(self, weights, fbleu, flen, fwlen, model_weight=0, bleu_weight=1, memo=None):
        """Bottom-up oracle extraction, memoized per node id.

        For each hyperedge, assembles the children's oracle hypotheses and
        scores bleu_weight * BLEU * wlen - model_weight * model_score, where
        the BLEU reference length is scaled by this node's span ratio.
        Returns (bleu_score, hyp_string, fvector, edge_list).
        """
        if memo is None:
            memo = {}
        if self.iden in memo:
            return memo[self.iden]
        bleu = fbleu.copy()
        ratio = self.span_width() / float(flen)
        bleu.special_reflen = fbleu.single_reflen() * ratio # proportional reflen
        best_score = float("-inf")
        best_fv = None
        wlen = ratio * fwlen
        for edge in self.edges:
            fv = edge.fvector.__copy__() + self.fvector.__copy__() #N.B.:don't forget node feats!
            edges = [edge]
            hyps = []
            for sub in edge.subs:
                sub_s, sub_h, sub_fv, sub_es = sub.compute_oracle(weights, fbleu, flen, fwlen, model_weight, bleu_weight, memo)
                edges += sub_es
                hyps.append(sub_h)
                fv += sub_fv
            hyp = edge.assemble(hyps) ## TODO: use LM states instead!
            bleu_score = bleu.rescore(hyp) ## TODO: effective ref len!
            model_score = weights.dot(fv)
            #print wlen, ratio, flen, bleu.rescore(hyp), hyp
            ## interpolate with 1-best weights
            # Ties (within 1e-4, and only when model_weight == 0) are broken
            # by the lower model score.
            score = bleu_score * wlen * bleu_weight - model_score * model_weight # relative!
            if score > best_score or \
                   model_weight == 0 and math.fabs(score - best_score) < 1e-4 and \
                   (best_fv is None or model_score < best_model_score):
                best_score = score
                best_bleu_score = bleu_score
                best_model_score = model_score
                best_edges = edges
                best_hyp = hyp
                best_fv = fv
        # NOTE(review): if self.edges is empty, best_bleu_score & friends are
        # never bound and the next line raises UnboundLocalError -- confirm
        # that non-terminal nodes always have at least one edge.
        memo[self.iden] = (best_bleu_score, best_hyp, best_fv, best_edges)
        return memo[self.iden]
def substitute(varstr, subtrees):
    """Splice subtree strings into a mixed rule RHS and return the joined str.

    `varstr` is a list whose items are either literal strings or variable
    placeholders (any non-str item); each placeholder consumes the next entry
    of `subtrees` in order.  Empty strings (@UNKNOWN@) are dropped on both
    sides.
    """
    pieces = []
    next_var = 0
    for item in varstr:
        if type(item) is str:
            # literal token; skip empties (@UNKNOWN@ maps to "")
            if item:
                pieces.append(item)
        else:
            replacement = subtrees[next_var]
            next_var += 1
            if replacement != "":  # single @UNKNOWN@
                pieces.append(replacement)
    return " ".join(pieces)
class Hyperedge(object):
    """One way of building `head` from the child nodes `subs`, carrying a
    feature vector and a mixed string/variable right-hand side (`lhsstr`)."""
    def unary(self):
        """True for a non-root edge with a single child."""
        return not self.head.is_root() and len(self.subs) == 1
    def unary_cycle(self):
        """True when a unary edge loops on the same label (A -> A)."""
        return self.unary() and self.subs[0].label == self.head.label
    def __str__(self):
        return "%-17s -> %s " % (self.head, " ".join([str(x) for x in self.subs]))
    def shorter(self):
        ''' shorter form str: NP [3-5] -> DT [3-4] NN [4-5]'''
        return "%s -> %s " % (self.head.labelspan(include_id=False), \
                              " ".join([x.labelspan(include_id=False) \
                                        for x in self.subs]))
    def dotted_str(self, dot):
        ''' NP [3-5] -> DT [3-4] . NN [4-5]'''
        rhs = [(x.labelspan(include_id=False, space="") \
                if type(x) is Node else x) for x in self.lhsstr]
        rhs.insert(dot, ".")
        return "%s -> %s" % (self.head, " ".join(rhs))
    def shortest(self):
        ''' shortest form str: NP -> DT NN '''
        return "%s -> %s " % (self.head.label, " ".join([str(x.label) for x in self.subs]))
    __repr__ = __str__
    def __init__(self, head, tails, fvector, lhsstr):
        self.head = head
        self.subs = tails
        self.fvector = fvector
        # lhsstr is a list of either variables (type "Node") or strings
        # like ["thank", node_5, "very", "much"]
        self.lhsstr = lhsstr
        #self.rhsstr = rhsstr
        self._hash = hash((head, ) + tuple(lhsstr))
    def arity(self):
        """Number of child nodes."""
        return len(self.subs)
    def vecone(self):
        """The all-zeros rank vector: the 1-best choice for every child."""
        return (0,) * self.arity()
    def compatible(self, tree, care_POS=False):
        # NOTE(review): returns None (falsy) rather than False when the
        # arities differ -- fine for truthiness tests, not for `is False`.
        if self.arity() == tree.arity():
            for sub, tsub in zip(self.subs, tree.subs):
                if not sub.compatible(tsub, care_POS):
                    return False
            return True
    def assemble(self, subtrees):
        """Splice child strings into this edge's RHS (see substitute())."""
        return substitute(self.lhsstr, subtrees)
    def getres(self, vecj, dep=0):
        """Build the derivation that picks the vecj[i]-th best of child i;
        returns (score, string, fvector), or None when some child cannot
        supply that rank even after lazily extending its k-best list."""
        score = self.edge_score
        fvector = self.fvector + self.head.fvector
        subtrees = []
        for i, sub in enumerate(self.subs):
            if vecj[i] >= len(sub.klist) and not sub.fixed:
                sub.lazykbest(vecj[i]+1, dep+1)
            if vecj[i] >= len(sub.klist):
                return None
            sc, tr, fv = sub.klist[vecj[i]]
            subtrees.append(tr)
            score += sc
            fvector += fv
        return (score, self.assemble(subtrees), fvector)
    def lazynext(self, vecj, cand, dep=0):
        """Push each successor of vecj (one rank deeper in one dimension)
        onto the candidate heap, deduplicated via self.oldvecs.  Python 2
        (xrange)."""
        for i in xrange(self.arity()):
            ## vecj' = vecj + b^i (just change the i^th dimension
            newvecj = vecj[:i] + (vecj[i]+1,) + vecj[i+1:]
            if newvecj not in self.oldvecs:
                newres = self.getres(newvecj, dep)
                if newres is not None:
                    self.oldvecs.add (newvecj)
                    heapq.heappush(cand, (newres, self, newvecj))
    @staticmethod
    def _deriv2tree(edgelist, i=0):
        '''convert a derivation (a list of edges) to a tree, using assemble
        like Tree.parse, returns (pos, tree) pair
        '''
        edge = edgelist[i]
        node = edge.head
        subs = []
        for sub in edge.subs:
            if not sub.is_terminal():
                i, subtree = Hyperedge._deriv2tree(edgelist, i+1)
            else:
                subtree = sub.this_tree()
            subs.append(subtree)
        return i, edge.assemble(subs)
    @staticmethod
    def deriv2tree(edgelist):
        """Convert a pre-order edge list into its assembled string."""
        _, tree = Hyperedge._deriv2tree(edgelist)
        ## list => string
        return tree
    @staticmethod
    def deriv2fvector(edgelist):
        '''be careful -- not only edge fvectors, but also node fvectors, including terminals'''
        fv = Vector()
        for edge in edgelist:
            fv += edge.fvector + edge.head.fvector
            for sub in edge.subs:
                if sub.is_terminal():
                    fv += sub.fvector
        return fv
    def __hash__(self):
        return self._hash
| true |
67b2247c133c4dd2a9277485859bcb5bfafad63b | Python | Majoras-Kid/RedShellDetector | /src/redshelldetector.py | UTF-8 | 3,888 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import argparse
import string
import os
import subprocess
#sudo pip install git+https://github.com/toastdriven/pyskip.git
import pyskip as skiplist
# Directory to scan; set from the CLI arguments in crawl_directory().
TARGET_DIRECTORY = ""
REDSHELL_LIST = []  # NOTE(review): appears unused in this file
# Skip list of every function name exported by the RedShell reference DLLs.
REDSHELL_FUNCTIONS = skiplist.Skiplist()
# Maps scanned file path -> number of reference functions it exports.
FUNCTION_COUNTER_PER_FILE = dict()
def parse_arguments():
    """Parse the command line: one positional argument, the directory to scan."""
    parser = argparse.ArgumentParser(
        description="Python script to detect the presence of the RedShell malware")
    parser.add_argument('directory', metavar='dir', type=str,
                        help="Directory that will be checked,")
    return parser.parse_args()
def identify_dll_functions(dll_name):
    """Run ``objdump -p`` on the given DLL and return a newline-terminated
    string of symbol names (the last whitespace token of each output line)."""
    print("Executing: objdump -p %s" % dll_name)
    raw_output = subprocess.getoutput("objdump -p %s" % dll_name)
    # Keep the trailing newline per name, matching the historic format.
    names = [line.split(" ")[-1] for line in raw_output.splitlines()]
    return "".join(name + "\n" for name in names)
def extract_redshell_functions_from_reference_file():
    """Populate the REDSHELL_FUNCTIONS skip list with every function name
    exported by the reference DLLs under redshell_reference/.

    Side effect: inserts into the module-level REDSHELL_FUNCTIONS.
    """
    global REDSHELL_FUNCTIONS
    print("Extracting all reference RedShell files")
    REDSHELL_PATH = "redshell_reference/"
    for name in os.listdir(REDSHELL_PATH):
        print("Filename: %s" %name)
        # REDSHELL_PATH ends with '/', so plain concatenation is a valid path.
        function_names = (identify_dll_functions("%s%s" % (REDSHELL_PATH,name)))
        for func_name in function_names.split("\n"):
            if func_name not in REDSHELL_FUNCTIONS and func_name != "":
                #print("Adding Func:%s to REDSHell" % func_name)
                REDSHELL_FUNCTIONS.insert(func_name)
    #print("Redshell_functions skiplist:")
    print("REDSHELL_FUNCTIONS created")
def check_target_dll_with_redshell_reference(target_dll_functions):
    """Count how many of the newline-separated function names occur in the
    global RedShell reference skip list."""
    return sum(1 for name in target_dll_functions.split("\n")
               if name in REDSHELL_FUNCTIONS)
def print_statistic():
    """Dump the per-file counts of matched RedShell reference functions."""
    print("\n#### Printing generated statistic")
    print("Print number of functions of RedShell reference found in each file")
    print("File\t\t\t\t\t\t\tCount\n")
    for path in FUNCTION_COUNTER_PER_FILE:
        print("%s\t\t\t\t\t%s" % (path, FUNCTION_COUNTER_PER_FILE[path]))
def crawl_directory():
    """Walk the directory given on the command line, run every .dll through
    the RedShell reference check, and record per-file match counts.

    Side effects: sets TARGET_DIRECTORY, fills FUNCTION_COUNTER_PER_FILE,
    and prints a summary via print_statistic().
    """
    global TARGET_DIRECTORY
    global FUNCTION_COUNTER_PER_FILE
    args = parse_arguments()
    TARGET_DIRECTORY = args.directory
    print("Checking directory: %s" % TARGET_DIRECTORY)
    for dirName, subdirList, fileList in os.walk(TARGET_DIRECTORY):
        # Flag suspicious directory names.  (Fixed typo: was "reshell",
        # which is not a substring of "redshell" and so never matched.)
        if "redshell" in dirName.lower():
            print("Found Path: %s" % dirName)
        for fname in fileList:
            # Flag suspicious file names outright.
            if "redshell" in fname.lower():
                print('Found File: %s/%s' % (dirName, fname))
            if ".dll" not in fname:
                continue
            print("Testing %s" % fname)
            print("Testing file %s/%s for function presence" % (dirName, fname))
            # Build a real path (was "%s%s", which dropped the separator and
            # handed objdump a nonexistent file name).
            full_path = os.path.join(dirName, fname)
            function_names = identify_dll_functions(full_path)
            counter = check_target_dll_with_redshell_reference(function_names)
            if full_path not in FUNCTION_COUNTER_PER_FILE:
                FUNCTION_COUNTER_PER_FILE[full_path] = counter
            else:
                print("Key %s already in map" % full_path)
    print_statistic()
# Script entry: build the reference function list, then scan the target
# directory given on the command line.
#crawl_directory()
extract_redshell_functions_from_reference_file()
crawl_directory()
#identify_dll_functions("examples/example.dll") | true |
42d0c77b12ad4917e0590719c4a701ac3becf886 | Python | all1m-algorithm-study/2021-1-Algorithm-Study | /week3/Group2/boj1629_kir3i.py | UTF-8 | 329 | 3.015625 | 3 | [] | no_license | import sys
# Rebind input to sys.stdin.readline for faster reads (competitive-programming idiom).
input = sys.stdin.readline
def solv(A, B, C):
    """Return A**B mod C by divide-and-conquer fast exponentiation.

    Fix: the original odd-exponent branch returned the unreduced product
    ``square * A``, letting intermediates grow far beyond C until the
    caller's final ``% C``.  Every return value here is already reduced,
    which is backward compatible with that final reduction.
    """
    if B <= 2:
        return (A ** B) % C
    half = solv(A, B // 2, C)
    square = (half * half) % C
    if B % 2 == 1:
        return (square * A) % C
    return square
if __name__ == '__main__':
    # BOJ 1629: read A B C and print A^B mod C.
    A, B, C = map(int, input().strip().split())
    print(solv(A, B, C) % C)
| true |
dce0adb6a198dc43dd62feaa5d94648efe63604d | Python | supermariogo/assign-ee | /caesar/caesar.py | UTF-8 | 7,179 | 3.125 | 3 | [] | no_license | #
# Name:
# ID:
# Date: March 8, 2015
import sys
class CaesarCipher:
    """Caesar cipher over the 62-symbol alphabet 0-9, A-Z, a-z.

    Each symbol maps to an index 0..61 (digits 0-9, uppercase 10-35,
    lowercase 36-61); encryption shifts indices modulo 62.  Characters
    outside the alphabet pass through unchanged.  Shifts of 26 and 36 are
    rejected -- presumably because they map one character class onto
    another ambiguously (TODO confirm the original rationale).
    """
    def __init__(self):
        # hash_table: character -> index in the 62-symbol alphabet.
        self.hash_table={}
        for c in "0123456789":
            self.hash_table[c] = ord(c)-ord('0')
        for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
            self.hash_table[c] = ord(c)-ord('A')+10
        for c in "abcdefghijklmnopqrstuvwxyz":
            self.hash_table[c] = ord(c)-ord('a') +36
        # index_list is the inverse mapping: index -> character.
        self.index_list="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
        self.decrypted_text=""
        self.encrypted_text=""
    def ClearALL(self):
        """Discard both text buffers."""
        self.decrypted_text=""
        self.encrypted_text=""
    def PrintDecrypted(self):
        """Print the plaintext buffer."""
        print(self.decrypted_text)
    def PrintEncrypted(self):
        """Print the ciphertext buffer."""
        print(self.encrypted_text)
    def Encrypt(self, shift):
        """Encrypt decrypted_text into encrypted_text with the given shift.
        Silently does nothing for invalid shifts (<1, >61, 26, 36)."""
        if shift<1 or shift >61 or shift==26 or shift==36:
            return
        self.encrypted_text=""
        for c in self.decrypted_text:
            if c in self.index_list:
                new_index = self.hash_table[c]+shift
                if new_index > 61:
                    new_index = new_index-62
                if new_index <0:
                    new_index = 62 + new_index # 0 1 2....61
                c = self.index_list[new_index]
            self.encrypted_text=self.encrypted_text+c
        #print"After: "+self.encrypted_text
    def Decrypt(self, shift):
        """Inverse of Encrypt: shift encrypted_text back into decrypted_text.
        Same silent rejection of invalid shifts."""
        if shift<1 or shift >61 or shift==26 or shift==36:
            return
        self.decrypted_text=""
        for c in self.encrypted_text:
            if c in self.index_list:
                new_index = self.hash_table[c]-shift
                if new_index > 61:
                    new_index = new_index-62
                if new_index <0:
                    new_index = 62 + new_index # 0 1 2....61
                c = self.index_list[new_index]
            self.decrypted_text=self.decrypted_text+c
        #print"After"+self.decrypted_text
    def LoadEncryptedFile(self, filename):
        """Read filename into encrypted_text; False on missing/empty file.
        NOTE(review): the bare except also hides non-I/O errors."""
        try:
            with open (filename, "r") as myfile:
                self.encrypted_text=myfile.read()
            if self.encrypted_text=="":
                print("File is empty")
                return False
            else:
                return True
        except:
            print("File not exist")
            return False
    def LoadDecryptedFile(self, filename):
        """Read filename into decrypted_text; False on missing/empty file."""
        try:
            with open (filename, "r") as myfile:
                self.decrypted_text=myfile.read()
            if self.decrypted_text=="":
                print("File is empty")
                return False
            else:
                return True
        except:
            print("File not exist")
            return False
    def SaveEncryptedFile(self, filename):
        """Write encrypted_text to filename (overwrites)."""
        with open (filename, "w") as newfile:
            newfile.write(self.encrypted_text)
    def SaveDecryptedFile(self, filename):
        """Write decrypted_text to filename (overwrites)."""
        with open (filename, "w") as newfile:
            newfile.write(self.decrypted_text)
    def ShowALL(self):
        """Debug helper: print both buffers."""
        print("decry is:")
        print(self.decrypted_text)
        print("encry is")
        print(self.encrypted_text)
    def DetermineShift(self):
        """Guess the shift by comparing letter frequencies of each candidate
        decryption against English letter frequencies (squared error).

        NOTE(review): after the first error.remove(), the positions in
        `error` no longer equal shift values, so the 2nd and 3rd reported
        candidates can be off -- indices should be captured before removal.
        Also range(0,61) never tries shift 61, and Decrypt refuses shift 0,
        26 and 36, leaving those entries at 0.0.
        """
        # english leetter frequency
        cor=[0.64297,0.11746,0.21902,0.33483,1.00000,0.17541,
        0.15864,0.47977,0.54842,0.01205,0.06078,0.31688,0.18942,
        0.53133,0.59101,0.15187,0.00748,0.47134,0.49811,0.71296,
        0.21713,0.07700,0.18580,0.01181,0.15541,0.00583]
        error=[0.0]*61
        for shift in range(0,61):
            self.Decrypt(shift)
            arr = self.freq(self.decrypted_text)
            e=0.0
            for j in range(0, 26):
                e+=abs(arr[j]-cor[j])**2
            error[shift]=e;
        result=[]
        result.append(error.index(min(error)))
        error.remove(min(error))
        result.append(error.index(min(error)))
        error.remove(min(error))
        result.append(error.index(min(error)))
        error.remove(min(error))
        print(result)
    def freq(self, text):
        """Relative frequency (normalized by the max) of each lowercase
        letter in text.

        NOTE(review): `x>97` excludes 'a' itself (should be >=97), and
        max(arr) is recomputed after earlier entries were normalized, so
        the scaling drifts -- the max should be captured once before the
        division loop.
        """
        arr=[0.0]*26
        for ch in text:
            x=ord(ch)
            if(x>97 and x<=122):
                arr[x-97]+=1.0
        for i in range(0,26):
            try:
                arr[i]/=max(arr)
            except:
                continue
        return arr
def display_menu():
    """Print the interactive command menu."""
    menu = '''C Clear All
L Load Encrypted File
R Read Decrypted File
S Store Encrypted File
W Write Decrypted File
O Output Encrypted Text
P Print Decrypted Text
E Encrypt Decrypted Text
D Decrypted Encrypted Text
Q Quit
G Debug
--------------------------'''
    print(menu)
    return
def run_choice(MyCaesarCipher, choice):
    """Dispatch one upper-cased menu choice onto the cipher object.

    Fix: choice 'L' ("Load Encrypted File" in the menu) called
    LoadDecryptedFile, making it identical to 'R'; it now loads the
    encrypted buffer as advertised.  Unknown choices are a no-op; 'Q'
    exits the process.
    """
    if choice=='C':
        MyCaesarCipher.ClearALL()
    elif choice=='L':
        filename = input("Enter Filename> ")
        MyCaesarCipher.LoadEncryptedFile(filename)
    elif choice=='R':
        filename = input("Enter Filename> ")
        MyCaesarCipher.LoadDecryptedFile(filename)
    elif choice=='S':
        filename = input("Enter Filename> ")
        MyCaesarCipher.SaveEncryptedFile(filename)
    elif choice=='W':
        filename = input("Enter Filename> ")
        MyCaesarCipher.SaveDecryptedFile(filename)
    elif choice=='O':
        MyCaesarCipher.PrintEncrypted()
    elif choice=='P':
        MyCaesarCipher.PrintDecrypted()
    elif choice=='E':
        shift = int(input("Enter Shift Amount> "))
        MyCaesarCipher.Encrypt(shift)
    elif choice=='D':
        shift = int(input("Enter Shift Amount> "))
        MyCaesarCipher.Decrypt(shift)
    elif choice=='Q':
        sys.exit(0)
    elif choice=="G":
        MyCaesarCipher.ShowALL()
if __name__ == '__main__':
    # CLI modes:
    #   no args            -> interactive menu loop
    #   shift infile       -> encrypt (shift>0), decrypt (shift<0),
    #                         or guess the shift (shift==0)
    #   3 args             -> brute-force: print all 62 decryptions
    MyCaesarCipher = CaesarCipher()
    #print len(sys.argv)
    if(len(sys.argv)==1):
        # no argument
        while(1):
            display_menu()
            choice = input("Enter Choice> ")
            run_choice(MyCaesarCipher, choice.upper())
    elif(len(sys.argv)==3):
        try:
            shift = int(sys.argv[1])
            # NOTE(review): abs(shift)<0 is always False; likely meant ==0
            # or was a leftover from a pre-abs() version.
            if abs(shift)<0 or abs(shift) >61 or abs(shift)==26 or abs(shift)==36:
                print("Invalid syntax: caesar shift infile [outfile]")
                sys.exit(0)
            inputfile=sys.argv[2]
            if shift>0:
                MyCaesarCipher.LoadDecryptedFile(inputfile)
                MyCaesarCipher.Encrypt(abs(shift))
                MyCaesarCipher.PrintEncrypted()
            elif shift<0:
                MyCaesarCipher.LoadEncryptedFile(inputfile)
                MyCaesarCipher.Decrypt(abs(shift))
                MyCaesarCipher.PrintDecrypted()
            elif shift==0:
                MyCaesarCipher.LoadEncryptedFile(inputfile)
                MyCaesarCipher.DetermineShift()
        except ValueError:
            # NOTE(review): the leading "a" in this message is a typo.
            print("aInvalid syntax: caesar shift infile [outfile]")
            sys.exit(0)
    elif(len(sys.argv)==4):
        inputfile=sys.argv[2]
        MyCaesarCipher.LoadEncryptedFile(inputfile)
        for i in range(0,62):
            print(i)
            MyCaesarCipher.Decrypt(i)
            MyCaesarCipher.PrintDecrypted()
    else:
        print("Invalid syntax: caesar shift infile [outfile]")
| true |
7cfacaa5b774ff67d39cc13a8a7544adbf86c208 | Python | abdullahzameek/watson_stuff | /app.py | UTF-8 | 1,355 | 2.515625 | 3 | [] | no_license | import re
import flask
import requests
import json
from flask_cors import CORS
from flask import request
# Flask app with permissive CORS so a browser front end can call it.
app = flask.Flask(__name__)
CORS(app)
# IBM Watson NLU endpoint and basic-auth credentials.
# SECURITY(review): the API key is hardcoded in source control; move it to
# an environment variable or secrets store and rotate the exposed key.
url = 'https://gateway-lon.watsonplatform.net/natural-language-understanding/api/v1/analyze'
user = "apiKey"
pw = "NYrce6xil-76pPdybo0xaNLtf2u2a1iM7zrQlKDppETF"
@app.route("/getSentiment", methods=['POST', 'GET'])
def getSentiment():
    """Proxy the posted JSON {"text": ...} to Watson NLU sentiment analysis
    and return the document-level sentiment as a JSON string.

    NOTE(review): no error handling -- a request without a JSON body or a
    Watson error response raises and becomes a 500.
    """
    print('here is the request json')
    print(request.json)
    payload_text = request.json['text']
    payload = {
        'version': '2020-08-01',
        'features': 'sentiment',
        'text':payload_text
    }
    resp = requests.get(url, params=payload, auth=(user, pw))
    content = resp.content.decode()
    content2 = json.loads(content)
    # print(content2['sentiment']['document'])
    return json.dumps(content2['sentiment']['document'])
@app.route("/getEmotion", methods=['POST', 'GET'])
def getEmotion():
    """Proxy the posted JSON {"text": ...} to Watson NLU emotion analysis
    and return the document-level emotion scores as a JSON string.

    NOTE(review): same missing error handling as getSentiment.
    """
    print('here is the request json')
    print(request.json)
    payload_text = request.json['text']
    payload = {
        'version': '2020-08-01',
        'features': 'emotion',
        'text':payload_text
    }
    resp = requests.get(url, params=payload, auth=(user, pw))
    content = resp.content.decode()
    content2 = json.loads(content)
    print(content2['emotion']['document']['emotion'])
    return json.dumps(content2['emotion']['document']['emotion'])
| true |
13e54b1f4a9316b29f943915b282eb6613944cfd | Python | ASY246/DeepLearningFromScratch | /Tensorflow/TensorFlowHDFS.py | UTF-8 | 1,660 | 2.53125 | 3 | [] | no_license | import tensorflow as tf
# Softmax-regression MNIST trainer reading one-hot CSV rows (784 pixels +
# 10 label columns) straight from HDFS via a TF1 input queue.
# NOTE(review): tf.pack / tf.to_float / string_input_producer are pre-1.0
# TensorFlow APIs (later tf.stack / tf.cast / tf.data) -- this script needs
# an old TF build; confirm the installed version.
IMAGE_PIXELS = 28
filenames = ['hdfs://default/user/bdusr01/asy/mergeOneHot.csv']
filename_queue = tf.train.string_input_producer(filenames, shuffle=False) # enqueue the filename sequence
reader = tf.TextLineReader() # reader that yields newline-delimited lines from the queued files
key, value = reader.read(filename_queue) # returns the next record produced by the reader
lines = tf.decode_csv(value, record_defaults=[[0] for i in range(794)])
features = tf.pack([*[lines[:-10]]])
labels = tf.pack([*lines[-10:]])
'''--------------------------------------------------------------------------------------'''
W = tf.Variable(tf.zeros([784, 10]))
W = tf.to_float(W)
b = tf.Variable(tf.zeros([10]))
b = tf.to_float(b)
x = tf.reshape(features, [1, IMAGE_PIXELS*IMAGE_PIXELS]) # feed the pipeline tensor in directly (no placeholder)
x = tf.to_float(x)
y = tf.nn.softmax(tf.matmul(x, W) + b)
#y_ = tf.placeholder(tf.float32, [None, 10])
y_ = tf.reshape(labels,[1,10])
y_ = tf.to_float(y_)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    coord = tf.train.Coordinator() # create a coordinator to manage the queue threads
    threads = tf.train.start_queue_runners(coord=coord) # start the QueueRunners; the filename queue is now populated
    for i in range(1000):
        sess.run(train_step)
        # NOTE(review): building new accuracy ops every iteration grows the
        # graph; they should be constructed once outside the loop.
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print(sess.run(accuracy))
    coord.request_stop()
coord.join(threads) | true |
e0203888c1acf3e601b61328f89a18335b742f97 | Python | aklap/python-crash-course | /ch-13/ball/run_game.py | UTF-8 | 915 | 3.3125 | 3 | [] | no_license | import pygame
import game_functions as gf
from settings import Settings
from stats import Stats
def run_game():
    """Initialize pygame, create the sprites, and run the main event loop.

    Fix: in the original, the `else:` at the bottom of the loop was detached
    from its `if stats.game_active:` (an unindented statement sat between
    them), which is a SyntaxError and made the module unimportable.  The
    screen re-render now happens on the active-game path and the inactive
    path prints 'game over', matching the apparent intent.
    """
    pygame.init()
    settings = Settings()
    # Create window and caption.
    screen = pygame.display.set_mode((1200, 800))
    pygame.display.set_caption('Catch Game')
    # Sprites and game statistics.
    ball = gf.create_ball(screen)
    person = gf.create_person(screen)
    stats = Stats(settings)
    # Main loop.
    while True:
        gf.check_events(person)
        if stats.game_active:
            # Update sprite positions, then re-render the screen.
            gf.update_person(person)
            gf.update_ball(ball, person, settings, stats)
            gf.update_screen(screen, ball, person)
        else:
            print('game over')
# Entry point: start the game loop (runs until the process exits).
run_game()
| true |
bb328a2be39325c25b4f822181549e1323fe395d | Python | bigdog156/Trie | /run.py | UTF-8 | 2,074 | 3 | 3 | [] | no_license | from trie import Node, Trie
import re
def makeTrie(words, CreateTrie):
    """Insert every [seek_index, keyword] pair of `words` into the given
    trie (keyword as key, seek index as payload) and return the trie."""
    for entry in words:
        seek_index, keyword = entry[0], entry[1]
        CreateTrie.insert(keyword, seek_index)
    return CreateTrie
# Parse data.txt into a list of [seekIndex, keyword] entries
def processFileToArray(PATH):
    """Parse the data file into a list of [seek_index, keyword] pairs.

    Each line has the form "<seek_index>:<keyword>"; the index is converted
    to int.  Fixes vs. the original: the file handle is closed via `with`
    (it was leaked), 'r' replaces the needlessly writable 'r+', and the
    newline is stripped explicitly instead of chopping the last character
    (which ate a letter on a final line without a trailing newline).
    NOTE(review): re.split(":") splits on every colon, so keywords must not
    contain ':' -- matching the format written by preProcessFile.
    """
    with open(PATH, 'r') as data:
        listData = [re.split(":", line.rstrip("\n")) for line in data]
    for entry in listData:
        entry[0] = int(entry[0])
    return listData
# Preprocess the two files: the meanings file and the text file
def preProcessFile(pathText, pathMean):
    """Build repository/data.txt mapping each keyword line of pathText to
    the offset of the matching meaning line in pathMean.

    Output line i is "<offset_of_mean_line_i>:<text_line_i>".  Returns the
    offset just past the last meaning line (the total length of the
    meanings file), or 0 for an empty meanings file (the original raised
    NameError there).  All three handles are now closed via `with` (the
    two input handles were leaked).
    NOTE(review): len() counts characters, but the offsets are consumed by
    file.seek() elsewhere -- this only agrees for single-byte text; confirm
    the files are ASCII.
    """
    with open(pathText, 'r') as f:
        listText = list(f)
    with open(pathMean, 'r') as f:
        listMean = list(f)
    indexSeek = 0
    with open("repository/data.txt", 'w') as fileData:
        for i, mean_line in enumerate(listMean):
            # Write the offset of mean line i, then advance past it.
            fileData.write(str(indexSeek) + ':' + listText[i])
            indexSeek += len(mean_line)
    return indexSeek
# Add a node to the trie
def addNodeTrie(word, mean,trieCurrent):
    """Append a new (word, meaning) entry to the repository files and the trie.

    The meaning is appended to mean.txt; its start offset is recorded both
    in data.txt ("<offset>:<word>") and as the trie payload.  Returns 1 on
    insert, 0 when the word already exists.
    NOTE(review): when the word exists (early return 0), both handles are
    leaked -- they are only closed on the insert path.  Offsets from tell()
    are byte positions while other code mixes them with len() character
    counts; only safe for ASCII data -- confirm.
    """
    fileMean = open("repository/mean.txt",'a+')
    fileData = open("repository/data.txt",'a+')
    checkInData = trieCurrent.searchTrie(word)
    if checkInData == -1 or checkInData == -2:
        # Append to mean.txt and compute the offset where the meaning starts.
        fileMean.seek(0,2)
        lenMean = len(mean)
        fileMean.write(mean+"\n")
        indexMean = fileMean.tell() - lenMean -1
        # Record the seek position of the meaning in data.txt.
        fileData.seek(0,2)
        fileData.write(str(indexMean)+":"+word+"\n")
        fileData.close()
        fileMean.close()
        trieCurrent.insert(word,indexMean)
        return 1
    return 0
# Handle updating an existing trie entry
def updateTrie(word, mean, trie):
    """Overwrite the stored meaning of *word* in repository/mean.txt.

    Looks the word up in the trie to get the offset of its meaning, seeks
    there and writes *mean* in place.  Returns 1 on success, 0 when the
    word is unknown.

    Fixes vs. the original: the file is opened 'r+' instead of 'wb+'
    (which truncated the entire meanings file on open, and then raised
    TypeError writing str to a binary handle); the unused data.txt handle
    and the debug print are gone; the handle is closed via `with`.
    NOTE(review): the new meaning must not be longer than the old one, or
    it will clobber the following entry -- same limitation as before.
    """
    offset = trie.searchTrie(word)
    if offset >= 0:
        with open("repository/mean.txt", 'r+') as fileMean:
            fileMean.seek(offset)
            fileMean.write(mean)
        return 1
    return 0
| true |
196b3b5e1b44c274b8b837e5ced30674f8335548 | Python | mridubhatnagar/HackerRank | /Algorithms/20-MigratoryBirds.py | UTF-8 | 1,559 | 3.9375 | 4 | [] | no_license | """
You have been asked to help study the population of birds migrating across the continent.
Each type of bird you are interested in will be identified by an integer value.
Each time a particular kind of bird is spotted, its id number will be added to your array
of sightings. You would like to be able to find out which type of bird is most common
given a list of sightings. Your task is to print the type number of that bird and if two
or more types of birds are equally common, choose the type with the smallest ID number.
Input Format
The first line contains an integer denoting n, the number of birds sighted and reported in the array ar.
The second line describes ar as n space-separated integers representing the type numbers of each bird
sighted.
Output Format
Print the type number of the most common bird; if two or more types of birds are equally common, choose the type with the smallest ID number.
Sample Input 0
6
1 4 4 4 5 3
Sample Output 0
4
"""
#!/bin/python3
import os
import sys
# Complete the migratoryBirds function below.
def migratoryBirds(n, ar):
    """Return the id of the most frequently sighted bird; ties go to the
    smallest id.

    Generalized from the original hard-coded ``range(1, 6)``: any bird id
    now works (the old version silently returned a wrong answer for ids
    outside 1..5).  *n* is kept for interface compatibility -- it is the
    length of *ar* in the problem statement and is not otherwise needed.
    """
    from collections import Counter
    counts = Counter(ar)
    max_count = max(counts.values())
    # Smallest id among the most common ones (tie-break per the spec).
    return min(bird for bird, c in counts.items() if c == max_count)
if __name__ == '__main__':
    # Reads n and the sighting list from stdin; prints the answer instead of
    # writing to OUTPUT_PATH (the HackerRank boilerplate is commented out).
    #fptr = open(os.environ['OUTPUT_PATH'], 'w')
    ar_count = int(input())
    ar = list(map(int, input().rstrip().split()))
    result = migratoryBirds(ar_count, ar)
    print(result)
    #fptr.write(str(result) + '\n')
    #fptr.close()
| true |
9cc2b2a0672dd2b089c8a122be53f531a50b6225 | Python | erictroebs/wikigraph | /wikigraph/cli/NamedParameter.py | UTF-8 | 778 | 2.90625 | 3 | [] | no_license | class NamedParameter:
def __init__(self, name, description, expects=None, default=None, parse=None):
if not isinstance(name, list):
name = [name]
self.name = name
self.description = description
self.expects = expects
self.default = default
self.parse = parse
self.value = None
@property
def normalized_name(self):
return list(map(lambda n: ('-' if len(n) == 1 else '--') + n, self.name))
def add(self, value):
if self.parse is not None:
value = self.parse(value)
if self.value is None:
self.value = value
return
if not isinstance(self.value, list):
self.value = [self.value]
self.value.append(value)
| true |
40b64a6dc2382e53773e04a997b604e4d21df27e | Python | mozilla/hera | /hera/__init__.py | UTF-8 | 2,865 | 2.515625 | 3 | [] | no_license | import os
from urlparse import urlparse
from suds.client import Client
from suds.transport.http import HttpAuthenticated
from suds.xsd.doctor import ImportDoctor, Import
class Hera:
    """Thin SOAP client for the Zeus ZXTM web-cache API (via suds)."""
    def __init__(self, username, password, location, wsdl="System.Cache.wsdl"):
        # Sorry windows
        cur = os.path.dirname(__file__)
        url = "file://%s" % os.path.abspath(os.path.join(cur, 'wsdl', wsdl))
        # Apparently Zeus's wsdl is broken and we have to jimmy this thing in
        # manually. See https://fedorahosted.org/suds/ticket/220 for details.
        # Also, you'll be happy to know that the zillion .wsdl files that Zeus
        # includes apparently have different targetNamespace's. This one
        # happens to be 1.1, but if you load something else you'll probably
        # need to adjust it.
        imp = Import('http://schemas.xmlsoap.org/soap/encoding/')
        imp.filter.add('http://soap.zeus.com/zxtm/1.1/')
        doctor = ImportDoctor(imp)
        transporter = HttpAuthenticated(username=username, password=password)
        self.client = Client(url, doctor=doctor, location=location,
                             transport=transporter)
    def flushAll(self):
        """Flushes everything in the system: all objects across all virtual
        servers."""
        return self.client.service.clearWebCache()
    def getGlobalCacheInfo(self):
        """Returns a small object of statistics."""
        return self.client.service.getGlobalCacheInfo()
    def flushObjectsByPattern(self, url, return_list=False):
        """Flush objects out of the cache. This accepts simple wildcards (*)
        in the host and/or path. If return_list is True we'll return a list of
        URLs that matched the pattern. There is a performance hit when
        returning the list since we have to request it, build it, and return
        it. """
        if return_list:
            objects = self.getObjectsByPattern(url)
        o = urlparse(url)
        r = self.client.service.clearMatchingCacheContent(o.scheme,
                                                          o.netloc,
                                                          o.path)
        # NOTE(review): the comprehension below rebinds `o` to each suds
        # cache item (with .protocol/.host/.path attributes), shadowing the
        # urlparse result above -- confusing but presumably intentional;
        # confirm against the ZXTM getCacheContent schema.
        if return_list and objects:
            return ["%s://%s%s" % (o.protocol, o.host, o.path)
                    for o in objects]
        else:
            return []
    def getObjectByPattern(self, url):
        """A simple convenience function. If you have a full URL and you want
        a single object back, this is the one."""
        return self.getObjectsByPattern(url, 1)
    def getObjectsByPattern(self, url, limit=None):
        """Return the cached items matching the URL pattern, or None when
        nothing matches (implicit return)."""
        o = urlparse(url)
        r = self.client.service.getCacheContent(o.scheme, o.netloc,
                                                o.path, limit)
        if r.number_matching_items:
            return r.matching_items
| true |
8123e14842c579914c17eeecaf6fea59dcb36d8d | Python | wdsrocha/anime-recommender | /src/lib/content_based_recommender.py | UTF-8 | 618 | 2.5625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
from sklearn.neighbors import NearestNeighbors
def setup_content_based_recommender(n_neighbors=6):
    """Fit a ball-tree k-NN model on the processed feature CSV and precompute
    the neighbor tables.

    Returns (distances, indices, anime_indices, rev_d) where `indices` are
    row positions into the features frame, `anime_indices` is the same
    table translated to anime_id values, and `rev_d` maps anime_id back to
    row position.  Reads data/processed_features.csv and
    data/processed_anime.csv relative to the working directory.
    """
    features = pd.read_csv("data/processed_features.csv")
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm="ball_tree").fit(
        features
    )
    distances, indices = nbrs.kneighbors(features)
    anime = pd.read_csv("data/processed_anime.csv")
    # row position -> anime_id
    d = dict(zip(anime.index, anime.anime_id))
    anime_indices = np.vectorize(lambda i: d[i])(indices)
    # anime_id -> row position (inverse mapping)
    rev_d = dict(zip(anime.anime_id, anime.index))
    return (distances, indices, anime_indices, rev_d)
| true |
7e664fa9ded87a27276b27c5b0ce6c527541dc32 | Python | domiee13/ttud | /ex02.py | UTF-8 | 594 | 3.59375 | 4 | [] | no_license | # Viết chương trình kiểm tra một số nguyên dương bất kỳ (2 chữ số trở lên, không quá 9 chữ số) có chữ số bắt đầu và kết thúc bằng nhau hay không.
# Input: the first line gives the number of test cases; each test case is one positive integer on its own line.
# Output: for each test case print YES or NO accordingly.
t = int(input())
for i in range(t):
s = input()
print(s[0],s[-1])
if s[0]==s[-1]:
print("YES")
else:
print("NO") | true |
c8ad4f9c7071fa2cf2f5d94a1585b5a75be806b4 | Python | niteshthali08/Disaster-Notofication | /wikipedia.py | UTF-8 | 964 | 2.859375 | 3 | [] | no_license | import requests
def clean_uni_gram_candidates(uni_grams, wiki_term):
    """Return the uni-grams not covered by any matched wiki term.

    `wiki_term` is a list of [bi_gram, title] pairs; every word of each
    bi_gram counts as "known".  Generalized from the original, which
    assumed exactly two words per term (and raised IndexError otherwise);
    a set gives O(1) membership tests.  Order and duplicates of
    `uni_grams` are preserved.
    """
    knowns = set()
    for term in wiki_term:
        knowns.update(term[0].split(' '))
    return [word for word in uni_grams if word not in knowns]
def get_title(url):
    """Query a MediaWiki API URL and return the page title, or None when
    the page does not exist (non-positive page id).

    Fix: ``dict.keys()[0]`` raises TypeError on Python 3 (keys() is a
    view, not a list); ``next(iter(...))`` takes the first -- and only --
    page id instead.
    """
    response = requests.get(url).json()
    page_id = next(iter(response['query']['pages']))
    if int(page_id) > 0:
        return response['query']['pages'][page_id]['title']
    return None
def get_wikipedia_urls(url, dest_url, bi_grams):
    """Look up each bi-gram against the wiki API base `url` and collect
    [bi_gram, title] pairs for hits.

    NOTE(review): after a hit, `flag` skips the very next bi-gram --
    presumably to avoid overlapping consecutive bi-grams; confirm intent.
    `dest_url` is accepted but never used here -- confirm with callers.
    """
    wiki_url_terms = []
    flag = False
    for bi_gram in bi_grams:
        if not flag:
            wiki_term = get_title(url + bi_gram)
            if wiki_term != None:
                wiki_url_terms.append([bi_gram, wiki_term])
                flag = True
                continue;
        flag = False
return wiki_url_terms | true |
e539bba6128138fbd0c32f41cd67fdcc92fbb50a | Python | Yang11100/python | /jiaoben/studyexample.py | UTF-8 | 51 | 2.890625 | 3 | [] | no_license | x="a"
y = "b"
# Print x without a trailing newline.  (The original's trailing-comma
# idiom only suppressed the newline under Python 2; on Python 3 it just
# built and discarded a tuple.)
print(x, end="")
print(y)
| true |
800db4ed93cf9337d0116f30347f59376feabfaf | Python | zhch-sun/leetcode_szc | /33.search-in-rotated-sorted-array.py | UTF-8 | 2,401 | 4.03125 | 4 | [] | no_license | #
# @lc app=leetcode id=33 lang=python
#
# [33] Search in Rotated Sorted Array
#
class Solution(object):
    def search(self, nums, target):
        """Binary search in a rotated sorted array (LeetCode 33).

        Uses nums[0] as the pivot reference: when nums[mid] and target lie
        on the same side of the rotation, do an ordinary binary-search
        step; otherwise move toward target's side.

        :type nums: List[int]
        :type target: int
        :rtype: int  (index of target, or -1)
        """
        if not nums:
            return -1
        first = nums[0]
        left, right = 0, len(nums) - 1
        while left <= right:
            mid = (left + right) // 2
            val = nums[mid]
            if val == target:
                return mid
            same_side = (val < first) == (target < first)
            if same_side:
                # Plain binary search within one sorted half.
                if val < target:
                    left = mid + 1
                else:
                    right = mid - 1
            elif target < first:
                # target is in the right (rotated) half, mid is not.
                left = mid + 1
            else:
                # target is in the left half, mid is not.
                right = mid - 1
        return -1
if __name__ == '__main__':
    # Ad-hoc smoke tests; the bare triple-quoted string below holds the
    # author's (Chinese) analysis notes and is a no-op at runtime.
    """
    题设:
        有序数列可能在一个位置旋转, 从这个数列中找任意target, 不存在-1
        无重复数字.
        拓展: 如果是找旋转位置呢? 有点类似153题.
        首先确定0>-1即有解. 然后二分, 判断条件是
        1. 与pivot比较, 2. i和i+1比较.
    分析:
        两段上升序列: 左边高比右边高. 81th是follow-up
        这种解法最好理解: 只用nums[0]当做pivot.
        还有的解法用当前的lo当pivot, 不是那么好分析.
    坑:
        nums[0]使得必须判断空输入...
    解法1:
        只需要target和nums[mid]是否处于同一半区(不用管lo, hi!):
        原因是循环的内环是通过比较这两个来更新位置的, 在同一半区时mid值可以正确赋值.
        不同半区时比较target和nums[0]的大小, 来更新 lo, hi
        必须有在最上面判断符为<=的时候 必须有+1和-1,
        当只有右半边的时候(没有pivot), 为什么也对:
        只有右半边的时候, 这俩必处于同一边. 按照条件也处于同一边.
        还有一个答案(相对于我的答案没有优势):
        https://leetcode.com/problems/search-in-rotated-sorted-array/discuss/14436/Revised-Binary-Search
    """
    s = Solution()
    print(s.search([4,5,6,7,0,1,2], 4))
    print(s.search([4,5,6,7,0,1,2], 5))
    print(s.search([4,5,6,7,0,1,2], 2))
    print(s.search([1,3], 2))
| true |
1df100078a4cdee47e4b44a23b33a6511653fec9 | Python | the-last-question/CodeTask1-Tarcio | /Question03.py | UTF-8 | 480 | 4.28125 | 4 | [
"MIT"
] | permissive | def __checkPerfectNumber(Number):
    # Sum of all proper divisors of Number (O(n) scan; 1 and 0 yield 0).
    SumDivisors = 0
    for i in range(1, Number):
        if(Number % i == 0):
            SumDivisors = SumDivisors + i
    # A perfect number equals the sum of its proper divisors (6, 28, ...).
    if (SumDivisors == Number):
        return True
    else:
        return False
def __main__():
    """Print every perfect number found in the range [1, 10000)."""
    print("Displaying all perfect numbers between 1 and 10000:")
    for candidate in range(1, 10000):
        if __checkPerfectNumber(candidate):
            print(" %d is a Perfect Number" % candidate)
__main__() | true |
015bcbc8c52ffbbba60bec79aecbe4d21b90d91e | Python | Aasthaengg/IBMdataset | /Python_codes/p02804/s846371839.py | UTF-8 | 437 | 2.5625 | 3 | [] | no_license | MOD, ans = 10**9+7, 0
# AtCoder ABC151-E style: sum over all k-element subsets of max(S) - min(S),
# modulo MOD.  Element a[i] (sorted ascending) is the max of C(i, k-1)
# subsets and the min of C(n-i-1, k-1) subsets; MOD and ans are initialized
# on the line above.
n, k = map(int, input().split())
a = list(map(int, input().split()))
a.sort()
# kai[i] = i! mod MOD; gai[i] = inverse of i! via Fermat's little theorem.
kai, gai = [1], [1]
for i in range(n):
    kai.append((kai[i] * (i+1)) % MOD)
    gai.append(pow((kai[i] * (i+1)) % MOD, MOD-2, MOD))
for i in range(n):
    x, y = 0, 0
    # x: a[i] counted as subset minimum (choose k-1 from the n-i-1 larger).
    if i <= (n-k):
        x = (a[i] * kai[n-i-1] * gai[n-i-k] * gai[k-1]) % MOD
    # y: a[i] counted as subset maximum (choose k-1 from the i smaller).
    if i >= (k-1):
        y = (a[i] * kai[i] * gai[i-k+1] * gai[k-1]) % MOD
    ans = (ans + y - x) % MOD
print(ans) | true |
18c74588adaceea9a0ceb9b8c687508bbccdbcef | Python | jamesfeixue/Parallel-Algos | /ParallelMatrixMultiplication/matrix_mult_pycuda.py | UTF-8 | 15,494 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python
"""
.
.
.
Python Code
.
.
.
"""
#%%
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
#%%
from pycuda import driver, compiler, gpuarray, tools
import time
#%%
import pycuda.autoinit
class Transpose:
    """PyCUDA matrix transpose: one thread per element, 16x16 blocks."""
    def transpose(self, a_cpu):
        """Transpose a 2-D array on the GPU.

        Casts the input to float32, launches the Transpose kernel, and
        returns (result ndarray of shape (width, height), kernel seconds,
        total seconds including host<->device transfer).
        """
        print("--"*40)
        print("transpose checks")
        print("--"*40)
        a_cpu = a_cpu.astype(np.float32)
        # print(a_cpu)
        block_size = 16
        height = np.int32(a_cpu.shape[0])
        width = np.int32(a_cpu.shape[1])
        output_gpu = gpuarray.empty((width, height), np.float32)
        transpose_kernel = """
        #include <stdio.h>
        __global__ void Transpose(float *input, float *output, int input_width, int input_height){
            int tx = blockIdx.x*blockDim.x + threadIdx.x;
            int ty = blockIdx.y*blockDim.y + threadIdx.y;
            int Y = input_height;
            int X = input_width;
            // printf("tx:%d, ty:%d, Y:%d, X:%d, input[tx*X+ty]:%d \\n", tx, ty, Y, X, input[tx*X+ty]);
            if (tx<Y && ty<X){
                output[ty*Y+tx] = input[tx*X+ty];
            }
        }
        """
        start2 = time.time()
        # transfer host (CPU) memory to device (GPU) memory
        input_gpu = gpuarray.to_gpu(a_cpu)
        # get the kernel code from the template
        # by specifying the constant MATRIX_SIZE
        kernel_code = transpose_kernel
        mod = compiler.SourceModule(kernel_code)
        transpose = mod.get_function("Transpose")
        # Enough blocks to cover the matrix in each dimension.
        grid_x = int(np.ceil(height/float(block_size)))
        grid_y = int(np.ceil(width/float(block_size)))
        # print "grid_x", grid_x
        # print "gird_x.type", type(grid_x)
        # call the kernel on the card
        start = time.time()
        transpose(
            input_gpu,
            output_gpu,
            width, height,
            block = (block_size, block_size, 1), grid = (grid_x, grid_y, 1))#line blocksize
        kernel_time = time.time() - start
        result = output_gpu.get()
        total_time = time.time() - start2
        """change to just result at some time"""
        return result, kernel_time, total_time
#%%
#generic transpose algo
def matrix_transpose(matrix):
    """Serial (CPU) transpose used as the timing baseline.

    The input is converted to ``np.matrix`` and copied element by element
    into a new matrix with swapped dimensions.

    Returns a tuple ``(transposed_matrix, running_time)`` where
    ``running_time`` is the wall-clock duration of the routine in seconds.
    """
    t0 = time.time()
    src = np.matrix(matrix)
    n_rows, n_cols = src.shape
    # Destination gets swapped dimensions; filled one cell at a time.
    dst = np.matrix(np.empty((n_cols, n_rows)))
    for col in range(n_cols):
        for row in range(n_rows):
            dst[col, row] = src[row, col]
    return dst, time.time() - t0
#%%
#M = [[1, 2, 3], [1, 2, 3]]
##matrix_transpose(M)
##j = [1, 2, 3]
##np.matrix(j).shape
##matrix_transpose(j)
#
#kernel_transpose = Transpose()
#result, kernel_time, total_time, check = kernel_transpose.transpose(M)
#print(result, kernel_time, total_time, check)
"""
Calculate the transpose of them using 3 transpose algorithms (2 parallel, 1 serial) respectively.
Record the running time of each call for each of the algorithm.
"""
def transpose_check():
    """Benchmark GPU vs serial transpose on growing (2i x 3i) random matrices.

    Plots the three timing curves to cuda_transpose.png and returns the
    element-wise comparison of the GPU result against numpy's transpose for
    the LAST iteration only.
    """
    M = 2
    N = 3
    kernel_times = []
    total_times = []
    serial_times = []
    multiple = []
    for i in range(1, 100):
        rows = M*i
        columns = N*i
        matrix = np.random.rand(rows, columns)
        kernel_transpose = Transpose()
        result, kernel_time, total_time = kernel_transpose.transpose(matrix)
        kernel_times.append(kernel_time)
        total_times.append(total_time)
        # Reference result computed with numpy, cast to match the GPU dtype.
        transposed = np.transpose(matrix).astype(np.float32)
        # NOTE(review): the serial result (result_2) is only timed here —
        # it is never compared against the GPU or numpy results.
        result_2, serial_time = matrix_transpose(matrix)
        serial_times.append(serial_time)
        # print(transposed)
        # print("-" * 80)
        # print result
        multiple.append(i)
    """Plotting"""
    plt.plot(multiple, serial_times, color='b', label="serial")
    plt.plot(multiple, kernel_times, color='g', label="kernel")
    plt.plot(multiple, total_times, color='r', label="kernel+load")
    plt.legend(loc='upper left')
    plt.title('CUDA Transpose Times')
    plt.ylabel('Time')
    plt.xlabel('Iteration')
    plt.savefig('cuda_transpose.png')
    # Elementwise equality check for the final iteration only.
    print (transposed==result)
    return (transposed==result)
#transpose_check()
#%%
"""
MATRIX MULTIPLICATION
"""
#%%
import pycuda.autoinit
class MatrixMultiply:
    """Three PyCUDA implementations of C = A @ A.T for a (height x width)
    float32 matrix A.

    Every method takes a 2-D array ``a_cpu`` and returns a tuple
    ``(result, kernel_time, total_time)``: the (height x height) product,
    the kernel launch time alone, and the time that also includes the
    host->device transfer and kernel compilation.
    """
    def matrix_mul_naive(self, a_cpu):
        """Naive kernel: one thread per output cell, all reads from global memory."""
        print("-"*80)
        print("naive")
        print("-"*80)
        a_cpu = a_cpu.astype(np.float32)
        block_size = 32
        height = np.int32(a_cpu.shape[0])
        width = np.int32(a_cpu.shape[1])
        # A @ A.T is square: height x height.
        output_gpu = gpuarray.empty((height, height), np.float32)
        grid_x = int(np.ceil(height/float(block_size)))
        grid_y = int(np.ceil(height/float(block_size)))
        # b_value reads element (tx, k) of A, i.e. element (k, tx) of A^T.
        matrix_multiplication_kernel = """
        #include <stdio.h>
        __global__ void MatMul(float *a_gpu, float *output, int input_width, int input_height){
            int ty = blockIdx.y*blockDim.y + threadIdx.y;
            int tx = blockIdx.x*blockDim.x + threadIdx.x;
            int Y = input_height;
            int X = input_width;
            if( (ty < Y) && (tx < Y) ) {
                float summation = 0;
                for (int k = 0; k < X; k++){
                    float a_value = a_gpu[ty * X + k];
                    float b_value = a_gpu[k + X * tx];
                    summation += a_value * b_value;
                }
                output[ty* Y + tx] = summation;
            }
            __syncthreads();
        }
        """
        start2 = time.time()
        # Transfer host (CPU) memory to device (GPU) memory.
        a_gpu = gpuarray.to_gpu(a_cpu)
        mod = compiler.SourceModule(matrix_multiplication_kernel)
        MatrixMultiplication = mod.get_function("MatMul")
        # One (block_size x block_size) thread block per output tile.
        start = time.time()
        MatrixMultiplication(
            a_gpu,
            output_gpu,
            width, height,
            block = (block_size, block_size, 1), grid = (grid_x, grid_y, 1))
        kernel_time = time.time() - start
        result = output_gpu.get()
        total_time = time.time() - start2
        return result, kernel_time, total_time
#%%
    def matrix_mul_optimized1(self, a_cpu):
        """Tiled kernel: the A tile is staged in shared memory; the A^T
        factor is still read directly from global memory.
        """
        print("-"*80)
        print("opt1")
        print("-"*80)
        a_cpu = a_cpu.astype(np.float32)
        block_size = 32
        height = np.int32(a_cpu.shape[0])
        width = np.int32(a_cpu.shape[1])
        output_gpu = gpuarray.empty((height, height), np.float32)
        grid_x = int(np.ceil(height/float(block_size)))
        grid_y = int(np.ceil(height/float(block_size)))
        # BUG FIX: the reduction runs over the K dimension (width), so the
        # number of K tiles must come from `width`, not from the output grid
        # size (`height`). The two only coincide for near-square inputs.
        num_k_tiles = np.int32(int(np.ceil(width/float(block_size))))
        # BUG FIX: the `(i * tile_size + j < X) && (tx < Y)` guard below is
        # new. Without it the kernel read a_gpu[] out of bounds both on the
        # last partial K tile and for threads with tx >= Y (the stale
        # products were discarded, but the reads themselves are undefined
        # behaviour).
        matrix_multiplication_kernel = """
        #include <stdio.h>
        #include <math.h>
        #define tile_size 32
        __global__ void MatMul(float *a_gpu, float *output, const int input_height, const int input_width, const int grid_x){
            int ty = blockIdx.y*blockDim.y + threadIdx.y;
            int tx = blockIdx.x*blockDim.x + threadIdx.x;
            int Y = input_height;
            int X = input_width;
            __shared__ float A_shared[tile_size][tile_size];
            float summation = 0;
            for(int i=0; i < grid_x ; i++)
            {
                if((i * tile_size + threadIdx.x < X) && (ty < Y)) {
                    A_shared[threadIdx.y][threadIdx.x] = a_gpu[ty * X + i * tile_size + threadIdx.x];
                }
                else {
                    A_shared[threadIdx.y][threadIdx.x] = 0;
                }
                __syncthreads();
                for(int j = 0; j < tile_size; j++)
                {
                    if((i * tile_size + j < X) && (tx < Y)){
                        summation += A_shared[threadIdx.y][j] * a_gpu[j + i * tile_size + tx * X];
                    }
                }
                __syncthreads();
            }
            if((ty < Y) && (tx < Y))
            {
                output[ty * Y + tx] = summation;
            }
        }
        """
        start2 = time.time()
        # Transfer host (CPU) memory to device (GPU) memory.
        a_gpu = gpuarray.to_gpu(a_cpu)
        mod = compiler.SourceModule(matrix_multiplication_kernel)
        MatrixMultiplication = mod.get_function("MatMul")
        start = time.time()
        MatrixMultiplication(
            a_gpu,
            output_gpu,
            height, width,
            num_k_tiles,
            block = (block_size, block_size, 1), grid = (grid_x, grid_y, 1))
        kernel_time = time.time() - start
        result = output_gpu.get()
        total_time = time.time() - start2
        return result, kernel_time, total_time
#%%
    def matrix_mul_optimized2(self, a_cpu):
        """Fully tiled kernel: both the A tile and the A^T tile are staged
        in shared memory before the partial dot products are accumulated.
        """
        print("-"*80)
        print("opt2")
        print("-"*80)
        a_cpu = a_cpu.astype(np.float32)
        block_size = 32
        height = np.int32(a_cpu.shape[0])
        width = np.int32(a_cpu.shape[1])
        output_gpu = gpuarray.empty((height, height), np.float32)
        grid_x = int(np.ceil(height/float(block_size)))
        grid_y = int(np.ceil(height/float(block_size)))
        # BUG FIX: K-tile count derived from `width` (see matrix_mul_optimized1).
        num_k_tiles = np.int32(int(np.ceil(width/float(block_size))))
        # BUG FIX inside the kernel: the zero-padding branch for B_shared
        # used the *global* thread indices (B_shared[ty][tx]); for any block
        # past the first this wrote far outside the 32x32 shared array. It
        # must use the block-local indices (B_shared[thready][threadx]).
        matrix_multiplication_kernel = """
        #include <stdio.h>
        #include <math.h>
        #define tile_size 32
        __global__ void MatMul(float *a_gpu, float *output, const int input_height, const int input_width, const int grid_x){
            int ty = blockIdx.y*blockDim.y + threadIdx.y;
            int tx = blockIdx.x*blockDim.x + threadIdx.x;
            const int Y = input_height;
            const int X = input_width;
            __shared__ float A_shared[tile_size][tile_size];
            __shared__ float B_shared[tile_size][tile_size];
            int threadx = threadIdx.x;
            int thready = threadIdx.y;
            float summation = 0;
            for(int i=0; i < grid_x ; i++)
            {
                if((i * tile_size + threadx < X) && (ty < Y)) {
                    A_shared[thready][threadx] = a_gpu[ty * X + i * tile_size + threadx];
                }
                else {
                    A_shared[thready][threadx] = 0;
                }
                if((i * tile_size + thready < X) && (tx < Y)){
                    B_shared[thready][threadx] = a_gpu[tx * X + i * tile_size + thready];
                }
                else
                {
                    B_shared[thready][threadx] = 0;
                }
                __syncthreads();
                for(int j = 0; j < tile_size; j++)
                {
                    summation += A_shared[thready][j] * B_shared[j][threadx];
                }
                __syncthreads();
            }
            if((ty < Y) && (tx < Y))
            {
                output[ty * Y + tx] = summation;
            }
        }
        """
        start2 = time.time()
        # Transfer host (CPU) memory to device (GPU) memory.
        a_gpu = gpuarray.to_gpu(a_cpu)
        mod = compiler.SourceModule(matrix_multiplication_kernel)
        MatrixMultiplication = mod.get_function("MatMul")
        start = time.time()
        MatrixMultiplication(
            a_gpu,
            output_gpu,
            height, width,
            num_k_tiles,
            block = (block_size, block_size, 1), grid = (grid_x, grid_y, 1))
        kernel_time = time.time() - start
        result = output_gpu.get()
        total_time = time.time() - start2
        return result, kernel_time, total_time
#%%
def check_matmul():
    """Run the three GPU multiply variants on growing (2i x 3i) random
    matrices, plot their timings to cuda_matmul.png, print correctness
    checks against numpy's A @ A.T and return the element-wise comparison
    of the naive GPU result vs the CPU result (LAST iteration only).
    """
    M = 2
    N = 3
    naive_kernel_times = []
    naive_total_times = []
    opt1_kernel_times = []
    opt1_total_times = []
    opt2_kernel_times = []
    opt2_total_times = []
    iteration = []
    for i in range(1, 10):
        rows = M*i
        columns = N*i
        matrix = np.random.rand(rows, columns)
        #        matrix = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
        matrix = matrix.astype(np.float32)
        # Second factor for numpy's reference product A @ A.T below.
        transpose = np.transpose(matrix)
        transpose = transpose.astype(np.float32)
        kernel_multiply = MatrixMultiply()
        naive_result, naive_kernel_time, naive_total_time = kernel_multiply.matrix_mul_naive(matrix)
        naive_kernel_times.append(naive_kernel_time)
        naive_total_times.append(naive_total_time)
        opt1_result, opt1_kernel_time, opt1_total_time = kernel_multiply.matrix_mul_optimized1(matrix)
        opt1_kernel_times.append(opt1_kernel_time)
        opt1_total_times.append(opt1_total_time)
        #
        opt2_result, opt2_kernel_time, opt2_total_time = kernel_multiply.matrix_mul_optimized2(matrix)
        opt2_kernel_times.append(opt2_kernel_time)
        opt2_total_times.append(opt2_total_time)
        # CPU reference product.
        cpu_multiply = np.matmul(matrix, transpose)
        cpu_multiply = cpu_multiply.astype(np.float32)
        iteration.append(i)
    #        """Plotting"""
    plt.plot(iteration, naive_kernel_times, color='b', label="naive_kernel")
    plt.plot(iteration, naive_total_times, color='b', linestyle = '--', label="naive_total")
    plt.plot(iteration, opt1_kernel_times, color='r', label="opt1_kernel")
    plt.plot(iteration, opt1_total_times, color='r', linestyle = '--', label="opt1_total")
    plt.plot(iteration, opt2_kernel_times, color='g', label="opt2_kernel")
    plt.plot(iteration, opt2_total_times, color='g', linestyle = '--', label="opt2_total")
    plt.legend(loc='upper left')
    plt.title('CUDA Mat Mul Times')
    plt.ylabel('Time')
    plt.xlabel('Iteration')
    plt.savefig('cuda_matmul.png')
    print("-"*80)
    print("Matrix Multiplication Checks")
    print("-"*80)
    # NOTE(review): every comparison below uses results from the LAST loop
    # iteration only.
    print (cpu_multiply)
    print("-"*80)
    print (naive_result)
    print("-"*80)
    print(opt1_result)
    print("-"*80)
    print(opt2_result)
    print("-"*80)
    print (opt1_result==naive_result)
    print("*"*10)
    print(opt1_result == opt2_result)
    print("-"*80)
    print("complete")
    return (cpu_multiply==naive_result)
check_matmul()
09099fe7c7618f173603302ac96a87e0d14f6977 | Python | ekr-ccp4/jsCoFE | /pycofe/varut/selectdir.py | UTF-8 | 1,442 | 2.703125 | 3 | [
"MIT"
] | permissive | ##!/usr/bin/python
#
# ============================================================================
#
# 05.07.17 <-- Date of Last Modification.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ----------------------------------------------------------------------------
#
# QT SELECT DIRCTORY DIALOG FOR CLIENT-SIDE WRAPPERS
#
# Copyright (C) Eugene Krissinel, Andrey Lebedev 2017
#
# ============================================================================
#
import sys
import os
from PyQt4 import QtGui
#from PyQt4 import QtCore
def select ( title ):
    """Open a Qt directory-picker dialog titled `title`.

    Returns the selected directory path as a string, or "" when the user
    cancels or nothing was selected.
    """
    # Start browsing from the user's home directory when it is known.
    if 'HOME' in os.environ:
        startDir = os.environ['HOME']
    else:
        startDir = None
    # A QApplication is created before the dialog; the reference is kept in
    # `app` for the dialog's lifetime even though it is not used directly.
    app = QtGui.QApplication([])
    dialog = QtGui.QFileDialog(None,title,startDir)
    #dialog.setWindowTitle(title)
    dialog.setFileMode(QtGui.QFileDialog.Directory)
    dialog.setOption ( QtGui.QFileDialog.DontUseNativeDialog,True )
    #dialog.setDirectory ( startDir )
    #dialog.setOption(QtGui.QFileDialog.ShowDirsOnly, True)
    dialog.show()
    dialog.raise_()
    # exec_() blocks until the dialog closes; truthy means the user accepted.
    if dialog.exec_():
        if len(dialog.selectedFiles()) > 0:
            return dialog.selectedFiles()[0]
    return ""
"""
file = str ( QtGui.QFileDialog.getExistingDirectory(None,title,startDir) )
return file
"""
if __name__ == '__main__':
    # Dialog title comes from the first CLI argument, with a default.
    if len(sys.argv) > 1:
        title = sys.argv[1]
    else:
        title = 'Select Directory'
    file = select ( title )
    # Parenthesised form is valid under both Python 2 and Python 3
    # (the original `print file` was Python-2-only syntax).
    print(file)
| true |
2d12b98036008b12719b865fdeff1ccc4eae855b | Python | dannyroberts/couchjock | /test.py | UTF-8 | 2,313 | 2.546875 | 3 | [
"MIT"
] | permissive | from operator import attrgetter
import unittest2
from couchdbkit import Server
import couchdbkit
import couchjock
class CouchjockTestCase(unittest2.TestCase):
    """Exercise couchjock's couchdbkit-compatible schema API against a live
    CouchDB instance (a fresh throwaway database per test).
    """
    server_url = 'http://localhost:5984/'
    db_name = 'couchjock__test'
    schema = couchjock

    def setUp(self):
        # Create a fresh database for each test; dropped again in tearDown.
        self.server = Server(uri=self.server_url)
        self.db = self.server.create_db(self.db_name)

    def tearDown(self):
        self.server.delete_db(self.db_name)

    def test_save(self):
        """Saving a bare document assigns an _id, and the doc can be re-fetched."""
        class Foo(self.schema.Document):
            _db = self.db
            pass
        doc = Foo()
        doc.save()
        saved_id = doc._id
        self.assertIsNotNone(saved_id)
        fetched = Foo.get(saved_id)
        self.assertEqual(fetched._id, saved_id)

    def test_simple_schema(self):
        """Typed properties round-trip through save/get with their defaults."""
        class Foo(self.schema.Document):
            _db = self.db
            string = self.schema.StringProperty()
            boolean = self.schema.BooleanProperty(default=True)
        doc = Foo()
        doc.save()
        doc_id, doc_rev = doc._id, doc._rev
        self.assertIsNotNone(doc_id)
        self.assertIsNotNone(doc_rev)
        doc = Foo.get(doc_id)
        expected = {
            'doc_type': 'Foo',
            '_id': doc_id,
            '_rev': doc_rev,
            'string': None,
            'boolean': True,
        }
        self.assertEqual(doc.to_json(), expected)
        # A mutation of the raw backing dict must be visible via the property.
        doc._doc.update({'boolean': False})
        self.assertEqual(doc.boolean, False)

    def _individual_save(self, docs):
        # Save strategy 1: one request per document.
        for doc in docs:
            doc.save()

    def _bulk_save(self, docs):
        # Save strategy 2: a single bulk request.
        self.db.bulk_save(docs)

    def _test_simple_view(self, save_fn):
        """Docs stored via `save_fn` come back from _all_docs ordered by _id."""
        class Foo(self.schema.Document):
            _db = self.db
            string = self.schema.StringProperty()
        first = Foo(string='fun one')
        second = Foo(string='poop')
        save_fn([first, second])
        from_view = map(lambda x: x.to_json(), Foo.view('_all_docs', include_docs=True).all())
        by_id = map(lambda x: x.to_json(), sorted([first, second], key=attrgetter('_id')))
        self.assertEqual(from_view, by_id)

    def test_simple_view(self):
        self._test_simple_view(self._individual_save)

    def test_bulk_save(self):
        self._test_simple_view(self._bulk_save)
class CouchdbkitTestCase(CouchjockTestCase):
    # Re-run the entire suite above with the reference couchdbkit
    # implementation, verifying couchjock stays API-compatible with it.
    schema = couchdbkit
| true |
2a7f7650d5ab997cee3c5929b0bcab0824df0d1b | Python | deepakkarki/pruspeak | /src/userspace_lib/bs_tcp_client.py | UTF-8 | 663 | 3.078125 | 3 | [
"MIT"
] | permissive | import socket
import sys
out = sys.stdout  # shortcut used by get_data() for writing prompts
sentinel = ''  # a blank line terminates multi-line input
TCP_IP = '127.0.0.1'  # address of the server to connect to (localhost)
TCP_PORT = 6060  # port of the server to connect to
BUFFER_SIZE = 1024 * 2  # maximum number of bytes read per server response
def get_data():
    """Prompt the user and read lines from stdin until a blank line.

    Writes the "ps>" prompt first and a "..." continuation prompt after
    every accepted line; returns the collected lines joined by newlines.
    """
    out.write("ps>") #prompt the user
    collected = []
    while True:
        line = raw_input()
        if line == sentinel:  # blank line ends the input
            break
        collected.append(line) #get the input
        out.write("...")
    return '\n'.join(collected) #return the data entered
# Open one TCP connection and keep relaying user input to the server until
# any error (socket failure, EOF on stdin, ...) ends the session.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
while 1:
    try:
        msg = get_data()
        print "sending message\n", msg, "\n"
        s.send(msg)
        # Blocking read of the server's reply.
        data = s.recv(BUFFER_SIZE)
        if data != '\n':
            print "return value:", data
    except Exception as e:
        # On any failure: report it, close the socket and stop the loop.
        print e
        print "Ending Connection"
        s.close()
        break
| true |
66f26f714c14c10b9759a96f7580a6068dc03013 | Python | tbischler/PEAKachu | /peakachulib/library.py | UTF-8 | 2,125 | 2.5625 | 3 | [
"ISC",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | from os.path import basename, splitext
import pandas as pd
from peakachulib.bam_to_bed import BamToBed
from peakachulib.count import ReadCounter
class Library(object):
    '''
    Wraps one alignment (BAM) library: counts the reads that fall into
    predefined windows or called peaks and collects the reads themselves,
    storing all results in the shared `replicon_dict`.
    '''
    def __init__(self, paired_end, max_insert_size, bam_file, replicon_dict):
        self.paired_end = paired_end
        self.bam_file = bam_file
        self.max_insert_size = max_insert_size
        # Library name = BAM file name without directory and extension.
        self.lib_name = splitext(basename(bam_file))[0]
        self.replicon_dict = replicon_dict

    def count_reads_for_windows(self):
        """Count reads per predefined window, separately for each strand."""
        counter = ReadCounter(self.paired_end, self.max_insert_size,
                              self.bam_file)
        for replicon, entry in self.replicon_dict.items():
            entry['window_counts'] = pd.DataFrame()
            # One column of counts per strand.
            for strand in ['+', '-']:
                entry['window_counts'][strand] = counter.count_reads_for_windows(
                    replicon, strand, entry["window_list"])
        counter.close_bam()

    def count_reads_for_peaks(self):
        """Count reads overlapping each called peak.

        Consumes `peak_df` from every replicon entry and replaces it with
        the raw `peak_counts`.
        """
        counter = ReadCounter(self.paired_end, self.max_insert_size,
                              self.bam_file)
        for replicon, entry in self.replicon_dict.items():
            counts = counter.count_reads_for_peaks(
                replicon, entry["peak_df"].to_dict('records'))
            del entry["peak_df"]
            entry["peak_counts"] = counts
        counter.close_bam()

    def merge_reads(self):
        """Store the BED-style reads for each replicon and return the dict."""
        converter = BamToBed(self.paired_end, self.max_insert_size)
        for replicon, reads in converter.generate_bed_format(self.bam_file):
            self.replicon_dict[replicon]["reads"] = pd.Series(reads)
        return self.replicon_dict  # it seems that a copy is returned!
| true |
8713585b0acfd068e15396482900871c6e62a57d | Python | MayaBishop/Python-projects | /Random Projects/fractals.py | UTF-8 | 2,818 | 3.09375 | 3 | [] | no_license | import math
import pygame
pygame.init()
#ellipse(Surface, color, Rect, width=0) -> Rect
# sand colour r 255 g 180+ b 30+
def coral(sp,length,win,angle=math.pi/2):
epx = sp[0]+(length*math.cos(angle))
epy = sp[1]-(length*math.sin(angle))
ep=(epx,epy)
pygame.draw.line(win,(244, 107, 66),sp,ep)
pygame.display.update()
if length > 2*3:
coral(ep,length-2*3,win,angle/2)
coral(ep,length-2*3,win,angle*3/2)
def shell(x1,y1,t,angle,length,n):
x2 = x1 + (length * math.cos(angle))
y2 = y1 + (length * math.sin(angle))
wx = (x2-x1)*t
hy = (y2-y1)*t
tamp = wx*hy+wx*hy
print(tamp)
if wx!=0:
if wx/abs(wx) == 1:
k = tamp*-1/15
else:
k = tamp*1/15
else:
k = 0
print("k",k)
pygame.draw.line(window,(0,0,0),(x1,y1),(x2,y2),5)
#pygame.draw.polygon(window,(224, 62, 33),[(x1,y1),(x2,y2),(x2+k,y2),(x2+k,y1)])
#print((x1,y1),(x2,y2),wx+hy)
pygame.display.update()
if n>1:
shell(x2,y2,t-.1,angle+0.4,length*.9,n-1)
print((x1,y1))
def waves(x,y,r,n):
pygame.draw.circle(window,(0, 0, 0),(x,y),r)
pygame.draw.circle(window,(0, 14, 219),(x,y),r-5)
pygame.display.update()
pygame.time.delay(50)
if n>1 and r-10>0 and x<640:
waves(x+int(r/2),y,r-10,n-1)
def sand(clr,x,y,w,h,high):
pygame.draw.ellipse(window,clr,(x,y,w,h))
pygame.display.update()
x += 10
if (x+w)*3/4>640:
y += h-2
w += 10
x = 0-w/2
h += 10
clr = (clr[0],clr[1]+20,clr[2]+20)
if clr[1]>255 or clr[2]>255:
clr=(clr[0],clr[1]-20,clr[2]-20)
if y<high:
sand(clr,x,y,w,h,high)
def sun(x,y,r,maxnum):
pygame.draw.circle(window,(255, 236, 94),(x,y),r)
pygame.display.update()
angle = 2*math.pi/maxnum
sunbeams(x,y,r,angle,1,maxnum)
def sunbeams(x,y,r,a,n,mn):
angle = a*n
if n%2 == 0:
nx = x+(r*1.5)*math.cos(angle)
ny = y+(r*1.5)*math.sin(angle)
else:
nx = x+(r*2)*math.cos(angle)
ny = y+(r*2)*math.sin(angle)
pygame.draw.line(window,(255, 236, 94),(x,y),(nx,ny))
pygame.display.update()
if n<=mn:
sunbeams(x,y,r,a,n+1,mn)
window = pygame.display.set_mode((640,420))
window.fill((32, 191, 183))
waves(100,300,100,20)
waves(300,300,100,20)
waves(500,300,100,20)
sand((255,180,30),-15,300,30,20,420)
coral((320,400),20*3,window)
sun(540,100,40,20)
##shell(302,290,1,math.pi*3/2,50,40)
##shell(202,290,1,math.pi*3/2,50,40)
##shell(102,290,1,math.pi*3/2,50,40)
##pygame.draw.circle(window,(224, 62, 33),(100,),50)
##shell(2,290,1,math.pi*3/2,50,40)
pygame.display.update()
pygame.time.delay(30000)
pygame.quit()
| true |
041dd8d5dd9fe7e2ddb9cab956261b3d8733ee4c | Python | thommms/hacker_rank | /algorithms/implementation/python/migratory_birds.py | UTF-8 | 338 | 3.15625 | 3 | [] | no_license | n = int(input())
bird_type = [int(t) for t in input().strip().split(' ')]
from collections import Counter
type_dict = Counter(bird_type)
max_key = max_val = 0
for k, v in type_dict.items():
if v > max_val:
max_val = v
max_key = k
if v == max_val:
if k < max_key:
max_key = k
print(max_key)
| true |
40c06be03decf5035a05f4f145d5e3956022ea8d | Python | ParksProjets/kattis-hunter | /kattishunter/codegen/birds.py | UTF-8 | 2,155 | 2.765625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | """
Generate C++ code for getting information about birds.
Copyright (C) 2019, Guillaume Gonnet
This project is under the MIT license.
"""
from typing import List
def num_birds_shoot(N: int, **kargs):
"Get the number of birds for round rindex and rindex+1."
return f"""
if (pState.getRound() == {N})
mCacheNumber = (pState.getNumBirds() - 1);
if (pState.getRound() == {N+1})
WaitForMs(40 * (mCacheNumber + (20 * (pState.getNumBirds() - 1))));
"""
def species_guess(**kargs):
"Guess function for getting species."
return """
std::vector<ESpecies> lSGuesses(pState.getNumBirds(), SPECIES_PIGEON);
return lSGuesses;
"""
def species_reveal(N: int, E: int, R: List, **kargs):
"Reveal function for getting species."
Nmax = min(N+3, R[E]["num-birds"])
return """
for (int i = 0; i < pSpecies.size(); i++) {
mCacheIndex++;
if (mCacheIndex > %s) {
mCacheNumber += pSpecies[i] * mBaseShift;
mBaseShift *= 6;
}
if (mCacheIndex == %s)
WaitForMs(40 * mCacheNumber);
}
""" % (N, Nmax)
def directions_for(round_i: int, bird_i: int, code_i: int, code_max: int):
"Generate direction code for the given bird."
obs_i = bird_i + 1
content = "mCacheNumber += pState.getBird(%s).getObservation(%s) * %s;" % (
bird_i, obs_i, (9 ** code_i))
if code_i == code_max:
content += "\n%sWaitForMs(20 * mCacheNumber);" % (" " * 12)
return """
if (pState.getRound() == %s && pState.getBird(0).getSeqLength() == %s) {
%s
}
""" % (round_i, (obs_i + 1), content)
def directions_shoot(N: int, E: int, R: List, **kargs):
"Shoot function for getting bird directions."
index, N0 = (0, N)
Nmax = min(N+3, R[E]["num-birds"])
result = ""
for ri, r in enumerate(R[E]["rounds"]):
for i in range(N, min(Nmax, index + r["num-birds"])):
result += directions_for(ri, i - index, N - N0, Nmax - N0 - 1)
N += 1
index += r["num-birds"]
return result
| true |
7569d7bbee7110de9fb8be64751c3e449f420912 | Python | Dipson7/LabExercise | /Lab2/question_no_11.py | UTF-8 | 55 | 2.78125 | 3 | [] | no_license | '''
What is the result of 10**3?
'''
a = 10**3
print(a) | true |
a76578e5239c79d962d4dbb30894334745049a4d | Python | shenbingdy/Steam-game-recommendation | /py/get_data_from_web.py | UTF-8 | 5,438 | 2.875 | 3 | [] | no_license |
import requests,sys,time
import pandas as pd
import numpy as np
import json
## show work status fuction
def F_status (step, total, current=0):
current+=step
Percentage= int((current/total)*100)
status='>'*Percentage+' '*(100-Percentage)
if Percentage < 100:
sys.stdout.write('\rStatus: [{0}] {1:.2f}% '.format(status, Percentage))
sys.stdout.flush()
else:
print ('\n')
# We have the 5000 user_id and plan to get user_id inofrmation and game information
# get user_inventoty from steampy power
# get api information https://developer.valvesoftware.com/wiki/Steam_Web_API#GetGlobalAchievementPercentagesForApp_.28v0001.29
# IsPlayingSharedGame (v0001)
# IsPlayingSharedGame returns the original owner's SteamID if a borrowing account is currently playing this game. If the game is not borrowed or the borrower currently doesn't play this game, the result is always 0.
# Example URL: http://api.steampowered.com/IPlayerService/IsPlayingSharedGame/v0001/?key=XXXXXXXXXXXXXXXXX&steamid=76561197960434622&appid_playing=240&format=json
#load the 5000 user_id
user_id_path= 'C:/Users/shenbingdy/Desktop/datalab/game/my git/data/steam_user_id.txt'
with open (user_id_path, 'r') as f:
steam_user_id_lst=f.readlines()[::2]###
len(steam_user_id_lst)
#get user_inventoty from steam by api
current=0
total=len( steam_user_id_lst)
F_status(0,total,current)
initial_time=time.time()
user_inventory={}
for steam_user_id in steam_user_id_lst:
steam_user_id_url='http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/'
param={'key':'4850408CACE43F02FCFB811A67B4DAAF',
'steamid':steam_user_id.strip(),
'format':'json'}
for i in range(3):
try:
r=requests.get(steam_user_id_url, param)
break
except:
time.sleep(.5)
pass
steam_user_game=r.json().get('response').get('games')
user_inventory.update({steam_user_id.strip():steam_user_game})
F_status(1,total,current)
current+=1
if current%200==0:
time.sleep(300)
final_time=time.time()
print('The total second is :',-initial_time+final_time)
# Save the data in to a txt file:
user_inventory_adress='C:/Users/shenbingdy/Desktop/datalab/game/my git/data/steam_user_id_summary.txt'
with open (user_inventory_adress, 'w') as f:
for steam_user_id,steam_user_game in user_inventory.items():
f.write(json.dumps({steam_user_id:steam_user_game}))
f.write('\n')
##get user_inventoty from steam by api recent 2 weeks
current=0
total=len( steam_user_id_lst)
F_status(0,total,current)
initial_time=time.time()
user_inventory_recent={}
for steam_user_id in steam_user_id_lst:
steam_user_id_url='http://api.steampowered.com/IPlayerService/GetRecentlyPlayedGames/v0001/'
param={'key':'4850408CACE43F02FCFB811A67B4DAAF',
'steamid':steam_user_id.strip(),
'format':'json'}
for i in range(3):
try:
r=requests.get(steam_user_id_url, param)
break
except:
time.sleep(.5)
pass
steam_user_game=r.json().get('response').get('games')
user_inventory_recent.update({steam_user_id.strip():steam_user_game})
F_status(1,total,current)
current+=1
if current%200==0:
time.sleep(280)
final_time=time.time()
print('The total second is :',-initial_time+final_time)
# Save the data in to a txt file:
user_inventory_recent_adress='C:/Users/shenbingdy/Desktop/datalab/game/my git/data/steam_user_id_recent_summary.txt'
with open (user_inventory_recent_adress, 'w') as f:
for steam_user_id,steam_user_game in user_inventory.items():
f.write(json.dumps({steam_user_id:steam_user_game}))
f.write('\n')
#By using the third part software get the ranking of game
#get all the game app_id and save to a file
appid_url='http://steamspy.com/api.php?request=all'
appid_dict=requests.get(appid_url).json()
appid_list=list(appid_dict.keys())
appid_list_url='C:/Users/shenbingdy/Desktop/datalab/game/my git/data/appid_list.txt'
appid_dict_url='C:/Users/shenbingdy/Desktop/datalab/game/my git/data/appid_dict.txt'
with open (appid_list_url, 'w') as f:
for i in range(len(appid_list)):
f.write(appid_list[i])
f.write('\n')
with open (appid_dict_url, 'w') as f:
f.write(json.dumps(appid_dict))
#get all the game information and save them to a file
appid_url='http://steamspy.com/api.php?request=all'
appid_dict=requests.get(appid_url).json()
appid_detail_dict={}
appid_detail_txt_url='C:/Users/shenbingdy/Desktop/datalab/game/my git/data/appid_detail1.txt'
current=0
total=len(appid_list)
F_status(0,total,current)
initial_time=time.time()
with open (appid_detail_txt_url,'w') as f:
for appid in appid_list:
appid_detial_url=('http://store.steampowered.com/api/appdetails?appids=%s')%(appid)
for i in range(3):
try:
r=requests.get(appid_detial_url).json()
break
except:
time.sleep(0.5)
pass
appid_detail_dict.update(r)
f.write(json.dumps(r))
f.write('\n')
F_status(1,total,current)
current+=1
if current%200==0:
time.sleep(300)
print('The total second is ', (time.time()-initial_time))
| true |
7070937c840c58d81b27ea3df449b4ce4b24a165 | Python | Fibird/sosp_plot | /fairness/r2b_reserve_plot_631.py | UTF-8 | 3,536 | 2.75 | 3 | [] | no_license | import matplotlib.pyplot as plt
from datetime import datetime
import numpy as np
import math
color_styles = ['#d73027', '#f46d43', '#2c7bb6', '#fdae61', '#fee090', '#ffffbf', '#e0f3f8', '#abd9e9', '#74add1',
'#4575b4']
markers = ['x', 'o', '>', 'square', '*', '<']
linestyles = ['solid', 'dashed', 'dashdot', 'dotted']
client_labels = ['B, burst=4000', 'R, rserve=1000', 'BE']
def get_data_from_sim(file_name):
data = []
f = open(file_name)
for index, line in enumerate(f):
line_list = line.split()
cols = len(line_list)
if index == 0:
for i in range(cols - 1):
data.append([])
if index > 0:
for i in range(0, cols - 1):
data[i].append(float(line_list[i + 1]))
f.close()
return data
def stat_resource_time(raw_data, win_size=20):
rts = []
i = 0
while i < len(raw_data):
s = math.fsum(raw_data[i:i+win_size])
s = s / 3600.0
rts.append(s)
i = i + win_size
return rts
def io_plot(plot_data, save_img=False):
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
xs = np.arange(0, len(plot_data[0]), 1)
count = 0
for ys in plot_data:
ys = stat_resource_time(ys)
print(ys)
xs = np.arange(0, len(ys), 1)
if count < (len(plot_data) - 1):
ax.plot(xs, ys, linewidth=2, color=color_styles[count], linestyle=linestyles[count], marker=markers[count],
label=client_labels[count])
count = count + 1
# ax.xaxis.set_major_locator(plt.MultipleLocator(50))
# ax.yaxis.set_major_locator(plt.MultipleLocator(10))
ax.set(xlim=(0, len(xs) - 1), ylim=(0, None))
# ax.legend(loc='center right')
font1 = {'family': 'Times New Roman',
'weight': 'normal',
'size': 16,
}
ncol = int(math.ceil(6 / 2.))
plt.legend(ncol=ncol, loc='upper center', prop=font1)
plt.grid(axis='y', color='0.7', linestyle=':')
plt.tick_params(labelsize=16)
labels = ax.get_xticklabels() + ax.get_yticklabels()
[label.set_fontname('Times New Roman') for label in labels]
ax.set_xlabel("Time (sec)", font1)
ax.set_ylabel("IOPS", font1)
if save_img:
date_str = datetime.now().strftime("%y%m%d%H%M%S")
plt.savefig("sim_plot" + str(date_str) + ".svg")
plt.show()
if __name__ == '__main__':
plt.rc('font', family='Arial')
labels = ['Reservation Policy', 'Burst Policy', 'Best-effort Policy']
# methods = ['', 'Reservation App', 'Burst App', 'Best-effort App', '']
x = np.arange(len(labels)) # the label locations
width = 0.2 # the width of the bars
fig, ax = plt.subplots(figsize=(4, 5))
reserve = [24258.0/1000.0, 27425.0/1000.0, 39498.0/1000.0]; rx = [1 - width, 1, 1+width]
burst = [73.0, 62, 66.0]; bx = [2 - width, 2, 2+width]
best = [98.0, 300.0, 300.0]; bex = [3 - width, 3, 3+width]
# rx = np.array([0])
rects1 = ax.bar(x, reserve, width, color='#93cf93', edgecolor='black')
ax.set_ylabel('$95^{th}$ Latency($\mu$s)', fontsize=16)
ax.set_xticks(x)
ax.set_xticklabels(labels)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.xticks(rotation=15)
# ax.legend(ncol=3, loc='upper center', bbox_to_anchor=(0.5, 1.15))
# ax.yaxis.set_major_locator(plt.MultipleLocator(50))
plt.grid(axis="y")
fig.tight_layout()
plt.savefig("fairness_reserve_r2b.svg")
plt.show()
| true |