blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a265fa9fd39d7e2927ee0298e051f12a840d9b54
|
Python
|
galvarez6/datamining
|
/assignment1/understanding python /checker.py
|
UTF-8
| 2,792
| 3.703125
| 4
|
[] |
no_license
|
import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
# Create a dataframe from csv: tab-separated practice data; keep the raw
# value matrix for the distance/similarity functions below.
df = pd.read_csv('practice.txt', delimiter='\t')
myData = df.values
def minMaxVec(vec1, vec2):
    """Element-wise minima and maxima of two equal-length vectors.

    Used by jaccard(): returns (mins, maxs) as two lists.
    """
    mins = [min(vec1[i], vec2[i]) for i in range(len(vec1))]
    maxs = [max(vec1[i], vec2[i]) for i in range(len(vec1))]
    return mins, maxs
def euclid(vec1, vec2):
    """Euclidean (L2) distance between two numeric vectors."""
    return np.linalg.norm(vec1 - vec2)
def manhattan_distance(vec1, vec2):
    """Manhattan (L1) distance: sum of absolute coordinate differences."""
    return np.abs(vec1 - vec2).sum()
def cosine(vec1, vec2):
    """Cosine similarity: dot(v1, v2) / (||v1|| * ||v2||)."""
    dot = np.dot(vec1, vec2)
    norms = np.sqrt(sum(vec1 ** 2)) * np.sqrt(sum(vec2 ** 2))
    return dot / norms
def jaccard(vec1, vec2):
    """Generalized (weighted) Jaccard similarity: sum(min) / sum(max).

    The element-wise min/max computation is inlined here instead of
    delegating to the minMaxVec helper; the result is identical.
    """
    mins = []
    maxs = []
    for i in range(len(vec1)):
        mins.append(min(vec1[i], vec2[i]))
        maxs.append(max(vec1[i], vec2[i]))
    return sum(mins) / sum(maxs)
def tanimoto(vec1, vec2):
    """Tanimoto similarity: dot / (||v1||^2 + ||v2||^2 - dot)."""
    dot = np.dot(vec1, vec2)
    return dot / (sum(vec1 ** 2) + sum(vec2 ** 2) - dot)
def sortKey(item):
    """Sort key for [index, distance] pairs: the distance field."""
    _, distance = item
    return distance
def knearest(vec, data, k, method):
    """Return indices of the k nearest rows of `data` to `vec`.

    Fix: `method` was previously ignored and euclidean distance was always
    used. It now selects "euclidean" (the default/previous behavior) or
    "manhattan". If k >= len(data), all indices are returned sorted by
    distance.
    """
    scored = []
    for row in range(len(data)):
        diff = vec - data[row]
        if method == "manhattan":
            distance = np.sum(np.abs(diff))
        else:
            # Default to euclidean for any other method string, matching
            # the original behavior.
            distance = np.sqrt(np.sum(diff ** 2))
        scored.append([row, distance])
    scored.sort(key=lambda pair: pair[1])  # stable, same tie order as before
    if k < len(data):
        return [pair[0] for pair in scored[:k]]
    return [pair[0] for pair in scored]
# Demo: pairwise measures between the first two rows, then a kNN query.
print("Euclidean distance between row 0 and 1: ", euclid(myData[0], myData[1]))
print("Manhattan distance between row 0 and 1: ", manhattan_distance(myData[0], myData[1]))
print("Cosine similarity between row 0 and 1: ", cosine(myData[0], myData[1]))
print("Jaccard similarity between row 0 and 1: ", jaccard(myData[0], myData[1]))
print("Tanimoto similarity between row 0 and 1: ", tanimoto(myData[0], myData[1]))
print("***************************************")
print("knn of row 100 using euclidean distance: ", knearest(myData[100], myData, k=5, method = "euclidean"))
#print("knn of row 100 using manhattan distance: ", knearest(myData[100], myData, k=5, method = "manhattan"))
| true
|
99a1d49e425ee486d3bd893841efc2732d935925
|
Python
|
Ursinus-IDS301-S2020/Week10Class
|
/NearestNeighbors2D_Naive.py
|
UTF-8
| 1,123
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
"""
The purpose of this file is to demonstrate how one might write
naive code to do k-nearest neighbors by manually computing the
distances from a point to a collection of points and then using
argsort to find the indices of the closest points in the collection
"""
import matplotlib.pyplot as plt
import numpy as np

# Make 2 clusters. The first cluster is in the first
# 100 rows, the second cluster is in the next 100 rows
# centered at an offset of (10, 10)
N = 100
X = np.random.randn(N*2, 2)
X[100::, :] += np.array([10, 10])

q = np.array([3, 3])  # Query point
# How far is the query point from every other point
distances = np.zeros(N*2)
for i in range(N*2):
    x = X[i, :]  # Point under consideration is in the ith row of X
    distances[i] = np.sqrt(np.sum((x-q)**2))

# Find the nearest neighbor indices by using argsort
n_neighbors = 10
neighbors = np.argsort(distances)[0:n_neighbors]

plt.figure(figsize=(8, 8))
plt.scatter(X[:, 0], X[:, 1])
# Fix: plot the query point at (q[0], q[1]); previously q[0] was passed for
# both coordinates, drawing the marker at the wrong location.
plt.scatter(q[0], q[1], 40, marker='x')
# Plot ten nearest neighbors
print(neighbors)
plt.scatter(X[neighbors, 0], X[neighbors, 1], 100, marker='*')
plt.show()
| true
|
9268c7c294a7c6e19210662d9ac256e49242e202
|
Python
|
HelloImKevo/PyAi-SelfDrivingCar
|
/src/app_logging.py
|
UTF-8
| 1,474
| 2.953125
| 3
|
[] |
no_license
|
"""
Logger object
=============
Different logging levels are available: debug, info, warning, error and critical.
"""
import logging
# Map each stdlib logging level to a single-letter, logcat-style tag.
# NOTSET doubles as the fallback tag ('V') for unknown levels.
_level_to_tag_map = {
    logging.CRITICAL: 'E',
    logging.ERROR: 'E',
    logging.WARNING: 'W',
    logging.INFO: 'I',
    logging.DEBUG: 'D',
    logging.NOTSET: 'V',
}
class ConsoleFormatter(logging.Formatter):
    """Formatter that rewrites the record's level name to a one-letter tag."""

    def __init__(self, message_format, timestamp_format):
        super().__init__(fmt=message_format, datefmt=timestamp_format)

    def format(self, record: logging.LogRecord):
        # Replace e.g. "INFO" with "I" before delegating to the base class.
        record.levelname = _get_tag(record.levelno)
        return super().format(record)
def _get_tag(log_level) -> str:
    """Return the one-letter tag for a logging level.

    Unknown levels fall back to the NOTSET tag ('V'). Simplified from a
    membership test plus second lookup to a single dict.get with default.
    """
    return _level_to_tag_map.get(log_level, _level_to_tag_map[logging.NOTSET])
def get_logger(logger_name: str) -> logging.Logger:
    """Create (or fetch) a DEBUG-level logger with one console handler.

    Fix: handlers are attached only on the first call for a given name.
    logging.getLogger returns the same Logger object for the same name, so
    the previous version appended a fresh handler on every call, duplicating
    every log line.
    """
    logger = logging.getLogger(name=logger_name)
    logger.setLevel(level=logging.DEBUG)
    if not logger.handlers:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(level=logging.DEBUG)
        # 06-03 14:38:23.783/I: app.py:13 - Initializing app...
        formatter = ConsoleFormatter('%(asctime)s.%(msecs)d/%(levelname)s: %(filename)s:%(lineno)d - %(message)s',
                                     '%m-%d %H:%M:%S')
        console_handler.setFormatter(formatter)
        logger.addHandler(logging.NullHandler())
        logger.addHandler(console_handler)
    return logger
| true
|
902d87fe72769b0a52a76704ba4e94e71973ec2f
|
Python
|
ZF-1000/Python_Algos
|
/Урок 2. Практическое задание/task_3/task_3_1.py
|
UTF-8
| 1,285
| 4.15625
| 4
|
[] |
no_license
|
"""
3. Сформировать из введенного числа обратное по порядку входящих в него
цифр и вывести на экран. Например, если введено число 3486,
то надо вывести число 6843.
Подсказка:
Используйте арифм операции для формирования числа, обратного введенному
Пример:
Введите число: 123
Перевернутое число: 321
ЗДЕСЬ ДОЛЖНА БЫТЬ РЕАЛИЗАЦИЯ ЧЕРЕЗ ЦИКЛ
"""
while True:
    try:
        NUMBER = int(input('Введите число: '))
        INVERTED_NUMBER = 0
        # Peel digits off the end of NUMBER and push them onto the result.
        # NOTE(review): a negative input skips the loop and prints 0.
        while NUMBER > 0:
            DIGIT = NUMBER % 10  # last digit of the number
            NUMBER = NUMBER // 10  # drop the last digit
            INVERTED_NUMBER = INVERTED_NUMBER * 10  # shift result one place
            INVERTED_NUMBER = INVERTED_NUMBER + DIGIT  # append the digit
        print(f'Перевёрнутое число: {INVERTED_NUMBER}')
        break
    except ValueError:
        print('Некорректно введены данные!\n')
| true
|
166cd9cd8ec24cf28414672e56d4559e2d6779c9
|
Python
|
multipitch/prog1
|
/squareroot.py
|
UTF-8
| 6,968
| 3.921875
| 4
|
[] |
no_license
|
# squareroot.py
#
# contains two functions that iterate over the following function:
# x_k = (1/2) * [ x_(k-1) + a / x_(k-1) ]
# the first function, fsqrt, uses floating point arithmetic
# the second function, dsqrt, uses specified-precision decimal arithmetic
#
# additionally, results using the above functions are collected and graphed
# using matplotlib
#
# Author: Sean Tully
# Date: 23 Oct 2016
# Rev: 1.0
import matplotlib.pyplot as plt
import timeit
from decimal import *
import sys
from math import log
# set maximum number of iterations (shared by fsqrt and dsqrt below)
kmax = 100
# note there will therefore be a maximum of (kmax + 1) results, i.e. the
# original guess is x_0 and the maximum final solution is x_kmax
def fsqrt(a, kmax, eps=0):
    '''
    Finds square root of a number using Babylonian Method and floating point
    arithmetic
    Keyword Arguments:
        a (number): number for which square root is required
        kmax (int) : maximum number of iterations
        eps (number): user-specified epsilon (default = 0)
    Returns:
        results (list) : the set of results after each iteration (including
                         initial guess) (list of floats)
        conv (bool) : True if converged, False if not
    '''
    machine_eps = sys.float_info.epsilon
    current = float(a)          # first guess: a itself
    results = [current]         # record the k = 0 guess
    conv = False
    for _ in range(kmax):       # cap the number of iterations
        nxt = 0.5 * (current + a / current)   # Babylonian step
        results.append(nxt)
        # Converged when the step is within eps plus a few ulps of the result.
        if abs(nxt - current) <= eps + 4.0 * machine_eps * abs(nxt):
            conv = True
            break
        current = nxt
    return results, conv
def dsqrt(a, kmax, prec=getcontext().prec):
    '''
    Finds square root of a number using Babylonian Method and fixed-precision
    decimal arithmetic
    Keyword Arguments:
        a (number): number for which square root is required
        kmax (int): maximum number of iterations
        prec (int): decimal precision (defaults to existing setting)
    Returns:
        results (list) : the set of results after each iteration (including
                         initial guess) (list of Decimal objects)
        conv (bool) : True if converged, False if not

    Fix: the computation now runs inside a local decimal context, so the
    process-wide context precision is no longer mutated as a side effect.
    Per-call results are unchanged.
    '''
    with localcontext() as ctx:
        ctx.prec = prec                     # precision for this call only
        xold = Decimal(a)                   # take 'a' as first guess
        results = [xold]                    # record first guess
        conv = False
        for k in range(1, kmax + 1):        # cap the number of iterations
            xnew = (xold + a / xold) / Decimal(2)   # Babylonian step
            results.append(xnew)
            if Decimal.compare(xnew, xold) == 0:    # fixed point reached
                conv = True
                break
            xold = xnew
    return results, conv
# set a large value for 'a' (a perfect square so the exact root is known)
a = 268435456  # (2**14)**2
xknown = 16384  # 2**14

# run floating point solver
# fx = list of outputs after each iteration
# fconv: True if converged, False if not
fx, fconv = fsqrt(a, kmax)

# run decimal solver for a range of precisions
# dx = list of (list of outputs after each iteration) for range of precisions
# dconv = list of convergence test outputs for each precision
dx = []
dconv = []
p = [4,28,100,200,300,400]  # specify precisions to use in runs
for prec in p:  # loop for a range of precisions
    spam, eggs = dsqrt(a, kmax, prec)  # run decimal solver
    dx.append(spam)
    dconv.append(eggs)

# plot convergence for floating point and various fixed-precision decimal runs
# (plot of results as a function of number of iterations for various precisions)
s = ['.b-','vg-','*r-','+c-','xm-','1y-']  # styles to use
plt.plot(range(len(fx)), fx, 'ok-', label=r'$float$')  # plot float results
for i in range(len(p)):  # plot decimal results
    plt.plot(range(len(dx[i])), dx[i], s[i], label=(p[i]))
plt.xlabel(r'$k$',fontsize=16)  # add labels
plt.ylabel(r'$x_k$',fontsize=16)
plt.yscale('log')
plt.tick_params(axis='both', which='major', labelsize=10)  # size tick labels
plt.legend(title=r'$precision$',fontsize=12)
plt.plot()  # create plot
plt.savefig("fig4.png", format="png")  # export as png (comment said pdf)
plt.close('all')

# plot convergence for floating point and various fixed-precision decimal runs
# (plot of number of iterations to achieve convergence as a function of
# precision)
kconvs = []
for x in dx:
    kconvs.append(len(x)-1)  # iterations taken = results minus initial guess
#plt.plot(p, kconvs, 'ob-', label=r'$decimal$')
plt.plot([0,max(p)],[len(fx)-1,len(fx)-1], ',k--', label=r'$float$')
plt.scatter(p, kconvs, label=r'$decimal$')
plt.axis([0, max(p), 0, max(kconvs)+1])
plt.xlabel(r'$p$',fontsize=16)  # add labels
plt.ylabel(r'$k$',fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=10)  # size tick labels
plt.legend(title=r'$type$',fontsize=12, loc=4)
plt.plot()  # create plot
plt.savefig("fig5.png", format="png")  # export as png
plt.close('all')

# calculate relative errors against the known exact root
fe = []
for i in range(len(fx)):
    fe.append(abs(fx[i] - xknown) / xknown)
de = []
for i in range(len(dx)):
    de.append([])
    for j in range(len(dx[i])):
        de[i].append(abs(dx[i][j] - xknown) / xknown)

# plot some relative errors
s = ['.b-','vg-','*r-','+c-','xm-','1y-']  # styles to use
plt.plot(range(len(fe)), fe, 'ok-', label=r'$float$')  # plot float results
for i in range(len(p)):  # plot decimal results
    plt.plot(range(len(de[i])), de[i], s[i], label=(p[i]))
plt.xlabel(r'$k$',fontsize=16)  # add labels
plt.ylabel('relative error',fontsize=16)
plt.yscale('log')
plt.tick_params(axis='both', which='major', labelsize=10)  # size tick labels
plt.legend(title=r'$precision$',fontsize=12)
plt.plot()  # create plot
plt.savefig("fig6.png", format="png")  # export as png
plt.close('all')
| true
|
1929ba02461b965e22b433a59d73c9e78cea459a
|
Python
|
ArasBozk/Shortest-Common-Superstring
|
/experiment_run_time.py
|
UTF-8
| 8,596
| 2.9375
| 3
|
[] |
no_license
|
import time
import random
import math
from matplotlib import pyplot as plt
from tabulate import tabulate
run_size=100  # number of timed repetitions per configuration
def standardDeviation(results):
    """Population standard deviation of a list of numbers."""
    n = len(results)
    mean = sum(results) / n
    variance = sum(pow(r - mean, 2) for r in results) / n
    return math.sqrt(variance)
def standardError(standard_deviation, n):
    """Standard error of the mean for n samples: sd / sqrt(n)."""
    return standard_deviation / math.sqrt(n)
def runningTime(running_times):
    """Summary statistics for a list of measured running times.

    Returns [mean, population std dev, std error,
             90% CI lower, 90% CI upper, 95% CI lower, 95% CI upper].
    The t-values are hard-coded for ~100 samples.
    """
    N = len(running_times)
    m = sum(running_times) / N
    standard_dev = standardDeviation(running_times)
    standard_error = standardError(standard_dev, N)
    t_value_90 = 1.660
    t_value_95 = 1.984
    return [
        m,
        standard_dev,
        standard_error,
        m - t_value_90 * standard_error,
        m + t_value_90 * standard_error,
        m - t_value_95 * standard_error,
        m + t_value_95 * standard_error,
    ]
def Compress2strings(ind, edges):
    """Merge the two strings joined by edge `ind` (a -> b), in place.

    After merging, the pair behaves as a single node: edges leaving a and
    edges entering b are removed, and edges leaving b are redirected to
    leave the merged node (relabelled as a).
    """
    a = edges[ind][0]
    b = edges[ind][1]
    i = len(edges) - 1
    # Walk backwards so deletions do not shift indices still to be visited.
    while i != -1:
        if edges[i][0] == a:  # Remove edges start with a
            del edges[i]
        elif edges[i][1] == b:  # Remove edges end with b
            del edges[i]
        elif edges[i][0] == b:  # Edges which b goes from now go from the merged node
            if edges[i][1] == a:
                del edges[i]  # b -> a would be a self-loop after the merge
            else:
                edges[i][0] = a
        i = i - 1
    return
def overlap(a, b):
    """Length of the longest suffix of `a` that is a prefix of `b` (0 if none)."""
    pos = a.find(b[0])
    while pos != -1:
        if b.startswith(a[pos:]):
            return len(a) - pos
        pos = a.find(b[0], pos + 1)
    return 0
from itertools import permutations
def FindAllOverlaps(Set):
    """All directed overlap edges [i, j, weight] between distinct strings."""
    edges = []
    for i, j in permutations(range(len(Set)), 2):
        w = overlap(Set[i], Set[j])
        if w > 0:
            edges.append([i, j, w])
    return edges
def SCSS(Edges):  # GREEDY
    """Greedy shortest-common-superstring merge.

    Repeatedly merges the pair with the largest overlap until no edges
    remain; returns the total overlap length saved.
    """
    total_saved = 0
    while Edges:
        best_weight = 0
        best_index = -1
        for idx, edge in enumerate(Edges):
            if edge[2] > best_weight:
                best_weight = edge[2]
                best_index = idx
        total_saved += best_weight
        Compress2strings(best_index, Edges)
    return total_saved
def Eliminate_Substr(SS):
    """Remove, in place, every string that is a substring of another in SS."""
    i = 0
    while i != len(SS):
        t = i + 1
        while t != len(SS):
            if (SS[t] in SS[i]):
                del SS[t]  # SS[t] is contained in SS[i]; drop it
            elif (SS[i] in SS[t]):
                del SS[i]  # SS[i] is contained in SS[t]; redo this slot
                i -= 1
                break
            else:
                t += 1
        i += 1
    return
def Check(Str_Set, k):
    """Return True iff the greedy shortest common superstring of Str_Set
    has length <= k.

    Fix: the body previously read the module-level global `Strings` instead
    of the `Str_Set` parameter, so the function only worked when called on
    that global. All references now use the parameter.
    """
    Eliminate_Substr(Str_Set)
    Total_Len = sum(len(st) for st in Str_Set)
    E = FindAllOverlaps(Str_Set)
    SCSS_len = Total_Len - SCSS(E)
    return k >= SCSS_len
# --- Experiment 1: mean running time vs number of strings (5..100, step 5) ---
time_arr = []
size = []
stan_dev_arr = []
stan_err_arr = []
conf_lev_90 = []
conf_lev_95 = []
for i in range(20):
    running_times = []
    for m in range(run_size):
        start_time = time.time()
        Strings = []
        for x in range((i + 1) * 5):
            a = "{0:010b}".format(random.getrandbits(10))  # random 10-bit binary string
            Strings.append(a)
        no_of_strings = (i + 1) * 5
        k = 6 * no_of_strings
        Check(Strings, k)
        elapsed_time = time.time() - start_time
        running_times.append(elapsed_time)
    run_time_array = runningTime(running_times)
    time_arr.append(run_time_array[0])
    size.append(no_of_strings)
    stan_dev_arr.append((run_time_array[1]))
    stan_err_arr.append(run_time_array[2])
    # Format confidence bounds as fixed-point strings for the table output.
    run_time_array[3] = "{0:.5f}".format(run_time_array[3])
    run_time_array[4] = "{0:.5f}".format(run_time_array[4])
    run_time_array[5] = "{0:.5f}".format(run_time_array[5])
    run_time_array[6] = "{0:.5f}".format(run_time_array[6])
    conf_lev_90.append(str(run_time_array[3]) + "-" + str(run_time_array[4]))
    conf_lev_95.append(str(run_time_array[5]) + "-" + str(run_time_array[6]))
plt.plot(size, time_arr)
plt.title('Mean Time Comparison Based on Array Size for ' + str(run_size) + " Runs")
plt.xlabel('Array Size')
plt.ylabel('Mean Time')
plt.savefig('plot-array-size-'+str(run_size)+'.png', bbox_inches='tight', pad_inches=0.05)
headers = ["Array Size","Mean Time", "Standard Deviation", "Standard Error", "90% Confidence Level", "95% Confidence Level"]
data = []
for item in range(len(size)):
    data.append((size[item],time_arr[item],stan_dev_arr[item], stan_err_arr[item], conf_lev_90[item], conf_lev_95[item]))
print(tabulate(data, headers=headers))
data_arr = []
for i in range(len(data)):
    data_arr.append(data[i])
plt.cla()
plt.clf()
plt.title('Mean Time Based on Array Size for ' + str(run_size) + " Runs")
the_table = plt.table(cellText=data_arr, colLabels=headers, loc='center')
for x in range(len(headers)):
    the_table.auto_set_column_width(x)
the_table.auto_set_font_size(False)
the_table.set_fontsize(5)
the_table.scale(1, 1)
# Removing ticks and spines enables you to get the figure only with table
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
for pos in ['right','top','bottom','left']:
    plt.gca().spines[pos].set_visible(False)
plt.savefig('table-array-size-'+str(run_size)+'.png', bbox_inches='tight', pad_inches=0.05)
##### --- Experiment 2: mean running time vs string length (5..100, step 5) ---
time_arr = []
str_len = []
stan_dev_arr = []
stan_err_arr = []
conf_lev_90 = []
conf_lev_95 = []
for str_size in range(5, 105, 5):
    running_times_size = []
    no_of_strings = 20
    k = 6 * no_of_strings
    count = 0
    for i in range(run_size):
        Strings = []
        start_time = time.time()
        for x in range(20):
            # Build a zero-padded binary format of width str_size.
            str_shift = "{0:0" + str(str_size) + "b}"
            a = str_shift.format(random.getrandbits(str_size))
            Strings.append(a)
        Check(Strings, k)
        elapsed_time = time.time() - start_time
        running_times_size.append(elapsed_time)
    run_time_array = runningTime(running_times_size)
    time_arr.append(run_time_array[0])
    str_len.append(str_size)
    stan_dev_arr.append((run_time_array[1]))
    stan_err_arr.append(run_time_array[2])
    # Format statistics as fixed-point strings for the table output.
    run_time_array[1] = "{0:.5f}".format(run_time_array[1])
    run_time_array[2] = "{0:.5f}".format(run_time_array[2])
    run_time_array[3] = "{0:.5f}".format(run_time_array[3])
    run_time_array[4] = "{0:.5f}".format(run_time_array[4])
    run_time_array[5] = "{0:.5f}".format(run_time_array[5])
    run_time_array[6] = "{0:.5f}".format(run_time_array[6])
    conf_lev_90.append(str(run_time_array[3]) + "-" + str(run_time_array[4]))
    conf_lev_95.append(str(run_time_array[5]) + "-" + str(run_time_array[6]))
plt.cla()
plt.clf()
plt.plot(str_len, time_arr)
plt.title('Mean Time Comparison Based on String Size for ' + str(run_size) + " Runs")
plt.xlabel('String size')
plt.ylabel('Mean Time')
plt.savefig('plot-string-size-'+str(run_size)+'.png', bbox_inches='tight', pad_inches=0.05)
headers = ["String Size","Mean Time", "Standard Deviation", "Standard Error", "90% Confidence Level", "95% Confidence Level"]
data = []
for m in range(len(str_len)):
    data.append((str_len[m],time_arr[m], stan_dev_arr[m], stan_err_arr[m], conf_lev_90[m], conf_lev_95[m]))
print(tabulate(data, headers=headers))
data_arr = []
for i in range(len(str_len)):
    data_arr.append(data[i])
plt.cla()
plt.clf()
plt.title('Mean Time Based on String Size for ' + str(run_size) + " Runs")
the_table = plt.table(cellText=data_arr, colLabels=headers, loc='center')
for x in range(len(headers)):
    the_table.auto_set_column_width(x)
the_table.auto_set_font_size(False)
the_table.set_fontsize(5)
the_table.scale(1, 1)
# Removing ticks and spines enables you to get the figure only with table
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tick_params(axis='y', which='both', right=False, left=False, labelleft=False)
for pos in ['right','top','bottom','left']:
    plt.gca().spines[pos].set_visible(False)
plt.savefig('table-string-size-'+str(run_size)+'.png', bbox_inches='tight', pad_inches=0.05)
| true
|
1178dcf8efa461fe724fd06b894c8024fc8993f0
|
Python
|
scotta42/MachineLearningFinal
|
/Emotion-detection/src/writeto_file.py
|
UTF-8
| 2,060
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
import csv
import numpy as np
# {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
# Index -> emotion mapping used throughout:
# {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
emoteCounter = [0, 0, 0, 0, 0, 0, 0]  # per-emotion occurrence counts (module-global)
emoteLTG = []  # filled by get_median()
emotion_data = ""
emoteNames = ["Angry", "Disgusted", "Fearful", "Happy", "Neutral", "Sad", "Surprised"]
def writeto_file(emotionData):
    """Write the raw emotion data plus per-emotion counts to emotiondata.txt.

    Fixes: the counts string was reassigned on every loop iteration, so only
    the last emotion's count ever reached the file; it is now accumulated.
    The file is closed via a context manager instead of manual close().
    """
    dataResult = "" + emotionData
    get_counts(emotionData)
    emoteCounts = ""
    for i in range(7):
        emoteCounts += "\n" + str(emoteNames[i]) + ": Number of occurrences: " + str(emoteCounter[i]) + "\n"
    with open("emotiondata.txt", "w") as text_file:
        text_file.write(dataResult + emoteCounts)
def get_counts(emotionData):
    """Split the ';'-separated emotion string and pass each entry to switch_case.

    NOTE(review): `emotion_data` here is a local, not the module-level
    global of the same name; verify which one callers expect.
    """
    emotion_data = emotionData
    data = []
    data = emotion_data.split(';')
    for i in data:
        currEmote = i
        switch_case(currEmote)
        print("emote Added")
def get_median():
    """Attempt to order emotions by count into emoteLTG.

    NOTE(review): broken as written and never safely callable:
    `currIndex = j` reads `j` before it is assigned (NameError), and the
    bare expressions `j + 1` / `i + 1` do not increment anything, so the
    loops cannot make progress. Needs a rewrite before use.
    """
    lowestVal = ""
    counterLen = len(emoteCounter)-1
    currIndex = j  # NameError: j is not defined yet
    i = 0
    j = i+1
    lowestVal = i
    while i < counterLen:
        j = i+1
        while i < counterLen:
            lowestVal = emoteCounter[i]
            if emoteCounter[j] > lowestVal:
                j+1  # no-op: does not increment j
            else:
                lowestVal = emoteCounter[j]
                j = counterLen
            i+1  # no-op: does not increment i
        emoteLTG.append(switch_case_rev(j))
        i+1  # no-op
    print(emoteLTG)
def switch_case(argument):
    """Increment the global counter for the named emotion.

    Fix: the previous version built a dict whose values were
    `emoteCounter[i] + 1` and then discarded it, so no counter was ever
    actually incremented. Unknown emotion names are ignored.
    """
    index_of = {
        "Angry": 0,
        "Disgusted": 1,
        "Fearful": 2,
        "Happy": 3,
        "Neutral": 4,
        "Sad": 5,
        "Surprised": 6,
    }
    if argument in index_of:
        emoteCounter[index_of[argument]] += 1
def switch_case_rev(argument):
    """Map an emotion index (0-6) back to its name; None for unknown indices.

    Fix: the previous version returned the whole mapping dict regardless of
    `argument` instead of looking the index up.
    """
    switcher = {
        0: "Angry",
        1: "Disgusted",
        2: "Fearful",
        3: "Happy",
        4: "Neutral",
        5: "Sad",
        6: "Surprised",
    }
    return switcher.get(argument)
| true
|
02b4f8a9b4a4eaf1033d16fee93d87b231ca6f97
|
Python
|
WPKENAN/Junior_homework
|
/pr/perceptron.py
|
UTF-8
| 2,856
| 2.796875
| 3
|
[] |
no_license
|
#coding:utf8
from numpy import *
from matplotlib.pyplot import *
from matplotlib.animation import *
import sys
# Space-separated training data; each row is read as (x1, x2, label) below.
datapath="perceptrondata.txt"
data=genfromtxt(datapath,delimiter=' ');
#print min(data[1,:]),max(data[1,:])
# Sign function: +1 for positive input, -1 otherwise (zero maps to -1).
def sign(v):
    return 1 if v > 0 else -1
def training(train_datas):
    """Train a 2-D linear classifier by randomly sampling rows.

    Returns (weight, bias, wb) where wb is the recorded history of
    [w0, w1, bias] after each update.

    NOTE(review): the update fires when y*predict >= 0 and subtracts
    y*lr*x, which is the opposite of the textbook perceptron rule
    (update on misclassification, adding y*lr*x) -- confirm intent.
    """
    weight=[1,1]
    bias=0;
    learning_rate=0.05
    wb=[];
    # train_num=int(raw_input("train num: "))
    # print train_datas
    train_num=10000
    for i in range(train_num):
        m,n=shape(train_datas);
        # Pick a random training row (numpy randint: upper bound exclusive).
        index=random.randint(0,m)
        # index=i%shape(train_datas)[0]
        train=train_datas[index,:];
        # train=random.choice(train_datas);
        # print train
        x1,x2,y=train;
        predict=sign(weight[0]*x1+weight[1]*x2+bias)
        if y*predict>=0:
            weight[0]=weight[0]-y*learning_rate*x1;
            weight[1]=weight[1]-y*learning_rate*x2;
            bias=bias+learning_rate*y;
            wb+=[[weight[0],weight[1],bias]];
    return weight,bias,array(wb);
# Plot window sized to the data extent with a 1-unit margin on each side.
fig=figure();
window=fig.add_subplot(111)
window.axis([min(data[:,0])-1,max(data[:,0])+1,min(data[:,1])-1,max(data[:,1])+1])
def test(data):
    """Interactively classify user-entered (x1,x2) points with a trained model.

    Fix: training() returns three values (weight, bias, history); the old
    two-name unpacking raised ValueError on every call. Enter '1' to quit.
    """
    weight, bias, _wb = training(data)
    while True:
        test_data = []
        data = raw_input("Enter data test (x1,x2):")
        if data == '1':
            break
        test_data += [int(n) for n in data.split(',')]
        predict = sign(weight[0]*test_data[0]+weight[1]*test_data[1]+bias)
        # print predict
#
def picture(weight,bias):
    """Scatter-plot the dataset: red for positive labels, black otherwise."""
    m,n=shape(data);
    for i in range(m):
        if data[i,2]>0:
            window.scatter(data[i,0],data[i,1],color='red');
        else:
            window.scatter(data[i,0],data[i,1],color='black');
    # x=linspace(min(data[:,0]),max(data[:,1]),1000);
    # window.plot(x,weight[0]/weight[1]*x-bias/weight[1])
    # show()
# Train, then plot the data and the final decision line. (Python 2 prints.)
weight,bias,wb=training(data)
#print shape(wb)
print wb
picture(weight,bias)
x=linspace(min(data[:,0]),max(data[:,0]),1000);
#print x
#x=list(x)
#print 'x',x
m_wb,n_wb=shape(wb);
y=[]
# One candidate decision line per recorded (w0, w1, b); skip w1 == 0 to
# avoid dividing by zero (a vertical line).
for i in range(m_wb):
    if wb[i,1]==0:
        # y.append()
        continue
    y.append(-x*wb[i,0]/wb[i,1]-wb[i,2]/wb[i,1])
print "start"
#print y
y=array(y)
if shape(y)[0]==0:
    # No usable line: fall back to a vertical separator between the first
    # kase points and the rest of the data.
    kase=5
    mid=1/2.0*(max(data[0:5,0])+min(data[kase:shape(data)[0],0]))
    # print min(data[1,:]),max(data[1,:])
    plot([mid,mid],[min(data[:,1]),max(data[:,1])])
    show()
else:
    p=min(data[:,0])
    q=max(data[:,0])
    # line,=window.plot(x,y[0,:])
    def update(data):
        # Animation callback (unused: FuncAnimation call is commented out).
        line.set_xdata(linspace(p,q,1000));
        line.set_ydata(data);
        return line
    # ani = FuncAnimation(fig, update, y, interval=200)
    plot(x,y[shape(y)[0]-1,:])
    show()
| true
|
676bfbfd315ae885de6f144b90c7b50a7e3b8f8a
|
Python
|
896385665/crabby
|
/day03/02-sel_form.py
|
UTF-8
| 1,981
| 2.640625
| 3
|
[] |
no_license
|
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, EqualTo
app = Flask(__name__)
app.secret_key = 'bvdhkbdvskhbvdsh'  # any value works; needed for flash() / CSRF
# The root route uses a plain HTML form; /demo1 uses a Flask-WTF form, so the
# two submission/validation styles can be compared.
'''
/根节点是普通表单、 demo1是WTF表单,使用两种表单提交,验证其过程。
'''
# Registration form (declarative Flask-WTF form class).
class RegisterForm(FlaskForm):
    # DataRequired: field must be non-empty; EqualTo: must match `password`.
    username = StringField('用户名:', validators=[DataRequired()])
    password = PasswordField('密码:', validators=[DataRequired()])
    password2 = PasswordField('确认密码:', validators=[DataRequired(), EqualTo('password', '密码填入的不一致')])
    submit = SubmitField('提交')
@app.route('/demo1', methods=["get", "post"])
def demo1():
    """WTF-form variant: validation is driven by the RegisterForm validators."""
    regist_form = RegisterForm()
    # Built-in check: runs every validator declared on RegisterForm.
    if regist_form.validate_on_submit():
        # 1. Pull the submitted registration fields.
        username = request.form.get("username")
        password = request.form.get("password")
        password2 = request.form.get('password2')
        # 2. Perform the registration step.
        print("%s %s %s" % (username, password, password2))
        return "注册成功"
    else:
        if request.method == 'POST':
            return '获得post请求'
        return render_template('html/04-tempWTF.html', form=regist_form)
@app.route('/', methods=['GET', 'POST'])
def get_form():
    """Plain-HTML form variant: fields are validated by hand with flash()."""
    if request.method == 'POST':
        username = request.form.get('username')
        password = request.form.get('password')
        refirmpwd = request.form.get('refirmpwd')
        print(username)
        if not all([username, password, refirmpwd]):
            flash('参数不完整')
        elif password != refirmpwd:
            flash('密码不一致')
        else:
            return 'success'
    return render_template('html/02-form.html')
if __name__ == '__main__':
    app.run(debug=True)  # development server only; disable debug in production
| true
|
fb81b3e016fa55268ae786478fe30168e543792e
|
Python
|
Irene-GM/02_Tick_Dynamics
|
/predictions_NL/plot_sites_prediction_year_gdal.py
|
UTF-8
| 1,002
| 2.65625
| 3
|
[] |
no_license
|
import gdal
import datetime
import numpy as np
import matplotlib.pyplot as plt
def generate_dates(year):
    """Yield one datetime per day of `year`, starting at January 1st.

    Fix: the day count is now derived from the calendar instead of a
    hard-coded 365, so leap years correctly yield 366 dates. Non-leap
    years behave exactly as before.
    """
    basedate = datetime.datetime(year, 1, 1)
    days_in_year = (datetime.datetime(year + 1, 1, 1) - basedate).days
    for x in range(days_in_year):
        yield basedate + datetime.timedelta(days=x)
def format_ints(m, d):
    """Zero-pad month and day to two-character strings (e.g. 3 -> "03").

    Simplified: str.zfill(2) already leaves two-digit values unchanged,
    so the previous if/else branches were redundant. Behavior identical.
    """
    return str(m).zfill(2), str(d).zfill(2)
# Extract the predicted value at a single pixel from each daily 2014
# prediction GeoTIFF and plot the resulting one-year time series.
gendates = generate_dates(2014)
path_in = r"/home/irene/PycharmProjects/NL_predictors/data/versions/v8/maps_v8/2014/{0}"
basename = "NL_Map_RF_NL_Prediction_{0}_{1}_{2}.tif"
l = []
for date in gendates:
    m, d = format_ints(date.month, date.day)
    name = basename.format(date.year, m, d)
    path = path_in.format(name)
    print(path)
    tif = gdal.Open(path)
    # Read a 1x1 window at pixel offset (225, 156) from band 1.
    data = tif.GetRasterBand(1).ReadAsArray(225, 156, 1, 1)[0][0]
    l.append(data)
xlinspace = np.linspace(0, len(l)-1, len(l))
plt.plot(xlinspace, np.array(l), "-")
plt.show()
| true
|
d2abcff5bda672c7a96065aa5f6a67573e478539
|
Python
|
miloczek/Projekty-II-UWR
|
/MIA/kefa_and_park/case_of.py
|
UTF-8
| 212
| 3.28125
| 3
|
[] |
no_license
|
# Read n and a string of characters; count '0' characters vs all others,
# then print n minus twice the smaller of the two counts.
n = int(input())
chars = list(input())
zeros = 0
others = 0
for ch in chars:
    if ch == '0':
        zeros += 1
    else:
        others += 1
print(n - 2 * min(zeros, others))
| true
|
f9ba7c8114d679318662905181634f8e0690b47b
|
Python
|
BenTheNetizen/StockTools
|
/stockscraper/models.py
|
UTF-8
| 1,160
| 2.75
| 3
|
[] |
no_license
|
from django.db import models
# Create your models here.
from django.urls import reverse
import uuid #Required for unique book instances
#Counter model is used to numerate the entries in the table returned in the StockScraper tool
class Counter:
    """Row counter used to number the table entries in the StockScraper view.

    Improvement: `count` is now initialised per instance in __init__ (the
    class attribute is kept for backward compatibility with any code that
    read it directly).
    """
    count = 0  # kept: legacy class-level default

    def __init__(self):
        self.count = 0

    def increment(self):
        """Advance the counter and return its new (1-based) value."""
        self.count += 1
        return self.count
# Model for blog posts.
class Blog(models.Model):
    title = models.CharField(max_length=200)
    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the blog post')
    date = models.DateField()
    post = models.TextField(max_length=4000)

    class Meta:
        ordering = ['date']  # oldest posts first by default

    def __str__(self):
        """Human-readable representation (used e.g. in the admin)."""
        return self.title

    def get_absolute_url(self):
        """Canonical URL for this post via the named 'blog-detail' route."""
        return reverse('blog-detail', args=[str(self.id)])
# Model for visitor count and search count.
class VisitorCount(models.Model):
    visitor_count = models.IntegerField()
    search_count = models.IntegerField()

    def increment_visitors(self):
        """Bump and return the visitor counter (not persisted here; caller saves)."""
        self.visitor_count += 1
        return self.visitor_count

    def increment_searches(self):
        """Bump and return the search counter (not persisted here; caller saves)."""
        self.search_count += 1
        return self.search_count
| true
|
baffd303172949c03be1717fce93cd7f7f08fb05
|
Python
|
studybar-ykx/python
|
/画蛇.py
|
UTF-8
| 495
| 3.84375
| 4
|
[] |
no_license
|
import turtle
def drawSnake(rad, angle, len, neckrad):
    """Draw a snake of `len` S-shaped body segments, then a neck and head.

    NOTE(review): the parameter name `len` shadows the builtin len();
    kept as-is for call compatibility, but worth renaming.
    """
    for i in range(len):
        # One S-segment: an arc one way, then the mirror arc back.
        turtle.circle(rad, angle)
        turtle.circle(-rad, angle)
    turtle.circle(rad,angle/2)
    turtle.fd(rad)  # neck
    turtle.circle(neckrad+2, 180)  # head
    turtle.fd(rad*2/3)
def main():
    """Set up the turtle window/pen and draw the snake."""
    turtle.setup(1300, 800, 0, 0)  # 1300x800 window at the screen's top-left
    pythonsize = 30
    turtle.pensize(pythonsize)
    turtle.pencolor("blue")
    turtle.seth(-40)  # initial heading: 40 degrees below horizontal
    drawSnake(40, 80, 5, pythonsize/2)
    print(pow(2,10))
    # pow(a, b) is a raised to the power b (translated comment)
main()
| true
|
8e7ce6cb7b1bc07fde63d87922ec905b607bad91
|
Python
|
twrdyyy/make-it-from-scratch
|
/machine_learning/batch_sampling/batch_sampling.py
|
UTF-8
| 685
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from typing import Generator, List
# python generator that yields samples of given dataset e.g.
# dataset = np.zeros((100, 10))
# for batch in sampling(dataset):
#     print(len(batch))
#
# 32
# 32
# 32
# 4
def sampling(dataset: List, batch_size: int = 32) -> Generator:
    """Yield consecutive row-batches of `dataset`, each of at most batch_size.

    Fixes: the tail check compared against a hard-coded 32 instead of
    `batch_size`, and a stray no-op string followed the assert; since numpy
    slicing already clamps at the end of the array, the loop now just
    slices (behavior unchanged for every batch_size).
    """
    assert isinstance(dataset, np.ndarray), (
        "we go through provided dataset and we cut it into batch_size size samples"
    )
    for start in range(0, len(dataset), batch_size):
        yield dataset[start : start + batch_size, ...]
| true
|
9868a770bd319ca21e2249165b558d1230760ffe
|
Python
|
DanP01/cp1404_practicals
|
/prac_02/files.py
|
UTF-8
| 496
| 4.25
| 4
|
[] |
no_license
|
# 1: Ask for a name and write a greeting line to name.txt.
user_name = 'name.txt'
name_file = open(user_name, 'w')
enter_name = input("Please enter name: ")
print(" Your name is: {} ".format(enter_name), file = name_file)
name_file.close()
# 2: Read the greeting back and display it.
read_name_file = open('name.txt', 'r')
file_to_read = read_name_file.read().strip()
read_name_file.close()
print(file_to_read)
# 3: Sum the first two numbers stored one-per-line in numbers.txt.
in_file = open("numbers.txt", "r")
first_number = int(in_file.readline())
second_number = int(in_file.readline())
in_file.close()
print(first_number + second_number)
| true
|
c1a3b1f8b0da606a1e662f1e164f2ee7bc9c2405
|
Python
|
weiting1608/Leetcode
|
/3 longest substring without repeating characters.py
|
UTF-8
| 2,392
| 3.796875
| 4
|
[] |
no_license
|
class Solution():
    """LeetCode 3: length of the longest substring without repeating chars."""

    def lengthOfLongestSubstring(self, s: str) -> int:
        """Sliding window over a {char: last_index} map.

        O(n) time, O(min(n, alphabet)) space.

        Fix: the original method contained a second, unreachable
        sliding-window implementation after the first `return` (plus a
        commented-out O(n^3) brute force); the dead code is removed and
        the reachable hashmap approach is kept unchanged.
        """
        last_seen = {}
        result = start = 0
        for i, ch in enumerate(s):
            if ch in last_seen:
                # Move the window start just past the previous occurrence,
                # but never backwards: for "abba", when the second 'a'
                # arrives the start must stay after the 'b's.
                candidate = last_seen[ch] + 1
                if candidate > start:
                    start = candidate
            length = i - start + 1
            if length > result:
                result = length
            # Record (or refresh) the last index of this character.
            last_seen[ch] = i
        return result
# Smoke test: "abba" -> 2 ("ab" / "bb" / "ba" are the longest unique runs).
sol = Solution()
print(sol.lengthOfLongestSubstring("abba"))
| true
|
ce17a065d82997b5efa6a3bad159a76b59f053d3
|
Python
|
nuke7/python
|
/web_request/web_req.py
|
UTF-8
| 187
| 2.84375
| 3
|
[] |
no_license
|
# Fetch a demo JSON endpoint and print the id of each returned item.
import requests
url = 'https://my-json-server.typicode.com/typicode/demo/comments'
x = requests.get(url)
print(x.json())
my_object = x.json()  # presumably a list of dicts each carrying an "id" — TODO confirm schema
for o in my_object:
    print(o["id"])
| true
|
3d08281c7373a87cb97f18f39f0595aebbd54fba
|
Python
|
AdamArena/LUNA
|
/Status.py
|
UTF-8
| 1,701
| 3.5
| 4
|
[] |
no_license
|
import RPi.GPIO as GPIO
import time
# Use physical-pin (BOARD) numbering for every GPIO call in this module.
GPIO.setmode(GPIO.BOARD)
class Status:
    """Drives the robot's three status LEDs (search / collect / return).

    At most one LED is lit at a time: ``update_status`` selects it by a
    one-letter code and ``strobe`` cycles through all three repeatedly.

    Fix: removed the redundant class-level ``import time`` (the module
    already imports ``time`` at top level; the class-level copy only
    polluted the class namespace).
    """

    def __init__(self):
        GPIO.setmode(GPIO.BOARD)
        # BOARD pin numbers of the three LEDs
        self.search_LED = 3    # 's' = searching (white)
        self.collect_LED = 5   # 'c' = collecting (yellow)
        self.return_LED = 11   # 'r' = returning (green)
        GPIO.setup(self.search_LED, GPIO.OUT)
        GPIO.setup(self.collect_LED, GPIO.OUT)
        GPIO.setup(self.return_LED, GPIO.OUT)
        # start with every LED off
        GPIO.output(self.search_LED, GPIO.LOW)
        GPIO.output(self.collect_LED, GPIO.LOW)
        GPIO.output(self.return_LED, GPIO.LOW)

    def update_status(self, status):
        """Light the LED matching *status* ('s', 'c' or 'r').

        All LEDs are switched off first, so an unknown code simply
        leaves everything dark.
        """
        GPIO.output(self.search_LED, GPIO.LOW)
        GPIO.output(self.collect_LED, GPIO.LOW)
        GPIO.output(self.return_LED, GPIO.LOW)
        if status == 's':      # s = searching
            GPIO.output(self.search_LED, GPIO.HIGH)
        elif status == 'c':    # c = collecting
            GPIO.output(self.collect_LED, GPIO.HIGH)
        elif status == 'r':    # r = returning
            GPIO.output(self.return_LED, GPIO.HIGH)

    def strobe(self):
        """Cycle search -> collect -> return five times, 0.2 s per step."""
        for _ in range(5):
            for code in ('s', 'c', 'r'):
                self.update_status(code)
                time.sleep(0.2)
if __name__ == '__main__':
    # Interactive harness: keys 1-3 light the matching LED,
    # key 4 runs the strobe "dance".
    status = Status()
    lst = ['s', 'c', 'r']
    while True:
        val = input("1-3 input is LEDs 1-3. 4 is dance for 3 seconds: ")
        if val in ['1', '2', '3']:
            status.update_status(lst[int(val)-1])
        elif val == '4':
            status.strobe()
| true
|
52dc68d84b80c33c9a396be673d1551ddf080578
|
Python
|
gtmanfred/Euler
|
/e003.py
|
UTF-8
| 605
| 3.171875
| 3
|
[] |
no_license
|
from script.maths import isprime2
from script.sieve import sieve
def e003(num=600851475143):
    """Return the largest prime factor of *num* that is <= sqrt(num).

    NOTE(review): falls through and returns None when no sieve prime
    divides num (e.g. num prime) — confirm callers tolerate that.
    """
    p = sieve(round(num**.5))  # project-local sieve: primes up to sqrt(num)
    for i in p[::-1]:  # try the largest candidates first
        if num%i:continue
        else:return i
def Euler_3(num=600851475143):
    """Return the largest prime factor of *num* via repeated trial division.

    NOTE(review): after each successful division the counter is reset to
    2 and then immediately incremented to 3 by the trailing ``i+=1`` —
    the scan therefore restarts at 3, not 2; it still ends with the
    correct maximum in the tested cases, but confirm before reuse.
    """
    primes = []
    i=2
    while i <= num:
        if num%i ==0 and isprime2(i):  # isprime2 is a project-local primality test
            num = num//i
            primes = primes +[i]
            i = 2  # restart the scan (effectively from 3, see note above)
        i+=1
    return max(primes)
def isprime(n):
    """Return True iff *n* is prime.

    Fixes the original, which returned True for 0, 1 and every negative
    number (the loop body never ran), and trial-divided all the way up
    to n.  Divisors are now checked only up to sqrt(n).
    """
    if n < 2:
        # 0, 1 and negatives are not prime
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
if __name__=='__main__':
    # e003 (sieve-based) is kept for reference; the trial-division
    # version is the one actually run.
    #print(e003())
    print(Euler_3())
| true
|
7510c93759bfcd7a5bef3e28667550b7d557a7b1
|
Python
|
newrain7803/24Solver
|
/batch03_kelompok45.py
|
UTF-8
| 430
| 2.515625
| 3
|
[] |
no_license
|
# Read four digits from the input file, solve the 24-style puzzle with the
# project-local Solve(), and write the resulting expression to the output file.
from backend import *
import sys
import re
inFile = sys.argv[1]   # path to the puzzle input
outFile = sys.argv[2]  # path for the solution output
sol = []               # filled in-place by Solve(): three operators + result
with open(inFile,'r+') as i:
    lines = i.readline()
    array = [int(s) for s in lines.split() if s.isdigit()]
Solve(array,sol)
# Interleave the four numbers with the three operators, e.g. "1+2*3-4=24".
lines = str(array[0]) + str(sol[0]) + str(array[1]) + str(sol[1]) + str(array[2]) + str(sol[2]) + str(array[3]) + "=" + str(sol[3])
with open(outFile,'w') as o:
    # NOTE(review): `lines` is a string, so this writes it one character
    # at a time — equivalent to o.write(lines).
    for line in lines:
        o.write(line)
| true
|
1db45f50c38a565a6d07a24276d31d4d804e9f1f
|
Python
|
Innokutman/py-learn
|
/alphabeticShift.py
|
UTF-8
| 383
| 3.09375
| 3
|
[] |
no_license
|
# https://app.codesignal.com/arcade/intro/level-6/PWLT8GBrv9xXy4Dui
def alphabeticShift(i):
    """Shift every character of *i* to the next letter of the alphabet,
    wrapping 'z' back around to 'a'."""
    shifted = []
    for ch in i:
        shifted.append('a' if ch == 'z' else chr(ord(ch) + 1))
    return "".join(shifted)
# from string import ascii_lowercase as a
# def alphabeticShift(s):
# return "".join([a[a.find(i)-25] for i in s])
| true
|
7667d719bf8b6f9f801f8e4dc3e719e8ba860154
|
Python
|
Wojtbart/Python_2020
|
/Zestaw4/4_7.py
|
UTF-8
| 562
| 4.1875
| 4
|
[] |
no_license
|
# 4.7
def flatten(sequence):
    """Return a flat list of every atomic element in *sequence*,
    recursing into nested lists and tuples of any depth."""
    result = []
    for element in sequence:
        if isinstance(element, (list, tuple)):
            # recurse into nested containers and splice their contents in
            result.extend(flatten(element))
        else:
            result.append(element)
    return result
if __name__ == "__main__":
seq = [1, (2, 3), [], [4, (5, 6, 7)], 8, [9]]
assert flatten(seq) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(flatten(seq)) # [1,2,3,4,5,6,7,8,9]
| true
|
c0d201354d396bf28d777f51daf4e3fd82e98eec
|
Python
|
QitaoXu/Lintcode
|
/Alog/class4/exercises/queue.py
|
UTF-8
| 635
| 4.15625
| 4
|
[] |
no_license
|
class MyQueue:
    """FIFO queue backed by a list plus a moving head index.

    Polled slots are never reclaimed, so storage grows with the total
    number of adds — fine for short-lived queues.
    """

    def __init__(self):
        # Initialize the queue: list storage plus the head position.
        self.elements = []  # backing storage for the queued items
        self.pointer = 0    # index of the current head element

    def size(self):
        """Return the number of items currently in the queue."""
        return len(self.elements) - self.pointer

    def empty(self):
        """Return True when the queue holds no items."""
        return self.size() == 0

    def add(self, e):
        """Append *e* at the tail of the queue."""
        self.elements.append(e)

    def poll(self):
        """Remove and return the head item, or None when the queue is empty."""
        if self.empty():
            return None
        head = self.elements[self.pointer]
        self.pointer += 1
        return head
| true
|
e52becf0600584b5fa3f718166f6fd122c044541
|
Python
|
GoKarolis/RealEstateFinder
|
/get_user_input.py
|
UTF-8
| 543
| 2.921875
| 3
|
[] |
no_license
|
import tkinter as tk
from tkinter import simpledialog
# Create a hidden root window so the dialog boxes below can be shown
# without an empty main window appearing.
root = tk.Tk()
root.withdraw()
def get_prices():
    """Prompt the user via dialog boxes for minimum and maximum price.

    Returns both answers as strings (None for a cancelled dialog).
    """
    bounds = (
        simpledialog.askstring(title="Price", prompt="What's minimum price?"),
        simpledialog.askstring(title="Price", prompt="What's maximum price?"),
    )
    return bounds
def get_size():
    """Prompt the user via dialog boxes for minimum and maximum size.

    Returns both answers as strings (None for a cancelled dialog).
    """
    bounds = (
        simpledialog.askstring(title="Size", prompt="What's minimum size?"),
        simpledialog.askstring(title="Size", prompt="What's maximum size?"),
    )
    return bounds
| true
|
3d58c58f31a73d77109ecf8b7f7af9d5158f2b07
|
Python
|
campbead/LoZscraper
|
/scraper/scrapeLOZ.py
|
UTF-8
| 27,729
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
from PIL import Image
import pytesseract
import argparse
import cv2
import os
import imutils
import numpy as np
import sqlite3 as lite
import sys
import time
import math
import csv
def get_other_info(time, video):
    """Return [full_hearts, total_hearts, rubies, keys, bombs] read from
    the frame at *time* (ms) in *video*.

    Keyword arguments:
    time -- the time (ms) to query
    video -- the cv2.VideoCapture object

    Fix: the original seeked and read on the module-level global
    ``vidcap`` and silently ignored the ``video`` parameter; it now uses
    the parameter as documented.
    """
    video.set(cv2.CAP_PROP_POS_MSEC, time)
    success, image = video.read()
    # fixed HUD pixel regions (rows, columns) within the frame
    rubie_image = image[71:91, 697:777]
    key_image = image[112:134, 722:751]
    bomb_image = image[133:156, 722:751]
    full_hearts, total_hearts = get_num_hearts(image)
    rubies = get_number_text(rubie_image, 'multi')
    keys = get_number_text(key_image, 'single')
    bombs = get_number_text(bomb_image, 'single')
    return [full_hearts, total_hearts, rubies, keys, bombs]
def get_num_hearts(image):
    """Returns (full_hearts, total_hearts) counted from the HUD.

    Keyword arguments:
    image -- the full video frame; the heart region is cropped internally

    Full and empty hearts are isolated by color range (presumably BGR,
    as produced by cv2 reads — TODO confirm), then contours are
    classified as whole or half hearts by area.
    """
    # definitions: color ranges and contour-area windows (hand-tuned)
    lower_full = np.array([0, 15, 70])
    upper_full = np.array([30, 35, 250])
    lower_empty = np.array([150, 160, 220])
    upper_empty = np.array([255, 255, 255])
    full_heart_area_lower = 200
    full_heart_area_upper = 300
    half_heart_area_lower = 60
    half_heart_area_upper = 100
    # define heart image:
    hearts_image = image[98:161,967:1200] # this the heart region
    # initialize hearts
    full_hearts = 0
    empty_hearts = 0
    # calculate shapes in hearts image
    shapeMask_full = cv2.inRange(hearts_image, lower_full, upper_full)
    shapeMask_empty = cv2.inRange(hearts_image, lower_empty, upper_empty)
    # count full hearts (whole hearts add 1, half hearts add 0.5)
    cnts_full_hearts = cv2.findContours(shapeMask_full.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 2 and 3 return the contour list at different tuple positions
    cnts_full_hearts = cnts_full_hearts[0] if imutils.is_cv2() else cnts_full_hearts[1]
    for c in cnts_full_hearts:
        if cv2.contourArea(c) >= full_heart_area_lower and cv2.contourArea(c) <= full_heart_area_upper:
            full_hearts = full_hearts +1
        if cv2.contourArea(c) >= half_heart_area_lower and cv2.contourArea(c) <= half_heart_area_upper:
            full_hearts = full_hearts + 0.5
    # count empty hearts the same way
    cnts_empty_hearts = cv2.findContours(shapeMask_empty.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts_empty_hearts = cnts_empty_hearts[0] if imutils.is_cv2() else cnts_empty_hearts[1]
    for c in cnts_empty_hearts:
        if cv2.contourArea(c) >= full_heart_area_lower and cv2.contourArea(c) <= full_heart_area_upper:
            empty_hearts = empty_hearts +1
        if cv2.contourArea(c) >= half_heart_area_lower and cv2.contourArea(c) <= half_heart_area_upper:
            empty_hearts = empty_hearts + 0.5
    # total hearts = empty + full
    return full_hearts, empty_hearts+full_hearts
def get_number_text(image_selection, flag):
    """OCR the given image region with tesseract and return the text.

    Keyword arguments:
    image_selection -- the image region to read (converted to grayscale)
    flag -- 'multi' (free text), 'single' (one digit only) or
            'rubie' (digits plus the 'X' prefix)

    Raises:
    ValueError -- for an unknown *flag*; the original fell through and
    crashed later with an UnboundLocalError on ``text``.

    The grayscale crop is written to a PID-named temp PNG for tesseract
    and is now always removed, even if OCR raises (try/finally).
    """
    gray = cv2.cvtColor(image_selection, cv2.COLOR_BGR2GRAY)
    filename = "{}.png".format(os.getpid())
    cv2.imwrite(filename, gray)
    try:
        if flag == 'multi':
            text = pytesseract.image_to_string(Image.open(filename), lang = 'eng') # options for multi character
        elif flag == 'single':
            text = pytesseract.image_to_string(Image.open(filename), lang = 'eng', config='-psm 10 -c tessedit_char_whitelist=0123456789') # options for single character
        elif flag == 'rubie':
            text = pytesseract.image_to_string(Image.open(filename), lang = 'eng', config='-psm 10 -c tessedit_char_whitelist=X0123456789')
        else:
            raise ValueError("unknown OCR flag: {!r}".format(flag))
    finally:
        os.remove(filename)  # never leave the temp frame behind
    return text
def write_results(con, screen_data):
    """Persist one screen record into the Screen table.

    Keyword arguements:
    con -- the database connection
    screen_data -- 9-tuple/list matching the Screen table columns
    """
    insert_sql = "INSERT INTO Screen VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)"
    # `with con:` commits on success / rolls back on error
    with con:
        con.cursor().execute(insert_sql, screen_data)
def init_table(con):
    """Drop any existing Screen table and create a fresh, empty one.

    Keyword arguements:
    con -- the database connection
    """
    drop_sql = "DROP TABLE IF EXISTS Screen"
    create_sql = "CREATE TABLE Screen(Run TEXT, Run_Man INT, Abs_time REAL, Room TEXT, Full_hearts REAL, Total_hearts INT, Rubies TEXT, Keys TEXT, Bombs TEXT)"
    # `with con:` commits the DDL as one transaction
    with con:
        cursor = con.cursor()
        cursor.execute(drop_sql)
        cursor.execute(create_sql)
def find_start_screen(begin_time, delta_t, vidcap):
    """Scan forward from *begin_time* in *delta_t* steps until the 'OH8'
    start screen is seen.  Returns ('OH8', time_of_first_hit).

    Keyword Arguments:
    begin_time -- time to start the search
    delta_t -- time step size when searching for the screen
    vidcap -- the video object
    """
    probe_time = begin_time
    screen = None
    while screen != 'OH8':
        probe_time += delta_t
        screen = get_screen_at_time(probe_time, vidcap)
    return screen, probe_time
def get_screen_at_time(time,vidcap):
    """Return the screen label at *time* (ms) in *vidcap*.

    Labels: 'O<col><row>' for overworld, 'D<col><row>' for dungeon,
    'X-1'/'X-2'/'X-3' when the mini-map marker could not be located
    (degenerate / multiple / zero contours respectively).  'X' or '0'
    replace a coordinate letter when the marker is too far from any
    known grid line (> pixel_cutoff px).
    """
    def in_overworld(image):
        """Returns a booleen, testing if screen is in overworld
        (overworld mini-map frames average brighter than dungeon ones)."""
        gray_cut = 50 # cutoff for overworld gray
        # convert image to gray scale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # get average of gray scale
        average_gray = np.average(gray)
        # if average of gray scale falls within a range return true, otherwise false
        if average_gray > gray_cut:
            return True
        else:
            return False
    def get_screen_coords(image):
        """Return the (cX, cY) centre of the mini-map marker blob, or a
        negative error code pair (see the outer docstring)."""
        cutoff_area = 20  # ignore specks smaller than this when several contours exist
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (5, 5), 0)
        thresh = cv2.threshold(blurred, 110, 255, cv2.THRESH_BINARY)[1]
        # find contours in the thresholded image
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
        # OpenCV 2 and 3 return the contour list at different positions
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        # process contours to find X,Y position of mini-map marker
        if len(cnts) == 1:
            # if only one contour exists, the return the coordinates of its centre
            M = cv2.moments(cnts[0])
            if M["m00"] != 0:
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
            else:
                # if centre cannot be calculated return -1 for cX, cY
                cX = -1
                cY = -1
        elif len(cnts) > 1:
            cnts_real = []
            for cnt in cnts:
                #print('area:',cv2.contourArea(cnt))
                if cv2.contourArea(cnt) > cutoff_area:
                    cnts_real.append(cnt)
            if len(cnts_real) == 1:
                M = cv2.moments(cnts_real[0])
                if M["m00"] != 0:
                    cX = int(M["m10"] / M["m00"])
                    cY = int(M["m01"] / M["m00"])
                else:
                    # if centre cannot be calculated return -1 for cX, cY
                    cX = -1
                    cY = -1
            else:
                # if more than one contour is found return -2
                cX = -2
                cY = -2
        else:
            # if zero are found return -3
            cX = -3
            cY = -3
        return cX,cY
    vidcap.set(cv2.CAP_PROP_POS_MSEC,time)
    success,image = vidcap.read()
    image = image[70:155, 430:644] # RANGE HERE IS SET FOR MINI MAP ON SCREEN
    # crop offsets: convert crop-local marker coords back to frame coords
    X_off = 430
    Y_off = 70
    pixel_cutoff = 4
    # hand-calibrated grid-line positions and their screen labels
    overworld_X_coord = np.linspace(435.5,638,16)
    overworld_X_label = ('A', 'B', 'C', 'D','E', 'F', 'G', 'H', 'I','J', 'K', 'L', 'M', 'N', 'O', 'P')
    overworld_Y_coord = np.linspace(75,148.5,8)
    overworld_Y_label = ('1','2','3','4','5','6','7','8')
    dungeon_X_coord = np.linspace(386,683,12)
    dungeon_X_label = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J','K', 'L')
    dungeon_Y_coord = np.linspace(73,149,8)
    dungeon_Y_label = ('1','2','3','4','5','6','7','8')
    X,Y = get_screen_coords(image)
    X_adj = X + X_off
    Y_adj = Y + Y_off
    if X < 0:
        # marker not found: propagate the error code in the label
        screen = 'X' + str(X)
        return screen
    else:
        if in_overworld(image):
            # snap the marker to the nearest overworld grid line
            closest = min(abs(np.array(overworld_X_coord - X_adj)))
            closest_ind = np.argmin(abs(np.array(overworld_X_coord - X_adj)))
            if closest < pixel_cutoff:
                X_L = overworld_X_label[closest_ind]
            else:
                X_L = 'X'
            closest = min(abs(np.array(overworld_Y_coord - Y_adj)))
            closest_ind = np.argmin(abs(np.array(overworld_Y_coord - Y_adj)))
            if closest < pixel_cutoff:
                Y_L = overworld_Y_label[closest_ind]
            else:
                Y_L = '0'
            screen = 'O' + X_L + Y_L
            return screen
        else:
            # same snapping against the dungeon grid
            closest = min(abs(np.array(dungeon_X_coord - X_adj)))
            closest_ind = np.argmin(abs(np.array(dungeon_X_coord - X_adj)))
            if closest < pixel_cutoff:
                X_L = dungeon_X_label[closest_ind]
            else:
                X_L = 'X'
            closest = min(abs(np.array(dungeon_Y_coord - Y_adj)))
            closest_ind = np.argmin(abs(np.array(dungeon_Y_coord - Y_adj)))
            if closest < pixel_cutoff:
                Y_L = dungeon_Y_label[closest_ind]
            else:
                Y_L = '0'
            screen = 'D' + X_L + Y_L
            return screen
def get_run_number(time, video):
    """Return the run number (as OCR text) shown in the frame at *time*.

    Fix: the original seeked/read on the module-level global ``vidcap``
    and ignored the ``video`` parameter; it now uses the parameter.
    """
    video.set(cv2.CAP_PROP_POS_MSEC, time)
    success, image = video.read()
    # fixed HUD region where the run counter is displayed
    run_image = image[312:330, 254:296]
    return get_number_text(run_image, 'multi')
def load_room_list(room_list_file):
    """Return the second column of every row of the given CSV file as a
    list (the room ids; the first column is an index)."""
    with open(room_list_file, newline='') as handle:
        return [row[1] for row in csv.reader(handle, delimiter=',')]
def process_run(start_time, video, dT, run_number, master_room_list,
                unique_room_list, time_resolution, con):
    """Run the scraper: walk the video run by run, recording the entry
    time and HUD state of every room, and save each run to *con*.

    Parameters:
    start_time -- time (ms) to begin scanning
    video -- cv2.VideoCapture object
    dT -- initial coarse time step (ms)
    run_number -- number of the first run (used for the manual counter)
    master_room_list / unique_room_list -- expected room order and the
        matching unique ids loaded from the route CSV
    time_resolution -- finest time step (ms) for the bisection searches
    con -- sqlite3 connection, or False to skip saving

    NOTE(review): `known_time_in_room` is only assigned inside the
    `if screen != 'OH8'` branch below; if the very first screen is
    already 'OH8' the later use raises NameError — confirm intended.
    NOTE(review): the local loop variable `time` shadows the imported
    `time` module inside this function.
    """
    # some initializations
    kill_room = 'XXX'
    kill_video = False
    # calculate end time of video
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)
    # there should not be two of these
    # NOTE(review): this hard-coded value clobbers the computed end time
    # above — looks like leftover debugging; confirm before removing.
    master_end_time = frame_count/fps *1000
    master_end_time = 18280000
    # set run number to one lower
    run_number_man = run_number -1
    # get the first screen
    screen = get_screen_at_time(start_time,video)
    print(screen)
    # if the first screen isn't the start screen then go until you find it
    if screen != 'OH8':
        screen, known_time_in_room = find_start_screen(start_time, dT, video)
        print(screen,known_time_in_room)
    # this is the main loop. it runs until the kill_video signal is sent
    # this loop should run once per run
    while kill_video == False:
        verbose_list = []#['8DE41','8DE31'] #['4OI31', '4OJ31'] #['9DE41', 'OH8']
        run_room_list = [master_room_list[0]]
        run_time_list = []
        current_room = master_room_list[0]
        room_ind = 0
        next_room_in = master_room_list[1]
        # get run number using tesseract
        run_number = get_run_number(known_time_in_room,video)
        print('Run: ', run_number)
        # run a loop until the next room is a start room or kill room
        while next_room_in != master_room_list[0] and next_room_in != kill_room :
            if room_ind == 0:
                # if you're in the start room, get the start time, adjust the
                # run number and print the run number
                current_room_time = find_start_time_room(
                    video, current_room, known_time_in_room, dT,
                    time_resolution)
                time = current_room_time
                run_number_man = run_number_man + 1
                print('man run:', run_number_man)
            else:
                # if you aren't in the first room, do the normal stuff
                # room_A, room_B, time_A, and time_B are trackers
                # at this point, we know the r
                room_A = [master_room_list[room_ind-1]]
                room_B = master_room_list[room_ind]
                time_A = [max_time_previous]
                time_B = [known_time_in_room]
                run_count = 0
                # this is a special debug mode and can be removed
                if any(unique_room_list[room_ind] in room for room in verbose_list):
                    print('UNIQUE ID:',unique_room_list[room_ind])
                    print('RoomA:', room_A, 'RoomB:', room_B)
                    print('TimeA:', time_A, 'TimeB:', time_B)
                # this the the code that finds the start time of the room
                current_room_time, time = \
                    find_start_time_room_2(video, room_A, room_B, time_A, time_B,
                                           time_resolution, run_count)
            # print room info
            print('room:',current_room,' time:',current_room_time, 'roomID:',
                  unique_room_list[room_ind])
            #append the start time of the current room to the run_time_list
            run_time_list.append(current_room_time)
            # increase our room number
            room_ind = room_ind + 1
            # get the next room in our list and make the room selection list
            next_room_in = master_room_list[room_ind]
            rooms_list_selection = master_room_list[room_ind-1:room_ind+3]
            time_previous = [time]
            time_future =[]
            # This is a special mode for debuging and can probably be cut
            if any(unique_room_list[room_ind] in room for room in verbose_list):
                verbose_mode = True
            else:
                verbose_mode = False
            # set an internal count to 0
            count = 0
            # this is your search function of the next room, your looking for
            # the next room, any time in that room, and the next time in the
            # current room
            next_room_out, known_time_in_room, max_time_previous = \
                find_time_next_room_adaptive(video,time_previous, time_future, dT,
                                             rooms_list_selection, master_end_time,
                                             time_resolution, verbose_mode, count)
            # this is stuff related to a debug mode and can probably be removed
            if any(unique_room_list[room_ind] \
                   in room for room in verbose_list):
                print('next_room_out:',next_room_out)
                print('next_room_in:', next_room_in)
                print('known_time_in_room:', known_time_in_room)
                print('max_time_previous:', max_time_previous)
            # check if the next room entered is the next on your master list
            if next_room_in == next_room_out:
                run_room_list.append(next_room_in)
                current_room = next_room_in
            else:
                next_room_in = next_room_out
        # grab all info from begining of each screen
        keys_list = []
        bomb_list = []
        rubies_list = []
        full_hearts_list =[]
        total_hearts_list =[]
        print('run complete: getting room info')
        for time in run_time_list:
            # grab all the other info using a .8 s offset to give the room a
            # load time
            output = get_other_info(time+800,video)
            full_hearts_list.append(output[0])
            total_hearts_list.append(output[1])
            rubies_list.append(output[2])
            keys_list.append(output[3])
            bomb_list.append(output[4])
        # save data to a sql
        if con != False:
            print('saving run')
            for index in range(0,len(run_time_list)):
                screen_data = []
                screen_data.append(run_number)
                screen_data.append(run_number_man)
                screen_data.append(run_time_list[index])
                screen_data.append(unique_room_list[index])
                screen_data.append(full_hearts_list[index])
                screen_data.append(total_hearts_list[index])
                screen_data.append(rubies_list[index])
                screen_data.append(keys_list[index])
                screen_data.append(bomb_list[index])
                write_results(con,screen_data)
            print('save complete')
        # check for kill room
        if next_room_in == kill_room:
            kill_video = True
        else:
            kill_video = False
def find_start_time_room(video, current_room, known_time_in_room, dT, time_resolution):
    """Bisect backwards from *known_time_in_room* to find when
    *current_room* was entered, refining by halving *dT* each level
    (never below 100 ms) until within *time_resolution*.
    """
    # probe the midpoint of [known_time_in_room - dT, known_time_in_room]
    time = (known_time_in_room + known_time_in_room-dT)/2
    new_screen = get_screen_at_time(time,video)
    if new_screen == current_room:
        dT = dT/2
        known_time_in_room = time
        if dT < 100:
            dT = 100
        # if the middle time is the begining screen, search the later half
        return find_start_time_room(video, current_room, known_time_in_room, dT,time_resolution)
    else:
        if known_time_in_room - time < time_resolution:
            # bracketed within the requested resolution: done
            return time
        else:
            dT = dT/2
            return find_start_time_room(video, current_room, known_time_in_room, dT,time_resolution)
def find_start_time_room_2(video, room_A, room_B, time_A, time_B, time_resolution, run_count):
    """Bisect the transition between the previous room(s) *room_A* and
    the next room *room_B*, returning (entry_time_of_B, latest_known_B).

    room_A / time_A are lists that grow in place as intermediate or
    unrecognized screens are absorbed into the "previous" side; *run_count*
    limits how many unknown screens are tolerated before force-absorbing.
    """
    run_count = run_count +1
    if any(room_B in room for room in room_A):
        print('ROOMS ARE SAME - BAD')
    if (min(time_B) - max(time_A)) < time_resolution:
        # bracket is tight enough: earliest sighting of B is the entry time
        return min(time_B), max(time_B)
    else:
        time = (max(time_A) + min(time_B))/2
        test_screen = get_screen_at_time(time,video)
        if any(test_screen in room for room in room_A):
            time_A.append(time)
            return find_start_time_room_2(video,room_A,room_B, time_A, time_B,time_resolution, run_count)
        elif test_screen == room_B:
            time_B.append(time)
            return find_start_time_room_2(video,room_A,room_B, time_A, time_B,time_resolution, run_count)
        else:
            # if you're in a room without an ID, then step backwards in TR steps until you have the original room
            # or you find next room. If you find next room, restart with updated times. If you get to original
            # room then add
            # are we <= TS away from room_A
            if (abs(max(time_A)-time) < time_resolution) or run_count > 10:
                # absorb the unknown screen into the "previous" side
                room_A.append(test_screen)
                time_A.append(time)
                run_count = 0
                return find_start_time_room_2(video,room_A,room_B, time_A, time_B,time_resolution, run_count)
            else:
                # if not calc array of times
                time_array = [time - time_resolution]
                array_test = min(time_array) > (max(time_A) + time_resolution)
                while array_test:
                    time_array.append(min(time_array) - time_resolution)
                    array_test = min(time_array) > max(time_A) + time_resolution
                test_screen_array = []
                for ind_time in time_array:
                    test_screen_ind = get_screen_at_time(ind_time,video)
                    if any(test_screen_ind in room for room in room_A):
                        time_A.append(ind_time)
                        return find_start_time_room_2(video,room_A,room_B, time_A, time_B,time_resolution, run_count)
                # nothing matched: absorb the latest probed screen as "previous"
                max_test_screen = get_screen_at_time(max(time_array),video)
                time_A.append(max(time_array))
                room_A.append(max_test_screen)
                #print('^^^')
                return find_start_time_room_2(video,room_A,room_B, time_A, time_B,time_resolution, run_count)
def find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution, verbose_mode, count):
    """Search forward for the next room after the current one.

    Returns (room, time_seen_in_room, latest_time_still_in_previous).
    rooms_list_selection = [previous, expected_next, future1, future2];
    sightings of future rooms tighten the search from above, sightings
    of the previous room tighten it from below.  Special-cases the
    'OH8' start screen and returns room 'XXX' past *master_end_time*.

    NOTE(review): in the fallback branch, `max(bump_times)` can raise
    ValueError when `bump_times` is empty and `time_future` is also
    empty — confirm whether that path is reachable in practice.
    """
    # init code
    previous_room = rooms_list_selection[0]
    next_room = rooms_list_selection[1]
    future_rooms = rooms_list_selection[2:4]
    #special code for first room looking for next room
    if previous_room == 'OH8':
        room = 'OH8'
        dt = 2000
        time = max(time_previous)
        while room == 'OH8':
            while room == 'OH8':
                time = time + dt
                room = get_screen_at_time(time,video)
            if room == 'OG8':
                return 'OG8', time, time - dt
            # not the expected exit: keep scanning until OH8 reappears
            X_time = time
            while room != 'OH8':
                if time - X_time < 2000:
                    time = time + 200
                else:
                    time = time + dT
                room = get_screen_at_time(time,video)
                if time - X_time >= 2000:
                    return room, time, time - dT
    # clean future room list
    if any('X-3' in future_room for future_room in future_rooms):
        future_rooms.remove('X-3')
    if any(previous_room in future_room for future_room in future_rooms):
        future_rooms.remove(previous_room)
    if verbose_mode:
        print('rooms_list_selection:', rooms_list_selection)
    # pick the next probe time: step forward, or bisect when we already
    # have an upper bound from a future-room sighting
    if len(time_future) == 0 or count > 5:
        time = max(time_previous) + dT
        e_dT = dT
    else:
        time = (max(time_previous) + min(time_future))/2
        print(time)
        e_dT = time - max(time_previous)
    if verbose_mode:
        print('time: ', time)
    # check for end time
    if time > master_end_time:
        room = 'XXX'
        return room, time, max(time_previous)
    room = get_screen_at_time(time,video)
    if verbose_mode:
        print('room:', room)
    if room == next_room:
        return room, time, max(time_previous)
    elif room == previous_room:
        time_previous.append(time)
        return find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution, verbose_mode, count)
    elif room == 'OH8':
        return room, time, max(time_previous)
    elif any(room in future_room for future_room in future_rooms):
        # overshot into a later room: remember it as an upper bound
        time_future.append(time)
        count = count + 1
        if count > 6:
            time_previous.append(time)
        return find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution, verbose_mode, count)
    else:
        # unrecognized screen: probe nearby times ("bumps") at the fine
        # resolution on both sides of the current probe
        if next_room != 'OH8' and count < 6:
            count = count + 1
            num_bumps = math.floor(e_dT/time_resolution)
            if verbose_mode:
                print(e_dT/time_resolution)
                print(e_dT % time_resolution == 0)
            if e_dT % time_resolution == 0:
                num_bumps = num_bumps - 1
            bump_times =[]
            for bump in range(1,num_bumps+1):
                bump_times.append(time + bump*time_resolution)
                bump_times.append(time - bump*time_resolution)
            if verbose_mode:
                print('bump times:' ,bump_times)
            for bump_time in bump_times:
                screen_bump = get_screen_at_time(bump_time,video)
                if verbose_mode:
                    print('bump screen: ', screen_bump, 'time', bump_time)
                if screen_bump == next_room:
                    return screen_bump, bump_time, max(time_previous)
                elif screen_bump == previous_room:
                    time_previous.append(bump_time)
                    return find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution,verbose_mode, count)
                elif any(screen_bump in future_room for future_room in future_rooms):
                    time_future.append(bump_time)
                    return find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution, verbose_mode, count)
                elif screen_bump == 'OH8':
                    return screen_bump, bump_time, max(time_previous)
            if len(bump_times) == 0 and len(time_future) > 0:
                # no usable bumps: discard the future bounds and restart wide
                time_future =[]
                future_rooms =[]
            else:
                time = max(bump_times)
                time_previous.append(time)
            return find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution, verbose_mode, count)
        else:
            time_previous.append(time)
            return find_time_next_room_adaptive(video,time_previous, time_future,dT,rooms_list_selection,master_end_time, time_resolution, verbose_mode, count)
def convert_room_list(room_list):
    """Strip each unique room id down to its 3-character screen code
    (characters 1-3, dropping the leading index digit and any suffix)."""
    return [room[1:4] for room in room_list]
# start clock for run timer
wall_start_time = time.time()
# command-line interface: only --video is required
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
    help="path to the video image")
ap.add_argument("--room_list", required=False,
    help="path to room list csv file, defaults to double hundo")
ap.add_argument("--verbose", required=False,
    help="flag for more output", action="store_true")
ap.add_argument("-t", "--start", required=False,
    help="start time")
ap.add_argument("-d", "--delta", required=False,
    help="delta time")
ap.add_argument("-nosave", required=False,
    help="don't save", action="store_true")
ap.add_argument("-run", required=False,
    help="run number")
args = vars(ap.parse_args())
# unpack parsed arguments (all string-valued unless action="store_true")
video = args["video"]
room_list_file = args["room_list"]
verbose = args["verbose"]
run_start_time = args["start"]
delta_time = args["delta"]
nosave = args["nosave"]
run_number = args["run"]
## set default values
# set this to be manually adjustable later
time_resolution = 100
# set DT_i - the initial time interval for room scanning
# NOTE(review): when --delta is given, DT_i stays the argparse string
# (no int() conversion, unlike run_start_time below) — confirm intended.
if delta_time is not None:
    DT_i = delta_time
else:
    DT_i = 3000
# set run_number the run number of the first run
if run_number is not None:
    run_number = int(run_number)
else:
    run_number = 1
# set run_start_time the time (ms) in the video to start the scraping
# this works best if this is just before the start of the first run.
if run_start_time is None:
    run_start_time = 1
run_start_time = int(run_start_time)
# load video
vidcap = cv2.VideoCapture(video)
# create database file named after the video (unless -nosave)
filename = os.path.basename(video)
data_file = os.path.splitext(filename)[0] + '.db'
if nosave == False:
    # only (re)create the Screen table for a brand-new database file
    initialize_table = not(os.path.exists(data_file))
    con = lite.connect(data_file)
    if initialize_table:
        init_table(con)
else:
    con = False
# load room list, if a roomlist insn't provided, use double hundo
if room_list_file is None:
    room_list_file = '../data/unique_room_list_double_hundo_with_index.csv'
unique_room_list = load_room_list(room_list_file)
room_list = convert_room_list(unique_room_list)
# run the normal adaptive run
process_run(run_start_time, vidcap, DT_i, run_number, room_list,
            unique_room_list, time_resolution, con)
# report total wall-clock processing time
wall_end_time = time.time()
elapsed = wall_end_time - wall_start_time
print('elapsed processing time: ', elapsed)
| true
|
53f3f67a358e59eaf7cef239ff96c12f94280ec4
|
Python
|
rahulsharma20/algorithms
|
/arestringcharactersunique.py
|
UTF-8
| 686
| 4.34375
| 4
|
[] |
no_license
|
# Determine if a string has all unique characters
def isUnique(string):
    """Return True when no character occurs more than once in *string*.

    Uses a set of seen characters (the original used a dict purely as a
    set) and stops at the first duplicate.  O(n) time,
    O(min(n, alphabet)) space.
    """
    seen = set()
    for char in string:
        if char in seen:
            return False
        seen.add(char)
    return True
if __name__ == "__main__":
stringWithDupes = 'somerandomstrigwithduplicates'
uniqueString = 'asdfghjklqwertyuiop'
if isUnique(stringWithDupes):
print(stringWithDupes + " : String is unique")
else:
print(stringWithDupes + " : String has duplicate characters")
if isUnique(uniqueString):
print(uniqueString + " : String is unique")
else:
print(uniqueString + " : String has duplicate characters")
| true
|
12548e3b8e0a2d8c216bae1595999e0317ea39c3
|
Python
|
rrwt/daily-coding-challenge
|
/daily_problems/n_queen_problem.py
|
UTF-8
| 1,728
| 4
| 4
|
[
"MIT"
] |
permissive
|
"""
You have an N by N board. Write a function that returns the number of possible
arrangements of the board where N queens can be placed on the board without
threatening each other, i.e. no two queens share the same row, column, or diagonal.
"""
def is_legal_move(x: int, y: int, dim: int, board: list) -> bool:
    """Check whether a queen may be placed at row *x*, column *y*.

    Rows below *x* are assumed empty (queens are placed row by row), so
    only the column above and the two upward diagonals are examined.
    """
    # same column, rows 0..x-1
    if any(board[row][y] for row in range(x)):
        return False
    # upper-left diagonal (starts at (x, y) itself, which is still empty)
    if any(board[i][j] for i, j in zip(range(x, -1, -1), range(y, -1, -1))):
        return False
    # upper-right diagonal
    if any(board[i][j] for i, j in zip(range(x, -1, -1), range(y, dim))):
        return False
    return True
def n_queen(dim: int):
    """Count the arrangements of *dim* mutually non-attacking queens on a
    dim x dim board.

    Queens are placed one per row, so the recursion depth equals the row
    number; symmetric solutions are all counted separately.
    """
    board: list = [[None] * dim for _ in range(dim)]
    ways: int = 0

    def place_row(row: int):
        # Try every column of this row; recurse when the square is safe.
        nonlocal ways
        if row == dim:
            ways += 1
            return
        for col in range(dim):
            if is_legal_move(row, col, dim, board):
                board[row][col] = 1
                place_row(row + 1)
                board[row][col] = None  # undo the placement (backtrack)

    place_row(0)
    return ways
if __name__ == "__main__":
for i in range(1, 10):
print(i, ":", n_queen(i))
| true
|
98c48fc1418fd4caf77cd25a0ce58aa10008c2c8
|
Python
|
michelleweii/Leetcode
|
/16_剑指offer二刷/剑指 Offer 51-数组中的逆序对.py
|
UTF-8
| 2,273
| 3.328125
| 3
|
[] |
no_license
|
"""
hard 归并排序进阶
2021-07-21
https://leetcode-cn.com/problems/shu-zu-zhong-de-ni-xu-dui-lcof/solution/jian-zhi-offer-51-shu-zu-zhong-de-ni-xu-pvn2h/
"""
# https://leetcode-cn.com/problems/shu-zu-zhong-de-ni-xu-dui-lcof/solution/jian-zhi-offerdi-51ti-ti-jie-gui-bing-pa-7m88/
class Solution:
    """Count inversion pairs (i < j with nums[i] > nums[j]) via merge sort."""

    def reversePairs(self, nums):
        """Return the number of inversion pairs in *nums*.

        Sorts *nums* in place as a side effect of the merge sort.
        Time O(n log n), space O(n).

        Fixes: removed the leftover debug ``print("temp", temp)`` that
        fired on every merge, the unused ``self.res`` attribute, and the
        large commented-out alternative implementation.
        """
        return self.merge(nums, 0, len(nums) - 1)

    def merge(self, nums, l, r):
        """Merge-sort nums[l..r] in place and return the number of
        inversions within that slice (including cross-half pairs)."""
        if l >= r:
            return 0
        mid = (l + r) // 2
        # inversions fully inside each half
        res = self.merge(nums, l, mid) + self.merge(nums, mid + 1, r)
        i, j = l, mid + 1
        temp = []
        while i <= mid and j <= r:
            if nums[i] <= nums[j]:
                # left element is not greater: no new inversions
                temp.append(nums[i])
                i += 1
            else:
                temp.append(nums[j])
                j += 1
                # nums[i..mid] are all > nums[j]: count them all at once
                res += mid - i + 1
        temp += nums[i:mid + 1]
        temp += nums[j:r + 1]
        # copy the merged slice back so the caller sees sorted data
        for k in range(r - l + 1):
            nums[l + k] = temp[k]
        return res
if __name__ == '__main__':
    # [7, 5, 6, 4] contains 5 inversion pairs.
    nums = [7,5,6,4]
    print(Solution().reversePairs(nums))
| true
|
f4fa1ab0e01b92b19f61f57f264c0462f85e10a8
|
Python
|
chrislyon/my-robot-motor-class
|
/first.py
|
UTF-8
| 4,356
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import sys, traceback
import time
import datetime
#import pyfirmata
import pyfirmata_fake as pyfirmata
# Démarrer la connection avec Arduino UNO
# USB: /dev/ttyUSB0 ou /dev/ttyACM0
# UART: /dev/ttyAMA0
import pdb
def log(msg):
    # Print *msg* prefixed with the current wall-clock time (HH:MM:SS).
    # NOTE: this file is Python 2 source (print statements).
    a = datetime.datetime.now()
    print "%s : %s" % ( a.strftime("%X"), msg)
def print_error(msg):
    # Print *msg* plus the current traceback between separator rules,
    # then abort the whole process with exit status 1.
    print '-'*60
    print "Erreur : %s " % msg
    print '-'*60
    traceback.print_exc(file=sys.stdout)
    print '-'*60
    sys.exit(1)
## -----------------
## La classe Robot
## -----------------
class Robot(object):
    # Two-motor robot driven through an Arduino board via pyfirmata.
    # A robot is "online" once the board and both motors are configured.
    DEF_SPEED = 50   # default speed passed to avance()/recule()
    EN_AVANT = 0     # direction value: forward
    EN_ARRIERE = 1   # direction value: backward
    def __init__(self, name):
        # Create an empty robot; board and motors are attached later
        # via set_board() / set_Moteur_Droit() / set_Moteur_Gauche().
        log("Creation du Robot : %s " % name)
        self.name = name
        self.board = None
        self.moteur_gauche = None
        self.moteur_droit = None
        self.direction = None
        self.vitesse = 0
        self.isOnline = False
    def offline(self):
        # Release the Arduino board connection.
        log("Robot : %s : Offline" % self.name )
        self.board.exit()
    def online(self):
        # Declare the robot online only if both motors exist.
        # NOTE(review): the two log calls below use "%s" without a
        # formatting argument — the name is never substituted; confirm.
        self.isOnline = True
        if not self.moteur_droit:
            log("Robot : %s : Moteur Droit inexistant")
            self.isOnline = False
        if not self.moteur_gauche:
            log("Robot : %s : Moteur gauche inexistant")
            self.isOnline = False
        if self.isOnline:
            log("Robot : %s : Online" % self.name )
    def set_board(self):
        # Open the pyfirmata connection to the Arduino on /dev/ttyACM0.
        log("Init Robot : board")
        try:
            self.board = pyfirmata.Arduino('/dev/ttyACM0')
            self.isOnline = True
        except:
            print_error("Pb init board")
    def set_Moteur_Droit(self, pin_sens=0, pin_vitesse=0):
        # Attach the right motor (direction pin, PWM speed pin).
        if self.board:
            self.moteur_droit = Motor("Moteur Droit", self.board, pin_sens, pin_vitesse)
        else:
            log("Set Board First")
    def set_Moteur_Gauche(self, pin_sens, pin_vitesse):
        # Attach the left motor (direction pin, PWM speed pin).
        if self.board:
            self.moteur_gauche = Motor("Moteur Gauche", self.board, pin_sens, pin_vitesse)
        else:
            log("Set Board First")
    def stop(self):
        # Stop both motors.
        if self.isOnline:
            self.moteur_droit.stop()
            self.moteur_gauche.stop()
        else:
            log("Robot : %s is offline" % self.name)
    def avance(self, vitesse=DEF_SPEED):
        ## Both motors at the same speed, forward.
        if self.isOnline:
            self.moteur_droit.run(vitesse, Robot.EN_AVANT)
            self.moteur_gauche.run(vitesse, Robot.EN_AVANT)
        else:
            log("Robot : %s is offline" % self.name)
    def recule(self, vitesse=DEF_SPEED):
        # TODO: drive backward — not implemented yet.
        pass
    def droite(self):
        # TODO: turn right — not implemented yet.
        pass
    def gauche(self):
        # TODO: turn left — not implemented yet.
        pass
    def __str__(self):
        # Multi-line human-readable state summary.
        return """
        Robot Name : {name} direction={d} vitesse={v}
        - Moteur droit : {m_d}
        - Moteur gauche : {m_g}
        """.format( name=self.name, d=self.direction, v=self.vitesse, m_d=self.moteur_droit, m_g=self.moteur_gauche )
class Motor(object):
    # One DC motor on the Arduino: a digital output pin selects the
    # direction and a PWM pin sets the speed.
    def __init__(self, name, board, pin_direction, pin_vitesse):
        # Reserve the two pins on *board* through pyfirmata
        # ("d:<n>:p" = digital PWM, "d:<n>:o" = digital output).
        self.board = board
        self.name = name
        self.d_pin = pin_direction
        self.s_pin = pin_vitesse
        self.pwm = None
        self.sens = None
        self.direction = 0
        self.vitesse = 0
        self.mode_test = True
        try:
            log ("Init %s : PWM : %s" % (self.name, self.s_pin))
            self.pwm = self.board.get_pin("d:%s:p" % self.s_pin)
            log ("Init %s : DIR : %s" % (self.name, self.d_pin))
            self.sens = self.board.get_pin("d:%s:o" % self.d_pin)
        except:
            print_error( "PB : Init Motor : %s " % self.name )
    def _write(self):
        # Push the current direction and speed to the hardware pins.
        self.sens.write(self.direction)
        self.pwm.write(self.vitesse)
    def stop(self):
        # Cut the speed to zero (direction pin keeps its last value).
        self.vitesse = 0
        self._write()
    def run(self, vitesse=0.5, sens=0):
        # Set speed and direction, then apply them.
        self.vitesse = vitesse
        self.direction = sens
        self._write()
    def __str__(self):
        return "Moteur : %s d=%s v=%s" % (self.name, self.direction, self.vitesse)
log( "Debut" )
## Build the robot, attach board and motors, then exercise its commands.
log( " Creation du robot ")
R1 = Robot("R1")
R1.set_board()
R1.set_Moteur_Droit(pin_sens=12, pin_vitesse=3)
R1.set_Moteur_Gauche(pin_sens=13,pin_vitesse=11)
print R1
R1.online()
R1.recule()
R1.avance()
R1.stop()
R1.offline()
log("Fin")
| true
|
6200c87fc1c42d2c7e02fe85ea90345a8dd80ee8
|
Python
|
ivankreso/stereo-vision
|
/scripts/crop_images.py
|
UTF-8
| 1,418
| 2.5625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/python
# Note: python3 script
import os, sys, re
# Usage check: exactly two positional arguments (source dir, destination dir).
if len(sys.argv) != 3:
    print("Usage:\n\t\t" + sys.argv[0] + " src_dir/ dst_dir/\n")
    sys.exit(1)
# create output dir
if not os.path.exists(sys.argv[2]):
    os.makedirs(sys.argv[2])
# get file list of input dir
imglst = os.listdir(sys.argv[1])
# filter only appropriate images
#regex = re.compile(".*\.png$", re.IGNORECASE)
regex = re.compile(".*\.pgm$", re.IGNORECASE)
imglst = [f for f in imglst if regex.search(f)]
imglst.sort()
# split images
# NOTE(review): filenames are concatenated straight into a shell command
# below — names containing spaces or shell metacharacters will break or be
# interpreted by the shell; consider subprocess.run([...]) with a list.
for i in range(len(imglst)):
    # Progress as a percentage, truncated to 5 characters.
    print(str(i/len(imglst)*100.0)[:5] + "%\t" + imglst[i])
    #os.system("convert -crop 590x362+23+35 " + sys.argv[1] + imglst[i] + " " + sys.argv[2] + imglst[i])
    # +repage to remove offset information after cropping that some formats like png and gif stores
    # tractor dataset
    #os.system("convert -crop 1183x934+49+40 +repage " + sys.argv[1] + imglst[i] + " " + sys.argv[2] + imglst[i])
    os.system("convert -crop 1183x810+49+40 +repage " + sys.argv[1] + imglst[i] + " " + sys.argv[2] + imglst[i])
    #os.system("convert -crop 640x426+0+0 +repage " + sys.argv[1] + imglst[i] + " " + sys.argv[2] + imglst[i])
    #os.system("convert -crop 590x362+23+35 +repage " + sys.argv[1] + imglst[i] + " " + sys.argv[2] + imglst[i])
    #os.system("convert -crop 590x272+0+90 +repage " + sys.argv[1] + imglst[i] + " " + sys.argv[2] + imglst[i])
| true
|
335d7b0c4ff3450515b37068c029f6a05377e343
|
Python
|
LXZbackend/Base_python
|
/feiji/pygameDemo.py
|
UTF-8
| 750
| 3.546875
| 4
|
[] |
no_license
|
#coding=utf-8
#导入pygame库
import pygame
#向sys模块借一个exit函数用来退出程序
from sys import exit
#Initialise pygame so the display hardware can be used
pygame.init()
#Create a window the same size as the background image
screen = pygame.display.set_mode((600, 170), 0, 32)
#Set the window title
pygame.display.set_caption("Hello, World!")
#Load the background image and convert it to the display pixel format
background = pygame.image.load('bg.jpg').convert()
#Main game loop
while True:
    for event in pygame.event.get():
        #Quit the program when a QUIT event is received
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
    #Blit the background image onto the screen
    screen.blit(background, (0,0))
    #Refresh the display
    pygame.display.update()
| true
|
a011ad5d0b14e6ab0a9ff8f41cad110a92adee3b
|
Python
|
Bomullsdotten/Euler
|
/Even_fibonacci/test.py
|
UTF-8
| 752
| 3.375
| 3
|
[] |
no_license
|
from __future__ import absolute_import
import unittest
class MyTestCase(unittest.TestCase):
    """Unit tests for the Even_fibonacci helpers."""

    def test_fibonacci_returns_fibonacci_number_x(self):
        from Even_fibonacci.fibonacci import fibonacci
        ten_first_fib = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
        # Spot-check a few 1-based positions against the known sequence.
        self.assertEqual(fibonacci(1), ten_first_fib[0])
        self.assertEqual(fibonacci(2), ten_first_fib[1])
        self.assertEqual(fibonacci(5), ten_first_fib[4])

    def test_is_even(self):
        from Even_fibonacci.fibonacci import is_even
        self.assertTrue(is_even(14))
        self.assertFalse(is_even(13))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true
|
8c17e2e29969e6e296c18763044eae40f64b4577
|
Python
|
xtompok/uvod-do-prg
|
/koch/koch.py
|
UTF-8
| 815
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
from turtle import pendown,penup,goto,exitonclick
from math import sqrt
def koch(startx,starty,endx,endy,d):
    # Draw one Koch-curve bump between (startx, starty) and (endx, endy),
    # then recurse on its four sub-segments; d == 0 ends the recursion.
    if d == 0:
        return
    # Direction vector of the segment.
    dirx = endx-startx
    diry = endy-starty
    # A and B split the segment into thirds; C is the peak of the bump,
    # obtained by rotating the middle third (factor sqrt(3)/2 = height of
    # an equilateral triangle relative to its side).
    pointA = (startx + dirx/3,starty + diry/3)
    pointB = (startx + 2*dirx/3,starty + 2*diry/3)
    baseC = (startx + dirx/2, starty + diry/2)
    pointCx = baseC[0]-diry/3*sqrt(3)/2
    pointCy = baseC[1]+dirx/3*sqrt(3)/2
    # Draw the 4-segment polyline start -> A -> C -> B -> end.
    goto(startx,starty)
    pendown()
    goto(pointA[0],pointA[1])
    goto(pointCx,pointCy)
    goto(pointB[0],pointB[1])
    goto(endx,endy)
    penup()
    # Refine each of the four sub-segments one level deeper.
    koch(startx,starty,pointA[0],pointA[1],d-1)
    koch(pointA[0],pointA[1],pointCx,pointCy,d-1)
    koch(pointCx,pointCy,pointB[0],pointB[1],d-1)
    koch(pointB[0],pointB[1],endx,endy,d-1)
# Draw a depth-5 Koch curve across the bottom of the window, then wait for a click.
penup()
koch(-750,-500,750,-500,5)
exitonclick()
| true
|
b7a696a39a6f82f70ee23ee80b26d6a87714fe21
|
Python
|
serubirikenny/Shoppinlist2db
|
/r.py
|
UTF-8
| 6,627
| 2.546875
| 3
|
[] |
no_license
|
from flask import Flask, render_template, url_for, request, redirect, jsonify
from forms import LoginForm, SignUpForm, NewListForm,NewItemForm
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, logout_user, current_user, login_required
######################### initialisation ##########################
app = Flask('__name__')
# NOTE(review): Flask('__name__') passes the literal string "__name__";
# the conventional call is Flask(__name__) — confirm before changing.
# SECURITY: database credentials are hard-coded in source; move them to
# environment variables or a config file before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://kenny3:kenny4@localhost:5432/db_four'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# SECURITY: placeholder secret key and CSRF protection disabled —
# acceptable only for local testing.
app.config['SECRET_KEY'] = 'not_really_secret'
app.config['WTF_CSRF_ENABLED'] = False
####################### LOGIN & LOGOUT ############################
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'index'
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: map the session-stored id back to a User row.
    return User.query.get(int(user_id))
@app.route('/auth/login', methods=['POST'])
def login():
    # Authenticate by email/password and start a session.
    # 200 on success, 401 on bad password, 404 for an unknown email.
    form = LoginForm()
    usr = User.query.filter_by(email=str(request.form['email'])).first()
    if usr:
        # SECURITY: passwords are stored and compared in plaintext —
        # switch to a salted hash (e.g. werkzeug.security) before production.
        if usr.password == form.password.data:
            login_user(usr)
            response = jsonify({'MSG':'Login Successful'})
            response.status_code = 200
        else:
            response = jsonify({'ERR':'Incorrect Password'})
            response.status_code = 401
    else:
        response = jsonify({'ERR': 'User does not exist'})
        response.status_code = 404
    return response
@app.route('/auth/register', methods=['POST'])
def register():
    # Create a new user account from the posted email/password.
    form = SignUpForm()
    if form.validate_on_submit():
        usr = User(str(request.form['email']), str(request.form['password']))
        if usr:  # NOTE: constructor result is always truthy — else branch is unreachable
            db.session.add(usr)
            db.session.commit()
            response = jsonify({'MSG':'Success'})
            response.status_code = 200
        else:
            response = jsonify({'ERR':'User object wasnt created.'})
            response.status_code = 400
    else:
        # Form validation failed: report the field errors as JSON.
        response = jsonify({'ERR': form.errors})
        response.status_code = 400
    return response
####################### MODELS ####################################
class User(db.Model, UserMixin):
    """This class represents the user table"""
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)  # login identifier
    # SECURITY: stored as plaintext (max 16 chars); should hold a salted hash.
    password = db.Column(db.String(16))
    # One-to-many: the shopping lists owned by this user.
    lists = db.relationship('ShoppingList', backref='user', lazy='dynamic')
    def __init__(self, email, password):
        self.email = email
        self.password = password
class ShoppingList(db.Model):
    """This class represents the shopping_list table"""
    __tablename__ = 'shopping_list'
    id = db.Column(db.Integer, primary_key=True)
    list_name = db.Column(db.String(64), unique=True)  # unique display name
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))  # owning user
    def __init__(self, list_name):
        self.list_name = list_name
    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        return { 'list_name': self.list_name }
class Item(db.Model):
    """This class represents the item table"""
    __tablename__ = 'items'
    item_id = db.Column(db.Integer, primary_key=True)
    item_name = db.Column(db.String(32))
    quantity = db.Column(db.Integer)
    list_id = db.Column(db.Integer, db.ForeignKey('shopping_list.id'))

    def __init__(self, item_name, list_name, quantity=1):
        # Bug fix: the original assigned `self.list_name`, which is not a
        # mapped column, so the `list_id` foreign key was always left NULL.
        # The second positional argument is the owning list's id — see the
        # add_item() view, which passes the URL's <id> here.
        self.item_name = item_name
        self.list_id = list_name
        self.quantity = quantity

    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        # Bug fix: 'item_name' previously returned self.list_name.
        return { 'item_name': self.item_name, 'list_id': self.list_id }
###################### views and routing functions##################
@app.route('/shoppinglist', methods=['GET'])
def view_all_lists():
    # Return every shopping list as a JSON array.
    all_sh_lists = ShoppingList.query.all()
    # NOTE: Query.all() returns a list (possibly empty), never None,
    # so the 404 branch below is effectively unreachable.
    if all_sh_lists is not None:
        response = jsonify([obj.serialize for obj in all_sh_lists])
        response.status_code = 200
    else:
        response = jsonify({'ERR':'No lists returned.'})
        response.status_code = 404
    return response
@app.route('/shoppinglist', methods=['POST'])
def create_list():
    # Create a new shopping list from the posted form data.
    # NOTE(review): `form` is built but never validated; list_name is a
    # unique column, so a duplicate name presumably fails at commit —
    # confirm and handle.
    form = NewListForm()
    new_list = ShoppingList(request.form['list_name'])
    if new_list is not None:  # constructor result is never None
        db.session.add(new_list)
        db.session.commit()
        response = jsonify({'MSG': 'Success'})
        response.status_code = 201
    else:
        response = jsonify({'ERR':'List was not created'})
        response.status_code = 400
    return response
@app.route('/shoppinglist/<id>', methods = ['DELETE'])
def delete_list(id):
    """Delete the shopping list with the given id.

    Bug fix: Query.one() raises NoResultFound when nothing matches, so the
    original 404 branch could never run (clients got a 500 instead).
    Query.first() returns None in that case, which the check below handles.
    """
    del_list = ShoppingList.query.filter_by(id=id).first()
    if del_list is not None:
        db.session.delete(del_list)
        db.session.commit()
        response = jsonify({'MSG':'Success'})
        response.status_code = 204
    else:
        response = jsonify({'ERR':'Requested list was not found'})
        response.status_code = 404
    return response
@app.route('/shoppinglist/<id>', methods=['GET'])
def view_list(id):
    """Return the items belonging to one shopping list.

    Bug fixes: Item has no `id` column — filtering must use `list_id`
    (the original filter_by(id=...) raised); and SQLAlchemy model
    instances are not JSON-serializable, so each item goes through its
    `serialize` property before jsonify().
    """
    list_items = Item.query.filter_by(list_id=id).all()
    if list_items is not None:  # .all() returns a list; check kept for shape parity
        response = jsonify([item.serialize for item in list_items])
        response.status_code = 200
    else:
        response = jsonify({'ERR':'List items not Found'})
        response.status_code = 400
    return response
@app.route('/shoppinglists/<id>/item/', methods=['POST'])
def add_item(id):
    """Add an item (from the posted form) to the list with the given id.

    Bug fix: the failure branch called `response.jsonify(...)` on an
    unbound name, which raised AttributeError/NameError instead of
    returning the intended 400 response.
    """
    form = NewItemForm()
    new_item = Item(request.form['item_name'], id)
    if new_item is not None:  # constructor result is never None
        db.session.add(new_item)
        db.session.commit()
        response = jsonify({'MSG': 'Item added to list'})
        response.status_code = 201
    else:
        response = jsonify({'ERR':'Item wasnt added to list'})
        response.status_code = 400
    return response
@app.route('/shoppinglists/<id>/items/<item_id>', methods=['DELETE'])
def delete_item(id, item_id):
    """Delete one item from a shopping list.

    Bug fixes: Item's columns are `list_id`/`item_id` (there is no `id`
    attribute, so the original filter_by(id=...) raised), and Query.one()
    raises on no match, making the 404 branch unreachable — .first()
    returns None instead.
    """
    del_item = Item.query.filter_by(list_id=id, item_id=item_id).first()
    if del_item is not None:
        db.session.delete(del_item)
        db.session.commit()
        response = jsonify({'MSG':'Success'})
        response.status_code = 204
    else:
        response = jsonify({'ERR':'Requested item was not found'})
        response.status_code = 404
    return response
# -----------------------------------------------------------------------------------------------------------------------------------------------
# Create any missing tables at import time, then start the dev server when run directly.
db.create_all()
if __name__ == '__main__':
    app.run(debug=True)
| true
|
245998ddb1601f7a9001c10a59dd55fdb0bfc15e
|
Python
|
jaeseok4104/AI_IoT_makerthon
|
/RPi Timer/volunm.py
|
UTF-8
| 485
| 3.28125
| 3
|
[] |
no_license
|
import tkinter
# Build a fixed-size 640x400 window containing a scrollable 1000-entry listbox.
window=tkinter.Tk()
window.title("YUN DAE HEE")
window.geometry("640x400+100+100")
window.resizable(False, False)
frame=tkinter.Frame(window)
# Vertical scrollbar, wired to the listbox created below.
scrollbar=tkinter.Scrollbar(frame)
scrollbar.pack(side="right", fill="y")
listbox=tkinter.Listbox(frame, yscrollcommand = scrollbar.set)
# Populate entries "1/1000" ... "1000/1000".
for line in range(1,1001):
    listbox.insert(line, str(line) + "/1000")
listbox.pack(side="left")
scrollbar["command"]=listbox.yview  # dragging the scrollbar scrolls the listbox
frame.pack()
window.mainloop()
| true
|
7a185806cc944ff566d362260de8db5d4b89754b
|
Python
|
marcussev/football-score-prediction
|
/tests/regression/regression_adv.py
|
UTF-8
| 1,278
| 2.75
| 3
|
[] |
no_license
|
from data.datasets import StatsDatasetRegression
from models.linear_regression import LinearRegression
from trainer.regression_trainer import RegressionTrainer
import visualizer
import pandas as pd
import torch
# ---------------------------------------------------------------------------------------------
# This file trains and tests performance of the linear regression model on the advanced dataset
# ---------------------------------------------------------------------------------------------
# MODEL VARIABLES
# NOTE(review): 18 input features, 2 outputs — presumably the two predicted
# scores; confirm against the dataset columns.
MODEL = LinearRegression(18, 2)
TRAINING_SET = StatsDatasetRegression(pd.read_csv("../../data/datasets/processed/adv_train_data.csv"))
TESTING_SET = StatsDatasetRegression(pd.read_csv("../../data/datasets/processed/adv_test_data.csv"))
EPOCHS = 500
LEARNING_RATE = 0.001
OPTIMIZER = torch.optim.SGD(MODEL.parameters(), lr=LEARNING_RATE)
LOSS = torch.nn.MSELoss()
if __name__ == '__main__':
    # Train, report the best epoch, and save accuracy/loss plots.
    trainer = RegressionTrainer(MODEL, TRAINING_SET, TESTING_SET, EPOCHS, OPTIMIZER, LOSS)
    trainer.train()
    trainer.print_best_results()
    visualizer.plot_accuracy(trainer.epochs, trainer.val_accuracy, "../../results/graphs/accuracy/adv_reg_acc.png")
    visualizer.plot_loss(trainer.epochs, trainer.val_loss, "../../results/graphs/loss/adv_reg_loss.png")
| true
|
2b37b3d1e53a2a62ef989f17d191b834744e32db
|
Python
|
balassit/improved-potato
|
/examples/salesforce/test.py
|
UTF-8
| 158
| 3.109375
| 3
|
[] |
no_license
|
alist = [0, 1, 0, 0]
blist = [0, 0, 1, 0]
# Element-wise logical OR of the two bit lists, stored back into blist.
# Expected output: [0, 1, 1, 0]
blist = [a or b for a, b in zip(alist, blist)]
print(blist)
| true
|
daeb8496907754132fa619a20000340ac2d01149
|
Python
|
c-hurt/utility-functions
|
/collections/chain_iter.py
|
UTF-8
| 156
| 3.484375
| 3
|
[] |
no_license
|
from itertools import *
def yielding_iter():
    """Yield each integer in [0, 10), each wrapped in a one-element list."""
    for value in range(10):
        yield [value]
# Flatten the stream of one-element lists and print each value followed by a space.
for a in chain.from_iterable(yielding_iter()):
    print(f'{a} ')
| true
|
758163d59d71e2c76137e99b416727cbe55550dc
|
Python
|
quique0194/UmayuxBase
|
/umayux_base/position.py
|
UTF-8
| 4,373
| 3.25
| 3
|
[] |
no_license
|
from math import sqrt
from flag_positions import flag_positions
from mymath import dist, angle_to
def closer_point(target_point, list_of_points):
    """Sort *list_of_points* in place by distance to *target_point* and
    return the nearest one (the in-place sort matches the original API)."""
    list_of_points.sort(key=lambda candidate: dist(candidate, target_point))
    return list_of_points[0]
def mean_points(list_of_points):
    """Return the centroid [x, y] of the given 2-D points.

    Division follows the interpreter's `/` semantics, as in the original
    (floor division for ints under Python 2, true division under Python 3).
    """
    count = len(list_of_points)
    sum_x = 0
    sum_y = 0
    for point in list_of_points:
        sum_x += point[0]
        sum_y += point[1]
    return [sum_x / count, sum_y / count]
def intersect_circles(P0, P1, r0, r1):
    """
    Determines whether two circles collide and, if applicable,
    the points at which their borders intersect.
    Based on an algorithm described by Paul Bourke:
    http://local.wasp.uwa.edu.au/~pbourke/geometry/2circle/
    Arguments:
        P0 (2-tuple): the centre point of the first circle
        P1 (2-tuple): the centre point of the second circle
        r0 (numeric): radius of the first circle
        r1 (numeric): radius of the second circle
    Returns:
        False if the circles do not collide
        True if one circle wholly contains another such that the borders
        do not overlap, or overlap exactly (e.g. two identical circles)
        An array of two complex numbers containing the intersection points
        if the circle's borders intersect.
    """
    if len(P0) != 2 or len(P1) != 2:
        raise TypeError("P0 and P1 must be 2-tuples")
    d = dist(P0, P1)
    if d > (r0 + r1):
        # Centres further apart than the radii sum: no contact at all.
        return False
    elif d < abs(r0 - r1):
        # One circle lies wholly inside the other.
        return True
    elif d == 0:
        # Concentric (and, given the check above, equal radii).
        return True
    # a = distance from P0 to the chord through the intersection points.
    a = (r0**2 - r1**2 + d**2) / (2 * d)
    b = d - a
    # h = half-length of that chord; clamp against floating-point error.
    temp = max(0, r0**2 - a**2)
    h = sqrt(temp)
    # P2 = foot of the chord on the line P0 -> P1.
    P2 = [0, 0]
    P2[0] = P0[0] + a * (P1[0] - P0[0]) / d
    P2[1] = P0[1] + a * (P1[1] - P0[1]) / d
    # The two intersection points are P2 offset by h perpendicular to P0->P1.
    i1x = P2[0] + h * (P1[1] - P0[1]) / d
    i1y = P2[1] - h * (P1[0] - P0[0]) / d
    i2x = P2[0] - h * (P1[1] - P0[1]) / d
    i2y = P2[1] + h * (P1[0] - P0[0]) / d
    i1 = (i1x, i1y)
    i2 = (i2x, i2y)
    return [i1, i2]
def intersect_circles_with_error(P0, P1, r0, r1):
    """
    Call this function when you're sure that both circles intersect, but due to
    error variation, the intersection can be null
    """
    if len(P0) != 2 or len(P1) != 2:
        raise TypeError("P0 and P1 must be 2-tuples")
    d = dist(P0, P1)
    # Make r0 <= r1
    if r0 > r1:
        r0, r1 = r1, r0
        P0, P1 = P1, P0
    # Fix error
    if d > r0 + r1:
        # Measured as too far apart: grow r0 until the circles just touch.
        r0 += d - (r0+r1)
        r0 += 0.001 # Fix to accuracy problems
    elif d < r1 - r0:
        # Smaller circle measured wholly inside: grow r0 until tangent.
        r0 += r1 - r0 - d
        r0 += 0.001 # Fix to accuracy problems
    elif d == 0:
        raise Exception("This should never happen")
    return intersect_circles(P0, P1, r0, r1)
# This is to be used out there
def triangulate_position(flags, prev_position=None):
    # Estimate the agent's (x, y) from the visible flags: every pair of
    # flags yields two circle intersections, the candidate closer to
    # prev_position is kept, and all candidates are averaged.
    # NOTE: Python 2 source (print statements, dict.items() list sort).
    if prev_position is None:
        print "I don't have previous position to work with"
        raise Exception("This should never happen")
    if len(flags) < 2:
        # Two distance measurements are the minimum for trilateration.
        print "WARNING: I cannot see enough flags to determine position"
        return prev_position
    l = flags.items()
    l.sort(key=lambda x: x[1].distance)  # nearest flags first
    list_of_points = []
    for i in range(len(l)):
        for j in range(i+1, len(l)):
            i1, i2 = intersect_circles_with_error(flag_positions[l[i][0]],
                                    flag_positions[l[j][0]],
                                    l[i][1].distance, l[j][1].distance)
            list_of_points.append(closer_point(prev_position, [i1, i2]))
    return mean_points(list_of_points)
# This is to be used out there
def calculate_orientation(flags, position):
    # Estimate heading from the nearest flag that lies at least 5 units
    # away from the current position (closer references are too noisy).
    if len(flags) == 0:
        print "WARNING: I cannot see enough flags to determine orientation"
        return None
    l = flags.items()
    l.sort(key=lambda x: x[1].distance)  # nearest flags first
    idx = 0
    ref = position
    while dist(position, ref) < 5 and idx < len(l):
        ref = flag_positions[l[idx][0]]
        idx += 1
    # Absolute bearing to the reference minus the flag's relative direction.
    return -angle_to(position, ref) - l[0][1].direction
if __name__ == "__main__":
    # Smoke tests for the circle-intersection helpers (Python 2 prints).
    ip = intersect_circles
    ipe = intersect_circles_with_error
    print "Intersection:", ip((0,0), (1, 0), 2, 2)
    print "Wholly inside:", ip((0,0), (1, 0), 5, 2)
    print "Single-point edge collision:", ip((0,0), (4, 0), 2, 2)
    print "No collision:", ip((0,0), (5, 0), 2, 2)
    print "Intersection with error:", ipe((2,0), (1,0), 2, 0.9)
| true
|
f66c55ad2c2edd82f5d8c4e6381d990d74fb4d3d
|
Python
|
joel-reujoe/AlgosAndPrograms
|
/Arrays/Arrays2.py
|
UTF-8
| 588
| 4.03125
| 4
|
[] |
no_license
|
## Find max and min element in Array with min comparison
def getMinMax(A):
    """Return a (minimum, maximum) tuple for the sequence A.

    Seeds from the first pair so each remaining element needs at most one
    comparison on average (min-OR-max, not both).

    Fixes over the original: local names no longer shadow the built-in
    `min`/`max`, and an empty sequence raises ValueError instead of an
    accidental IndexError.

    Raises:
        ValueError: if A is empty.
    """
    if not A:
        raise ValueError("getMinMax() arg is an empty sequence")
    if len(A) == 1:  # single element is both min and max
        return A[0], A[0]
    # Seed lo/hi from the first pair.
    if A[0] > A[1]:
        hi, lo = A[0], A[1]
    else:
        hi, lo = A[1], A[0]
    for value in A[2:]:
        if value < lo:
            lo = value
        elif value > hi:
            hi = value
    return (lo, hi)
# Demo: report the extremes of a sample array.
arr = [1000, 11, 445, 1, 330, 3000]
min,max = getMinMax(arr)  # NOTE: shadows the built-in min/max names from here on
print("Minimum element is", min)
print("Maximum element is", max)
| true
|
dd8aa62185e5b8ed893e85cc875f6231ee392b84
|
Python
|
ParadoxZW/fancy-and-tricky
|
/py_snippets/dud print/example.py
|
UTF-8
| 766
| 2.71875
| 3
|
[] |
no_license
|
import multiprocessing as mp
import os
import time
def main(rank, a):
    # Worker body: every non-zero rank gets its `print` replaced with a
    # no-op so only rank 0 produces output; rank 0's print is wrapped to
    # always flush, so multi-process output appears promptly.
    # NOTE(review): indexing __builtins__ like a dict only works when this
    # module runs as __main__ (in imported modules __builtins__ is a module
    # object, not a dict) — confirm the intended usage.
    if rank != 0:
        __print = lambda *args, **kwargs: ...
        __builtins__['print'] = __print
    else:
        ori_print = __builtins__['print']
        __print = lambda *args, **kwargs: ori_print(*args, **kwargs, flush=True)
        __builtins__['print'] = __print
    # Emit five timestamped lines, two seconds apart.
    for i in range(5):
        time.sleep(2)
        print(rank, a, time.ctime())
def spawn(target, nprocs, args=(), kwargs=None):
    """Launch `nprocs` daemon processes running target(rank, *args, **kwargs).

    Each worker receives its rank (0..nprocs-1) as the first positional
    argument; the call blocks until every worker has exited.

    Fix over the original: the mutable default argument ``kwargs={}`` is
    replaced by a ``None`` sentinel (backward-compatible — the effective
    default is still an empty mapping).
    """
    if kwargs is None:
        kwargs = {}
    procs = []
    for rank in range(nprocs):
        p = mp.Process(
            target=target,
            args=(rank, ) + args,
            kwargs=kwargs,
            daemon=True
        )
        p.start()
        procs.append(p)
    # Wait for all workers to finish.
    for p in procs:
        p.join()
if __name__ == '__main__':
    # Demo: four workers; only rank 0's output reaches the console.
    spawn(
        target=main,
        nprocs=4,
        args=('hello world!', )
    )
    print('done!')
|
f0c0a9c2f1365d30dc0bd77746076ef8a8c9194d
|
Python
|
TILE-repository/TILE-repository.github.io
|
/docs/nifties/2022/files/generate_test_report_all.py
|
UTF-8
| 11,984
| 3.171875
| 3
|
[
"CC-BY-3.0",
"CC-BY-4.0"
] |
permissive
|
import json
import xlwt
from xlwt import Workbook
from lark import Lark
from lark import Transformer
def get_failed_testcases(filename):
    """
    Expects filename to be a file that contains the output of a !pytest run.
    Returns the list of testcases that have failed.
    Throws FileNotFoundError exception if file does not exist.
    """
    # Context manager guarantees the handle is closed even if read() fails
    # (the original open/close pair leaked the handle on exceptions).
    with open(filename, 'r') as fhand:
        content = fhand.read()
    # No FAILURES section means every test case passed.
    if not ("= FAILURES" in content):
        return []
    # Failed test cases are reported on lines containing "testcase =".
    return [line for line in content.splitlines() if "testcase =" in line]
def get_test_signature(filename):
    """
    Given a Python file containing "@pytest.mark.parametrize", it returns a list that
    represents the signature of the test. If there are no pytests in the file, it returns
    the empty list.
    Throws FileNotFoundError exception if file does not exist.
    """
    # Context manager guarantees the handle is closed even if a read fails.
    with open(filename, "r") as python_file:
        # Scan forward to the decorator line (or EOF, where readline() == '').
        line = python_file.readline()
        while not (line.startswith("@pytest.mark.parametrize") or line == ''):
            line = python_file.readline()
    # Strip the decorator syntax so only the parameter names remain.
    filter_out = [',', "@pytest.mark.parametrize", "(", ")", "[", '"']
    for f in filter_out:
        line = line.replace(f, "")
    # Result looks like ['testcase', input1, ..., inputn, output];
    # an EOF line ('') reduces to the empty list.
    test_signature = line.split()
    return test_signature
# Below is the grammar describing test cases.
# test case lines look like: '(num, i1, i2,...,in o), #any type of comments'
# - starts with (
# - ends with ),
# - the first argument is a number, the ID of the test case
# - after the end test case ), commenst starting with #can be discarded
# - different parts of the test case are separated by ", "
# - i1, i2, ..., in and o can be of any Python type (int, float, bool, strings, lists, tuples, variables, sets)
# - the exercise explicity indicate that we assume there are no operators (unary, binary operators), variable names, dictionaries, function calls
testcase_parser = Lark(r"""
testcase : "(" DEC_NUMBER "," value ("," value)* ")" [","] [SH_COMMENT]
value: list
| tuple
| emptyset
| set
| string
| number
| "True" -> true
| "False" -> false
| "None" -> none
list : "[" [value ("," value)*] "]"
tuple: "(" [value ("," value)*] ")"
set : "{" value ("," value)* "}"
emptyset: "set()"
number: DEC_NUMBER | FLOAT_NUMBER
string: /[ubf]?r?("(?!"").*?(?<!\\)(\\\\)*?"|'(?!'').*?(?<!\\)(\\\\)*?')/i
DEC_NUMBER: /0|[1-9][\d_]*/i
FLOAT_NUMBER: /((\d+\.[\d_]*|\.[\d_]+)([Ee][-+]?\d+)?|\d+([Ee][-+]?\d+))/
%import common.ESCAPED_STRING
%import common.SH_COMMENT
%import common.CNAME
%import common.SIGNED_NUMBER
%import common.WS
%ignore WS
""", start='testcase')
# Evaluate the tree, using a Transformer.
# A transformer is a class with methods corresponding to branch names.
# For each branch, the appropriate method will be called with the children
# of the branch as its argument, and its return value will replace the branch
# in the tree. We want to transform the parse tree into a tuple containing the
# test case values.
class MyTransformer(Transformer):
    # Collapses a parse tree produced by `testcase_parser` into a plain
    # Python tuple (num, i1, ..., in, out). Each method below handles the
    # grammar rule (or terminal) of the same name.
    def testcase(self, items):
        # A trailing SH_COMMENT was transformed to None (see below);
        # drop it so only the values remain.
        *vs, c = items
        if c==None: #it means it is a comment (see SH_COMMENT), so we can discard
            return tuple(vs)
        else:
            return tuple(items)
    def SH_COMMENT(self,n):
        # Comments carry no data; mark them for removal in testcase().
        return None
    def value(self, items):
        # `value` wraps exactly one child — unwrap it.
        [res] = items
        return res
    def pair(self, key_value):
        k, v = key_value
        return k, v
    def string(self, s):
        # Strip the surrounding quote characters.
        (s,) = s
        return s[1:-1]
    def number (self, n):
        (n,) = n
        return n
    def FLOAT_NUMBER (self, n):
        return float(n)
    def DEC_NUMBER(self, n):
        return int(n)
    def emptyset(self, items):
        # "set()" literal.
        return set()
    def set(self, items):
        res = set()
        for i in items:
            res.add(i)
        return res
    # Containers and constants map directly to their Python equivalents.
    list = list
    tuple = tuple
    dict = dict
    none = lambda self, _: None
    true = lambda self, _: True
    false = lambda self, _: False
def get_test_cases(filename):
    """
    Return the list of test-case tuples defined in the file with
    "@pytest.mark.parametrize". If it is not a pytest file, return [].
    Throws FileNotFoundError exception if file does not exist.

    Bug fix: the original `python_file.close()` sat *after* the `return`
    statement, so it never executed and the file handle leaked; a `with`
    block now guarantees the handle is released.
    """
    test_cases = []
    with open(filename, "r") as python_file:
        # Scan forward to the parametrize decorator (or EOF).
        line = python_file.readline()
        while not (line.startswith("@pytest.mark.parametrize") or line == ''):
            line = python_file.readline()
        # Advance to the first test-case line.
        line = python_file.readline()
        while line.startswith("("):  # each test case starts with "("
            tc_tree = testcase_parser.parse(line)
            # Collapse the parse tree into a tuple (num, i1, ..., in, out).
            tc = MyTransformer().transform(tc_tree)
            test_cases.append(tc)
            line = python_file.readline()  # go to next line in file
    return test_cases
def fill_excell_headers(test_signature, wb):
    """
    Add a 'Test Report' sheet to workbook *wb* and write its header row:
    test case ID, one column per input, expected outcome, result.
    Returns the new sheet.
    """
    # The signature is [testcase, input1..inputn, output] -> n inputs.
    number_of_inputs = len(test_signature) - 2
    sheet = wb.add_sheet('Test Report')
    sheet.write(0, 0, 'test case ID')
    # One header cell per test-case input.
    for column in range(1, number_of_inputs + 1):
        sheet.write(0, column, 'input' + str(column))
    # Final two columns: expected outcome and pass/fail result.
    sheet.write(0, number_of_inputs + 1, 'expected outcome')
    sheet.write(0, number_of_inputs + 2, 'result')
    return sheet
def generate_excell_test_report(filenameTest, filenameTestRes):
    """filenameTest is the name of the pytest .py file and
    filenameTestRes is the name of the .txt file with the test results.

    Writes <test-file-stem>TestReport.xls with one row per test case,
    each marked PASSED or FAILED based on the pytest output.
    """
    try:
        test_signature = get_test_signature(filenameTest)
        if (test_signature == []):
            print("This is not a pytest file")
        else:
            test_cases = get_test_cases(filenameTest)
            failed_test_cases = get_failed_testcases(filenameTestRes)
            failed_test_cases_numbers = []
            for f in failed_test_cases:
                # The test-case id is the third token, e.g. "testcase = 3,".
                failed_test_cases_numbers.append(int(f.split()[2].replace(",","")))
            # Workbook is created
            wb = Workbook()
            #fill with headers for the columns
            sheet = fill_excell_headers(test_signature, wb)
            # Write id, inputs and expected output, then the PASS/FAIL verdict.
            for i in range(len(test_cases)):
                for j in range(len(test_cases[i])):
                    sheet.write(i+1, j , str(test_cases[i][j]))
                if test_cases[i][0] in failed_test_cases_numbers:
                    sheet.write(i+1, len(test_cases[i]) , "FAILED")
                else:
                    sheet.write(i+1, len(test_cases[i]) , "PASSED")
            report_name = filenameTest.replace(".py", "")
            # Save the Workbook
            wb.save(report_name + 'TestReport.xls')
    except FileNotFoundError:
        print("El fichero no existe" + filenameTest + " o " + filenameTestRes)
def generate_JSON_test_report(filenameTest, filenameTestRes):
    """filenameTest is the name of the pytest .py file and
    filenameTestRes is the name of the .txt file with the test results.

    Writes <test-file-stem>test_case_report.json: one object per test
    case with its id, inputs, expected output and PASSED/FAILED verdict.

    Bug fix: the original computed the input slice (tc[1:-1]) but then
    appended *every* element of the tuple — id and expected output
    included — into the "inputs" field. Only the actual inputs are now
    serialized. The report file is also written under a `with` block so
    the handle is always closed.
    """
    try:
        test_signature = get_test_signature(filenameTest)
        if (test_signature == []):
            print("This is not a pytest file")
        else:
            test_cases = get_test_cases(filenameTest)
            failed_test_cases = get_failed_testcases(filenameTestRes)
            failed_test_cases_numbers = []
            for f in failed_test_cases:
                # The test-case id is the third token, e.g. "testcase = 3,".
                failed_test_cases_numbers.append(int(f.split()[2].replace(",", "")))
            test_cases_dicts = []
            for tc in test_cases:
                tc_dict = {"id": tc[0]}
                out = tc[-1]
                # Only the input values (between id and expected output).
                tc_dict["inputs"] = list(tc[1:len(tc) - 1])
                tc_dict["output esperado"] = out
                if tc[0] in failed_test_cases_numbers:
                    tc_dict["resultado"] = "FAILED"
                else:
                    tc_dict["resultado"] = "PASSED"
                test_cases_dicts.append(tc_dict)
            report_name = filenameTest.replace(".py", "")
            with open(report_name + "test_case_report.json", "w") as fhand_write:
                fhand_write.write(json.dumps(test_cases_dicts))
    except FileNotFoundError:
        print("El fichero no existe" + filenameTest)
def main():
    # Exercise report generation on four sample pytest files.
    # The output must be checked manually:
    #   1) the report files were generated in the directory,
    #   2) their data matches the test cases in each .py file and the
    #      outcomes recorded in the matching output .txt file.
    file1_test = "pytests-for_testing_reports/union_test.py"
    file1_testres = "pytests-for_testing_reports/output_union_test.txt"
    file2_test = "pytests-for_testing_reports/min_max_list_test.py"
    file2_testres = "pytests-for_testing_reports/output_min_max_list_test.txt"
    file3_test = "pytests-for_testing_reports/interseccion_test.py"
    file3_testres = "pytests-for_testing_reports/output_interseccion_test.txt"
    file4_test = "pytests-for_testing_reports/filtrar_impares_test.py"
    file4_testres = "pytests-for_testing_reports/output_filtrar_impares_test.txt"
    # For each pair: an .xls report and a .json report.
    generate_excell_test_report(file1_test, file1_testres)
    generate_JSON_test_report(file1_test, file1_testres)
    generate_excell_test_report(file2_test, file2_testres)
    generate_JSON_test_report(file2_test, file2_testres)
    generate_excell_test_report(file3_test, file3_testres)
    generate_JSON_test_report(file3_test, file3_testres)
    generate_excell_test_report(file4_test, file4_testres)
    generate_JSON_test_report(file4_test, file4_testres)
| true
|
d53002cb6ff14245b7544b89f0f0e1a1730a7960
|
Python
|
cosmoglint/strings_with_turtle
|
/6_dot_flower.py
|
UTF-8
| 1,003
| 3.5625
| 4
|
[] |
no_license
|
# flower made with dots of increasing sizes
import turtle
import math

ts = turtle.getscreen()
ts.colormode(255)
t = turtle.Turtle()
t.speed(0)  # fastest drawing speed

sides = 30  # each "circle" is approximated by a regular 30-sided polygon
turn_angle = 360/sides
in_radius = 60 #initial radius of first circle


def slen_rad(radius):
    """Chord length of one side of a regular `sides`-gon of this radius.

    Formula: 2 * r * sin(pi / sides); math.radians(180)/sides == pi/sides.
    """
    side_len = radius * 2 * (math.sin(math.radians(180)/sides))
    return side_len


side_length = slen_rad(in_radius) #side length of first circle
each_side = side_length

# Draw 9 concentric dotted rings with growing radius and pen width.
for i in range(1,10):
    t.width(i)
    radius = in_radius + i*20
    each_side = slen_rad(radius)
    t.up()
    t.sety(radius*(-1))  # start each ring at the bottom of its circle
    t.setx(-each_side/2)
    for j in range(sides):
        if (i%2==0):
            # Even rings: one dot per polygon vertex.
            t.up()
            t.forward(each_side)
            t.left(turn_angle)
            t.down()
            t.dot()
        else:
            # Odd rings: dot at the midpoint of each side, offsetting the pattern.
            t.up()
            t.forward(each_side/2)
            t.down()
            t.dot()
            t.up()
            t.forward(each_side/2)
            t.left(turn_angle)
            t.down()

turtle.exitonclick()
| true
|
a25d67fbd5efa0aa5726f11bee1e11686ba1ee03
|
Python
|
maggieyam/LeetCode
|
/matrix.py
|
UTF-8
| 804
| 3.21875
| 3
|
[] |
no_license
|
def rotate(self, matrix: List[List[int]]) -> None:
    """Rotate the n x n matrix 90 degrees clockwise, in place.

    Walks the matrix ring by ring from the outside in; for each starting cell
    on a ring it cycles the four positions that map onto each other.
    (Also returns the matrix for convenience, matching the original.)
    """
    n = len(matrix)
    ring = 0        # index of the current ring (also its top-left offset)
    remaining = n   # side length of the current inner square
    while remaining > 1:
        for step in range(remaining - 1):
            r, c = ring, step + ring
            carried = matrix[r][c]
            nxt = matrix[c][n - r - 1]
            # Rotate the 4-cycle: (r, c) -> (c, n-r-1) -> ... back to start.
            for _ in range(4):
                matrix[c][n - r - 1] = carried
                r, c = c, n - r - 1
                carried = nxt
                nxt = matrix[c][n - r - 1]
        remaining -= 2  # each ring strips one row/column from every side
        ring += 1
    return matrix
| true
|
9af6065e97d9b881863d0b3cce7d8cae529838e7
|
Python
|
benjaminthedev/FreeCodeCamp-Python-for-Everybody
|
/10-build-your-own-functions.py
|
UTF-8
| 166
| 3.21875
| 3
|
[] |
no_license
|
# What will the following Python program print out?:
def fred():
    # Defined first, but only executes when called below.
    print("Zap")


def jane():
    print("ABC")


# Call order (not definition order) determines the output.
jane()
fred()
jane()

# Answer
# ABC
# Zap
# ABC
| true
|
6e63df8e3c42dd58e9393598f71cae2b316588a5
|
Python
|
perezperret/euler
|
/problem002_test.py
|
UTF-8
| 349
| 3.34375
| 3
|
[] |
no_license
|
import unittest
import problem002
class TestStringMethods(unittest.TestCase):
    """Unit tests for the Project Euler problem 2 helpers in problem002."""

    def test_fibs_up_to_25(self):
        # fib(25) is expected to yield the Fibonacci numbers below 25.
        self.assertEqual(problem002.fib(25), [0, 1, 1, 2, 3, 5, 8, 13, 21])

    def test_sum_evens(self):
        # Even terms are 0, 2, 8 -> sum 10.
        self.assertEqual(problem002.sumEvens([0, 1, 1, 2, 3, 5, 8, 13, 21]), 10)


if __name__ == '__main__':
    unittest.main()
| true
|
09e2fe98d52afa3d1dfc90755328c2614cbf0900
|
Python
|
seonukim/Study
|
/ML/m35_outliers.py
|
UTF-8
| 620
| 3.5625
| 4
|
[] |
no_license
|
import numpy as np
def outliers(data_out):
    """Return the indices of IQR outliers in `data_out` (Tukey 1.5*IQR fences).

    Prints the first and third quartiles, then returns the np.where tuple of
    positions falling outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
    """
    q1, q3 = np.percentile(data_out, [25, 75])
    print("1사분위 : ", q1)
    print("3사분위 : ", q3)
    spread = q3 - q1
    low_fence = q1 - spread * 1.5
    high_fence = q3 + spread * 1.5
    return np.where((data_out > high_fence) | (data_out < low_fence))
# a = np.array([1, 2, 3, 4, 10000, 6, 7, 5000, 90, 100])
# b = outliers(a)
# print("이상치의 위치 : ", b)
# 실습 : 행렬을 입력해서 컬럼별로 이상치 발견하는 함수를 구현하시오
# 파일명 : m36_outliers2.py
| true
|
19451ab05d912d8ff9d2426742689561f6292302
|
Python
|
m4rdukkkkk/web_monitor
|
/A50_myStock.py
|
UTF-8
| 4,006
| 2.515625
| 3
|
[] |
no_license
|
# ! -*- coding:utf-8 -*-
# 2019.1.23 模型重新梳理,两次PL汇率换算,加上了手数的因素
import time
import re
import pymysql
import requests
from selenium import webdriver
# 还是要用PhantomJS
import datetime
import string
from math import floor
total_Cash = 30000 # 是人民币
FX_price = 6.95
index_Cash_dollar = (0.3*total_Cash)/FX_price # index的人民币部位除以汇率,变成美元
stock_Cash = 0.6*total_Cash # stock部位的人民币
index_Future_N = floor(index_Cash_dollar/880) # index_leg的手数
index_cost = 10500
stock_cost = 2.40
# 2019.1.7 Yuanxing Energy —— A50 index model test (re-watching, now that there is an API)
def get_index_PL():
    """Scrape the latest A50 index price and append the index-leg P&L (as str) to big_list."""
    try :
        driver = webdriver.Chrome()
        url = 'https://finance.sina.com.cn/futures/quotes/CHA50CFD.shtml'
        # driver = webdriver.PhantomJS(service_args=SERVICE_ARGS)
        driver.set_window_size(38, 12)  # set window size
        driver.get(url)
        # time.sleep(1)
        html = driver.page_source
        # print(html)  # matching on a dynamic color class proved unreliable; match the price cell instead
        patt = re.compile('<th>最新价:' + '.*?</th><td class=".*?">(.*?)</td>', re.S)
        items = re.findall(patt, html)
        items_int = int(items[0][:-3])  # strip the last 3 chars (decimals), keep the integer part
        # point spread * $1 per point * contract count * FX rate -> P&L in CNY
        indexF_PL = (index_cost-items_int)*1*index_Future_N*FX_price
        indexF_PL_2 = round(indexF_PL,2)
        big_list.append(str(indexF_PL_2))
        driver.quit()
    except ValueError as e:
        # NOTE(review): on a bad parse the scrape is silently dropped and
        # driver.quit() is skipped, leaking the browser — confirm acceptable.
        pass
# Yuanxing Energy (stock 000683)
def get_stocks_PL():
    """Fetch the stock quote and append the stock-leg P&L (CNY, rounded) to big_list."""
    url = 'https://www.laohu8.com/hq/s/000683'
    # NOTE(review): header key should probably be 'User-Agent' — confirm the
    # site accepts this as-is.
    headers = {'Useragent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; GTB7.0'}
    response = requests.get(url, headers=headers)
    content = response.text
    patt = re.compile('<td class="price">(.*?)</td>', re.S)
    items = re.findall(patt, content)
    # Percentage return vs. cost basis, scaled by the CNY allocated to the stock leg.
    stock_PL = ((float(items[0])-stock_cost) /stock_cost) *stock_Cash
    stock_PL_2 = round(stock_PL,2)
    big_list.append(stock_PL_2)
def profilo_PL():
    """Combine the two leg P&Ls (big_list[0], big_list[1]) into portfolio P&L
    and return-%; append both to big_list."""
    try:
        A = big_list[0]
        B = big_list[1]
        profilo_PL = float(B) + float(A)
        profilo_PL_2 = round(profilo_PL,2)
        big_list.append(profilo_PL_2)
        total_profit_R = profilo_PL_2/total_Cash
        # total_profit_R_2 = '%.2f%%' % (total_profit_R * 100)  # this variant appends a literal '%'
        total_profit_R_2 = round(total_profit_R,3) * 100  # simplest form
        big_list.append(total_profit_R_2)
    except IndexError as e:
        # A missing leg (upstream scrape failed) just logs the IndexError.
        print(e)
def insertDB(content):
    """Insert one (index_PL, stock_PL, profilo_PL, profilo_PL_R) row into MySQL.

    Only writes when big_list holds all four values, i.e. every upstream
    step appended successfully.
    """
    connection = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123456', db='web_monitor',
                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
    cursor = connection.cursor()
    # This checks the length of big_list, not the length of the content string.
    if len(big_list) == 4:
        cursor.executemany('insert into A50_OneStock_PL (index_PL,stock_PL,profilo_PL,profilo_PL_R) values (%s,%s,%s,%s)', content)
        connection.commit()
        connection.close()
        print('向MySQL中添加数据成功!')
    else:
        # NOTE(review): the connection is never closed on this branch — confirm intended.
        print('出列啦')
#
# #
# NOTE: the data sources are not guaranteed to be stable; good enough for now.
if __name__ == '__main__':
    i = 0
    while True:  # poll forever, one sample roughly every 6 seconds
        i += 1
        print(i)
        big_list = []  # shared scratch list the helper functions append into
        get_index_PL()
        get_stocks_PL()
        profilo_PL()
        l_tuple = tuple(big_list)
        content = []
        content.append(l_tuple)
        insertDB(content)
        time.sleep(6)

# Reference DDL for the target table:
# create table A50_OneStock_PL(
# id int not null primary key auto_increment,
# index_PL varchar(10),
# stock_PL varchar(10),
# profilo_PL varchar(10),
# profilo_PL_R varchar(10)
# ) engine=InnoDB charset=utf8;
# drop table A50_OneStock_PL;
| true
|
33d969063a49e3989f2e96e5af6af3c1e299d77b
|
Python
|
Ordoptimus/Coding
|
/Problems/HRML1.py
|
UTF-8
| 236
| 2.859375
| 3
|
[] |
no_license
|
# HackerRank cartesian-product exercise: read two integer lists from stdin,
# sort them, and print every (a, b) pair in lexicographic order.
from itertools import product  # FIX: 'product' was used below but never imported

a = [int(x) for x in input().split()]
b = [int(y) for y in input().split()]
#a = list(map(int, a)) (also learning)
#b = list(map(int, b))
a.sort()
b.sort()
res = list(product(a, b))
# FIX: loop variable renamed so it no longer shadows (and clobbers) list `a`.
res = [str(pair) for pair in res]
print(' '.join(res))
| true
|
17b4ba086773ba4e938b3c11a59c315da5219ff3
|
Python
|
Ch4pster/chappy-chaps
|
/misc experimental.py
|
UTF-8
| 389
| 3.8125
| 4
|
[] |
no_license
|
def factorial(x):
    """Return x! for a non-negative integer x (returns 1 for x <= 0)."""
    result = 1
    # Multiply down from x to 1; an empty range leaves result at 1.
    for factor in range(x, 0, -1):
        result *= factor
    return result
"""def anti_vowel(argument):
text = str(argument)
text.lower
for x in text:
if x == "a" or x == "e" or x == "i" or x == "o" or x == "u": ###if vowels = aeiou, how do you iterate through that?####
text = text - x
else:
()
print anti_vowel(Hello)"""
| true
|
b49a5b317244c52294a6c241ec126c5d3d0de41e
|
Python
|
kirill-kovalev/VK-feed-bot
|
/bot/UserList.py
|
UTF-8
| 2,035
| 2.859375
| 3
|
[] |
no_license
|
import json
from User import *
class UserList:
    """Registry of bot users, persisted to users.json."""

    class UserExists(Exception):
        # Raised by add() when the chat_id is already registered.
        def __init__(self): return ;

    class UserNotExists(Exception):
        # Raised by remove() when the chat_id is unknown.
        def __init__(self): return;

    # NOTE(review): class-level mutable attribute — every UserList instance
    # shares this one list. Confirm the class is meant to be a singleton.
    userList:[User] = []

    def add(self,chat_id:int , token:str ):
        """Register a new user; raises UserExists for a duplicate chat_id."""
        for user in self.userList:
            if user.chat_id == chat_id:
                raise self.UserExists()
                return
        try:
            user = User(chat_id,token)
            self.userList.append(user)
        except Exception as exception:
            raise exception;

    def get(self,chat_id:int):
        """Return the User with this chat_id, or None if absent."""
        for user in self.userList:
            if user.chat_id == chat_id:
                return user

    def remove(self,chat_id):
        """Stop and delete the user; raises UserNotExists if unknown."""
        for user in self.userList:
            if user.chat_id == chat_id:
                user.stop()
                self.userList.remove(user)
                return True
        raise self.UserNotExists

    def toJSON(self):
        """Serialize all users as a JSON list of (chat_id, token, last_upd_time)."""
        users = [ (user.chat_id,user.token,user.last_upd_time) for user in self.userList]
        return json.dumps(users)

    def fromJSON(self,string:str):
        """Stop current users and append users rebuilt from a toJSON() string.

        NOTE(review): the old entries are stopped but not removed from the
        list before appending — confirm duplicates are impossible here.
        """
        for user in self.userList:
            user.stop()
        users = json.loads(string)
        for user in users:
            try:
                self.userList.append(User(user[0],user[1],user[2]))
            except IndexError:
                # Older entries have no last_upd_time — fall back to 2-arg form.
                self.userList.append(User(user[0], user[1]))
            except Exception: pass

    def save(self):
        """Best-effort dump of the registry to users.json."""
        try:
            file = open("users.json", "w+");
            file.writelines(self.toJSON())
            file.close();
            log("saved", "")
        except:
            # log / traceback helpers presumably come from `from User import *`
            # — TODO confirm.
            log("can't save");
            log(traceback.format_exc())
            pass

    def load(self):
        """Best-effort restore of the registry from users.json."""
        try:
            file = open("users.json", "r");
            # NOTE(review): the local name shadows the json module for the
            # rest of this method — confirm intended.
            json = file.readlines(1)[0]
            self.fromJSON(json)
        except:
            trace_exc()
            pass;

    def __del__(self):
        # Stop every worker and persist state when the registry is collected.
        for u in self.userList:
            u.stop()
        self.save()
| true
|
70ae8b05de9d96f9e89252649d0d0d47eb3ec66a
|
Python
|
glfAdd/note
|
/python/004_并发/learn_multiprocessing.py
|
UTF-8
| 2,436
| 3.078125
| 3
|
[] |
no_license
|
import multiprocessing
import os
import time
import logging
""" ============================ multiprocessing
当前进程
multiprocessing.current_process()
设置调试的日志
默认情况下,日志记录级别设置为NOTSET不生成任何消息
multiprocessing.log_to_stderr(logging.DEBUG) 设置调试的日志
"""
""" ============================ Process
用来创建子进程
def __init__(self, group, target, name, args, kwargs, *, daemon):
- target
- args
- kwargs
- name 进程实例的别名, Process-1
- group
is_alive()
start()
run() 去调用target指定的函数,自定义类的类中一定要实现该方法
terminate() 强制终止进程,不会进行任何清理操作。如果该进程终止前,创建了子进程,那么该子进程在其强制结束后变为僵尸进程;如果该进程还保存了一个锁那么也将不会被释放,进而导致死锁。使用时,要注意
join([timeout]) 主线程等待子线程终止。timeout为可选择超时时间;需要强调的是,p.join只能join住start开启的进程,而不能join住run开启的进程 。
name str 别名
daemon bool
pid int 当前进程实例的PID值
exitcode int 子进程的退出代码. None如果流程尚未终止, 负值-N表示孩子被信号N终止
authkey bytes
sentinel int
daemon bool 守护进程
默认情况: 在所有子进程退出之前,主程序不会退出
守护进程:
- 主进程代码执行结束后就终止.
- 内无法再开启子进程,否则抛出异常:AssertionError: daemonic processes are not allowed to havechildren
"""
class MyProcess(multiprocessing.Process):
    """Minimal Process subclass demo: run() is invoked by start().

    FIX: the original called super(self, MyProcess).__init__() — the
    arguments were reversed, so every construction raised TypeError.
    super() takes the class first, then the instance.
    """

    def __init__(self):
        super(MyProcess, self).__init__()

    def run(self):
        # Executed in the child process once start() is called.
        print('My Process')
def test(*args):
    """Worker entry point: sleeps briefly, then reports the process name and pid."""
    time.sleep(2)  # simulate work so parent/child interleaving is visible
    print(multiprocessing.current_process().name)
    print(multiprocessing.current_process().name)
    print(*args, os.getpid())
if __name__ == '__main__':
    # Echo all multiprocessing debug messages to stderr.
    multiprocessing.log_to_stderr(logging.DEBUG)
    p = multiprocessing.Process(target=test, args=('a', 'b', 'c'))
    print(multiprocessing.current_process().name)
    print('父进程 %d' % os.getpid())
    p.start()
    print(p.exitcode)  # None here: the child has not terminated yet
    p.join()
    print(p.exitcode)  # 0 after a clean exit
""" ============================ Queue
进程之间通信, 使用Queue来传递消息
"""
""" ============================ Pool
"""
| true
|
8a42b40ec569f48a0aa132573694cb4722ed0d03
|
Python
|
brook-hc/py-study
|
/004-类/031-多继承.py
|
UTF-8
| 582
| 3.671875
| 4
|
[] |
no_license
|
class a():
    def demo(self):
        print('this is a\'s demo method')

    def test(self):
        print('this is a\'s test method')


class b():
    def demo(self):
        print('this is b\'s demo method')

    def test(self):
        print('this is b\'s test method')


class c(b,a):  # b is listed before a, so b is searched first in the MRO.
    pass


d=c()
print(dir(c))  # dir() lists the attributes/methods available on the class.
# __mro__ shows the method-resolution order; query it on the class, not on an
# instance (d.__mro__ would be wrong).
print(c.__mro__)
d.demo()
d.test()
| true
|
11f50e52f2f34b1fe07e396d78c7d6e6709e4a86
|
Python
|
shamoldas/pythonBasic
|
/DataScience/pandas/Concatenation.py
|
UTF-8
| 921
| 3.625
| 4
|
[] |
no_license
|
# importing pandas module
import pandas as pd

# Define a dictionary containing employee data
data1 = {'Name':['Jai', 'Princi', 'Gaurav', 'Anuj'],
         'Age':[27, 24, 22, 32],
         'Address':['Nagpur', 'Kanpur', 'Allahabad', 'Kannuaj'],
         'Qualification':['Msc', 'MA', 'MCA', 'Phd']}

# Define a dictionary containing employee data
data2 = {'Name':['Abhi', 'Ayushi', 'Dhiraj', 'Hitesh'],
         'Age':[17, 14, 12, 52],
         'Address':['Nagpur', 'Kanpur', 'Allahabad', 'Kannuaj'],
         'Qualification':['Btech', 'B.A', 'Bcom', 'B.hons']}

# Convert the dictionary into DataFrame (rows 0-3)
df = pd.DataFrame(data1,index=[0, 1, 2, 3])

# Convert the dictionary into DataFrame (rows 4-7, so the indexes don't clash)
df1 = pd.DataFrame(data2, index=[4, 5, 6, 7])

print(df, "\n\n", df1)

print('Concatetion.\n')
# using keys: 'x'/'y' become the outer level of a hierarchical index
frames = [df, df1 ]
res = pd.concat(frames, keys=['x', 'y'])
res  # no-op outside a REPL; the print below actually shows the result
print(res)
| true
|
71337899114e2a0a884ca948dcec2c2b7154bdc9
|
Python
|
ao-song/dd2424-project
|
/ultils.py
|
UTF-8
| 1,785
| 2.96875
| 3
|
[] |
no_license
|
import numpy as np
from sklearn.neighbors import NearestNeighbors
def getQ(pixels):
    """Count how many distinct (a, b) color bins are hit by `pixels`.

    Each pixel is an (a, b) pair in [-110, 110); get_index maps a channel
    value to one of 22 bins. NOTE(review): get_index must return integers
    for the NumPy indexing below — confirm it floors (true division would
    produce floats and raise).
    """
    colors = np.zeros((22, 22))
    for p in pixels:
        a, b = p
        colors[get_index(a), get_index(b)] = 1
    return np.count_nonzero(colors)
def get_index(num):
    """Map an a/b channel value in [-110, 110) to its bin index 0..21.

    FIX: use floor division so the result is an integer usable as a NumPy
    array index — the original true division returned a float, which breaks
    the integer indexing in getQ().
    """
    return (num + 110) // 10
def get_space():
    """Return the 22x22 grid of (a, b) bin values as a (484, 2) array.

    Cifar10 occupies the full color space, so the grid covers [-110, 100]
    in steps of 10 on both axes, in row-major (a outer, b inner) order.
    """
    grid_values = np.arange(-110, 110, 10)
    points = [[a_val, b_val] for a_val in grid_values for b_val in grid_values]
    return np.array(points)
def gaussian_kernel(distance, sigma=5):
    """Row-normalized Gaussian weights for a 2-D array of distances.

    Each row of weights exp(-d^2 / (2*sigma^2)) is divided by its own sum,
    so every output row sums to 1.
    """
    weights = np.exp(-(distance ** 2) / (2 * sigma ** 2))
    row_totals = weights.sum(axis=1).reshape(-1, 1)
    return weights / row_totals
def soft_encoding_ab(ab):
    """Soft-encode ab channels against the 484-bin color space.

    For each sample, every pixel's (a, b) pair is matched to its 5 nearest
    bins and those bins receive Gaussian-kernel weights.

    NOTE(review): assumes ab is (n, 2, H, W) — channel axis 1 holds a then b;
    confirm against the caller. Returns an (n, 484, H, W)-shaped stack
    (each y is transposed before stacking).
    """
    n = ab.shape[0]
    Y = []
    for i in range(n):
        # Flatten the a and b and construct 2d array
        a = ab[i, 0, :, :]
        b = ab[i, 1, :, :]
        # print(a.shape)
        a = a.flatten()
        # print(a.shape)
        b = b.flatten()
        newab = np.vstack((a, b)).T
        # Full color space
        space = get_space()
        # Compute soft encoding: 5 nearest bins per pixel, Gaussian-weighted.
        nbrs = NearestNeighbors(
            n_neighbors=5, algorithm='ball_tree').fit(space)
        distances, indices = nbrs.kneighbors(newab)
        # print('indices is: ' + str(indices))
        # print(indices.shape)
        gk = gaussian_kernel(distances)
        # print('gk is : ' + str(gk))
        # print(gk.shape)
        y = np.zeros((newab.shape[0], space.shape[0]))
        # print(y.shape)
        index = np.arange(newab.shape[0]).reshape(-1, 1)
        # print(index)
        # Scatter the kernel weights into each pixel's 5 nearest bins.
        y[index, indices] = gk
        # print(y.shape)
        y = y.reshape(ab[i, 0, :, :].shape[0], ab[i, 0, :, :].shape[1], space.shape[0])
        Y.append(y.T)
    return np.stack(Y)
| true
|
c8da1cb35f570a289c05b591baa87b479e92cb0a
|
Python
|
angelusualle/algorithms
|
/advanced_algs/kruskals/min_span_tree_kruskal.py
|
UTF-8
| 584
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# O(E log E) — dominated by sorting the deduplicated edge list.
def min_span_tree_kruskal(graph):
    """Kruskal's minimum spanning tree.

    graph: adjacency dict {node: [(neighbor, weight), ...]} of an undirected
    graph (each edge listed from both endpoints). Returns a list of
    (weight, n1, n2) tuples with n1 < n2, in ascending weight order.

    FIX: the original tracked only "visited" nodes, which both accepts
    cycle-forming edges and rejects valid edges joining two existing
    components. A union-find (with path halving) implements the real
    acyclicity test.
    """
    edges = []
    seen = set()
    # Deduplicate undirected edges by their sorted endpoint pair.
    for node in graph:
        for neighbor, weight in graph[node]:
            edge = sorted([node, neighbor])
            key = str(edge)
            if key not in seen:
                seen.add(key)
                edges.append((weight, edge[0], edge[1]))
    edges.sort(key=lambda e: e[0])

    parent = {}

    def _find(x):
        # Root lookup with path halving.
        parent.setdefault(x, x)
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    min_tree = []
    for wt, n1, n2 in edges:
        root1, root2 = _find(n1), _find(n2)
        if root1 != root2:  # joins two components -> no cycle
            parent[root1] = root2
            min_tree.append((wt, n1, n2))
    return min_tree
| true
|
995f49ceaf9d8be5b19ef52efe582220e8d957c7
|
Python
|
skang29/GANs
|
/Parallel_GAN_structure/sndcgan_zgp/ops/layers/linears.py
|
UTF-8
| 1,209
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
""""
Layers / linear layers under tensorflow environment.
Supports NCCL multi-gpu environment.
To activate the environment, use code below in your
main.py.
>> os.environ['nccl_multigpu_env'] = 'true'
"""
__version__ = "1.0.0"
import os
import tensorflow as tf
from ..normalizations import spectral_norm
NCCL_FLAG = os.environ.get('nccl_multigpu_env')
def linear(input_, output_size, name='linear', bias_init=0.0, sn=False, with_w=False, tower_config=None):
    """Fully-connected layer y = x @ w + b (TensorFlow 1.x).

    input_: 2-D tensor [batch, features]. output_size: width of the output.
    sn: apply spectral normalization to the weight matrix.
    with_w: additionally return (w, b) alongside y.
    tower_config: forwarded to spectral_norm for NCCL multi-GPU setups.
    """
    shape = input_.get_shape().as_list()
    with tf.variable_scope(name):
        # Xavier-initialized weights; variable reuse is governed by the scope.
        w = tf.get_variable(name="w",
                            shape=[shape[1], output_size],
                            dtype=tf.float32,
                            initializer=tf.contrib.layers.xavier_initializer(uniform=False))

        b = tf.get_variable(name="b",
                            shape=[output_size],
                            initializer=tf.constant_initializer(bias_init))

        if sn:
            y = tf.matmul(input_, spectral_norm(w, tower_config=tower_config)) + b
        else:
            y = tf.matmul(input_, w) + b

        if with_w:
            return y, w, b
        else:
            return y
| true
|
3dd8a73f2209ce4987210ec2ea27a5c4ac184576
|
Python
|
emirelesg/Self-Driving-Vehicle
|
/src/processor.py
|
UTF-8
| 5,843
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from line import Line
class ImageProcessor():
    """
    Implements the computer vision algorithms for detecting lanes in an image.
    """

    def __init__(self, frameDimensions, frameRate):
        """frameDimensions: (width, height) in pixels; frameRate: fps (stored only)."""
        # Define camera dimensions.
        self.frameDimensions = frameDimensions
        self.frameRate = frameRate
        self.w = self.frameDimensions[0]
        self.h = self.frameDimensions[1]
        # ROI dimensions in percentage.
        self.roiY = (0.57, 0.71)
        self.roiX = (0.67, 0.95)
        # Initialize the left and right lane classes (BGR colors: red / blue).
        self.left = Line(self.frameDimensions, (0, 0, 255))
        self.right = Line(self.frameDimensions, (255, 0, 0))
        # Camera calibration
        # Scale the calibration matrix to the desired frame dimensions.
        self.calibrationResolution = (1280, 720) # Resolution at which the camera matrix is provided.
        kx = self.w / self.calibrationResolution[0] # Calculate the change in the -x axis.
        ky = self.h / self.calibrationResolution[1] # Calculate the change in the -y axis.
        cameraMatrix = np.array([ # Raw camera calibration matrix.
            [1.00612323e+03, 0.00000000e+00, 6.31540281e+02],
            [0.00000000e+00, 1.00551440e+03, 3.48207362e+02],
            [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]
        ])
        self.cameraMatrix = np.multiply(cameraMatrix, [ # Adjust the camera calibration matrix.
            [kx, 1, kx],
            [1, ky, ky],
            [1, 1, 1]
        ])
        self.distortionCoefficients = np.array([[0.18541226, -0.32660915, 0.00088513, -0.00038131, -0.02052374]])
        # Precompute the undistortion maps once so process() only has to remap.
        self.newCameraMatrix, self.roi = cv2.getOptimalNewCameraMatrix(self.cameraMatrix, self.distortionCoefficients, self.frameDimensions, 1, self.frameDimensions)
        self.rectifyMapX, self.rectifyMapY = cv2.initUndistortRectifyMap(self.cameraMatrix, self.distortionCoefficients, None, self.newCameraMatrix, self.frameDimensions, 5)

    def doBlur(self, frame, iterations, kernelSize):
        """
        Performs a gaussian blur with the set number of iterations.
        """
        blured = frame.copy()
        while iterations > 0:
            blured = cv2.GaussianBlur(blured, (kernelSize, kernelSize), sigmaX=0, sigmaY=0)
            iterations -= 1
        return blured

    def doRegionOfInterest(self, frame):
        """
        Obtains the region of interest from a frame. The dimensions of the ROI are set by the class
        properties roiX and roiY.
        """
        # Convert the percentage ROI bounds into pixel coordinates.
        y0Px = self.h * self.roiY[0]
        y1Px = self.h * self.roiY[1]
        x0Px = (1 - self.roiX[0]) * self.w / 2
        x1Px = (1 - self.roiX[1]) * self.w / 2
        # Symmetric trapezoid; everything outside it is masked to black.
        vertices = np.array([[
            (x0Px, y0Px),
            (x1Px, y1Px),
            (self.w - x1Px, y1Px),
            (self.w - x0Px, y0Px)
        ]], dtype=np.int32)
        mask = np.zeros_like(frame)
        cv2.fillPoly(mask, vertices, 255)
        return cv2.bitwise_and(frame, mask)

    def findLanes(self, frame, lines, minAngle=10, drawAll=False):
        """
        Iterates through the results from the Hough Transform and filters the detected lines into
        those who belong to the left and right lane. Finally fits the data to a 1st order polynomial.
        """
        self.left.clear()
        self.right.clear()
        if type(lines) == type(np.array([])):
            for line in lines:
                for x1, y1, x2, y2 in line:
                    # Discard near-horizontal segments; positive slope -> right lane.
                    angle = np.degrees(np.arctan2(y2 - y1, x2 - x1))
                    if np.abs(angle) > minAngle:
                        if angle > 0:
                            self.right.add(x1, y1, x2, y2)
                            if drawAll:
                                cv2.line(frame, (x1, y1), (x2, y2), self.right.color)
                        else:
                            self.left.add(x1, y1, x2, y2)
                            if drawAll:
                                cv2.line(frame, (x1, y1), (x2, y2), self.left.color)
        self.left.fit()
        self.right.fit()
        return frame

    def drawPoly(self, frame, poly, color, width=3):
        """
        Draws a 1-D polynomial into the frame. Uses the roiY for the -y coordinates.
        """
        y0 = self.h * self.roiY[0]
        y1 = self.h * self.roiY[1]
        y0Px = int(y0)
        y1Px = int(y1)
        if poly:
            # Evaluate x = poly(y) at the top and bottom of the ROI band.
            x0Px = int(poly(y0))
            x1Px = int(poly(y1))
            cv2.line(frame, (x0Px, y0Px), (x1Px, y1Px), color, width)
        else:
            # No fit available: draw a vertical marker at x = 0.
            cv2.line(frame, (0, y0Px), (0, y1Px), color, width)

    def process(self, frame):
        """
        Main pipeline for detecting lanes on a frame.
        """
        # undistort -> gray -> blur -> Canny edges -> ROI mask -> Hough lines.
        undistort = cv2.remap(frame, self.rectifyMapX, self.rectifyMapY, cv2.INTER_LINEAR)
        gray = cv2.cvtColor(undistort, cv2.COLOR_BGR2GRAY)
        grayColor = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        blured = self.doBlur(gray, iterations=3, kernelSize=7)
        canny = cv2.Canny(blured, threshold1=20, threshold2=40)
        roi = self.doRegionOfInterest(canny)
        houghLines = cv2.HoughLinesP(
            roi,
            rho = 1,
            theta = np.pi / 180,
            threshold = 20,
            lines = np.array([]),
            minLineLength = 5,
            maxLineGap = 60
        )
        lanes = self.findLanes(grayColor, houghLines, minAngle=10, drawAll=True)
        # self.drawPoly(lanes, self.left.poly, self.left.color, width=3)
        # self.drawPoly(lanes, self.right.poly, self.right.color, width=3)
        return grayColor
| true
|
e92f66f20776176925bec09777d1ea06c1dfe3e3
|
Python
|
kiote/ebook
|
/api.py
|
UTF-8
| 8,757
| 2.671875
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
import re
import hashlib
from urllib import urlencode
class Books():
books = [
{
'Фантастика':
[
{
'id': 2,
'name': 'Понедельник начинается в субботу',
'author': 'Братья Стругацкие',
'descr': 'советская фантастическая классика'
},
{
'id': 5,
'name': 'Корпорация "Бессмертие"',
'author': 'Робер Шекли',
'descr': 'зачетная книжень'
},
]
},
{
'detective':
[
{
'id': 4,
'name': 'Дуновение смерти',
'author': 'Айзек Азимов',
'descr': '''В детективном романе Айзека Азимова «Дуновение
смерти» рассказывается о том, как Луис Брэйд, старший преподаватель химии Университета,
обнаруживает как-то вечером в лаборатории мертвое тело своего аспиранта Ральфа Ньюфелда,
который был отравлен цианидом. Было похоже на несчастный случай или на самоубийство.
Лишь один Брэйд твердо стоял на своем. Это убийство! В результате своего дилетантского расследования
он и сам чуть не стал жертвой...'''
},
{
'id': 8,
'name': 'Закон трех отрицаний',
'author': 'Александра Маринина',
'descr': '''Насте Каменской не повезло - она попала в аварию.
Скоро ее выпишут из госпиталя, но сломанная нога все болит и болит, так что Настя
передвигается с большим трудом. Она решает обратиться к специалисту, использующему
нетрадиционные методы лечения. Но когда Настя звонит по нужному телефону, выясняется,
что этот специалист убит. А тут еще одна неприятность. После госпиталя Насте негде жить:
ее квартира занята неожиданно нагрянувшими родственниками. Так Настя оказывается на даче
у знакомого, где совершает лечебные прогулки и развлекает себя обсуждением с коллегами
подробностей очередного громкого убийства молодой кинозвезды. И вдруг она с ужасом
обнаруживает, что за ней кто-то следит...'''
},
]
},
{
'single':
[
{
'id': 11,
'name': 'Аксиология личностного бытия',
'author': 'В. П. Барышков',
'descr': '''В монографии исследуются онтологические основания ценностного отношения.
Предмет исследования — личностное бытие как область формирования и функционирования
ценностных смыслов. Рассматриваются субстациональная и коммуникативная концепции
ценностного мира человека. Для научных работников, преподавателей философии
и студентов вузов'''
},
]
}
]
current_ver = '1.0.2'
def __init__(self, ver, bid = 0, isfinal = 0, pid = -1):
self.ver = ver
self.bid = int(bid)
self.isfinal = int(isfinal)
self.pid = int(pid)
    def check_ver(self):
        '''validates version; True on success, raises Exception(msg, code) otherwise

        Python 2 code: note the `<>` inequality operator below.
        '''
        # version does not setted at all
        if not self.ver: raise Exception('VER is empty', 1)
        matches = re.compile('^([0-9]{1})\.([0-9]{1})\.([0-9]{1})$').findall(self.ver)
        # version does not match pattern N.N.N
        if matches == []: raise Exception('VER is invaild', 2)
        if self.ver <> self.current_ver: raise Exception('this VER is not supported', 3)
        return True
    def book_by_id(self):
        '''returns book data as dictionary by book id (self.bid); raises code 4 if absent'''
        res = ''
        b = []
        # Flatten: each top-level entry is a one-key dict mapping a category
        # name to a shelf (list of book dicts).
        for book in self.books:
            b.extend([book[k] for k in book.keys()])
        for book_shelf in b:
            for one_book in book_shelf:
                if one_book['id'] == self.bid:
                    res = one_book
        # res is still the '' sentinel when no book matched.
        if (not isinstance(res, dict)): raise Exception('Book information error, dosen\'t exisits?', 4)
        return res
    def get_category_books(self):
        '''returns (urlencoded NAMEi/IDi params, book count) for category self.pid'''
        # show books in category
        if self.pid == -1: raise Exception('specify subcategory id (pid)', 5)
        res = ''
        count = 0
        if self.pid>=0 and self.pid in range(len(self.books)):
            # Unwrap the one-key dict to get the shelf (list of books).
            book = [self.books[self.pid][k] for k in self.books[self.pid].keys()]
            book = book[0]
            count = len(book)
            i = 0
            for b in book:
                res += '&' + urlencode({'NAME' + str(i): b['name'], 'ID' + str(i): b['id'], })
                i += 1
        return res, count
    def get_categories(self):
        '''returns (urlencoded params, directory count, total element count)'''
        #show categories
        i = 0
        categories = [k.keys() for k in self.books]
        #directories + books
        elcount = len(categories)
        # directoies
        count = elcount
        res = ''
        for category in categories:
            if (category[0] == 'single'):
                # we have single books: not a browsable directory
                count -= 1
            else:
                res += '&' + urlencode({'NAME' + str(i): category[0], 'ID' + str(i): i})
                i += 1
        # Append the lone 'single' book directly (hard-coded id 11).
        res += '&' + urlencode({'NAME' + str(i): 'Аксиология личностного бытия', 'ID' + str(i): 11})
        return res, count, elcount
def index(cmd = '', ver = 0, new = 0, isfinal = 0, pid = -1, bid = 0):
    """API entry point (Python 2): dispatch on CMD = LIST | BOOK | GET | REG.

    Returns urlencoded key=value strings; errors come back as MESSAGE/CODE pairs.
    """
    cmd = cmd.upper()
    bid = int(bid)
    count = 0
    books = Books(ver, bid, isfinal, pid)
    try:
        books.check_ver()
    except Exception, (error, code):
        return urlencode({'MESSAGE': error, 'CODE': code})
    # >> LIST: isfinal=1 lists books in a category, isfinal=0 lists categories
    if cmd == 'LIST':
        # check isfinal
        if not isfinal: return urlencode({'MESSAGE': 'ISFINAL is empty', 'CODE': 6})
        isfinal = int(isfinal)
        res = ''
        if isfinal == 1:
            res, count = books.get_category_books()
            return 'ELCOUNT=%d%s' % (count, res)
        elif isfinal == 0:
            res, count, elcount = books.get_categories()
            return 'ELCOUNT=%d&COUNT=%d%s' % (elcount, count, res)
        else:
            return urlencode({'MESSAGE': 'ISFINAL should be 1 or 0', 'CODE': 7})
    # <<
    # >> BOOK: return the metadata of one book
    elif cmd == 'BOOK':
        if not bid: return urlencode({'MESSAGE': 'no book id (bid) found', 'CODE': 8})
        try:
            res = urlencode(books.book_by_id())
        except Exception, (error, code):
            return urlencode({'MESSAGE': error, 'CODE': code})
        return 'BID=' + str(bid) + '&%s' % res
    # <<
    # >> GET: return an obfuscated (md5 of data + salt) download link
    elif cmd == 'GET':
        if not bid: return urlencode({'MESSAGE': 'no book id (bid) found', 'CODE': 8})
        try:
            res = urlencode(books.book_by_id())
        except Exception, (error, code):
            return urlencode({'MESSAGE': error, 'CODE': code})
        bidded_link = hashlib.md5(res+'salt').hexdigest()
        return urlencode({'http://wwww.bugtest.ru/ebook/get.py?fname': bidded_link})
    # <<
    elif cmd == 'REG':
        # NOTE(review): hard-coded demo credentials — confirm this is test-only.
        return 'LOGIN=footren&PASS=v324jzrn'
    else:
        return urlencode({'MESSAGE': 'unknown command', 'CODE': 9})
| true
|
36f0f22798763cd59a7b981dabd4984529bb5a1d
|
Python
|
flsilves/meetme
|
/tests.py
|
UTF-8
| 4,814
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
import unittest
from flask import json
import app
from models import *
users_url = 'http://localhost:5000/users'
recordings_url = 'http://localhost:5000/recordings'
json_header = {'Content-type': 'application/json'}
class BasicTestCase(unittest.TestCase):
    """End-to-end API tests for users / recordings / sharing endpoints.

    NOTE(review): assumes models exposes create_engine, Base and DB_URI via
    `from models import *` — confirm.
    """

    def setUp(self):
        # Fresh app, test client and a clean database schema for every test.
        self.app = app.create_app()
        self.client = self.app.test_client()
        self.db = create_engine(DB_URI)
        Base.metadata.drop_all(self.db)
        Base.metadata.create_all(self.db)

    def tearDown(self):
        pass

    # --- helpers wrapping the HTTP API; each returns parsed data/status ---

    def create_user(self, name='User1', email='dummy@email.com'):
        """POST /users; returns (json, status_code)."""
        data = {'name': name, 'email': email}
        response = self.client.post(users_url, data=json.dumps(data), headers=json_header)
        json_data = json.loads(response.data)
        code = response.status_code
        return json_data, code

    def delete_user(self, user_id):
        """DELETE /users/<id>; returns status_code."""
        uri = users_url + '/' + str(user_id)
        response = self.client.delete(uri, headers=json_header)
        code = response.status_code
        return code

    def create_recording(self, owner_id, storage_url, password):
        """POST /recordings; returns (json, status_code)."""
        data = {'owner_id': owner_id, 'storage_url': storage_url, 'password': password}
        response = self.client.post(recordings_url, data=json.dumps(data), headers=json_header)
        json_data = json.loads(response.data)
        code = response.status_code
        return json_data, code

    def delete_recording(self, recording_id):
        """DELETE /recordings/<id>; returns status_code."""
        uri = recordings_url + '/' + str(recording_id)
        response = self.client.delete(uri, headers=json_header)
        code = response.status_code
        return code

    def share_recording(self, recording_id, user_id):
        """PUT /users/<uid>/permissions/<rid>; returns status_code."""
        uri = users_url + '/' + str(user_id) + '/permissions/' + str(recording_id)
        data = {'user_id': user_id, 'recording_id': recording_id}
        response = self.client.put(uri, data=json.dumps(data), headers=json_header)
        code = response.status_code
        return code

    def unshare_recording(self, recording_id, user_id):
        """DELETE /users/<uid>/permissions/<rid>; returns status_code."""
        uri = users_url + '/' + str(user_id) + '/permissions/' + str(recording_id)
        data = {'user_id': user_id, 'recording_id': recording_id}
        response = self.client.delete(uri, data=json.dumps(data), headers=json_header)
        code = response.status_code
        return code

    # --- test cases ---

    def test_create_user(self):
        data, code = self.create_user(name='Flavio', email='flaviosilvestre89@gmail.com')
        self.assertEqual(code, 201)
        self.assertEqual(data['name'], 'Flavio')
        self.assertEqual(data['email'], 'flaviosilvestre89@gmail.com')

    def test_create_same_email(self):
        # A duplicate email must be rejected.
        data, code = self.create_user(name='Flavio', email='flaviosilvestre89@gmail.com')
        self.assertEqual(code, 201)
        data, code = self.create_user(name='Flavio', email='flaviosilvestre89@gmail.com')
        self.assertEqual(code, 404)

    def test_create_recording(self):
        password = 'secret'
        url = 'https://s3.amazonaws.com/recording/393217'
        data, code = self.create_user(name='Flavio', email='flaviosilvestre89@gmail.com')
        flavio_id = data['id']
        data, code = self.create_recording(owner_id=flavio_id, storage_url=url, password=password)
        self.assertEqual(code, 201)
        self.assertEqual(data['owner_id'], str(flavio_id))
        self.assertEqual(data['storage_url'], url)
        self.assertEqual(data['password'], password)
        data, code = self.create_user(name='Flavio',
                                      email='flaviosilvestre89@gmail.com') ## try to create duplicated recording
        self.assertEqual(code, 404)

    def test_delete_user(self):
        data, code = self.create_user(name='Flavio', email='flaviosilvestre89@gmail.com')
        self.assertEqual(code, 201)
        self.assertEqual(data['name'], 'Flavio')
        self.assertEqual(data['email'], 'flaviosilvestre89@gmail.com')
        id = data['id']
        code = self.delete_user(id)
        self.assertEqual(code, 204)

    def test_delete_recording(self):
        # Relies on test_create_recording leaving recording id 1 in place.
        self.test_create_recording();
        code = self.delete_recording('1')
        self.assertEqual(code, 204)

    def test_recording_share(self):
        # Full share/unshare round trip between two users.
        data, code = self.create_user(name='Flavio', email='flaviosilvestre89@gmail.com')
        self.assertEqual(code, 201)
        user1_id = data['id']
        data, code = self.create_user(name='FriendUser', email='sample@gmail.com')
        self.assertEqual(code, 201)
        user2_id = data['id']
        data, code = self.create_recording(owner_id=user1_id, storage_url='https://s3.amazonaws.com/recording/393217',
                                           password='password')
        self.assertEqual(code, 201)
        recording_id = data['id']
        code = self.share_recording(recording_id, user2_id)
        self.assertEqual(code, 201)
        code = self.unshare_recording(recording_id, user2_id)
        self.assertEqual(code, 204)


if __name__ == '__main__':
    unittest.main()
| true
|
66be5538b2ff77d8798cf097cecbed64b3a54253
|
Python
|
TheFutureJholler/TheFutureJholler.github.io
|
/module 13- GUI Programming with Tkinter/tkinter_canvas.py
|
UTF-8
| 1,044
| 3.4375
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 12 16:57:26 2018
@author: zeba
"""
"""
The Canvas is a rectangular area intended for drawing pictures or other complex layouts.
You can place graphics, text, widgets, or frames on a Canvas.
arc . Creates an arc item.
"""
#import tkinter as tk
#root=tk.Tk()
#
#c = tk.Canvas(root,bg="blue",width=500, height=500)#to draw canvas
##to draw something on canvas
#coord = 10, 50, 240, 210
#arc = c.create_arc(coord, start=0, extent=150, fill="red")
#
#c.grid()#to place canvas
#root.mainloop()
#########################################################################
import tkinter as tk

root=tk.Tk()
c = tk.Canvas(root,bg="blue",width=500, height=500)#to draw canvas
#to draw something on canvas
#filename = PhotoImage(file="sunshine.gif")
line = c.create_line(20,20,250,250, fill="red")  # diagonal line (x0, y0, x1, y1)
oval=c.create_oval(50,50,20,20)  # oval inside the bounding box (50,50)-(20,20)
polygon=c.create_polygon(150,250,210,310,250,250)  # triangle from three vertices
c.grid()#to place canvas
root.mainloop()  # blocks until the window is closed
| true
|
566c364c34f56910768f90e93dc3e396a35989b2
|
Python
|
AkiraMisawa/sicp_in_python
|
/chap1/c1_36.py
|
UTF-8
| 583
| 3.5
| 4
|
[] |
no_license
|
from math import log


def tolerance():
    """Absolute difference below which two successive guesses count as equal."""
    return 0.00001


def fixed_point(f, first_guess):
    """Search for a fixed point of *f* by repeated application.

    Starting from *first_guess*, applies x -> f(x) until two successive
    values differ by less than tolerance(); every iterate is printed and
    the final value is returned.
    """
    def close_enough(v1, v2):
        return abs(v1 - v2) < tolerance()

    guess = first_guess
    while True:
        nxt = f(guess)
        print(nxt)
        if close_enough(guess, nxt):
            return nxt
        guess = nxt


def main():
    # Without average damping this takes about 33 iterations.
    fixed_point(lambda x: log(1000) / log(x), 10.0)
    print()
    # With average damping only about 10 iterations are needed.
    fixed_point(lambda x: 1 / 2 * (x + log(1000) / log(x)), 10.0)


if __name__ == '__main__':
    main()
| true
|
938a4d2320a95819497437124aae9dec066e5c1c
|
Python
|
snehavaddi/DataStructures-Algorithms
|
/STACK_implemt_2_stacks_in_1_array.py
|
UTF-8
| 896
| 3.859375
| 4
|
[] |
no_license
|
class stack:
    """Two stacks sharing one fixed-size array.

    Stack 1 grows from the left end (index 0 upward); stack 2 grows from the
    right end (index size-1 downward).  ``top1``/``top2`` index the current
    top element of each stack (-1 / ``size`` when the stack is empty).
    """

    def __init__(self, n):
        self.size = n            # total capacity shared by both stacks
        self.arr = [None] * n
        self.top1 = -1           # stack 1 empty
        self.top2 = self.size    # stack 2 empty

    def push1(self, data):
        """Push onto stack 1; silently ignored when the array is full."""
        # BUG FIX: the original tested top1 < top2, which allowed a push into
        # the last free slot to overwrite stack 2's top element.  There must
        # be at least one free slot *between* the two tops.
        if self.top1 < self.top2 - 1:
            self.top1 = self.top1 + 1
            self.arr[self.top1] = data

    def push2(self, data):
        """Push onto stack 2; silently ignored when the array is full."""
        # Same overflow fix as push1 (original condition clobbered stack 1).
        if self.top1 < self.top2 - 1:
            self.top2 = self.top2 - 1
            self.arr[self.top2] = data

    def pop1(self):
        """Pop from stack 1; returns None when it is empty."""
        if self.top1 >= 0:
            x = self.arr[self.top1]
            self.top1 = self.top1 - 1
            return x

    def pop2(self):
        """Pop from stack 2; returns None when it is empty."""
        # BUG FIX: the original tested top2 <= size, so popping an empty
        # stack 2 read arr[size] and raised IndexError instead of returning
        # None like pop1 does.
        if self.top2 < self.size:
            x = self.arr[self.top2]
            self.top2 = self.top2 + 1
            return x
# Quick demo: interleave pushes on both stacks, then pop everything.
s = stack(5)
for left_val, right_val in ((10, 10), (20, 20)):
    s.push1(left_val)
    s.push2(right_val)
s.push1(30)
for _ in range(3):
    print(s.pop1())    # 30, 20, 10
for _ in range(2):
    print(s.pop2())    # 20, 10
| true
|
080a48762fef024ec6cc3bc35e9a32f7d404a42d
|
Python
|
gouravsb17/LJMU_Exoplanets
|
/code/exploratoryDataAnalysis.py
|
UTF-8
| 6,961
| 2.640625
| 3
|
[] |
no_license
|
# Importing the required libraries
import pandas as pd
import lightkurve as lk
import matplotlib.pyplot as plt
import os, shutil
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosis
from tqdm import tqdm
import warnings
import seaborn as sns
os.chdir('..')
tqdm.pandas(desc="Progress: ")
warnings.filterwarnings('ignore')
pd.set_option('display.width', 400)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 3000)
def my_custom_corrector_func(lc_raw):
    """Clean a raw Kepler light curve: clip upper outliers, drop NaNs,
    normalize and flatten, returning the flattened light curve.
    """
    # Source: https://docs.lightkurve.org/tutorials/05-advanced_patterns_binning.html
    # Clean outliers, but only those that are above the mean level (e.g. attributable to stellar flares or cosmic rays).
    lc_clean_outliers = lc_raw.remove_outliers(sigma=20, sigma_upper=4)
    # NOTE(review): the curve is flattened twice (window_length=101, then a
    # second default flatten whose trend is discarded) — presumably
    # intentional per the tutorial; confirm.
    lc_nan_normalize_flatten = lc_clean_outliers.remove_nans().normalize().flatten(window_length=101)
    lc_flat, trend_lc = lc_nan_normalize_flatten.flatten(return_trend=True)
    return lc_flat
def read_kepler_data_from_external_HDD(kepler_id):
    """Load, stitch and detrend all long-cadence light curves for *kepler_id*.

    Looks for the data under a local ``res/`` folder first, then falls back
    to copying it from an external HDD into ``temp_res/``.  Returns a pair
    ``[ok, array]`` where array stacks [time, flux] with flux centred on 0;
    on failure returns ``[False, np.array([])]``.
    """
    res_path = 'res/kepler_ID_' + kepler_id + '/'
    try:
        # Getting from local if already present
        os.listdir(res_path)
    except:
        try:
            # Pulling from the External HDD to the temp resource folder
            res_path = '/Volumes/PaligraphyS/kepler_data/res/kepler_ID_' + kepler_id + '/'
            shutil.copytree(res_path, 'temp_res/kepler_ID_' + kepler_id + '/')
            res_path = 'temp_res/kepler_ID_' + kepler_id + '/'
        except Exception as e:
            # copytree raises when the target already exists; reuse it then.
            if ('File exists: ' in str(e)):
                res_path = 'temp_res/kepler_ID_' + kepler_id + '/'
            else:
                print('Data for KIC not downloaded')
                return [False, np.array([])]
    # Only long-cadence light-curve FITS files are stitched together.
    lc_list_files = []
    for lc_file in os.listdir(res_path):
        if ('llc.fits' in lc_file):
            lc_list_files.append(lk.lightcurvefile.KeplerLightCurveFile(res_path + lc_file))
    lc_collection = lk.LightCurveFileCollection(lc_list_files)
    stitched_lc_PDCSAP = lc_collection.PDCSAP_FLUX.stitch()
    corrected_lc = my_custom_corrector_func(stitched_lc_PDCSAP)
    corrected_lc_df = corrected_lc.to_pandas()
    # Centre the normalized flux around zero.
    corrected_lc_df['flux'] = corrected_lc_df['flux'] - 1
    # Removing the kepler data brought to the temporary directory
    # NOTE(review): this assumes the data came via temp_res/; when it was
    # found in the local res/ folder this rmtree would raise — confirm.
    shutil.rmtree('temp_res/kepler_ID_' + kepler_id)
    return [True, np.array([corrected_lc_df['time'], corrected_lc_df['flux']])]
# Load cached per-star statistics if present; otherwise start an empty frame.
# NOTE(review): the bare `except:` also hides genuine read errors — a
# FileNotFoundError catch would be safer.
try:
    stats_df = pd.read_csv('planetary_data/stats_df.csv', dtype={'KIC': str})
except:
    stats_df = pd.DataFrame(columns=['KIC', 'flux_point_counts', 'max_flux_value', 'min_flux_value',
                                     'avg_flux_value', 'median_flux_value', 'skewness_flux_value',
                                     'kurtosis_flux_value', 'Q1_flux_value', 'Q3_flux_value', 'std_flux_value',
                                     'variance_flux_value'])
# Getting the kepler ID's for which we will train and test the model
i = len(stats_df)
# for file in tqdm(os.listdir('res/KIC_flux_graphs_80_dpi_1_size_color_b/')):
#     if ('.png' in file):
#         kepler_id = file.split('_')[-1].split('.')[0]
#         if (kepler_id in list(stats_df['KIC'])):
#             continue
#         try:
#             response_list = read_kepler_data_from_external_HDD(kepler_id)
#         except:
#             print('Error in '+str(kepler_id))
#             continue
#         if (response_list[0]):
#             stats_df.loc[i] = [str(kepler_id), response_list[1].shape[1], np.max(response_list[1][1]),
#                                np.min(response_list[1][1]), np.average(response_list[1][1]),
#                                np.nanmedian(response_list[1][1]), skew(response_list[1][1]),
#                                kurtosis(response_list[1][1]), np.nanquantile(response_list[1][1], 0.25),
#                                np.nanquantile(response_list[1][1], 0.75),np.nanstd(response_list[1][1]),
#                                np.nanvar(response_list[1][1])]
#             i += 1
#
#             if (i % 20 == 0):
#                 stats_df.drop_duplicates('KIC', inplace=True)
#                 stats_df.to_csv('planetary_data/stats_df.csv', sep=',', index=False)
# exit()
# Join the per-star statistics with the mission catalogue (confirmed-planet
# and KOI counts), de-duplicate, and persist the merged table.
complete_kepler_df = pd.read_csv('planetary_data/planetary_data_kepler_mission.csv', sep=',', dtype={'kepid': str})
complete_kepler_df = complete_kepler_df[['kepid', 'nconfp', 'nkoi']]
stats_planets_df = pd.merge(stats_df, complete_kepler_df, left_on='KIC', right_on='kepid')
stats_planets_df.drop_duplicates('KIC', inplace=True)
stats_planets_df.drop('kepid', inplace=True, axis=1)
stats_planets_df.to_csv('planetary_data/stats_planets_df.csv', sep=',', index=False)
# Keep stars with modest flux excursions, plus every confirmed-planet host.
stats_planets_df = stats_planets_df.loc[((stats_planets_df['max_flux_value']<=0.03) &
                                         (stats_planets_df['min_flux_value']>=-0.03)) |
                                        (stats_planets_df['nconfp']>0.0)]
# Binary label: 1.0 when the star has at least one confirmed planet.
stats_planets_df['Confirmed_planets'] = [1.0 * x for x in stats_planets_df['nconfp'] > 0.0]
print(stats_planets_df.groupby('Confirmed_planets').count()[['KIC']])
print(stats_planets_df.groupby(['Confirmed_planets', 'nkoi']).count()['KIC'])
print(stats_planets_df.loc[(stats_planets_df['nkoi'] == 0) &
                           (stats_planets_df['Confirmed_planets'] == 1)].sort_values('nkoi')[
          ['KIC', 'nkoi', 'Confirmed_planets']])
def plot_curve(x_column, y_column, hue_column="Confirmed_planets"):
    """Save a seaborn strip plot of *y_column* vs *x_column*, split by
    *hue_column*, to ``EDA_images/<y_column>.png``.

    Reads the module-level ``stats_planets_df`` dataframe.
    """
    graph_name = y_column + '.png'
    # Axis labels: special-cased for 'nkoi', otherwise derived from the
    # column name (capitalised, underscores to spaces).
    if (x_column == 'nkoi'):
        x_label = 'Number of Kepler object of interest'
    else:
        x_label = x_column[0].upper() + x_column[1:].replace('_', ' ')
    y_label = y_column[0].upper() + y_column[1:].replace('_', ' ')
    # Plot 1: This will show the flux point counts for both the classes
    sns.set_theme(style="darkgrid")
    g = sns.catplot(x=x_column, y=y_column,
                    hue=hue_column,
                    data=stats_planets_df, kind="strip",
                    dodge=True,
                    height=4, aspect=1.5, legend_out=False)
    g.despine(left=True)
    # title
    new_title = hue_column.replace('_', ' ')
    g._legend.set_title(new_title)
    # replace labels
    new_labels = ['0 - No exoplanet', '1 - Exoplanet Present']
    for t, l in zip(g._legend.texts, new_labels): t.set_text(l)
    g.set(xlabel=x_label, ylabel=y_label)
    plt.xlim(-0.5, 7.5)
    plt.tight_layout()
    plt.savefig('EDA_images/' + graph_name)
    # plt.show()
    plt.close()
# Produce one strip plot per statistic, then report the class balance
# (stars with / without confirmed planets).
y_columns = ['flux_point_counts', 'max_flux_value', 'min_flux_value',
             'avg_flux_value', 'median_flux_value', 'skewness_flux_value',
             'kurtosis_flux_value', 'Q1_flux_value', 'Q3_flux_value',
             'std_flux_value', 'variance_flux_value']
for y_column in y_columns:
    plot_curve('nkoi', y_column)
print(len(stats_planets_df.loc[stats_planets_df['nconfp'] > 0.0]))
print(len(stats_planets_df.loc[stats_planets_df['nconfp'] == 0.0]))
| true
|
86d139f4e6b655950b281d2cbce6f78a47e99ca9
|
Python
|
hoon4233/Algo-study
|
/2020_winter/2020_01_13/2146_JH.py
|
UTF-8
| 2,325
| 2.8125
| 3
|
[] |
no_license
|
from collections import deque
# Board size and grid for BOJ 2146 (shortest bridge): 1 = land, 0 = sea.
N = int(input())
mat = [ list(map(int,input().split())) for _ in range(N) ]
result = 300          # sentinel larger than any expected bridge length
numbering = 1         # island labels start from 2 (see seperate())
def seperate(ori_x, ori_y):
    """BFS flood fill: relabel the island containing (ori_x, ori_y).

    Every connected cell of value 1 is overwritten with a fresh island
    number (>= 2) so distinct islands can be told apart later.
    """
    global N, mat, numbering
    numbering += 1
    # print("first, ",ori_x, ori_y, numbering)
    dx, dy = [1,-1,0,0], [0,0,1,-1]
    visit = [ [False for _ in range(N)] for i in range(N) ]
    q = deque()
    q.append([ori_x, ori_y])
    visit[ori_x][ori_y] = True
    mat[ori_x][ori_y] = numbering
    while q :
        # Process the queue level by level (depth is irrelevant here).
        for j in range(len(q)):
            x,y = q.popleft()
            for i in range(4):
                nx,ny = x+dx[i], y+dy[i]
                # Only unvisited land cells still carrying the raw value 1.
                if nx>=0 and nx<N and ny>=0 and ny<N and visit[nx][ny] == False and mat[nx][ny] == 1 :
                    q.append([nx, ny])
                    visit[nx][ny] = True
                    mat[nx][ny] = numbering
                    # print(nx, ny, mat[nx][ny])
    # for line in mat :
    #     print(line)
    # exit(0)
def solution(ori_x, ori_y, my_num):
    """BFS from island cell (ori_x, ori_y) toward the nearest other island.

    Returns the number of sea cells crossed (the bridge length), the current
    best ``result`` when the cell is interior to its own island (pruning),
    or 300 when no other island is reachable.
    """
    global N, mat, result
    dx, dy = [1,-1,0,0], [0,0,1,-1]
    # Prune interior cells: all in-bounds neighbours belong to this island.
    flag = True
    for i in range(4):
        nx,ny = ori_x+dx[i], ori_y+dy[i]
        if nx>=0 and nx<N and ny>=0 and ny<N and mat[nx][ny] != my_num :
            flag = False
            break
    if flag :
        return result
    visit = [ [False for _ in range(N)] for i in range(N) ]
    q = deque()
    q.append([ori_x, ori_y])
    visit[ori_x][ori_y] = True
    depth = -1
    while q :
        # Level-order BFS: depth counts how many rings we've expanded.
        depth += 1
        for j in range(len(q)):
            x,y = q.popleft()
            for i in range(4):
                nx,ny = x+dx[i], y+dy[i]
                if nx>=0 and nx<N and ny>=0 and ny<N and visit[nx][ny] == False :
                    if mat[nx][ny] != my_num :
                        if mat[nx][ny] != 0 :
                            # Reached a different island: depth == bridge length.
                            return depth
                        else :
                            # Sea cell: keep expanding.
                            q.append([nx, ny])
                            visit[nx][ny] = True
    return 300
# Pass 1: label each island with a distinct number (2, 3, ...).
for i in range(N):
    for j in range(N):
        if mat[i][j] == 1 :
            seperate(i,j)
# Pass 2: from every island cell, BFS across the sea to the nearest other
# island; the answer is the shortest such bridge.
for i in range(N):
    for j in range(N):
        if mat[i][j] != 0 :
            # tmp = solution(i,j,mat[i][j])
            # if (tmp < result) :
            #     print(i,j)
            result = min(result, solution(i,j, mat[i][j]))
print(result)
| true
|
5b491d7531d016448829fdfbdea93bd86078b231
|
Python
|
vkuznet/WMCore
|
/test/python/WMCore_t/Database_t/DBFormatter_t.py
|
UTF-8
| 2,852
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
_DBFormatterTest_
Unit tests for the DBFormatter class
"""
from __future__ import print_function
import threading
import unittest
from builtins import str
from WMCore.Database.DBFormatter import DBFormatter
from WMQuality.TestInit import TestInit
class DBFormatterTest(unittest.TestCase):
    """
    _DBFormatterTest_

    Unit tests for the DBFormatter class
    """

    def setUp(self):
        "make a logger instance and create tables"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setSchema(customModules=["WMQuality.TestDB"],
                                useDefault=False)
        # Query reused by every formatting assertion below.
        self.selectSQL = "SELECT * FROM test_tableb"

    def tearDown(self):
        """
        Delete the databases
        """
        self.testInit.clearDatabase()

    def stuffDB(self):
        """Populate one of the test tables"""
        # The three rows deliberately mix unicode, native str and bytes
        # values to exercise DBFormatter's type normalisation.
        insertSQL = "INSERT INTO test_tableb (column1, column2, column3) values (:bind1, :bind2, :bind3)"
        insertBinds = [{'bind1': u'value1a', 'bind2': 1, 'bind3': u'value2a'},
                       {'bind1': 'value1b', 'bind2': 2, 'bind3': 'value2b'},
                       {'bind1': b'value1c', 'bind2': 3, 'bind3': b'value2d'}]
        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, insertBinds)

    def testBFormatting(self):
        """
        Test various formats
        """
        # fill the database with some initial data
        self.stuffDB()
        myThread = threading.currentThread()
        dbformatter = DBFormatter(myThread.logger, myThread.dbi)
        # format(): list of rows as plain lists (bytes come back as str).
        result = myThread.dbi.processData(self.selectSQL)
        output = dbformatter.format(result)
        self.assertEqual(output, [['value1a', 1, 'value2a'],
                                  ['value1b', 2, 'value2b'],
                                  ['value1c', 3, 'value2d']])
        # formatOne(): only the first row.
        result = myThread.dbi.processData(self.selectSQL)
        output = dbformatter.formatOne(result)
        print('test1 ' + str(output))
        self.assertEqual(output, ['value1a', 1, 'value2a'])
        # formatDict(): one dict per row keyed by column name.
        result = myThread.dbi.processData(self.selectSQL)
        output = dbformatter.formatDict(result)
        self.assertEqual(output, [{'column3': 'value2a', 'column2': 1, 'column1': 'value1a'},
                                  {'column3': 'value2b', 'column2': 2, 'column1': 'value1b'},
                                  {'column3': 'value2d', 'column2': 3, 'column1': 'value1c'}])
        # formatOneDict(): dict for the first row only.
        result = myThread.dbi.processData(self.selectSQL)
        output = dbformatter.formatOneDict(result)
        self.assertEqual(output, {'column3': 'value2a', 'column2': 1, 'column1': 'value1a'})
# Run the suite directly (outside the WMCore test harness).
if __name__ == "__main__":
    unittest.main()
| true
|
1c29ad58198dbf3c0562d48c633eb57779c411c4
|
Python
|
cnk/django_test_examples
|
/example/tests/test_html_form.py
|
UTF-8
| 1,691
| 2.78125
| 3
|
[] |
no_license
|
from django.test import TestCase, Client
from ..models import Color
class ExampleTestsWithDjangoClient(TestCase):
    """Examples of posting HTML-form data with Django's test Client."""

    def setUp(self):
        # Seed a handful of Color rows for the form to choose from.
        for color in ['blue', 'green', 'yellow', 'orange', 'red']:
            c = Color(name=color)
            c.full_clean()
            c.save()

    def test_request_without_form_data(self):
        # A plain GET renders the page with no submitted data.
        client = Client()
        response = client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['data'], None)
        self.assertEqual(response.context['choices'], None)

    def test_request_with_form_submission(self):
        # An empty POST behaves like no submission at all.
        client = Client()
        response = client.post('/', {})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['data'], None)
        self.assertEqual(response.context['choices'], None)

    def test_request_with_form_submitting_one_choice(self):
        # Form values arrive as strings on the server side.
        client = Client()
        response = client.post('/', {'choice': 2})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['choices'], ['2'])

    def test_request_with_form_submitting_three_choices_in_one_group(self):
        # A list value posts several values under the same field name.
        client = Client()
        response = client.post('/', {'choice': [2, 3, 4]})
        self.assertEqual(response.status_code, 200)
        self.assertListEqual(response.context['choices'], ['2', '3', '4'])

    def test_request_with_form_submitting_choices_in_two_groups_only_sees_the_last_one(self):
        # NOTE(review): duplicate keys in a dict literal collapse in Python
        # itself — {'choice': 2, 'choice': 3} is just {'choice': 3} — so this
        # exercises dict semantics rather than two separate form groups.
        client = Client()
        response = client.post('/', {'choice': 2, 'choice': 3})
        self.assertEqual(response.status_code, 200)
        self.assertListEqual(response.context['choices'], ['3'])
| true
|
3c5a5ee744662c36b5197c230fb9329ac3b397ef
|
Python
|
zhijazi3/Scrapper
|
/webScrapper.py
|
UTF-8
| 1,483
| 3.171875
| 3
|
[] |
no_license
|
from bs4 import BeautifulSoup
import requests
import pdb
class WebScrapper:
    """Scrapes cryptocurrency names from coinmarketcap.com, following the
    "Next" pagination link until no further page exists."""

    def __init__(self):
        self.start_url = "https://coinmarketcap.com/"
        self.cryptos = []     # accumulated coin names across all pages
        self.counter = 1      # current page number, used to build page URLs

    def scrape(self):
        """Walk every listing page, collecting the text of all <a title=...>
        tags, then print the accumulated names."""
        self.url = self.start_url
        while True:
            # If no more new pages, exit
            if not self.url:
                break
            self.page = requests.get(self.url)
            # get formatted version of page content
            self.content = BeautifulSoup(self.page.content, 'html.parser')
            # parse content
            result = self.content.find_all('a', title=True)
            for alt in result:
                self.cryptos.append(alt.text)
            # BUG FIX: the original bound getNextPage()'s return value (always
            # None) to an unused local; the method communicates via self.new_url.
            self.getNextPage()
            self.url = self.new_url
        print(self.cryptos)

    def getNextPage(self):
        """Set self.new_url to the next page's URL, or False when there is
        no "Next" link on the current page."""
        headerDiv = self.content.find('div', {'class': 'cmc-button-group'})
        # NOTE(review): if the div is missing, iterating None raises
        # TypeError; children may also be NavigableStrings — confirm that
        # `.text` is valid for every child here.
        for div in headerDiv:
            text = div.text
            if "Next" in text:
                self.counter += 1
                # page_number = [string for string in text.split() if string.isdigit()][0]
                self.new_url = self.start_url + str(self.counter) + '/'
                return
        self.new_url = False
        return
# Script entry point.
if __name__ == "__main__":
    # run scrapper
    scapper = WebScrapper()
    scapper.scrape()
| true
|
25c903b3b88aa55cdda5875a7afe85181932e2c7
|
Python
|
xiaoniudonghe2015/strings2xls
|
/xml2xls/xls2xml.py
|
UTF-8
| 4,153
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from distutils.log import Log
from optparse import OptionParser
import xlrd
import os
import time
def open_excel(path):
    """Open an Excel workbook at *path* with xlrd.

    NOTE(review): on failure this *returns* the exception object instead of
    raising, so callers that immediately use the result (e.g.
    read_from_excel calling .sheets()) fail with a confusing
    AttributeError — consider re-raising instead.
    """
    try:
        data = xlrd.open_workbook(path, encoding_override="utf-8")
        return data
    except Exception as ex:
        return ex
def read_from_excel(file_path):
    """Read the first sheet of an xls file of translations.

    Column 0 holds the resource keys; each further column holds one
    language's values, with the language name in the header row.
    Returns ``(keys, {language_name: values})``.
    """
    data = open_excel(file_path)
    table = data.sheets()[0]
    keys = table.col_values(0)
    del keys[0]
    # print(keys)
    first_row = table.row_values(0)
    lan_values = {}
    for index in range(len(first_row)):
        # Column 0 is the key column, not a language.
        if index <= 0:
            continue
        language_name = first_row[index]
        # print(language_name)
        values = table.col_values(index)
        del values[0]
        # print(values)
        lan_values[language_name] = values
    return keys, lan_values
def write_to_xml(keys, values, file_path, language_name):
    """Write parallel key/value lists as an Android strings.xml file.

    Args:
        keys: string resource names (whitespace is stripped from each).
        values: translated strings parallel to *keys*; empty/None entries
            are logged (with *language_name* and the 1-based row index)
            and skipped.
        file_path: destination path of the XML file.
        language_name: language label, used only in the error log message.
    """
    # BUG FIX: the original opened/closed the handle manually, leaking it
    # when a write raised; a context manager guarantees the close.
    with open(file_path, "wb") as fo:
        fo.write(bytes("<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n", encoding="utf-8"))
        for x in range(len(keys)):
            if values[x] is None or values[x] == '':
                Log().error("Language: " + language_name + " Key:" + keys[x] + " value is None. Index:" + str(x + 1))
                continue
            key = keys[x].strip()
            value = str(values[x])
            content = "    <string name=\"" + key + "\">" + value + "</string>\n"
            fo.write(bytes(content, encoding="utf-8"))
        fo.write(bytes("</resources>", encoding="utf-8"))
def add_parser():
    """Build the CLI option parser (-f/--fileDir, -t/--targetDir) and
    return the parsed options object."""
    parser = OptionParser()
    for short_flag, long_flag, help_text, meta in (
            ("-f", "--fileDir", "Xls files directory.", "fileDir"),
            ("-t", "--targetDir",
             "The directory where the xml files will be saved.", "targetDir")):
        parser.add_option(short_flag, long_flag, help=help_text, metavar=meta)
    options, _args = parser.parse_args()
    # print("options: %s, args: %s" % (options, _args))
    return options
def convert_to_xml(file_dir, target_dir):
    """Convert every .xls/.xlsx under *file_dir* to Android strings.xml
    files, one ``values-<lang>/`` directory per language column, inside a
    timestamped folder under *target_dir*."""
    dest_dir = target_dir + "/xls2xml/" + time.strftime("%Y%m%d_%H%M%S")
    for _, _, file_names in os.walk(file_dir):
        xls_file_names = [fi for fi in file_names if fi.endswith(".xls") or fi.endswith(".xlsx")]
        for file in xls_file_names:
            data = xlrd.open_workbook(file_dir + "/" + file, 'utf-8')
            sheet = data.sheets()
            for table in sheet:
                # Header row: [key, lang1, lang2, ...]; column 0 holds keys.
                first_row = table.row_values(0)
                keys = table.col_values(0)
                del keys[0]
                for index in range(len(first_row)):
                    if index <= 0:
                        continue
                    language_name = first_row[index]
                    values = table.col_values(index)
                    del values[0]
                    # Android uses zh-rCN for simplified Chinese.
                    if language_name == "zh-Hans":
                        language_name = "zh-rCN"
                    path = dest_dir + "/values-" + language_name + "/"
                    # English is the default resource set (plain values/).
                    if language_name == 'en':
                        path = dest_dir + "/values/"
                    if not os.path.exists(path):
                        os.makedirs(path)
                    filename = 'strings.xml'
                    write_to_xml(keys, values, path + filename, language_name)
    print("Convert %s successfully! you can see xml files in %s" % (
        file_dir, dest_dir))
def start_convert(options):
    """Validate the CLI options and kick off the conversion.

    Requires ``--fileDir``; ``--targetDir`` defaults to the current working
    directory and is created when missing.
    """
    file_dir = options.fileDir
    target_dir = options.targetDir
    print("Start converting")
    if file_dir is None:
        Log().error("xls files directory can not be empty! try -h for help.")
        return
    if not os.path.exists(file_dir):
        Log().error("%s does not exist." % file_dir)
        return
    if target_dir is None:
        target_dir = os.getcwd()
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    convert_to_xml(file_dir, target_dir)
def main():
    """Script entry point: parse CLI options and run the conversion."""
    options = add_parser()
    start_convert(options)
    # convert_to_xml("/Users/shewenbiao/Desktop/xls2xml", os.getcwd())


# BUG FIX: the original called main() unconditionally at import time, which
# parsed sys.argv whenever this module was merely imported.  Guard it so the
# conversion only runs when the file is executed as a script.
if __name__ == '__main__':
    main()
| true
|
a4015e3986a892590a823be976d20e3d9786c32b
|
Python
|
martofeld/algoritmos1-ejercicios
|
/Guia 2/ejercicio3.py
|
UTF-8
| 259
| 3.21875
| 3
|
[] |
no_license
|
# BUG FIX: `import "./ejercicio2"` is JavaScript-style syntax and is a
# SyntaxError in Python; use a normal module import instead.
import ejercicio2


def show_conversion_table():
    """Print a Fahrenheit -> Celsius conversion table for 0..110 by 10s."""
    print("|---------------------|")
    print("| farenhait | celcius |")
    for f in range(0, 120, 10):
        celcius = ejercicio2.farenhait_to_celcius(f)
        print("|", f, "|", celcius)
    print("|---------------------|")
| true
|
37b8a619974052f07ecd165575dee6174ea41fd1
|
Python
|
NicolaRonzoni/Multivariate-Time-series-clustering
|
/30min data&code/clustering
|
UTF-8
| 2,678
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 13 09:49:32 2021

@author: nicolaronzoni

Build daily multivariate (flow, speed) time series from 30-minute detector
data and cluster the days with tslearn's TimeSeriesKMeans (soft-DTW metric).
"""
# library
import scipy
import pandas as pd
import sklearn
import numpy as np
# BUG FIX: `pip install tslearn` is shell syntax, not Python, and made this
# script a SyntaxError.  Install dependencies from the command line instead.
# pip install tslearn
import tslearn
# import the dataset
df = pd.read_csv ('/Users/nicolaronzoni/Downloads/I35W_NB 30min 2013/S60.csv')
df
# normalization of the series
from sklearn.preprocessing import MinMaxScaler, StandardScaler
# flow: reshape to a (n, 1) column vector for the scaler
flow = df.loc[:, 'Flow']
flow
flow=np.array(flow)
flow = flow.reshape((len(flow), 1))
# train the normalization
scaler = MinMaxScaler(feature_range=(0, 1))
scaler = scaler.fit(flow)
print('Min: %f, Max: %f' % (scaler.data_min_, scaler.data_max_))
normalized_flow = scaler.transform(flow)
# from array to list
normalized_flow=normalized_flow.tolist()
len(normalized_flow)
from toolz.itertoolz import sliding_window, partition
# create the daily time series (48 half-hour samples per day)
day_flow=list(partition(48,normalized_flow))
day_flow
len(day_flow)
# from list to array
day_flow=np.asarray(day_flow)
day_flow
from tslearn.utils import to_time_series
# univariate series for the flow normalized
first_time_series = to_time_series(day_flow)
print(first_time_series.shape)
# speed: same normalization/partition pipeline as flow
speed =df.loc[:,'Speed']
speed=np.array(speed)
speed= speed.reshape((len(speed), 1))
scaler = scaler.fit(speed)
print('Min: %f, Max: %f' % (scaler.data_min_, scaler.data_max_))
# normalize the dataset and print the first 5 rows
normalized_speed = scaler.transform(speed)
normalized_speed
# from array to list
normalized_speed=normalized_speed.tolist()
len(normalized_speed)
# create daily time series
day_speed=list(partition(48,normalized_speed))
day_speed
len(day_speed)
# from list to array
day_speed=np.asarray(day_speed)
day_speed
# univariate series for the speed normalized
second_time_series = to_time_series(day_speed)
print(second_time_series.shape)
second_time_series
# normalized_speed= tuple(map(tuple, normalized_speed))
# creation of the multivariate time series
multivariate=np.dstack((first_time_series,second_time_series))
multivariate_time_series = to_time_series(multivariate)
print(multivariate_time_series.shape)
# clustering
from tslearn.clustering import TimeSeriesKMeans
# try Euclidean softdtw dtw
km_dba = TimeSeriesKMeans(n_clusters=4, metric="softdtw", max_iter=5,max_iter_barycenter=5, random_state=0).fit(multivariate_time_series)
km_dba.cluster_centers_.shape
km_dba.cluster_centers_
prediction=km_dba.fit_predict(multivariate_time_series,y=None)
len(prediction)
# visualization
# BUG FIX: second bare `pip install` line commented out (SyntaxError).
# pip install calplot
import calplot
all_days = pd.date_range('1/1/2013', periods=365, freq='D')
events = pd.Series(prediction, index=all_days)
calplot.calplot(events)
| true
|
1b3734fe9d2e64c72d5bdfb97d7cb012f93138f6
|
Python
|
JLtheking/cpy5python
|
/HCI_PrelimP1_2013/Additional Materials/1.2.py
|
UTF-8
| 683
| 4.125
| 4
|
[] |
no_license
|
def bitshift(string):
    """Rotate an 8-bit string left by one position (bit 0 wraps to the end)."""
    # Collect bits 1..7 explicitly so inputs shorter than 8 characters still
    # raise IndexError exactly like the original index loop did.
    rotated = "".join(string[index] for index in range(1, 8))
    return rotated + string[0]
# Keep prompting until the user supplies a valid 8-character binary string,
# then print it rotated left by one bit.
inputAccepted = False
while not inputAccepted:
    string = input("Input bits to shift: ")
    #validate input
    if string == "":
        print("Empty input") #presence check
    elif len(string) != 8:
        print("Input must be 8-bit") #length check
    else:
        for i in range(len(string)):
            if string[i] != '0' and string[i] != '1': #value check
                print("Input can only utilise the digits 0 and 1 for bits")
                break
        else:
            # for/else: runs only when no invalid character broke the loop.
            inputAccepted = True
string = bitshift(string)
print(string)
| true
|
34ed55c076b32d2a6b649118193d24e94515061f
|
Python
|
Kanevskiyoleksandr/DZ8
|
/Main menu.py
|
UTF-8
| 441
| 2.640625
| 3
|
[] |
no_license
|
from tkinter import *

# Main application window.
root = Tk()
root.geometry('580x300+100+100')

# Top-level menu bar with one command per CRUD action (labels in Russian).
mainmenu = Menu(root)
root.config(menu=mainmenu)
for label_text in ('Создать запись', 'Найти запись', 'Редактировать запись',
                   'Удалить запись', 'Выйти из программы'):
    mainmenu.add_command(label=label_text)

root.mainloop()
| true
|
20eae44645e7bb1d10b164388d154b7d15749fdc
|
Python
|
zenna/asl
|
/asl/run.py
|
UTF-8
| 1,968
| 3.109375
| 3
|
[] |
no_license
|
"Get reference loss"
import asl
def isidle(runstate):
    """Return True iff *runstate* is currently in the 'idle' mode."""
    current = runstate['mode']
    return current == 'idle'
def empty_runstate():
    """Create a fresh runstate: no recorded observations, mode 'idle'."""
    return dict(observes={}, mode='idle')
def set_mode(runstate, mode):
    """Switch *runstate* into *mode* in place."""
    runstate.update(mode=mode)
def mode(runstate):
    """Return the current mode of *runstate*."""
    current_mode = runstate['mode']
    return current_mode
def set_idle(runstate):
    """Reset *runstate* back to the 'idle' mode (inlines set_mode)."""
    runstate['mode'] = 'idle'
def observe(value, label, runstate, log=True):
    """Record *value* under *label* for the currently active mode.

    Raises ValueError when the runstate is idle (no mode selected).  Returns
    *value* so the call can be used inline; *log* is accepted for interface
    compatibility but unused here.
    """
    if isidle(runstate):
        print("cant observe values without choosing mode")
        raise ValueError
    active = runstate['mode']
    per_mode = runstate['observes'].setdefault(active, {})
    per_mode[label] = value
    return value
def callfuncs(functions, inputs, modes):
    """Execute each function and record runstate"""
    # Each functions[i] is invoked as functions[i](*inputs[i], runstate)
    # with the runstate switched to modes[i]; the runstate is returned idle.
    runstate = empty_runstate()
    for i, func in enumerate(functions):
        set_mode(runstate, modes[i])
        func(*inputs[i], runstate)
    set_idle(runstate)
    return runstate
def refresh_iter(dl, itr_transform=None):
    """Return a fresh iterator over *dl*, optionally mapping each element
    through *itr_transform* (asl's imap is a lazy map)."""
    if itr_transform is None:
        return iter(dl)
    else:
        return asl.util.misc.imap(itr_transform, iter(dl))
def run_observe(functions, inputs, refresh_inputs, modes, log=True):
    """Run functions and accumulate observed values

    Args:
      functions: list of functions to call under different modes
      refresh_inputs[i](inputs[i]) should return list of inputs for functions[i]
        use refresh_inputs to restart iterators for example
    Returns:
      A function of no arguments that produces a ``runstate``, which accumulates
      information from running all the functions in ``functions``
    """
    # NOTE(review): the docstring suggests refresh_inputs is a per-function
    # list, but the code calls it as a single callable on each input —
    # confirm which contract is intended.
    inp = [refresh_inputs(inp) for inp in inputs]
    def runobserve():
        nonlocal inp
        try:
            runstate = callfuncs(functions, inp, modes)
            if log:
                asl.log("runstate", runstate)
            return runstate
        except StopIteration:
            # One of the input iterators ran out: rebuild all inputs, retry.
            print("End of Epoch, restarting inputs")
            inp = [refresh_inputs(inp) for inp in inputs]
            return callfuncs(functions, inp, modes)
    return runobserve
| true
|
ba294aa48d6c4dae2772a51140ac362fb0dca042
|
Python
|
rui233/leetcode-python
|
/Array and String/121-Best time to Buy and Sell Stock.py
|
UTF-8
| 264
| 3.359375
| 3
|
[] |
no_license
|
class Solution(object):
    def maxProfit(self, prices):
        """Return the maximum profit from one buy-then-sell transaction.

        Tracks the cheapest price seen so far and the best profit from
        selling at the current price; 0 when no profitable trade exists.
        """
        best_profit = 0
        cheapest = float("inf")
        for current in prices:
            if current < cheapest:
                cheapest = current
            elif current - cheapest > best_profit:
                best_profit = current - cheapest
        return best_profit
| true
|
3309069e99ee70902cac596cd267a069c97039ad
|
Python
|
leobarrientos/wiitruck
|
/src/morse.py
|
UTF-8
| 2,449
| 3.0625
| 3
|
[] |
no_license
|
# NOTE: this file is Python 2 code (print statements, raw_input below).
import cwiid, time
import RPi.GPIO as GPIO

button_delay = 0.1

print 'Please press buttons 1 + 2 on your Wiimote now ...'
time.sleep(1)

# This code attempts to connect to your Wiimote and if it fails the program quits
try:
    wii=cwiid.Wiimote()
    #turn on led to show connected
    wii.led = 1
except RuntimeError:
    print "Cannot connect to your Wiimote. Run again and make sure you are holding buttons 1 + 2!"
    quit()

print 'Wiimote connection established!\n'
print 'Go ahead and press some buttons\n'
print 'Press PLUS and MINUS together to disconnect and quit.\n'
time.sleep(3)

#Now if we want to read values from the Wiimote we must turn on the reporting mode. First let's have it just report button presses
wii.rpt_mode = cwiid.RPT_BTN | cwiid.RPT_ACC

# NOTE(review): LED is never imported or defined in this file (looks like a
# gpiozero name), so the next two lines raise NameError at runtime — confirm
# the intended library.
gpio17 = LED(17)
gpio17.off()
print 'Ready!!!'
# International Morse code table: character -> dot/dash string
# (space maps to a space, treated as a word gap by the main loop).
CODE = {' ': ' ',
        "'": '.----.',
        '(': '-.--.-',
        ')': '-.--.-',
        ',': '--..--',
        '-': '-....-',
        '.': '.-.-.-',
        '/': '-..-.',
        '0': '-----',
        '1': '.----',
        '2': '..---',
        '3': '...--',
        '4': '....-',
        '5': '.....',
        '6': '-....',
        '7': '--...',
        '8': '---..',
        '9': '----.',
        ':': '---...',
        ';': '-.-.-.',
        '?': '..--..',
        'A': '.-',
        'B': '-...',
        'C': '-.-.',
        'D': '-..',
        'E': '.',
        'F': '..-.',
        'G': '--.',
        'H': '....',
        'I': '..',
        'J': '.---',
        'K': '-.-',
        'L': '.-..',
        'M': '--',
        'N': '-.',
        'O': '---',
        'P': '.--.',
        'Q': '--.-',
        'R': '.-.',
        'S': '...',
        'T': '-',
        'U': '..-',
        'V': '...-',
        'W': '.--',
        'X': '-..-',
        'Y': '-.--',
        'Z': '--..',
        '_': '..--.-'}

# BCM pin driving the signalling LED.
ledPin=17
GPIO.setmode(GPIO.BCM)
# NOTE(review): ALT0/setclock are not part of the standard RPi.GPIO API —
# presumably a patched GPIO library providing a hardware clock; verify
# before reusing this setup elsewhere.
GPIO.setup(ledPin,GPIO.ALT0)
GPIO.setclock(4,64000)
def dot():
    # Morse "dit": LED on for 0.2 s, then a 0.2 s inter-element gap.
    GPIO.output(ledPin,1)
    time.sleep(0.2)
    GPIO.output(ledPin,0)
    time.sleep(0.2)
def dash():
    # Morse "dah": LED on for 0.5 s, then a 0.2 s inter-element gap.
    GPIO.output(ledPin,1)
    time.sleep(0.5)
    GPIO.output(ledPin,0)
    time.sleep(0.2)
# Read text from the user forever and blink it out in Morse code.
# NOTE: `input` shadows the builtin; raw_input is Python 2 only.
while True:
    input = raw_input('What would you like to send? ')
    for letter in input:
        for symbol in CODE[letter.upper()]:
            if symbol == '-':
                dash()
            elif symbol == '.':
                dot()
            else:
                # The space entry maps to ' ': treat it as a word gap.
                time.sleep(0.5)
        # Inter-letter gap.
        time.sleep(0.5)
| true
|
57f5eeafc542339921fcd04edbeabcea8f20a51c
|
Python
|
OathKeeper723/data_report
|
/information_extraction/qichacha.py
|
UTF-8
| 1,999
| 2.734375
| 3
|
[] |
no_license
|
# coding=utf-8
# This program extracts information sourced from Qichacha (a Chinese company
# registry), including: identity info, shareholder info and change records.
# The result is output in JSON format.
import docx
import re
import yaml
import os
from word_manipulation import docx_enhanced
# Extraction rules (field names, table titles, wanted columns) live in a
# YAML file shipped next to this script.
current_path = os.path.dirname(os.path.realpath(__file__))
f = open(current_path+"\\qichacha_config.yml", encoding="utf-8")
config = yaml.load(f, Loader=yaml.FullLoader)
f.close()
def dict2list(dict):
    """Return the values of *dict* as a list (insertion order preserved)."""
    # NOTE: the parameter shadows the builtin `dict`; kept as-is for
    # interface compatibility with existing callers.
    return [dict[key] for key in dict]
def recurse_dict2list(ll):
    """Recursively replace every dict nested in *ll* with the list of its
    values; lists are rewritten in place, other values pass through."""
    if isinstance(ll, list):
        for index, item in enumerate(ll):
            ll[index] = recurse_dict2list(item)
    elif isinstance(ll, dict):
        ll = recurse_dict2list(dict2list(ll))
    return ll
# title: config key naming the section heading to match;
# content: config key listing the table columns to keep.
def get_table_content(docx_file, title, content):
    """Collect rows from the table that directly follows a paragraph whose
    text contains config[title]; each row becomes a {column: value} dict
    restricted to the columns listed in config[content]."""
    result = []
    docx_list = docx_enhanced.docx_to_list(docx_file)
    for i in range(1, len(docx_list)):
        # A matching heading string immediately followed by a table (list).
        if isinstance(docx_list[i-1], str) and isinstance(docx_list[i], list) and config[title] in docx_list[i-1]:
            for j in range(1, len(docx_list[i])):
                temp = {}
                # Row 0 of the table holds the column headers.
                for k in range(0, len(docx_list[i][j])):
                    if docx_list[i][0][k] in config[content]:
                        temp[docx_list[i][0][k]] = docx_list[i][j][k]
                result.append(temp)
    return result
# Extraction rules
def read_docx(file_name):
    """Parse a Qichacha export .docx: regex the identity fields out of the
    paragraph text, then pull each configured table.  Returns
    (identity values, table rows) with all dicts flattened to value lists."""
    docx_file = docx.Document(file_name)
    paragraphs_content = '\n'.join([para.text for para in docx_file.paragraphs])
    # Identity information: "<field>:<value>" pairs (full-width colon)
    # scraped from the running text.
    BUSINESS_INFO = {}
    for i in config['identity_info']:
        BUSINESS_INFO[i] = re.search("%s:(.*?)\n" % i, paragraphs_content).group(1).strip()
    INFO_DICT = {}
    for info in config['info_list']:
        INFO_DICT[info[1]] = get_table_content(docx_file, info[0], info[1])
    BUSINESS_INFO = dict2list(BUSINESS_INFO)
    INFO_LIST = recurse_dict2list(INFO_DICT)
    return BUSINESS_INFO, INFO_LIST
| true
|
e95a63d1c83071a13f08b1fd01fa4ed83be10625
|
Python
|
Narvaliton/Learning
|
/Python/OpenClassrooms/methode_str.py
|
UTF-8
| 2,019
| 4.28125
| 4
|
[] |
no_license
|
from random import randrange
import os

"""Les méthodes de la classe str"""

nom = "Colin"
prenom = "Maxime"
age = "22"

# upper() converts a string to uppercase (the opposite of lower())
print("Tu t'appeles " + prenom + " " + nom.upper() + " et tu as " + age + " ans.")

# Formatting a string with str.format (positional indices reorder the values)
print("Tu t'appeles {1} {0} et tu as {2} ans.".format(nom.upper(), prenom, age))

# Slicing / traversing a string
chaine = "Hello world !"
print(chaine[0:5])
# Equivalent to:
print(chaine[:5])
print(chaine[6:11])
print(chaine[-1])

# Count how many times the substring "x" occurs in "chaine"
x = "l"
print(chaine.count(x))

# Replace occurrences of "x" with "y" in "chaine"
y = "j"
newChaine = chaine.replace(x, y)
newChaine2 = chaine.replace(x,y,1)# an optional count limits how many replacements are made
print(newChaine)
print(newChaine2)
print(chaine)# the original string is unchanged (strings are immutable)

# Find the index of the first occurrence of a substring
occurence = chaine.find("l")
print(occurence)
# The search can be limited to part of the string with start/end indices
occurence2 = chaine.find("l", 5, -1)
print(occurence2)

# Replace every occurrence of a user-chosen letter with a random character
lettres = "abcdefghijklmnopqrstuvwxyzêéèàâôî"
chaine2 ="C'est fou le nombre de lettres qui se trouve dans cette chaine de caractère"
newChaine2 = ""
lettre = "4"
while lettre not in lettres or len(lettre) != 1:
    lettre = input("Choisissez une lettre de l'alphabet :\n").lower()
for i in range(0,len(chaine2)):
    if chaine2[i].lower() == lettre:
        newChaine2 += lettres[randrange(len(lettres))]
    else:
        newChaine2 += chaine2[i]
print(newChaine2)

os.system("pause")
| true
|
4251c6d476027402bd1019cbf8965c21e61adbd3
|
Python
|
nalapati/sdc-behavioral-cloning
|
/models.py
|
UTF-8
| 9,270
| 2.671875
| 3
|
[] |
no_license
|
"""Model definitions, construction, testing, validation, training.
NOTE: We used parts of this code as a framework for the Udacity
SDC Challenge 2, https://github.com/emef/sdc, however for this
project I experimented with 3D convolutional networks.
"""
import logging
import os
import time
# Adds functionality to work with a dataset
from datasets import load_dataset
from keras import backend as K
from keras import metrics
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.engine.topology import Merge
from keras.layers import (
Activation, BatchNormalization, Dense, Dropout, Flatten,
Input, SpatialDropout2D, SpatialDropout3D, merge)
from keras.layers.advanced_activations import PReLU
from keras.layers.convolutional import (
AveragePooling2D, Convolution2D, Convolution3D,
MaxPooling2D, MaxPooling3D)
from keras.models import Model, Sequential
from keras.models import load_model as keras_load_model
from keras.regularizers import l2
import numpy as np
import tensorflow as tf
logger = logging.getLogger(__name__)
class SdcModel(object):
    """Wraps a persisted Keras model: training, evaluation and saving.

    Instances are built from a model-config dict (as returned by
    ``SdcModel.create`` or ``SdcModel.save``) containing at least
    'model_uri' and 'timesteps'.
    """
    def __init__(self, model_config):
        """
        @param model_config - dict with 'model_uri' (path of a saved keras
            model) and 'timesteps' (temporal depth of the 3D conv input).
        """
        self.model = keras_load_model(model_config['model_uri'])
        self.timesteps = model_config['timesteps']

    def fit(self, dataset, training_args, callbacks=None):
        """ This method constructs a training and validation generator
        and calls keras model fit_generator to train the model.

        @param dataset - See Dataset(datasets.py)
        @param training_args - Dict containing training params
            (epochs, batch_size, pctl_sampling, validation_size, epoch_size)
        @param callbacks - Any keras callbacks to use in the training process
            (snapshots, early exit) and so on.
        """
        batch_size = training_args.get('batch_size', 100)
        epochs = training_args.get('epochs', 5)
        pctl_sampling = training_args.get('pctl_sampling', False)
        validation_size = training_args.get(
            'validation_size', dataset.validation_generator(
                batch_size).get_size())
        epoch_size = training_args.get(
            'epoch_size', dataset.training_generator(
                batch_size).get_size())
        # display model configuration
        self.model.summary()
        training_generator = dataset.training_generator(batch_size)
        validation_generator = dataset.validation_generator(batch_size)
        if self.timesteps:
            # Stack frames along a time axis for the 3D conv model.
            training_generator = training_generator.with_timesteps(
                self.timesteps)
            validation_generator = validation_generator.with_timesteps(
                self.timesteps)
        if pctl_sampling:
            training_generator = training_generator.with_pctl_sampling()
        # The returned history object was never used, so it is not kept.
        self.model.fit_generator(
            training_generator,
            validation_data=validation_generator,
            samples_per_epoch=epoch_size,
            nb_val_samples=validation_size,
            nb_epoch=epochs,
            verbose=1,
            callbacks=(callbacks or []))

    def evaluate(self, dataset):
        """Evaluate on the dataset's test split; returns [mse, rmse].

        @param dataset - See Dataset(dataset.py)
        """
        generator = dataset.testing_generator(32)
        if self.timesteps:
            generator = generator.with_timesteps(self.timesteps)
        return std_evaluate(self, generator)

    def predict_on_batch(self, batch):
        """
        @param batch - batch of input per model configuration.
        """
        return self.model.predict_on_batch(batch)

    def save(self, model_path):
        """
        @param model_path - path at which to save the model.
        @return - dict with a model configuration that round-trips
            through ``SdcModel.__init__``.
        """
        save_model(self.model, model_path)
        # Include 'timesteps': __init__ requires it, and previously the
        # returned config omitted it, so reloading raised KeyError.
        return {
            'model_uri': model_path,
            'timesteps': self.timesteps,
        }

    @classmethod
    def create(cls, creation_args):
        """
        @param creation_args - Dict containing params with which to create a
            model: 'input_shape' (timesteps, H, W, C), 'timesteps',
            'model_uri' (where the fresh model is persisted).
        @return - model configuration dict to be used to construct an SdcModel.
        """
        # Functional-API 3D convolutional regression network
        # (Keras 1.x argument names: init/border_mode/W_regularizer).
        img_input = Input(shape=creation_args['input_shape'])
        layer = MaxPooling3D((1, 2, 2))(img_input)
        layer = Convolution3D(60, 5, 5, 5, init="he_normal", activation="relu", border_mode="same")(layer)
        layer = MaxPooling3D((2, 3, 3))(layer)
        layer = SpatialDropout3D(0.5)(layer)
        layer = BatchNormalization(axis=4)(layer)
        layer = Convolution3D(120, 3, 3, 3, init="he_normal", activation="relu", border_mode="same")(layer)
        layer = MaxPooling3D((2, 3, 2))(layer)
        layer = SpatialDropout3D(0.5)(layer)
        layer = BatchNormalization(axis=4)(layer)
        layer = Convolution3D(180, 3, 3, 3, init="he_normal", activation="relu", border_mode="same")(layer)
        layer = MaxPooling3D((2, 3, 2))(layer)
        layer = SpatialDropout3D(0.5)(layer)
        layer = BatchNormalization(axis=4)(layer)
        layer = Flatten()(layer)
        layer = Dense(256)(layer)
        layer = PReLU()(layer)
        layer = Dropout(0.5)(layer)
        layer = Dense(1, W_regularizer=l2(0.001))(layer)
        model = Model(input=img_input, output=layer)
        model.compile(
            loss='mean_squared_error',
            optimizer='adadelta',
            metrics=['rmse'])  # 'rmse' resolves via metrics.rmse registered in this module
        model.save(creation_args['model_uri'])
        return {
            'model_uri': creation_args['model_uri'],
            'timesteps': creation_args['timesteps']
        }
def std_evaluate(model, generator):
    """
    Evaluates a model on the dataset represented by the generator.

    @param model - any object exposing predict_on_batch(X)
    @param generator - iterator yielding (X, y) batches and exposing
        get_size() / get_batch_size()
    @return - list of [mse, rmse]
    """
    size = generator.get_size()
    batch_size = generator.get_batch_size()
    # Integer batch count: under true division `size / batch_size` produced
    # a float that was then fed to np.arange, silently truncating.
    n_batches = size // batch_size
    err_sum = 0.
    err_count = 0.
    for _ in range(n_batches):
        X_batch, y_batch = next(generator)
        y_pred = model.predict_on_batch(X_batch)
        err_sum += np.sum((y_batch - y_pred) ** 2)
        err_count += len(y_pred)
    mse = err_sum / err_count
    return [mse, np.sqrt(mse)]
def save_model(model, model_path):
    """
    Save a keras model (h5) plus its architecture (json) to a local path.

    @param model - keras model
    @param model_path - local path to write to; a companion .json file with
        the architecture is written next to it.
    """
    out_dir = os.path.dirname(model_path)
    if out_dir:
        # exist_ok replaces the old bare try/except, which also swallowed
        # real filesystem errors such as permission denied.
        os.makedirs(out_dir, exist_ok=True)
    json_string = model.to_json()
    model.save(model_path)
    # Swap only the extension; str.replace("h5", "json") would also mangle
    # an "h5" occurring elsewhere in the path.
    json_path = os.path.splitext(model_path)[0] + ".json"
    with open(json_path, "w") as f:
        f.write(json_string)
        f.write("\n")
def rmse(y_true, y_pred):
    """Root-mean-squared error expressed with Keras backend ops, so it can
    be used as a Keras training/validation metric."""
    squared_error = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_error))
# Register under keras.metrics so models may request metrics=['rmse'].
metrics.rmse = rmse
def train_model(args):
    """ Trains a model using the specified args.

    @param args - Dict with keys: model_config (see SdcModel), dataset_path,
        model_path (output directory), task_id, training_args.
    """
    logger.info('loading model with config %s', args)
    model = SdcModel(args['model_config'])
    dataset = load_dataset(args['dataset_path'])
    baseline_mse = dataset.get_baseline_mse()
    # Lazy %-args: the previous inline '%' formatting was eager and
    # inconsistent with the other logger calls in this function.
    logger.info('baseline mse: %f, baseline rmse: %f',
                baseline_mse, np.sqrt(baseline_mse))
    model_checkpoint = ModelCheckpoint(
        "weights.{epoch:02d}-{val_loss:.2f}.hdf5",
        monitor='val_loss',
        verbose=1,
        save_best_only=False,
        save_weights_only=False,
        mode='auto',
        period=1)
    earlystop = EarlyStopping(monitor="val_rmse", min_delta=0.0005, patience=12, mode="min")
    model.fit(dataset, args['training_args'], [earlystop, model_checkpoint])
    output_model_path = os.path.join(
        args['model_path'], '%s.h5' % args['task_id'])
    output_config = model.save(output_model_path)
    logger.info('Wrote final model to %s', output_model_path)
    # evaluation is [mse, rmse]; improvement is the relative MSE reduction.
    evaluation = model.evaluate(dataset)
    training_mse = evaluation[0]
    improvement = -(training_mse - baseline_mse) / baseline_mse
    logger.info('Evaluation: %s', evaluation)
    logger.info('Baseline MSE %.5f, training MSE %.5f, improvement %.2f%%',
                baseline_mse, training_mse, improvement * 100)
    logger.info('output config: %s', output_config)
def generate_id():
    """
    @return - a unique-ish task id: the current Unix time in whole
        seconds, as a string.
    """
    now = time.time()
    return str(int(now))
def main():
    # Entry point: builds a fresh 3D-conv model and trains it on a local
    # dataset. All paths below are machine-specific and hard-coded.
    logging.basicConfig(level=logging.INFO)
    train_model({
        "dataset_path": "/home/nalapati/udacity/sdc/udacity-p3/datasets/dataset_32",
        "model_path": "/home/nalapati/udacity/sdc/udacity-p3/models",
        # Creates (and persists) a brand-new model, then hands its config on.
        "model_config": SdcModel.create({
            "input_shape": (10, 80, 320, 3),  # (timesteps, height, width, channels)
            "model_uri": "/home/nalapati/models/" + generate_id() + ".h5",
            "timesteps": 10
        }),
        "task_id": str(int(time.time())),
        "training_args": {
            "batch_size": 32,
            "epochs": 50
        },
    })
if __name__ == '__main__':
    main()
| true
|
1f021ba4c879256feea64ba8a6a897fbfa42d872
|
Python
|
Gedevan-Aleksizde/datar
|
/datar/forcats/lvl_addrm.py
|
UTF-8
| 3,823
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
"""Provides functions to add or remove levels"""
from typing import Any, Iterable, List
from pandas import Categorical
from pipda import register_verb
from pipda.utils import CallingEnvs
from ..base import levels, union, table, intersect, setdiff
from ..core.contexts import Context
from ..core.types import ForcatsRegType, ForcatsType, is_scalar, is_null
from .lvls import lvls_expand, lvls_union, refactor
from .utils import check_factor
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_expand(_f: ForcatsType, *additional_levels: Any) -> Categorical:
    """Add additional levels to a factor

    Args:
        _f: A factor
        *additional_levels: Additional levels to add to the factor.
            Levels that already exist will be silently ignored.

    Returns:
        The factor with levels expanded
    """
    _f = check_factor(_f)
    old_levels = levels(_f, __calling_env=CallingEnvs.REGULAR)
    # Flatten the varargs: scalars are taken as-is, iterables are unpacked.
    extra_levels = []
    for extra in additional_levels:
        extra_levels.extend([extra] if is_scalar(extra) else extra)
    return lvls_expand(
        _f,
        union(old_levels, extra_levels),
        __calling_env=CallingEnvs.REGULAR,
    )
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_explicit_na(
    _f: ForcatsType, na_level: Any = "(Missing)"
) -> Categorical:
    """Make missing values explicit

    This gives missing values an explicit factor level, ensuring that they
    appear in summaries and on plots.

    Args:
        _f: A factor
        na_level: Level to use for missing values.
            This is what NAs will be changed to.

    Returns:
        The factor with explicit na_levels
    """
    _f = check_factor(_f)
    # levs = levels(_f, __calling_env=CallingEnvs.REGULAR)
    is_missing = is_null(_f)
    # is_missing_level = is_null(levs)

    # Only rewrite the factor when there is at least one missing value:
    # add `na_level` as a level, then assign it to the NA positions.
    if any(is_missing):
        _f = fct_expand(_f, na_level)
        _f[is_missing] = na_level

        return _f

    # NAs cannot be a level in pandas.Categorical, so the R-forcats branch
    # that renames a missing *level* is intentionally disabled below.
    # if any(is_missing_level):
    #     levs[is_missing_level] = na_level
    #     return lvls_revalue(_f, levs)

    return _f
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_drop(_f: ForcatsType, only: Any = None) -> Categorical:
    """Drop unused levels

    Args:
        _f: A factor
        only: A character vector restricting the set of levels to be dropped.
            If supplied, only levels that have no entries and appear in
            this vector will be removed.

    Returns:
        The factor with unused levels dropped
    """
    _f = check_factor(_f)
    levs = levels(_f, __calling_env=CallingEnvs.REGULAR)
    # Per-level usage counts; unused levels are candidates for dropping.
    counts = table(_f, __calling_env=CallingEnvs.REGULAR).iloc[0, :]
    unused = levs[counts == 0]

    if only is not None:
        if is_scalar(only):
            only = [only]
        # Restrict dropping to the levels named in `only`.
        unused = intersect(unused, only, __calling_env=CallingEnvs.REGULAR)

    kept = setdiff(levs, unused, __calling_env=CallingEnvs.REGULAR)
    return refactor(_f, new_levels=kept)
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_unify(  # pylint: disable=invalid-name,redefined-outer-name
    fs: Iterable[ForcatsType],
    levels: Iterable = None,
) -> List[Categorical]:
    """Unify the levels in a list of factors

    Args:
        fs: A list of factors
        levels: Set of levels to apply to every factor. Default to union
            of all factor levels

    Returns:
        A list of factors with the levels expanded
    """
    # Default: the union of every factor's levels.
    target_levels = lvls_union(fs) if levels is None else levels
    return [
        lvls_expand(
            check_factor(fct),
            new_levels=target_levels,
            __calling_env=CallingEnvs.REGULAR,
        )
        for fct in fs
    ]
| true
|
db2064382dcd88c124b1ec09226493cb2e525e1a
|
Python
|
JanHendrikDolling/configvalidator
|
/test/test_timezone.py
|
UTF-8
| 1,141
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015 by Jan-Hendrik Dolling.
:license: Apache 2.0, see LICENSE for more details.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from configvalidator.tools.timezone import TZ
import datetime
class MyTestCase(unittest.TestCase):
    """Unit tests for the fixed-offset TZ tzinfo implementation."""

    def test_tzinfo_utc(self):
        # Default construction behaves as plain UTC.
        self.assertEqual("UTC", TZ().tzname(None))
        self.assertEqual(datetime.timedelta(0), TZ().utcoffset(None))
        self.assertEqual(datetime.timedelta(0), TZ().dst(None))
        # Non-zero offsets are reflected in utcoffset()/dst().
        self.assertEqual(datetime.timedelta(hours=-10), TZ(hours=-10).utcoffset(None))
        self.assertEqual(datetime.timedelta(minutes=1), TZ(minutes=1).dst(None))
        self.assertEqual(datetime.timedelta(minutes=-1), TZ(minutes=-1).dst(None))
        # Name formatting: both minute signs yield the same name here —
        # presumably TZ normalizes the minute sign to the hour sign; confirm
        # against the TZ implementation.
        self.assertEqual("UTC-02:39", TZ(hours=-2, minutes=-39).tzname(None))
        self.assertEqual("UTC-02:39", TZ(hours=-2, minutes=39).tzname(None))
        # Positive offsets are zero-padded with a leading '+'.
        self.assertEqual("UTC+00:39", TZ(minutes=39).tzname(None))
        self.assertEqual("UTC+22:04", TZ(hours=22, minutes=4).tzname(None))
if __name__ == '__main__':
    unittest.main()
| true
|
8b1210e6ec5f242bb968b8855fc6ba3803ba0a24
|
Python
|
hope7th/FluencyPython
|
/1703011417encode.py
|
UTF-8
| 153
| 2.609375
| 3
|
[] |
no_license
|
# -*- coding:utf-8 -*-
if __name__ == '__main__':
    # Demonstrate how the same text is represented in several encodings.
    sample = 'El Niño'
    for codec_name in ('latin_1', 'utf_8', 'utf_16'):
        encoded = sample.encode(codec_name)
        print(codec_name, encoded, sep='\t')
|
dcc4f4e6f994a3687487919d92d9c57034bbd5c1
|
Python
|
Nicolezjy/Recommend_system
|
/Recommendation_Item.py
|
UTF-8
| 4,068
| 2.90625
| 3
|
[] |
no_license
|
# coding: utf-8
#item based CF
from __future__ import division
import numpy as np
import scipy as sp
class Item_based_CF:
    """Item-based collaborative filtering over a (user, item, rating) table.

    `X` is an array whose rows are [user_id, item_id, rating].  Item ids
    index the similarity matrix directly, so they are expected to be small
    positive integers (the matrix is sized from the number of distinct
    items, wasting index 0).
    """
    def __init__(self, X):
        self.X = X                         # the full ratings table
        self.mu = np.mean(self.X[:, 2])    # global average rating
        self.ItemsForUser = {}             # user_id -> {item_id: rating}
        self.UsersForItem = {}             # item_id -> {user_id: rating}
        for row in self.X:
            uid, i_id, rat = row[0], row[1], row[2]
            self.UsersForItem.setdefault(i_id, {})
            self.ItemsForUser.setdefault(uid, {})
            self.UsersForItem[i_id][uid] = rat
            self.ItemsForUser[uid][i_id] = rat
        # Index 0 is wasted so item ids can index the matrix directly.
        n_Items = len(self.UsersForItem) + 1
        # -1 marks "not computed yet".  np.float was removed in NumPy 1.24;
        # the builtin float is the equivalent dtype.
        self.similarity = np.full((n_Items, n_Items), -1.0, dtype=float)

    def sim_cal(self, i_id1, i_id2):
        """Pearson-style similarity between items i_id1 and i_id2, cached
        symmetrically in self.similarity."""
        if self.similarity[i_id1][i_id2] != -1:  # already computed
            return self.similarity[i_id1][i_id2]
        # Users who rated both items.
        common_users = [u for u in self.UsersForItem[i_id1]
                        if u in self.UsersForItem[i_id2]]
        n = len(common_users)
        if n == 0:
            # No co-rating user: define similarity as 0.  Cache both
            # directions (the original wrote the second entry to the wrong
            # cell, [i_id1][i_id1] instead of [i_id2][i_id1]).
            self.similarity[i_id1][i_id2] = 0
            self.similarity[i_id2][i_id1] = 0
            return 0
        # Ratings of the common users for each item.
        s1 = np.array([self.UsersForItem[i_id1][u] for u in common_users])
        s2 = np.array([self.UsersForItem[i_id2][u] for u in common_users])
        sum1 = np.sum(s1)
        sum2 = np.sum(s2)
        sum1Sq = np.sum(s1 ** 2)
        sum2Sq = np.sum(s2 ** 2)
        pSum = np.sum(s1 * s2)
        # Pearson correlation numerator / denominator.
        num = pSum - (sum1 * sum2 / n)
        den = np.sqrt((sum1Sq - sum1 ** 2 / n) * (sum2Sq - sum2 ** 2 / n))
        if den == 0:
            self.similarity[i_id1][i_id2] = 0
            self.similarity[i_id2][i_id1] = 0
            return 0
        self.similarity[i_id1][i_id2] = num / den
        self.similarity[i_id2][i_id1] = num / den
        return num / den

    def pred(self, uid, i_id):
        """Predict user `uid`'s rating for item `i_id` as the similarity-
        weighted average of that user's own ratings; fall back to the global
        mean when no non-negatively-similar item exists."""
        sim_accumulate = 0.0
        rat_acc = 0.0
        for item in self.ItemsForUser[uid]:  # every item uid has rated
            sim = self.sim_cal(item, i_id)
            if sim < 0:
                continue  # skip negatively correlated items
            rat_acc += sim * self.ItemsForUser[uid][item]
            sim_accumulate += sim
        if sim_accumulate == 0:  # no usable neighbour: global average
            return self.mu
        return rat_acc / sim_accumulate

    def test(self, test_X):
        """Predict every (user, item, rating) row of `test_X`, print the
        RMSE against the recorded ratings and return the predictions."""
        test_X = np.array(test_X)
        output = []
        sums = 0
        print("the test data size is ", test_X.shape)
        for i in range(test_X.shape[0]):
            uid = test_X[i][0]
            i_id = test_X[i][1]
            # Default entries so users/items unseen at training time do not
            # raise KeyError during prediction.
            self.UsersForItem.setdefault(i_id, {})
            self.ItemsForUser.setdefault(uid, {})
            pre = self.pred(uid, i_id)
            output.append(pre)
            sums += (pre - test_X[i][2]) ** 2
        rmse = np.sqrt(sums / test_X.shape[0])
        print("the rmse on test data is ", rmse)
        return output
| true
|
e14e6c581ecf719d2bdbd801d121c53568a84601
|
Python
|
ITT-wh/NeuralNetwork
|
/NerualNetwork/neural_network/week2/lr_utils.py
|
UTF-8
| 2,091
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
import h5py
import matplotlib.pyplot as plot
# Load the cat / non-cat image data from the HDF5 files.
def load_dataset():
    """Read the train/test splits and class names from the h5 datasets.

    Returns (train_x, train_y, test_x, test_y, classes), where the label
    arrays are reshaped to row vectors of shape (1, m).
    """
    train_dataset = h5py.File('../../datasets/train_catvnoncat.h5', "r")
    # train_dataset.keys() lists the stored keys; "[:]" slices the whole
    # h5 dataset into memory.
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])
    # print(train_set_x_orig[24].shape)
    # your train set labels
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])
    test_dataset = h5py.File('../../datasets/test_catvnoncat.h5', "r")
    # your test set features
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])
    # your test set labels
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])
    # the list of classes
    classes = np.array(test_dataset["list_classes"][:])
    # Fix the label dimensions: (m,) ---> (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
# Data pre-processing
def pre_process_data():
    """Load the cat/non-cat data, flatten each image into a column vector
    and scale pixel values to [0, 1].

    Returns (train_x, train_y, test_x, test_y) with feature matrices of
    shape (h*w*c, m) — one column per example, ready for the network.
    """
    train_x_orig, train_y, test_x_orig, test_y, _classes = load_dataset()
    # Flatten (m, h, w, c) -> (h*w*c, m) and normalise by the max pixel value.
    train_x = train_x_orig.reshape(train_x_orig.shape[0], -1).T / 255.0
    test_x = test_x_orig.reshape(test_x_orig.shape[0], -1).T / 255.0
    return train_x, train_y, test_x, test_y
# Logistic sigmoid activation
def sigmoid(z):
    """Elementwise logistic function 1 / (1 + exp(-z))."""
    denom = 1 + np.exp(-z)
    return 1 / denom
# relu 函数
# def relu(z):
#
# s = np.max(0, z)
#
# return s
# def main():
#
# load_dataset()
#
#
# main()
| true
|
eeb00931ba248bbd9bf559f1d9b00182afbb0667
|
Python
|
FraugDib/algorithms
|
/money_change.py
|
UTF-8
| 4,602
| 3.71875
| 4
|
[] |
no_license
|
import time
def find_change(results, current_decomposition, n, denominations):
    """Recursively enumerate every ordered decomposition of `n`.

    Arguments
    results -- output accumulator; each completed decomposition (a list of
        denominations summing to n) is appended here
    current_decomposition -- the partial decomposition built so far
    n -- the target value to decompose (never changes)
    denominations -- denominations available at each step
    """
    total = sum(current_decomposition)
    # Base cases: an exact match is recorded, an overshoot is abandoned.
    if total == n:
        results.append(list(current_decomposition))
        return
    if total > n:
        return
    # Extend the partial decomposition with each denomination in turn and
    # recurse; denomination order determines the output order.
    for coin in denominations:
        find_change(results, current_decomposition + [coin], n, denominations)
    return
def main():
    # Each case runs find_change() against a hand-computed list of the
    # expected ordered decompositions.
    # Test 1: a single denomination has exactly one decomposition ##########
    expected_results =[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
    test(n=10,denominations=[1], expected_results=expected_results)
    # Test 2 #############################################
    expected_results = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                        [1, 1, 8],
                        [1, 8, 1],
                        [8, 1, 1]]
    test(n=10,denominations=[1, 8], expected_results=expected_results)
    # Test 3 #############################################
    expected_results =[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 7],
                       [1, 1, 7, 1],
                       [1, 1, 8],
                       [1, 7, 1, 1],
                       [1, 8, 1],
                       [7, 1, 1, 1],
                       [8, 1, 1]]
    test(n=10,denominations=[1, 7, 8], expected_results=expected_results)
    # Test 4 (label was duplicated as "Test 3") ##########
    expected_results =[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1, 3],
                       [1, 1, 1, 1, 1, 1, 3, 1],
                       [1, 1, 1, 1, 1, 3, 1, 1],
                       [1, 1, 1, 1, 1, 5],
                       [1, 1, 1, 1, 3, 1, 1, 1],
                       [1, 1, 1, 1, 3, 3],
                       [1, 1, 1, 1, 5, 1],
                       [1, 1, 1, 3, 1, 1, 1, 1],
                       [1, 1, 1, 3, 1, 3],
                       [1, 1, 1, 3, 3, 1],
                       [1, 1, 1, 5, 1, 1],
                       [1, 1, 3, 1, 1, 1, 1, 1],
                       [1, 1, 3, 1, 1, 3],
                       [1, 1, 3, 1, 3, 1],
                       [1, 1, 3, 3, 1, 1],
                       [1, 1, 3, 5],
                       [1, 1, 5, 1, 1, 1],
                       [1, 1, 5, 3],
                       [1, 3, 1, 1, 1, 1, 1, 1],
                       [1, 3, 1, 1, 1, 3],
                       [1, 3, 1, 1, 3, 1],
                       [1, 3, 1, 3, 1, 1],
                       [1, 3, 1, 5],
                       [1, 3, 3, 1, 1, 1],
                       [1, 3, 3, 3],
                       [1, 3, 5, 1],
                       [1, 5, 1, 1, 1, 1],
                       [1, 5, 1, 3],
                       [1, 5, 3, 1],
                       [3, 1, 1, 1, 1, 1, 1, 1],
                       [3, 1, 1, 1, 1, 3],
                       [3, 1, 1, 1, 3, 1],
                       [3, 1, 1, 3, 1, 1],
                       [3, 1, 1, 5],
                       [3, 1, 3, 1, 1, 1],
                       [3, 1, 3, 3],
                       [3, 1, 5, 1],
                       [3, 3, 1, 1, 1, 1],
                       [3, 3, 1, 3],
                       [3, 3, 3, 1],
                       [3, 5, 1, 1],
                       [5, 1, 1, 1, 1, 1],
                       [5, 1, 1, 3],
                       [5, 1, 3, 1],
                       [5, 3, 1, 1],
                       [5, 5]]
    test(n=10,denominations=[1, 3, 5], expected_results=expected_results)
def test(n, denominations, expected_results):
    # Run find_change for one case, report wall-clock time, and assert the
    # decompositions match the expected list exactly (including order).
    print("Testing `find_change()` for n: {}, denominations: {}".format(n, denominations))
    results = []
    current_decomposition = []
    start = time.time()
    find_change(results, current_decomposition, n, denominations)
    end = time.time()
    print("Time elapsed: {}".format(end - start))
    print("Results: \n{}\n".format(results))
    assert (results == expected_results)
# Run the whole suite at import time.
main()
| true
|
4be2e384e8ccaf17a94d4dc157c85a9c0dca7e85
|
Python
|
matk86/pymatgen
|
/pymatgen/core/bonds.py
|
UTF-8
| 4,070
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import os
import json
import collections
import warnings
from pymatgen.core.periodic_table import get_el_sp
"""
This class implements definitions for various kinds of bonds. Typically used in
Molecule analysis.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 26, 2012"
def _load_bond_length_data():
    """Loads bond length data from json file.

    Returns a dict mapping a sorted (el1, el2) element-symbol tuple to a
    {bond_order: length} dict.
    """
    # bond_lengths.json ships alongside this module.
    with open(os.path.join(os.path.dirname(__file__),
                           "bond_lengths.json")) as f:
        data = collections.defaultdict(dict)
        for row in json.load(f):
            # Sort so lookup is order-independent: (C, H) == (H, C).
            els = sorted(row['elements'])
            data[tuple(els)][row['bond_order']] = row['length']
        return data
# Module-level cache of the bond-length table, loaded once at import time.
bond_lengths = _load_bond_length_data()
class CovalentBond(object):
    """
    Defines a covalent bond between two sites.
    """

    def __init__(self, site1, site2):
        """
        Initializes a covalent bond between two sites.

        Args:
            site1 (Site): First site.
            site2 (Site): Second site.
        """
        self.site1 = site1
        self.site2 = site2

    @property
    def length(self):
        """
        Length of the bond (site-to-site distance).
        """
        return self.site1.distance(self.site2)

    @staticmethod
    def is_bonded(site1, site2, tol=0.2, bond_order=None):
        """
        Test if two sites are bonded, up to a certain limit.

        Args:
            site1 (Site): First site
            site2 (Site): Second site
            tol (float): Relative tolerance to test. Basically, the code
                checks if the distance between the sites is less than (1 +
                tol) * typical bond distances. Defaults to 0.2, i.e.,
                20% longer.
            bond_order: Bond order to test. If None, the code simply checks
                against all possible bond data. Defaults to None.

        Returns:
            Boolean indicating whether two sites are bonded.

        Raises:
            ValueError: if the element pair has no entry in the bond-length
                table.
        """
        # Only the first species on each site is considered — TODO confirm
        # intended behavior for disordered (multi-species) sites.
        sp1 = list(site1.species_and_occu.keys())[0]
        sp2 = list(site2.species_and_occu.keys())[0]
        dist = site1.distance(site2)
        # Sorted tuple matches the key convention of _load_bond_length_data.
        syms = tuple(sorted([sp1.symbol, sp2.symbol]))
        if syms in bond_lengths:
            all_lengths = bond_lengths[syms]
            if bond_order:
                return dist < (1 + tol) * all_lengths[bond_order]
            # No order specified: bonded if within tolerance of ANY order.
            for v in all_lengths.values():
                if dist < (1 + tol) * v:
                    return True
            return False
        raise ValueError("No bond data for elements {} - {}".format(*syms))

    def __repr__(self):
        return "Covalent bond between {} and {}".format(self.site1,
                                                        self.site2)

    def __str__(self):
        return self.__repr__()
def get_bond_length(sp1, sp2, bond_order=1):
    """
    Get the bond length between two species.

    Args:
        sp1 (Specie): First specie.
        sp2 (Specie): Second specie.
        bond_order: For species with different possible bond orders,
            this allows one to obtain the bond length for a particular bond
            order. For example, to get the C=C bond length instead of the
            C-C bond length, this should be set to 2. Defaults to 1.

    Returns:
        Bond length in Angstrom. If no data is available, the sum of the atomic
        radii is used.
    """
    sp1 = get_el_sp(sp1)
    sp2 = get_el_sp(sp2)
    # Sorted tuple matches the key convention of the bond_lengths table.
    syms = tuple(sorted([sp1.symbol, sp2.symbol]))
    if syms in bond_lengths:
        all_lengths = bond_lengths[syms]
        # A falsy bond_order (e.g. 0) falls back to single-bond data, as
        # in the original if/else.
        return all_lengths.get(bond_order or 1)
    # Fixed missing space: the adjacent literals previously concatenated
    # into "Returning sumof atomic radius."
    warnings.warn("No bond lengths for %s-%s found in database. Returning sum "
                  "of atomic radius." % (sp1, sp2))
    return sp1.atomic_radius + sp2.atomic_radius
| true
|
b701803736e2929be8efa648812df9b2d80498c9
|
Python
|
xzc5858/caigou
|
/plug.py
|
UTF-8
| 884
| 2.875
| 3
|
[] |
no_license
|
import requests
from bs4 import BeautifulSoup
def request_post(url, data):
    """POST `data` to `url` and return the Response on HTTP 200.

    Returns None both on network errors and on non-200 status codes
    (previously the non-200 case fell off the end implicitly).
    """
    try:
        response = requests.post(url, data)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response
    return None
def request_get(url):
    """GET `url` and return the Response on HTTP 200.

    Returns None both on network errors and on non-200 status codes
    (previously the non-200 case fell off the end implicitly).
    """
    try:
        response = requests.get(url)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response
    return None
def request_getsoup(url):
    # Fetch `url` via GET and parse the body with html5lib.
    # NOTE(review): request_get returns None on errors/non-200, which makes
    # `r.text` raise AttributeError here — confirm callers only hit
    # successful responses.
    r = request_get(url)
    soup = BeautifulSoup(r.text, "html5lib")
    return soup
def request_postsoup(url, data):
    # POST `data` to `url` and parse the body with html5lib.
    # NOTE(review): request_post returns None on errors/non-200, which makes
    # `r.text` raise AttributeError here — confirm callers only hit
    # successful responses.
    r = request_post(url, data)
    soup = BeautifulSoup(r.text, "html5lib")
    return soup
def request_soup(url, isGet, data):
    """Fetch `url` and return the parsed soup: via GET when `isGet` is
    truthy, otherwise via POST with `data`."""
    return request_getsoup(url) if isGet else request_postsoup(url, data)
| true
|
e164fab8ecd973f8126201010db55041988ade9b
|
Python
|
debasishdebs/parameterTesting
|
/Git/balanceClasses/algoScores.py
|
UTF-8
| 13,311
| 3.046875
| 3
|
[] |
no_license
|
__author__ = 'Debasish'
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from ggplot import *
from sklearn.metrics import *
import sys
# Redirect every subsequent `print` in this module into output.txt; the
# handle is closed at the end of get_algo_scores() and never restored.
f = open('output.txt', 'w')
sys.stdout = f
''' Todo : Form pairs. (Error & e_5), (error & e_10), (error & e_15) and so on. Total 6 pairs will be formed of different length. Each pair ll have length same as its e_x value.
Pass each list of pair together (error & e_5), (error & e_10) to pd.crosstab function after converting them to Pandas Series & Numpy.ndarray.
Save each crasstab result in different list thus giving f-table for each possible case.
Use each crosstab to plot ROC curve. (Part of skitlearn package) and find area under curve of each parameter. Once done, select the most optimized one.'''
def plotROC(fpr, tpr, preds_auc):
    # Render a single ROC curve (plus the chance diagonal) with matplotlib
    # and block on plt.show().  Currently unused: callers build ggplot
    # figures and save PNGs instead.
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)'%preds_auc)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
def getPredClsScore(dataset):
    """Extract the actual error labels and the per-horizon predicted
    positive-class scores from the first CSV table in `dataset`.

    Column layout of each data row: index 1 is the actual error label;
    indices 8..13 are the predicted scores for horizons e_5 .. e_30.
    Row 0 is the header and is skipped.

    Returns (error_list, pred_pos_cls) where pred_pos_cls maps horizon
    names ('e_5', ..., 'e_30') to lists of float scores.
    """
    horizons = ['e_5', 'e_10', 'e_15', 'e_20', 'e_25', 'e_30']
    pred_pos_cls = dict((name, []) for name in horizons)
    error_list = []
    rows = dataset[0]
    for row in rows[1:]:  # skip the header row
        error_list.append(float(row[1]))
        for offset, name in enumerate(horizons):
            pred_pos_cls[name].append(float(row[8 + offset]))
    return error_list, pred_pos_cls
def get_algo_scores(filename):
    # Read the prediction CSV, build timestamp-aligned (error, e_x) pairs
    # for every forecast horizon (5..30 min), then print AUC / precision /
    # confusion matrices and save one ROC PNG per horizon.  Python 2 code;
    # all prints go to output.txt via the module-level stdout redirect.
    print "In Here"
    dataset = [None]*1
    for i in range(1):
        fname = filename
        lines = csv.reader(open(fname, "rt"))
        lines = csv.reader(open(fname, "rt"))
        dataset[i] = list(lines)
    error_list, predsPosClsScore = getPredClsScore(dataset)
    dictionaries = []
    for i in range(1):
        dictionaries = create_dicts(dataset[i]) #Dictionary[i] stores all the data in i_th.csv file
    #print len(dictionaries)
    dictL = [None]*7
    for i in range(len(dictL)):
        #print (dictionaries[i])
        #print "Length of dictionary passed ", len(dictionaries[0])
        dictL[i] = dict_to_list(dictionaries) #Each row of dictL holds errors for the whole day. Either error column or e_5 column or e_10 & so on..
    #print dictL[5][1]
    #print (dictL[1][0])
    # Pair the actual-error column (dictL[0]) with each horizon column.
    for i in range(1,len(dictL)):
        if i == 1:
            err_e5 = merge_lists(dictL[i], i, dictL[0])
        elif i == 2:
            err_e10 = merge_lists(dictL[i], i, dictL[0])
        elif i == 3:
            err_e15 = merge_lists(dictL[i], i, dictL[0])
        elif i == 4:
            err_e20 = merge_lists(dictL[i], i, dictL[0])
        elif i == 5:
            err_e25 = merge_lists(dictL[i], i, dictL[0])
        elif i == 6:
            err_e30 = merge_lists(dictL[i], i, dictL[0])
    #print err_e10
    # Per-horizon: compute AUC, save a ggplot ROC curve, print the
    # confusion matrix.  The six branches are identical except for names.
    auc = {}
    for i in range(1, len(dictL)):
        if i == 1:
            err_list, ftr_err_list = data_crosstab(err_e5, i)
            preds_auc = roc_auc_score(error_list, predsPosClsScore['e_5'])
            auc['e_5'] = preds_auc
            print "AUC for e_5 :", preds_auc
            fpr, tpr, threshold = roc_curve(error_list, predsPosClsScore['e_5'])
            df = pd.DataFrame(dict(fpr=fpr,tpr=tpr))
            plt = ggplot(df, aes(x='fpr',y='tpr')) + geom_line() + geom_abline(linetype='dashed') +\
                ggtitle("ROC Curve w/ AUC=%s" % str(preds_auc))
            ggsave(filename='auc_e5.png', plot=plt)
            #plotROC(fpr, tpr, preds_auc)
            crosstab(err_list, ftr_err_list)
        elif i == 2:
            err_list, ftr_err_list = data_crosstab(err_e10, i)
            print "AUC for e_10 :", roc_auc_score(error_list, predsPosClsScore['e_10'])
            auc['e_10'] = roc_auc_score(error_list, predsPosClsScore['e_10'])
            fpr, tpr, threshold = roc_curve(error_list, predsPosClsScore['e_10'])
            df = pd.DataFrame(dict(fpr=fpr,tpr=tpr))
            plt = ggplot(df, aes(x='fpr',y='tpr')) + geom_line() + geom_abline(linetype='dashed') +\
                ggtitle("ROC Curve w/ AUC=%s" % str(auc['e_10']))
            ggsave(filename='auc_e10.png', plot=plt)
            #plotROC(fpr, tpr, preds_auc)
            crosstab(err_list, ftr_err_list)
        elif i == 3:
            err_list, ftr_err_list = data_crosstab(err_e15, i)
            print "AUC for e_15 :", roc_auc_score(error_list, predsPosClsScore['e_15'])
            auc['e_15'] = roc_auc_score(error_list, predsPosClsScore['e_15'])
            fpr, tpr, threshold = roc_curve(error_list, predsPosClsScore['e_15'])
            df = pd.DataFrame(dict(fpr=fpr,tpr=tpr))
            plt = ggplot(df, aes(x='fpr',y='tpr')) + geom_line() + geom_abline(linetype='dashed') +\
                ggtitle("ROC Curve w/ AUC=%s" % str(auc['e_15']))
            ggsave(filename='auc_e15.png', plot=plt)
            #plotROC(fpr, tpr, preds_auc)
            crosstab(err_list, ftr_err_list)
        elif i == 4:
            err_list, ftr_err_list = data_crosstab(err_e20, i)
            print "AUC for e_20 :", roc_auc_score(error_list, predsPosClsScore['e_20'])
            auc['e_20'] = roc_auc_score(error_list, predsPosClsScore['e_20'])
            fpr, tpr, threshold = roc_curve(error_list, predsPosClsScore['e_20'])
            df = pd.DataFrame(dict(fpr=fpr,tpr=tpr))
            plt = ggplot(df, aes(x='fpr',y='tpr')) + geom_line() + geom_abline(linetype='dashed') +\
                ggtitle("ROC Curve w/ AUC=%s" % str(auc['e_20']))
            ggsave(filename='auc_e20.png', plot=plt)
            crosstab(err_list, ftr_err_list)
        elif i == 5:
            err_list, ftr_err_list = data_crosstab(err_e25, i)
            print "AUC for e_25 :", roc_auc_score(error_list, predsPosClsScore['e_25'])
            auc['e_25'] = roc_auc_score(error_list, predsPosClsScore['e_25'])
            fpr, tpr, threshold = roc_curve(error_list, predsPosClsScore['e_25'])
            df = pd.DataFrame(dict(fpr=fpr,tpr=tpr))
            plt = ggplot(df, aes(x='fpr',y='tpr')) + geom_line() + geom_abline(linetype='dashed') +\
                ggtitle("ROC Curve w/ AUC=%s" % str(auc['e_25']))
            ggsave(filename='auc_e25.png', plot=plt)
            crosstab(err_list, ftr_err_list)
        elif i == 6:
            err_list, ftr_err_list = data_crosstab(err_e30, i)
            print "AUC for e_30 :", roc_auc_score(error_list, predsPosClsScore['e_30'])
            auc['e_30'] = roc_auc_score(error_list, predsPosClsScore['e_30'])
            fpr, tpr, threshold = roc_curve(error_list, predsPosClsScore['e_30'])
            df = pd.DataFrame(dict(fpr=fpr,tpr=tpr))
            plt = ggplot(df, aes(x='fpr',y='tpr')) + geom_line() + geom_abline(linetype='dashed') +\
                ggtitle("ROC Curve w/ AUC=%s" % str(auc['e_30']))
            ggsave(filename='auc_e30.png', plot=plt)
            crosstab(err_list, ftr_err_list)
    # with open ('auc.txt', 'w') as fp:
    #     for p in auc.items():
    #         fp.write("%s:%s\n" % p)
    # Close the module-level output.txt redirect target.
    f.close()
def crosstab(err_list, ftr_err_list):
    # Print the precision score and a confusion matrix for one horizon.
    # NOTE(review): mutates both list arguments in place (string -> int
    # conversion below) — callers must not reuse them afterwards.
    plt.plot(err_list, ftr_err_list)
    tpr, fpr, tp, fp = precision_recall(err_list, ftr_err_list)
    #print "TPR : {}, FPR : {} ".format(tpr, fpr)
    #print "TP : {}, FP : {}".format(tp,fp)
    #plt.show()
    #print np.trapz(err_list, ftr_err_list)
    #print set(ftr_err_list)
    # Labels arrive as strings from the CSV; sklearn needs ints.
    for i in range(len(err_list)):
        err_list[i] = int(err_list[i])
        ftr_err_list[i] = int(ftr_err_list[i])
    print "Precision Score : ",precision_score(err_list, ftr_err_list, average='binary')
    err_list = pd.Series(err_list)
    ftr_err_list = np.array(ftr_err_list)
    ct = pd.crosstab(err_list, ftr_err_list,rownames = ['actual'], colnames=['preds'])
    print "Confusion Matrix"
    print ct
    print "\n"
def precision_recall(y_actual, y_pred):
    # Count the binary confusion-matrix cells and derive TPR/FPR.
    # Labels may be strings; both sides are int()-coerced for comparison.
    tp = 0
    fp = 0
    tn = 0
    fn = 0
    for i in range(len(y_actual)):
        if int(y_actual[i]) == int(y_pred[i]) == 1:
            tp+=1
        elif int(y_actual[i]) == 1 and int(y_pred[i]) == 0:
            fn+=1
        elif int(y_actual[i]) == 0 and int(y_pred[i]) == 1:
            fp+=1
        elif int(y_actual[i]) == 0 and int(y_pred[i]) == 0:
            tn+=1
    print "Total TP : {0}, Total TN : {1}".format(tp,tn)
    print "Total FP : {0}, Total FN : {1}".format(fp,fn)
    # NOTE(review): raises ZeroDivisionError when a class is absent
    # (tp+fn == 0 or tn+fp == 0) — confirm inputs always contain both.
    tpr = float(tp)/float(tp+fn)
    fpr = float(fp)/float(tn+fp)
    print "True Positive Rate : {0}, False Positive Rate : {1}.".format(tpr, fpr)
    return tpr, fpr, tp, fp
def data_crosstab(data, x):
    """Split merged records into parallel (current, future) error lists.

    Each record in *data* is a dict holding the current error under
    'err' and the horizon-x prediction under 'e_{5x}'.
    Returns the two value lists in record order.
    """
    future_key = 'e_{}'.format(5 * x)
    current_errors = [record['err'] for record in data]
    future_errors = [record[future_key] for record in data]
    return current_errors, future_errors
def merge_lists(dicts, x, dict_err):
    """Join prediction values against the error series by timestamp.

    *dicts* is [timestamps, prediction_values]; *dict_err* is
    [timestamps, error_values].  For each prediction timestamp found in
    the error series, emit {'ts', 'err', 'e_{5x}'}; unmatched positions
    stay as empty dicts.
    """
    pred_key = 'e_{}'.format(str(x * 5))
    merged = [dict() for _ in dicts[0]]
    for i, ts in enumerate(dicts[0]):
        for j, err_ts in enumerate(dict_err[0]):
            if err_ts == ts:
                merged[i] = {'ts': ts, 'err': dict_err[1][j], pred_key: dicts[1][i]}
                break  # first timestamp match wins
    return merged
def dict_to_list(dictionary):
    """Flatten the first list of record-dicts into two value lists.

    Values equal to 'timestamp' or containing the letter 'e' (which
    covers the 'err'/'e_5'... markers) are skipped.  Of the remaining
    string values, single-character ones go into the second list
    (presumably error flags) and longer ones into the first
    (presumably timestamps) -- TODO confirm against the callers.

    Returns [long_values, single_char_values].
    """
    lDict = [[] for _ in range(2)]
    for entry in dictionary[0]:
        # .items() works on both Py2 and Py3 (was .iteritems(), Py2-only).
        for key, value in entry.items():
            if value != 'timestamp' and "e" not in value:
                if len(value) == 1:
                    lDict[1].append(value)
                else:
                    lDict[0].append(value)
    return lDict
def create_dicts(dataset):
lDicts = [None]*7
print "Length of DS from create_dict func : ", len(dataset)
for i in range(len(lDicts)):
lDicts[i] = getDict(dataset, i) #lDict[i] stores either only {ts->error} dictionary, or {ts->e_5} or {ts->e_10} and so on depending on value of 'i'..
#print "lDict : ",len(lDicts[i])
return lDicts
def getDict(dataset, x):
    """Project one (ts, value) column out of the raw rows.

    x == 0 reads the current-error column under key 'error';
    x >= 1 reads the horizon column under key 'e_{5x}' (x=1 -> 'e_5',
    x=2 -> 'e_10', ...).  One output dict per input row.

    The original spelled this out as seven copy-pasted branches (and
    returned None for x outside 0..6); this single loop is equivalent
    for 0 <= x <= 6 and generalizes beyond.
    """
    key = 'error' if x == 0 else 'e_{}'.format(5 * x)
    inp_dict = [dict() for _ in range(len(dataset))]
    for i in range(len(dataset)):
        try:
            # NOTE(review): i - x is negative for the first x rows, which
            # wraps to the end of the list via Python negative indexing
            # rather than skipping -- preserved from the original;
            # confirm this offset is intended.
            inp_dict[i] = {'ts': dataset[i][0], key: dataset[i - x][x + 1]}
        except IndexError:
            # Row too short for this horizon: leave the empty dict
            # (narrowed from the original bare `except`).
            continue
    return inp_dict
# Entry point: score the lookback-4 prediction dump for run 20.
filename = 'small_ds1_tse_temporal_lookback4_predictions_' + str(0+20) + '.csv'
get_algo_scores(filename)
| true
|
e0626d75da0973592edecbcb51f5c96331d96cdd
|
Python
|
guillaume-guerdoux/tournee_infirmiers
|
/tournee_infirmiers/patient/models.py
|
UTF-8
| 239
| 2.59375
| 3
|
[] |
no_license
|
from django.db import models
from user.models import Person
class Patient(Person):
    """A patient: a Person carrying a free-form information field."""

    # Free-text notes attached to the patient record.
    information = models.CharField(max_length=255)

    def __str__(self):
        # Render as "<first_name> <last_name>".
        return "{0} {1}".format(self.first_name, self.last_name)
| true
|
6429ab4ee1c0f939e1f32e345a34d46df89212eb
|
Python
|
trungnq2/build-tool-script
|
/result.py
|
UTF-8
| 809
| 2.84375
| 3
|
[] |
no_license
|
import os
import json
def createTxtFromJSON():
    """Render app_data.json into a human-readable app_data.txt.

    Writes one block per app: its id and zip name, then empty
    Version/Environment fields to be filled in by hand.
    """
    # Context managers close both files even if json.load raises
    # (the original opened the output handle first and leaked it on
    # a parse error).
    with open("app_data.json") as app_data:
        apps = json.load(app_data)['apps']
    # sortlist = sorted(apps, key=lambda k: k['appid'])
    with open("app_data.txt", "w") as out:
        # NOTE(review): the source's indentation was mangled; the
        # trailer lines are assumed to belong to each app's block --
        # confirm against the intended report layout.
        for app in apps:
            out.write("App: %s \n" % app['appid'])
            out.write("Name: %s \n" % app['zip'])
            out.write("\n")
            out.write("Version: \n")
            out.write("Environment: \n")
            out.write("Platform: iOS + Android \n")
def write_output(file_path, data):
    """Serialize *data* as pretty-printed JSON at *file_path*."""
    print("Full Path %s" % os.getcwd())
    print("Updating %s" % file_path)
    serialized = json.dumps(data, indent=4)
    with open(file_path, 'w') as handle:
        handle.write(serialized)
def createAppJSON(data):
    """Sort the app records by appid and persist them to app_data.json."""
    ordered = sorted(data, key=lambda record: record['appid'])
    write_output("app_data.json", {"apps": ordered})
| true
|
9afe68618b90cba4799b3541cb732d5043bfd895
|
Python
|
cbbing/wealth_spider
|
/CollectiveIntelligence/generatefeedvector.py
|
UTF-8
| 2,009
| 2.953125
| 3
|
[] |
no_license
|
#coding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
__author__ = 'cbb'
import feedparser
import re
import jieba
def get_word_counts(url):
    """
    Return the title of an RSS feed together with a dictionary counting
    how often each word occurs across the feed's entries.
    :param url: address of the RSS feed
    :return: (feed title, {word: count})
    """
    parsed = feedparser.parse(url)
    counts = {}
    # Walk every article in the feed.
    for entry in parsed.entries:
        # Some feeds carry the body in `summary`, others in `description`.
        if 'summary' in entry:
            body = entry.summary
        else:
            body = entry.description
        # Tally each segmented word from the title plus body.
        for word in get_words(entry.title + ' ' + body):
            counts[word] = counts.get(word, 0) + 1
    return parsed.feed.title, counts
def get_words(html):
    """Strip HTML tags and whitespace from *html*, then segment the
    remaining text into words with jieba (Chinese word segmentation)."""
    # Remove all HTML markup, then squeeze out whitespace.
    text = re.sub(r'<[^>]+>', '', html)
    text = re.sub('\s', '', text)
    # words = re.compile(r'[^A-Z^a-z]+').split(txt)  # English alternative
    segments = jieba.cut(text)
    return [word for word in segments if word != '']
# news = get_word_counts('http://news.baidu.com/n?cmd=1&class=stock&tn=rss')
# print news[0], news[1]

# apcount: number of feeds in which each word occurs more than once.
# wordcounts: per-feed word-count dicts keyed by feed title.
apcount = {}
wordcounts = {}
# Py2 builtin file(); one feed URL per line.
feedlist = [line for line in file('../Data/feedbaidu.txt', 'r')]
for feedurl in feedlist:
    title, wc = get_word_counts(feedurl.strip())
    wordcounts[title] = wc
    for word, count in wc.items():
        apcount.setdefault(word, 0)
        if count > 1:
            apcount[word] += 1
# Keep only words that occur in 10%-50% of the feeds: drops both rare
# noise and ubiquitous stop words.
wordlist = []
for w, bc in apcount.items():
    frac = float(bc)/len(feedlist)
    if frac > 0.1 and frac < 0.5:
        wordlist.append(w)
# Write a tab-separated matrix: header row of words, then one row per
# feed with that feed's count for each kept word (0 when absent).
out = file('blogdata.txt', 'w')
out.write('Blog')
for word in wordlist:
    out.write('\t{}'.format(word))
out.write('\n')
for blog, wc in wordcounts.items():
    out.write(blog)
    for word in wordlist:
        if word in wc:
            out.write('\t{}'.format(wc[word]))
        else:
            out.write('\t0')
    out.write('\n')
out.close()
| true
|
7535bb5f0f69326b6f8de3c7ca14f0ab3e2eaf48
|
Python
|
thuuyen98/ML
|
/Gradient_descent.py
|
UTF-8
| 2,021
| 3.171875
| 3
|
[] |
no_license
|
from sklearn.model_selection import train_test_split
import numpy as np
import pandas as pd
# Load the training table (presumably a filtered Titanic-style CSV --
# confirm) and impute missing numeric values with the column mean.
dataset= pd.read_csv("/Users/macos/Downloads/filted_train.csv")
dataset =dataset.fillna(dataset.mean())
# Encode sex as 0/1 so it can enter the linear model.
dataset= dataset.replace('male', 0)
dataset= dataset.replace('female', 1)
# First column is the label; the remaining columns are the features.
features= dataset.iloc[:,1:].values
labels= dataset.iloc[:,:1].values
labels = np.squeeze(labels)
# 80/20 train/validation split.
features_train, features_valid, labels_train, labels_valid = train_test_split(features, labels, test_size = 0.20)
print(features_train.shape)
print(features_valid.shape)
print(labels_train.shape)
print(labels_valid.shape)
# Hyper-parameter and model state: learning rate, weight vector a
# (6x1 -- assumes exactly 6 feature columns, TODO confirm) and bias b;
# both are updated in place by iris_gd below.
lr = 0.0001
a = np.ones(shape=(6,1))
b = 0
def predict(x):
    """Linear model output for feature matrix x: squeeze(x @ a + b).

    Uses the module-level weights a and bias b updated by iris_gd.
    """
    linear = np.matmul(x, a) + b
    return np.squeeze(linear)
# Gradient of the squared-error loss with respect to the weights a.
def d_fa(X, Y, Y_pred):
    """Return (-2/n) * sum((Y - Y_pred) @ X, axis=0)."""
    sample_count = float(X.shape[0])
    residual = Y - Y_pred
    return (-2 / sample_count) * np.sum(np.matmul(residual, X), axis=0)
# Gradient of the squared-error loss with respect to the bias b.
def d_fb(X, Y, Y_pred):
    """Return (-2/n) * sum(Y - Y_pred)."""
    sample_count = float(X.shape[0])
    total_residual = np.sum(Y - Y_pred)
    return (-2 / sample_count) * total_residual
# Take one gradient-descent step on the weight vector.
def update_a(a, da):
    """Return the weights after stepping against the gradient, scaled
    by the module-level learning rate lr."""
    step = lr * da
    return a - step
# Take one gradient-descent step on the bias.
def update_b(b, db):
    """Return the bias after stepping against the gradient, scaled by
    the module-level learning rate lr."""
    step = lr * db
    return b - step
# Gradient descent training loop.
def iris_gd(X_train, Y_train):
    """Fit the module-level weights a and bias b with 10000 fixed-rate
    gradient-descent iterations over the full training set."""
    global a, b
    for _ in range(10000):
        train_pred = predict(X_train)
        a = update_a(a, d_fa(X_train, Y_train, train_pred))
        b = update_b(b, d_fb(X_train, Y_train, train_pred))
# Model evaluation: fraction of rounded predictions matching the labels.
def eval(X_test, Y_test):
    """Return classification accuracy of the current weights on a
    held-out set.

    NOTE: shadows the builtin eval(); name kept for existing call sites.
    """
    rounded = np.round(predict(X_test))
    hits = np.count_nonzero(rounded == Y_test)
    return hits / rounded.shape[0]
# Train and evaluate the model.
#print(features_train.shape)
#print(labels_train.shape)
iris_gd(features_train, labels_train)
print("Ma trận trọng số a:\n", a)
print("Tham số b:", b)
acc = eval(features_valid, labels_valid)
print("Accuracy tập test:", acc)
| true
|
ad489844ea60ee6e9d4adc5a8a60580d9dab1362
|
Python
|
apulps/LeetCode
|
/tests.py
|
UTF-8
| 28,707
| 3.09375
| 3
|
[] |
no_license
|
import unittest
from array_problems.remove_duplicates import remove_duplicates, remove_duplicates_2
from easy_problems.two_sum import two_sum, two_sum_2
from easy_problems.reverse_integer import reverse_integer
from easy_problems.running_sum import running_sum, running_sum_2, running_sum_3
from easy_problems.kids_with_candies import kids_with_candies, kids_with_candies_2
from easy_problems.shuffle import shuffle
from easy_problems.num_identical_pairs import num_identical_pairs
from easy_problems.defang_IP_addr import defang_IP_addr
from easy_problems.num_jewels_in_stones import num_jewels_in_stones, num_jewels_in_stones_2
from easy_problems.number_of_steps import number_of_steps, number_of_steps_2
from easy_problems.shuffle_array import shuffle_array
from easy_problems.smaller_numbers_than_current import smaller_numbers_than_current
from easy_problems.subtract_product_and_sum import subtract_product_and_sum
from easy_problems.decompress_RLE_list import decompress_RLE_list
from easy_problems.max_depth import max_depth
from easy_problems.create_target_array import create_target_array, create_target_array_2
from easy_problems.xor_operation import xor_operation
from easy_problems.parking_system import ParkingSystem
from easy_problems.reverse_string import reverse_string, reverse_string_2
from easy_problems.depth_of_binary_tree import depth_of_binary_tree
from easy_problems.single_number import single_number, single_number_2 ,single_number_3, single_number_4
from easy_problems.delete_node_linked_list import delete_node_linked_list
from easy_problems.reverse_linkedlist import reverse_linkedlist
from easy_problems.fizz_buzz import fizz_buzz
from easy_problems.majority_element import majority_element
from easy_problems.sorted_array_to_BTS import sorted_array_to_BTS
from easy_problems.move_zeroes import move_zeroes, move_zeroes_2
from medium_problems.subrectangle_queries import SubrectangleQueries
from medium_problems.group_the_people import group_the_people
from medium_problems.max_increase_keeping_skyline import max_increase_keeping_skyline
from medium_problems.get_target_copy import get_target_copy
from medium_problems.deepest_leaves_sum import deepest_leaves_sum
from medium_problems.permute import permute, permute_2
from medium_problems.inorder_traversal import inorder_traversal, inorder_traversal_2
from assets.problems_data_structures import TreeNode, LinkedList
class TestArrayProblems(unittest.TestCase):
    """Tests for the duplicate-removal solutions in array_problems/.

    Each function removes duplicates from a sorted list in place and
    returns the new logical length; both the return value and the
    mutated list are checked.
    """

    def test_remove_duplicates(self):
        nums = [1,1,2]
        result = remove_duplicates(nums)
        self.assertEqual(result, 2)
        self.assertEqual(nums, [1,2])
        nums = [0,0,1,1,1,2,2,3,3,4]
        result = remove_duplicates(nums)
        self.assertEqual(result, 5)
        self.assertEqual(nums, [0,1,2,3,4])
        nums = [1,1]
        result = remove_duplicates(nums)
        self.assertEqual(result, 1)
        self.assertEqual(nums, [1])
        # Edge case: empty input removes nothing.
        nums = []
        result = remove_duplicates(nums)
        self.assertEqual(result, 0)

    def test_remove_duplicates_2(self):
        nums = [1,1,2]
        result = remove_duplicates_2(nums)
        self.assertEqual(result, 2)
        self.assertEqual(nums, [1,2])
        nums = [0,0,1,1,1,2,2,3,3,4]
        result = remove_duplicates_2(nums)
        self.assertEqual(result, 5)
        self.assertEqual(nums, [0,1,2,3,4])
        nums = [1,1]
        result = remove_duplicates_2(nums)
        self.assertEqual(result, 1)
        self.assertEqual(nums, [1])
class TestEasyProblems(unittest.TestCase):
    """Tests for the solutions under easy_problems/.

    Each test_* method exercises one LeetCode "easy" solution (and its
    alternative implementations, suffixed _2/_3/_4) against the
    published examples plus a few edge cases.
    """

    def test_two_sum(self):
        nums = [2,7,11,15]
        target = 9
        result = two_sum(nums, target)
        self.assertEqual(result, [0,1])
        nums = [3,2,4]
        target = 6
        result = two_sum(nums, target)
        self.assertEqual(result, [1,2])
        nums = [3,3]
        target = 6
        result = two_sum(nums, target)
        self.assertEqual(result, [0,1])

    def test_two_sum_2(self):
        nums = [2,7,11,15]
        target = 9
        result = two_sum_2(nums, target)
        self.assertEqual(result, [0,1])
        nums = [3,2,4]
        target = 6
        result = two_sum_2(nums, target)
        self.assertEqual(result, [1,2])
        nums = [3,3]
        target = 6
        result = two_sum_2(nums, target)
        self.assertEqual(result, [0,1])

    def test_reverse_integer(self):
        x = 123
        result = reverse_integer(x)
        self.assertEqual(result, 321)
        # Sign is preserved when reversing.
        x = -123
        result = reverse_integer(x)
        self.assertEqual(result, -321)
        # Trailing zeros drop off.
        x = 120
        result = reverse_integer(x)
        self.assertEqual(result, 21)
        x = 0
        result = reverse_integer(x)
        self.assertEqual(result, 0)

    def test_running_sum(self):
        nums = [1,2,3,4]
        result = running_sum(nums)
        self.assertEqual(result, [1,3,6,10])
        nums = [1,1,1,1,1]
        result = running_sum(nums)
        self.assertEqual(result, [1,2,3,4,5])
        nums = [3,1,2,10,1]
        result = running_sum(nums)
        self.assertEqual(result, [3,4,6,16,17])

    def test_running_sum_2(self):
        nums = [1,2,3,4]
        result = running_sum_2(nums)
        self.assertEqual(result, [1,3,6,10])
        nums = [1,1,1,1,1]
        result = running_sum_2(nums)
        self.assertEqual(result, [1,2,3,4,5])
        nums = [3,1,2,10,1]
        result = running_sum_2(nums)
        self.assertEqual(result, [3,4,6,16,17])

    def test_running_sum_3(self):
        nums = [1,2,3,4]
        result = running_sum_3(nums)
        self.assertEqual(result, [1,3,6,10])
        nums = [1,1,1,1,1]
        result = running_sum_3(nums)
        self.assertEqual(result, [1,2,3,4,5])
        nums = [3,1,2,10,1]
        result = running_sum_3(nums)
        self.assertEqual(result, [3,4,6,16,17])

    def test_kids_with_candies(self):
        candies = [2,3,5,1,3]
        extra_candies = 3
        result = kids_with_candies(candies, extra_candies)
        self.assertEqual(result, [True,True,True,False,True])
        candies = [4,2,1,1,2]
        extra_candies = 1
        result = kids_with_candies(candies, extra_candies)
        self.assertEqual(result, [True,False,False,False,False])
        candies = [12,1,12]
        extra_candies = 10
        result = kids_with_candies(candies, extra_candies)
        self.assertEqual(result, [True,False,True])

    def test_kids_with_candies_2(self):
        candies = [2,3,5,1,3]
        extra_candies = 3
        result = kids_with_candies_2(candies, extra_candies)
        self.assertEqual(result, [True,True,True,False,True])
        candies = [4,2,1,1,2]
        extra_candies = 1
        result = kids_with_candies_2(candies, extra_candies)
        self.assertEqual(result, [True,False,False,False,False])
        candies = [12,1,12]
        extra_candies = 10
        result = kids_with_candies_2(candies, extra_candies)
        self.assertEqual(result, [True,False,True])

    def test_shuffle(self):
        # Interleave the first n elements with the last n elements.
        nums = [2,5,1,3,4,7]
        n = 3
        result = shuffle(nums, n)
        self.assertEqual(result, [2,3,5,4,1,7])
        nums = [1,2,3,4,4,3,2,1]
        n = 4
        result = shuffle(nums, n)
        self.assertEqual(result, [1,4,2,3,3,2,4,1])
        nums = [1,1,2,2]
        n = 2
        result = shuffle(nums, n)
        self.assertEqual(result, [1,2,1,2])

    def test_num_identical_pairs(self):
        nums = [1,2,3,1,1,3]
        result = num_identical_pairs(nums)
        self.assertEqual(result, 4)
        nums = [1,1,1,1]
        result = num_identical_pairs(nums)
        self.assertEqual(result, 6)
        nums = [1,2,3]
        result = num_identical_pairs(nums)
        self.assertEqual(result, 0)
        nums = []
        result = num_identical_pairs(nums)
        self.assertEqual(result, 0)

    def test_defang_IP_addr(self):
        address = "1.1.1.1"
        result = defang_IP_addr(address)
        self.assertEqual(result, "1[.]1[.]1[.]1")
        address = "255.100.50.0"
        result = defang_IP_addr(address)
        self.assertEqual(result, "255[.]100[.]50[.]0")

    def test_num_jewels_in_stones(self):
        # J: jewel characters (case-sensitive), S: stones held.
        J = "aA"
        S = "aAAbbbb"
        result = num_jewels_in_stones(J, S)
        self.assertEqual(result, 3)
        J = "z"
        S = "ZZ"
        result = num_jewels_in_stones(J, S)
        self.assertEqual(result, 0)

    def test_num_jewels_in_stones_2(self):
        J = "aA"
        S = "aAAbbbb"
        result = num_jewels_in_stones_2(J, S)
        self.assertEqual(result, 3)
        J = "z"
        S = "ZZ"
        result = num_jewels_in_stones_2(J, S)
        self.assertEqual(result, 0)

    def test_number_of_steps(self):
        num = 14
        result = number_of_steps(num)
        self.assertEqual(result, 6)
        num = 8
        result = number_of_steps(num)
        self.assertEqual(result, 4)
        num = 123
        result = number_of_steps(num)
        self.assertEqual(result, 12)

    def test_number_of_steps_2(self):
        num = 14
        result = number_of_steps_2(num)
        self.assertEqual(result, 6)
        num = 8
        result = number_of_steps_2(num)
        self.assertEqual(result, 4)
        num = 123
        result = number_of_steps_2(num)
        self.assertEqual(result, 12)

    def test_shuffle_array(self):
        # Character at position i moves to indices[i] in the result.
        s = "aiohn"
        indices = [3,1,4,2,0]
        result = shuffle_array(s, indices)
        self.assertEqual(result, "nihao")
        s = "aaiougrt"
        indices = [4,0,2,6,7,3,1,5]
        result = shuffle_array(s, indices)
        self.assertEqual(result, "arigatou")
        s = "art"
        indices = [1,0,2]
        result = shuffle_array(s, indices)
        self.assertEqual(result, "rat")
        s = "abc"
        indices = [0,1,2]
        result = shuffle_array(s, indices)
        self.assertEqual(result, "abc")

    def test_smaller_numbers_than_current(self):
        nums = [8,1,2,2,3]
        result = smaller_numbers_than_current(nums)
        self.assertEqual(result, [4,0,1,1,3])
        nums = [6,5,4,8]
        result = smaller_numbers_than_current(nums)
        self.assertEqual(result, [2,1,0,3])
        nums = [7,7,7,7]
        result = smaller_numbers_than_current(nums)
        self.assertEqual(result, [0,0,0,0])

    def test_subtract_product_and_sum(self):
        # Product of digits minus sum of digits.
        n = 234
        result = subtract_product_and_sum(n)
        self.assertEqual(result, 15)
        n = 4421
        result = subtract_product_and_sum(n)
        self.assertEqual(result, 21)
        n = 450
        result = subtract_product_and_sum(n)
        self.assertEqual(result, -9)

    def test_decompress_RLE_list(self):
        # Input pairs are (frequency, value).
        nums = [1,2,3,4]
        result = decompress_RLE_list(nums)
        self.assertEqual(result, [2,4,4,4])
        nums = [1,1,2,3]
        result = decompress_RLE_list(nums)
        self.assertEqual(result, [1,3,3])

    def test_max_depth(self):
        # Maximum nesting depth of parentheses in the expression.
        s = "(1+(2*3)+((8)/4))+1"
        result = max_depth(s)
        self.assertEqual(result, 3)
        s = "(1)+((2))+(((3)))"
        result = max_depth(s)
        self.assertEqual(result, 3)
        s = "1+(2*3)/(2-1)"
        result = max_depth(s)
        self.assertEqual(result, 1)
        s = "1"
        result = max_depth(s)
        self.assertEqual(result, 0)

    def test_create_target_array(self):
        nums = [0,1,2,3,4]
        index = [0,1,2,2,1]
        result = create_target_array(nums, index)
        self.assertEqual(result, [0,4,1,3,2])
        nums = [1,2,3,4,0]
        index = [0,1,2,3,0]
        result = create_target_array(nums, index)
        self.assertEqual(result, [0,1,2,3,4])
        nums = [1]
        index = [0]
        result = create_target_array(nums, index)
        self.assertEqual(result, [1])

    def test_create_target_array_2(self):
        nums = [0,1,2,3,4]
        index = [0,1,2,2,1]
        result = create_target_array_2(nums, index)
        self.assertEqual(result, [0,4,1,3,2])
        nums = [1,2,3,4,0]
        index = [0,1,2,3,0]
        result = create_target_array_2(nums, index)
        self.assertEqual(result, [0,1,2,3,4])
        nums = [1]
        index = [0]
        result = create_target_array_2(nums, index)
        self.assertEqual(result, [1])

    def test_xor_operation(self):
        # XOR of start, start+2, ..., start+2*(n-1).
        n = 5
        start = 0
        result = xor_operation(n, start)
        self.assertEqual(result, 8)
        n = 4
        start = 3
        result = xor_operation(n, start)
        self.assertEqual(result, 8)
        n = 1
        start = 7
        result = xor_operation(n, start)
        self.assertEqual(result, 7)
        n = 10
        start = 5
        result = xor_operation(n, start)
        self.assertEqual(result, 2)

    def test_parking_system(self):
        # Constructor args are the slot counts for car types 1, 2, 3.
        parking_system = ParkingSystem(1, 1, 0)
        self.assertTrue(parking_system.add_car(1))
        self.assertTrue(parking_system.add_car(2))
        self.assertFalse(parking_system.add_car(3))
        self.assertFalse(parking_system.add_car(1))
        parking_system = ParkingSystem(0, 0, 1)
        self.assertTrue(parking_system.add_car(3))
        # Unknown car type raises.
        self.assertRaises(ValueError, parking_system.add_car, 4)

    def test_reverse_string(self):
        # In-place reversal of a list of characters.
        s = ['h','e','l','l','o']
        reverse_string(s)
        self.assertEqual(s, ['o','l','l','e','h'])
        s = ['a','b','c','d','e']
        reverse_string(s)
        self.assertEqual(s, ['e','d','c','b','a'])

    def test_reverse_string_2(self):
        s = ['h','e','l','l','o']
        reverse_string_2(s)
        self.assertEqual(s, ['o','l','l','e','h'])
        s = ['a','b','c','d','e']
        reverse_string_2(s)
        self.assertEqual(s, ['e','d','c','b','a'])

    def test_depth_of_binary_tree(self):
        root = TreeNode(3)
        root.left = TreeNode(9)
        root.right = TreeNode(20)
        root.right.left = TreeNode(15)
        root.right.right = TreeNode(7)
        result = depth_of_binary_tree(root)
        self.assertEqual(result, 3)
        root = TreeNode(8)
        root.left = TreeNode(6)
        root.right = TreeNode(10)
        root.right.left = TreeNode(5)
        root.right.right = TreeNode(15)
        root.left.left = TreeNode(2)
        root.left.right = TreeNode(7)
        root.left.right.left = TreeNode(4)
        root.left.right.right = TreeNode(21)
        result = depth_of_binary_tree(root)
        self.assertEqual(result, 4)

    def test_single_number(self):
        # Every element appears twice except one.
        nums = [2,2,1]
        result = single_number(nums)
        self.assertEqual(result, 1)
        nums = [4,1,2,1,2]
        result = single_number(nums)
        self.assertEqual(result, 4)
        nums = [1]
        result = single_number(nums)
        self.assertEqual(result, 1)

    def test_single_number_2(self):
        nums = [2,2,1]
        result = single_number_2(nums)
        self.assertEqual(result, 1)
        nums = [4,1,2,1,2]
        result = single_number_2(nums)
        self.assertEqual(result, 4)
        nums = [1]
        result = single_number_2(nums)
        self.assertEqual(result, 1)

    def test_single_number_3(self):
        nums = [2,2,1]
        result = single_number_3(nums)
        self.assertEqual(result, 1)
        nums = [4,1,2,1,2]
        result = single_number_3(nums)
        self.assertEqual(result, 4)
        nums = [1]
        result = single_number_3(nums)
        self.assertEqual(result, 1)

    def test_single_number_4(self):
        nums = [2,2,1]
        result = single_number_4(nums)
        self.assertEqual(result, 1)
        nums = [4,1,2,1,2]
        result = single_number_4(nums)
        self.assertEqual(result, 4)
        nums = [1]
        result = single_number_4(nums)
        self.assertEqual(result, 1)

    def test_delete_node_linked_list(self):
        # Deleting a node given only that node (not the head).
        head = LinkedList(4)
        head.next = LinkedList(5)
        head.next.next = LinkedList(1)
        head.next.next.next = LinkedList(9)
        delete_node_linked_list(head) # remove 4
        self.assertEqual(head.val, 5)
        self.assertEqual(head.next.val, 1)
        self.assertEqual(head.next.next.val, 9)
        self.assertEqual(head.next.next.next, None)
        head = LinkedList(4)
        head.next = LinkedList(5)
        head.next.next = LinkedList(1)
        head.next.next.next = LinkedList(9)
        delete_node_linked_list(head.next) # remove 5
        self.assertEqual(head.val, 4)
        self.assertEqual(head.next.val, 1)
        self.assertEqual(head.next.next.val, 9)
        self.assertEqual(head.next.next.next, None)
        head = LinkedList(4)
        head.next = LinkedList(5)
        head.next.next = LinkedList(1)
        head.next.next.next = LinkedList(9)
        delete_node_linked_list(head.next.next) # remove 1
        self.assertEqual(head.val, 4)
        self.assertEqual(head.next.val, 5)
        self.assertEqual(head.next.next.val, 9)
        self.assertEqual(head.next.next.next, None)

    def test_reverse_linkedlist(self):
        head = LinkedList(1)
        head.next = LinkedList(2)
        head.next.next = LinkedList(3)
        head.next.next.next = LinkedList(4)
        head.next.next.next.next = LinkedList(5)
        result = reverse_linkedlist(head)
        self.assertEqual(result.val, 5)
        self.assertEqual(result.next.val, 4)
        self.assertEqual(result.next.next.val, 3)
        self.assertEqual(result.next.next.next.val, 2)
        self.assertEqual(result.next.next.next.next.val, 1)
        self.assertEqual(result.next.next.next.next.next, None)
        head = LinkedList(6)
        head.next = LinkedList(1)
        head.next.next = LinkedList(9)
        result = reverse_linkedlist(head)
        self.assertEqual(result.val, 9)
        self.assertEqual(result.next.val, 1)
        self.assertEqual(result.next.next.val, 6)
        self.assertEqual(result.next.next.next, None)

    def test_fizz_buzz(self):
        n = 15
        result = fizz_buzz(n)
        self.assertEqual(result, ["1","2","Fizz","4","Buzz","Fizz","7","8","Fizz","Buzz","11","Fizz","13","14","FizzBuzz"])

    def test_majority_element(self):
        nums = [3,2,3]
        result = majority_element(nums)
        self.assertEqual(result, 3)
        nums = [2,2,1,1,1,2,2]
        result = majority_element(nums)
        self.assertEqual(result, 2)
        nums = [3,3,4]
        result = majority_element(nums)
        self.assertEqual(result, 3)

    def test_sorted_array_to_BTS(self):
        # Builds a height-balanced BST from a sorted array.
        nums = [-10,-3,0,5,9]
        result = sorted_array_to_BTS(nums)
        self.assertEqual(result.val, 0)
        self.assertEqual(result.left.val, -3)
        self.assertEqual(result.right.val, 9)
        self.assertEqual(result.left.left.val, -10)
        self.assertEqual(result.left.right, None)
        self.assertEqual(result.right.left.val, 5)

    def test_move_zeroes(self):
        # Zeros move to the tail while relative order of the rest is kept.
        nums = [0,1,0,3,12]
        move_zeroes(nums)
        self.assertEqual(nums, [1,3,12,0,0])
        nums = [1,2,3,4,5,0,7,8,1,0,19]
        move_zeroes(nums)
        self.assertEqual(nums, [1,2,3,4,5,7,8,1,19,0,0])
        nums = []
        move_zeroes(nums)
        self.assertEqual(nums, [])
        nums = [0,0,1]
        move_zeroes(nums)
        self.assertEqual(nums, [1,0,0])

    def test_move_zeroes_2(self):
        nums = [0,1,0,3,12]
        move_zeroes_2(nums)
        self.assertEqual(nums, [1,3,12,0,0])
        nums = [1,2,3,4,5,0,7,8,1,0,19]
        move_zeroes_2(nums)
        self.assertEqual(nums, [1,2,3,4,5,7,8,1,19,0,0])
        nums = []
        move_zeroes_2(nums)
        self.assertEqual(nums, [])
        nums = [0,0,1]
        move_zeroes_2(nums)
        self.assertEqual(nums, [1,0,0])
class TestMediumProblems(unittest.TestCase):
    """Tests for the solutions under medium_problems/.

    Exercises each LeetCode "medium" solution (and alternates suffixed
    _2) against the published examples; tree-based tests build fixtures
    from assets.problems_data_structures.TreeNode.
    """

    def test_subrectangle_queries(self):
        subrectangle_queries = SubrectangleQueries([[1,2,1],[4,3,4],[3,2,1],[1,1,1]])
        result = subrectangle_queries.get_value(2,2)
        self.assertEqual(result, 1)
        result = subrectangle_queries.get_value(1,0)
        self.assertEqual(result, 4)
        result = subrectangle_queries.get_value(3,1)
        self.assertEqual(result, 1)
        # update_subrectangle(r1, c1, r2, c2, v) fills the rectangle with v.
        subrectangle_queries.update_subrectangle(0,0,1,2,100)
        result = subrectangle_queries.get_value(0,1)
        self.assertEqual(result, 100)
        subrectangle_queries.update_subrectangle(2,0,2,2,90)
        result = subrectangle_queries.get_value(2,1)
        self.assertEqual(result, 90)
        subrectangle_queries.update_subrectangle(3,0,3,2,80)
        result = subrectangle_queries.get_value(3,1)
        self.assertEqual(result, 80)
        subrectangle_queries = SubrectangleQueries([[6,9,6,1,2],[8,8,6,5,9],[7,6,10,8,2],[7,7,4,9,1]])
        subrectangle_queries.update_subrectangle(1,4,2,4,5)
        result = subrectangle_queries.get_value(3,4)
        self.assertEqual(result, 1)
        subrectangle_queries.update_subrectangle(3,4,3,4,8)
        result = subrectangle_queries.get_value(2,0)
        self.assertEqual(result, 7)

    def test_subrectangle_queries_2(self):
        # Same scenarios against the alternative update implementation.
        subrectangle_queries = SubrectangleQueries([[1,2,1],[4,3,4],[3,2,1],[1,1,1]])
        result = subrectangle_queries.get_value(2,2)
        self.assertEqual(result, 1)
        result = subrectangle_queries.get_value(1,0)
        self.assertEqual(result, 4)
        result = subrectangle_queries.get_value(3,1)
        self.assertEqual(result, 1)
        subrectangle_queries.update_subrectangle_2(0,0,1,2,100)
        result = subrectangle_queries.get_value(0,1)
        self.assertEqual(result, 100)
        subrectangle_queries.update_subrectangle_2(2,0,2,2,90)
        result = subrectangle_queries.get_value(2,1)
        self.assertEqual(result, 90)
        subrectangle_queries.update_subrectangle_2(3,0,3,2,80)
        result = subrectangle_queries.get_value(3,1)
        self.assertEqual(result, 80)
        subrectangle_queries = SubrectangleQueries([[6,9,6,1,2],[8,8,6,5,9],[7,6,10,8,2],[7,7,4,9,1]])
        subrectangle_queries.update_subrectangle_2(1,4,2,4,5)
        result = subrectangle_queries.get_value(3,4)
        self.assertEqual(result, 1)
        subrectangle_queries.update_subrectangle_2(3,4,3,4,8)
        result = subrectangle_queries.get_value(2,0)
        self.assertEqual(result, 7)

    def test_group_the_people(self):
        group_sizes = [3,3,3,3,3,1,3]
        result = group_the_people(group_sizes)
        self.assertEqual(result, [[0,1,2],[3,4,6],[5]])
        group_sizes = [2,1,3,3,3,2]
        result = group_the_people(group_sizes)
        self.assertEqual(result, [[0,5],[1],[2,3,4]])

    def test_max_increase_keeping_skyline(self):
        grid = [[3,0,8,4],[2,4,5,7],[9,2,6,3],[0,3,1,0]]
        result = max_increase_keeping_skyline(grid)
        self.assertEqual(result, 35)
        grid = [[5,1,4],[0,2,3],[7,1,9]]
        result = max_increase_keeping_skyline(grid)
        self.assertEqual(result, 6)
        grid = [[1,4,2,7,9],[8,3,5,7,4],[5,9,2,3,2],[3,8,1,5,1],[6,9,2,9,0]]
        result = max_increase_keeping_skyline(grid)
        self.assertEqual(result, 79)

    def test_get_target_copy(self):
        # The returned node must be the one in the clone (identity check),
        # not merely an equal-valued node.
        tree = TreeNode(7)
        tree.right = TreeNode(3)
        tree.left = TreeNode(4)
        tree.right.right = TreeNode(19)
        tree.right.left = TreeNode(6)
        cloned = tree
        target = TreeNode(3)
        result = get_target_copy(tree, cloned, target)
        assert result is cloned.right
        tree = TreeNode(7)
        cloned = tree
        target = TreeNode(7)
        result = get_target_copy(tree, cloned, target)
        assert result is cloned
        tree = TreeNode(8)
        tree.right = TreeNode(6)
        tree.right.right = TreeNode(5)
        tree.right.right.right = TreeNode(4)
        tree.right.right.right.right = TreeNode(3)
        tree.right.right.right.right.right = TreeNode(2)
        tree.right.right.right.right.right.right = TreeNode(1)
        cloned = tree
        target = TreeNode(4)
        result = get_target_copy(tree, cloned, target)
        assert result is cloned.right.right.right
        tree = TreeNode(1)
        tree.right = TreeNode(3)
        tree.left = TreeNode(2)
        tree.right.right = TreeNode(7)
        tree.right.left = TreeNode(6)
        tree.left.right = TreeNode(5)
        tree.left.left = TreeNode(4)
        tree.left.left.right = TreeNode(9)
        tree.left.left.left = TreeNode(8)
        tree.left.right.left = TreeNode(10)
        cloned = tree
        target = TreeNode(5)
        result = get_target_copy(tree, cloned, target)
        assert result is cloned.left.right
        tree = TreeNode(1)
        tree.left = TreeNode(2)
        tree.left.left = TreeNode(3)
        cloned = tree
        target = TreeNode(2)
        result = get_target_copy(tree, cloned, target)
        assert result is cloned.left

    def test_deepest_leaves_sum(self):
        tree = TreeNode(1)
        tree.right = TreeNode(3)
        tree.left = TreeNode(2)
        tree.right.right = TreeNode(6)
        tree.left.right = TreeNode(5)
        tree.left.left = TreeNode(4)
        tree.left.left.left = TreeNode(7)
        tree.right.right.right = TreeNode(8)
        result = deepest_leaves_sum(tree)
        self.assertEqual(result, 15)
        tree = TreeNode(1)
        tree.right = TreeNode(3)
        tree.left = TreeNode(2)
        tree.right.right = TreeNode(6)
        tree.left.right = TreeNode(5)
        tree.left.left = TreeNode(4)
        result = deepest_leaves_sum(tree)
        self.assertEqual(result, 15)
        tree = TreeNode(3)
        tree.right = TreeNode(5)
        tree.left = TreeNode(2)
        tree.right.right = TreeNode(8)
        tree.left.left = TreeNode(1)
        result = deepest_leaves_sum(tree)
        self.assertEqual(result, 9)
        # Edge case: an empty tree sums to 0.
        tree = None
        result = deepest_leaves_sum(tree)
        self.assertEqual(result, 0)

    def test_permute(self):
        # Order of permutations is implementation-defined, so only
        # membership is asserted.
        nums = [1,2,3]
        result = permute(nums)
        self.assertIn([1,2,3], result)
        self.assertIn([1,3,2], result)
        self.assertIn([2,1,3], result)
        self.assertIn([2,3,1], result)
        self.assertIn([3,1,2], result)
        self.assertIn([3,2,1], result)

    def test_permute_2(self):
        nums = [1,2,3]
        result = permute_2(nums)
        self.assertIn([1,2,3], result)
        self.assertIn([1,3,2], result)
        self.assertIn([2,1,3], result)
        self.assertIn([2,3,1], result)
        self.assertIn([3,1,2], result)
        self.assertIn([3,2,1], result)

    def test_inorder_traversal(self):
        root = TreeNode(1)
        root.left = TreeNode(2)
        result = inorder_traversal(root)
        self.assertEqual(result, [2,1])
        root = TreeNode(1)
        root.left = TreeNode(2)
        root.right = TreeNode(3)
        root.left.left = TreeNode(4)
        root.left.right = TreeNode(5)
        root.right.left = TreeNode(6)
        root.right.right = TreeNode(7)
        root.right.left.left = TreeNode(8)
        root.right.right.right = TreeNode(9)
        result = inorder_traversal(root)
        self.assertEqual(result, [4,2,5,1,8,6,3,7,9])
        root = TreeNode(1)
        root.right = TreeNode(2)
        root.right.left = TreeNode(3)
        result = inorder_traversal(root)
        self.assertEqual(result, [1,3,2])
        root = TreeNode(1)
        result = inorder_traversal(root)
        self.assertEqual(result, [1])
        root = None
        result = inorder_traversal(root)
        self.assertEqual(result, [])

    def test_inorder_traversal_2(self):
        root = TreeNode(1)
        root.left = TreeNode(2)
        result = inorder_traversal_2(root)
        self.assertEqual(result, [2,1])
        root = TreeNode(1)
        root.left = TreeNode(2)
        root.right = TreeNode(3)
        root.left.left = TreeNode(4)
        root.left.right = TreeNode(5)
        root.right.left = TreeNode(6)
        root.right.right = TreeNode(7)
        root.right.left.left = TreeNode(8)
        root.right.right.right = TreeNode(9)
        result = inorder_traversal_2(root)
        self.assertEqual(result, [4,2,5,1,8,6,3,7,9])
        root = TreeNode(1)
        root.right = TreeNode(2)
        root.right.left = TreeNode(3)
        result = inorder_traversal_2(root)
        self.assertEqual(result, [1,3,2])
        root = TreeNode(1)
        result = inorder_traversal_2(root)
        self.assertEqual(result, [1])
        root = None
        result = inorder_traversal_2(root)
        self.assertEqual(result, [])
# Run the whole suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| true
|
a106ff3bd084218337129542f596344b29b292b9
|
Python
|
jpagani1984/Projects
|
/hello_flask/Understanding_routing.py
|
UTF-8
| 1,068
| 3.015625
| 3
|
[] |
no_license
|
from flask import Flask
# Flask application instance keyed on this module's import name.
app = Flask(__name__)
# Prints the import name the app was created with (e.g. '__main__').
print(__name__)
@app.route('/dojo')
def Dojo():
    """Respond to GET /dojo with the literal string 'Dojo'."""
    return 'Dojo'
@app.route('/say/flask')
def hi_flask():
    """Respond to GET /say/flask with a greeting."""
    return 'Hi Flask'
@app.route('/say/micheal')
def say_micheal():
    """Respond to GET /say/micheal with a greeting."""
    return 'HI MICHEAL'
@app.route('/say/john')
def say_john():
    """Respond to GET /say/john with a greeting."""
    return 'HI JOHN'
@app.route('/repeat/35/hello')
def say_hello():
    """Respond to GET /repeat/35/hello with 'hello' repeated 35 times."""
    # int(35) in the original was a redundant conversion of an int literal.
    return 'hello' * 35
@app.route('/repeat/99/dogs')
def repeat_dogs():
    """Respond to GET /repeat/99/dogs with 'DOGS!!' repeated 99 times."""
    # int(99) in the original was a redundant conversion of an int literal.
    return 'DOGS!!' * 99
# Start the development server (auto-reload enabled) when run directly.
if __name__=="__main__":
    app.run(debug=True)
| true
|
c3f702bd8a29294316257b50a4c1d4a71e74706f
|
Python
|
BadrYoubiIdrissi/solvepuzzle
|
/puzzle.py
|
UTF-8
| 2,150
| 3.09375
| 3
|
[] |
no_license
|
import os
import numpy as np
import utils
import matplotlib.pyplot as plt
import matplotlib.image as image
from PIL import Image
from config import SAVE_FOLDER, HEIGHT, WIDTH, N_ROW, N_COL, HEIGHT_BLOCK, WIDTH_BLOCK
class Puzzle:
    '''
    A puzzle built from a grayscale image cut into a grid of blocks.

    Two representations are kept:
    1. self.original_img : np.array of shape (HEIGHT, WIDTH)
    2. self.cut_img : np.array of shape (N_ROW, N_COL, HEIGHT_BLOCK, WIDTH_BLOCK)
    so self.cut_img[i][j] is the sub-image at row i and column j.
    '''

    def __init__(self, filepath):
        """Load `filepath`, convert to grayscale, resize, and cut into blocks."""
        self.img_path = filepath
        self.original_img = Image.open(self.img_path).convert("L")
        # resize to the configured canvas before cutting
        self.original_img = self.original_img.resize((WIDTH, HEIGHT))
        self.original_img = np.asarray(self.original_img)
        # (N_ROW, N_COL, HEIGHT_BLOCK, WIDTH_BLOCK) array of sub-images
        self.cut_img = utils.cut_image(self.original_img)

    def print(self):
        '''
        Show the whole puzzle in its current (possibly shuffled) state.
        '''
        img = utils.recreate_cut_image(self.cut_img)
        plt.imshow(img, cmap='gray')
        plt.show()

    def print_block(self, i, j):
        '''
        Show the block found at row i and col j with:
        0 <= i <= N_ROW - 1
        0 <= j <= N_COL - 1
        '''
        # NOTE(review): assert is stripped under -O; kept to preserve the
        # original failure behavior for callers
        assert (i in range(N_ROW) and j in range(N_COL))
        plt.imshow(self.cut_img[i][j], cmap='gray')
        plt.show()

    def shuffle(self):
        """Randomly permute the puzzle's blocks in place.

        reshape of the contiguous cut_img returns a view, so shuffling the
        flattened (N_ROW*N_COL, H, W) view permutes the underlying blocks.
        (Removed a trailing `self.cut_img.reshape(...)` whose result was
        discarded — reshape is not in-place, so that line was a no-op.)
        """
        np.random.shuffle(self.cut_img.reshape(N_ROW * N_COL, HEIGHT_BLOCK, WIDTH_BLOCK))

    def move(self, i1, j1, i2, j2):
        """Swap the blocks at (i1, j1) and (i2, j2)."""
        # copies are required: plain views would alias the data being overwritten
        block1 = self.cut_img[i1][j1].copy()
        block2 = self.cut_img[i2][j2].copy()
        self.cut_img[i1, j1], self.cut_img[i2, j2] = block2, block1

    def save(self, filename):
        """Save the current puzzle image to SAVE_FOLDER/filename.

        Existing files are never overwritten.
        """
        filepath = os.path.join(SAVE_FOLDER, filename)
        if not os.path.exists(filepath):
            # recreate the image once (it was previously computed twice,
            # with the first result unused)
            plt.imsave(filepath, utils.recreate_cut_image(self.cut_img), cmap="gray")
| true
|
98198d78ecd24735cd6a53f5a66d09ac84f90385
|
Python
|
youngung/MK
|
/mk/materials/func_hard_char.py
|
UTF-8
| 1,008
| 2.921875
| 3
|
[] |
no_license
|
# ### characterize hardening functions
import numpy as np
from scipy.optimize import curve_fit
def wrapper(func, *args):
    """
    Freeze the fitted parameters *args into a hardening function.

    Arguments
    ---------
    func : hardening law called as func(strain, *args)
    *args : parameters to bind into the law

    Returns
    -------
    A callable of strain only: strain -> func(strain, *args).
    """
    def f_hard_char(x):
        """
        Evaluate the bound hardening law at strain ``x``.
        """
        # the closure carries func and its frozen parameters
        return func(x, *args)
    return f_hard_char
def main(exp_dat, f_hard, params):
    """
    Characterize a hardening law against experimental data.

    Arguments
    ---------
    exp_dat : (strain, stress) data pair
    f_hard : hardening function f(strain, *params)
    params : initial parameter guess for the fit

    Returns
    -------
    (strain-only characterized function, optimal parameters, covariance)
    """
    strains, stresses = exp_dat
    popt, pcov = curve_fit(f_hard, strains, stresses, p0=params)
    # bind the fitted parameters so downstream code sees f(strain) only
    return wrapper(f_hard, *popt), popt, pcov
def test1():
    """Smoke test: fit the Swift law to data generated from known parameters."""
    from func_hard import func_swift
    guess = (518.968, 0.0007648, 0.28985)  # ks, e0, n
    strains = np.linspace(0, 0.2, 1000)
    stresses = func_swift(strains, *guess)
    # result is the (function, popt, pcov) triple; unused — this only
    # checks that the fit runs to completion
    result = main((strains, stresses), func_swift, guess)
if __name__=='__main__':
    # quick self-check: fit the Swift law against synthetic data
    test1()
| true
|
6806bf3b66f3cdfc337230db45b469e77d4d7178
|
Python
|
cashgithubs/mypro
|
/py_tools/qiubai_pyqt/qb0.2/qb_ui2.pyw
|
UTF-8
| 3,943
| 2.65625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Module implementing MainWindow.
"""
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import requests
import threading
from bs4 import BeautifulSoup
import datetime
from Ui_qb_ui2 import Ui_MainWindow
event = threading.Event()
class MainWindow(QMainWindow, Ui_MainWindow):
    """
    Main window: an endless-scroll list of scraped posts.

    Scrolling to the bottom of the list triggers a fetch of the next page;
    the actual scraping happens in ParseThread, which hands items back via
    a cross-thread signal connected to ProcessGui.
    """
    # becomes True after the first page request; afterwards further loads
    # only fire when the scrollbar reaches its maximum
    isinit = False

    def __init__(self, parent = None):
        """
        Constructor: build the Designer UI and watch the list widget's
        vertical scrollbar so LoadQB fires as the user scrolls.
        """
        QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.connect(self.listWidget.verticalScrollBar(), SIGNAL("valueChanged(int)"), self.LoadQB)

    def LoadQB(self, position):
        """
        Request the next page when scrolled to the bottom (always on first call).

        Wakes the parser thread via the module-level `event`; the HTTP work
        happens off the GUI thread.
        """
        max_position = self.listWidget.verticalScrollBar().maximum()
        if self.isinit:
            # after the first load, only fetch more when fully scrolled down
            if position < max_position:
                return
        self.isinit = True
        global event
        event.set()

    def ProcessGui(self, qb_content, image_content, have_img):
        """
        Append one scraped post (text plus optional image) to the list.

        Invoked in the GUI thread through the signal emitted by ParseThread.
        `image_content` is raw image bytes when `have_img` is true.
        """
        image = QImage()
        item_widget = QWidget()
        item_widget_layout = QVBoxLayout()
        item_widget_layout.setContentsMargins(0, 0, 0, 0)
        item_widget.setLayout(item_widget_layout)
        label = QLabel(qb_content)
        label.setWordWrap(True)
        item_widget_layout.addWidget(label)
        if have_img:
            image.loadFromData(image_content)
            image_label = QLabel()
            image_label.setPixmap(QPixmap.fromImage(image))
            item_widget_layout.addWidget(image_label)
        item = QListWidgetItem()
        # fixed row heights: taller when an image is attached
        if have_img:
            item.setSizeHint(QSize(100, 500))
        else:
            item.setSizeHint(QSize(100, 150))
        self.listWidget.addItem(item)
        self.listWidget.setItemWidget(item, item_widget)
class ParseThread(threading.Thread, QObject):
    """
    Background scraper: waits on the module-level `event`, fetches one page
    of posts per wake-up, and emits each post to the GUI thread.

    QObject is mixed in solely so the thread can emit() PyQt signals.
    """
    # next page number of the weekly listing to fetch
    page = 1

    def __init__(self):
        threading.Thread.__init__(self)
        QObject.__init__(self)

    def run(self):
        """Scrape pages forever; one page per event.set() from the GUI."""
        while(True):
            global event
            # block until the GUI requests another page, then reset the flag
            event.wait()
            event.clear()
            url = "http://www.qiushibaike.com/week/5/page/" + str(self.page)
            self.page += 1
            # NOTE(review): `re` is reused for both the page response and the
            # image response below — consider distinct names
            re = requests.get(url)
            html = BeautifulSoup(re.text)
            content_list = html.findAll("div", {"class":"article block untagged mb15"})
            for i in range(len(content_list)):
                have_img = False
                # steps by 2 — presumably skipping whitespace text nodes
                # between element children; confirm against the page markup
                for j in range(1, len(content_list[i].contents), 2):
                    if content_list[i].contents[j]['class'] == ['content']:
                        qb_content = content_list[i].contents[j].next
                    if content_list[i].contents[j]['class'] == ['thumb']:
                        image_url = content_list[i].contents[j].img['src']
                        re = requests.get(image_url)
                        image_content = re.content
                        have_img = True
                if have_img == False:
                    image_content = ''
                # hand the post to the GUI thread; PyQt_PyObject keeps the
                # Python objects intact across the connection
                self.emit(SIGNAL("addItem(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)"), qb_content, image_content, have_img)
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    ui = MainWindow()
    ui.setWindowIcon(QIcon('1.jpg'))
    ui.show()
    # daemon thread: the scraper dies with the GUI process
    pt = ParseThread()
    pt.setDaemon(True)
    pt.start()
    # old-style PyQt4 signal: parser thread hands scraped items to the GUI thread
    ui.connect(pt, SIGNAL("addItem(PyQt_PyObject, PyQt_PyObject, PyQt_PyObject)"), ui.ProcessGui)
    # kick off the first page load
    ui.LoadQB(0)
    sys.exit(app.exec_())
| true
|
3685c365effacfc7e64c01fd837923c46d5e4ef7
|
Python
|
florinpapa/muzee_romania
|
/app.py
|
UTF-8
| 8,890
| 2.625
| 3
|
[] |
no_license
|
import os
from flask import Flask, request, redirect
from flask import render_template
from re import sub, search
from os import listdir
from os.path import isfile, join
import pickle
import csv
UPLOAD_FOLDER = './static/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__, static_url_path='/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
    """Return True when `filename` has an extension in ALLOWED_EXTENSIONS.

    The comparison lowercases the extension so uploads like "photo.JPG"
    are accepted (the original check was case-sensitive and rejected them).
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def search_key(index, keyword):
    """Search the pickled museum dataset.

    Arguments
    ---------
    index : column index of the header to search in
    keyword : substring to look for (case-insensitive); "" returns all rows

    Returns
    -------
    list of dicts with keys 'cod', 'judet', 'nume', 'lat', 'lng'.
    """
    # load the dataset and its column headers
    with open('data.pkl', 'rb') as dict_file:
        dictionar = pickle.load(dict_file)
    with open('headers.hd', 'rb') as head_file:
        header = pickle.load(head_file)

    def _entry(i):
        # summary dict served to the templates; latitude/longitude use a
        # decimal comma in the dataset, normalised to a dot here
        return {'cod': dictionar[header[0]][i],
                'judet': dictionar[header[2]][i].decode(encoding="UTF-8"),
                'nume': dictionar[header[3]][i].decode(encoding="UTF-8"),
                'lat': sub(',', '.', dictionar[header[35]][i]),
                'lng': sub(',', '.', dictionar[header[36]][i])}

    muzee = []
    if keyword == "":
        # empty keyword: return every row
        for i in range(len(dictionar[header[0]])):
            muzee.append(_entry(i))
    else:
        keyword = keyword.lower()  # lowered once, not per iteration
        for i in range(len(dictionar[header[index]])):
            candidate = dictionar[header[index]][i].decode(encoding='UTF-8').lower()
            if keyword in candidate:
                muzee.append(_entry(i))
    return muzee
@app.route('/muzee/judet/<jud>')
def get_countys(jud):
    """List every museum located in county `jud`."""
    # column 2 of the dataset holds the county name
    matches = search_key(2, jud)
    return render_template('lista_muzee_judet.html', muzee=matches)
@app.route('/search')
def get_matches_void():
    """Search with an empty keyword, i.e. list every museum."""
    # an empty keyword makes search_key return all rows
    results = search_key(3, "")
    return render_template('search_result.html', muzee=results)
@app.route('/search/<keyword>')
def get_matches(keyword):
    """List museums whose name contains `keyword` (case-insensitive)."""
    # column 3 holds the Romanian museum name
    results = search_key(3, keyword)
    return render_template('search_result.html', muzee=results)
# returns the next free index for the given filename prefix
def get_current_index(all_files, filename):
    """Return the next free numeric suffix (as a string) for files named
    `<filename><number>`.

    Scans `all_files` for names starting with `filename`, takes the highest
    numeric suffix found, and returns max + 1 ("1" when nothing matches).
    Removed a leftover Python-2 debug `print` that ran on every file.
    """
    max_index = 0
    for f_name in all_files:
        # strictly longer, so a bare prefix with no trailing number is skipped
        if len(f_name) > len(filename) and f_name[0:len(filename)] == filename:
            index = int(f_name[len(filename):len(f_name)])
            if index > max_index:
                max_index = index
    return str(max_index + 1)
# image upload handler for a museum
@app.route('/upload/<cod>', methods=['POST', 'GET'])
def upload_file(cod):
    """Accept an image upload for museum `cod`, or show the upload form.

    Stored files are named "<cod>_<n>" where n is the next free index
    (note: the original extension is not preserved — TODO confirm this is
    intended, getImages matches on the code prefix only).
    Removed a leftover Python-2 debug `print`.
    """
    if request.method == 'POST':
        upload = request.files['file']  # renamed from `file`: shadowed the builtin
        if upload and allowed_file(upload.filename):
            filename = cod + "_"
            all_files = listdir('./static/images')
            # next free numeric suffix for this museum's images
            index = get_current_index(all_files, filename)
            filename = filename + str(index)
            upload.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            return redirect("/muzee/" + cod)
    # GET request (or rejected upload): show the bare upload form
    return '''
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form action="" method=post enctype=multipart/form-data>
      <p><input type=file name=file>
         <input type=submit value=Upload>
    </form>
    '''
def getImages(code):
    """Return the uploaded image filenames belonging to museum `code`.

    Uploads are stored as "<code>_<n>" (see upload_file), so match on the
    "<code>_" prefix.  The original substring test (`str(code) in f`) also
    matched unrelated codes — e.g. code 1 matched a file named "21_3".
    """
    prefix = str(code) + "_"
    return [f for f in listdir(UPLOAD_FOLDER) if f.startswith(prefix)]
# museum detail page, looked up by entity code
@app.route("/muzee/<int:code>")
def get_museum_by_code(code):
    """Render the detail page for the museum with the given code.

    Loads the pickled dataset, locates the row whose code column matches,
    and builds the template context.  Any lookup or decoding failure
    renders a plain "no match" message instead of a 500.
    """
    # load the dataset and its column headers
    with open('data.pkl', 'rb') as dict_file:
        dictionar = pickle.load(dict_file)
    with open('headers.hd', 'rb') as head_file:
        header = pickle.load(head_file)
    try:
        index = dictionar[header[0]].index(str(code))
        nume = dictionar[header[3]][index].decode(encoding="UTF-8")
        # a quoted fragment of the name seeds the external photo search query
        photo_query = search(r'".*"', nume)
        if photo_query is None:
            photo_query = ""
        else:
            # strip the surrounding quotes
            photo_query = photo_query.group(0)[1:len(photo_query.group(0)) - 1]
        # NOTE(review): 'program' duplicates column 13 ('p_en') — confirm
        # the intended schedule column
        new_d = {'judet': dictionar[header[2]][index].decode(encoding="UTF-8"),
                 'de_ro': nume,
                 'de_en': dictionar[header[4]][index].decode(encoding="UTF-8"),
                 'loc': dictionar[header[5]][index].decode(encoding="UTF-8"),
                 'adr': dictionar[header[7]][index].decode(encoding="UTF-8"),
                 'tel': dictionar[header[9]][index].decode(encoding="UTF-8"),
                 'p_ro': dictionar[header[12]][index].decode(encoding="UTF-8"),
                 'p_en': dictionar[header[13]][index].decode(encoding="UTF-8"),
                 'desc_ro': dictionar[header[17]][index].decode(encoding="UTF-8"),
                 'desc_en': dictionar[header[18]][index].decode(encoding="UTF-8"),
                 'lat': sub(',', '.', dictionar[header[35]][index]),
                 'lng': sub(',', '.', dictionar[header[36]][index]),
                 'coord': dictionar[header[38]][index],
                 'photo_query': '+'.join(photo_query.split(' ')),
                 'program': dictionar[header[13]][index].decode(encoding="UTF-8"),
                 'code': code,
                 'pictures': getImages(code)}
        return render_template('muzeu.html', muzeu=new_d)
    except Exception:
        # was a bare `except:` — at least let SystemExit/KeyboardInterrupt through
        return "Nu s-au gasit potriviri"
@app.route('/adauga')
def muzeu_nou():
    """Show the blank "add museum" form."""
    return render_template('adauga_muzeu.html')
def get_next_code(dictionar, header):
    """Return the next unused museum code as a string (max existing + 1)."""
    # header[0] is the code column; skip blank entries
    codes = [int(w) for w in dictionar[header[0]] if len(w) > 0]
    # seed with 0 so an empty (or all non-positive) column yields "1",
    # matching the original accumulator's starting value
    maxi = max([0] + codes)
    return str(maxi + 1)
@app.route('/adauga/<path:muzeu>', methods=['POST', 'GET'])
def adauga_muzeu(muzeu):
    """Append a new museum row built from the submitted form fields.

    Only a handful of columns come from the query-string form fields; the
    code column is auto-assigned and every other column is left empty.
    The updated dataset is pickled back to disk, then the client is
    redirected to the landing page.
    Removed a stray `request.args.get('nume')` whose result was discarded.
    """
    # load the dataset and its column headers
    with open('data.pkl', 'rb') as dict_file:
        dictionar = pickle.load(dict_file)
    with open('headers.hd', 'rb') as head_file:
        header = pickle.load(head_file)
    if request.method == 'GET':
        # column index -> form field that supplies it
        target_fields = {3: 'nume', 2: 'judet', 17: 'descriere', 35: 'lat', 36: 'lng'}
        for i in range(len(header)):
            if i in target_fields.keys():
                dictionar[header[i]].append(request.args.get(target_fields[i]))
            elif i == 0:
                dictionar[header[i]].append(get_next_code(dictionar, header))
            else:
                dictionar[header[i]].append("")
        # persist the extended dataset
        with open('data.pkl', 'wb') as output:
            pickle.dump(dictionar, output)
    return redirect("/")
# @app.route('/csv')
# def getCSV():
# content = ""
# dictionar = {}
# header = []
# with open('static/date_muzee.csv', 'r') as csvfile:
# count = 0
# total = ""
# reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
# for row in reader:
# content = ' '.join(row)
# content_list = content.split('|')
# if count == 0:
# count += 1
# header += content_list
# print header
# for head in content_list:
# dictionar[head] = []
# else:
# for i in range(len(content_list)):
# dictionar[header[i]].append(content_list[i])
# total += content
# output = open('data.pkl', 'wb')
# pickle.dump(dictionar, output)
# output.close()
# headers = open('headers.hd', 'wb')
# pickle.dump(header, headers)
# headers.close()
# return "|".join(dictionar[header[3]])
@app.route('/')
def toateMuzeele():
    """Render the landing page listing every museum (code, county, name)."""
    # header positions: 0 = museum code, 2 = county, 3 = Romanian name.
    # `with` replaces pickle.load(open(...)) which leaked the file handles.
    with open('headers.hd', 'rb') as head_file:
        header = pickle.load(head_file)
    with open('data.pkl', 'rb') as data_file:
        data = pickle.load(data_file)
    muzee = []
    for i in range(len(data[header[0]])):
        muzee.append({'cod': data[header[0]][i],
                      'judet': data[header[2]][i].decode(encoding="UTF-8"),
                      'nume': data[header[3]][i].decode(encoding="UTF-8")})
    return render_template('lista_muzee.html', muzee=muzee)
if __name__ == "__main__":
    # bind to all interfaces; PORT env var allows platform-assigned ports
    port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=port, debug=True)
| true
|
3cdebaa208cc02a488d23276b0b2ea8a2e8d8b15
|
Python
|
lavenblue/LSSA
|
/data/generate_input.py
|
UTF-8
| 4,602
| 2.765625
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import copy
import pickle
class data_generation():
def __init__(self, type):
print('init------------')
self.data_type = type
self.dataset = self.data_type + '/'+self.data_type + '_dataset.csv'
self.train_users = []
self.train_sessions = [] # 当前的session
self.train_pre_sessions = [] # 之前的session集合
self.train_long_neg = [] # 长期集合,随机采样得到的negative
self.train_short_neg = [] # 短期集合,随机采样得到的negative
self.test_users = []
self.test_candidate_items = []
self.test_sessions = []
self.test_pre_sessions = []
self.test_real_items = []
self.user_number = 0
self.item_number = 0
self.gen_train_test_data()
train = (self.train_users, self.train_pre_sessions, self.train_sessions,
self.train_long_neg, self.train_short_neg)
test = (self.test_users, self.test_pre_sessions, self.test_sessions,
self.test_real_items)
pickle.dump(train, open(self.data_type + '/train.pkl', 'wb'))
pickle.dump(test, open(self.data_type + '/test.pkl', 'wb'))
def gen_train_test_data(self):
self.data = pd.read_csv(self.dataset, names=['user', 'sessions'], dtype='str')
is_first_line = 1
maxLen_long = 0
maxLen_short = 0
for line in self.data.values:
if is_first_line:
self.user_number = int(line[0])
self.item_number = int(line[1])
self.user_purchased_item = dict() # 保存每个用户购买记录,可用于train时负采样和test时剔除已打分商品
is_first_line = 0
else:
user_id = int(line[0])
sessions = [i for i in line[1].split('@')]
size = len(sessions)
the_first_session = [int(i)+1 for i in sessions[0].split(':')]
tmp = copy.deepcopy(the_first_session)
self.user_purchased_item[user_id] = tmp
for j in range(1, size - 1):
# 每个用户的每个session在train_users中都对应着其user_id
self.train_users.append(user_id)
# test = sessions[j].split(':')
current_session = [int(it)+1 for it in sessions[j].split(':')]
self.user_purchased_item[user_id].extend(current_session)
self.train_sessions.append(current_session)
short_neg_items = []
for _ in range(len(current_session)-1):
short_neg_items.append(self.gen_neg(user_id))
self.train_short_neg.append(short_neg_items)
long_neg_items = []
for _ in range(len(self.user_purchased_item[user_id]) - 1):
long_neg_items.append(self.gen_neg(user_id))
self.train_long_neg.append(long_neg_items)
tmp = copy.deepcopy(self.user_purchased_item[user_id])
self.train_pre_sessions.append(tmp)
if len(current_session) > maxLen_short:
maxLen_short = len(current_session)
# 对test的数据集也要格式化,test中每个用户都只有一个current session
self.test_users.append(user_id)
current_session = [int(it)+1 for it in sessions[size - 1].split(':')]
item = current_session[-1]
self.test_real_items.append(int(item))
current_session.remove(item)
self.test_sessions.append(current_session)
self.user_purchased_item[user_id].extend(current_session)
self.test_pre_sessions.append(self.user_purchased_item[user_id])
if len(self.user_purchased_item[user_id]) > maxLen_long:
maxLen_long = len(self.user_purchased_item[user_id])
print('maxLen_long = ', maxLen_long)
print('maxLen_short = ', maxLen_short)
def gen_neg(self, user_id):
neg_item = np.random.randint(self.item_number)
while neg_item in self.user_purchased_item[user_id]:
neg_item = np.random.randint(self.item_number)
return neg_item
if __name__ == '__main__':
type = ['gowalla']
dg = data_generation(type[0])
| true
|
29abc544d8d847160baafd57e939a4d26d225c72
|
Python
|
Zylanx/alex-bot
|
/setup.py
|
UTF-8
| 845
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
# creates databases in mongodb
import sys
def leave(message):
    """Print `message` and terminate the process with exit status 1.

    The parameter was renamed from `str`, which shadowed the builtin;
    all call sites pass it positionally.
    """
    print(message)
    # sys.exit instead of exit(): the latter only exists via the site module
    sys.exit(1)
# validate the environment (python version, config, driver), then build
# the database tables from schema.sql
try:
    assert sys.version_info[0] == 3 and sys.version_info[1] > 5
except AssertionError:
    leave("you need to have python 3.6 or later.")

# the original used `except ImportError(config)` / `ImportError(psycopg2)`,
# which is invalid: it puts an exception *instance* in the except clause and
# raises NameError when the import itself failed.  Split into two plain
# handlers so each missing dependency gets its own message.
try:
    import config
except ImportError:
    leave("you need to make a config. please see example_config.py for help.")
try:
    import psycopg2
except ImportError:
    leave("you need to install the requirements.")

for i in [config.dsn, config.token]:
    try:
        assert isinstance(i, str)
    except AssertionError:
        leave("please fill in the config file.")

cur = None
try:
    cur = psycopg2.connect(config.dsn).cursor()
except psycopg2.Error:
    leave("uh ur auth is wrong kiddo, or smthin")

# build tables
with open('schema.sql', 'r') as f:
    # cursor.execute() needs the SQL text, not the file object
    cur.execute(f.read())
print("Done!")
| true
|