blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30 values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2 values | text stringlengths 12 5.47M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ac92625bdda2c42ef21a7f74a7d52de72948192f | Python | reo11/AtCoder | /atcoder/AGC/agc036/agc036_b.py | UTF-8 | 371 | 2.765625 | 3 | [] | no_license | n, k = map(int, input().split())
# AtCoder AGC036 B: the array `a` (read below) is conceptually repeated k
# times; scan it while keeping a stack-like output `ans`.
a = list(map(int, input().split()))
# a_idx[v]: index in `ans` where value v is currently "open", or -1 if not.
a_idx = [-1] * (2 * 10 ** 5)
ans = [0] * (2 * 10 ** 5)
idx = 0
for i in range(n * k):
    a_i = a[i % n]
    if a_idx[a_i] == -1:
        # Value not open yet: append it and remember where it went.
        ans[idx] = a_i
        a_idx[a_i] = idx
        idx += 1
    else:
        # Second occurrence: rewind the output back to (and including) the
        # first occurrence of this value.
        # NOTE(review): values dropped by the rewind keep stale a_idx
        # entries — appears intentional for this problem, but verify.
        idx = a_idx[a_i]
        a_idx[a_i] = -1
print(" ".join(list(map(str, ans[:idx]))))
| true |
c12aafc1a54ce7b59376ffa467431f0a4a213d3e | Python | luizfelipers19/IPythonCourse-MIT-x-Unicamp | /Set3/p3_7.py | UTF-8 | 282 | 3.5625 | 4 | [] | no_license | def hailstone_sequence(a_0):
    """Return the hailstone (Collatz) sequence starting at a_0, ending at 1.

    Even terms are halved, odd terms become 3*n + 1.  Loops forever if
    a_0 < 1 — callers should pass a positive integer.
    """
    lista = [a_0]
    while a_0 != 1:
        if (a_0 %2) == 0:
            a_0 = a_0 //2
            lista.append(a_0)
        else:
            a_0 = (a_0 * 3) + 1
            lista.append(a_0)
    return lista
# Expected output: [3, 10, 5, 16, 8, 4, 2, 1]
print(hailstone_sequence(3))
0c73161d1db94b1eb9f91cb5d6770d548decbe63 | Python | ericchen12377/Leetcode-Algorithm-Python | /1stRound/Easy/657 Robot Return to Origin/Complexnumssum.py | UTF-8 | 324 | 2.859375 | 3 | [
"MIT"
] | permissive | class Solution:
def judgeCircle(self, moves):
"""
:type moves: str
:rtype: bool
"""
directs = {'L':-1, 'R':1, 'U':1j, 'D':-1j} # real for axis x and complex for axis y
return 0 == sum(directs[move] for move in moves)
moves = "UD"
p = Solution()
print(p.judgeCircle(moves)) | true |
56dfe8f16ca0777983b291333d6ff123155d6382 | Python | sonkute96/hocPython | /Function.py | UTF-8 | 309 | 3.625 | 4 | [] | no_license |
# Way 1 to declare a function: take variable arguments and unpack them.
def print_two(*args):
    """Print two values received through *args.

    Raises ValueError if anything other than exactly two arguments is
    passed, because the tuple unpacking below expects two items.
    """
    arg1, arg2 = args
    # Parenthesized print works under both Python 2 and Python 3; the
    # original used the Python-2-only statement form.
    print(" arg1 = %r, arg2 = %r " % (arg1, arg2))
print_two("Zed", "Shaw")
# Way 2 to declare a function: two explicitly named parameters.
def print_two_again(arg1, arg2):
    """Print two values passed as separate, named parameters."""
    # Parenthesized print is compatible with both Python 2 and Python 3;
    # the original used the Python-2-only statement form.
    print("arg1 = %r , arg2 = %r " % (arg1, arg2))
print_two_again("zed", "Show")
| true |
f39c170f3b598710b128e7236940c906a03156c5 | Python | bcveber/COSC101 | /lab4/num_pizzas.py | UTF-8 | 534 | 3.71875 | 4 | [] | no_license | total_slices = 0
def num_pizzas(adults, boys, girls):
    """Return the number of whole pizzas (8 slices each) that must be bought.

    (int, int, int) --> int
    Each adult eats 2 slices, each boy 3 and each girl 1; any leftover
    slices still require one more whole pizza.
    """
    total_slices = adults * 2 + boys * 3 + girls * 1
    # Ceiling division replaces the original if/else, which returned a
    # float (total_slices / 8) in the evenly-divisible case and an int
    # otherwise — now the result is always an int.
    return (total_slices + 7) // 8
# Bug fix: the original `print(total_slices = num_pizzas(1,1,1))` raised
# TypeError ('total_slices' is an invalid keyword argument for print()).
# Assign first, then print the value.
total_slices = num_pizzas(1, 1, 1)
print(total_slices)
| true |
89512aff2dc99429e0e2e6a358c23ae05cda423b | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2377/60810/289192.py | UTF-8 | 494 | 3.828125 | 4 | [] | no_license | '''
回旋镖定义为一组三个点,这些点各不相同且不在一条直线上。
给出平面上三个点组成的列表,判断这些点是否可以构成回旋镖。
'''
# Read an (unused) integer, then three points, each entered as "x,y".
n = int(input())
inp1 = input()
point1 = inp1.split(',')
inp2 = input()
point2 = inp2.split(',')
inp3 = input()
point3 = inp3.split(',')
x1, y1 = int(point1[0]), int(point1[1])
x2, y2 = int(point2[0]), int(point2[1])
x3, y3 = int(point3[0]), int(point3[1])
# The points form a "boomerang" iff they are not collinear, i.e. the cross
# product of vectors P1->P2 and P1->P3 is non-zero (slope comparison
# written without division to avoid zero denominators).
print((y2 - y1) * (x3 - x1) != (y3 - y1) * (x2 - x1))
8bcf462762f702a1c2851b5f1fbfeaf2e240ed89 | Python | medoocs/SIRD-model-for-COVID-19-in-Croatia | /SIRD-COVID19.py | UTF-8 | 11,704 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 14:20:53 2021
@author: NIKOLA
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from sklearn.metrics import mean_squared_error
def plotPred(sve):
    """Re-run the SIRD model while shifting one lockdown start date, and plot
    the predicted S/I/R/D values on a fixed comparison day against the real
    data for every shift.

    sve: if True, draw all four quantities in one 4-row figure; otherwise
    draw four separate figures.
    Relies on the module-level globals `pop`, `susceptible`, `infected`,
    `recovered` and `dead`.
    """
    # The lockdown day (t index) that we shift
    tren = 17
    # Shift range: from 13 days earlier to 14 days later
    od = tren - 13
    do = tren + 14
    # The day we compare model vs. reality on
    #end = 344 # Last day in the dataset
    end = 276 # Day of maximum infections
    sus = []
    inf = []
    rec = []
    ded = []
    for d in range(od, do):
        # Piecewise-constant coefficients; each row is [start day t, beta,
        # gamma, sigma].  The third row's day is the shifted lockdown `d`.
        KOEFICIJENTI = np.array([[0, 0.7, 0.69, 0.049 ], #28-02-2020 Call for wearing masks
                        [3, 0.46, 0.3, 0.01 ], #03-03-2020 At-risk groups asked to stay home
                        [d, 0.79, 0.69, 0.044 ], #19-03-2020 Lockdown 1 (this is the shifted date)
                        [72, 0.2, 0.69, 0.01 ], #11-05-2020 Closing of primary schools
                        [79, 0.73, 0.69, 0.013 ], #18-05-2020 Public gatherings limited to 50 people
                        [117, 0.4, 0.3, 0.01 ], #25-06-2020 Masks mandatory indoors
                        [135, 0.71, 0.69, 0.012 ], #13-07-2020 Private events limited to 50 people, public gatherings banned, restrictions on private gatherings
                        [167, 0.6, 0.54, 0.013 ], #14-08-2020 Cafes and restaurants closed
                        [242, 0.675, 0.64, 0.01 ], #28-10-2020 Masks mandatory everywhere
                        [273, 0.725, 0.69, 0.018 ], #28-11-2020 Work from home encouraged; cafes, restaurants and gyms closed
                        [300, 0.73, 0.717, 0.03 ], #25-12-2020 Lockdown 2
                        [350, 0.73, 0.69, 0.018 ]])
        T = KOEFICIJENTI[:, 0]
        BETA = KOEFICIJENTI[:, 1]
        GAMMA = KOEFICIJENTI[:, 2]
        SIGMA = KOEFICIJENTI[:, 3]
        # kind=0 gives zero-order-hold (step) interpolation between rows.
        beta = interp1d(T, BETA, kind=0)
        gamma = interp1d(T, GAMMA, kind=0)
        sigma = interp1d(T, SIGMA, kind=0)
        def SIRD(y, t):
            # Right-hand side of the SIRD ODEs; shadows the module-level
            # SIRD so it closes over this iteration's beta/gamma/sigma.
            S, I, R, D = y
            dSdt = -beta(t) * I * S
            dIdt = (beta(t) * I * S) - (sigma(t) * I + gamma(t) * I)
            dRdt = gamma(t) * I
            dDdt = sigma(t) * I
            return dSdt, dIdt, dRdt, dDdt
        days = 345
        # State is expressed as fractions of the population.
        y0 = 1.0, 1/pop, 0.0, 0.0
        t = np.linspace(0, days-1, days)
        REZ = odeint(SIRD, y0, t)
        S = REZ[: ,0]
        I = REZ[: ,1]
        R = REZ[: ,2]
        D = REZ[: ,3]
        # Record the model state on the comparison day, rescaled to persons.
        sus.append(S[end]*pop)
        inf.append(I[end]*pop)
        rec.append(R[end]*pop)
        ded.append(D[end]*pop)
    sus = pd.Series(sus)
    inf = pd.Series(inf)
    rec = pd.Series(rec)
    ded = pd.Series(ded)
    if sve:
        f, [ax, ax1, ax2, ax3] = plt.subplots(4,1,figsize=(10, 15), sharex=True)
    else:
        f, ax = plt.subplots(1,1,figsize=(10, 10), sharex=True)
        f, ax1 = plt.subplots(1,1,figsize=(10, 10), sharex=True)
        f, ax2 = plt.subplots(1,1,figsize=(10, 10), sharex=True)
        f, ax3 = plt.subplots(1,1,figsize=(10, 10), sharex=True)
    # Trim the 3 outermost shifts on each side before plotting.
    od += 3
    do -= 3
    ax.plot(np.arange(od, do), sus.values[3:-3], 'b', label='Susceptible')
    ax.plot(np.arange(od, do), [susceptible[end]]*(do-od), c='k', label='Susceptible-real', ls='--')
    ax1.plot(np.arange(od, do), inf.values[3:-3], 'r', label='Infected')
    ax1.plot(np.arange(od, do), [infected[end]]*(do-od), c='k', label='Infected-real', ls='--')
    ax2.plot(np.arange(od, do), rec.values[3:-3], 'g', label='Recovered')
    ax2.plot(np.arange(od, do), [recovered[end]]*(do-od), c='k', label='Recovered-real', ls='--')
    ax3.plot(np.arange(od, do), ded.values[3:-3], 'k', label='Dead')
    ax3.plot(np.arange(od, do), [dead[end]]*(do-od), c='k', label='Dead-real', ls='--')
    ax.title.set_text(f'Susceptible prediction vs real on day {end}')
    ax1.title.set_text(f'Infected prediction vs real on day {end}')
    ax2.title.set_text(f'Recovered prediction vs real on day {end}')
    ax3.title.set_text(f'Dead prediction vs real on day {end}')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    legend1 = ax1.legend()
    legend1.get_frame().set_alpha(0.5)
    legend2 = ax2.legend()
    legend2.get_frame().set_alpha(0.5)
    legend3 = ax3.legend()
    legend3.get_frame().set_alpha(0.5)
    # Tick labels are relative to the unshifted lockdown day, e.g. "LC -2".
    xos = []
    for i in np.arange(od-tren,do-tren):
        if i < 0:
            xos.append(f"LC {i}")
        else:
            xos.append(f"LC +{i}")
    plt.setp(ax, xticks=np.arange(od,do), xticklabels = xos)
    ax.tick_params(axis='x', rotation=90)
    plt.setp(ax1, xticks=np.arange(od,do), xticklabels = xos)
    ax1.tick_params(axis='x', rotation=90)
    plt.setp(ax2, xticks=np.arange(od,do), xticklabels = xos)
    ax2.tick_params(axis='x', rotation=90)
    plt.setp(ax3, xticks=np.arange(od,do), xticklabels = xos)
    ax3.tick_params(axis='x', rotation=90)
    ax.set(xlabel='Pomak u vremenu', ylabel='Vrijednost')
    ax1.set(xlabel='Pomak u vremenu', ylabel='Vrijednost')
    ax2.set(xlabel='Pomak u vremenu', ylabel='Vrijednost')
    ax3.set(xlabel='Pomak u vremenu', ylabel='Vrijednost')
    plt.show()
def plotK(beta, gamma, sigma):
    """Plot the step-interpolated beta/gamma/sigma coefficient functions
    over the first 350 days, one figure per coefficient.

    Each argument is a scipy interp1d callable (zero-order hold).
    """
    # Sample each interpolant once per day.
    beta2 = list(beta(np.arange(0,350)))
    gamma2 = list(gamma(np.arange(0,350)))
    sigma2 = list(sigma(np.arange(0,350)))
    f, ax = plt.subplots(1,1,figsize=(10, 10), sharex=True)
    f, ax1 = plt.subplots(1,1,figsize=(10, 10), sharex=True)
    f, ax2 = plt.subplots(1,1,figsize=(10, 10), sharex=True)
    ax.plot(np.arange(0, 350),beta2, 'blue', label='BETA')
    ax1.plot(np.arange(0, 350),gamma2, 'blue', label='GAMMA')
    ax2.plot(np.arange(0, 350),sigma2, 'blue', label='SIGMA')
    ax.title.set_text('Tablična interpolacija BETA koeficijenta')
    ax1.title.set_text('Tablična interpolacija GAMMA koeficijenta')
    ax2.title.set_text('Tablična interpolacija SIGMA koeficijenta')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    legend1 = ax1.legend()
    legend1.get_frame().set_alpha(0.5)
    legend2 = ax2.legend()
    legend2.get_frame().set_alpha(0.5)
    ax.set(xlabel='Vrijeme', ylabel='Vrijednost')
    ax1.set(xlabel='Vrijeme', ylabel='Vrijednost')
    ax2.set(xlabel='Vrijeme', ylabel='Vrijednost')
    plt.show()
def plotter(t, S, I, R, D):
    """Plot the modeled S/I/R/D curves (solid) against the smoothed real
    series (dashed), with intervention dates marked on the x axis.

    Reads the module-level globals `susceptible`, `infected`, `recovered`
    and `dead` for the real data.
    """
    f, [ax, ax1, ax2, ax3] = plt.subplots(4,1,figsize=(10, 10), sharex=True)
    ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
    ax.plot(susceptible, c='k', ls='--', label='Susceptible-real')
    ax1.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
    ax1.plot(infected, c='k', ls='--', label='Infected-real')
    ax2.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
    ax2.plot(recovered, c='k', ls='--', label='Recovered-real')
    ax3.plot(t, D, 'k', alpha=0.7, linewidth=2, label='Dead')
    ax3.plot(dead, c='k', ls='--', label='Dead-real')
    ax.title.set_text('SIRD-Model')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    legend1 = ax1.legend()
    legend1.get_frame().set_alpha(0.5)
    legend2 = ax2.legend()
    legend2.get_frame().set_alpha(0.5)
    legend3 = ax3.legend()
    legend3.get_frame().set_alpha(0.5)
    # Tick positions are the day indices of the interventions listed in the
    # coefficient table.
    plt.setp(ax3, xticks=[3,17,72,79,117,135,167,242,273,300],
             xticklabels=["03-03-2020","19-03-2020","11-05-2020","18-05-2020","25-06-2020","13-07-2020","14-08-2020","28-10-2020","28-11-2020","25-12-2020"])
    ax3.tick_params(axis='x', rotation=90)
    ax.set(ylabel='Vrijednost')
    ax1.set(ylabel='Vrijednost')
    ax2.set(ylabel='Vrijednost')
    ax3.set(xlabel='Pomak u vremenu', ylabel='Vrijednost')
    plt.show()
def SIRD(y, t):
    """Right-hand side of the SIRD ODE system, for scipy.integrate.odeint.

    y is the state (S, I, R, D) as fractions of the population, t is the
    time in days.  The module-level interpolants `beta`, `gamma` and
    `sigma` supply the time-varying infection, recovery and death rates.
    """
    S, I, R, D = y
    infection = beta(t) * I * S
    recovery = gamma(t) * I
    death = sigma(t) * I
    return -infection, infection - (death + recovery), recovery, death
# Initialization of the real data (7-day centered rolling means).
pop = 4089636
data = pd.read_csv('download.txt', sep=",", header=0)
date = data["Datum"][::-1]
susceptible = pd.Series(data["SlucajeviHrvatska"][::-1]).rolling(window=7, center=True).mean()
susceptible = pop - susceptible.values[3:348]
susceptible[np.isnan(susceptible)] = 0
# Daily new cases = first difference of the cumulative case count.
infected = pd.Series(np.diff(data["SlucajeviHrvatska"][::-1], n=1)).rolling(window=7, center=True).mean()
infected = infected.values[:347]
infected[np.isnan(infected)] = 0
recovered = pd.Series(data["IzlijeceniHrvatska"]).rolling(window=7, center=True).mean()#.iloc[::-1]
recovered = recovered.values[:2:-1]
recovered[np.isnan(recovered)] = 0
dead = pd.Series(data["UmrliHrvatska"]).rolling(window=7, center=True).mean()
dead = dead.values[:2:-1]
dead[np.isnan(dead)] = 0
# Piecewise-constant coefficients; each row is [start day t, beta, gamma, sigma].
KOEFICIJENTI = np.array([[0, 0.7, 0.69, 0.049 ], #28-02-2020 Call for wearing masks
                        [3, 0.46, 0.3, 0.01 ], #03-03-2020 At-risk groups asked to stay home
                        [17, 0.79, 0.69, 0.044 ], #19-03-2020 Lockdown 1
                        [72, 0.2, 0.69, 0.01 ], #11-05-2020 Closing of primary schools
                        [79, 0.73, 0.69, 0.013 ], #18-05-2020 Public gatherings limited to 50 people
                        [117, 0.4, 0.3, 0.01 ], #25-06-2020 Masks mandatory indoors
                        [135, 0.71, 0.69, 0.012 ], #13-07-2020 Private events limited to 50 people, public gatherings banned, restrictions on private gatherings
                        [167, 0.6, 0.54, 0.013 ], #14-08-2020 Cafes and restaurants closed
                        [242, 0.675, 0.64, 0.01 ], #28-10-2020 Masks mandatory everywhere
                        [273, 0.725, 0.69, 0.018 ], #28-11-2020 Work from home encouraged; cafes, restaurants and gyms closed
                        [300, 0.73, 0.717, 0.03 ], #25-12-2020 Lockdown 2
                        [350, 0.73, 0.69, 0.018 ]])
T = KOEFICIJENTI[:, 0]
BETA = KOEFICIJENTI[:, 1]
GAMMA = KOEFICIJENTI[:, 2]
SIGMA = KOEFICIJENTI[:, 3]
# Interpolation of the coefficients (kind=0 -> zero-order hold / steps).
beta = interp1d(T, BETA, kind=0)
gamma = interp1d(T, GAMMA, kind=0)
sigma = interp1d(T, SIGMA, kind=0)
# Plot of the coefficient step functions.
plotK(beta, gamma, sigma)
# Computing the SIRD model over 345 days, starting from one infected person.
days = 345
y0 = 1.0, 1/pop, 0.0, 0.0
t = np.linspace(0, days-1, days)
REZ = odeint(SIRD, y0, t)
S = REZ[: ,0]
I = REZ[: ,1]
R = REZ[: ,2]
D = REZ[: ,3]
# Plot of the SIRD model against the real data (rescaled to persons).
plotter(t, S * pop, I * pop, R * pop, D * pop)
# Computing the RMSE of each modeled series against the real data.
errS = mean_squared_error(susceptible[:345], S*pop, squared=False)
errI = mean_squared_error(infected[:345], I*pop, squared=False)
errR = mean_squared_error(recovered[:345], R*pop, squared=False)
errD = mean_squared_error(dead[:345], D*pop, squared=False)
print("RMSE susceptible: ", errS)
print("RMSE infected: ", errI)
print("RMSE recovered: ", errR)
print("RMSE dead: ", errD)
# True - everything on one plot, False - individual plots
plotPred(True)
| true |
4060bd3d0479b22fd3d884a1b74fb7cdc6b7f476 | Python | zhengyi144/OCR_RESTFUL_SERVICE | /resources/common/utils.py | UTF-8 | 1,415 | 2.96875 | 3 | [] | no_license | import base64
import cv2
from PIL import Image
import numpy as np
from io import BytesIO
import re
def imageFromBase64(base64Str):
    """Decode a base64-encoded image, tolerating an optional data-URI
    prefix such as "data:image/png;base64,".  Returns the raw bytes."""
    payload = re.sub('^data:image/.+;base64,', '', base64Str)
    return base64.b64decode(payload)
def imageFileToCvImage(imageFile):
    """Open an image from a path or file-like object and return it as an
    OpenCV-style BGR array."""
    return pImageToCvImage(Image.open(imageFile))
def pImageToCvImage(pImage):
    """Convert a PIL image (assumed RGB — TODO confirm for grayscale/RGBA
    inputs) into an OpenCV-style BGR numpy array."""
    rgb_array = np.asarray(pImage)
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
def base64ToCvImage(base64Data, orientation):
    """Decode a base64 image, rotate it according to the EXIF-style
    `orientation` code, and return it as an OpenCV BGR array."""
    return pImageToCvImage(base64ToPilImage(base64Data, orientation))
def base64ToPilImage(base64Data, orientation, imagePath=None):
    """
    Decode a base64 image into a PIL image, applying a rotation chosen by
    the EXIF-style orientation code: 1, 3, 6, 8 mean 0, 180, 90 clockwise
    and 90 counter-clockwise respectively.  Codes None/"" (and any code
    other than 3/6/8) leave the image unrotated.  If imagePath is given,
    the resulting image is also saved there.
    """
    rawBytes = imageFromBase64(base64Data)
    image = Image.open(BytesIO(rawBytes))
    if orientation != None and orientation != "":
        # Map the numeric code onto PIL's transpose constants.
        rotations = {3: Image.ROTATE_180, 6: Image.ROTATE_270, 8: Image.ROTATE_90}
        transposeOp = rotations.get(int(orientation))
        if transposeOp is not None:
            image = image.transpose(transposeOp)
    if imagePath:
        image.save(imagePath)
    return image
return image
def pilImageToBase64(imagePath):
    """Read the image at imagePath, re-encode it as JPEG in memory and
    return the base64-encoded bytes of that JPEG."""
    buffer = BytesIO()
    Image.open(imagePath).save(buffer, format="JPEG")
    return base64.b64encode(buffer.getvalue())
2092fc667798be408b15067ad6d43ec8bf23e7a7 | Python | w51w/python | /0928/함수6_turtle.py | UTF-8 | 561 | 4.09375 | 4 | [] | no_license | import turtle
def drawBarChar(t, value):
    """Draw one filled bar of height `value` and fixed width 40 with the
    turtle `t`.

    Assumes the turtle starts at the bar's baseline facing right; it ends
    at the baseline of the bar's right edge, so consecutive calls draw
    bars side by side.
    """
    t.begin_fill()
    t.left(90)          # face upward
    t.forward(value)    # left edge
    t.right(90)
    t.forward(40)       # top edge (fixed bar width)
    t.right(90)
    t.forward(value)    # right edge, back down to the baseline
    t.left(90)          # face right again for the next bar
    t.end_fill()
def bubble(alist):
    """Sort `alist` in place in ascending order using bubble sort.

    The original hard-coded both loop bounds to 6 and therefore only
    worked for lists of exactly 7 elements (raising IndexError for shorter
    ones); the bounds now follow len(alist), so any length works.
    """
    n = len(alist)
    for _ in range(n - 1):
        for i in range(n - 1):
            if alist[i] > alist[i + 1]:
                # Tuple swap replaces the original three-step temp swap.
                alist[i], alist[i + 1] = alist[i + 1], alist[i]
# Draw a simple bar chart of the data after sorting it ascending.
t = turtle.Pen()
data=[100,120,300,80,90,130,250]
bubble(data)
t.color('red')
t.fillcolor('blue')
t.pensize(3)
for d in data:
    drawBarChar(t, d)
52ea791cee8b39c56e1bf6a1bf68c48c2fcf56a9 | Python | MarkBanford/Master_OOP | /dunders.py | UTF-8 | 854 | 3.25 | 3 | [] | no_license | class ASAPMob:
    def __init__(self):
        # Roster of members.  Several dunder methods below POP from this
        # list, so the instance is effectively consumed by iteration or
        # indexing.
        self._members = [
            'A$AP Ant',
            'A$AP Bari',
            'A$AP Ferg',
            'A$AP Illz',
            'A$AP Lotto',
            'A$AP Nast',
            'A$AP Relli',
            'A$AP Rocky',
            'A$AP Snacks',
            'A$AP TyY',
        ]
    def __len__(self):
        """Number of members currently remaining in the roster."""
        return len(self._members)
    def __getitem__(self, key):
        """Destructive indexing: remove and return the member at position
        `key`.  Only integer keys are supported; anything else raises
        TypeError.
        """
        if isinstance(key, int):
            return self._members.pop(key)  # removes (not just reads) the member at `key`
        raise TypeError('Cannot get key')
    def __contains__(self, member):
        """Support `name in mob` for members still present in the roster."""
        return member in self._members
    def __iter__(self):
        """Destructive iterator: yield members from the END of the list,
        removing each one, until the roster is empty."""
        while self._members:
            yield self._members.pop()  # gets each member and removes it from the list
asap_mob = ASAPMob()
# Iterating consumes the roster (see __iter__), so the final len() is 0.
for member in asap_mob:
    print(member)
print(len(asap_mob))
| true |
60123b60bc24b3a7fbaeb7697a83e5f6c3983fa2 | Python | AbdulMalik-Marikar/COMP-1405 | /a2/a2q1b.py | UTF-8 | 364 | 3.734375 | 4 | [] | no_license | #Abdul-Malik Marikar
#101042166
#Key Reference: Gaddis, T. (2015). "Starting out with python" 3rd edition
#get user input (any answer other than exactly "yes" takes the else branch)
character = input("does you charachter have a beard? Type yes or no.\n")
#because no characters have a beard the program is forced down the else branch
if character == "yes" :
    print("I know your lying")
else:
    print("Great! I knew I would get it")
85ac68b2ed5cdfe3bc4dcd57ca49497a32ee6513 | Python | SINHOLEE/Algorithm | /python/프로그래머스/호텔방배정_힌트보고.py | UTF-8 | 591 | 2.921875 | 3 | [] | no_license | # union find
def find(x):
    """Union-find lookup with path compression over the module-level
    `parent` array: return the representative (smallest free room) for x.
    A parent entry of 0 marks a root / still-free room."""
    global parent
    if parent[x] == 0:
        return x
    root = find(parent[x])
    parent[x] = root
    return root
def solution(k, room_number):
    """Assign each requested room the smallest free room >= the request
    (programmers hotel-room problem), using the union-find `find` over the
    module-level `parent` array, where 0 means "still free"."""
    global parent
    parent = [0] * (k + 1)
    answer = [0] * len(room_number)
    for i, num in enumerate(room_number):
        # find(num) returns num itself while the room is still free, so the
        # original's "free" and "taken" branches collapse into one path.
        room = find(num)
        answer[i] = room
        # Point the just-assigned room at the next free room.
        parent[room] = find(room + 1)
    return answer
# Module-level union-find storage shared by find() and solution().
parent = []
solution(10, [1,1,1,5,4,1,2])  # assigns rooms [1, 2, 3, 5, 4, 6, 7]
| true |
3c27929fe74bfbd1ab0b5055ef879123df8363dc | Python | dansoh/python-intro | /python-crash-course/exercises/chapter-6/6-7-people.py | UTF-8 | 381 | 3.078125 | 3 | [] | no_license | favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
# Favorite color per person.
favorite_color = {
    'dave': 'blue',
    'simon': 'red',
    'mike': 'purple',
    'max': 'green',
    }
# Favorite number per person.
favorite_number = {
    'jesse': 5,
    'daniel': 28,
    'alex': 9,
    'anthony': 4
    }
# Print each "favorites" mapping on its own line.
favorites = [favorite_languages, favorite_color, favorite_number]
for favorite in favorites:
    print(favorite)
| true |
965b1e5747e38c73f6ef245d26afe176333088c0 | Python | rewonderful/MLC | /src/problem_221.py | UTF-8 | 2,678 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env python
def maximalSquare(self, matrix):
    """
    My Method
    Algorithm: dynamic programming.
    Idea:
        dp[i][j] records the side length of the largest all-'1' square whose
        bottom-right corner is matrix[i][j].
        If matrix[i][j] == '0', then clearly dp[i][j] == 0.
        For matrix[i][j] == '1' in the first row or first column,
        dp[i][j] = 1 — that is already the best possible there.
        Elsewhere, check the left, top and top-left neighbours: if any of
        them is not '1', the square ending at (i, j) can only be the cell
        itself, so dp[i][j] = 1.  Otherwise the square can grow, and
        dp[i][j] = min(dp[left], dp[top], dp[top-left]) + 1, which gives
        the state-transition equation.
    Returns the AREA of the largest square (side ** 2); matrix entries are
    the strings '0'/'1'.
    """
    if matrix == [] or matrix[0] == []:
        return 0
    n = len(matrix)
    m = len(matrix[0])
    dp = [[0] * m for _ in range(n)]
    ans = 0
    for i in range(n):
        for j in range(m):
            if i == 0 or j == 0:
                dp[i][j] = int(matrix[i][j])
            elif matrix[i][j] == '1':
                if matrix[i - 1][j - 1] == '1' and matrix[i][j - 1] == '1' and matrix[i - 1][j] == '1':
                    dp[i][j] = min(dp[i - 1][j - 1], dp[i][j - 1], dp[i - 1][j]) + 1
                else:
                    dp[i][j] = 1
            else:
                dp[i][j] = 0
            ans = max(ans, dp[i][j])  # track the best side length seen so far
    return ans ** 2
def maximalSquare1(self, matrix):
    """
    Solution Method
    Same DP as maximalSquare, but without the redundant neighbour checks:
    when any neighbour's dp value is 0, min(...) + 1 already yields 1, so
    the transition dp[i][j] = min(left, top, top-left) + 1 covers every
    '1' cell directly.  Returns the AREA of the largest all-'1' square;
    matrix entries are the strings '0'/'1'.
    """
    if matrix == [] or matrix[0] == []:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    side = [[0] * cols for _ in range(rows)]
    best = 0
    for r in range(rows):
        for c in range(cols):
            if r == 0 or c == 0:
                # Border cells: the square is at most the cell itself.
                side[r][c] = int(matrix[r][c])
            elif matrix[r][c] == '1':
                side[r][c] = min(side[r - 1][c - 1], side[r][c - 1], side[r - 1][c]) + 1
            else:
                side[r][c] = 0
            best = max(best, side[r][c])
    return best ** 2
if __name__ == '__main__':
    # Bug fix: the original called maximalSquare(matrix) with one argument,
    # which binds the grid to `self` and raises TypeError because `matrix`
    # is missing.  Pass None for the unused `self` parameter.  Expected: 4.
    print(maximalSquare(None, [["1","0","1","0","0"],["1","0","1","1","1"],["1","1","1","1","1"],["1","0","0","1","0"]]))
41205e31ecab567d6357d3611f392566ca51f2e1 | Python | Eddie-yz/Frequent-Phrase-Mining-Document-Vector-Display | /DocDistribute.py | UTF-8 | 3,601 | 3.171875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import euclidean_distances
from sklearn import manifold
from sklearn.svm import SVC
import os
import re
class AuthorClassifier(object):
    """Builds phrase-frequency vectors for documents and visualizes how
    documents from different authors cluster in 2-D (via MDS)."""
    def __init__(self):
        # Global phrase vocabulary: phrase -> total count across all files.
        self.phrase_dict = Counter()
    def dictConstruct(self, dir_name):
        """
        Construct a dictionary which contains all the meaningful
        phrases appearing in these documents.

        Each file in dir_name is expected to contain "phrase: count" (or
        "phrase, count") lines.
        """
        for _, _, files in os.walk(dir_name):
            for file in files:
                f = open(dir_name + '/' + file)
                con = f.readlines()
                for line in con:
                    # Split "phrase: count" / "phrase, count" into its parts.
                    phrases = re.split(r'[:,]', line.strip())
                    self.phrase_dict[phrases[0].strip()] += int(phrases[1].strip())
        print ('Amount of phrases in dictionary: ', len(self.phrase_dict))
        print ('\n')
    def doc2vec(self, docDir_name):
        """
        This function can transform each document in docDir_name to a vector.
        The vector is represented by the phrase-version of tf-idf: for each
        phrase, its count in the document divided by its global count.
        """
        print('Processing files in ' + docDir_name)
        # NOTE(review): docMat from the LAST os.walk iteration is returned —
        # fine for a flat directory, verify for nested ones.
        for _, _, files in os.walk(docDir_name):
            docMat = np.zeros((len(files), len(self.phrase_dict)))
            for doc_index, file in enumerate(files):
                f = open(docDir_name + '/' + file)
                con = f.read()
                con = con.replace('\n', ' ')
                for phrase_index, phrase in enumerate(self.phrase_dict.keys()):
                    docMat[doc_index, phrase_index] = con.count(phrase) / self.phrase_dict[phrase]
        return docMat
    def _matJoint(self, matArray):
        """
        Stack the per-author document matrices into one matrix and build a
        matching column vector of integer author labels (the index of each
        matrix in matArray).
        """
        docsMat = np.zeros((0, len(self.phrase_dict)))
        labelsMat = np.zeros((0, 1))
        for label, mat in enumerate(matArray):
            docsMat = np.concatenate((docsMat, mat), axis=0)
            labelsMat = np.concatenate((labelsMat, np.full((mat.shape[0], 1), label)), axis=0)
        return docsMat, labelsMat
    def plotDistribution(self, docVecArray, authors_name):
        """
        Use multi-dimensional scaling (MDS on pairwise Euclidean distances)
        to compress each document vector to 2-D and scatter-plot the
        documents, one color per author.  Assumes exactly five authors —
        one scatter call per label below.
        """
        docM, labelM = self._matJoint(docVecArray)
        print('Calculating similarities...')
        similarity = euclidean_distances(docM)
        print('Running MDS...')
        mds = manifold.MDS(n_components=2, metric=True, max_iter=4000,
                           eps=1e-6, dissimilarity='precomputed', random_state=1)
        docM = mds.fit_transform(similarity)
        plt.figure(figsize=(15, 9))
        plt.xlim([-5, 5])
        plt.ylim([-4, 3])
        plt.scatter(docM[labelM.ravel() == 0, 0], docM[labelM.ravel() == 0, 1], c='r', label=authors_name[0])
        plt.scatter(docM[labelM.ravel() == 1, 0], docM[labelM.ravel() == 1, 1], c='b', label=authors_name[1])
        plt.scatter(docM[labelM.ravel() == 2, 0], docM[labelM.ravel() == 2, 1], c='g', label=authors_name[2])
        plt.scatter(docM[labelM.ravel() == 3, 0], docM[labelM.ravel() == 3, 1], c='y', label=authors_name[3])
        plt.scatter(docM[labelM.ravel() == 4, 0], docM[labelM.ravel() == 4, 1], c='black', label=authors_name[4])
        plt.legend()
        plt.title('Documents Distribution')
        plt.savefig('distribution.png')
        plt.show()
        return
6c0d2ee01936a935570ade3403e09c260c586aff | Python | Spazzy757/neural-networks | /logistic_regression.py | UTF-8 | 612 | 3.03125 | 3 | [] | no_license | import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# Importing the iris dataset (features X, class labels y)
X, y = load_iris(return_X_y=True)
# Scaling the features to zero mean / unit variance
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Train/test split (80/20, unseeded — accuracy varies run to run)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Training a multinomial logistic-regression classifier
clf = LogisticRegression(solver='lbfgs',multi_class='multinomial')
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print('Accuracy:', acc)
08e4cdcd642d8e75bd88fe80f0b2f1a395a3b89e | Python | shubhamjaiswal889/PREDICTION-USING-ARIMA-SARIMA-MODEL | /Sales Prediction Using Arima & Sarima Model.py | UTF-8 | 5,168 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Exported Jupyter notebook: monthly champagne-sales forecasting with
# ARIMA/SARIMA.  The "# In[n]:" markers are the original notebook cells.
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[4]:
df = pd.read_csv(r'C:\Users\shubham.kj\Downloads\Perrin Freres monthly champagne sales millions.csv')
# In[5]:
df.head()
# In[6]:
## Change the column names
df.columns=["Month","Sales"]
df.head()
# In[7]:
df.tail()
# # Removal of Null values
#
# In[8]:
## Drop the last 2 rows (footer rows, not data)
df.drop(106,axis=0,inplace=True)
# In[9]:
df.drop(105,axis=0,inplace=True)
# In[10]:
# Convert Month into Datetime
df['Month']=pd.to_datetime(df['Month'])
# In[11]:
df.head()
# In[12]:
df.set_index('Month',inplace=True)
# In[13]:
df.head()
# In[14]:
df.describe()
# In[ ]:
# # Sales Visualization
# In[15]:
df.plot()
# In[16]:
from statsmodels.tsa.stattools import adfuller
# "adfuller" is a function / module used to check the STATIONARITY of a dataset.
# In[17]:
test_result=adfuller(df['Sales'])
# In[18]:
#HYPOTHESIS TEST:
#Ho: the series is non-stationary
#H1: the series is stationary
def adfuller_test(sales):
    """Run the Augmented Dickey-Fuller stationarity test on `sales`, print
    the test statistics, and print a plain-language verdict based on the
    p-value (H0: the series is non-stationary)."""
    stats = adfuller(sales)
    labels = ['ADF Test Statistic','p-value','#Lags Used','Number of Observations Used']
    for label, value in zip(labels, stats):
        print(label + ' : ' + str(value))
    if stats[1] <= 0.05:
        print("strong evidence against the null hypothesis(Ho), reject the null hypothesis. Data has no unit root and is stationary")
    else:
        print("weak evidence against null hypothesis, time series has a unit root, indicating it is non-stationary ")
# In[19]:
adfuller_test(df['Sales'])
# Differencing is a popular and widely used data transform for making time series data stationary.
#
# Differencing can help stabilise the mean of a time series by removing changes in the level of a time series, and therefore eliminating (or reducing) trend and seasonality.
#
# Differencing shifts one or more rows downwards.
# In[20]:
# Seasonal (lag-12) difference to remove the yearly cycle.
df['Seasonal First Difference']=df['Sales']-df['Sales'].shift(12)
# In[21]:
df.head(14)
# In[22]:
## Run the Dickey-Fuller test again on the differenced series
adfuller_test(df['Seasonal First Difference'].dropna())
# In[23]:
df['Seasonal First Difference'].plot()
# # AUTO-CORRELATION | PARTIAL AUTO-CORRELATION:
# In[24]:
from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
# In[25]:
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(df['Sales'])
plt.show()
# In[26]:
import statsmodels.api as sm
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(df['Seasonal First Difference'].iloc[13:],lags=40,ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(df['Seasonal First Difference'].iloc[13:],lags=40,ax=ax2)
# These two graphs help choose the p and q values:
# the Partial AutoCorrelation graph suggests p,
# the AutoCorrelation graph suggests q.
# # ARIMA MODEL
# AR: Autoregression. A model that uses the dependent relationship between an observation and some number of lagged observations.
#
# I: Integrated. The use of differencing of raw observations in order to make the time series stationary.
#
# MA: Moving Average. A model that uses the dependency between an observation and a residual error from a moving average model applied to lagged observations.
#
# The parameters of the ARIMA model are defined as follows:
#
# p: The number of lag observations included in the model, also called the lag order.
# d: The number of times that the raw observations are differenced, also called the degree of differencing.
# q: The size of the moving average window, also called the order of moving average.
# In[27]:
# For non-seasonal data
#p=1, d=1, q=0 or 1
from statsmodels.tsa.arima_model import ARIMA
# In[28]:
model=ARIMA(df['Sales'],order=(1,1,1))
model_fit=model.fit()
# In[29]:
model_fit.summary()
# In[30]:
# In-sample dynamic forecast over the last observations.
df['forecast']=model_fit.predict(start=90,end=103,dynamic=True)
df[['Sales','forecast']].plot(figsize=(12,8))
# # SARIMA MODEL
# In[31]:
import statsmodels.api as sm
# In[32]:
# Seasonal ARIMA with a 12-month seasonal period.
model=sm.tsa.statespace.SARIMAX(df['Sales'],order=(1, 1, 1),seasonal_order=(1,1,1,12))
results=model.fit()
# In[33]:
df['forecast']=results.predict(start=90,end=103,dynamic=True)
df[['Sales','forecast']].plot(figsize=(12,8))
# Here the blue line is the actual data and the orange line is the
# predicted data — showing how well the model fits.
# # PREDICTION
# In[34]:
from pandas.tseries.offsets import DateOffset
# Using a list comprehension, add 24 future monthly dates for prediction:
future_dates=[df.index[-1]+ DateOffset(months=x)for x in range(0,24)]
# In[35]:
# Converting the list into a DataFrame (skipping the duplicate first date):
future_datest_df=pd.DataFrame(index=future_dates[1:],columns=df.columns)
# In[36]:
future_datest_df.tail()
# In[37]:
# Concatenating the original and the newly created dataset for visualization:
future_df=pd.concat([df,future_datest_df])
# In[38]:
# Predict into the appended future rows and plot actuals plus forecast.
future_df['forecast'] = results.predict(start = 104, end = 120, dynamic= True)
future_df[['Sales', 'forecast']].plot(figsize=(12, 8))
# In[ ]:
4ce230196e74b10b147ea6191965830cb82f76ac | Python | Dmitry1973/Python_Basics | /py_basics_HW5_e.py | UTF-8 | 1,320 | 3.4375 | 3 | [] | no_license | # Задача-1:
# Task 1: write a script that creates directories dir_1 - dir_9 in the
# folder this script is run from,
# and a second script that removes those directories.
import os
#from os import listdir
import shutil
dir_name = ''
for i in range(1, 10):
    #dir_name = 'dir_' + str(i)
    dir_path = os.path.join(os.getcwd(), 'dir_'+str(i))
    try:
        os.mkdir(dir_path)
    except FileExistsError:
        # Message (Russian): "Such a directory already exists"
        print('Такая директория уже существует')
# Remove the directories that were just created.
for i in range(1, 10):
    if os.path.isdir('dir_'+str(i)):
        shutil.rmtree('dir_'+str(i)) # remove dir and all contents
    else:
        raise ValueError("file {} is not a file or dir.".format('dir_'+str(i)))
# Task 2: write a script that lists the entries of the current directory.
print(os.listdir())
# Task 3: write a script that makes a copy of the file this script was run from.
dir_path = os.path.join(os.getcwd(), 'New_dir')
try:
    os.mkdir(dir_path)
except FileExistsError:
    # Message (Russian): "Such a directory already exists"
    print('Такая директория уже существует')
shutil.copyfile(r'hw5_e.py', r'New_dir/hw5_e.py')
163fbee6e7faab0d2871beb9c81631a04df60e7e | Python | wattlebirdaz/geql | /TrainingStats.py | UTF-8 | 6,055 | 2.953125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
class TrainingStats:
def __init__(self, q_estimator_desc, action_policy_desc, comment=None, ma_width=20):
self.comment = '' if comment is None else '\t' + comment
self.ma_width = ma_width
self.n_episodes = 0
self.episode_fitness = []
self.episode_game_time = []
self.episode_time = []
self.episode_frame_count = []
self.q_estimator_desc = q_estimator_desc
self.action_policy_desc = action_policy_desc
self.fig = plt.figure()
self.fig.suptitle('$Q(s,a)$: ' + q_estimator_desc +
'\t $\pi(s,a)$:' + action_policy_desc +
self.comment, fontsize=8)
spec = gridspec.GridSpec(ncols = 1, nrows = 4, figure = self.fig)
self.episode_fitness_graph = self.fig.add_subplot(spec[0:3,0])
# self.time_graph = self.episode_fitness_graph.twinx()
self.eps_graph = self.fig.add_subplot(spec[3,0], sharex = self.episode_fitness_graph)
self.fps_graph = self.eps_graph.twinx()
plt.ion()
def moving_average(x, w):
if len(x) == 0:
return np.array([])
convolved = np.convolve(x, np.ones(w), 'full')
# Normalize the first elements separately (they are not over w samples)
first_element_normalizers = np.array(range(1, w))
convolved[0:w-1] = convolved[0:w-1] / first_element_normalizers
# Normalize the rest of the elements
convolved[w-1:len(x)] /= w
return convolved[0:len(x)]
def export(self, filename):
episode_number = list(range(1, self.n_episodes + 1))
table = np.column_stack([episode_number,
self.episode_fitness,
self.episode_game_time,
self.episode_time,
self.episode_frame_count])
np.savetxt(filename,
table,
fmt=['%d',
'%.2f',
'%d',
'%.5f',
'%d'],
header='episode_number episode_fitness game_time wall_time frame_count\t' +
' Q: {} P: {} Other: {}'.format(self.q_estimator_desc,
self.action_policy_desc,
self.comment))
def print_stats(self):
# TODO A bit overkill to calculate MA for the entire sequence when we only
# want the last
ma = TrainingStats.moving_average(self.episode_fitness, self.ma_width)
fps = self.episode_frame_count[-1] / self.episode_time[-1]
print('Episode #{} stats: fitness={} (MA{}={}), game_time={}, fps={}, frame_count={}, wall_time={}\n'.format(
self.n_episodes,
self.episode_fitness[-1],
self.ma_width,
ma[-1],
self.episode_game_time[-1],
fps,
self.episode_frame_count[-1],
self.episode_time[-1]
))
def add_episode_stats(self, real_time_elapsed, game_time_elapsed, frames, fitness):
self.episode_time.append(real_time_elapsed)
self.episode_game_time.append(game_time_elapsed)
self.episode_fitness.append(fitness)
self.episode_frame_count.append(frames)
self.n_episodes += 1
    def plot(self):
        """Redraw the live training dashboard: fitness samples plus their
        moving average on the large top axis, and episodes-per-hour (left)
        with FPS (right twin axis) on the shared bottom axis."""
        n_episodes = len(self.episode_fitness)
        # Episode fitness
        self.episode_fitness_graph.clear()
        self.episode_fitness_graph.set_ylabel('fitness')
        self.episode_fitness_graph.tick_params(axis='y', colors='b')
        x = list(range(1, n_episodes + 1))
        # Samples (dots)
        self.episode_fitness_graph.plot(x, self.episode_fitness,
                                        color='cornflowerblue',
                                        marker='.',
                                        linestyle='',
                                        zorder=5)
        # Moving average
        ma = TrainingStats.moving_average(self.episode_fitness, self.ma_width)
        self.episode_fitness_graph.plot(x, ma, 'b--', zorder=10)
        self.episode_fitness_graph.set_ylim(bottom=0)
        # Show x on the lowest subgraph instead
        self.episode_fitness_graph.grid(b=True, axis='both')
        self.episode_fitness_graph.tick_params(axis='x', bottom=False, top=False, colors='w')
        # Time (disabled; kept for reference)
        # self.time_graph.clear()
        # self.time_graph.plot(x, self.episode_game_time,
        #                      color='salmon',
        #                      marker='.',
        #                      linestyle='',
        #                      zorder=1)
        # self.time_graph.set_ylim(bottom=0)
        # self.time_graph.tick_params(axis='y', colors='r')
        # self.time_graph.set_ylabel('episode time')
        # EPS: episodes per hour (3600 s / wall-time of each episode)
        self.eps_graph.clear()
        eps = (60*60) / np.array(self.episode_time)
        eps_ma = TrainingStats.moving_average(eps, self.ma_width)
        self.eps_graph.plot(x, eps,
                            color='cornflowerblue',
                            marker='.',
                            linestyle='')
        self.eps_graph.plot(x, eps_ma, 'b--')
        self.eps_graph.set_ylim(bottom=0)
        self.eps_graph.set_ylabel('EPH')
        self.eps_graph.tick_params(axis='y', colors='b')
        # FPS: frames per wall-clock second of each episode
        self.fps_graph.clear()
        fps = np.array(self.episode_frame_count) / np.array(self.episode_time)
        self.fps_graph.plot(x, fps, 'r')
        self.fps_graph.set_ylabel('FPS')
        self.fps_graph.set_ylim(bottom=0)
        self.fps_graph.tick_params(axis='y', colors='r')
        self.eps_graph.set_xlabel('episode')
        self.eps_graph.set_xlim(left=1, right=max(2,n_episodes))
        self.eps_graph.grid(b=True, axis='both')
        plt.pause(0.1)
    def close(self):
        """Close every open matplotlib figure."""
        plt.close('all')
| true |
374bd51389b52a21c7fc2e589961154260e652db | Python | gjwei/leetcode-python | /easy/twoSum.py | UTF-8 | 521 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created by gjwei on 2016/12/6
'''
class Solution(object):
    def twoSum(self, nums, target):
        """
        Return the indices of the two entries of ``nums`` summing to ``target``
        (or None when no such pair exists).

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Maps each still-needed complement value to the index that wants it.
        wanted = {}
        for idx, value in enumerate(nums):
            if value in wanted:
                return [wanted[value], idx]
            wanted[target - value] = idx
# Quick manual check: for [3, 2, 4] and target 6 the matching indices are [1, 2].
s = Solution()
a = [3, 2, 4]
print(s.twoSum(a, 6))
| true |
b06af184b5e7d250a4a919d57396d039b20e85e1 | Python | matthiasamberg/TigerJython---The-fantastic-Elevator-Game | /functions.py | UTF-8 | 2,630 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | # coding=UTF-8
# code zum starten des Spiels - bitte ignorieren
import os, sys
def setElevatorDestination(floor):
    """Command the elevator to drive to *floor* (0-based).

    Shows an error dialog and calls gs.reset() when the elevator is still
    busy or when *floor* is out of range.
    NOTE(review): execution continues after gs.reset(), so the destination
    is still set on the reset game state afterwards -- confirm intended.
    """
    if gs.elevators[0].state != "waitingForCommand":
        msg="The Elevator is busy (Did you call setElevatorDestination() twice in the play() function?)"
        msgDlg(msg,title="Error")
        print (msg)
        gs.reset()
    if floor < 0 or floor >= getNumFloors():
        message="This floor does not exist while calling setElevatorDestination(): "+str(floor)
        msgDlg(message,title="Error!")
        print (message)
        gs.reset()
    gs.elevators[0].setDestinationFloor(floor)
def getNumFloors():
    """Return the total number of floors in the building."""
    return gs.getNumFloors()
def getTopFloor():
    """Return the index of the highest floor (floors are numbered from 0)."""
    return gs.getNumFloors()-1
def getCurrentFloor():
    """Return the floor the (single) elevator is currently at."""
    return gs.elevators[0].getCurrentFloor()
def getNumberOfPassengersWithDestination(floorNum):
    """Count the passengers currently on board whose destination is floorNum."""
    riders = gs.elevators[0].getPassengers()
    return sum(1 for rider in riders if rider.getEndFloor() == floorNum)
def getNumberOfPassengers():
    """Return how many passengers are currently riding the elevator."""
    return len(gs.elevators[0].getPassengers())
def getNumberOfWaitingPassengers(floorNum):
    """Return how many people are waiting for the elevator on floor floorNum."""
    return len(gs.floors[floorNum].getWaitingPersons())
def isElevatorFull():
    """Return True when the elevator carries its maximum number of passengers."""
    return len(gs.elevators[0].getPassengers()) == gs.elevators[0].getMaxPassengers()
def isElevatorEmpty():
    """Return True when nobody is riding the elevator."""
    return len(gs.elevators[0].getPassengers()) == 0
# returns the closest floor number (not current floor) where passengers are waiting or -1 if there are no passengers anywhere
def closestFloorWithWaitingPassengers():
    """Return the floor nearest to the elevator (excluding its current floor)
    where at least one passenger is waiting, or -1 if nobody waits anywhere.

    Fix: the previous version initialised its best distance to the magic
    number 99, so a floor 99 or more steps away could never be selected in a
    very tall building.
    """
    currentFloor = getCurrentFloor()
    candidates = [floor for floor in range(0, getNumFloors())
                  if floor != currentFloor
                  and getNumberOfWaitingPassengers(floor) > 0]
    if not candidates:
        return -1
    # On a distance tie min() keeps the first (lowest-numbered) floor,
    # matching the original strict '<' comparison.
    return min(candidates, key=lambda floor: abs(currentFloor - floor))
# returns the closest floor where passengers in the elevator want to exit
def closestDestinationFloor():
    """Return the floor nearest to the elevator's current floor where at
    least one passenger on board wants to get off, or -1 if the elevator is
    empty.

    Fix: the previous version initialised its best distance to the magic
    number 99, so a destination 99 or more floors away could never be
    returned in a very tall building.
    """
    currentFloor = getCurrentFloor()
    candidates = [floor for floor in range(0, getNumFloors())
                  if floor != currentFloor
                  and getNumberOfPassengersWithDestination(floor) > 0]
    if not candidates:
        return -1
    # On a distance tie min() keeps the first (lowest-numbered) floor,
    # matching the original strict '<' comparison.
    return min(candidates, key=lambda floor: abs(currentFloor - floor))
# ignore...
# Wiring for the hosting environment: `play`, `GameState`, `globalvars` and
# `msgDlg` are not defined in this file -- presumably injected by the
# surrounding TigerJython game scripts.
globalvars.playfunction = play
globalvars.gs = GameState()
globalvars.gs.setup()
gs = globalvars.gs
# while not isDisposed():
#    delay(100)
04b34e6c6d3f62e31b9d01054b929b207c4cbd37 | Python | ashcoder2020/Python-Practice-Code | /take multiple user.py | UTF-8 | 209 | 3.484375 | 3 | [] | no_license |
# Small lambda demo: returns its argument plus 5.
num = lambda x: x + 5
print(num(10))
print("Program to take multiple user input ")
print("------------------------------------")
# Read two whitespace-separated integers from one input line.
# NOTE(review): the prompt contains a typo ("numbaers"), but it is runtime
# user-facing text so it is left unchanged here.
a,b=map(int,input("Enter two numbaers : ").split())
print(a,b)
ac1c0307dc6013e3c10e737075240adfcb6377c5 | Python | chae-heechan/Codeup_Algorithm_Study | /CodeUp/1535.py | UTF-8 | 324 | 3.421875 | 3 | [
"MIT"
] | permissive | # 함수로 가장 큰 값 위치 리턴하기
# Read N, then N integers, and print the largest value via a function.
count = int(input())
lst = [0]*count
elements = map(int, input().split())
times = 0
for i in elements:
    lst[times] = i
    times += 1
def f():
    # Linear scan for the maximum of the module-level list `lst`.
    # NOTE(review): the exercise name suggests returning the *position* of
    # the largest value, but this prints the value itself -- confirm.
    max_num = lst[0]
    for i in range(count):
        if max_num < lst[i]:
            max_num = lst[i]
    print(max_num)
f()
3fc1855c77175b9bfc1ea27a62bde76f8eddd630 | Python | Adiel30/Windows | /venv/Comprehensions.py | UTF-8 | 957 | 4.03125 | 4 | [] | no_license | # Will put a list on evrey letter in th word
lst = [x for x in 'word'] # x in 'word' PRINT W O R D # x for x Crete the , fo the list
print(lst)
# Example 2
lst = [x**2 for x in range(0,11)] # # in Range of 0-10 Make A list of evrey number in power of 2
print(lst)
#Example 3
lst = [x for x in range(11) if x % 2 == 0] # In range of 0-10 make a list just for modlue = 0 # כלומר תעשה את הפעולה רק כאשר המשתנה יהיה שווה ל...
print(lst)
# Example 4
lst = [x for x in range(11) if x % 2 != 0]
print(lst)
#Example 5
# Convert Celsius to Fahrenheit
celsius = [0,10,20.1,34.5]
fahrenheit = [((9/5)*temp + 32) for temp in celsius ] # Divid 9/5*temp(varlible) + 32 FOR EACH value in Celsius List
print(fahrenheit)
#Example 6
# Nasted FOR INSIDE FOR
lst = [ x**2 for x in [x**2 for x in range(11)]] #Start For Inside # X in power of 2 in range 0-10 List # once Again X power of 2 for evrey number in the first list
print(lst) | true |
09d6d241342d52142c2d606b88be2a74d38ee6c4 | Python | ritou11/wxPubVis | /backend/dataproc/pub_theme.py | UTF-8 | 6,521 | 2.578125 | 3 | [
"MIT"
] | permissive | # encoding=utf-8
import os
import jieba
from pymongo import MongoClient
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfTransformer
import pickle
def load_stopwords():
    """Read stop_words.txt and return one stripped stopword per line.

    Fix: uses a context manager so the file handle is closed even if reading
    raises (the original leaked the handle on error).
    """
    with open('stop_words.txt', 'r') as f_stop:
        return [line.strip() for line in f_stop]
# Tokenise with jieba and filter out stopwords.
def word_cut(text):
    """Return *text* segmented by jieba as space-separated tokens, with the
    module-level `stopwords` and tab tokens filtered out."""
    text = str(text)
    seg = jieba.cut(text.strip())
    outstr = ""
    for word in seg:
        if word not in stopwords:
            if word != '\t':
                outstr += word
                outstr += " "
    return outstr
# Print the top related words of each topic.
def print_top_words(model, feature_names, n_top_words):
    """Print, for every topic of *model*, its n_top_words highest-weighted terms."""
    for topic_idx, topic in enumerate(model.components_):
        print("Topic #%d:" % topic_idx)
        print(" ".join([str(feature_names[i])
                        for i in topic.argsort()[:-n_top_words - 1:-1]]))
# Per-document topic weights.
def doc_top(model, tf):
    """Return the document-topic weight matrix for term matrix *tf*.

    NOTE(review): this calls fit_transform(), which re-fits *model* on *tf*
    instead of only transforming -- confirm re-fitting is intended.
    """
    docres = model.fit_transform(tf)
    return docres
def extractPubPosts(msg, pname):
    """Collect posts matching the Mongo query *msg* into a pandas DataFrame.

    Only documents with a 'content' field are kept; a missing 'readNum'
    defaults to 0.  *pname* is stored as the profile name on every row.
    """
    pid = list()
    pubname = list()
    tit = list()
    dig = list()
    con = list()
    readNum = list()
    pstcursor = pstcol.find(msg)
    prfcursor = prfcol.find_one(msg)  # NOTE(review): result is never used
    for i, s in enumerate(pstcursor):
        if 'content' in s:
            pubname.append(pname)
            pid.append(str(s['_id']))
            tit.append(str(s['title']))
            dig.append(str(s['digest']))
            con.append(str(s['content']))
            if 'readNum' in s:
                readNum.append(s['readNum'])
            else:
                readNum.append(0)
    df = pd.DataFrame({
        'pid': pid,
        'pubname': pubname,
        'title': tit,
        'digest': dig,
        'content': con,
        'readNum': readNum
    })
    return df
# theme:{msgBiz, theme, weight}
# post:{msgBiz, pid, theme:[name, weight, contrib]}
# Connect to the database
conn = MongoClient("mongodb://localhost:27017")
db = conn.wechat_spider
pstcol = db.posts
prfcol = db.profiles
stopwords = load_stopwords()
post = db.pubposts
theme = db.perpub
# For every profile: tokenise its posts, fit an LDA topic model and write the
# per-post topic weights plus per-topic importances back to Mongo.  The
# tokenised DataFrame, the LDA model and the doc-topic matrix are each cached
# on disk and reloaded on later runs.
prfcursor = prfcol.find(no_cursor_timeout=True)
for num, pn in enumerate(prfcursor):
    print(f'#{num + 1} profile {pn["title"]}...')
    cuttedName = f'{pn["msgBiz"]}.pkl'
    if os.path.exists(cuttedName):
        with open(cuttedName, 'rb') as f:
            df = pickle.load(f)
        print("Cut loaded from old")
    else:
        print('jieba...')
        df = extractPubPosts({
            'msgBiz': pn['msgBiz']
        }, pn['title'])
        con = df['title'] + df['content']
        df['con'] = con
        df['con_cutted'] = df.con.apply(word_cut)
        with open(cuttedName, 'wb') as f:
            pickle.dump(df, f)
        print('Done jieba!')
    print(f'{len(df)} posts in the profile.')
    if len(df) <= 1:
        continue
    n_features = 1000
    n_topics = 30
    n_top_words = 50
    tf_vectorizer = CountVectorizer(max_features=n_features,
                                    stop_words='english',
                                    max_df=0.4,
                                    min_df=10)
    tf = tf_vectorizer.fit_transform(df.con_cutted)
    print('Done fit_transform!')
    ldaName = f'{pn["msgBiz"]}.lda'
    if os.path.exists(ldaName):
        with open(ldaName, 'rb') as f:
            lda = pickle.load(f)
        print("LDA loaded from old")
    else:
        print('lda...')
        lda = LatentDirichletAllocation(learning_method='online',
                                        n_components=n_topics,
                                        perp_tol=0.001,
                                        doc_topic_prior=0.001,
                                        topic_word_prior=0.001,
                                        max_iter=300,
                                        n_jobs=-1,
                                        verbose=1)
        lda.fit(tf)
        with open(ldaName, 'wb') as f:
            pickle.dump(lda, f)
        print('Done lda!')
    tf_feature_names = tf_vectorizer.get_feature_names()
    print("文章-主题权重")
    docresName = f'{pn["msgBiz"]}-docres.lda'
    if os.path.exists(docresName):
        with open(docresName, 'rb') as f:
            docres = pickle.load(f)
        print("Docres loaded from old")
    else:
        print('docres...')
        docres = doc_top(lda, tf)
        with open(docresName, 'wb') as f:
            pickle.dump(docres, f)
        print('Done docres!')
    print("文章-主题贡献")
    # Weight each post's topic distribution by its read count: contribution
    # of post i to topic j is docres[i, j] * readNum[i].
    readn = df['readNum']
    readnum = np.array(df['readNum']).reshape(len(readn), 1)
    readnum = readnum.repeat(30, axis=1)
    # NOTE(review): np.float was removed in NumPy 1.24 -- use float instead.
    readnum = readnum.astype(np.float)
    contrib = np.multiply(docres, readnum)
    # Upsert one document per post with its per-topic weight/contribution.
    for idx in range(0, len(df)):
        post_dict = dict()
        post_dict['msgBiz'] = str(pn['msgBiz'])
        post_dict['pId'] = str(df['pid'][idx])
        for j in range(n_topics):
            if 'themes' in post_dict:
                post_dict['themes'].append({
                    'name': f'主题{j + 1}',
                    'weight': docres[idx][j],
                    'contrib': contrib[idx][j]
                })
            else:
                post_dict['themes'] = [{
                    'name': f'主题{j + 1}',
                    'weight': docres[idx][j],
                    'contrib': contrib[idx][j]
                }]
        result = post.update_one({'pId': post_dict['pId']},
                                 {'$set': post_dict}, upsert=True)
    # Upsert one document per profile: each topic's summed contribution
    # (importance) and its top keywords.
    top_dict = {
        'msgBiz': pn['msgBiz'],
        'themes': []
    }
    for idx in range(n_topics):
        sum_contrib = 0
        for j in range(len(df)):
            sum_contrib += contrib[j][idx]
        keywords = [str(tf_feature_names[i])
                    for i in lda.components_[idx].argsort()[:-n_top_words - 1:-1]]
        top_dict['themes'].append({
            'name': f'主题{idx + 1}',
            'importance': sum_contrib.item(),
            'keywords': keywords
        })
    print(top_dict)
    result = theme.update_one({
        'msgBiz': top_dict['msgBiz']
    }, {'$set': top_dict}, upsert=True)
prfcursor.close()
| true |
e31e1dedd8bd8e214550a931e9f80fa1a05b9607 | Python | forana/simplesvg | /example.py | UTF-8 | 365 | 3.03125 | 3 | [] | no_license | """
python example.py
"""
import simplesvg
# Draw a few shapes onto a 200x200 canvas and print the resulting SVG text.
svg = simplesvg.SVG(200, 200)
svg.circle(100, 100, 100, fill = "blue", stroke = "green", strokeWidth = "3", id="c1")
svg.rectangle(50, 50, 100, 50, fill = "green")
svg.line(100, 0, 100, 180, stroke = "red", strokeWidth = "5")
svg.polygon([(50, 100), (100, 100), (150, 150)], fill = simplesvg.rgb(255,200,0))
# print() emits the object's string representation (the SVG document).
print(svg)
| true |
36e74227fc22842d0bcef287dc62e59e5ad7fb94 | Python | ciaranB3/CameraCal | /CameraCalibration_CB.py | UTF-8 | 3,774 | 3.359375 | 3 | [] | no_license | '''cs410 camera calibration assignment
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import axes3d
from numpy.linalg import eig
def calibrateCamera3d(data):
    """Estimate the 3x4 perspective projection matrix via the DLT method.

    data : (N, 5) array whose rows are [X, Y, Z, x, y] -- a 3D calibration
           point and its measured 2D image projection (N >= 6 required).

    Returns the 3x4 projection matrix M, defined up to scale and normalised
    so that M[0, 1] == 1 (matching the original implementation).

    Fix: the original took the *last* column of numpy.linalg.eig's output,
    assuming it belonged to the smallest eigenvalue, but eig() does not order
    its eigenvalues.  numpy.linalg.eigh() is both the right routine for the
    symmetric matrix A^T A and guarantees ascending eigenvalue order, so the
    first column is the null-space direction that minimises |A m|.
    """
    N = data.shape[0]
    A = np.zeros((2 * N, 12))
    for i in range(N):
        # Homogeneous world point [X, Y, Z, 1] and its measured image point.
        P = np.append(data[i, 0:3], 1.0)
        x = data[i, 3]
        y = data[i, 4]
        # Two DLT rows per correspondence:
        #   [ P  0  -x*P ]   and   [ 0  P  -y*P ]
        A[2 * i, 0:4] = P
        A[2 * i, 8:12] = -x * P
        A[2 * i + 1, 4:8] = P
        A[2 * i + 1, 8:12] = -y * P
    AtA = A.T.dot(A)                 # 12 x 12 symmetric normal matrix
    d, v = np.linalg.eigh(AtA)       # eigenvalues sorted ascending
    m = v[:, 0] / v[1, 0]            # smallest-eigenvalue eigenvector, rescaled
    M = np.vstack([m[0:4], m[4:8], m[8:12]])
    return M
def visualiseCameraCalibration3D(data, P):
    """Render a 2D plot comparing
    i) the measured 2D image points (red) with
    ii) the reprojection of the 3D calibration points computed by P (blue)."""
    coords = matrixP(data) # 4xN homogeneous world coordinates
    p = P.dot(coords) # project: p = M P
    w = p[-1] # homogeneous scale factor of each projected point
    p = p / w[None,:] # divide each column by w to get pixel coordinates
    fig = plt.figure()
    ax = fig.gca()
    ax.set_title('Measured Image Points and Reprojected Points Computed by P')
    ax.axis([0, 800, 0, 700])
    ax.plot(data[:,3], data[:,4],'r.')
    ax.plot(p[0,:], p[1,:], 'b.')
    blue_dots = mpatches.Patch(color='blue', label='Reprojected points')
    red_dots = mpatches.Patch(color='red', label='Measured points')
    plt.legend(handles=[red_dots, blue_dots])
    plt.show()
def evalutateCameraCalibration3D(data, P):
    """Prints the mean, variance, minimum and maximum distances in
    pixels between the measured and reprojected image feature locations.

    NOTE(review): the misspelled name ('evalutate') is kept because the
    __main__ block calls it under this name.  The print statements below use
    Python 2 syntax, so this file targets Python 2."""
    coords = matrixP(data) # 4xN homogeneous world coordinates
    p = P.dot(coords) # project: p = M P
    w = p[-1] # homogeneous scale factor of each projected point
    p = p / w[None,:] # divide each column by w to get pixel coordinates
    # Create empty numpy matrix to store the error of each point
    errors = np.empty([p.shape[1],1])
    for i in range(p.shape[1]):
        # The error of each point is the hypotenuse of the distance in x and y
        x = p[0,i] - data[i,3]
        y = p[1,i] - data[i,4]
        errors[i] = (x**2 + y**2)**0.5
    print ' Mean error \t= ', np.mean(errors), '\tpixels'
    print ' Variance \t= ', np.var(errors), '\tpixels'
    print ' Minimum error \t= ', np.amin(errors), '\tpixels'
    print ' Maximum error \t= ', np.amax(errors), '\tpixels'
def matrixP(data):
    """Build the 4xN homogeneous world-coordinate matrix from the data rows:
    each input row's X Y Z becomes a column [X, Y, Z, 1]^T."""
    world = data[:, 0:3]
    ones_column = np.ones((world.shape[0], 1), dtype=data.dtype)
    homogeneous = np.hstack([world, ones_column])   # rows of X Y Z 1
    return homogeneous.transpose()                  # one point per column
if __name__ == "__main__":
print "\nStarting programme...\n"
data = np.loadtxt('data.txt')
M = calibrateCamera3d(data)
evalutateCameraCalibration3D(data,M)
visualiseCameraCalibration3D(data, M) | true |
aaabb99f4cf99d13fe9d58200a0cbc75b9cfca95 | Python | kartikeya-shandilya/project-euler | /python/116.py | UTF-8 | 259 | 3.078125 | 3 | [] | no_license | #!/usr/bin/python
# generalized:
# Project Euler 116 (from the file path): arr1/arr2/arr3 extend three
# sequences with the recurrences f(i) = f(i-1) + f(i-2), f(i-1) + f(i-3)
# and f(i-1) + f(i-4) respectively, seeded for row lengths 0..4.
arr1=[0,1,2,3,5]
arr2=[0,1,1,2,3]
arr3=[0,1,1,1,2]
for i in range(5,51):
    m=i-1
    n=i-2
    o=i-3
    p=i-4
    j=arr1[m]+arr1[n]
    k=arr2[m]+arr2[o]
    l=arr3[m]+arr3[p]
    # Sum of the three counts minus 3 -- presumably removing the all-black
    # arrangement counted once in each sequence (TODO confirm against the
    # problem statement).  Python 2 print statement.
    print i,j+k+l-3
    arr1.append(j)
    arr2.append(k)
    arr3.append(l)
| true |
87228349d7911fac20a7db66840db61bc5e7cb50 | Python | rheehot/problem_solving-1 | /BOJ/백트래킹/신기한소수.py | UTF-8 | 1,277 | 3.0625 | 3 | [] | no_license | import sys
# Redirect stdin so input() reads from the test file instead of the console.
sys.stdin = open("신기한소수.txt","r")
def solve(index, word):
    """Recursively examine prefixes of *word*; once index passes N, the full
    number is appended to the module-level `result` list.

    NOTE(review): this looks like an unfinished experiment -- the inner loop
    mutates a shared `prime` list and compares a value against a loop *index*
    (`i!=j`), and the driver below calls solve(0,'4000') with a hard-coded
    word while the loop over all N-digit numbers is commented out.  Verify
    before trusting the output.
    """
    global start
    if index == N+1:
        number = int(word)
        result.append(number)
        return
    val = word[:index+1]
    val = int(val)
    val = int(val**0.5)
    for i in range(2, val+1):
        for j in range(len(prime)):
            if prime[j]>i:
                break
            elif not i%prime[j] and i!=j:
                # start = i+1
                return
        else:
            prime.append(i)
    solve(index+1, word)
N = int(input())
result = []
prime = [2]
start = 2
# for num in range(2*(10**(N-1)),10**N):
#     word = str(num)
#     solve(0, word)
solve(0,'4000')
result.sort()
for i in range(len(result)):
    print(result[i])
# #에라토스테네스의 체
# def solve(index, word):
# if index == N+1:
# number = int(word)
# result.append(number)
# return
# val = word[:index+1]
# val = int(val)
# n = int(val**0.5)
# for j in range(2, n+1):
# if not (val % j):
# return
# solve(index+1, word)
#
# N = int(input())
# result = []
# for num in range(2*(10**(N-1)),10**N):
# word = str(num)
# solve(0, word)
# result.sort()
# for i in range(len(result)):
# print(result[i]) | true |
c1905822c40d8d174264d4b335ad6c580a892115 | Python | amaranmk/comp110-21f-workspace | /exercises/ex03/happy_trees.py | UTF-8 | 298 | 3.3125 | 3 | [] | no_license | """Drawing forests in a loop."""
__author__ = "730484862"
# The string constant for the pine tree emoji
TREE: str = '\U0001F332'
output: str = ""
i: int = 0
j: int = 0
user_depth: int = int(input("Depth: "))
while i < user_depth:
output = output + TREE
print(output)
i = i + 1
| true |
c8d7d5365e9ed5243a9db9e69d1f1fd837dfad8d | Python | smart-trains/raspberry-pi | /test/read_temp.py | UTF-8 | 657 | 2.671875 | 3 | [] | no_license | from digitemp.master import UART_Adapter
from digitemp.device import DS18B20
import http.client as http
import json
server = "52.65.244.105"
api = "/api/temperature"
bus = UART_Adapter('/dev/serial0') # DS9097 connected to COM1
# only one 1-wire device on the bus:
sensor = DS18B20(bus)
sensor.info()
temp = sensor.get_temperature()
# get temperature
print(temp)
conn = http.HTTPConnection(server)
headers = {'Content-type': 'application/json'}
try:
conn.request("POST", api, json.dumps({'temperature': temp}), headers)
print("report to {0} successful".format(server))
except:
print("report to {0} failed".format(server))
finally:
conn.close()
| true |
49a3925a4709a2e5a093a8bfdb4ebb1feeffa725 | Python | upskyy/Baekjoon-Online-Judge | /Data-Structures/(9093)단어 뒤집기.py | UTF-8 | 539 | 3.3125 | 3 | [] | no_license | import sys
input = sys.stdin.readline
num = int(input())
for _ in range(num):
    line = input()
    # readline keeps the trailing '\n'; feed one extra '\n' sentinel so the
    # final word is always flushed (mirrors the original appended newline).
    reversed_chars = []
    word_stack = []
    for ch in list(line) + ['\n']:
        if ch == ' ' or ch == '\n':
            # Flush the current word in reverse; spaces are preserved as-is.
            while word_stack:
                reversed_chars.append(word_stack.pop())
            if ch != '\n':
                reversed_chars.append(' ')
        else:
            word_stack.append(ch)
    print("".join(reversed_chars))
| true |
1602c4df5a340b432f768bd0e5fed0afcc9d08bf | Python | douyixuan/LeetCode | /786.py | UTF-8 | 366 | 3.09375 | 3 | [] | no_license | #!/usr/bin/python
class Solution(object):
    def kthSmallestPrimeFraction(self, A, K):
        """
        Return [p, q] such that p/q is the K-th smallest fraction A[i]/A[j]
        (i < j) formed from the sorted list of primes A (LeetCode 786).

        :type A: List[int]
        :type K: int
        :rtype: List[int]

        Fix: the original assigned into an empty *tuple* (TypeError), sorted
        with a key reading a non-existent dict field, and indexed with an
        undefined lowercase `k` -- it could never run.  This version simply
        enumerates all fractions, sorts them, and picks the K-th smallest.
        """
        fractions = []
        for j in range(1, len(A)):
            for i in range(j):
                # float() keeps the ratio usable for ordering under both
                # Python 2 and Python 3 division rules.
                fractions.append((A[i] / float(A[j]), A[i], A[j]))
        fractions.sort()
        _, numerator, denominator = fractions[K - 1]
        return [numerator, denominator]
| true |
7759367fa444ea49df687baa4da3d5c3609ff1f6 | Python | nuvention-web/A-2019-backend | /testsite/utils/weather.py | UTF-8 | 365 | 2.859375 | 3 | [] | no_license | import requests
import pytemperature
def getWeatherInfo():
    """Fetch the current temperature for Evanston, US and return it rounded
    to 2 decimal places.

    Queries the OpenWeatherMap REST API and converts the reading with
    pytemperature.k2c (Kelvin to Celsius).
    NOTE(review): the API key is hard-coded in the URL -- move it to
    configuration before publishing.
    """
    api_address = 'http://api.openweathermap.org/data/2.5/weather?q=Evanston,us&APPID=00635a2705abb24f3c1e116788d7614e'
    json_data = requests.get(url=api_address).json()
    formatted_data = json_data['main']
    temperature = pytemperature.k2c(formatted_data['temp'])
    return round(temperature, 2)
14f83620e4b59b2cd2067b682289e4996f22b318 | Python | THeK3nger/yoshix | /yoshix/yoshix.py | UTF-8 | 6,872 | 3.125 | 3 | [] | no_license | from itertools import product
from yoshix.yoshiegg import YoshiEgg, YoshiEggKeyException
class YoshiExperiment(object):
    """
    `YoshiExperiment` is the base class for every user created experiment.
    The class provide the basic interface and infrastructure to register data,
    run single experiments, generate input data and so on.
    The class should never be initialized by its own. This should be always
    extended by a child class.
    """
    def __init__(self):
        self.name = self.__class__.__name__
        self._generators = {} # Dictionary to map a parameter to a generator.
        self.__egg = None
        self._egg_is_ready = False # Egg is ready only after the experiment.
        self.__empty_row = None # Store the initialization values for the egg rows.
        self.__run_counter = 0 # Store the iteration number.
        self.__fixed_parameters = {} # Store the fixed parameters.
        self.__generators_iterator = None # Store the combined product-iterator for every generators.
        self.__parameter_transformer = {} # Store a transformation function for the egg representation of the parameter.
        self.__private_generators = [] # List of hidden generators. Hidden generators are not put in the egg.
    def setup(self):
        """
        This function is called before the experiment is started. This can be
        used to initialize variables, generators and every other detail.
        :return: None
        """
        pass
    def single_run(self, params):
        """
        This represent the atomic experiment run.
        :param params List of parameters for the experiment run. This is automatically
        generated by the parent method.
        :return: None
        """
        pass
    def _run_experiment(self):
        """
        The wrapping experiment loop. This function invokes single_run for
        every combination of **variable parameters** provided by the generators.
        :return: None
        """
        self.__run_counter = 0
        # NOTE(review): a StopIteration raised *inside* single_run would also
        # end the loop silently, since it is caught by the same handler that
        # detects generator exhaustion.
        while True: # TODO: Is there a better way to iterate until the Iterator is empty?
            try:
                # Generating Parameters dictionary
                params = self.__fixed_parameters.copy()
                params.update(self.__generate())
                self.__egg.add_row(self.__empty_row)
                self.__run_counter += 1
                # Add the variable parameters to the egg.
                for g in self._generators.keys():
                    if g not in self.__private_generators:
                        self.__egg[g] = self.__apply_transformer(g, params[g])
                self.single_run(params)
            except StopIteration:
                break
        self._egg_is_ready = True
    def after_run(self):
        """
        This method is invoked after the experiment is completed.
        Can be used to package the result Egg, clean up the disk, export to CSV and more.
        :return:
        """
        pass
    @property
    def partial_egg(self):
        """
        :return: Return an external reference to the experiment Egg to be used **during** the experiment.
        """
        if self.__egg is None:
            raise EggNotReady("Try to access an egg that is None")
        else:
            return self.__egg
    @property
    def egg(self):
        """
        :return: Return an external reference to the experiment **AFTER** the experiment.
        """
        if self._egg_is_ready:
            return self.__egg
        if self.__egg is None:
            raise EggNotReady("Try to access an egg that is None")
        elif not self._egg_is_ready:
            raise EggNotReady("The egg is there but the experiment is not completed yet!\n\
            Maybe you are looking for partial_egg?")
        else:
            raise Exception("Something is really wrong there!")
    @property
    def run_counter(self):
        """
        :return: Return the number of the current iteration.
        """
        return self.__run_counter
    def setup_egg(self, data_headers, row_initialization=None):
        """
        This method is used to initialize the experiment egg.
        :param data_headers: The tuple of the experiment parameters and desired computed outputs.
        :param row_initialization: A vector representing an empty row. Default is a vector of zeros.
        :return:
        """
        self.__egg = YoshiEgg(data_headers)
        # If row_init is None we assume all zeroes.
        if row_initialization is None:
            row_initialization = tuple((0 for _ in data_headers))
        if len(row_initialization) == len(data_headers):
            self.__empty_row = row_initialization
        else:
            raise YoshiEggKeyException("Initialization vector does not match the header.")
    def assign_generators(self, key, generator, private=False):
        """
        Link a generator with a particular parameter of the algorithm.
        :param key: The parameter key identifier.
        :param generator: The desired generator.
        :return:
        """
        if key not in self.__egg and not private:
            raise YoshiEggKeyException("It is not possible to attach a generator to an unknown key!")
        self._generators[key] = generator
        # Rebuild the combined cartesian-product iterator over all generators.
        gen_list = [v for _, v in self._generators.items()]
        self.__generators_iterator = product(*gen_list)
        if private:
            self.__private_generators.append(key)
    def assign_transformer(self, key, transformer):
        """
        Link a transformation function with a parameter; the transformed value
        is what gets stored in the egg (see __apply_transformer).
        :param key: The parameter key identifier.
        :param transformer: A one-argument callable applied to the raw value.
        """
        if key not in self.__egg:
            raise YoshiEggKeyException("It is not possible to attach a transformator to an unknown key!")
        self.__parameter_transformer[key] = transformer
    def assign_fixed_parameter(self, key, value):
        """
        Link a parameter with a fixed value.
        :param key: The parameter key identifier.
        :param value: The desired value.
        :return:
        """
        if key not in self.__egg:
            raise YoshiEggKeyException("It is not possible to attach a value to an unknown key!")
        self.__fixed_parameters[key] = value
    def __generate(self):
        """
        This is used in order to generate a new set of variable parameters (using the generators list)
        :return: A dictionary with the current variable parameters values.
        (Returns None when no generator iterator has been set up.)
        """
        if self.__generators_iterator is not None:
            current_iteration = next(self.__generators_iterator)
            return {k: v for k, v in zip(self._generators.keys(), current_iteration)}
    def __apply_transformer(self, key, value):
        """Apply the transformer registered for *key*, if any; otherwise
        return *value* unchanged."""
        return self.__parameter_transformer[key](value) \
            if key in self.__parameter_transformer else value
    def run(self):
        """Execute the full experiment life-cycle: setup, the run loop, then
        the after-run hook."""
        self.setup()
        self._run_experiment()
        self.after_run()
class EggNotReady(Exception):
    """Raised when the experiment egg is accessed before it exists or before
    the experiment has completed."""
    pass
| true |
5ea87279a0762c5372c787822d3d775d447face3 | Python | nanxung/-Scrapy | /zhihu/pipelines.py | UTF-8 | 1,373 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
class ZhihuPipeline(object):
    """Default pass-through pipeline: returns every item unchanged."""
    def process_item(self, item, spider):
        return item
class MysqlPipeline(object):
    """Scrapy pipeline inserting each scraped item into the MySQL `user`
    table via pymysql.

    NOTE(review): credentials are hard-coded below -- move them to settings.
    """
    def __init__(self):
        self.conn=pymysql.connect(
            host='localhost', # local 127.0.0.1
            port=3306, # default MySQL port
            user='root', # MySQL superuser
            passwd='****', # root password
            db='zh', # database name
            charset='utf8'
        )
    def process_item(self,item,spider):
        self._conditional_insert(self.conn.cursor(),item) # perform the insert
        # query.addErrback(self._handle_error,item,spider) # error-handling hook (disabled)
        return item
    def _conditional_insert(self,tx,item):
        # Parameterised insert: values are bound by pymysql, not interpolated.
        sql="insert into user(id,url,nick_name,summary,content) values(%s,%s,%s,%s,%s)"
        params=(item["Id"],item["Url"],item['Nick_name'],item['Summary'],item['Content'])
        tx.execute(sql,params)
        print('已经插入一条数据!')
        tx.close()
        self.conn.commit()
        # self.conn.close()
    # Error handler -- appears unused (see the disabled addErrback call above).
    def _handle_error(self, failue, item, spider):
        print(failue)
| true |
5a813967667fa134bd8ddc8237a8ba774123a907 | Python | ariannedee/intro-to-python | /Problems/problem_7_new_years.py | UTF-8 | 120 | 2.6875 | 3 | [] | no_license | """
Start at 10 seconds and count down until 1 and then print "Happy New Year! 🎉"
"""
print('Happy New Year! 🎉')
| true |
d54963d896ef47815e1aaae42bf9ef366143027d | Python | inkyu0103/BOJ | /DFS , BFS/1926.py | UTF-8 | 868 | 3.203125 | 3 | [] | no_license | #1926 그림
from collections import deque
import sys
input = sys.stdin.readline
def sol():
    """BOJ 1926: read an n x m grid of 0/1 cells, then print the number of
    connected regions of 1s (4-directional) and the size of the largest."""
    n,m = map(int,input().split())
    dirs=[[0,1],[0,-1],[1,0],[-1,0]]  # right/left/down/up neighbour offsets
    room,area = 0,0
    graph = [list(map(int,input().split())) for _ in range(n)]
    def bfs(r,c):
        # Flood-fill from (r, c), zeroing visited cells; returns region size.
        q = deque([[r,c]])
        graph[r][c] = 0
        area = 1
        while q:
            cur_r,cur_c = q.popleft()
            for dr,dc in dirs:
                new_r,new_c = cur_r+dr,cur_c+dc
                if 0<=new_r<n and 0<=new_c<m and graph[new_r][new_c]:
                    graph[new_r][new_c] = 0
                    q.append([new_r,new_c])
                    area+=1
        return area
    for r in range(n):
        for c in range(m):
            if graph[r][c]:
                room += 1
                area = max(bfs(r,c),area)
    print(room)
    print(area)
sol()
| true |
d867ba0a92468421b04fb933f14123ccde65e5f9 | Python | IbrahimAC/programming-python | /functions/fruit_questions/trash_fruit.py | UTF-8 | 882 | 4.40625 | 4 | [] | no_license | """Find trashfruits"""
# Go through list of fruits. Check if the fruit is trash or good.
# Trash fruits are any fruits longer than 5 letters
# Change the names of the trash fruits to "Trash" in the list.
# Return the newlistoffruits
listoffruits = ["Cherry", "Mango", "Apple", "Peach", "Banana", "Plum", "Grape", "Lemon", "Jackfruit"]
trashfruits = []
def trashFruitDetector(fruits):
    """Return a new list in which every word longer than 5 letters is
    replaced by the string 'trash'; shorter words are kept unchanged.

    input: a list of strings
    output: a new list of strings (the input list is not modified)

    Fix: the previous version appended into the module-level `trashfruits`
    list, so repeated calls accumulated results from all earlier calls; it
    also used fruits.index(x), which picks the wrong position when the list
    contains duplicate names.
    """
    return ['trash' if len(fruit) > 5 else fruit for fruit in fruits]
# Demo: "Cherry", "Banana" and "Jackfruit" (more than 5 letters) become 'trash'.
print(trashFruitDetector(listoffruits))
| true |
826304c7b511bf5a7d4da9f6799d74d39df44c5b | Python | dockerizeme/dockerizeme | /hard-gists/7310160/snippet.py | UTF-8 | 2,543 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #! /usr/bin/python
import Image
#_______________________________________________________load image/create 'canvas'
# Read the source photo and prepare a larger, light-grey output canvas,
# scaled up by the user-chosen integer factor (capped at 10).
# This script uses Python 2 syntax (print statements, raw_input, xrange).
source = Image.open("test26.jpg")
img = source.load()
print source.format
print source.size
print source.mode
x = source.size[0]
y = source.size[1]
scale=int(raw_input("\nscale: (the multiple the image is enlarged by .. original is '1') >>>"))
if scale>10:
    print "scale too high .. is >10 and for the sake of your RAM .. NO!"
    scale=10
    raw_input()
canvas2 = Image.new("RGB",(x*scale,y*scale),(240,240,240))
img00 = canvas2.load()
#_______________________________________________________run
# Sample the image in horizontal rows spaced j_spacing apart; each pixel's
# brightness (squared and divided by 2000) lifts the line upward, and the
# resulting ridgeline is drawn in grey with linear interpolation between
# consecutive samples.
j_spacing=int(raw_input("\nj_spacing (# of pixels between each row of peaks:) .. I like 8-15 usually >>>"))
j=j_spacing
points=[]
l1=1
while l1==1:
    if j%10==0:
        print j,"/",y
    points.append([])
    i=0
    l2=1
    jold=j
    iold=i
    while l2==1:
        r1=img[i,j][0]
        g1=img[i,j][1]
        b1=img[i,j][2]
        ave1=(r1+g1+b1)/3
        # NOTE(review): r2/g2/b2/ave2 are computed but apparently never used.
        r2=img[(i+1),j][0]
        g2=img[(i+1),j][1]
        b2=img[(i+1),j][2]
        ave2=(r2+g2+b2)/3
        altitude=ave1
        if altitude>0:
            #altitude=math.log(altitude,1.1)
            altitude=(altitude*altitude)/2000
        inew=i*scale
        jnew=(j-altitude)*scale
        if jnew>0:
            points[len(points)-1].append([inew,jnew])
            di=inew-iold
            dj=jnew-jold
            icurrent=float(0)
            jcurrent=float(0)
            # Interpolate along the dominant axis so the line has no gaps.
            if abs(di)>abs(dj):
                for k in range(0,abs(di)):
                    jcurrent=((k/float(abs(di)))*dj)+jold
                    icurrent=((k/float(abs(di)))*di)+iold
                    if jcurrent>=0:
                        img00[icurrent,jcurrent]=(100,100,100)
                        points[len(points)-1].append([icurrent,jcurrent])
            else:
                for k in range(0,abs(dj)):
                    icurrent=(k/float(abs(dj)))*di+iold
                    jcurrent=(k/float(abs(dj)))*dj+jold
                    icurrent=round(icurrent)
                    jcurrent=round(jcurrent)
                    if jcurrent>=0:
                        img00[icurrent,jcurrent]=(100,100,100)
                        points[len(points)-1].append([icurrent,jcurrent])
        iold=i*scale
        jold=(j-altitude)*scale
        i=i+1
        if i>=(x-1):
            l2=0
    j=j+j_spacing
    if j>=y:
        l1=0
#clear overlaps then re-write line
# Blank everything between each line and its row baseline (hiding lines
# behind the ridge in front of them), then re-draw the line points.
print
for k in xrange(0,len(points)):
    print k,"/",(len(points)-1)
    for l in xrange(0,len(points[k])):
        i=points[k][l][0]
        j=points[k][l][1]
        j0=j_spacing*(k+1)
        dj=int(abs(j-j0))
        for m in xrange(0,dj):
            if (j+m)<y:
                img00[i,j+m]=(240,240,240)
    for l in xrange(0,len(points[k])):
        i=points[k][l][0]
        j=points[k][l][1]
        if l==0:
            j0=j
        dj=int(abs(j-j0))*10
        img00[i,j]=(100,100,100)
#_______________________________________________________save
#source.save("template.png")
canvas2.save("peaks.png")
| true |
5c918d3fa9e4304847dc2890af963f080d6fd2f8 | Python | stocyr/BassNotes | /main.py | UTF-8 | 1,937 | 3.203125 | 3 | [] | no_license | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.config import Config
from kivy.clock import Clock
from random import choice
from math import floor
from kivy.core.audio import SoundLoader
class BassNotes(BoxLayout):
    """Root widget of the practice metronome: every beat plays a click,
    updates the beat label, and periodically shows a new random note name."""
    # Pool of note names from which the next target note is drawn.
    notes = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'Ab', 'Bb', 'Db', 'Eb', 'Gb', 'A#', 'C#', 'D#', 'F#', 'G#']
    sounding = True  # NOTE(review): never read in this class -- used from the .kv file?
    sound_delay = 0.15  # seconds between the click and the label updates
    def __init__(self):
        super(BassNotes, self).__init__()
        self.beat = 0
        self.next_note = choice(self.notes)
        self.sound_high = SoundLoader.load('metronome_click.ogg')
        self.sound_low = SoundLoader.load('metronome_click_low.ogg')
        self.on_beat()  # start the self-rescheduling beat loop
    def on_beat(self, *args):
        """Advance one beat of a 4-beat bar, play the click and reschedule."""
        beat_duration = 60.0 / self.ids.bpm_slider.value
        # Update beat (cycles 1..4; a new note is drawn when the bar wraps)
        if self.beat == 4:
            self.beat = 1
            self.next_note = choice(self.notes)
        else:
            self.beat += 1
        Clock.schedule_once(self.update_beat_text, self.sound_delay)
        # Play sound (high click on beat 1, low click otherwise)
        if self.ids.sound.state == 'down':
            if self.beat == 1:
                self.sound_high.play()
            else:
                self.sound_low.play()
        # Calculate when to show next note
        if self.beat == floor(self.ids.show_slider.value):
            # We must display the next note within *this* beat.
            delay = beat_duration * (self.ids.show_slider.value - self.beat)
            Clock.schedule_once(self.update_note, delay + self.sound_delay)
        # Re-schedule next beat
        Clock.schedule_once(self.on_beat, beat_duration)
    def update_beat_text(self, *args):
        """Show the current beat number in the UI label."""
        self.ids.beat.text = str(int(round(self.beat)))
    def update_note(self, *args):
        """Show the next target note name in the UI label."""
        self.ids.note.text = self.next_note
class BassNotesApp(App):
    """Kivy application wrapper; builds the BassNotes root widget."""
    title = 'Bass Notes'
    def build(self):
        """Return the root widget shown in the app window."""
        return BassNotes()
if __name__ == '__main__':
    # Start the Kivy event loop.
    BassNotesApp().run()
| true |
0620732f52f7027914169c297b7ee8589d72978d | Python | a-yasar/streaming_MapReduce | /mapupdate.py | UTF-8 | 1,686 | 2.703125 | 3 | [] | no_license | from optparse import OptionParser
from operators import Source, Mapper, Reducer
from states import StateManager
import os, logging, imp, Queue
def parse_arguments(args):
    """Validate the command line for an invocation of ``prog TASKFILE FILE``.

    ``args`` is expected to look like ``sys.argv``.  Returns a
    ``(taskfile, filename)`` tuple on success, or ``(None, None)`` after
    printing a usage message when the argument count is wrong.
    """
    if len(args) != 3:
        # Parenthesised print works on both Python 2 and Python 3; the
        # original used Python-2-only print statements.
        print('Invalid number of arguments')
        print('Usage: %s TASKFILE FILE' % (args[0]))
        print('Arguments:')
        print(' TASKFILE \t Script that contains map and update functions')
        print(' FILE \t File that will be processed')
        return (None, None)
    return (args[1], args[2])
def prepare_taskfile(taskfile):
    """Import ``taskfile`` (path to a Python script) and return the module.

    The module is located in the file's own directory and must provide the
    ``mapf`` and ``reducef`` callables consumed by main().
    NOTE(review): uses the deprecated ``imp`` module (removed in Python
    3.12); this file targets Python 2 (see the print statements above).
    """
    path = os.path.dirname(taskfile)
    # Module name = file name without extension.
    taskmodulename = os.path.splitext(os.path.basename(taskfile))[0]
    fp, pathname, description = imp.find_module(taskmodulename, [path])
    try:
        return imp.load_module(taskmodulename, fp, pathname, description)
    finally:
        # find_module opens the file; always close it, even on failure.
        if fp:
            fp.close()
def main(args):
    """Wire up and run the streaming map/update pipeline.

    ``args`` is expected to be ``sys.argv``.  The stages
    Source -> Mapper -> StateManager -> Reducer communicate through the
    queues created here and run as daemon threads; the function blocks
    until all of them have finished.
    """
    # It takes user defined functions, prepares queues and
    # and runs tasks.
    taskfile, filename = parse_arguments(args)
    if not taskfile or not filename:
        return  # usage error already reported by parse_arguments
    else:
        taskmodule = prepare_taskfile(taskfile)
        # Queues connecting the pipeline stages.
        line_q = Queue.Queue()
        map_q = Queue.Queue()
        reduce_q = Queue.Queue()
        update_q = Queue.Queue()
        source = Source(filename, line_q = line_q)
        mapper = Mapper(taskmodule.mapf, line_q, map_q)
        # NOTE(review): 100 is presumably a batch/window size -- confirm
        # against states.StateManager.
        state_man = StateManager(100, map_q, reduce_q, update_q)
        reducer = Reducer(taskmodule.reducef, reduce_q, update_q)
        # Daemonize so the process can still exit if a stage hangs.
        source.daemon = True
        mapper.daemon = True
        reducer.daemon = True
        state_man.daemon = True
        source.start()
        mapper.start()
        state_man.start()
        reducer.start()
        # Wait for the whole pipeline to drain.
        source.join()
        mapper.join()
        reducer.join()
        state_man.join()
if __name__ == "__main__":
    import sys
    # Entry point: forward the raw command line to main().
    main(sys.argv)
| true |
5c64b0bd4b3305646519c74711dc1caa4709305c | Python | aayushkumarjvs/Next-Tech-Reads | /Recommendation based on Age/collaborative_filtering_age.py | UTF-8 | 4,178 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python
# Implementation of collaborative filtering recommendation engine
from recommendation_data_age import dataset_age
from math import sqrt
def similarity_score(person1,person2):
    """Euclidean-distance based similarity between two people's ratings.

    Only items rated by both people (in ``dataset_age``) are considered.
    The raw distance ``d`` is mapped to ``1 / (1 + d)`` so that identical
    profiles score 1 and the score shrinks towards 0 as they diverge.
    Returns 0 when the two people share no rated items.
    """
    shared_items = [item for item in dataset_age[person1]
                    if item in dataset_age[person2]]
    if not shared_items:
        # Nothing in common -> no basis for a similarity estimate.
        return 0
    squared_diffs = sum(
        pow(dataset_age[person1][item] - dataset_age[person2][item], 2)
        for item in shared_items
    )
    return 1/(1+sqrt(squared_diffs))
def pearson_correlation(person1,person2):
    """Pearson correlation of two people's ratings over their common items.

    Returns 0 when there are no common items or when the denominator is
    zero (i.e. one of the rating vectors has no variance).
    """
    common = [item for item in dataset_age[person1]
              if item in dataset_age[person2]]
    n = len(common)
    if n == 0:
        return 0
    ratings1 = [dataset_age[person1][item] for item in common]
    ratings2 = [dataset_age[person2][item] for item in common]
    sum1 = sum(ratings1)
    sum2 = sum(ratings2)
    sum1_sq = sum(r * r for r in ratings1)
    sum2_sq = sum(r * r for r in ratings2)
    product_sum = sum(r1 * r2 for r1, r2 in zip(ratings1, ratings2))
    # Pearson r = Sxy / sqrt(Sxx * Syy)
    numerator = product_sum - (sum1 * sum2 / n)
    denominator = sqrt((sum1_sq - pow(sum1, 2) / n) *
                       (sum2_sq - pow(sum2, 2) / n))
    if denominator == 0:
        return 0
    return numerator / denominator
def most_similar_users(person,number_of_users):
    """Return the ``number_of_users`` people most similar to ``person``.

    Result is a best-first list of (pearson score, name) pairs.
    """
    scores = [(pearson_correlation(person, other), other)
              for other in dataset_age if other != person]
    # Highest-scoring people first (names are distinct, so ordering is total).
    scores.sort(reverse=True)
    return scores[:number_of_users]
def user_reommendations(person):
    """Recommend unseen items for ``person``.

    Every other user's ratings are weighted by their Pearson similarity
    to ``person``; items the person has already rated with a non-zero
    score are skipped.  Returns item names ordered from most to least
    recommended.  (Function name typo kept for caller compatibility.)
    """
    weighted_totals = {}
    similarity_sums = {}
    for other in dataset_age:
        # don't compare me to myself
        if other == person:
            continue
        similarity = pearson_correlation(person, other)
        # ignore dissimilar (or uncorrelated) users
        if similarity <= 0:
            continue
        for item in dataset_age[other]:
            # only score items the person hasn't (meaningfully) rated yet
            already_rated = (item in dataset_age[person]
                             and dataset_age[person][item] != 0)
            if already_rated:
                continue
            # similarity-weighted rating, plus the weight for normalisation
            weighted_totals.setdefault(item, 0)
            weighted_totals[item] += dataset_age[other][item] * similarity
            similarity_sums.setdefault(item, 0)
            similarity_sums[item] += similarity
    # Normalised score per item, best first.
    rankings = [(total / similarity_sums[item], item)
                for item, total in weighted_totals.items()]
    rankings.sort(reverse=True)
    return [item for score, item in rankings]
# Demo: print recommendations for the user 'Mark'.
print (user_reommendations('Mark'))
| true |
b8e592be69cef06e80522f33552c645e8c23c5f6 | Python | JumperC2P/PandaDiary_PyTest | /src/Sprint3/PBI_04/Buy_DiaryTest.py | UTF-8 | 2,629 | 2.6875 | 3 | [] | no_license | import unittest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pathlib
from Buy_Diary import Buy_Diary
import platform
from datetime import date
WEB_URL = "http://localhost:3000/"
class Buy_DiaryTest(unittest.TestCase):
    """End-to-end Selenium test of the diary purchase flow.

    Drives Edge on Windows and Chrome (bundled chromedriver) elsewhere,
    against a locally running app at WEB_URL.
    """
    def setUp(self):
        """Build the test fixtures and open one browser per platform."""
        # Credentials of the pre-existing test account.
        self.user = {
            'email': 'test@gmail.com',
            'password': '12345678',
        }
        self.cover_color = '5'
        self.paper_color = '1'
        self.title = 'UNST'
        self.paper_type = '4-Coated paper'
        # Payment/delivery details handed to Buy_Diary.start().
        # NOTE(review): 'expired_date_m' appears twice below, so '04' is
        # overwritten by '2022'; the second key was probably meant to be
        # 'expired_date_y' -- confirm against Buy_Diary before changing.
        self.info = {
            'payment_option': '2',
            'card_number': '1234567812345678',
            'expired_date_m': '04',
            'expired_date_m': '2022',
            'security_code': '123',
            'delivery_option': '2',
            'username': 'Testing',
            'phone': '0123456789',
            'address': '777 Swanston Street',
        }
        chrome_options = Options()
        if platform.system() == 'Windows':
            self.edge_driver = webdriver.Edge()
            self.edge_driver.implicitly_wait(10)
            self.edge_driver.get(WEB_URL)
        else:
            # Drivers are expected to sit next to the test sources.
            driver_path = (str(pathlib.Path().absolute())) + '/linux_driver'
            chrome_driver_path = driver_path + '/chromedriver'
            firefox_driver_path = driver_path + '/geckodriver'
            self.chrome_driver = webdriver.Chrome(executable_path=((str)(chrome_driver_path)),
                                                  chrome_options=chrome_options)
            # self.firefox_driver = webdriver.Firefox(executable_path=((str)(firefox_driver_path)))
            # self.safari_driver = webdriver.Safari()
        # Map of browser name -> driver actually opened on this platform.
        self.drivers = {}
        if platform.system() == 'Windows':
            self.drivers['Edge'] = self.edge_driver
        else:
            self.drivers['Chrome'] = self.chrome_driver
            # self.drivers['Firefox'] = self.firefox_driver
            # self.drivers['Safari'] = self.safari_driver
        for browser in self.drivers:
            self.drivers[browser].implicitly_wait(10)
            self.drivers[browser].get(WEB_URL)
            self.drivers[browser].set_window_size(1440, 1080)
    def tearDown(self):
        """Close every browser opened in setUp."""
        for k in self.drivers:
            self.drivers[k].close()
    def test_buy_diary(self):
        """Buying a diary should yield a result containing today's date."""
        # Expected date stamp (YYYYMMDD) embedded in the confirmation.
        expected = date.today().strftime("%Y%m%d")
        for k in self.drivers:
            result = Buy_Diary().start(self.drivers[k], self.user, self.cover_color, self.title, self.paper_color, self.paper_type, self.info)
            # NOTE(review): assumes the date sits at offset 21..28 of the
            # returned string -- confirm against Buy_Diary.start().
            self.assertEqual(expected, result[21:29])
if __name__ == '__main__':
    # Run this file's tests directly with the unittest runner.
    unittest.main()
| true |
25d30585b142745e0b9a3662a5384649bcd7b208 | Python | mgbo/My_Exercise | /2017/Turtule/lesson_1/turtle_5.py | UTF-8 | 355 | 3.5 | 4 | [] | no_license |
import turtle

def _zigzag_step(pen, dist, ang=90):
    """Draw one zig-zag step: forward, turn left, forward, turn back right."""
    pen.forward(dist)
    pen.left(ang)
    pen.forward(dist)
    pen.right(ang)

t = turtle.Turtle()
t.shape("circle")
# Same drawing as the original unrolled script: one step of 75, one of 50,
# then three steps of 45 (the original computed 45 once as 50 - 5).
for step in (75, 50, 45, 45, 45):
    _zigzag_step(t, step)
turtle.mainloop()
| true |
b94c79c06ccba7aca9b61ac8a86763ae98fdd7e9 | Python | AlbertoIHP/detectorImagenOpenCV | /Primeros videos/video6.py | UTF-8 | 1,736 | 2.8125 | 3 | [] | no_license | import cv2
import numpy as np
try:
    img = cv2.imread('bookpage.jpg')
    # Binary threshold applied to the colour image
    retval, treshold = cv2.threshold(img, 12, 255, cv2.THRESH_BINARY)
    grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Binary threshold applied to the grayscale image
    retval2, treshold2 = cv2.threshold(grayscaled, 12, 255, cv2.THRESH_BINARY)
    # Unlike the previous calls, adaptive thresholding computes a threshold
    # per region of the image, so it adapts to local lighting conditions.
    # "mean" averages the neighbourhood; "gaussian" uses a weighted sum.
    # Parameters:
    #   1st: the image to threshold
    #   2nd: the maximum value
    #   3rd: the adaptive method used to compute the threshold
    #   4th: the threshold type
    #   5th: neighbourhood size (pixels) used for the adaptive threshold
    #   6th: constant subtracted from the (weighted) mean; typically 1 for
    #        gaussian and 0 for mean
    gaus = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
    # Otsu: the threshold value is derived automatically from the histogram
    # of a bimodal image. (For non-bimodal images the binarisation is not
    # exact.)
    retval2,otsu = cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    cv2.imshow('original', img)
    cv2.imshow('treshold', treshold)
    cv2.imshow('treshold2', treshold2)
    cv2.imshow('gaus', gaus)
    cv2.imshow('otsu', otsu)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
except Exception as e:
print str(e)
raw_input(">") | true |
d44bbcc0ae8caed55a4cea335f79123eec44761e | Python | Agos95/Projects | /Neural Network and Deep Learning/Autoencoders for digit reconstruction/test.py | UTF-8 | 8,061 | 2.5625 | 3 | [] | no_license | # %%
import os
import numpy as np
import pandas as pd
import torch
import matplotlib.pyplot as plt
import random
from torch import nn
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets import MNIST
from tqdm import tqdm
import json
from sklearn.manifold import TSNE
#%% Define paths
data_root_dir = '~/Downloads/datasets'
#%% Create dataset
"""train_transform = transforms.Compose([
transforms.ToTensor(),
])"""
test_transform = transforms.Compose([
transforms.ToTensor(),
])
#train_dataset = MNIST(data_root_dir, train=True, download=True, transform=train_transform)
test_dataset = MNIST(data_root_dir, train=False, download=True, transform=test_transform)
test_dataloader = DataLoader(test_dataset, batch_size=len(test_dataset), shuffle=False)
#%% Define the network architecture
class Autoencoder(nn.Module):
    """Convolutional autoencoder for 28x28 single-channel images.

    The encoder compresses an image to a vector of length
    ``encoded_space_dim``; the decoder mirrors it to reconstruct the
    image, ending with a sigmoid so outputs lie in [0, 1].
    """
    def __init__(self, encoded_space_dim):
        super().__init__()
        # Encoder convolutions: 1x28x28 -> 8x14x14 -> 16x7x7 -> 32x3x3
        self.encoder_cnn = nn.Sequential(
            nn.Conv2d(1, 8, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(8, 16, 3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(16, 32, 3, stride=2, padding=0),
            nn.ReLU(True)
        )
        # Encoder head: flattened feature map -> latent vector
        self.encoder_lin = nn.Sequential(
            nn.Linear(3 * 3 * 32, 64),
            nn.ReLU(True),
            nn.Linear(64, encoded_space_dim)
        )
        # Decoder head: latent vector -> flattened feature map
        self.decoder_lin = nn.Sequential(
            nn.Linear(encoded_space_dim, 64),
            nn.ReLU(True),
            nn.Linear(64, 3 * 3 * 32),
            nn.ReLU(True)
        )
        # Decoder transposed convolutions: 32x3x3 -> ... -> 1x28x28
        self.decoder_conv = nn.Sequential(
            nn.ConvTranspose2d(32, 16, 3, stride=2, output_padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(16, 8, 3, stride=2, padding=1, output_padding=1),
            nn.ReLU(True),
            nn.ConvTranspose2d(8, 1, 3, stride=2, padding=1, output_padding=1)
        )
    def forward(self, x):
        """Full reconstruction pass: encode then decode."""
        return self.decode(self.encode(x))
    def encode(self, x):
        """Map a batch of images to latent vectors of size encoded_space_dim."""
        features = self.encoder_cnn(x)
        # Flatten everything but the batch dimension.
        flat = features.view(features.size(0), -1)
        return self.encoder_lin(flat)
    def decode(self, x):
        """Map latent vectors back to reconstructed images in [0, 1]."""
        flat = self.decoder_lin(x)
        # Un-flatten to the conv feature-map shape.
        features = flat.view(-1, 32, 3, 3)
        return torch.sigmoid(self.decoder_conv(features))
# %% create autoencoder with best params
# Rebuild the trained model: latent size from the tuning results, weights
# from the saved state dict (forced onto CPU).
model_params = json.load(open("best_params.json"))  # NOTE(review): file handle is never closed
net = Autoencoder(encoded_space_dim=model_params["hidden"])
net.load_state_dict(torch.load('net_params.pth', map_location=torch.device('cpu')))
# %% predi
def predict(net, dataloader, corruption=None, corruption_level=0.1):
    """Run ``net`` over every batch in ``dataloader`` and collect outputs.

    Parameters
    ----------
    net : torch.nn.Module
        Module mapping image batches to image batches (the autoencoder).
    dataloader : torch.utils.data.DataLoader
        Yields batches whose first element is the image tensor.
    corruption : str or None
        ``"noise"`` adds Gaussian noise, ``"occlusion"`` zeroes random
        pixels, anything else feeds the images unchanged.
    corruption_level : float
        Noise standard deviation, or per-pixel occlusion probability.

    Returns
    -------
    ``(loss, inputs, reconstructions)`` when ``corruption`` is None, else
    ``(loss, corrupted_inputs, reconstructions, originals)``.  The MSE
    loss is always computed against the *uncorrupted* images.
    """
    net.eval()
    loss_fn = torch.nn.MSELoss()
    with torch.no_grad():  # No need to track the gradients
        original = torch.Tensor().float()
        img = torch.Tensor().float()
        pred = torch.Tensor().float()
        for sample_batch in dataloader:
            # Extract the image tensor from the batch.
            image_batch = sample_batch[0]
            # Always keep the clean images for the loss computation.
            original = torch.cat([original, image_batch])
            if corruption == "noise":
                # BUGFIX: the original rebound the `corruption` argument
                # itself to the noise tensor here, which broke the branch
                # test (and `original` accumulation) on every batch after
                # the first.  Use a dedicated local instead.
                noise = corruption_level * torch.randn(*image_batch.shape)
                image_batch = torch.clamp(image_batch + noise, 0., 1.)
            elif corruption == "occlusion":
                # Zero each pixel independently with prob. corruption_level.
                mask = np.random.choice([0., 1.], size=image_batch.shape,
                                        p=[corruption_level, 1. - corruption_level])
                image_batch = torch.clamp(image_batch * torch.FloatTensor(mask), 0., 1.)
            # Forward pass and accumulation.
            out = net(image_batch)
            img = torch.cat([img, image_batch])
            pred = torch.cat([pred, out])
        # Evaluate global loss against the clean images.
        loss = float(loss_fn(pred, original))
    if corruption is None:
        return loss, img.numpy(), pred.numpy()
    else:
        return loss, img.numpy(), pred.numpy(), original.numpy()
# %% predictions on original images
# Baseline: reconstruction loss on the clean (uncorrupted) test images.
loss, img, out = predict(net, test_dataloader, corruption=None)
print("Loss with original images = {}".format(loss))
# %% plotting function
def plot_corruption(original, feed, out, fname=None):
    """Plot original / corrupted-input / reconstruction side by side.

    Saves the figure to ``fname`` when given; the figure is always
    closed before returning.
    """
    panels = [
        (original, "Original Image"),
        (feed, "Autoencoder Input"),
        (out, "Autoencoder Output"),
    ]
    fig, axs = plt.subplots(1, len(panels), figsize=(9, 3))
    for ax, (image, title) in zip(axs, panels):
        ax.imshow(image.squeeze(), cmap='gist_gray')
        ax.set_title(title, fontsize=14)
        ax.axis("off")
    plt.tight_layout()
    if fname is not None:
        fig.savefig(fname)
    plt.close()
    return
# %% noisy images
corruption = "noise"
corruption_level = np.arange(0., 1., 0.1)  # noise std from 0.0 to 0.9
loss_list = []
os.makedirs(corruption, exist_ok=True)  # output folder named after the mode
for i, c in enumerate(corruption_level):
    loss, img, out, original = predict(net, test_dataloader, corruption=corruption, corruption_level=c)
    loss_list.append(loss)
    idx = 13  # fixed sample used for the example figures
    fname = "{}/{:02d}.pdf".format(corruption, i)
    plot_corruption(original[idx], img[idx], out[idx], fname)
# Loss as a function of the noise level.
plt.figure(figsize=(8,5))
plt.grid()
plt.plot(corruption_level, loss_list, "o--")
plt.xlabel("Noise level", fontsize=14)
plt.ylabel("Loss", fontsize=14)
plt.savefig("{}/loss.pdf".format(corruption))
# %% occluded images
corruption = "occlusion"
corruption_level = np.arange(0., 1., 0.1)  # per-pixel occlusion probability
loss_list = []
os.makedirs(corruption, exist_ok=True)
for i, c in enumerate(corruption_level):
    loss, img, out, original = predict(net, test_dataloader, corruption=corruption, corruption_level=c)
    loss_list.append(loss)
    idx = 100  # fixed sample used for the example figures
    fname = "{}/{:02d}.pdf".format(corruption, i)
    plot_corruption(original[idx], img[idx], out[idx], fname)
plt.figure(figsize=(8,5))
plt.grid()
plt.plot(corruption_level, loss_list, "o--")
# NOTE(review): the label says "Noise level" but this sweep varies the
# occlusion probability -- likely a copy-paste from the noise section.
plt.xlabel("Noise level", fontsize=14)
plt.ylabel("Loss", fontsize=14)
plt.savefig("{}/loss.pdf".format(corruption))
# %% Get the encoded representation of the test samples
encoded_image = []
encoded_label = []
for sample in tqdm(test_dataset):
    img = sample[0].unsqueeze(0)  # add a batch dimension for the encoder
    label = sample[1]
    # Encode image
    net.eval()
    with torch.no_grad():
        encoded_img = net.encode(img)
    # Append to list
    encoded_image.append(encoded_img.flatten().numpy())
    encoded_label.append(label)
encoded_image = np.array(encoded_image)
# Project the latent vectors to 2D for visualisation.
tsne = TSNE(n_components=2, init='pca', random_state=0, n_jobs=-1)
X_tsne = tsne.fit_transform(encoded_image)
encoded_samples = [(X_tsne[x], encoded_label[x]) for x in range(len(encoded_label))]
# %% Visualize encoded space
# One fixed colour per MNIST digit class.
color_map = {
    0: '#1f77b4',
    1: '#ff7f0e',
    2: '#2ca02c',
    3: '#d62728',
    4: '#9467bd',
    5: '#8c564b',
    6: '#e377c2',
    7: '#7f7f7f',
    8: '#bcbd22',
    9: '#17becf'
}
# Plot a random subset of 2000 points (random.sample raises if fewer exist).
encoded_samples_reduced = random.sample(encoded_samples, 2000)
plt.figure(figsize=(10,10))
for enc_sample, label in tqdm(encoded_samples_reduced):
    plt.plot(enc_sample[0], enc_sample[1], marker='.', color=color_map[label])
plt.grid(True)
plt.tick_params(labelbottom=False, labelleft=False)
plt.legend([plt.Line2D([0], [0], ls='', marker='.', color=c, label=l) for l, c in color_map.items()], color_map.keys())
plt.tight_layout()
plt.savefig("encoded_space.pdf")
plt.show()
# %%
| true |
49b09e8d71708831bb60b49bd5ceb638a6a9e866 | Python | aditya25022001/general-purpose-programs | /use_model.py | UTF-8 | 681 | 2.546875 | 3 | [] | no_license | import cv2 as cv
import numpy as np
import tensorflow as tf
# Class labels: index 0 -> "adi", index 1 -> "Nadi".
CATEGORY = ["adi" , "Nadi"]  # NOTE(review): unused; only CATEGORY_SHOW is printed
CATEGORY_SHOW = ["this is Aditya" , "this is not Aditya"]
# Disabled face-detection setup kept for reference (string literal, not executed):
'''
face_cascade = 'haarcascade_frontalface_alt.xml'
face_cascade_name = face_cascade
face_cascade = cv.CascadeClassifier()
face_cascade.load(face_cascade_name)'''
def prepare(filepath):
    """Load an image as grayscale and shape it for the Keras model.

    Returns an array of shape (1, 100, 100, 1): a single-sample batch of
    one 100x100 grayscale image.
    """
    size = 100
    gray = cv.imread(filepath, cv.IMREAD_GRAYSCALE)
    resized = cv.resize(gray, (size, size))
    return resized.reshape(-1, size, size, 1)
model = tf.keras.models.load_model('addi.model')
# NOTE(review): int(prediction) assumes the model returns a single
# 0/1-like score for one image -- confirm the model's output shape.
prediction = model.predict([prepare('2.jpg')])
print(CATEGORY_SHOW[int(prediction)])
| true |
5a695d082e6183cbd1b6a9fcc5941997c113fc22 | Python | saltstack/salt | /salt/returners/kafka_return.py | UTF-8 | 2,143 | 2.75 | 3 | [
"Apache-2.0",
"MIT",
"BSD-2-Clause"
] | permissive | """
Return data to a Kafka topic
:maintainer: Justin Desilets (justin.desilets@gmail.com)
:maturity: 20181119
:depends: confluent-kafka
:platform: all
To enable this returner install confluent-kafka and enable the following
settings in the minion config:
returner.kafka.bootstrap:
- "server1:9092"
- "server2:9092"
- "server3:9092"
returner.kafka.topic: 'topic'
To use the kafka returner, append `--return kafka` to the Salt command, eg;
salt '*' test.ping --return kafka
"""
import logging
import salt.utils.json
try:
from confluent_kafka import Producer
HAS_KAFKA = True
except ImportError:
HAS_KAFKA = False
log = logging.getLogger(__name__)
__virtualname__ = "kafka"
def __virtual__():
    """Load this returner only when confluent-kafka imported successfully."""
    if HAS_KAFKA:
        return __virtualname__
    return (
        False,
        "Could not import kafka returner; confluent-kafka is not installed.",
    )
def _get_conn():
    """
    Return a kafka connection (comma-joined bootstrap server list),
    or None when the config option is missing.
    """
    servers = __salt__["config.option"]("returner.kafka.bootstrap")
    if not servers:
        log.error("Unable to find kafka returner config option: bootstrap")
        return None
    return ",".join(servers)
def _delivery_report(err, msg):
    """Called once for each message produced to indicate delivery result.
    Triggered by poll() or flush()."""
    if err is None:
        log.debug("Message delivered to %s [%s]", msg.topic(), msg.partition())
    else:
        log.error("Message delivery failed: %s", err)
def returner(ret):
    """
    Return information to a Kafka server: the job result is serialised
    to JSON and produced to the configured topic.
    """
    topic = __salt__["config.option"]("returner.kafka.topic")
    if not topic:
        log.error("Unable to find kafka returner config option: topic")
        return
    producer = Producer({"bootstrap.servers": _get_conn()})
    producer.poll(0)
    producer.produce(
        topic,
        salt.utils.json.dumps(ret),
        str(ret).encode("utf-8"),
        callback=_delivery_report,
    )
    producer.flush()
| true |
13c67a4bea7d93a113becd70ba3e4f66da9948f5 | Python | oscarburga/tutorias-complejidad-algoritmica-2021-1 | /s3/clase/max-subarray-sum.py | UTF-8 | 924 | 3.953125 | 4 | [] | no_license |
# a = [-5, -5, -5, -5, -5]
# the empty subarray [] would have sum 0,
# but we do NOT consider the empty subarray here
inf = 10**18  # "infinity" sentinel used by merge()
def merge(a, l, mid, r):
    """Best subarray sum that crosses the midpoint of a[l..r].

    Any crossing subarray is a suffix ending at ``mid`` joined to a
    prefix starting at ``mid + 1``; take the best of each and add them.
    """
    neg_inf = float("-inf")  # sentinel: empty subarrays are not allowed
    best_left = neg_inf
    running = 0
    for i in range(mid, l - 1, -1):  # grow the suffix leftwards from mid
        running += a[i]
        best_left = max(best_left, running)
    best_right = neg_inf
    running = 0
    for i in range(mid + 1, r + 1):  # grow the prefix rightwards
        running += a[i]
        best_right = max(best_right, running)
    return best_left + best_right
def conquer(a, l, r):
    """Maximum subarray sum within a[l..r] (divide and conquer).

    The best subarray lies entirely in the left half, entirely in the
    right half, or crosses the midpoint (handled by ``merge``).
    """
    if l == r:
        return a[l]  # single element: only one possible subarray
    mid = (l + r) // 2
    best = merge(a, l, mid, r)          # best crossing subarray
    best = max(best, conquer(a, l, mid))        # best in left half
    best = max(best, conquer(a, mid + 1, r))    # best in right half
    return best
a = [-2,1,-3,4,-1,2,1,-5,4] # expected answer: 6, from [4, -1, 2, 1]
n = len(a)  # NOTE(review): unused
print(conquer(a, 0, len(a)-1))
| true |
ee50783817e46a7ee08f954f4981b92980ebaa11 | Python | sutarnilesh/DataStructuresPython | /algorithms/Sorting/quick_sort.py | UTF-8 | 1,507 | 4.34375 | 4 | [] | no_license |
"""
The quick sort uses divide and conquer to gain the same advantages as the merge sort,
while not using additional storage.
A quick sort first selects a value, which is called the pivot value. We will simply use the first item in the list.
The role of the pivot value is to assist with splitting the list. The actual position where the
pivot value belongs in the final sorted list, commonly called the split point,
will be used to divide the list for subsequent calls to the quick sort.
"""
def quickSort(alist):
    """Sort ``alist`` in place with quicksort (first element as pivot)."""
    quickSortHelper(alist, 0, len(alist) - 1)
def quickSortHelper(alist, first, last):
    """Recursively quicksort alist[first..last] in place."""
    if first >= last:
        return  # zero or one element: already sorted
    split_index = partition(alist, first, last)
    quickSortHelper(alist, first, split_index - 1)
    quickSortHelper(alist, split_index + 1, last)
def partition(alist, first, last):
    """Partition alist[first..last] around alist[first] (the pivot).

    Moves the pivot to its final sorted position and returns that index;
    everything to its left is <= pivot, everything to its right >= pivot.
    """
    pivot = alist[first]
    left = first + 1
    right = last
    while True:
        # Advance past elements already on the correct side.
        while left <= right and alist[left] <= pivot:
            left += 1
        while right >= left and alist[right] >= pivot:
            right -= 1
        if right < left:
            break  # pointers crossed: partition point found
        # Both pointers stopped on misplaced elements: swap them.
        alist[left], alist[right] = alist[right], alist[left]
    # Drop the pivot into its final slot.
    alist[first], alist[right] = alist[right], alist[first]
    return right
# Demo run on a small unsorted list.
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
quickSort(alist)
print(alist)
| true |
88dfda8cd47bc0c48acf4757a4af993463b5392b | Python | lesterfernandez/Messenger | /client.py | UTF-8 | 1,160 | 2.84375 | 3 | [] | no_license | import socket
import threading
HEADER = 8  # width (bytes) of the fixed-size ASCII length prefix
PORT = 5050
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!leave"  # NOTE(review): defined but never sent/checked here
SERVER = "IPV4 ADDR" # Enter the IPV4 address that you are hosting
ADDR = (SERVER, PORT) # the server with here
# use "IPCONFIG" on windows or "hostname -I" on linux
name = " "  # chat username; set interactively in getInput()
set_name = False  # becomes True once the user has chosen a name
# Connects at import time; fails unless SERVER above is replaced with a
# real address and the server is listening.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
def send(msg):
    """Send ``msg`` over the global socket, preceded by a fixed-width
    (HEADER bytes) ASCII length prefix padded with spaces."""
    payload = msg.encode(FORMAT)
    header = str(len(payload)).encode(FORMAT)
    header += b" " * abs((HEADER - len(header)))
    client.send(header)
    client.send(payload)
def getInput():
    """Read user input forever: first prompt for a username, then treat
    every further line as a chat message sent prefixed with the name.
    Runs on the background thread started below."""
    global name
    global set_name
    while True:
        if not set_name:
            name = input("Please enter your username: ")
            set_name = True
        else:
            text = input()
            send(f"\n{name}: {text}")
connected = True
# Input runs on its own thread so receiving (below) is not blocked.
thread = threading.Thread(target=getInput)
thread.start()
while connected:
    # Fixed-width length prefix first, then a payload of that size.
    reply_len = client.recv(HEADER).decode(FORMAT)
    if reply_len:
        reply_len = int(reply_len)
        print(client.recv(reply_len).decode(FORMAT))
| true |
e88d0990f05c16bced90946bf108e051f3a5d0b7 | Python | wyaadarsh/LeetCode-Solutions | /C++/0819-Most-Common-Word/soln-1.py | UTF-8 | 589 | 2.8125 | 3 | [
"MIT"
] | permissive | class Solution {
public:
    // Return the most frequent word in `paragraph` that is not banned.
    // Matching is case-insensitive and punctuation is ignored.
    string mostCommonWord(string paragraph, vector<string>& banned) {
        // Banned words in a hash set for O(1) membership tests.
        unordered_set<string> banset(banned.begin(), banned.end());
        unordered_map<string, int> counter;
        // Normalise in place: lower-case letters; everything else becomes
        // a space so the stream below splits on it.
        for(auto & c : paragraph) c = isalpha(c) ? tolower(c) : ' ';
        istringstream iss(paragraph);
        string word, ans = "";
        int mx = 0;
        // Single pass: count each non-banned word, track the running max.
        while (iss >> word) {
            if (banset.find(word) == banset.end() && ++counter[word] > mx) {
                mx = counter[word];
                ans = word;
            }
        }
        return ans;
    }
};
| true |
0dc79614166f42a6951bde844c9577f57398ef47 | Python | nthanhtung/vn_stock_analysis | /source/xxx/load/to_df.py | UTF-8 | 573 | 2.8125 | 3 | [] | no_license | ###############
import pandas as pd
import glob
import datetime as dt
def csv_path_to_df(path: str = "C:/data", file_name_to_exclude: str = "abc.csv"):
    """Read every CSV file in ``path`` into one concatenated DataFrame.

    Parameters
    ----------
    path : str
        Directory scanned (non-recursively) for ``*.csv`` files.
    file_name_to_exclude : str
        Any file whose path contains this substring is skipped.  (The
        original implementation dropped only the first match and printed
        an empty line when there was none.)

    Returns
    -------
    pandas.DataFrame
        Contents of all matching files with a fresh integer index, or an
        empty DataFrame when no files match (instead of ``pd.concat``
        raising ``ValueError`` on an empty list).
    """
    all_files = glob.glob(path + "/*.csv")
    # Skip every file matching the exclusion substring, not just the first.
    selected = [f for f in all_files if file_name_to_exclude not in f]
    if not selected:
        return pd.DataFrame()
    frames = [pd.read_csv(f, index_col=None, header=0) for f in selected]
    return pd.concat(frames, axis=0, ignore_index=True)
| true |
f50625ac7f20d9f5f04f87000ef7ebbc09ca772c | Python | efratkohen/python_HW | /HW1/question3.py | UTF-8 | 548 | 3.71875 | 4 | [] | no_license | def check_palindrome():
"""Runs through all 6-digit numbers and checks the mentioned conditions.
The function prints out the numbers that satisfy this condition.
Notes
-----
It should print out the first number (with a palindrome in its last 4 digits),not all four "versions" of it.
"""
# Your code goes here...
def polindrom(a):
s=str(a)
l=len(s)
half=l/2
if (l % 2) == 0:
i=0
while i<half:
if s[i] != s[len-i]:
return False
b=1441
print(polindrom(b))
| true |
d45589459d045cb1e6e4d21f80f9e46852a14e6f | Python | arolariu/2NHACK2020 | /main.py | UTF-8 | 673 | 2.625 | 3 | [] | no_license | from config import *
from tkinter import *
#GRAPHICAL INTERFACE:
def update(ind):
    """Advance the GIF animation: show frame ``ind`` and re-schedule.

    NOTE(review): the index advances by 3, so only every third of the 60
    frames is displayed -- confirm whether this subsampling is intended.
    """
    frame = frames[ind]
    ind += 3
    if ind == frameCnt:
        ind = 0
    label.configure(image=frame)
    # Show the next frame in 100 ms.
    app.after(100, update, ind)
app = Tk()
app.title('Soft Squad - School Assistant')
app.geometry('800x600')
app.resizable(False, False)
app.configure(bg='#000000')
# Pre-load all 60 animation frames of the GIF.
frameCnt = 60
frames = [PhotoImage(master = app, file='Sufletul.gif', format = 'gif -index %i' %(i)) for i in range(frameCnt)]
label = Label(app)
label.pack()
# `vorbit` and `wishMe` come from `config` via the star import above.
Button(master = app, text="Push to talk!", command = vorbit, width=10, height=1).place(x=350, y=550)
# Start the animation loop, greet the user, then enter the main loop.
app.after(0, update, 0)
wishMe()
app.mainloop() | true |
c2ec7d8274c0b2a14de558cf17052191f2eec8b0 | Python | SeitzhagyparovaTE/web | /week_7/2.HackerRank/9.py | UTF-8 | 320 | 3.03125 | 3 | [] | no_license | if __name__ == '__main__':
marklist = []
for _ in range(0,int(input())):
marklist.append([input(), float(input())])
second = sorted(list(set([marks for name, marks in marklist])))[1]
marklist.sort()
for name, mark in marklist:
if mark == second:
print(name, end = '\n')
| true |
e4d836fe94dc05c8f47d0fa9bb332453eca03553 | Python | WertheimKhon/CompMatSciTools | /Stress_Strain_Analysis/ElasticMD/extract/__init__.py | UTF-8 | 7,389 | 3 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2021 Dr. William A. Pisani
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Created on Tue Sep 1 14:39:10 2020
@author: William A. Pisani, Ph.D.
This module will read in a LAMMPS log file and extract all thermodynamic data.
"""
import numpy as np
class log:
def __init__(self,logname,suppressOutput=False):
self.keywords = [] # List of lists of thermo keywords (Step, Temp, Press, etc.), if different runs have different numbers of keywords
self.nkeywords = [] # List of numbers of keywords
self.keywordIndices = [] # List of dictionaries of thermo keyword-index pairs corresponding to each run
self.headerIndices = [] # List of indices where thermo keywords are
self.endRunIndices = [] # List of indices where "Loop time string is, marking the end of each run
self.data = [] # List of 2d numpy arrays
self.logname = logname # Name of log file
self.extractKeywordsAndData()
if suppressOutput == False:
self.printHeaders()
def __repr__(self):
return f"{self.__class__.__name__} object containing thermodynamic data from {self.logname}"
def printHeaders(self):
"""
Prints thermodynamic headers in order.
Returns
-------
None.
"""
print(f"Data extraction from {self.logname} was successful!")
print(f"{len(self.keywords)} thermodynamic section(s) found")
print("\nThermodynamic keyword headers are as follows (index, header):")
print("-------------------------------------------------------------")
for index,header in enumerate(self.keywords):
print(f"{index}\t{header}\n")
if len(self.keywords) > 1:
print("Please note that since more than one (1) section of non-identical thermodynamic data was found, you will need to specify which section of data you wish to extract.")
print("For example, thermo.get(('Step','Temp','Press'),0) to get the step, temperature, and pressure from the first section of data")
def extractKeywordsAndData(self):
"""
Get all keyword/data sections from all LAMMPS runs in log file
Returns
-------
None.
"""
with open(self.logname,'r') as logfile:
logContents = logfile.read()
splitLogContents = logContents.split('\n')
for index,line in enumerate(splitLogContents):
if line.find("Per MPI ") > -1 or line.find("Memory ") > -1: # Per MPI is for modern versions of LAMMPS, Memory is for older versions
self.headerIndices.append(index+1) # Per MPI rank memory always occurs one line before the thermo keywords line
elif line.find("Loop time ") > -1:
self.endRunIndices.append(index)
for index,headerIndex in enumerate(self.headerIndices):
# Thermo keywords
line = splitLogContents[headerIndex]
headerLine = " ".join(line.split()).split(' ')
# Raw Data
start, stop = headerIndex+1, self.endRunIndices[index]
rawData = splitLogContents[start:stop]
if headerLine not in self.keywords:
self.keywords.append(headerLine)
self.nkeywords.append(len(headerLine))
keywordPairs = {}
for index,keyword in enumerate(headerLine):
keywordPairs.update({keyword:index})
self.keywordIndices.append(keywordPairs)
# Convert raw data to numpy array
npData = np.zeros((len(rawData),len(headerLine)))
for i,dataLine in enumerate(rawData):
dataLine = " ".join(dataLine.split())
for j,value in enumerate(dataLine.split(' ')):
npData[i,j] = value
self.data.append(npData)
else: # If thermo header is identical to one already stored
# Get index of first occurence of thermo header that is identical to the next header/data set to be stored
firstOccurenceIndex = self.keywords.index(headerLine)
# Convert raw data to numpy array
npData = np.zeros((len(rawData),len(headerLine)))
for i,dataLine in enumerate(rawData):
dataLine = " ".join(dataLine.split())
for j,value in enumerate(dataLine.split(' ')):
npData[i,j] = value
# Add numpy array to numpy array of first occurence
self.data[firstOccurenceIndex] = np.concatenate((self.data[firstOccurenceIndex],npData))
def get(self,keys,index=0):
"""
Parameters
----------
keys : tuple
Tuple of thermodynamic keywords. Common examples are "Step", "Temp", and "Press".
Fixes, computes, and variables can also be extracted if given the appropriate term (e.g. "f_sxx_ave").
index : int, optional
Index for which section of data you wish to pull from. The default is 0.
Raises
------
Exception
If no keywords are specified, an exception will be raised. At least one keyword must be specified.
Returns
-------
properties : list
List of 1d numpy arrays corresponding to the input tuple.
"""
if len(keys) == 0:
raise Exception("no keywords specified, you must specify at least one keyword (e.g. Step, Temp, etc)")
if type(keys) == tuple:
properties = []
for key in keys:
keyValue = self.keywordIndices[index][key]
data = self.data[index][:,keyValue]
properties.append(data)
elif type(keys) == str:
keyValue = self.keywordIndices[index][keys]
data = self.data[index][:,keyValue]
properties = data
return properties
| true |
29d9a56987b817f8a8642faa10d9a2c01c5ab149 | Python | shiva-pole/opencv-practice | /video-player.py | UTF-8 | 771 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import cv2
import matplotlib.pyplot as plt
def main():
windowName = 'Live Video Feed'
video_file = "F:\\Projects\\Mine\\Python\\open-cv\\output\\out.avi"
cv2.namedWindow(windowName)
cap = cv2.VideoCapture(video_file)
if cap.isOpened():
ret, frame = cap.read()
else:
ret = False
# img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#
# plt.imshow(img)
# plt.show()
while cap.isOpened():
ret, frame = cap.read()
if ret:
cv2.imshow(windowName, frame)
if cv2.waitKey(33)==27:
break
else:
break
cap.release()
cv2.destroyWindow(windowName)
if __name__ == "__main__":
main()
| true |
eecd7db79365c1e2ed7df822c98b9e14034d3f2b | Python | mahyar-osn/Stride-and-Slice-Images | /strideslice.py | UTF-8 | 9,466 | 2.921875 | 3 | [
"MIT"
] | permissive | import numpy as np
import cv2
import os
import tifffile
import nibabel as nib
config = dict()
config['nibfile'] = False
config['tiffile'] = False
config['volumetric'] = False
def read_images(dir_path):
    """Load every file in *dir_path* (sorted by name), flagging format info in `config`."""
    loaded = []
    for fname in sorted(os.listdir(dir_path)):
        full_path = os.path.join(dir_path, fname)
        # NOTE(review): any filename longer than 2 chars marks the whole
        # dataset as volumetric -- presumably intentional for this project's
        # naming scheme; confirm.
        if len(fname) > 2:
            config['volumetric'] = True
        if fname.endswith(('tif', 'tiff')):
            config['tiffile'] = True
            img = tifffile.imread(full_path).T
        elif fname.endswith(('nii', 'nii.gz')):
            config['nibfile'] = True
            img = nib.load(full_path)
        else:
            img = cv2.imread(full_path)
        loaded.append(img)
    return loaded
def image_size(image):
    """Return the shape tuple of *image*."""
    return image.shape
def save_images(transformed, save_dir):
    """Write each crop list in *transformed* into its own numbered sub-directory of *save_dir*."""
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    # One sub-directory per input image (dict order), numbered from 1;
    # crops inside are numbered from 1 as well.
    for folder_no, images in enumerate(transformed.values(), start=1):
        target = os.path.join(save_dir, str(folder_no))
        for img_no, img in enumerate(images, start=1):
            if not os.path.exists(target):
                os.mkdir(target)
            if config['nibfile']:
                nib.save(img, os.path.join(target, str(img_no) + '.nii.gz'))
            elif config['tiffile']:
                tifffile.imsave(os.path.join(target, str(img_no) + '.tiff'), img)
            else:
                cv2.imwrite(os.path.join(target, str(img_no) + '.png'), img)
def Offset_op(input_length, output_length, stride):
    """
    Number of pixels left uncovered along one axis after sliding windows of
    ``output_length`` across ``input_length`` with the given ``stride``.

    :param input_length: full extent of the image along this axis
    :param output_length: extent of each crop along this axis
    :param stride: step between consecutive crops
    :return: leftover pixels (0 when the windows tile the axis exactly)
    """
    covered = stride * ((input_length - output_length) // stride) + output_length
    return input_length - covered
def Padding_op(Image, strides, offset_x, offset_y):
    """
    Zero-pad a 2-D (H, W, C) image so crops tile it exactly with *strides*.

    The pad along each axis is ``stride - offset`` and the original pixels
    are centred inside the padded canvas.  Volumetric (3D) inputs are not
    supported and raise.

    :param Image: input array of shape (H, W, C)
    :param strides: (stride_x, stride_y) step sizes
    :param offset_x: leftover pixels along axis 0 (see Offset_op)
    :param offset_y: leftover pixels along axis 1
    :return: padded image of shape (H + pad_x, W + pad_y, C)
    """
    if config['volumetric']:
        raise Exception("3D Padding not yet implemented!")
    pad_x = strides[0] - offset_x
    pad_y = strides[1] - offset_y
    rows, cols, channels = Image.shape
    canvas = np.zeros(shape=(rows + pad_x, cols + pad_y, channels), dtype=Image.dtype)
    top, left = pad_x // 2, pad_y // 2
    canvas[top:top + rows, left:left + cols, :] = Image
    return canvas
def Convolution_op(Image, size, strides):
    """
    Takes an image, Dimensions of the desired image and Strides.
    Slides a window of the given size across the image (2D or 3D depending
    on config['volumetric']) and collects one crop per stride step.
    NOTE(review): crop counts are dim // stride, so windows near the far
    edge may be silently truncated by slicing to less than `size` -- confirm
    this is the intended edge behaviour.
    :param Image: source image (numpy array, or nibabel image when config['nibfile'])
    :param size: (h, w[, d]) of each crop
    :param strides: (sx, sy[, sz]) step between crop origins
    :return: List of cropped images
    """
    start_x = 0
    start_y = 0
    n_rows = Image.shape[0] // strides[0]
    n_columns = Image.shape[1] // strides[1]
    if config['volumetric']:
        start_z = 0
        n_depths = Image.shape[2] // strides[2]
    small_images = []
    if config['volumetric']:
        for i in range(n_rows):
            for j in range(n_columns):
                for k in range(n_depths):
                    new_start_x = start_x + i * strides[0]
                    new_start_y = start_y + j * strides[1]
                    new_start_z = start_z + k * strides[2]
                    if config['nibfile']:
                        # NIfTI inputs: slice the raw data then rewrap with the original affine.
                        small_image_temp = Image.get_fdata()[new_start_x:new_start_x + size[0], new_start_y:new_start_y + size[1], new_start_z:new_start_z + size[2]]
                        small_image_temp_1 = nib.Nifti1Image(small_image_temp, Image.affine)
                        small_images.append(small_image_temp_1)
                    else:
                        small_images.append(Image[new_start_x:new_start_x + size[0], new_start_y:new_start_y + size[1], new_start_z:new_start_z + size[2]])
    else:
        for i in range(n_rows):
            for j in range(n_columns):
                new_start_x = start_x + i * strides[0]
                new_start_y = start_y + j * strides[1]
                if config['nibfile']:
                    small_image_temp = Image.get_fdata()[new_start_x:new_start_x + size[0], new_start_y:new_start_y + size[1]]
                    small_image_temp_1 = nib.Nifti1Image(small_image_temp, Image.affine)
                    small_images.append(small_image_temp_1)
                else:
                    small_images.append(Image[new_start_x:new_start_x + size[0], new_start_y:new_start_y + size[1]])
    return small_images
def transform(source_dir, size, strides=None, PADDING=False):
    """
    Transforms the images/image into desired small images provided the strides
    If no strides are provided, the strides will default to the size of the desired image, i.e no overlapping will
    take place.
    :param source_dir: path to a single image file (name contains a dot) or a directory of images
    :param size: (height, width[, depth]) of each output crop
    :param strides: per-axis steps; any None entry defaults to the matching size entry
    :param PADDING: zero-pad the input so crops tile it exactly (2D only)
    :return: dictionary with string of count starting from 1 as key and list of images as values.
    """
    # BUG FIX: the old default `strides=[None, None, None]` was a mutable
    # default argument that this function mutates below, so the "no overlap"
    # default only worked on the first call. Create a fresh list per call.
    if strides is None:
        strides = [None, None, None]
    if not (os.path.exists(source_dir)):
        raise Exception("Path does not exist!")
    else:
        im_path = None
        dir_path = None  # NOTE: assigned but never used below
        splits = source_dir.split('/')
        last = splits[-1].split('.')
        # A dot in the final path component means a single image file.
        if len(last) > 1:
            im_path = source_dir
        else:
            dir_path = source_dir
    if im_path:
        Image = cv2.imread(im_path)
        Images = [Image]
    else:
        Images = read_images(source_dir)
    transformed_images = dict()
    Images = np.array(Images)
    if PADDING:
        if config["volumetric"]:
            raise Exception("3D not yet implemented!")
        padded_images = []
        # Resolve stride defaults, compute the uncovered offsets, and pad
        # every image so the crops tile it exactly.
        if strides[0] is None and strides[1] is None:
            strides[0] = size[0]
            strides[1] = size[1]
            offset_x = Images.shape[1] % size[0]
            offset_y = Images.shape[2] % size[1]
            for Image in Images:
                Image_Padded = Padding_op(Image, strides, offset_x, offset_y)
                padded_images.append(Image_Padded)
        elif strides[0] is None and strides[1] is not None:
            strides[0] = size[0]
            offset_x = Images.shape[1] % size[0]
            if strides[1] <= Images.shape[2]:
                offset_y = Offset_op(Images.shape[2], size[1], strides[1])
            else:
                print("stride_y must be between {0} and {1}".format(1, Images.shape[2] - size[1]))
            for Image in Images:
                Image_Padded = Padding_op(Image, strides, offset_x, offset_y)
                padded_images.append(Image_Padded)
        elif strides[0] is not None and strides[1] is None:
            strides[1] = size[1]
            offset_y = Images.shape[2] % size[1]
            if strides[0] <= Images.shape[1]:
                offset_x = Offset_op(Images.shape[1], size[0], strides[0])
            else:
                print("stride_x must be between {0} and {1}".format(1, Images.shape[1] - size[0]))
            for Image in Images:
                Image_Padded = Padding_op(Image, strides, offset_x, offset_y)
                padded_images.append(Image_Padded)
        else:
            if strides[0] > Images.shape[1] or strides[1] > Images.shape[2]:
                print("stride_x must be between {0} and {1} and stride_y must be between {2} and {3}"
                      .format(1, Images.shape[1] - size[0], 1, Images.shape[2] - size[1]))
            else:
                offset_x = Offset_op(Images.shape[1], size[0], strides[0])
                offset_y = Offset_op(Images.shape[2], size[1], strides[1])
                for Image in Images:
                    Image_Padded = Padding_op(Image, strides, offset_x, offset_y)
                    padded_images.append(Image_Padded)
        count = 0
        for Image in padded_images:
            count += 1
            transformed_images[str(count)] = Convolution_op(Image, size, strides)
    else:
        # Without padding only the stride defaults need resolving.
        if strides[0] is None and strides[1] is None:
            strides[0] = size[0]
            strides[1] = size[1]
        elif strides[0] is None and strides[1] is not None:
            strides[0] = size[0]
        elif strides[0] is not None and strides[1] is None:
            strides[1] = size[1]
        count = 0
        for Image in Images:
            count += 1
            transformed_images[str(count)] = Convolution_op(Image, size, strides)
    return transformed_images
def main():
source_dir = './data/3D'
size = (100, 100, 50)
strides = [95, 95, 48]
padding = False
grid_images = transform(source_dir, size, strides=strides, PADDING=padding)
save_dir = 'output'
save_images(grid_images, save_dir)
if __name__ == '__main__':
main() | true |
6e203982376f1137731eb79f4e72d144e31f9cc1 | Python | KevinJ-Huang/StereoLow-Light | /merge.py | UTF-8 | 3,739 | 2.75 | 3 | [] | no_license | import cv2
import numpy as np
import os
def calWeight(d, k):
    """
    Sigmoid blending weights across an overlap region.

    :param d: width (pixels) of the overlapping strip to blend
    :param k: steepness of the sigmoid transition
    :return: 1-D array of d weights rising from ~0 to ~1 across the overlap
    """
    positions = np.arange(-d / 2, d / 2)
    return 1 / (1 + np.exp(-k * positions))
def imgFusion(img1, img2, overlap, left_right=True):
    """
    Blend two 3-channel images along their shared edge with sigmoid weights.

    :param img1: first image (H, W, C); placed on the left (or on top)
    :param img2: second image; placed on the right (or below)
    :param overlap: width in pixels of the overlapping strip
    :param left_right: True for horizontal fusion, False for vertical
    :return: fused image as a float array with 3 channels
    """
    # Blending weights across the overlap; k=0.05 controls the transition
    # sharpness (hyper-parameter).
    w = calWeight(overlap, 0.05)
    if left_right:  # horizontal (left-right) fusion
        row, col, channels = img1.shape
        row1, col1, channels1 = img2.shape
        img_res = np.zeros((row, col + col1 - overlap, 3))
        for c in range(channels):
            img_new = np.zeros((row, col + col1 - overlap))
            img_new[:, :col] = img1[:, :, c]
            w_expand = np.tile(w, (row, 1))  # broadcast weights down the rows
            img_new[:, col - overlap:col] = (1 - w_expand) * img1[:, col - overlap:col, c] + w_expand * img2[:, :overlap, c]
            img_new[:, col:] = img2[:, overlap:, c]
            img_res[:, :, c] = img_new
    else:  # vertical (top-bottom) fusion
        row, col, channels = img1.shape
        # BUG FIX: the result canvas must be (2*row - overlap, col, 3) to
        # match the per-channel planes built below; the old shape
        # (row, 2*col - overlap, 3) raised a broadcast error on assignment.
        img_res = np.zeros((2 * row - overlap, col, 3))
        for c in range(channels):
            img_new = np.zeros((2 * row - overlap, col))
            img_new[:row, :] = img1[:, :, c]
            w = np.reshape(w, (overlap, 1))
            w_expand = np.tile(w, (1, col))  # broadcast weights across the columns
            img_new[row - overlap:row, :] = (1 - w_expand) * img1[row - overlap:row, :, c] + w_expand * img2[:overlap, :, c]
            img_new[row:, :] = img2[overlap:, :, c]
            img_res[:, :, c] = img_new
    return img_res
if __name__ =="__main__":
    # Fuse each stereo pair ("<name>_0.png" + "<name>_1.png") from results1
    # into a single left-view image written to the left/ directory.
    if not os.path.exists('/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_small_result/Stereo/left/'):
        os.makedirs('/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_small_result/Stereo/left/')
    files = sorted(os.listdir('/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_small_result/Stereo/results1'))
    file_list = []
    # filename[:-6] strips the "_X.png" suffix so each pair is handled once.
    for file in files:
        filename =file[:-6]
        if not filename in file_list:
            img1 = cv2.imread("/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_small_result/Stereo/results1/"+filename+'_0.png',cv2.IMREAD_UNCHANGED)
            img2 = cv2.imread("/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_small_result/Stereo/results1/"+filename+'_1.png',cv2.IMREAD_UNCHANGED)
            # Min-max normalise both tiles to [0, 1] before blending.
            img1 = (img1 - img1.min())/img1.ptp()
            img2 = (img2 - img2.min())/img2.ptp()
            img_new1 = imgFusion(img1,img2,overlap=64,left_right=True)
            # img3 = cv2.imread(
            #     "/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_result/MIRNet/results1/" + filename + '_2.png',
            #     cv2.IMREAD_UNCHANGED)
            # img4 = cv2.imread(
            #     "/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_result/MIRNet/results1/" + filename + '_3.png',
            #     cv2.IMREAD_UNCHANGED)
            # img3 = (img3 - img3.min()) / img3.ptp()
            # img4 = (img4 - img4.min()) / img4.ptp()
            # img_new2 = imgFusion(img3, img4, overlap=64, left_right=True)
            #
            # img_new1 = (img_new1 - img_new1.min()) / img_new1.ptp()
            # img_new2 = (img_new2 - img_new2.min()) / img_new2.ptp()
            # img_new = imgFusion(img_new1, img_new2, overlap=64, left_right=True)
            cv2.imwrite('/media/ustc-ee-huangjie/KESU/Datasets/stereodataset/Midllery_small_result/Stereo/left/'+filename+'.png',np.uint8(img_new1*255.0))
file_list.append(filename) | true |
5e8dfa89497316e17ec56152801ebf4f4c960fc9 | Python | qdufour/module | /sklearn.py | UTF-8 | 536 | 3.78125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
from sklearn import tree
# NOTE(review): if this file is saved as sklearn.py (as the path suggests) it
# shadows the scikit-learn package and `from sklearn import tree` imports the
# script itself and fails -- rename the file.
features = [[7, 0.6, 40], [7, 0.6, 41], [37, 600, 37], [37, 600, 38]] # classification features for each sample
#labels = [chicken, chicken, horse, horse]
labels = [0, 0, 1, 1] # classification targets (0 = chicken, 1 = horse)
classif = tree.DecisionTreeClassifier() # the decision-tree classifier instance
classif.fit(features, labels) #feed or fit your data to the classifier
print(classif.predict([[7, 0.6, 41]])) # prediction for a sample
#output
# [0] == Chicken | true |
c0c40e66ddea2c42ef21a7f74a7d52de72948192f | Python | e-kolpakov/e-kolpakov.github.io | /_code/2020-01-19-building-tests/src/ints.py | UTF-8 | 167 | 3.296875 | 3 | [] | no_license | def multiply(i1: int, i2: int) -> int:
    # Product of two integers.
    return i1 * i2
def self_test():
    # Minimal built-in sanity checks for multiply(); raises AssertionError on failure.
    assert(multiply(1, 2) == 2)
    assert(multiply(3, 4) == 12)
    print("Tests passed") | true
3578e54eba0bef321a71b81d9dbad9c3c3220271 | Python | Annish1234/My-python-files | /mark2.py | UTF-8 | 3,597 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
import smtplib
import time
import os
import RPi.GPIO as GPIO
import speech_recognition as sr
import random
# Console "security door" demo: voice-, motion-, and OTP-controlled door
# driven by Raspberry Pi GPIO, with webcam snapshots and e-mail alerts.
# NOTE(review): SMTP credentials are hard-coded below -- move them to
# environment variables or a config file before sharing/deploying.
a=0
x=0
print("secrity system with high security opens only for authorised users only")
r=input("press the letter s for entering into the acceing menu :::")
if r=="s":
    print("your now in front of a most securable safety wall")
    print("press the letter v for opening the door by voice command .open it if your a authorised users only")
    print("press the letter s for opening the door by sensing.strictly only for authorised users only")
    print("press the letter q for opening the door by access code only authorised,registered user can access this ")
    print("your under surviliance be carefull.")
    b=input("press your choice to get the most secure bank locker in this world::::")
    if b=="v":
        # Voice mode: listen until the keyword "hello" is recognised.
        r = sr.Recognizer()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source,duration=5)
            print("Say something!")
            while True:
                audio = r.listen(source)
                print("You said: " + r.recognize_google(audio))
                if r.recognize_google(audio)=="hello":
                    print("the door will be open for 3seconds")
                    # Pulse the door-motor pins: open, hold, then close.
                    GPIO.setmode(GPIO.BOARD)
                    GPIO.setup(7,GPIO.OUT)
                    GPIO.setup(11,GPIO.OUT)
                    GPIO.output(7,True)
                    GPIO.output(11,False)
                    time.sleep(4)
                    GPIO.output(7,False)
                    GPIO.output(11,True)
                    time.sleep(1)
                    GPIO.cleanup()
                    # Snapshot the visitor and e-mail an alert to the owner.
                    os.system("fswebcam -F 3 --fps 20 -r 1200x800 DOOR.jpg")
                    s = smtplib.SMTP('smtp.gmail.com',)
                    s.starttls()
                    s.login("balajikumar189@gmail.com", "balaji@google")
                    message = "ALERT,ALERT TO THE AUTHORIZED USERS OF HI TECH SAFETY DOOR OF THE BANK IS BEEN ACCESSED USING THE SECRET PASSWORD"
                    s.sendmail("balajikumar189@gmail.com", "balajikumar189@gmail.com", message)
                    s.quit()
    if b=="s":
        print("this part of the door security is unprotected with no password and the door will be opened for 3seconds,the door is opened by means of sensing only")
        # NOTE(review): MotionSensor is never imported (gpiozero?) -- this
        # branch raises NameError as written; add the missing import.
        pir = MotionSensor(27)
        while True:
            if pir.motion_detected:
                print("SOME STRANGER IS IN FRONT OF THE DOOR")
                os.system("fswebcam -F 3 --fps 20 -r 1200x800 DOOR1.jpg")
                GPIO.cleanup()
                GPIO.setmode(GPIO.BOARD)
                GPIO.setup(7,GPIO.OUT)
                GPIO.setup(11,GPIO.OUT)
                GPIO.output(7,True)
                GPIO.output(11,False)
                time.sleep(4)
                GPIO.output(7,False)
                GPIO.output(11,True)
                time.sleep(1)
                GPIO.cleanup()
                print("the door is been opened by u but the authority will get the message that the door is opened by sensing,your in hi.tech security system")
                s = smtplib.SMTP('smtp.gmail.com',)
                s.starttls()
                s.login("balajikumar189@gmail.com", "balaji@google")
                message = "ALERT,ALERT TO THE AUTHORIZED USERS OF HI TECH SAFETY DOOR OF THE BANK IS BEEN ACCESSED USING THE SECRET PASSWORD"
                s.sendmail("balajikumar189@gmail.com", "balajikumar189@gmail.com", message)
                s.quit()
                os.system("fswebcam -F 3 --fps 20 -r 1200x800 DOOR2.jpg")
    if b=="q":
        print("you will get otp to your mail if and only if your a authorized user")
        bal=input("press g to get the access code::")
        if bal=="g":
            # Generate a 6-digit OTP and mail it to the registered address.
            s = smtplib.SMTP('smtp.gmail.com',)
            s.starttls()
            s.login("balajikumar189@gmail.com","indian-american")
            m=str(random.randint(111111,999999))
            s.sendmail("balajikumar189@gmail.com", "balajikumar189@gmail.com", m)
            s.quit()
            print("please enter the otp sent to your authorised email id ")
            a=str(input("enter the otp send to your mail id:::"))
            # BUG FIX: the typed code must be compared against the generated
            # OTP `m`, not the literal string "m".
            if a==m:
                GPIO.setmode(GPIO.BOARD)
                GPIO.setup(7,GPIO.OUT)
                GPIO.setup(11,GPIO.OUT)
                GPIO.output(7,True)
                GPIO.output(11,False)
                time.sleep(4)
                GPIO.output(7,False)
                GPIO.output(11,True)
                time.sleep(1)
                GPIO.cleanup()
                print("thank u")
| true |
5c13075751a78b79f7d681dcb54ce3c7a5f6eea6 | Python | mariia-kiko/AdequateNameForPythonLab | /REFACTORING.py | UTF-8 | 5,106 | 3.40625 | 3 | [] | no_license | import pygame
from pygame.draw import *
import math as m
pygame.init()
#SCREEN PARAMETERS
WIDTH = 1000
HEIGHT = 600
#LIST OF COLORS
LIGHT_OLIVE = (206, 235, 206)
BLUE = (44, 117, 255)
YELLOW = (237, 255, 33)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BROWN = (168,47,20)
LIGHT_RED = (235, 76, 66)
FPS = 30
screen = pygame.display.set_mode((WIDTH, HEIGHT))
def background (color1 = LIGHT_OLIVE, color_2 = BLUE, color3 = YELLOW):
    """Paint the backdrop as three equal horizontal colour bands."""
    # BUG FIX: the colour parameters were accepted but ignored (the body
    # hard-coded the module constants); use them so callers can customise
    # the bands. Defaults preserve the original look exactly.
    rect(screen, color1, (0, 0, WIDTH, HEIGHT/3))
    rect(screen, color_2, (0, HEIGHT/3, WIDTH, HEIGHT/3))
    rect(screen, color3, (0, 2*HEIGHT/3, WIDTH, HEIGHT/3))
background()
def cloud (x0, y0, r, surf_color = LIGHT_OLIVE, cloud_color = WHITE):
    '''
    Draw a cloud of overlapping circles onto a small surface and blit it.
    x0, y0 - coordinates of upper left corner of the surface
    cloud_color - color of cloud in RGB
    surf_color - surface color in RGB
    r - radius of each circle
    '''
    surf = pygame.Surface((6*r, 4*r))
    surf.fill(surf_color)
    # Top row: 3 puffs, each with a 1-px black outline.
    for i in range (3):
        circle(surf, cloud_color, ((i+2)*r, 1.5*r), r)
        circle(surf, BLACK, ((i+2)*r, 1.5*r), r, 1)
    # Bottom row: 4 puffs shifted left by 2r/3 so the rows interlock.
    for j in range (4):
        circle(surf, cloud_color, (2*r - 2*r/3 + j*r, 2.5*r), r)
        circle(surf, BLACK, (2*r - 2*r/3 + j*r, 2.5*r), r, 1)
    screen.blit(surf, (x0, y0))
cloud(0, 0, 30, LIGHT_OLIVE, WHITE)
def sun (x0, y0, r, color = YELLOW):
    '''
    Draw the Sun as a filled circle directly on the screen.
    x0, y0 - coordinates of center of the Sun
    r - radius of the Sun
    color - fill colour in RGB
    '''
    circle(screen, color, (x0, y0), r)
sun(830, 100, 50, YELLOW)
def umbrella(x0, y0, hat_width, hat_height, stick_width, stick_height, n):
    '''
    Draw a semi-transparent beach umbrella (triangular hat with n ribs on a stick).
    x0, y0 - coordinates of top of umbrella hat
    n - number of lines on umbrella hat
    NOTE(review): the shapes are drawn with the screen coordinates x0, y0 on a
    surface of only (hat_width, hat_height + stick_height) -- presumably most
    of the geometry lands outside the surface; confirm the intended offsets.
    '''
    surf = pygame.Surface((hat_width, hat_height + stick_height), pygame.SRCALPHA)
    #surf.fill(BLACK)
    surf.set_alpha(100)  # whole umbrella rendered semi-transparent
    rect(surf, (168, 47, 20, 128), (x0 - stick_width/2, y0 + hat_height, stick_width, stick_height))
    polygon(surf, (235, 76, 66, 128), [(x0 - (hat_width/2), y0 + hat_height), (x0, y0),
                                       (x0 + (hat_width/2), y0 + hat_height)])
    # n+1 ribs fanning from the hat's rim up to its tip.
    for i in range (n + 1):
        line(surf, BLACK, (x0 - hat_width/2 + i*hat_width/(n+1), y0 + hat_height), (x0, y0))
    screen.blit(surf, (x0 - 0.5*hat_width, y0))
umbrella(525, 350, 140, 30, 20, 180, 6)
def boat (x0, y0, bottom_width, boat_height, stick_width, stick_height):
    '''
    Draw a boat (hull, curved stern arcs, mast, two sails, porthole) onto a surface.
    x0, y0 - coordinates of bow
    NOTE(review): `surf` is never blitted to `screen`, and the drawing uses
    screen-sized coordinates on a much smaller surface -- as written nothing
    from boat() appears in the window; confirm whether the targets should be
    `screen` or a final blit is missing.
    '''
    surf = pygame.Surface((1.5*bottom_width + boat_height, boat_height + stick_width))
    rect(surf, (168, 47, 20, 128), (x0 - 1.5*bottom_width, y0, bottom_width, boat_height))
    polygon(surf, (168, 47, 20, 128), [(x0 - bottom_width/2, y0), (x0, y0),
                                       (x0 - bottom_width/2, y0 + boat_height)])
    # Three 1-px-offset arcs thicken the curved stern.
    arc(surf, (168, 47, 20, 128), [x0 - 1.5*bottom_width - boat_height, y0 - boat_height, 2*boat_height, 2*boat_height],m.pi,m.pi*1.5, boat_height)
    arc(surf, (168, 47, 20, 128), [x0 - 1.5*bottom_width - boat_height + 1, y0 - boat_height, 2*boat_height, 2*boat_height],m.pi,m.pi*1.5, boat_height)
    arc(surf, (168, 47, 20, 128), [x0 - 1.5*bottom_width - boat_height + 2, y0 - boat_height, 2*boat_height, 2*boat_height],m.pi,m.pi*1.5, boat_height)
    rect(surf, BLACK, (x0 - bottom_width, y0 - stick_height, stick_width, stick_height))
    # Two white triangular sails with black outlines.
    polygon(surf, (255, 255, 255, 128), [(x0 - bottom_width + stick_width, y0 - stick_height), (x0 - 0.5*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height),
                                         (x0 - 0.8*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height)])
    polygon(surf, (255, 255, 255, 128), [(x0 - bottom_width + stick_width, y0 - 0.2*stick_height), (x0 - 0.5*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height),
                                         (x0 - 0.8*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height)])
    line(surf, BLACK, (x0 - bottom_width + stick_width, y0 - stick_height), (x0 - 0.5*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height))
    line(surf, BLACK, (x0 - bottom_width + stick_width, y0 - 0.2*stick_height), (x0 - 0.5*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height))
    line(surf, BLACK, (x0 - 0.5*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height),
         (x0 - 0.8*bottom_width + stick_width, y0 - stick_height + 0.4*stick_height))
    line(surf, BLACK, (x0 - 0.8*bottom_width + stick_width, y0 - 0.6*stick_height), (x0 - bottom_width + stick_width, y0 - 0.2*stick_height))
    line(surf, BLACK, (x0 - 0.8*bottom_width + stick_width, y0 - 0.6*stick_height), (x0 - bottom_width + stick_width, y0 - stick_height))
    # Porthole on the hull.
    circle(surf, WHITE, (x0 - 0.6*bottom_width, y0 + 0.5*boat_height), 0.35*boat_height)
    circle(surf, BLACK, (x0 - 0.6*bottom_width, y0 + 0.5*boat_height), 0.35*boat_height, 2)
boat(725, 270, 250, 50, 10, 200)
pygame.display.update()
clock = pygame.time.Clock()
finished = False
# Main event loop: cap the frame rate at FPS and wait for the window close.
while not finished:
    clock.tick(FPS)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            finished = True
pygame.quit()
| true |
93e765deb4a05a105e8f9458e25079c05a4a3477 | Python | rbiegelmeyer/CodeFights-Python | /Arcade/Core/24 - equalPairOfBits.py | UTF-8 | 75 | 2.6875 | 3 | [] | no_license | def equalPairOfBits(n, m):
    # 2**p, where p is the lowest bit position at which n and m agree
    # (~n ^ m has a 1 wherever the bits are equal; presumably n, m >= 0 --
    # TODO confirm, since bin() of a negative value includes a '-' sign).
    return 2**(str(bin(~n ^ m)[::-1]).find('1')) | true
00c3ff8a607f398f527108ae7451dd955b97fbad | Python | ZsNagy89/My-python-files | /PE_01_LAB_Day_of_the_year.py | UTF-8 | 1,379 | 3.40625 | 3 | [] | no_license | def day_of_year(year,month,day):
    # NOTE(review): this relies on the module globals `x` (month numbers that
    # are not 31 days long) and `test_days` (the accumulator list), and the
    # final `sum(test_days)` result is discarded -- the caller reads the
    # global list afterwards. `days_vector` below is never used.
    months=range(month)
    for i in range(month):
        days_vector=[]
        if year%4==0: #maybe Leap year
            if year%100==0 and year%400!=0: #not a leap year
                if months[i] in x:
                    if months[i]==2:
                        test_days.append(28)
                    else:
                        test_days.append(30)
                else:
                    test_days.append(31)
            else: #leap year
                if months[i] in x:
                    if months[i]==2:
                        test_days.append(29)
                    else:
                        test_days.append(30)
                else:
                    test_days.append(31)
        else: #not a leap year
            if months[i] in x:
                if months[i]==2:
                    test_days.append(28)
                else:
                    test_days.append(30)
            else:
                test_days.append(31)
    del test_days[0] # delete the 0th item
    test_days.append(day) # add day number to elem
    sum(test_days)
# Months that are shorter than 31 days (February is special-cased above).
x=(2,4,6,9,11)
test_days=[]
day_of_year(1989,2,10)
print(sum(test_days))
| true |
ede40fe2a86f05fe52f011607b3ab293378ecb40 | Python | stahl/adventofcode | /2017/day4/a.py | UTF-8 | 397 | 3.046875 | 3 | [] | no_license | """Counts passphrases as defined by https://adventofcode.com/2017/day/4."""
import fileinput
from collections import Counter
def valid_passphrase(passphrase):
    """A passphrase (list of words) is valid when no word appears twice."""
    return len(set(passphrase)) == len(passphrase)
# Count the valid passphrases read from stdin / the files named in argv.
phrases = (line.split() for line in fileinput.input())
print(sum(1 for phrase in phrases if valid_passphrase(phrase)))
| true |
fbba64103888fec29c9081601219a861bdc991c6 | Python | omidmogasemi/stock-trading-bot | /Stock.py | UTF-8 | 1,481 | 2.765625 | 3 | [] | no_license | from Algorithms.MomentumAlgorithm import MomentumAlgorithm
class Stock:
    """Wraps one ticker: holds its bar data, runs the trading algorithm,
    and submits buy/sell orders through the injected broker `api`."""
    # Fixed number of shares per submitted order.
    ORDER_QUANTITY = 5
    def __init__(self, ticker, api):
        self.ticker = ticker
        self.api = api
        self.current_pos = None
        self.barset = None
        self.algo = None
    def get_ticker(self):
        return self.ticker
    def set_barset(self, barset):
        # Rebuild the algorithm whenever fresh bars arrive.
        self.barset = barset
        self.algo = MomentumAlgorithm(
            self.ticker, self.barset, self.current_pos)
    def set_current_pos(self, pos):
        # NOTE(review): `pos` is unused, and `self.current_pos` is still None
        # from __init__, so .append() below raises AttributeError unless a
        # caller assigns a list first -- confirm the intended initialisation.
        self.temp_pos_aggregate = self.api.get_position(self.ticker)
        # you can choose to extract any necessary information here and pass it into the
        # current position, and then extract it into local vars in the close analysis
        # to use in your custom analysis
        # i should make an abstract class that each algorithm must extend
        # the necessary functions from
        self.current_pos.append(self.temp_pos_aggregate.avg_entry_price)
        self.current_pos.append(self.temp_pos_aggregate.unrealized_pl)
    def analyze_bars(self):
        # Ask the algorithm for a decision and translate it into a GTC
        # market order; track ownership in self.owned.
        result = self.algo.perform_analysis()
        if (result == "buy"):
            self.api.submit_order(
                self.ticker, self.ORDER_QUANTITY, "buy", "market", "gtc")
            self.owned = True
        elif (result == "sell"):
            self.api.submit_order(
                self.ticker, self.ORDER_QUANTITY, "sell", "market", "gtc")
            self.owned = False
| true |
cbf32ef8297a95a0a2914a99ca7df04b9e99f675 | Python | aayushi-droid/Python-Thunder | /Solutions/Geometry1-LengthOfLineSegment.py | UTF-8 | 394 | 3.6875 | 4 | [
"MIT"
] | permissive | '''
Problem statement: Write a function that takes coordinates of two points on a two-dimensional plane and returns the length of the line segment connecting those two points.
Problem Link: https://edabit.com/challenge/3Ekam9jvbNKHDtx4K
'''
import math
def line_length(dot1, dot2):
    """Return the length of the segment joining two 2-D points, rounded to 2 decimals."""
    (x1, y1), (x2, y2) = dot1, dot2
    squared = math.pow(x1 - x2, 2) + math.pow(y1 - y2, 2)
    return round(math.sqrt(squared), 2)
| true |
45eae450bea6cfc9f685a96d402efe4d0f864b23 | Python | ncastal/sqlalchemy-challenge | /app.py | UTF-8 | 4,937 | 2.78125 | 3 | [] | no_license | import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
app=Flask(__name__)
# Home page: lists the available API routes. The adjacent f-string literals
# below concatenate into a single response string.
@app.route("/")
def home():
    return(
        f'Welcome to the home page <br/>'
        f'Links<br/>'
        f'/api/v1.0/precipitation<br/>'
        f'/api/v1.0/stations<br/>'
        f'/api/v1.0/tobs<br/>'
        f'/api/v1.0/ enter date in yyyy-mm-dd format<br/>')
# Return every precipitation observation as JSON: [{"date": ..., "prcp": ...}, ...]
@app.route('/api/v1.0/precipitation')
def precipitation():
    session = Session(engine)
    rows = session.query(Measurement.date, Measurement.prcp).all()
    session.close()
    readings = [{'date': date, 'prcp': prcp} for date, prcp in rows]
    return jsonify(readings)
#returns json of station names
@app.route('/api/v1.0/stations')
def station():
    session=Session(engine)
    result=session.query(Station.name).all()
    session.close()
    # np.ravel flattens the query's list of 1-tuples into a flat list of names.
    names=list(np.ravel(result))
    return jsonify(names)
# Return the final year of temperature observations (dates after 2016-08-23)
# as JSON: [{"date": ..., "tobs": ...}, ...]
@app.route('/api/v1.0/tobs')
def temperature():
    session = Session(engine)
    rows = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date>'2016-08-23').all()
    session.close()
    readings = [{'date': date, 'tobs': tobs} for date, tobs in rows]
    return jsonify(readings)
#returns json of temp max, min, and average from evry date between start date to end of data
@app.route('/api/v1.0/<start>')
def start_avg(start):
    # NOTE(review): if <start> is after the last date in the data, the
    # `while date!=end_date` loop below never terminates; the session is
    # also never closed on this route -- confirm/fix upstream.
    #create datetime object from <start>
    start_date_str=start.split('-')
    start_year_int=int(start_date_str[0])
    start_month_int=int(start_date_str[1])
    start_day_int=int(start_date_str[2])
    start_date=dt.date(start_year_int,start_month_int,start_day_int)
    session=Session(engine)
    #create datetime object from last date in data
    last_date=session.query(func.strftime("%Y-%m-%d",Measurement.date)).order_by(Measurement.date.desc()).first()
    end_date=last_date[0].split('-')
    end_year_int=int(end_date[0])
    end_month_int=int(end_date[1])
    end_day_int=int(end_date[2])
    end_date=dt.date(end_year_int,end_month_int,end_day_int)
    #create list of date between start and last date
    date_list=[]
    date=start_date
    while date!=end_date:
        date_list.append(date)
        date=date+dt.timedelta(days=1)
    date_list.append(end_date)
    #formatted list for query
    formated_date_list=[]
    for date in date_list:
        date=date.strftime('%Y-%m-%d')
        formated_date_list.append(date)
    temp_list=[]
    #loop through formatted date list to query for average, max, and min temps each date
    for date in formated_date_list:
        result= session.query(Measurement.date,func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
            filter(Measurement.date==date).all()
        temps=result[0]
        temp_list.append(temps)
    return jsonify(temp_list)
#returns json of temp max, min, and average of every date between a start date to an end date
@app.route('/api/v1.0/<start>/<end>')
def start_end_avg(start,end):
    # NOTE(review): if <end> is before <start>, the `while date!=end_date`
    # loop below never terminates; the session is never closed either.
    #start date datetime object
    start_date_str=start.split('-')
    start_year_int=int(start_date_str[0])
    start_month_int=int(start_date_str[1])
    start_day_int=int(start_date_str[2])
    start_date=dt.date(start_year_int,start_month_int,start_day_int)
    #end date datetime object
    end_date_str=end.split('-')
    end_year_int=int(end_date_str[0])
    end_month_int=int(end_date_str[1])
    end_day_int=int(end_date_str[2])
    end_date=dt.date(end_year_int,end_month_int,end_day_int)
    session=Session(engine)
    #list of dates between start date and end date
    date_list=[]
    date=start_date
    while date!=end_date:
        date_list.append(date)
        date=date+dt.timedelta(days=1)
    date_list.append(end_date)
    formated_date_list=[]
    for date in date_list:
        date=date.strftime('%Y-%m-%d')
        formated_date_list.append(date)
    temp_list=[]
    #loop through date list to query for avg, max, and min temps between start and end date
    for date in formated_date_list:
        result= session.query(Measurement.date,func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
            filter(Measurement.date==date).all()
        temps=result[0]
        temp_list.append(temps)
    return jsonify(temp_list)
if __name__ == "__main__":
app.run(debug=True)
| true |
01036fe6474cdfd7d9de1e9e8a4a3d8404d98f3a | Python | Jan200101/Sentry-Cogs | /nep/nep.py | UTF-8 | 2,149 | 2.9375 | 3 | [
"WTFPL"
] | permissive | import discord
from discord.ext import commands
from random import choice
from cogs.utils.dataIO import dataIO
from os import path, makedirs
class Nep:
    "Nep Nep"
    def __init__(self, bot):
        self.bot = bot
        # Image URLs and caption strings seeded by check_file() below.
        self.nep = dataIO.load_json('data/nep/images.json')
        self.nepsay = dataIO.load_json('data/nep/text.json')
    @commands.command(aliases=["nep"])
    async def Nep(self):
        """Displays a random Nep."""
        nep = choice(self.nep)
        nepsay = choice(self.nepsay)
        if not nep or not nepsay:
            await self.bot.say('Something went wrong')
            return
        # Random 6-digit hex colour for the embed border.
        colour = ''.join([choice('0123456789ABCDEF') for x in range(6)])
        colour = int(colour, 16)
        data = discord.Embed(
            title=nepsay, colour=discord.Colour(value=colour))
        data.set_image(url=nep)
        try:
            await self.bot.say(embed=data)
        # NOTE(review): bare except swallows every error, not just the
        # missing-permission case the message describes -- consider catching
        # the specific discord exception instead.
        except:
            await self.bot.say("I need the `Embed links` permission "
                               "to send this")
def check_folder():
    """Ensure the data/nep directory exists, creating it on first load."""
    if path.exists("data/nep"):
        return
    print("[Nep]Creating data/nep folder...")
    makedirs("data/nep")
def check_file():
    """Create default images.json and text.json under data/nep when they
    are missing or contain invalid JSON.

    Fix: the original used the ambiguous local names ``i`` and ``l``
    (PEP 8 E741); renamed to ``images_path`` / ``text_path``.
    """
    # Default image URLs served by the [p]nep command.
    images = ["http://i.imgur.com/13hoMVJ.jpg",
              "http://i.imgur.com/kIzXdwN.jpg",
              "http://i.imgur.com/DICh64t.jpg",
              "http://i.imgur.com/nMp3NMp.png",
              "http://i.imgur.com/MMf1YfR.png",
              "http://i.imgur.com/CGABJEs.jpg",
              "http://i.imgur.com/GRz1oCo.jpg"]
    images_path = "data/nep/images.json"
    if not dataIO.is_valid_json(images_path):
        print("[Nep]Creating default images.json...")
        dataIO.save_json(images_path, images)
    # Default caption lines paired with a random image.
    text = ["Nep!!11",
            "Neeeeeeepppppp",
            "Neeeeeeeeeeeeeeeeeeeeeeepppppppppp",
            "Nep Nep",
            "I ran out of Nep so here is some more",
            "Nep²",
            "Nep³"]
    text_path = "data/nep/text.json"
    if not dataIO.is_valid_json(text_path):
        print("[Nep]Creating default text.json...")
        dataIO.save_json(text_path, text)
def setup(bot):
    # Red-DiscordBot entry point: ensure the data files exist, then
    # register the cog instance with the bot.
    check_folder()
    check_file()
    bot.add_cog(Nep(bot))
| true |
14834e6e89da4922ffd9970c591b27edd0f0f9ab | Python | Asunqingwen/LeetCode | /Cookbook/String/括号生成.py | UTF-8 | 914 | 3.828125 | 4 | [] | no_license | '''
数字 n 代表生成括号的对数,请你设计一个函数,用于能够生成所有可能的并且 有效的 括号组合。
示例 1:
输入:n = 3
输出:["((()))","(()())","(())()","()(())","()()()"]
示例 2:
输入:n = 1
输出:["()"]
提示:
1 <= n <= 8
'''
from typing import List
class Solution:
    def generateParenthesis(self, n: int) -> List[str]:
        """Return every well-formed combination of n pairs of parentheses.

        Backtracking: extend the partial string with '(' while fewer than
        n are open, and with ')' only while it keeps the string balanced.

        Fix: the original inner helper used a mutable default argument
        (``s=[]``), which is shared across calls; the fresh list is now
        passed explicitly from the single entry point.
        """
        res = []

        def backtrack(chars, opened, closed):
            # A complete combination has exactly 2*n characters.
            if len(chars) == 2 * n:
                res.append(''.join(chars))
                return
            if opened < n:
                chars.append('(')
                backtrack(chars, opened + 1, closed)
                chars.pop()
            if closed < opened:
                chars.append(')')
                backtrack(chars, opened, closed + 1)
                chars.pop()

        backtrack([], 0, 0)
        return res
if __name__ == '__main__':
    # Quick manual check: print all valid combinations for 3 pairs.
    n = 3
    sol = Solution()
    print(sol.generateParenthesis(n))
| true |
38419209c45078bf240a2965103a4ccdc0e19a08 | Python | K4RI/Half-Vie-3 | /HALF-VIE 3.py | UTF-8 | 19,065 | 2.515625 | 3 | [] | no_license | # Ceci est le code en Python de "Half-Vie 3".
# Voilà.
import random, math, decimal, pygame
from pygame.locals import *
from classes import *
from constantes import *
pygame.init() # initialisation de Pygame
#Ouverture de la fenêtre Pygame
fenetre = pygame.display.set_mode((1024, 768))
continuer_accueil = 1
continuer = 1
testrand = 0
ntour = 0
choixattperso = 0
font=pygame.font.Font(None, 24)
fontg=pygame.font.Font(None, 32)
fontgg=pygame.font.Font(None, 128)
pévé = font.render(str("/ PV"),1,(255,255,255))
tpoison = font.render(str("POISON PV/SEC"),1,(0,255,0))
tfeu = font.render(str("ENFLAMMÉ PV/SEC"),1,(255,0,0))
tparade = font.render(str("PARADE %"),1,(0,0,255))
tpeur = font.render(str("EFFRAYÉ"),1,(0,255,255))
tarach = font.render(str("ÉTOURDI"),1,(255,0,255))
tleth = font.render(str("LÉTHARGIE"),1,(255,255,0))
tcam = font.render(str("CAMOUFLÉ"),1,(255,255,255))
zik = pygame.mixer.Sound("ressources/zik.wav")
zik.play(loops=-1, maxtime=0, fade_ms=0)
#BOUCLE PRINCIPALE
while continuer:
while continuer_accueil: #PHASE 1 : TITRE
#Chargement et affichage de l'écran d'accueil
fenetre.blit(image_accueil, (0,0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE: #quitter le jeu
pygame.quit()
elif event.type == KEYDOWN and event.key == K_F1: #PHASE 1B : CREDITS
continuer_credit = 1
while continuer_credit:
fenetre.blit(image_credit, (0,0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN and event.key == K_ESCAPE:
continuer_credit = 0
fenetre.blit(image_accueil, (0,0))
pygame.display.flip()
elif event.type == KEYDOWN and event.key == K_F2: #PHASE 1C : AIDE (à terminer, peut-être, non ?)
continuer_aide = 1
while continuer_aide:
fenetre.blit(image_aide, (0,0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN and event.key == K_RETURN:
continuer_aide2 = 1
while continuer_aide2:
fenetre.blit(image_aide2, (0,0))
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN and event.key == K_RETURN:
continuer_aide2 = 0
continuer_aide = 0
fenetre.blit(image_accueil, (0,0))
pygame.display.flip()
elif event.type == KEYDOWN and event.key == K_RETURN: #PHASE 2 : CHOIX PERSO #sprites de 4 persos en 250*200
perso = Perso('guerrier', 'mage', 'archer', 'paladin')
continuer_choixperso = 1
while continuer_choixperso:
fenetre.blit(image_choixperso, (0,0))
if n_perso == 1:
perso.choix = "Guerrier"
perso.image = perso.guerrier
perso.force = 6
perso.dexterite = 3
perso.constitution = 7
perso.agilite = 4
perso.potions = 2
tcarac = font.render(str("Force : 6 Dextérité : 3 Constitution : 7 Agilité : 4"),1,(255,255,255)) #afficher caractéristiques en-dessous
fenetre.blit(tcarac, (310, 515))
textattp1 = fontg.render(str("Coup d'épée"),1,(255,255,255)) #nom des actions possibles en jeu
textattp2 = fontg.render(str("Parade au bouclier"),1,(255,255,255))
textattp3 = fontg.render(str("Potion de vie"),1,(255,255,255))
elif n_perso == 2:
perso.choix = "Mage"
perso.image = perso.mage
perso.force = 2
perso.dexterite = 9
perso.constitution = 5
perso.agilite = 4
perso.potions = 3
tcarac = font.render(str("Force : 2 Dextérité : 9 Constitution : 5 Agilité : 4"),1,(255,255,255))
fenetre.blit(tcarac, (310, 515))
textattp1 = fontg.render(str("Coup de bâton"),1,(255,255,255))
textattp2 = fontg.render(str("Boule de feu"),1,(255,255,255))
textattp3 = fontg.render(str("Sort de soin"),1,(255,255,255))
elif n_perso == 3:
perso.choix = "Archer"
perso.image = perso.archer
perso.force = 4
perso.dexterite = 6
perso.constitution = 3
perso.agilite = 7
perso.rescamouflage = 2
tcarac = font.render(str("Force : 3 Dextérité : 7 Constitution : 2 Agilité : 8"),1,(255,255,255))
fenetre.blit(tcarac, (310, 515))
textattp1 = fontg.render(str("Coup de dague"),1,(255,255,255))
textattp2 = fontg.render(str("Tir à l'arc"),1,(255,255,255))
textattp3 = fontg.render(str("Camouflage"),1,(255,255,255))
elif n_perso == 4:
perso.choix = "Paladin"
perso.image = perso.paladin
perso.force = 8
perso.dexterite = 2
perso.constitution = 9
perso.agilite = 1
perso.rescri = 1
perso.potions = 1
tcarac = font.render(str("Force : 8 Dextérité : 2 Constitution : 9 Agilité : 1"),1,(255,255,255))
fenetre.blit(tcarac, (310, 515))
textattp1 = fontg.render(str("Coup de glaive"),1,(255,255,255))
textattp2 = fontg.render(str("Cri de guerre"),1,(255,255,255))
textattp3 = fontg.render(str("Second souffle"),1,(255,255,255))
fenetre.blit(perso.image, (400,200)) #afficher image
fenetre.blit(fontg.render(str(perso.choix),1,(255,255,255)), (470,450)) #afficher nom du perso
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN and event.key == K_ESCAPE:
continuer_choixperso = 0
fenetre.blit(image_accueil, (0,0))
pygame.display.flip()
if event.type == QUIT: #quitter le jeu
pygame.quit()
elif event.type == KEYDOWN and event.key == K_LEFT and n_perso >= 2:
n_perso = n_perso - 1
elif event.type == KEYDOWN and event.key == K_RIGHT and n_perso <= 3:
n_perso = n_perso + 1
elif event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 160 and event.pos[0] < 240 and event.pos[1] > 260 and event.pos[1] < 390 and n_perso >= 2:
n_perso = n_perso - 1
elif event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 790 and event.pos[0] < 880 and event.pos[1] > 260 and event.pos[1] < 380 and n_perso <= 3:
n_perso = n_perso + 1
elif event.type == KEYDOWN and event.key == K_RETURN: #PHASE 3 : CHOIX ADVERSAIRE #sprites de 4 mobs en 250*200
ennemi = Ennemi('gobelin', 'araignee', 'paladin de lombre', 'sorcier')
n_perso = 1
continuer_choixmob = 1
while continuer_choixmob:
fenetre.blit(image_choixmob, (0,0))
if n_mob == 1:
ennemi.choix = "Gobelin"
ennemi.image = ennemi.gobelin
ennemi.force = 5
ennemi.dexterite = 5
ennemi.constitution = 4
ennemi.agilite = 6
tcarace = font.render(str("Force : 5 Dextérité : 5 Constitution : 4 Agilité : 6"),1,(255,255,255))
fenetre.blit(tcarace, (310, 515))
if n_mob == 2:
ennemi.choix = "Araignee"
ennemi.image = ennemi.araignee
ennemi.force = 7
ennemi.dexterite = 4
ennemi.constitution = 8
ennemi.agilite = 1
ennemi.resarach = 1
tcarace = font.render(str("Force : 7 Dextérité : 4 Constitution : 8 Agilité : 1"),1,(255,255,255))
fenetre.blit(tcarace, (310, 515))
if n_mob == 3:
ennemi.choix = "Paladin de lombre"
ennemi.image = ennemi.paladin2
ennemi.force = 8
ennemi.dexterite = 3
ennemi.constitution = 7
ennemi.agilite = 2
ennemi.potions = 1
tcarace = font.render(str("Force : 8 Dextérité : 3 Constitution : 7 Agilité : 2"),1,(255,255,255))
fenetre.blit(tcarace, (310, 515))
if n_mob == 4:
ennemi.choix = "Sorcier"
ennemi.image = ennemi.sorcier
ennemi.force = 3
ennemi.dexterite = 8
ennemi.constitution = 5
ennemi.agilite = 4
ennemi.resleth = 1
ennemi.energ = 3
tcarace = font.render(str("Force : 3 Dextérité : 8 Constitution : 5 Agilité : 4"),1,(255,255,255))
fenetre.blit(tcarace, (310, 515))
fenetre.blit(ennemi.image, (400,200))
fenetre.blit(fontg.render(str(ennemi.choix),1,(255,255,255)), (470,450))
pygame.display.flip()
for event in pygame.event.get():
if event.type == KEYDOWN and event.key == K_ESCAPE:
continuer_choixmob = 0
fenetre.blit(image_choixperso, (0,0))
elif event.type == QUIT: #quitter le jeu
pygame.quit()
elif event.type == KEYDOWN and event.key == K_LEFT and n_mob >= 2:
n_mob = n_mob - 1
elif event.type == KEYDOWN and event.key == K_RIGHT and n_mob <= 3:
n_mob = n_mob + 1
elif event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 160 and event.pos[0] < 240 and event.pos[1] > 260 and event.pos[1] < 390 and n_mob >= 2:
n_mob = n_mob - 1
elif event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 790 and event.pos[0] < 880 and event.pos[1] > 260 and event.pos[1] < 380 and n_mob <= 3:
n_mob = n_mob + 1
elif event.type == KEYDOWN and event.key == K_RETURN:
continuer_choixmob = 0
continuer_choixperso = 0
continuer_accueil = 0
continuer_jeu = 1
n_mob = 1
pygame.display.flip()
#des trucs de correspondances entre caractéristiques et influences dans le jeu
perso.coupmax = 20 + perso.force * 15
perso.limite = int((random.randint(2,6) * 5) + perso.dexterite * 2.5)
perso.pv = 50 + perso.constitution * 30
pvmax = perso.pv
tpvmax = font.render(str(pvmax),1,(255,255,255))
perso.armure = perso.constitution * 20
perso.ecritique = perso.agilite * 5
print ("\nperso =", perso.choix, "\ncoupmax =", perso.coupmax, "\nlimite =", int(10 + perso.dexterite * 2.5), "à", int(30 + perso.dexterite * 2.5), "\nPV =", perso.pv, "\nEsquive critique =", perso.ecritique)
ennemi.coupmaxe = 20 + ennemi.force * 15
ennemi.limitee = (random.randint(2,6) * 5) + ennemi.dexterite * 5
ennemi.pve = 50 + ennemi.constitution * 30
pvemax = ennemi.pve
tpvmaxe = font.render(str(pvemax),1,(255,255,255))
ennemi.armuree = ennemi.constitution * 20
ennemi.ecritiquee = ennemi.agilite * 5
print ("\nennemi =", ennemi.choix, "\ncoupmax =", ennemi.coupmaxe, "\nlimite =", 10 + ennemi.dexterite * 5, "à", 30 + ennemi.dexterite * 5, "\nPV =", ennemi.pve, "\nEsquive critique =", ennemi.ecritiquee)
def reload():
    "Redraw the whole battle screen from the current combat state."
    # NOTE(review): `tparade` appears twice in this list and several names
    # (randparade, tnpoison, tnfeu, ...) are never rebound here; only
    # fenetre/textpv/textpve actually need the global declaration.
    global fenetre, font, textpv, textpve, image_jeu, pévé, randparade, tparade, poison, tpoison, \
    tnpoison, arach, tarach, leth, tleth, camouflage, tcam, randparadee, tparade, \
    poisone, tfeu, tnfeu, peure, tpeur, textattp1, textattp2, textattp3
    fenetre = pygame.display.set_mode((1024, 768))
    textpv = font.render(str(perso.pv),1,(255,255,255)) # hit-point counters
    textpve = font.render(str(ennemi.pve),1,(255,255,255))
    fenetre.blit(image_jeu, (0,0))
    if perso.pv > 0: # player health bar, width proportional to remaining HP
        pygame.draw.rect(fenetre, (0,0,255), Rect((106,61), (364*perso.pv/pvmax, 37)))
    if ennemi.pve > 0: # enemy health bar, anchored to the right edge
        pygame.draw.rect(fenetre, (255,0,0), Rect((972-364*ennemi.pve/pvemax,61), (364*ennemi.pve/pvemax, 37)))
    fenetre.blit(perso.image, (50,200)) # character sprites
    fenetre.blit(ennemi.image, (662,200))
    fenetre.blit(textpv, (360, 70))
    fenetre.blit(tpvmax, (400, 70))
    fenetre.blit(pévé, (390, 70))
    fenetre.blit(textpve, (620, 70))
    fenetre.blit(pévé, (650, 70))
    fenetre.blit(tpvmaxe, (660, 70))
    if perso.randparade<1: # active status effects for the player
        fenetre.blit(tparade, (5, 10))
        fenetre.blit(font.render(str(int(perso.randparade*100)),1,(0,0,255)), (80, 10))
    if perso.poison:
        fenetre.blit(tpoison, (135, 10))
        fenetre.blit(font.render(str(perso.poison),1,(0,255,0)), (205, 10))
    if perso.arach:
        fenetre.blit(tarach, (5, 10))
    if perso.leth:
        fenetre.blit(tleth, (5, 10))
    if perso.camouflage:
        fenetre.blit(tcam, (5, 10))
    if ennemi.randparadee<1: # active status effects for the enemy
        fenetre.blit(tparade, (670, 10))
        fenetre.blit(font.render(str(int(ennemi.randparadee*100)),1,(0,0,255)), (745, 10))
    if ennemi.poisone:
        fenetre.blit(tfeu, (800, 10))
        fenetre.blit(font.render(str(ennemi.poisone),1,(255,0,0)), (900, 10))
    if ennemi.peure:
        fenetre.blit(tpeur, (800, 10))
    pygame.display.flip() # push the finished frame to the display
while continuer_jeu:
while ennemi.pve > 0 and perso.pv > 0:
reload() #
ntour = ntour + 1
fontg=pygame.font.Font(None, 48)
textcn1 = fontgg.render(str(ntour),1,(255,255,255))
fenetre.blit(fontgg.render(str("TOUR N°"),1,(255,255,255)), (300,640))
fenetre.blit(textcn1, (700,640))
pygame.display.flip()
fontg=pygame.font.Font(None, 32)
pygame.time.delay(2000) #PAUSE 2 SEC
perso.limite = int((random.randint(2,6) * 5) + perso.dexterite * 2.5)
for event in pygame.event.get():
if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE: #quitter le jeu
pygame.quit()
reload()
if perso.arach <= 0: #BOUCLE D'ATTAQUE PERSO
fenetre.blit(image_choixatt, (5, 580))
fenetre.blit(textattp1, (685, 590))
fenetre.blit(textattp2, (550, 620))
fenetre.blit(textattp3, (820, 620))
pygame.display.flip()
while choixattperso==0:
for event in pygame.event.get(): #Attente des événements
if event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 650 and event.pos[0] < 1020 and event.pos[1] > 575 and event.pos[1] < 615:
choixattperso = 1
elif event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 520 and event.pos[0] < 790 and event.pos[1] > 610 and event.pos[1] < 650:
choixattperso = 2
elif event.type == MOUSEBUTTONUP and event.button == 1 and event.pos[0] > 790 and event.pos[0] < 1020 and event.pos[1] > 610 and event.pos[1] < 650:
choixattperso = 3
reload()
if choixattperso == 1:
if perso.choix == 'Guerrier':
perso.coup_epee(perso.coup, perso.coupmax, perso.pv, ennemi, ennemi.pve, ennemi.randparadee)
if perso.choix == 'Mage':
perso.coup_baton(perso.coup, perso.coupmax, perso.pv, ennemi, ennemi.pve, ennemi.randparadee)
if perso.choix == 'Archer':
perso.coup_dague(perso.coup, perso.coupmax, perso.pv, ennemi, ennemi.pve, ennemi.randparadee)
if perso.choix == 'Paladin':
perso.coup_glaive(perso.coup, perso.coupmax, perso.pv, ennemi, ennemi.pve, ennemi.randparadee)
if choixattperso == 2:
if perso.choix == 'Guerrier':
perso.parade_bouclier(perso.pv, perso.randparade)
if perso.choix == 'Mage':
perso.bouledefeu(ennemi, ennemi.pve, ennemi.poisone)
if perso.choix == 'Archer':
perso.tir_arc(perso.coup, perso.coupmax, ennemi, ennemi.pve)
if perso.choix == 'Paladin':
perso.crideguerre(perso.rescri, ennemi, ennemi.peure)
if choixattperso == 3:
if perso.choix == 'Guerrier':
perso.potion_guerrier(perso.pv, perso.pvplus)
if perso.choix == 'Mage':
perso.sort_soin(perso.pv, perso.pvplus)
if perso.choix == 'Archer':
perso.acamouflage(perso.camouflage, perso.rescamouflage)
if perso.choix == 'Paladin':
perso.secondsouffle(perso.pv, perso.pvplus)
else:
perso.arach = perso.arach - 1
pygame.time.delay(3000) #PAUSE 2 SEC
for event in pygame.event.get():
if event.type == QUIT: #quitter le jeu
pygame.quit()
reload() #
ennemi.randparadee = 1
choixattperso = 0
perso.pv = perso.pv - perso.poison
ennemi.limitee = int((random.randint(2,6) * 5) + ennemi.dexterite * 2.5)
if ennemi.peure <= 0: #BOUCLE ATTAQUE ENNEMIE
choixattennemi = random.randint(1,3)
if choixattennemi == 1:
if ennemi.choix == 'Gobelin':
ennemi.coup_lance(ennemi.coupe, ennemi.coupmaxe, ennemi.pve, perso, perso.pv, perso.camouflage)
if ennemi.choix == 'Araignee':
ennemi.coup_mandibule(ennemi.coupe, ennemi.coupmaxe, ennemi.pve, perso, perso.pv, perso.camouflage)
if ennemi.choix == 'Paladin de lombre':
ennemi.coup_glaivee(ennemi.coupe, ennemi.coupmaxe, ennemi.pve, perso, perso.pv, perso.camouflage)
if ennemi.choix == 'Sorcier':
ennemi.coup_baton(ennemi.coupe, ennemi.coupmaxe, ennemi.pve, perso, perso.pv, perso.camouflage)
if choixattennemi == 2:
if ennemi.choix == 'Gobelin':
ennemi.coup_lance(ennemi.coupe, ennemi.coupmaxe, ennemi.pve, perso, perso.pv, perso.camouflage)
if ennemi.choix == 'Araignee':
ennemi.morsure(perso, perso.pv, perso.poison)
if ennemi.choix == 'Paladin de lombre':
ennemi.parade_boucliere(ennemi.pve, ennemi.randparadee, perso, perso.coup)
if ennemi.choix == 'Sorcier':
ennemi.lethargie(ennemi.resleth, perso, perso.coupmax, perso.limite)
if choixattennemi == 3:
if ennemi.choix == 'Gobelin':
ennemi.coup_lance(ennemi.coupe, ennemi.coupmaxe, ennemi.pve, perso, perso.pv, perso.camouflage)
if ennemi.choix == 'Araignee':
ennemi.arachno(ennemi.resarach, perso, perso.arach)
if ennemi.choix == 'Paladin de lombre':
ennemi.secondsoufflee(ennemi.pve, ennemi.pvpluse, ennemi.potions)
if ennemi.choix == 'Sorcier':
ennemi.boule_energie(ennemi.coupe, ennemi.coupmaxe, ennemi.energ, perso, perso.arach, perso.pv, perso.camouflage)
else:
ennemi.peure = ennemi.peure - 1
pygame.time.delay(2000) #PAUSE 2 SEC
for event in pygame.event.get():
if event.type == QUIT: #quitter le jeu
pygame.quit()
reload() #
perso.randparade = 1
ennemi.pve = ennemi.pve - ennemi.poisone
if perso.camouflage:
perso.camouflage = perso.camouflage - 1
continuer_fin = 1
ntour = 0
while continuer_fin: #FIN DU JEU (victoire, défaite, ou nul)
fenetre = pygame.display.set_mode((1024, 768))
fenetre.blit(image_fin, (0,0))
if ennemi.pve <= 0 and perso.pv >= 0: # mettre des musiques ptn
fenetre.blit(fontgg.render("VOUS AVEZ GAGNÉ!",1,(255,0,0)), (100,200))
if ennemi.pve >= 0 and perso.pv <= 0:
fenetre.blit(fontgg.render("Vous avez perdu...",1,(255,0,0)), (100,200))
if ennemi.pve <= 0 and perso.pv <= 0:
fenetre.blit(pygame.font.Font(None, 72).render("Dans un dernier échange de coups,",1,(255,0,0)), (70,200))
fenetre.blit(pygame.font.Font(None, 72).render("vous vous entretuez.",1,(255,0,0)), (200,300))
pygame.display.flip()
for event in pygame.event.get():
if event.type == QUIT: #quitter le jeu
pygame.quit()
elif event.type == KEYDOWN and event.key == K_RETURN: #quitter le jeu
continuer_fin = 0
continuer_jeu = 0
continuer_accueil = 1
| true |
9accca202124907094f7f48b1fc881c36accaedb | Python | fennerm/i3ark | /i3ark/workspace.py | UTF-8 | 816 | 3.421875 | 3 | [
"MIT"
] | permissive | """Functions for examining and modifying the i3 workspace"""
def get_empty_workspace(i3):
    """Get the index of the first empty workspace.

    Scans upward from 1 and returns the smallest workspace number that is
    not currently in use.
    """
    # A set gives O(1) membership tests while probing for a free index
    # (the original probed a list, O(n) per test).
    occupied = set(get_workspace_indices(i3))
    i = 1
    while i in occupied:
        i += 1
    return i
def get_workspace_indices(i3):
    """Get list of current workspace indices"""
    # One number per workspace, in the order i3 reports them.
    return [ws.num for ws in i3.get_tree().workspaces()]
def get_num_windows(i3, workspace_index):
    """Get the number of windows in an i3 workspace"""
    tree = i3.get_tree()
    try:
        # workspace_index is 1-based; leaves() are the workspace's windows.
        return len(tree.workspaces()[workspace_index - 1].leaves())
    except IndexError:
        # Thrown if the workspace is empty
        return 0
| true |
6810338c96ce2979f85a3f7a978970bd6c21bf38 | Python | shubhamgupta30/Collective-Intelligence | /deliciousrec.py | UTF-8 | 1,121 | 2.90625 | 3 | [] | no_license | from pydelicious import get_popular, get_userposts, get_urlposts
import time
# Get the list of users who recently posted a popular link with a specified tag
# The API returns only 30 users who posted a recent link, and thus gather users
# from top 5 links shared.
def initializeUserDict(tag, count=5):
    """Seed the ratings dict: every user who recently bookmarked one of
    the `count` most popular posts for `tag` maps to an empty dict."""
    top_users= {}
    for popular_post in get_popular(tag=tag)[0:count]:
        # get_urlposts lists the recent bookmarks of this popular URL.
        for post in get_urlposts(popular_post['url']):
            top_users[post['user']] = {}
    return top_users
# Create a dicionary of "ratings", where a user rates a particular link as
# either 1 or 0 depending on if she shared the link or not
def fillItems(users):
    # Build a 1.0/0.0 "ratings" matrix in place: 1.0 when the user
    # bookmarked the URL, 0.0 otherwise. (Python 2 source: print statements.)
    all_posts = {}
    for user in users:
        posts = []
        # The delicious API is flaky; retry each user up to 3 times.
        for i in range(3):
            try:
                posts = get_userposts(user)
                print "Succedded for user " + user + " :)"
                break
            except:  # NOTE(review): bare except also swallows KeyboardInterrupt
                print "Failed User " + user + ", retrying"
                time.sleep(4)
        for post in posts:
            users[user][post["url"]] = 1.0
            all_posts[post["url"]] = 1
    # Mark every URL a user did not share with an explicit 0.0 rating.
    for ratings in users.values():
        for post in all_posts:
            if post not in ratings:
                ratings[post] = 0.0
| true |
69bc50e8b2ec5705b7f726b8437fd3b6f28ddd6f | Python | itsolutionscorp/AutoStyle-Clustering | /all_data/exercism_data/python/nucleotide-count/a419b016e856423983f2af9cf1284de5.py | UTF-8 | 488 | 3.484375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from collections import Counter
class DNA:
    """Nucleotide counts for a DNA strand.

    Fix: ``if not nucleotide in ...`` rewritten to the idiomatic
    ``nucleotide not in ...`` (behavior unchanged).
    """

    def __init__(self, strand):
        """Tally the nucleotides in *strand*, guaranteeing A/C/G/T keys
        even when a nucleotide never occurs."""
        counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
        counts.update(Counter(strand))
        self.counts = counts

    def count(self, nucleotide):
        """Return how often *nucleotide* occurs; 0 for a valid symbol that
        is absent (e.g. 'U' in DNA). Raises ValueError for anything that
        is not a nucleotide symbol."""
        if nucleotide not in 'ACGTU':
            raise ValueError("{} is not a nucleotide.".format(nucleotide))
        return self.counts.get(nucleotide, 0)

    def nucleotide_counts(self):
        """Return the mapping of the four DNA nucleotides to their counts."""
        return self.counts
| true |
d7607e51839cab21f402b2582e086ce2555254a4 | Python | Boris-2021/Location_awareness- | /net_structure.py | UTF-8 | 1,556 | 3.09375 | 3 | [] | no_license | # ==================================
# !/usr/bin/python3
# --coding:utf-8--
# Author : time-无产者
# @time : 2021/8/24 10:04
# ==================================
import torch.nn as nn
import torch.nn.functional as F
import torch
import pdb
# Network structure:
# basic definition in __init__, forward propagation in forward()
class LeNet(nn.Module):
    """LeNet-style CNN for 3x64x64 inputs: two conv+pool stages followed
    by three fully connected layers producing `classes` logits."""
    def __init__(self, classes):
        # Define each layer; `classes` is the number of output categories.
        super(LeNet, self).__init__()
        # 3 input channels, 6 output channels, 5x5 kernel (weights 6*3*5*5).
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)  # weights 16*6*5*5
        self.fc1 = nn.Linear(16*13*13, 120)  # fully connected; 16*13*13 matches forward()
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, classes)
        self.relu = nn.ReLU(inplace=True)  # NOTE(review): unused; forward() uses F.relu
    def forward(self, x):
        # x: B*C*H*W, e.g. 1*3*64*64
        out = F.relu(self.conv1(x))  # -> 1*6*60*60 (64-5+1)
        out = F.max_pool2d(out, 2)  # 2x2 pooling -> 1*6*30*30
        out = F.relu(self.conv2(out))  # -> 1*16*26*26 (30-5+1)
        out = F.max_pool2d(out, 2)  # 2x2 pooling -> 1*16*13*13
        out = out.view(out.size(0), -1)  # flatten to (1, 16*13*13) == (1, 2704)
        out = F.relu(self.fc1(out))  # fully connected -> (1, 120)
        out = F.relu(self.fc2(out))  # (1, 84)
        out = self.fc3(out)  # (1, classes) raw logits
        return out
if __name__ == '__main__':
    # Smoke test: forward one random 3x64x64 image through a 5-class net.
    model = LeNet(classes=5)
    img = torch.randn(1, 3, 64, 64)
print(model(img))
| true |
f5b8cde6cc1fd72dbabac81912b2082d0e3527cb | Python | morsvox/face_detector | /facedetect.py | UTF-8 | 828 | 2.8125 | 3 | [] | no_license | import cv2
import sys
import os
# Get user supplied values
imagePath = sys.argv[1]  # path to the image to scan
cascPath = "haarcascade_frontalface_default.xml"  # Haar cascade file in the CWD
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image and convert to grayscale (the detector works on gray input)
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.3,
    minNeighbors=5,
    minSize=(30, 30)
    #flags = cv2.CV_HAAR_SCALE_IMAGE
)
print("Found {0} face!".format(len(faces)))
# Crop each detected face region and save it as founded/faceN.png
i = 0
for (x, y, w, h) in faces:
    crop_img = image[y:y+h, x:x+w]
    filename = 'founded/face{0}.png'.format(i)
    directory = os.path.dirname(filename)
    try:
        os.stat(directory)
    except:  # NOTE(review): bare except; os.makedirs(directory, exist_ok=True) would be cleaner
        os.mkdir(directory)
    cv2.imwrite(filename,crop_img)
    i+=1
cv2.waitKey(0) | true |
e5e15c2aaaad9b0fc93ccd372c43cecd63e228d6 | Python | KilHwanKim/practiceB | /code/2096.py | UTF-8 | 482 | 3.09375 | 3 | [] | no_license | n = int(input())
# BOJ 2096: rolling max/min path sums over an n x 3 grid (n read above),
# keeping only the previous row's three totals — O(1) extra memory.
number =[list(map(int,input().split())) for _ in range(n)]
big = number[0]
small = number[0]
for i in range(1,n):
    # Column j of row i is reachable only from columns j-1, j, j+1 of row i-1.
    big = [max ( big[0],big[1])+ number[i][0] , \
    max ( big[0],big[1],big[2])+ number[i][1], \
    max ( big[1],big[2])+ number[i][2]]
    small = [min(small[0], small[1]) + number[i][0], \
    min(small[0], small[1], small[2]) + number[i][1], \
    min(small[1],small[2]) + number[i][2]]
print(max(big),min(small))
| true |
07a66aba49d6308725c0d0d01d4ef2cee6a59de8 | Python | jeowsome/Python-Adventures | /Coffee Machine/Problems/The Louvre/main.py | UTF-8 | 352 | 3.609375 | 4 | [] | no_license | class Painting:
place = "Louvre"
    def __init__(self, title, artist, year):
        # Store the painting's descriptive attributes.
        self.title = title
        self.artist = artist
        self.year = year
    def get_info(self):
        # Print a one-line description; `place` is the shared class attribute.
        print(f'"{self.title}" by {self.artist} ({self.year}) hangs in the {Painting.place}.')
painting = Painting(input(), input(), input())
painting.get_info() | true |
e0fe35f64c589aafe040d6c995c076f5359b9997 | Python | yolkoo95/flask | /sql/sqlalchemy/flask-sqlalchemy/print1.py | UTF-8 | 700 | 2.796875 | 3 | [] | no_license | import os
from flask import Flask
from models import *
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("database_url")
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["DEBUG"] = True
db.init_app(app)
def main():
    """Print every flight and passenger row via the Flask-SQLAlchemy models."""
    flights = Flight.query.all() # compare with print.py in sqlalchemy
    print("Flights Info:")
    for flight in flights:
        print(f"{flight.origin} to {flight.destination}, {flight.duration} minutes.")
    passengers = Passenger.query.all()
    print("Passengers Info:")
    for passenger in passengers:
        print(f"{passenger.name}: {passenger.flight_id}")
if __name__ == "__main__":
with app.app_context():
main() | true |
7db070354eb8fdc308fb73715bb4112275b262f9 | Python | thetremendous/facebook-automated-invite | /facebook-invite.py | UTF-8 | 2,761 | 2.65625 | 3 | [] | no_license | #----------------------------
# |
# Facebook - invite to group|
#https://github.com/thetremendous/facebook-automated-invite
# UPDATE: 18.08.2021 |
#----------------------------
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
import random
import string
browser = webdriver.Chrome(executable_path= r"C:\PATH_TO_CHROMEDRIVER_FOLDER\chromedriver.exe")
browser.get(('https://www.facebook.com/groups/#NAME_OF_YOUR_GROUP_OR_ID'))
sleep(2)
def start():
    """Accept the cookie banner and log in to Facebook with the hard-coded
    credentials (pure I/O: drives the module-level `browser`)."""
    acceptCookies = browser.find_element_by_xpath('/html[1]/body[1]/div[2]/div[1]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div[1]/div[1]/div[1]');
    acceptCookies.click();
    sleep(4);
    #browser.implicitly_wait(3) #this is another wait function.If you would like to run the script faster, change all sleep() to this
    username = browser.find_element_by_name('email')
    username.send_keys('YOUR_USERNAME') # <- INSERT YOUR USERNAME HERE
    password = browser.find_element_by_name('pass')
    password.send_keys('YOUR_PASSWORD') # <- INSERT YOUR PASSWORD HERE
    nextButton = browser.find_element_by_xpath('/html[1]/body[1]/div[1]/div[1]/div[1]/div[1]/div[2]/div[2]/div[2]/div[1]/form[1]/div[2]/div[3]/div[1]/div[1]/div[1]/div[1]') # <--- click the login button
    nextButton.click()
    #browser.quit()
    sleep(4)
#Start the programm
start()
#start the invite
def invite():
    """Repeatedly invite the first suggested user to the group (up to 999 times)."""
    for x in range(1,1000):
        # NOTE(review): this local rebinds the name `invite` inside the
        # function; harmless (no recursion) but confusing.
        invite = browser.find_element_by_xpath('/html[1]/body[1]/div[1]/div[1]/div[1]/div[1]/div[3]/div[1]/div[1]/div[1]/div[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/div[1]') #<--- click the Invite button first
        invite.click()
        sleep(3)
        mark_user = browser.find_element_by_xpath('/html[1]/body[1]/div[1]/div[1]/div[1]/div[1]/div[4]/div[1]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]/div[1]/div[3]/div[2]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/div[1]/div[1]/i[1]') #<---- tick the checkbox of the first user in the list
        mark_user.click()
        sleep(1)
        send_user_invite = browser.find_element_by_xpath('/html[1]/body[1]/div[1]/div[1]/div[1]/div[1]/div[4]/div[1]/div[1]/div[1]/div[1]/div[2]/div[1]/div[1]/div[1]/div[1]/div[1]/div[3]/div[3]/div[1]/div[1]/div[2]/div[1]/div[1]')
        send_user_invite.click() #<---- and send the invite
        sleep(3)
#invite repeat
invite()
| true |
c81d53c04d350e9d221555482572b957c6671021 | Python | AlexandertheG/crypto-challenges | /set_2/cbc_bitflipping_attack.py | UTF-8 | 3,029 | 2.75 | 3 | [] | no_license | #!/usr/bin/python
import sys
import random
import binascii
import base64
from Crypto.Cipher import AES
def sanitize_input(in_str):
    """Strip the metacharacters ';' and '=' from user input so it cannot
    inject extra key=value pairs into the encoded profile string.

    Fix: the original built the result one character at a time with a
    no-op ``build_str = build_str`` branch; a single join is clearer
    (and avoids repeated string concatenation).
    """
    return ''.join(c for c in in_str if c not in ';=')
def pad_msg(msg, key_length):
    """PKCS#7-style pad: append pad_len copies of chr(pad_len) so the
    result is a whole number of key_length-sized blocks. Sets the module
    flag msg_is_padded whenever padding was added."""
    global msg_is_padded
    pad_len = key_length - len(msg) % key_length
    if pad_len > 0:
        msg_is_padded = True
        msg = msg + chr(pad_len) * pad_len
    return msg
def aes_cbc_encrypt(msg, key, iv):
    """Manual AES-CBC: XOR each plaintext block with the previous
    ciphertext block (the IV for the first), ECB-encrypt it, and base64
    the concatenation. Python 2 code: ``len(msg)/len(key)`` relies on
    integer division, and `msg` must already be block-aligned."""
    encr_res = ''
    num_of_blocks = len(msg)/len(key)
    for block_num in range(0, num_of_blocks):
        # The fresh ciphertext block becomes the chaining value for the next.
        iv = aes_ecb_encrypt(xor_bytes(msg[block_num*len(key):block_num*len(key)+len(key)], iv), key)
        encr_res+=iv
    return base64.b64encode(encr_res)
def xor_bytes(byte_arr1, byte_arr2):
    """XOR two equal-length character strings, character by character
    (indexes by the first argument's length, like the original)."""
    return ''.join(chr(ord(byte_arr1[i]) ^ ord(byte_arr2[i]))
                   for i in range(len(byte_arr1)))
def aes_ecb_encrypt(byte_array, key):
    # Raw single-shot AES-ECB encryption (PyCrypto); input must be block-aligned.
    aes = AES.new(key, AES.MODE_ECB)
    return aes.encrypt(byte_array)
def aes_ecb_decrypt(byte_array, key):
    # Raw single-shot AES-ECB decryption (PyCrypto); input must be block-aligned.
    aes = AES.new(key, AES.MODE_ECB)
    return aes.decrypt(byte_array)
def aes_cbc_decrypt(msg, key, cipher_block):
    """Invert aes_cbc_encrypt: base64-decode, then per block ECB-decrypt
    and XOR with the previous ciphertext block (`cipher_block` starts as
    the IV). Strips padding when the module flag msg_is_padded is set.
    Python 2 code: ``len(msg)/len(key)`` relies on integer division."""
    decr_res = ''
    msg = base64.b64decode(msg)
    num_of_blocks = len(msg)/len(key)
    for block_num in range(0, num_of_blocks):
        tmp_cipher = msg[block_num*len(key):block_num*len(key)+len(key)]
        decr_res += xor_bytes(aes_ecb_decrypt(msg[block_num*len(key):block_num*len(key)+len(key)], key), cipher_block)
        cipher_block = tmp_cipher
    if msg_is_padded == True:
        decr_res = remove_padding(decr_res)
    return decr_res
def split_string_by_char(str, by_char):
    # NOTE(review): the parameter `str` shadows the builtin str(); renaming
    # it would be cleaner but could break keyword-argument callers.
    return str.split(by_char)
def remove_padding(padded_msg):
    """Strip PKCS#7-style padding: the last byte encodes how many padding
    bytes to drop. Python 2 code: indexing a str yields a 1-char str,
    which hexlify then converts to its hex value."""
    padded_msg = bytes(padded_msg)
    pad_lngth = int(binascii.hexlify(padded_msg[len(padded_msg)-1]), base=16)
    return padded_msg[0:len(padded_msg) - pad_lngth]
def flip_bits(msg):
    """CBC bit-flipping step: XOR ciphertext bytes 21 and 27 (inside the
    second 16-byte block) with 0x01, so the corresponding plaintext bytes
    of the following block flip one bit on decryption — presumably turning
    the attacker-chosen characters into ';' and '=' (TODO confirm against
    the CLI input this script is run with)."""
    byte_msg = bytes(base64.b64decode(msg))
    flp_byte1 = ord(byte_msg[21])^0x01
    flp_byte2 = ord(byte_msg[27])^0x01
    flp_msg = ''
    for i in range(0, len(byte_msg)):
        if i == 21:
            flp_msg+=chr(flp_byte1)
        elif i == 27:
            flp_msg+=chr(flp_byte2)
        else:
            flp_msg+=byte_msg[i]
    return base64.b64encode(flp_msg)
usr_input = sys.argv[1]
prepend_str = "comment1=cooking%20MCs;userdata="
append_str = ";comment2=%20like%20a%20pound%20of%20bacon"
key = "lhjlHKhLJhgOHyoh"
iv = "XakUhKeGGHbswRMl"
msg_is_padded = False
sanitized_usr_input = prepend_str + sanitize_input(usr_input) + append_str
padded_str = pad_msg(sanitized_usr_input, len(key))
base64_cbc_str = aes_cbc_encrypt(padded_str, key, iv)
base64_cbc_str = flip_bits(base64_cbc_str)
cbc_decr = aes_cbc_decrypt(base64_cbc_str, key, iv)
tuples = split_string_by_char(cbc_decr, ";")
if "admin=true" in tuples:
print cbc_decr
| true |
ba62cb24983f5e2bf316f84405fe5340dbba4aee | Python | longjiemin/Interviews-and-algorithms-python- | /coder-interview-guide/5-用一个堆栈来实现另一个堆栈的排序.py | UTF-8 | 381 | 3.6875 | 4 | [] | no_license | #5
# Sort one stack using only one auxiliary stack; no extra variables allowed.
# Functionality implemented; works as intended.
def sort_another(nums):
    """Sort the stack `nums` (top = end of list) using one auxiliary stack.

    Empties `nums` and returns a new list ordered largest-first.
    """
    if not nums:
        return []
    aux = [nums.pop()]
    while nums:
        current = nums.pop()
        # Move smaller elements back onto the source stack until `current`
        # can sit on top while keeping `aux` in descending order.
        while aux and current > aux[-1]:
            nums.append(aux.pop())
        aux.append(current)
    return aux
| true |
cceeb22f2e92cb8f64363916ad0a314458105827 | Python | sagarsharma122000/Sudoku-Game | /sudoku (1).py | UTF-8 | 11,071 | 3.28125 | 3 | [] | no_license | from tkinter import *
board = []
def main_screen():
    """Show the level-selection menu; blocks in the Tk main loop."""
    top = Tk()
    top.title("SUDOKU")
    top.configure(background='antiquewhite1')
    top.geometry("300x360")
    lb = Label(top, text="Select Level",fg='navy',bg='antiquewhite1', font=("Arial Black", 30))
    lb.pack(pady=5)
    # One button per difficulty level, wired to the level loaders below.
    l1 = Button(top, text="Level 1",bg='cyan4',bd=5,fg='white',font=("Arial Black", 12), command=level1)
    l1.pack(pady=15)
    l2 = Button(top, text="Level 2",bg='cyan4',bd=5, fg='white',font=("Arial Black", 12),command=level2)
    l2.pack(pady=10)
    l3 = Button(top, text="Level 3",bg='cyan4',bd=5,fg='white', font=("Arial Black", 12),command=level3)
    l3.pack(pady=10)
    l4 = Button(top, text="Level 4", bg='cyan4',fg='white',bd=5,font=("Arial Black", 12),command=level4)
    l4.pack(pady=10)
    top.mainloop()
def level1():
    """Load the level-1 puzzle (0 marks an empty cell)."""
    grid = [
        [5, 1, 7, 6, 0, 0, 0, 3, 4],
        [2, 8, 9, 0, 0, 4, 0, 0, 0],
        [3, 4, 6, 2, 0, 5, 0, 9, 0],
        [6, 0, 2, 0, 0, 0, 0, 1, 0],
        [0, 3, 8, 0, 0, 6, 0, 4, 7],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 9, 0, 0, 0, 0, 0, 7, 8],
        [7, 0, 3, 4, 0, 0, 5, 6, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
    ]
    load_game(grid, 1)
def sol1():
    """Show the solved grid for level 1."""
    answer = [
        [5, 1, 7, 6, 9, 8, 2, 3, 4],
        [2, 8, 9, 1, 3, 4, 7, 5, 6],
        [3, 4, 6, 2, 7, 5, 8, 9, 1],
        [6, 7, 2, 8, 4, 9, 3, 1, 5],
        [1, 3, 8, 5, 2, 6, 9, 4, 7],
        [9, 5, 4, 7, 1, 3, 6, 8, 2],
        [4, 9, 5, 3, 6, 2, 1, 7, 8],
        [7, 2, 3, 4, 8, 1, 5, 6, 9],
        [8, 6, 1, 9, 5, 7, 4, 2, 3],
    ]
    solution_lvl(answer, 1)
def level2():
    """Load the level-2 puzzle (0 marks an empty cell)."""
    grid = [
        [5, 1, 7, 6, 0, 0, 0, 3, 4],
        [0, 8, 9, 0, 0, 4, 0, 0, 0],
        [3, 0, 6, 2, 0, 5, 0, 9, 0],
        [6, 0, 0, 0, 0, 0, 0, 1, 0],
        [0, 3, 0, 0, 0, 6, 0, 4, 7],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
        [0, 9, 0, 0, 0, 0, 0, 7, 8],
        [7, 0, 3, 4, 0, 0, 5, 6, 0],
        [0, 0, 0, 0, 0, 0, 0, 0, 0],
    ]
    load_game(grid, 2)
def sol2():
    """Show the solved grid for level 2 (same solution grid as level 1)."""
    solution = [[5,1,7,6,9,8,2,3,4],
                [2,8,9,1,3,4,7,5,6],
                [3,4,6,2,7,5,8,9,1],
                [6,7,2,8,4,9,3,1,5],
                [1,3,8,5,2,6,9,4,7],
                [9,5,4,7,1,3,6,8,2],
                [4,9,5,3,6,2,1,7,8],
                [7,2,3,4,8,1,5,6,9],
                [8,6,1,9,5,7,4,2,3]]
    # Bug fix: was solution_lvl(solution, 1), which mislabeled the
    # window as "Solution 1" even though this is level 2's solution.
    solution_lvl(solution, 2)
def level3():
    """Load the level-3 puzzle (0 marks an empty cell)."""
    grid = [
        [8, 5, 0, 0, 0, 2, 4, 0, 0],
        [7, 2, 0, 0, 0, 0, 0, 0, 9],
        [0, 0, 4, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 7, 0, 0, 2],
        [3, 0, 5, 0, 0, 0, 9, 0, 0],
        [0, 4, 0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 8, 0, 0, 7, 0],
        [0, 1, 7, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 3, 6, 0, 4, 0],
    ]
    load_game(grid, 3)
def sol3():
    """Show the solved grid for level 3."""
    answer = [
        [8, 5, 9, 6, 1, 2, 4, 3, 7],
        [7, 2, 3, 8, 5, 4, 1, 6, 9],
        [1, 6, 4, 3, 7, 9, 5, 2, 8],
        [9, 8, 6, 1, 4, 7, 3, 5, 2],
        [3, 7, 5, 2, 6, 8, 9, 1, 4],
        [2, 4, 1, 5, 9, 3, 7, 8, 6],
        [4, 3, 2, 9, 8, 1, 6, 7, 5],
        [6, 1, 7, 4, 2, 5, 8, 9, 3],
        [5, 9, 8, 7, 3, 6, 2, 4, 1],
    ]
    solution_lvl(answer, 3)
def level4():
    """Load the level-4 puzzle (0 marks an empty cell)."""
    level =[[0,0,5,3,0,0,0,0,0],
            [8,0,0,0,0,0,0,2,0],
            [0,7,0,0,1,0,5,0,0],
            [4,0,0,0,0,5,3,0,0],
            [0,1,0,0,7,0,0,0,6],
            [0,0,3,2,0,0,0,8,0],
            [0,6,0,5,0,0,0,0,9],
            [0,0,4,0,0,0,0,3,0],
            [0,0,0,0,0,9,7,0,0]]
    # Bug fix: was load_game(level, 3); that titled the window "Level 3"
    # and wired the Solution button to sol3 instead of sol4.
    load_game(level, 4)
def sol4():
    """Show the solved grid for level 4."""
    answer = [
        [1, 4, 5, 3, 2, 7, 6, 9, 8],
        [8, 3, 9, 6, 5, 4, 1, 2, 7],
        [6, 7, 2, 9, 1, 8, 5, 4, 3],
        [4, 9, 6, 1, 8, 5, 3, 7, 2],
        [2, 1, 8, 4, 7, 3, 9, 5, 6],
        [7, 5, 3, 2, 9, 6, 4, 8, 1],
        [3, 6, 7, 5, 4, 2, 8, 1, 9],
        [9, 8, 4, 7, 6, 1, 2, 3, 5],
        [5, 2, 1, 8, 3, 9, 7, 6, 4],
    ]
    solution_lvl(answer, 4)
def check():
    """Validate the player's grid: every row must be filled in and sum to 45.

    NOTE(review): a row sum of 45 is necessary but not sufficient for a
    valid sudoku row; this keeps the original game's intended rule.
    """
    ordinals = ("1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th")
    flag = 0
    for row in range(9):
        total = 0
        # ``board`` holds the 81 Entry widgets row-major (see load_game).
        for cell in board[row * 9:(row + 1) * 9]:
            value = cell.get()
            if value == '':
                # Incomplete row: report it and stop. (The original fell
                # through after the popup and crashed on int('').)
                root6 = Tk()
                root6.configure(background='antiquewhite1')
                root6.title("LOST")
                EMPTY = Label(root6, text="Entry in %s Row is Empty" % ordinals[row],
                              bg='antiquewhite1', fg='red', font=("Arial Black", 24))
                EMPTY.pack(padx=20, pady=20)
                return
            total += int(value)
        if total != 45:
            # Original bug: flag started at 1 and was never cleared, so
            # the player always "lost"; now it is only set on a bad row.
            flag = 1
    if flag == 1:
        root3 = Tk()
        root3.title("LOST")
        root3.configure(background='antiquewhite1')
        fail = Label(root3, text="YOU LOST TRY AGAIN", bg='antiquewhite1', fg='red',
                     font=("Arial Black", 24))
        fail.pack(padx=20, pady=20)
    else:
        root4 = Tk()
        root4.title("WON")
        root4.configure(background='antiquewhite1')
        pas = Label(root4, text="YOU PASSED", bg='antiquewhite1', fg='green',
                    font=("Arial Black", 24))
        pas.pack(padx=20, pady=20)
def load_game(level, lvl):
    """Build the playable window for puzzle *level* at level number *lvl*.

    Fills the module-level ``board`` list with the 81 Entry widgets
    (row-major) and wires the Submit/Solution buttons.
    """
    # Pick the solution callback matching the level number.
    if lvl == 1:
        sol = sol1
    elif lvl == 2:
        sol = sol2
    elif lvl == 3:
        sol = sol3
    elif lvl == 4:
        sol = sol4
    root = Tk()
    root.title("Level "+str(lvl))
    root.geometry("720x650")
    game = Frame(root, bg='misty rose')
    board_frame = Frame(game, bg='powder blue')
    button_frame = Frame(game,bg='misty rose')
    # Checkerboard of 3x3 background tiles to mark the sudoku boxes.
    for x1 in range(0, 3):
        for y1 in range(0, 3):
            if (x1+y1) % 2 == 0:
                Frame(board_frame, bg="gold2", height=200,
                      width=240).grid(row=x1, column=y1)
            else:
                Frame(board_frame, bg="red2", height=200,
                      width=240).grid(row=x1, column=y1)
    board.clear()
    # One Entry per cell; given cells (non-zero) are pre-filled and
    # disabled, empty cells are editable.
    for x1 in range(9):
        for y1 in range(9):
            if level[x1][y1] != 0:
                var = StringVar(board_frame, value=str(level[x1][y1]))
                entry = Entry(board_frame, state=DISABLED, textvariable=var, justify=CENTER,
                              bd=5, bg="light grey", width=3, font=("Arial Black", 24))
                board.append(entry)
                entry.place(relx=y1*(1/9), rely=x1*(1/8.9))
            else:
                entry = Entry(board_frame, justify=CENTER, fg="maroon", bd=5,
                              bg="light grey", width=3, font=("Arial Black", 24))
                board.append(entry)
                entry.place(relx=y1*(1/9), rely=x1*(1/8.9))
    board_frame.pack()
    solution_button = Button(button_frame,bg='tan1',fg='white', font=("Arial Black", 14),text="Solution",
                             bd=3,height=50, command=sol)
    check_button = Button(button_frame,bg='lime green', fg='white',font=("Arial Black", 14), text="Submit",
                          bd=3,height=50, command=check)
    check_button.pack(side=LEFT,padx=20)
    solution_button.pack()
    button_frame.pack()
    game.pack()
def solution_lvl(level, lvl):
    """Open a read-only window showing the solved grid *level* for *lvl*."""
    root1 = Tk()
    root1.title("Solution "+str(lvl))
    root1.geometry("380x320")
    game = Frame(root1, bg='thistle1')
    board_frame = Frame(game, bg='cyan4')
    button_frame = Frame(game, bg='blue')
    # Checkerboard of 3x3 background tiles to mark the sudoku boxes.
    for x1 in range(0, 3):
        for y1 in range(0, 3):
            if (x1+y1) % 2 == 0:
                Frame(board_frame, bg="red4", height=100,
                      width=120).grid(row=x1, column=y1)
            else:
                Frame(board_frame, bg="green4", height=100,
                      width=120).grid(row=x1, column=y1)
    # Non-zero cells are shown disabled; zero cells stay editable
    # (solutions normally contain no zeros).
    for x1 in range(9):
        for y1 in range(9):
            if level[x1][y1] != 0:
                var = StringVar(board_frame, value=str(level[x1][y1]))
                entry = Entry(board_frame, state=DISABLED, textvariable=var, justify=CENTER,
                              bd=5, bg="light grey", width=2, font=("Arial Black", 12))
                entry.place(relx=y1*(1/9), rely=x1*(1/8.9))
            else:
                entry = Entry(board_frame, justify=CENTER, fg="maroon", bd=5,
                              bg="light grey", width=2, font=("Arial Black", 12))
                entry.place(relx=y1*(1/9), rely=x1*(1/8.9))
    board_frame.pack(padx=10,pady=10)
    button_frame.pack()
    game.pack()
# Entry point: show the level-selection window.
main_screen()
| true |
545a83335f8c44c5dea3d9f884448200688b0a7b | Python | Sapnil98/Python | /Eyantra/task_1b/task2_final.py | UTF-8 | 2,673 | 2.71875 | 3 | [] | no_license | import cv2
import imutils
import numpy as np
from math import exp
def find_contours(image):
    """Threshold *image* (assumed single-channel -- TODO confirm) and
    return its external contours."""
    gray=image.copy()
    blur=cv2.GaussianBlur(gray,(7,7),0)
    ret,thresh = cv2.threshold(blur,200,255,cv2.THRESH_BINARY)
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 2 returns (contours, hierarchy); OpenCV 3 returns
    # (image, contours, hierarchy) -- pick the contour list accordingly.
    contours = contours[0] if imutils.is_cv2() else contours[1]
    return contours
def side(contours):
    """Approximate the contour to a polygon (2% arc-length tolerance)
    and return its vertex list."""
    arc = cv2.arcLength(contours, True)
    return cv2.approxPolyDP(contours, 0.02 * arc, True)
def area_of(contours):
    """Return the area enclosed by the contour."""
    return cv2.contourArea(contours)
def size_of(Area):
    """Bucket a contour area: >=5000 is 'large', <=3500 is 'small',
    anything in between is 'medium'."""
    if Area >= 5000.00:
        return 'large'
    if Area <= 3500.00:
        return 'small'
    return 'medium'
def shape_of(num_side):
    """Map a polygon-vertex list to a shape name.

    Four vertices are split into square vs rectangle via the bounding
    box's aspect ratio; more than six vertices counts as a circle.
    """
    vertex_count = len(num_side)
    if vertex_count == 3:
        return 'Triangle'
    if vertex_count == 4:
        x, y, w, h = cv2.boundingRect(num_side)
        ratio = w / h
        return 'Square' if 0.95 <= ratio <= 1.05 else 'Rectangle'
    if vertex_count == 5:
        return 'Pentagon'
    if vertex_count == 6:
        return 'Hexagon'
    return 'Circle'
def show(cnts,img1,colour,m):
    """Annotate the module-level canvas ``img2`` with each contour's
    colour/size/shape label and centroid, then display it.

    NOTE(review): the ``img1`` parameter is unused; all drawing targets
    the global ``img2``.  Returns None.
    """
    for c in cnts:
        # Random BGR-ish triple used as the contour outline colour.
        x=np.ceil(np.random.randn(3)*200)
        y=0
        num_side=side(c)
        area=area_of(c)
        size=size_of(area)
        shape=shape_of(num_side)
        M = cv2.moments(c)
        # m10/m00 is the centroid's x coordinate and m01/m00 its y, so
        # ``cY`` actually holds x and ``cX`` holds y -- the names are
        # swapped relative to convention but used consistently below.
        cY = int(M["m10"] / M["m00"])
        cX = int(M["m01"] / M["m00"])
        col=colour
        cv2.drawContours(img2,[c], -1, x, 5)
        cv2.circle(img2, (cY, cX), 4, (255, 255, 255), -1)
        cv2.putText(img2,col+size+shape, (cY - (m*10)+10, cX - (m*10)-10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5,y, 1)
        cv2.putText(img2, "("+str(cY)+","+str(cX)+")", (cY-(m*10)+5, cX-(m*10)+10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5,y, 1)
        # Show the running annotation and wait up to 10 s per contour.
        cv2.imshow("Image", img2)
        cv2.waitKey(10000)
# --- Detect coloured shapes in test1.png, one colour mask at a time ---
img=cv2.imread('test1.png')
img2=np.copy(img)
count=0
# Per-colour (lower, upper) BGR bounds for cv2.inRange masking.
col_dic={'Red':([0,0,230],[0,0,255]),
        'Green':([0,230,0],[0,255,0]),
        'Blue':([230,0,0],[255,0,0]),
        'Yellow':([0,230,230],[0,255,255]),
        'Orange':([0,130,0],[0,150,255])}
col=['Red','Green','Blue','Yellow','Orange']
for m in range(5):
    colour=col[m]
    print (colour)
    (l,u)=col_dic[colour]
    img1=np.copy(img)
    img1=cv2.inRange(img1,np.array(l),np.array(u))
    cnts=np.array(find_contours(img1))
    # NOTE(review): int(1/(1+exp(-m))) truncates the sigmoid (0.5..1 for
    # m>=0) to 0 for every m, so the label offset is always 0; also
    # show() has no return statement, so ``lst`` is always None.
    lst=show(cnts,img2,colour,int((1/(1+exp(-m)))))
print ('COmplete')
cv2.destroyAllWindows()
| true |
cc7c68c4db3c631a484375a48496aea5783bd428 | Python | cooLBooy1128/cpython39 | /tsTclntSS.py | UTF-8 | 513 | 2.796875 | 3 | [] | no_license | import socket
HOST = 'localhost'   # server hostname
PORT = 8000          # server port
ADDR = (HOST, PORT)  # (host, port) pair for connect()
BUFSIZ = 1024        # receive buffer size in bytes
def main():
    """Line-oriented TCP client that opens one connection per message.

    Reads a line from stdin, sends it CRLF-terminated, prints the
    server's reply, and repeats until the user enters an empty line or
    the server sends nothing back.
    """
    while True:
        tcpCliSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        tcpCliSock.connect(ADDR)
        try:
            data = input('> ')
            if not data:
                break
            tcpCliSock.send(b'%s\r\n' % data.encode())
            data = tcpCliSock.recv(BUFSIZ)
            if not data:
                break
            print(data.strip().decode())
        finally:
            # Bug fix: the original closed only the final socket after
            # the loop, leaking one open connection per message sent.
            tcpCliSock.close()
# Run the client only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| true |
be25cb33b4564df84ea30a77c2e1baee3431987c | Python | djrrb/Python-for-Visual-Designer-Summer-2021 | /session-3/waves.py | UTF-8 | 743 | 3.359375 | 3 | [] | no_license | def myShape(sh=200):
# handle starting length
hsl = 130
# get the right and left handle lengths
rightHandleLength = randint(-hsl, hsl)
leftHandleLength = randint(-hsl, hsl)
# define a bezier path
bp = BezierPath()
# move to my starting point
bp.moveTo((0, 0))
# straight line across
bp.lineTo((width(), 0))
# straight line up
bp.lineTo((width(), sh))
# make my curve
bp.curveTo(
(width(), sh+rightHandleLength), #handle on right
(0, sh-leftHandleLength), #handle on left
(0, sh) # left corner point
)
drawPath(bp)
# One translucent random fill colour, chosen once and reused for all
# ten stacked wave shapes below.
fill(random(), random(), random(), .2)
shapeHeight = 100
# Draw ten overlapping waves, each 100 units taller than the last.
for i in range(10):
    myShape(shapeHeight)
    shapeHeight += 100
91ff55e807ff40de390d0b1e6693511f942b94e1 | Python | Electrostatics/APBS_Sphinx | /plugins/PDB2PQR/extensions/newresinter.py | UTF-8 | 13,193 | 2.515625 | 3 | [] | no_license | """
Resinter extension
Print interaction energy between each residue pair in the protein.
"""
__date__ = "21 October 2011"
__authors__ = "Kyle Monson and Emile Hogan"
import extensions
from ..src.hydrogens import Optimize
#itertools FTW!
from itertools import product, permutations, count
from ..src.hydrogens import hydrogenRoutines
#Here are the Ri -> [Ri0, Ri1] maps:
#ARG -> [{AR0}, {ARG}]
#ASP -> [{ASP}, {ASH}]
#GLU -> [{GLU}, {GLH}]
#CYS -> [{CYM}, {CYS}]
#LYS -> [{LYN}, {LYS}]
#TYR -> [{TYM}, {TYR}]
#CTERM -> [{CTERM}, {NEUTRAL-CTERM}]
#NTERM -> [{NEUTRAL-NTERM}, {NTERM}]
#HIS -> [{HSD, HSE}, {HIS}]
#HIP -> [{HID, HIE}, {HIP}]
#HSP -> [{HSD, HSE}, {HSP}]
# Each entry is (deprotonated state names, protonated state name).
_titrationSets = ((('AR0',), 'ARG'),
                  (('ASH',), 'ASP'),
                  (('CYX',), 'CYS'),
                  (('GLU',), 'GLH'),
                  (('HSD', 'HSE'), 'HSP'),
                  (('HID', 'HIE'), 'HIP'),
                  (('LYN',), 'LYS'),
                  (('TYM',), 'TYR'),
                  (('CTERM',), 'NEUTRAL-CTERM'),
                  (('NEUTRAL-NTERM',), 'NTERM'))
_titrationSetsMap = {}
# Index every state name (deprotonated and protonated) to its tuple so
# any residue name can be looked up directly.
for tsSet in _titrationSets:
    for ts in tsSet[0]:
        _titrationSetsMap[ts] = tsSet
    _titrationSetsMap[tsSet[1]] = tsSet
#loose ends: alias names that share a titration tuple.
_titrationSetsMap['HIS'] = _titrationSetsMap['HSD']
_titrationSetsMap['CYM'] = _titrationSetsMap['CYS']
def usage():
    """Return the one-line help text for the newresinter extension."""
    return ('Print interaction energy between each residue pair in the '
            'protein to {output-path}.newresinter.')
def run_extension(routines, outroot, options):
    """Extension entry point: compute all pairwise residue interaction
    energies and write them to ``<outroot>.newresinter``."""
    with open(outroot + ".newresinter", "w") as outfile:
        worker = ResInter(routines, outfile, options)
        worker.generate_all()
        worker.write_resinter_output()
class ResInter(object):
    """Drives the residue-interaction-energy survey.

    Three phases are run over the protein: everything protonated, each
    single titratable residue deprotonated, and each pair deprotonated.
    Pairwise energies are accumulated in ``pairEnergyResults`` keyed by
    ``"<residue1> <residue2>"``.
    """
    def __init__(self, routines, outfile, options):
        self.pairEnergyResults = {}  # "res1 res2" -> interaction energy
        self.combinationCount = 0    # combinations processed so far
        self.totalCombinations = 0   # set by count_combinations()
        self.options = options
        self.output = extensions.extOutputHelper(routines, outfile)
        self.routines = routines
    def _apply_residue_set(self, residueSet):
        """Apply *residueSet* with the command-line options shared by all
        three phases (factored out of three identical copy/pasted calls)."""
        self.process_residue_set(residueSet,
                                 clean = self.options.clean,
                                 neutraln = self.options.neutraln,
                                 neutralc = self.options.neutralc,
                                 ligand = self.options.ligand,
                                 assign_only = self.options.assign_only,
                                 chain = self.options.chain,
                                 debump = self.options.debump,
                                 opt = self.options.opt)
    def save_interation_energy(self, first, second):
        """Record the interaction energy of the ordered pair (first, second)."""
        energy = get_residue_interaction_energy(first, second)
        pairText = str(first) + ' ' + str(second)
        if pairText in self.pairEnergyResults:
            # A pair should only ever be evaluated once per run.
            txt = '#%s re-tested!!! LOLWAT?\n' % pairText
            self.output.write(txt)
        else:
            self.pairEnergyResults[pairText] = energy
    def save_all_residue_interaction_energies(self):
        """Save the interaction energy for every ordered residue pair."""
        residuepairs = permutations(self.routines.protein.getResidues(), 2)
        for pair in residuepairs:
            self.save_interation_energy(pair[0], pair[1])
    def save_one_with_all_interaction_energies(self, i):
        """Save both orderings of residue *i* paired with every other residue."""
        residues = list(self.routines.protein.getResidues())
        target = residues[i]
        del residues[i]
        for residue in residues:
            self.save_interation_energy(target, residue)
            self.save_interation_energy(residue, target)
    def save_pair_interaction_energies(self, i, j):
        """Save both orderings of the single residue pair (i, j)."""
        residues = list(self.routines.protein.getResidues())
        self.save_interation_energy(residues[i], residues[j])
        self.save_interation_energy(residues[j], residues[i])
    def create_all_protonated(self):
        """Phase 1: everything protonated; save all pairwise energies."""
        residueSet = get_residue_titration_set_protonated(self.routines.protein.getResidues())
        self._apply_residue_set(residueSet)
        self.save_all_residue_interaction_energies()
    def create_all_single_unprotonated(self):
        """Phase 2: one residue unprotonated at a time, paired with all others."""
        combinations = residue_set_single_unprotonated_combinations(self.routines.protein.getResidues())
        for residueSet, i in combinations:
            self._apply_residue_set(residueSet)
            self.save_one_with_all_interaction_energies(i)
    def create_all_pair_unprotonated(self):
        """Phase 3: two residues unprotonated at a time, paired with each other."""
        combinations = residue_set_pair_unprotonated_combinations(self.routines.protein.getResidues())
        for residueSet, i, j in combinations:
            self._apply_residue_set(residueSet)
            self.save_pair_interaction_energies(i, j)
    def count_combinations(self):
        """Precompute how many titration-state combinations will be run."""
        n = 0 # total iterable residues
        k = 0 # total iterable residues with two possible choices.
        allProtonated = get_residue_titration_set_protonated(self.routines.protein.getResidues())
        for name in allProtonated:
            if name in _titrationSetsMap:
                n += 1
                if len(_titrationSetsMap[name][0]) == 2:
                    k += 1
        # Bug fix: use floor division -- plain '/' yields a float under
        # Python 3, corrupting the "combination X of Y" progress output.
        self.totalCombinations = (((n+k)**2)+(n-k)+2)//2
    def generate_all(self):
        """
        For every titration state combination of residue output the
        interaction energy for all possible residue pairs.
        """
        self.routines.write("Printing residue interaction energies...\n")
        self.count_combinations()
        #Phase 1: Everything protonated
        self.create_all_protonated()
        #Phase 2: Single unprotonated paired with everything else.
        self.create_all_single_unprotonated()
        #Phase 3: Pair unprotonated paired with each other.
        self.create_all_pair_unprotonated()
    def write_resinter_output(self):
        """
        Output the interaction energy between each possible residue pair.
        """
        for resultKey in sorted(self.pairEnergyResults.keys()):
            self.output.write(resultKey + ' ' + str(self.pairEnergyResults[resultKey]) + '\n')
        self.routines.write(str(self.combinationCount)+' residue combinations tried\n')
    def process_residue_set(self, residueSet,
                            clean = False,
                            neutraln = False,
                            neutralc = False,
                            ligand = None,
                            assign_only = False,
                            chain = False,
                            debump = True,
                            opt = True):
        """Swap the protein's residues to *residueSet* and re-run the
        relevant parts of the PDB2PQR pipeline (termini, bonds, debump,
        hydrogen placement and optimization)."""
        self.combinationCount += 1
        txt = "Running combination {0} of {1}\n".format(self.combinationCount, self.totalCombinations)
        self.routines.write(txt)
        self.routines.write(str(residueSet)+'\n')
        self.routines.removeHydrogens()
        for newResidueName, oldResidue, index in zip(residueSet, self.routines.protein.getResidues(), count()):
            if newResidueName is None:
                continue
            # NOTE(review): this rebinds the 'chain' parameter; the
            # incoming parameter value is otherwise unused in this loop.
            chain = self.routines.protein.chainmap[oldResidue.chainID]
            chainIndex = chain.residues.index(oldResidue)
            residueAtoms = oldResidue.atoms
            #Create the replacement residue
            newResidue = self.routines.protein.createResidue(residueAtoms, newResidueName)
            #Make sure our names are cleaned up for output.
            newResidue.renameResidue(newResidueName)
            #Drop it in
            self.routines.protein.residues[index] = newResidue
            chain.residues[chainIndex] = newResidue
        #Run the meaty bits of PDB2PQR
        self.routines.setTermini(neutraln, neutralc)
        self.routines.updateBonds()
        if not clean and not assign_only:
            self.routines.updateSSbridges()
            if debump:
                self.routines.debumpProtein()
            self.routines.addHydrogens()
            hydRoutines = hydrogenRoutines(self.routines)
            if debump:
                self.routines.debumpProtein()
            if opt:
                hydRoutines.setOptimizeableHydrogens()
                hydRoutines.initializeFullOptimization()
                hydRoutines.optimizeHydrogens()
            else:
                hydRoutines.initializeWaterOptimization()
                hydRoutines.optimizeHydrogens()
            # Special for GLH/ASH, since both conformations were added
            hydRoutines.cleanup()
def get_residue_titration_set_protonated(residues):
    """Return the residue-name list with every titratable residue
    replaced by its fully protonated state name."""
    names = []
    for residue in residues:
        entry = _titrationSetsMap.get(residue.name)
        # Titratable residues map to (deprotonated states, protonated
        # state); non-titratable residues keep their own name.
        names.append(entry[1] if entry else residue.name)
    return names
def residue_set_single_unprotonated_combinations(residues):
    """Yield (residue set, index) for each way of leaving exactly one
    titratable residue unprotonated.

    residue set - name list for process_residue_set
    index - position of the residue left unprotonated
    """
    base = get_residue_titration_set_protonated(residues)
    for i, name in enumerate(base):
        entry = _titrationSetsMap.get(name)
        if entry is None:
            continue
        for state in entry[0]:
            candidate = list(base)
            candidate[i] = state
            yield candidate, i
def residue_set_pair_unprotonated_combinations(residues):
    """Yield (residue set, i, j) for each way of leaving two titratable
    residues unprotonated simultaneously (always with j < i).

    residue set - name list for process_residue_set
    i, j - positions of the two residues left unprotonated
    """
    base = get_residue_titration_set_protonated(residues)
    for i in range(len(base)):
        first = _titrationSetsMap.get(base[i])
        if first is None:
            continue
        for j in range(i):
            second = _titrationSetsMap.get(base[j])
            if second is None:
                continue
            for state_i in first[0]:
                for state_j in second[0]:
                    candidate = list(base)
                    candidate[i] = state_i
                    candidate[j] = state_j
                    yield candidate, i, j
def get_residue_interaction_energy(residue1, residue2):
    """Return the total Optimize.getPairEnergy over every atom pair
    between the two residues (residue1 as donor, residue2 as acceptor).

    THE RESULTS OF THIS FUNCTION ARE NOT SYMMETRIC: swapping residue1
    and residue2 will not always produce the same result.
    """
    atom_pairs = product(residue1.getAtoms(), residue2.getAtoms())
    return sum((Optimize.getPairEnergy(donor, acceptor)
                for donor, acceptor in atom_pairs), 0.0)
| true |
9c7f9dde7d7cc6fe14c10393beb0afde742c8353 | Python | chinmairam/Python | /positional_only_arg.py | UTF-8 | 223 | 3.703125 | 4 | [] | no_license | # To specify positional-only arguments,you include a forward slash in your
# function's arguments.
def number_length(x, /):
    """Return the number of characters in ``str(x)``.

    The ``/`` marks *x* as positional-only, so calling
    ``number_length(x=...)`` raises TypeError.
    """
    digits = str(x)
    return len(digits)
print(number_length(2112))  # -> 4
#print(number_length(x=31557600)) #TypeError
| true |
324c0e7ac798be4fc3c83dc9a40e5bee620f06d6 | Python | g3rv4/notify-me-anything | /notifications/mac_os_notification.py | UTF-8 | 1,903 | 2.53125 | 3 | [] | no_license | import Foundation
import objc
from notifications.base_notification import BaseNotification
class MacOSNotification(BaseNotification):
    """BaseNotification backend that posts native macOS notifications
    through a PyObjC NotificationHelper (defined below)."""
    def __enter__(self):
        # Create the Objective-C helper when the context is entered.
        self.helper = NotificationHelper.alloc().init()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.helper.dealloc()
    def do_notify(self, notifications):
        # Forward each notification's fields to the Cocoa helper.
        for notification in notifications:
            self.helper.notify(notification.title, notification.subtitle, notification.text, notification.sound)
class NotificationHelper(Foundation.NSObject):
    """PyObjC NSObject subclass that builds and schedules
    NSUserNotification alerts via the default notification center."""
    def init(self):
        # Cocoa-style designated initializer: chain to the ObjC super
        # implementation and bail out if it returns nil.
        self = objc.super(NotificationHelper, self).init()
        if self is None:
            return None
        # Get objc references to the classes we need.
        self.NSUserNotification = objc.lookUpClass('NSUserNotification')
        self.NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
        return self
    def clearNotifications(self):
        """Clear any displayed alerts we have posted. Requires Mavericks."""
        NSUserNotificationCenter = objc.lookUpClass('NSUserNotificationCenter')
        NSUserNotificationCenter.defaultUserNotificationCenter().removeAllDeliveredNotifications()
    def notify(self, title, subtitle, text, sound):
        """Create a user notification and display it."""
        notification = self.NSUserNotification.alloc().init()
        notification.setTitle_(str(title))
        if subtitle:
            notification.setSubtitle_(str(subtitle))
        if text:
            notification.setInformativeText_(str(text))
        if sound:
            # Sound names are resolved as system .aiff files by Cocoa.
            notification.setSoundName_("%s.aiff" % sound)
        notification.setHasActionButton_(False)
        self.NSUserNotificationCenter.defaultUserNotificationCenter().setDelegate_(self)
        self.NSUserNotificationCenter.defaultUserNotificationCenter().scheduleNotification_(notification)
| true |
f025a0838a97e5d8c06dbafc364cc956e5ebea95 | Python | samikhailov/coursera | /python_osnovy_programmirovaniya/week_7/polighloty.py | UTF-8 | 536 | 3.234375 | 3 | [] | no_license | amount_pupils = int(input())
famous_languages = set()
all_languages = set()
for counter, i in enumerate(range(amount_pupils)):
known_languages = int(input())
pupils_languages = set()
for j in range(known_languages):
pupils_languages.add(input())
if counter == 0:
famous_languages = pupils_languages
famous_languages &= pupils_languages
all_languages |= pupils_languages
print(len(famous_languages), *sorted(famous_languages), sep="\n")
print(len(all_languages), *sorted(all_languages), sep="\n")
| true |
12f40f75e97ea94001fbf50034c89b77e6f48454 | Python | ctreffe/alfred | /src/alfred3/cli/extract.py | UTF-8 | 11,337 | 3.0625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | r"""
Provides a command line interface for transforming .json data files
into .csv files.
By default, the command expects to find the .json files in the current
working directory. A basic example would look something like this. You
have run some local experiment sessions. Now you have an experiment directory
that looks something like this::
data/
save/
exp/
2021-04-27_12.03.58_data_0f278318f0bb4ea6b8cb6a58e8a5afc3.json
2021-04-27_13.06.01_data_d06249f45d66494192aca0cc7b91b54d.json
2021-04-29_12.08.59_data_5999f44eaea0447ca0abbf079824b9ce.json
log/
script.py
config.conf
The .json files in the directory ``save/exp/`` hold data of *individual sessions*.
We want to combine them into a .csv file for further data analysis. Usually,
alfred3 will do this automatically for you after each experiment run and
save the result in the ``data/`` directory, but let's assume that this
does not work in our case. To transform the data, we follow these steps:
**1. Open up a terminal.**
On Mac, this is just the *Terminal* app. On Windows,
this is the command line application. If you are using an IDE like PyCharm,
there is most likely a terminal integrated into the user interface.
**2. Make sure that you are in the correct working directory.**
You can go to a specific directory by running the following code::
$ cd path/to/directory
Replace ``path/to/directory`` with your actual full path to the ``save/exp/``
directory of your experiment.
Note that, on Windows, you probably need to use backslashes (\) instead
of ordinary slashes (/) in the path.
**3. Run the command in the terminal**
Run the following command::
$ alfred3 json-to-csv
Et voilà! This will place the .csv file inside the current directory.
If you run into problems at this point, make sure that you have alfred3
installed in your current environment. If you are usually working in a
virtual environment, you may need to activate that environment.
You can access the full manual to all available options of the ``json-to-csv``
command by executing::
$ alfred3 json-to-csv --help
The current version is::
Usage: alfred3 json-to-csv [OPTIONS]
Options:
  --dtype TEXT        The data type to extract from .json files. Can be
'exp_data', 'codebook', 'move_history', and
'unlinked_data'. [default: exp_data]
--in_path TEXT Path to directory containing json files. If None
(default), the current working directory will be used.
--out_path TEXT Path to directory in which the output csv file will be
                      placed. If None (default), the current working directory
will be used.
--exp_version TEXT The experiment version for which codebook data should be
extracted. Only relevant for codebook data.
--delimiter TEXT Delimiter to use in the resulting csv file. Defaults to
';'
--help Show this message and exit.
"""
from itertools import chain
from pathlib import Path
import click
from alfred3.data_manager import DataManager
from alfred3.export import Exporter, find_unique_name
class Extractor:
    """
    Turns uncurated alfred data from json format into csv format.

    Args:
        in_path (str): Path to directory containing json files. If None
            (default), the current working directory will be used.
        out_path (str): Path to directory in which the output csv file
            will be placed. If None (default), the current working
            directory will be used.
        delimiter (str): Delimiter to use in the resulting csv file.
            Defaults to ";"

    Examples:
        The extractor is used by calling one of its four methods. The
        following python code can be used to turn all alfred json
        datasets in the current working directory into a nice csv file.

        >>> from alfred3.export import Extractor
        >>> ex = Extractor()
        >>> ex.extract_exp_data()
    """
    def __init__(self, in_path: str = None, out_path: str = None, delimiter: str = ";"):
        # Fall back to the current working directory when no path is given.
        self.in_path = Path(in_path) if in_path is not None else Path.cwd()
        self.out_path = Path(out_path) if out_path is not None else Path.cwd()
        self.delimiter = delimiter
    def extract_exp_data(self) -> str:
        """
        Extracts the main experiment data from json files in the
        Extractor's *in_path* and writes it to a single csv file.

        Returns:
            str: Name of the csv file created in *out_path*.

        Examples:
            Turn all alfred json datasets in the current working
            directory into a nice csv file.

            >>> from alfred3.cli.extract import Extractor
            >>> ex = Extractor()
            >>> ex.extract_exp_data()
        """
        data = list(
            DataManager.iterate_local_data(
                data_type=DataManager.EXP_DATA, directory=self.in_path
            )
        )
        fieldnames = DataManager.extract_ordered_fieldnames(data)
        alldata = [DataManager.flatten(d) for d in data]
        # find_unique_name avoids overwriting an existing exp_data.csv.
        csvname = find_unique_name(directory=self.out_path, filename="exp_data.csv")
        Exporter.write(
            data=alldata,
            fieldnames=fieldnames,
            path=self.out_path / csvname,
            delimiter=self.delimiter,
        )
        return csvname
    def extract_unlinked_data(self) -> str:
        """
        Extracts unlinked data from json files in the Extractor's
        *in_path* and writes it to a single csv file.

        Returns:
            str: Name of the csv file created in *out_path*.

        Examples:
            Turn all alfred unlinked json datasets in the current
            working directory into a nice csv file.

            >>> from alfred3.cli.extract import Extractor
            >>> ex = Extractor()
            >>> ex.extract_unlinked_data()
        """
        existing_data = list(
            DataManager.iterate_local_data(
                data_type=DataManager.UNLINKED_DATA, directory=self.in_path
            )
        )
        data = [DataManager.flatten(d) for d in existing_data]
        fieldnames = DataManager.extract_fieldnames(data)
        csvname = find_unique_name(directory=self.out_path, filename="unlinked.csv")
        Exporter.write(
            data=data,
            fieldnames=fieldnames,
            path=self.out_path / csvname,
            delimiter=self.delimiter,
        )
        return csvname
    def extract_codebook(self, exp_version: str) -> str:
        """
        Extracts codebook data from json files in the Extractor's
        *in_path*.

        Args:
            exp_version (str): Experiment version. Codebook data must
                be exported for specific experiment versions.

        Returns:
            str: Name of the csv file created in *out_path*.

        Examples:
            Get a nice csv codebook for the json data in the current
            working directory.

            >>> from alfred3.cli.extract import Extractor
            >>> ex = Extractor()
            >>> ex.extract_codebook("1.0")
        """
        cursor = DataManager.iterate_local_data(
            data_type=DataManager.EXP_DATA,
            directory=self.in_path,
            exp_version=exp_version,
        )
        cursor_unlinked = DataManager.iterate_local_data(
            data_type=DataManager.UNLINKED_DATA,
            directory=self.in_path,
            exp_version=exp_version,
        )
        # extract individual codebooks for each experiment session
        cbdata_collection = []
        for entry in cursor:
            cb = DataManager.extract_codebook_data(entry)
            cbdata_collection.append(cb)
        for entry in cursor_unlinked:
            cb = DataManager.extract_codebook_data(entry)
            cbdata_collection.append(cb)
        # combine them to a single dictionary, overwriting old values
        # with newer ones
        data = {}
        for entry in cbdata_collection:
            data.update(entry)
        fieldnames = DataManager.extract_fieldnames(data.values())
        fieldnames = DataManager.sort_codebook_fieldnames(fieldnames)
        csvname = find_unique_name(
            directory=self.out_path, filename=f"codebook_{exp_version}.csv"
        )
        Exporter.write(
            data=data.values(),
            fieldnames=fieldnames,
            path=self.out_path / csvname,
            delimiter=self.delimiter,
        )
        return csvname
    def extract_move_history(self) -> str:
        """
        Extracts movement data from json files in the Extractor's
        *in_path*.

        Returns:
            str: Name of the csv file created in *out_path*.

        Examples:
            Get a nice csv of movement data for json data in the
            current working directory.

            >>> from alfred3.cli.extract import Extractor
            >>> ex = Extractor()
            >>> ex.extract_move_history()
        """
        existing_data = DataManager.iterate_local_data(
            data_type=DataManager.EXP_DATA, directory=self.in_path
        )
        history = [d["exp_move_history"] for d in existing_data]
        # The first chain is consumed by extract_fieldnames, so a fresh
        # chain is built for the actual write below.
        fieldnames = DataManager.extract_fieldnames(chain(*history))
        history = chain(*history)
        csvname = find_unique_name(directory=self.out_path, filename="move_history.csv")
        Exporter.write(
            data=history,
            fieldnames=fieldnames,
            path=self.out_path / csvname,
            delimiter=self.delimiter,
        )
        return csvname
@click.command()
@click.option(
    "--dtype",
    default="exp_data",
    help=(
        "The data type to extract from .json files. Can be 'exp_data', 'codebook',"
        " 'move_history', and 'unlinked_data'."
    ),
    show_default=True,
)
@click.option(
    "--in_path",
    default=None,
    help=(
        "Path to directory containing json files. If None (default), the current"
        " working directory will be used."
    ),
)
@click.option(
    "--out_path",
    default=None,
    help=(
        "Path to directory in which the output csv file will be placed. If None"
        " (default), the current working directory will be used."
    ),
)
@click.option(
    "--exp_version",
    default=None,
    help=(
        "The experiment version for which codebook data should be extracted. Only"
        " relevant for codebook data."
    ),
)
@click.option(
    "--delimiter",
    default=";",
    help="Delimiter to use in the resulting csv file. Defaults to ';'",
)
def json_to_csv(dtype, in_path, out_path, exp_version, delimiter):
    """Transform alfred3 .json data files to a single .csv file."""
    extractor = Extractor(in_path=in_path, out_path=out_path, delimiter=delimiter)

    if dtype == "exp_data":
        csvname = extractor.extract_exp_data()
    elif dtype == "codebook":
        # Codebooks are version-specific, so the version must be given explicitly.
        if exp_version is None:
            raise ValueError(
                "You must specify an experiment version for codebook extraction. See"
                " 'alfred3 json-to-csv --help' for more."
            )
        csvname = extractor.extract_codebook(exp_version=exp_version)
    elif dtype == "move_history":
        csvname = extractor.extract_move_history()
    elif dtype == "unlinked_data":
        csvname = extractor.extract_unlinked_data()
    else:
        msg = (
            f"Value {dtype} for option '--dtype' is not valid. See 'alfred3 json-to-csv"
            " --help' for more."
        )
        raise ValueError(msg)

    msg = (
        f"Data transformed to csv. File '{csvname}' was placed in directory"
        f" '{extractor.out_path}'"
    )
    click.echo(msg)
| true |
70018785948566c05025fb023d4edc249aa17212 | Python | optionalg/cracking_the_coding_interview | /chapter02_lists/03_delete_middle.py | UTF-8 | 624 | 3.34375 | 3 | [] | no_license | from ctci.chapter02_lists.LinkedList import LinkedList
def find_middle_element(self):
    """Return the data of the middle node of linked list *self*.

    Uses the slow/fast two-pointer technique. For an even number of
    nodes, the first of the two middle nodes is returned (unchanged
    behavior).

    Fix: the original looped on ``while p2.next`` without checking that
    ``p2`` itself was still a node, so lists with an odd number of nodes
    (including a single node) raised AttributeError on ``None.next``.
    """
    slow = self.head
    fast = self.head.next
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    return slow.data
def delete_middle_element(self, k):
    """Unlink one interior node from linked list *self*.

    A runner pointer starts k nodes ahead of a trailing pointer. The
    trailer advances until the runner reaches the second-to-last node,
    at which point the node after the trailer (the node at index
    len - k - 1) is removed from the list.
    """
    runner = self.head
    for _ in range(k):
        runner = runner.next
    trailer = self.head
    while runner.next:
        if runner.next.next is None:
            # Runner is at the second-to-last node: unlink trailer's successor.
            trailer.next = trailer.next.next
        else:
            trailer = trailer.next
        runner = runner.next
# Demo: build a random 10-node list (values 0-99), print it, report the
# middle element, delete one element (k=5), and print the list again.
ll = LinkedList()
ll.generate(10, 0, 99)
ll.print_list()
print(find_middle_element(ll))
delete_middle_element(ll, 5)
ll.print_list()
0a1e1598485397f04ff0b5284e70806961373684 | Python | dsapan/hotel-management-system | /Hotel-Management-System/roomhistory.py | UTF-8 | 4,716 | 2.671875 | 3 | [] | no_license | from tkinter import *
from subprocess import call
import mysql.connector
from tkinter import messagebox
from tkinter import scrolledtext
# Main application window: 1020x700 px, offset 200 px right / 20 px down
# from the screen's top-left corner.
root = Tk(className=" HOTEL MANAGEMENT")
root.geometry('1020x700+200+20')
# Menu callbacks: each opens another module of the app as a separate process.
def _launch(script):
    """Run *script* with the `python` interpreter as a child process."""
    call(["python", script])

def click_vacancy():
    _launch("vacancy.py")

def click_developers():
    _launch("developers.py")

def click_branches():
    _launch("branches.py")

def click_contact_us():
    _launch("contact_us.py")

def click_staff():
    _launch("staff.py")

def click_allcust():
    _launch("all_details.py")
# variables
Room_no = IntVar()
# database
def click_proceed():
room_no = Room_no.get()
formatting = "-------------------------------------------------------------------------------------------" \
"-------------------------------------------------------------------------------------------" \
"-----------------------------------------------------------------------------------------------------------------\n"
if room_no == "":
rn_entry.delete(0, 'end')
messagebox.showwarning("Warning", "Incomplete Data Entry")
else:
mydb = mysql.connector.connect(host='localhost', user='root', password='abc456', database='hotel')
cur = mydb.cursor()
cur.execute('Select Exists(select * from all_data where Room_No=%s)', (room_no,))
res = cur.fetchall()
avail = 0
for i in res:
a = list(i)
avail = a[0]
if avail == 1:
cur.execute('SELECT Cust_Id,Room_No,First_Name,Last_Name,Room_Type,No_Days,Checked_In,Room_Rate,Room_Desc from all_data where Room_No =%s', (room_no,))
result = cur.fetchall()
text.config(state=NORMAL)
text.delete(1.0,END)
for d in result:
final_detail = "\nCustomer Id : \t"+ str(d[0])+"\t\tRoom_No : \t"+ str(d[1])+"\n\n"+"First Name : \t " + d[2] + "\t\t Last Name : \t "+d[3]+ "\n\n"+"Room Type : \t"+ d[4] +"\n\n"+"Booked for Days : "+d[5]+"\n\n"+"Checked In Date & Time : "+str(d[6])+"\n\n"+"Room Rate (Rs): "+d[7]+"\n\nRoom Desc : "+d[8]+"\n"
text.config(state=NORMAL)
text.insert(INSERT, final_detail)
text.insert(INSERT, formatting)
rn_entry.delete(0,"end")
text.config(state=DISABLED)
else:
text.insert(INSERT, "The Entered Room Number Havent OCCUPIED Till Now, \t Please Enter a Valid Room Number !")
# Menu Bar: Home (navigation/exit), About (info pages), Help (contact).
menu_bar = Menu(root)
root.config(menu=menu_bar)
home_menu = Menu(menu_bar)
menu_bar.add_cascade(label="Home", menu=home_menu)
home_menu.add_command(label="All Customers", command=click_allcust)
home_menu.add_separator()
home_menu.add_command(label="Vacancy", command=click_vacancy)
home_menu.add_separator()
home_menu.add_command(label="Exit", command=root.quit)
about_menu = Menu(menu_bar)
menu_bar.add_cascade(label="About", menu=about_menu)
about_menu.add_command(label="Branches", command=click_branches)
about_menu.add_separator()
about_menu.add_command(label="Staff", command=click_staff)
about_menu.add_separator()
about_menu.add_command(label="Developers", command=click_developers)
help_menu = Menu(menu_bar)
menu_bar.add_cascade(label="Help", menu=help_menu)
help_menu.add_command(label="Contact Us", command=click_contact_us)
# Heading banner and a blank title strip below it, both full-width.
heading_label = Label(root, text="--------- ROOM HISTORY ---------", bg="deep sky blue", fg="white", font=('Times New Roman', 15,'bold'))
heading_label.pack(fill=X)
title_label = Label(root, text="", height=1,fg='white',font=('Times New Roman', 15,'bold'), bg="medium blue")
title_label.pack(fill=X)
root.configure(background='alice blue')
# Frame holding the room-number prompt and entry, with a spacer above.
topFrame = Frame(root)
topFrame.pack()
topFrame.configure(background='alice blue')
blankspace = Label(topFrame, text="\n\n\n\n\n")
blankspace.grid(row=0)
# Room Number label + entry (entry bound to the Room_no IntVar).
rn_label = Label(topFrame, text="Room Number : ",font=('Times New Roman', 20,"bold"))
rn_entry = Entry(topFrame, textvar=Room_no, bd=5, bg="#ccefff", fg='blue', width=15, font=('Arial', 15))
rn_label.grid(row=1, column=0, padx=15, pady=10, sticky=E)
rn_entry.grid(row=1, column=1, ipady=5, ipadx=60, sticky=W)
# Search Button: triggers the database lookup in click_proceed.
submit_button = Button(root, text="SEARCH", width=16, bg="medium blue", fg='White', font=('ARIAL BLACK', 15), relief=RAISED,
                       command=click_proceed)
submit_button.place(relx=0.5, rely=0.40, anchor=S)
# Scrolled text area where the room history report is rendered.
text = scrolledtext.ScrolledText(root, bd=5, bg="white", fg='blue', height=16,width=98, font=('Arial', 15))
text.place(rely=0.45)
root.mainloop()
| true |
d8d30a1d4fc60e4ce16947f12fade84c19ebb2d9 | Python | Xiangyu-Han/autoclip | /autoclip.py | UTF-8 | 992 | 2.578125 | 3 | [
"MIT"
] | permissive | import numpy as np
import torch
from ignite.engine import EventEnum
def _get_grad_norm(model):
    """Compute the total L2 norm over all parameter gradients of *model*.

    Parameters that currently have no gradient are ignored; with no
    gradients at all the result is 0.0.
    """
    squared_norms = [
        p.grad.data.norm(2).item() ** 2
        for p in model.parameters()
        if p.grad is not None
    ]
    return sum(squared_norms) ** 0.5
# Custom pytorch-ignite event. The training loop is expected to fire this
# event after the backward pass, so the autoclip handler below can inspect
# gradients before the optimizer step.
class BackwardsEvents(EventEnum):
    BACKWARDS_COMPLETED = 'backwards_completed'
def add_autoclip_gradient_handler(engine, model, clip_percentile):
    """Register an AutoClip gradient-clipping handler on *engine*.

    Each time BACKWARDS_COMPLETED fires, the current gradient norm of
    *model* is appended to a running history, and the gradients are
    clipped to the given percentile of all norms observed so far.
    """
    observed_norms = []

    @engine.on(BackwardsEvents.BACKWARDS_COMPLETED)
    def autoclip_gradient(engine):
        observed_norms.append(_get_grad_norm(model))
        threshold = np.percentile(observed_norms, clip_percentile)
        torch.nn.utils.clip_grad_norm_(model.parameters(), threshold)
| true |
7b4452e714190a01d49374d0761f8dcc355e3182 | Python | ProspePrim/PythonGB | /Lesson 3/task_3_4.py | UTF-8 | 1,164 | 4.4375 | 4 | [] | no_license | # Программа принимает действительное положительное число x и целое отрицательное число y.
# Необходимо выполнить возведение числа x в степень y.
# Задание необходимо реализовать в виде функции my_func(x, y).
# При решении задания необходимо обойтись без встроенной функции возведения числа в степень.
# ** Подсказка:** попробуйте решить задачу двумя способами. Первый — возведение в степень с помощью оператора **.
# Второй — более сложная реализация без оператора **, предусматривающая использование цикла.
def pow_func_1(a, b):
    """Raise a to the (negative integer) power b using the ** operator.

    Per the exercise (x real positive, y negative integer), this is
    computed as 1 / a**|b|.
    """
    denominator = a ** abs(b)
    return 1 / denominator


print(pow_func_1(3, -5))
def pow_func_2(a, b):
    """Raise a to the (negative integer) power b without using **.

    Multiplies a by itself |b| times in a loop, then inverts the product.
    """
    product = 1
    for _ in range(abs(b)):
        product *= a
    return 1 / product


print(pow_func_2(3, -5))
e3fa3d8df8826e519a3ce1f806f5dd4586d3353e | Python | nikita494/BioInf | /29.06.21/Interleaving Two Motifs.py | UTF-8 | 949 | 2.9375 | 3 | [] | no_license | #http://rosalind.info/problems/scsp/
def common_supersequence(s, t):
m, n, l = len(s), len(t), [[0] * (len(t) + 1)] * (len(s) + 1)
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
l[i][j] = max(i, j)
elif s[i - 1] == t[j - 1]:
l[i][j] = 1 + l[i - 1][j - 1]
else:
l[i][j] = 1 + min(l[i - 1][j], l[i][j - 1])
x, res, i, j = l[m][n], str(), m, n
while i > 0 and j > 0:
if s[i - 1] == t[j - 1]:
res, i, j, x = res + s[i - 1], i - 1, j - 1, x - 1
elif l[i - 1][j] > l[i][j - 1]:
res, j, x = res + t[j - 1], j - 1, x - 1
else:
res, i, x = res + s[i - 1], i - 1, x - 1
while i > 0:
res, i, x = res + s[i - 1], i - 1, x - 1
while j > 0:
res, j, x = res + t[j - 1], j - 1, x - 1
return res[::-1]
# Rosalind SCSP driver: read the two strings from stdin and print a common
# supersequence (intended to be a shortest one).
print(common_supersequence(input(), input()))
| true |
b9bb602d5eb377ffa3c21f6cb135a32c9c619f75 | Python | daboross/quick-repo-backup-tagit-python | /rename_music_album_and_artist_folder_names_in_music_dir.py | UTF-8 | 2,130 | 2.8125 | 3 | [
"MIT"
] | permissive | # Note: this does require music to already be in a two-directory format, of ~/Music/<some text>/<some text>/track-name.file-format
# if the directory depth in ~/Music/ is greater than 2, this script will malfunction.
import os
from tinytag import TinyTag
print("Valid responses:\nY: Rename\nS: Skip album\nN: Do nothing\n")
continuing = False
# Walk the (hard-coded) music library root; layout must be
# <root>/<artist>/<album>/<track>, as noted at the top of the file.
for (path, directories, files) in os.walk("/home/daboross/Music"):
    if continuing:
        print("\n")
        continuing = False
    # Split the walk path into (root, artist-folder, album-folder);
    # shallower paths are not album directories and are skipped.
    split = path.rsplit("/", 2)
    if len(split) < 3:
        continue
    rest = split[0]
    artist = split[1]
    album = split[2]
    for file in files:
        try:
            tag = TinyTag.get(os.path.join(path, file))
        except LookupError:
            # Not an audio file tinytag understands; skip it.
            continue
        # Prefer the tag's artist, but keep the existing folder name when the
        # tag is missing or is a truncated (<=30 char) prefix of the folder name.
        if tag.artist is None or tag.artist[:30] == artist[:30] and len(artist) > len(tag.artist):
            new_artist = artist.replace('/', '_')
        else:
            new_artist = tag.artist.replace('/', '_')
        # Same rule for the album folder.
        # BUGFIX: this branch previously compared tag.artist/artist instead of
        # tag.album/album, so album folders were chosen incorrectly (and
        # crashed with TypeError when tag.artist was None).
        if tag.album is None or tag.album[:30] == album[:30] and len(album) > len(tag.album):
            new_album = album.replace('/', '_')
        else:
            new_album = tag.album.replace('/', '_')
        # Show "old -> new" only for the parts that would actually change.
        if new_album != album:
            print_album = "({} -> {})".format(album, new_album)
        else:
            print_album = album
        if new_artist != artist:
            print_artist = "({} -> {})".format(artist, new_artist)
        else:
            print_artist = artist
        if new_artist != artist or new_album != album:
            print("{} - {} :: {}".format(print_artist, print_album, file))
            # After an 'sh' response, remaining files in this album are only
            # listed, not prompted for.
            if continuing:
                continue
            # BUGFIX: responses are normalized to lowercase so the uppercase
            # letters advertised in the prompt ("Y", "S", ...) also work.
            response = input("?> ").lower()
            if response.startswith('y'):
                os.makedirs(os.path.join(rest, new_artist, new_album), exist_ok=True)
                os.rename(os.path.join(path, file), os.path.join(rest, new_artist, new_album, file))
            elif response.startswith('sh'):
                continuing = True
            elif response.startswith('s'):
                break
            elif response.startswith('q'):
                exit(0)
| true |