| blob_id (string, length 40) | language (string, 1 class) | repo_name (string, length 5-133) | path (string, length 2-333) | src_encoding (string, 30 classes) | length_bytes (int64, 18-5.47M) | score (float64, 2.52-5.81) | int_score (int64, 3-5) | detected_licenses (list, length 0-67) | license_type (string, 2 classes) | text (string, length 12-5.47M) | download_success (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 0aeb4c0f3c5d7bce7dfd338337afb5ed89f67e14 | Python | adilsonLuz/OficinaPython | /O2-Ex-027.py | UTF-8 | 203 | 3.1875 | 3 | [] | no_license |
lista = [ "b", "d", "c", "a", "z", "f", "x", "a", "a"]
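# count() returns the number of occurrences: "a" appears 3 times and "z" once in this list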
print("\n lista ")
print(lista)
print("\n quantidade de a: ")
print(lista.count("a"))
print("\n quantidade de z: ")
print(lista.count("z"))
| true |
| ea408204a993c32cae3bdbfb5ae3687e9a77f7e6 | Python | YanisAhdjoudj/Regression_Lineaire_Scratch | /1_Programs/1_linear_regression_main.py | UTF-8 | 2,351 | 2.671875 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 14 22:04:19 2021
@author: yanis
"""
import os
from datetime import date
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from scipy.special import ndtri
# Main class : Linear_Regression
# Main methods : fit, predict
# Additional classes : Plots
class Linear_Regression:
def __init__(self,data,varX,varY,intercept=True):
"""
Parameters
----------
intercept : TYPE, optional
DESCRIPTION. The default is True.
Returns
-------
None.
"""
self.coef_=None
self.intercept_= None
self.data=data
self.varX=varX
self.varY=varY
self.intercept=intercept
X = self.data[varX].to_numpy()
Nobs = len(X)
tX = X.transpose()
Y = self.data[varY].to_numpy()
# projection matrix onto the column space of X, and its annihilator
Px = X.dot(np.linalg.inv(tX.dot(X))).dot(tX)
Mx = np.identity(Nobs) - Px
if self.intercept:
df_model = np.size(X, 1) - 1
else:
df_model = np.size(X, 1)
df_resid = Nobs - np.size(X, 1)
def __repr__(self):
return "This program provides a linear regression model"
def data_preparation(self):
# adding an intercept column to the data if needed
if self.intercept==True:
try:
self.data.insert(0,"const",1)
except:
pass
self.varX.insert(0,"const")
self.X=self.data[self.varX]
else:
self.X=self.data[self.varX]
return self.X
def fit(self,Estimation="AL",Nb_ite=100000,Learning_rate=0.001,Precision=0.0000000000001):
# Three types of fitting methods:
# Classical econometric approach I : least squares
# Classical econometric approach II : log likelihood
# Statistical learning approach : loss function
# For the log likelihood and the loss function methods
# two optimisation methods can be used:
# gradient descent and Newton-Raphson
raise NotImplementedError("fit is not implemented yet")
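# A minimal usage sketch with toy data (the column names and values are
# illustrative assumptions, not part of the original project), followed by
# the closed-form least-squares step described in the comments of fit():
# beta_hat = (X'X)^(-1) X'y
demo = pd.DataFrame({"x0": [1.0, 1.0, 1.0, 1.0], "x1": [1.0, 2.0, 3.0, 4.0], "y": [2.1, 3.9, 6.2, 8.1]})
LR = Linear_Regression(data=demo, varX=["x0", "x1"], varY="y")
print(LR)
X_demo = demo[["x0", "x1"]].to_numpy()
y_demo = demo["y"].to_numpy()
beta_hat = np.linalg.inv(X_demo.T.dot(X_demo)).dot(X_demo.T).dot(y_demo)
print(beta_hat)  # [intercept estimate, slope estimate]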
| true |
| be611f2c6b48375e50441995aadf1d37be4bc778 | Python | AndreaPicasso/NLFF | /model/benchmark/rule_classifier.py | UTF-8 | 6,660 | 2.5625 | 3 | [] | no_license |
import pandas as pd
import numpy as np
import tensorflow as tf
import math
from datetime import datetime, timedelta
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix
from math import sqrt
import matplotlib.pyplot as plt
#tf.logging.set_verbosity(tf.logging.INFO)
skip_vector_dim = 7
n_y = 1  # number of outputs; for now up/down, later a neutral class will be added
def sign(x):
if x >= 0:
return 1
elif x < 0:
#return -1
return 0
class Data():
X = []
Y = []
def get_train_test_set(test_percentage=0.3):
idx_split = math.floor(len(Data.pos)*(1-test_percentage))
train_pos = Data.pos[:idx_split]
train_neg = Data.neg[:idx_split]
train_y = Data.Y[:idx_split]
test_pos = Data.pos[idx_split:]
test_neg = Data.neg[idx_split:]
test_y = Data.Y[idx_split:]
return (train_pos, train_neg, train_y), (test_pos, test_neg, test_y)
def load_data(ticker='AAPL', momentum_window=30, newsTimeToMarket =0, X_window_average=40, set_verbosity=True):
X_path = '../tensorflow_model/for_server/SentimentSingleNewsFullNoNorm/'+str(ticker)+'.csv'
Y_path = '../tensorflow_model/for_server/DataSetIndexes/indexes'+str(ticker)+'.csv'
x = pd.read_csv(X_path)
x.drop('Unnamed: 0', axis=1, inplace=True)
x = x.rename(index=str, columns={"initTime": "PUBLICATION_DATE"})
x = x.sort_values(by=['PUBLICATION_DATE'])
x = x.reset_index(drop=True)
y = pd.read_csv(Y_path)
for i, row in x.iterrows():
x.at[i,'PUBLICATION_DATE'] =datetime.strptime(x['PUBLICATION_DATE'][i], '%Y-%m-%d %H:%M:%S') + timedelta(hours=newsTimeToMarket)
y = y.rename(index=str, columns={"Unnamed: 0": "DATE"})
for i, row in y.iterrows():
y['DATE'].at[i] = datetime.strptime(y['DATE'][i], '%Y-%m-%d %H:%M:%S')
z = list()
for i in range(0,y.shape[0]-momentum_window):
# forward return over the momentum window (the trailing momentum_window rows are dropped below)
z.append((y['close'][i+momentum_window] - y['close'][i])/y['close'][i])
y = y.reset_index(drop=True)
y.drop(np.arange(y.shape[0]-momentum_window, y.shape[0]), inplace=True)
y = y.reset_index(drop=True)
y['labels'] = [sign(entry) for entry in z]
min_max_scaler = preprocessing.MinMaxScaler()
initDate = max(y['DATE'][0], x['PUBLICATION_DATE'][0])
finalDate = min(y['DATE'][len(y)-1], x['PUBLICATION_DATE'][len(x)-1])
i = 0
j = 0
close = []
labels = []
pos = []
neg = []
dates = []
# align the start of the price and news series
while(y['DATE'][j] < initDate):
j+=1
while(x['PUBLICATION_DATE'][i] < initDate):
i+=1
while(x['PUBLICATION_DATE'][i] < finalDate and y['DATE'][j] < finalDate ):
timeSlotPos = list()
timeSlotNeg = list()
while(i<len(x)-1 and y['DATE'][j] > x['PUBLICATION_DATE'][i]):
timeSlotPos.append(x['POSITIVE'][i])
timeSlotNeg.append(x['NEGATIVE'][i])
i+=1
if(len(timeSlotPos)==0):
timeSlotPos.append(0)
timeSlotNeg.append(0)
pos.append(np.mean(np.asarray(timeSlotPos), axis=0))
neg.append(np.mean(np.asarray(timeSlotNeg), axis=0))
close.append(y['close'][j])
labels.append(y['labels'][j])
dates.append(str(y['DATE'][j].year)+'/'+str(y['DATE'][j].month))
j+=1
pos = np.convolve(np.asarray(pos), np.repeat(1.0, X_window_average)/X_window_average, 'same')
neg = np.convolve(np.asarray(neg), np.repeat(1.0, X_window_average)/X_window_average, 'same')
Data.pos = pos
Data.neg = neg
Data.Y = labels
class ModelSelection():
def modelSelectionFixedTTM(ticker='AAPL'):
print('\n\n\n==================== '+str(ticker)+' ==================== \n\n\n')
test_accs = []
MCCs = []
MCCsReal = []
TP = []
TN = []
FP = []
FN = []
Ttm_range = [0, 7, 14, 21,28, 35, 70, 105, 210]
for ttm in Ttm_range:
Data.load_data(ticker=ticker, momentum_window=30, newsTimeToMarket =ttm, X_window_average=30, set_verbosity=False)
(train_pos, train_neg, train_y), (test_pos, test_neg, test_y) = Data.get_train_test_set()
# best_MCC = 0
# best_b = 0
# for bias in np.linspace(-1,1,20):
# yhat = list()
# for i in range(len(train_y)):
# yhat.append(1 if train_pos[i]+bias >= train_neg[i] else 0)
# cm = confusion_matrix(train_y, yhat)
# tn, fp, fn, tp = cm.ravel()
# denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)
# curr_MCC = 0 if denom== 0 else (tp*tn -fp*fn)/sqrt(denom)
# if(curr_MCC > best_MCC):
# best_MCC = curr_MCC
# best_b = bias
# bias = best_b
bias = np.mean(train_neg) - np.mean(train_pos)
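# bias shifts the positive scores so that their training-set mean matches the negative mean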
yhat = list()
for i in range(len(test_y)):
yhat.append(1 if test_pos[i]+bias >= test_neg[i] else 0)
cm = confusion_matrix(test_y, yhat)
tn, fp, fn, tp = cm.ravel()
denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)
MCCsReal.append(0 if denom== 0 else (tp*tn -fp*fn)/sqrt(denom) )
TP.append(tp)
TN.append(tn)
FN.append(fn)
FP.append(fp)
test_accs.append((tp+tn)/(tp+tn+fp+fn))
if(tp + fp == 0):
tp = 1
if(tp + fn == 0):
tp = 1
if(tn + fp == 0):
tn = 1
if(tn + fn == 0):
tn = 1
MCCs.append((tp*tn -fp*fn)/sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
#print(ticker)
#print('best b: '+str(bias))
#print('Ttm_range, '+str(Ttm_range))
print('test acc,'+str(ticker)+', '+str(test_accs))
#print('MCC,'+str(ticker)+', '+str(MCCs))
print('MCC_R,'+str(ticker)+', '+str(MCCsReal))
print('TN,'+str(ticker)+', '+str(TN))
print('FP,'+str(ticker)+', '+str(FP))
print('FN,'+str(ticker)+', '+str(FN))
print('TP,'+str(ticker)+', '+str(TP))
tickers = ['AAPL','AMZN','GOOGL','MSFT','FB','INTC','CSCO','CMCSA','NVDA','NFLX']
for tic in tickers:
ModelSelection.modelSelectionFixedTTM(ticker=tic)
| true |
| bad4cb76164639622428588b9e0f90b6566b7de7 | Python | RasmusSpangsberg/TankGame | /TankGame.py | UTF-8 | 4,768 | 3.5625 | 4 | [] | no_license |
import pygame
from math import pi, sqrt
pygame.init()
display_width = 800
display_height = 600
game_display = pygame.display.set_mode((display_width, display_height))
clock = pygame.time.Clock()
class Tank:
def __init__(self, pos_x, pos_y, width, height, color, is_enemy=False):
self.pos_x = pos_x
self.pos_y = pos_y
self.width = width
self.height = height
self.color = color
self.is_enemy = is_enemy
def draw(self, mouse_x=None, mouse_y=None):
rect_top_x = self.pos_x + int(self.width/2)
rect_top_y = self.pos_y + 5
pygame.draw.rect(game_display, self.color, [self.pos_x, self.pos_y, self.width, self.height])
pygame.draw.circle(game_display, self.color, [rect_top_x, rect_top_y], 30)
if self.is_enemy:
pygame.draw.line(game_display, self.color, [rect_top_x, rect_top_y], [rect_top_x - 50, rect_top_y - 50], 5)
else:
# if not an enemy, aim the barrel at the mouse cursor
x = mouse_x - rect_top_x
y = display_height - mouse_y - (display_height - rect_top_y)
barrel_len = 50
mouse_vector_len = sqrt(x**2 + y**2)
barrel_unit_vector = [x/mouse_vector_len,
y/mouse_vector_len]
barrel_vector = [barrel_unit_vector[0] * barrel_len + rect_top_x,
(display_height - barrel_unit_vector[1] * barrel_len) - (display_height - rect_top_y)]
start_pos = [rect_top_x, rect_top_y]
pygame.draw.line(game_display, RED, start_pos, barrel_vector, 5)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
enemy_x = 650
enemy_y = 500
enemy_width = 100
enemy_height = 50
enemy = Tank(enemy_x, enemy_y, enemy_width, enemy_height, RED, is_enemy=True)
player_x = 50
player_y = 500
player_width = 100
player_height = 50
player = Tank(player_x, player_y, player_width, player_height, GREEN)
player_top_x = player_x + int(player_width/2)
player_top_y = player_y + 5
class Projectile:
def __init__(self, x, y, radius, color, mouse_x, mouse_y):
self.pos_x = x
self.pos_y = y
self.radius = radius
self.color = color
self.delta_time = 1/60
self.mass = 50.0
self.g = 9.82
self.velocity_x = mouse_x - player_top_x
self.velocity_y = display_height - mouse_y - (display_height - player_top_y)
def draw(self):
pygame.draw.circle(game_display, self.color, [self.pos_x, self.pos_y], self.radius)
def update(self):
self.pos_x += int(self.velocity_x * self.delta_time)
self.pos_y -= int(self.velocity_y * self.delta_time)
# velocity_x only gets affected by wind/friction
self.velocity_x -= 1
self.velocity_y -= (self.mass * self.g) * self.delta_time
def collided(self, obj):
# parentheses for easier reading
if (self.pos_x + self.radius) >= (obj.pos_x) and (self.pos_x - self.radius) <= (obj.pos_x + obj.width):
if (self.pos_y + self.radius) >= (obj.pos_y) and (self.pos_y - self.radius) <= (obj.pos_y + obj.height):
return True
return False
ball_x = 100
ball_y = 500
ball_radius = 20
balls = []
enemies_hit = 0
enemies_missed = 0
fire = False
game_exit = False
arc_enabled = False
cheat_code_str = ""
myfont = pygame.font.SysFont("Comic Sans MS", 30)
# variables used to calculate the arc
delta_time = 1/60
mass = 50.0
g = 9.82
while not game_exit:
game_display.fill(BLACK)
mouse_x, mouse_y = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_exit = True
if event.type == pygame.KEYDOWN:
cheat_code_str += pygame.key.name(event.key)
if cheat_code_str == "hi":
arc_enabled = True
if event.type == pygame.MOUSEBUTTONDOWN:
balls.append(Projectile(ball_x, ball_y, ball_radius, BLUE, mouse_x, mouse_y))
if arc_enabled:
pos_x = ball_x
pos_y = ball_y
velocity_x = mouse_x - player_top_x
velocity_y = (display_height - mouse_y) - (display_height - player_top_y)
for i in range(100):
pos_x += int(velocity_x * delta_time)
pos_y -= int(velocity_y * delta_time)
# velocity_x only gets affected by wind/friction
velocity_x -= 1
velocity_y -= (mass * g) * delta_time
if i % 8 == 0:
pygame.draw.circle(game_display, BLUE, [pos_x, pos_y], 5)
for ball in balls:
ball.update()
ball.draw()
if ball.pos_y >= display_height + ball_radius:
balls.remove(ball)
enemies_missed += 1
if ball.collided(enemy):
balls.remove(ball)
enemies_hit += 1
enemies_hit_str = "Enemies hit: " + str(enemies_hit)
enemies_missed_str = "Enemies missed: " + str(enemies_missed)
enemies_hit_surface = myfont.render(enemies_hit_str, False, WHITE)
enemies_missed_surface = myfont.render(enemies_missed_str, False, WHITE)
game_display.blit(enemies_hit_surface, (0, 0))
game_display.blit(enemies_missed_surface, (0, 30))
enemy.draw()
player.draw(mouse_x, mouse_y)
pygame.display.update()
clock.tick(60)
pygame.quit()
| true |
| 52aeb42fe1d034e8bc6db2f7ea134aad44ce451f | Python | bradywatkinson/2041ass1 | /myTests/sub3.py | UTF-8 | 855 | 3.78125 | 4 | [] | no_license |
#!/usr/bin/python
import sys
# Finding squares
x = 2
print "Squares between 4 and 256"
while x < 101:
x = x ** 2
print x
for i in range(2): print i
#print a checker board thing
print
print "Checkers!"
sys.stdout.write("Enter a number plz: ")
s = int(sys.stdin.readline())
for x in range(s):
for y in range(s):
if (x+y) % 2 == 1: sys.stdout.write("*")
else: sys.stdout.write("o")
print
q = "101"
print "Values of q is", int(q)
print "201 looks like", int("201")
sys.stdout.write("\n");
print "Halfing from 100"
x = 100
while 1:
print x; x = x >> 1
if x < 10: break
print
print "Its foobar!"
for x in range(10):
if x%2==0 and x %3==0: print "Foobar is", x
elif x % 2 == 0: print "Foo is", x
elif x % 3 == 0: print "Bar is", x
else:
print x, "is not foo or bar!"
| true |
| 6fdbbe35bbb281dd95d6bd1fc86c37d7c3c01e8a | Python | sebaslherrera/algorithmic-toolbox | /week3_greedy_algorithms/7_maximum_salary/largest_number.py | UTF-8 | 564 | 4 | 4 | ["MIT"] | permissive |
#Uses python3
def isGreaterOrEqual(a, b):
"""Compare the two options and choose best permutation"""
ab = str(a) + str(b)
ba = str(b) + str(a)
if ab > ba:
return a
else:
return b
def largest_number(a):
ans = ''
while a:
maxDigit = 0
for digit in a:
maxDigit = isGreaterOrEqual(digit, maxDigit)
ans += maxDigit
a.remove(maxDigit)
return ans
if __name__ == '__main__':
n = int(input())
a = list(map(str, input().split()))
print(largest_number(a))
| true |
| 7ad5d8573273b71b40b29cc67555c1e08a5a2d9a | Python | Doreen162/Python-Exercises | /Import math.py | UTF-8 | 140 | 3.578125 | 4 | [] | no_license |
import math
# Variables to be used
a = 8
b = 2
c = 1
d = 4
# Equation to solve x
x = math.sqrt(a - 3) / (b * b + c * c + d * d)
# Answer to x
print(x)
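# expected output: sqrt(5)/21, approximately 0.10648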
| true |
| 62da70ddf65942fed3274efd34af62c200119d41 | Python | FrostyX/fedora-infra-ansible | /roles/modernpaste/files/paste-info.py | UTF-8 | 458 | 2.734375 | 3 | [] | no_license |
#!/usr/bin/env python
import sys
sys.path.append('/usr/share/modern-paste/app')
import modern_paste
from util.cryptography import get_decid
from database.paste import get_paste_by_id
paste_id = get_decid(sys.argv[1])
paste = get_paste_by_id(paste_id)
print('Decrypted ID: ' + str(paste_id))
print('Title : ' + paste.title)
print('Language : ' + paste.language)
print('Views : ' + str(paste.views))
print('Contents : \n' + paste.contents)
| true |
| 9ecfc948dad462445bf740f4a2ba63b249a17e14 | Python | ongaaron96/kattis-solutions | /python3/1_4-apaxiaaans.py | UTF-8 | 147 | 3.8125 | 4 | [] | no_license |
name = input()
prev_char = result = ''
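# collapse consecutive repeated characters in the name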
for char in name:
if char == prev_char:
continue
result += char
prev_char = char
print(result)
| true |
| c841fab634b2bafa92c00ab19dec4838feb99c33 | Python | NEWPLAN/mars_torch | /Network/critic.py | UTF-8 | 1,081 | 2.78125 | 3 | [] | no_license |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Critic(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Critic, self).__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, s, a):
x = torch.cat([s, a], 1)
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class CriticNet(nn.Module):
def __init__(self, state_dim, action_dim):
super(CriticNet, self).__init__()
self.linear1 = nn.Linear(state_dim, 128)
self.linear2 = nn.Linear(128, 32)
self.linear3 = nn.Linear(32 + action_dim, 64)
self.linear4 = nn.Linear(64, 1)
def forward(self, s, a):
x = F.relu(self.linear1(s))
x = F.relu(self.linear2(x))
x = torch.cat([x, a], 1)
x = F.relu(self.linear3(x))
x = F.relu(self.linear4(x))
return x
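# Quick shape check of both critics; the batch size and feature dimensions below are illustrative assumptions.
state = torch.randn(4, 8)    # batch of 4 states with 8 features
action = torch.randn(4, 2)   # batch of 4 actions with 2 features
critic = Critic(input_size=10, hidden_size=32, output_size=1)
print(critic(state, action).shape)      # torch.Size([4, 1])
critic_net = CriticNet(state_dim=8, action_dim=2)
print(critic_net(state, action).shape)  # torch.Size([4, 1])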
| true |
| 1afd3222471e9553b92214e4a0b31701cdb91b8d | Python | estroud1991/Python-Examples | /guassianFilterGenerator 2.py | UTF-8 | 1,820 | 3.171875 | 3 | [] | no_license |
import math
import numpy as np
import cv2
def generateGuass():
sigma = float(input("Please enter your sigma/variance: "))
size = int(input("Please enter the size of the gaussian filter, must be odd: "))
x = int((size-1)/2)
valueList = []
for i in range(-x,x+1,1):
for j in range(-x,x+1,1):
#using the 2D Gaussian formula to calculate each filter value
value = (1/(2*math.pi*(sigma**2)))*(math.exp(-(((i**2)+(j**2))/(2*(sigma**2)))))
valueList.append(value)
scaler = valueList[0]
filterMat = np.zeros([size,size, 3],dtype = float)
index = 0
for i in range(size):
for j in range(size):
filterMat[i][j] = valueList[index], valueList[index], valueList[index]
index+=1
return filterMat
def applyFilter(image, filterMat):
padding = (len(filterMat)-1)//2
iRow = len(image)
iCol = len(image[0])
#Adds border that is equal to the padding in order to get the corner pixels to correct values
image = cv2.copyMakeBorder(image, padding, padding, padding, padding, cv2.BORDER_REPLICATE)
processedImage = np.zeros((iRow, iCol), dtype="float32")
for i in range(padding, iRow+padding):
for j in range(padding, iCol+padding):
#Getting portion of image for convolution, summing the multiplied values, setting new image values
iBlock = image[i - padding:i + padding + 1, j - padding: j + padding + 1]
iSum = (iBlock * filterMat).sum()
processedImage[i - padding, j - padding] = iSum
return processedImage
image = cv2.imread("cat.png")
cv2.imshow("orig",image/255.0)
for i in range(3):
filterMat = generateGuass()
newImage = applyFilter(image, filterMat)
cv2.imshow("New" + str(i), newImage/255.0)
| true |
| 54cc45c52157b696ca2598e53160c60c892e34ea | Python | dionvargas/TCCII | /Software Pi/util.py | UTF-8 | 6,404 | 2.671875 | 3 | [] | no_license |
import cv2
import numpy as np
import json
import os
from PIL import Image, ImageTk
def removeReflexos(frame):
image_in = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Load the glared image
h, s, v = cv2.split(cv2.cvtColor(image_in, cv2.COLOR_RGB2HSV)) # split into HSV components
ret, th = cv2.threshold(h, 20, 255, cv2.THRESH_BINARY_INV)
disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (13, 13))
mascara = cv2.dilate(th.astype(np.uint8), disk)
corrected = cv2.inpaint(image_in, mascara, 10, cv2.INPAINT_TELEA)
return corrected
def findIris(frame, posH, posV, line=8):
matriz = frame
original = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
posCart = (posH**2+posV**2)**(1/2)
# Median blur to remove noise
suavizada = cv2.medianBlur(original, 15)
cimg = matriz.copy()
circles = cv2.HoughCircles(suavizada, cv2.HOUGH_GRADIENT, 1, 100, param1=40, param2=30, minRadius=100, maxRadius=400)
if(circles is not None):
circles = np.int16(np.around(circles))
maiorRaio = 0
pupila = None
distancia = 0
for i in circles[0, :]:
disCar = abs(posCart - (i[0] ** 2 + i[1] ** 2) ** (1 / 2))
if(posH == 0 and posV == 0):
if (i[2] > maiorRaio and i[2] > 100 and i[2] < 220):
maiorRaio = i[2]
distancia = disCar
pupila = i
else:
if (distancia > disCar and i[2] > 100 and i[2] < 220):
maiorRaio = i[2]
distancia = disCar
pupila = i
if(pupila is not None):
# draw the outer circle
cv2.circle(cimg, (pupila[0], pupila[1]), pupila[2], (0, 255, 0), line)
# draw the center of the circle
cv2.circle(cimg, (pupila[0], pupila[1]), 2, (0, 0, 255), line)
area = float(np.pi * pupila[2]**2)
circularidade = 1
centroX = pupila[0]
centroY = pupila[1]
angulo = 0
final = cimg.copy()
else:
area = 0
circularidade = 0
centroX = 0
centroY = 0
angulo = 0
final = matriz.copy()
else:
area = 0
circularidade = 0
centroX = 0
centroY = 0
angulo = 0
final = matriz.copy()
return area, circularidade, centroX, centroY, angulo, final
def findPupila(frame, line=5):
matriz = frame
image_in = cv2.cvtColor(matriz, cv2.COLOR_BGR2RGB) # Load the glared image
h, s, v = cv2.split(cv2.cvtColor(image_in, cv2.COLOR_RGB2HSV)) # split into HSV components
ret, s = cv2.threshold(s, 20, 255, cv2.THRESH_BINARY)
ret, reflexos = cv2.threshold(h, 10, 255, cv2.THRESH_BINARY_INV)
s = cv2.add(s, reflexos)
disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (50, 50))
s = cv2.morphologyEx(s, cv2.MORPH_CLOSE, disk)
elementReflexos = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
s = cv2.morphologyEx(s, cv2.MORPH_OPEN, elementReflexos)
# Compute the circularity
modo = cv2.RETR_TREE
metodo = cv2.CHAIN_APPROX_SIMPLE
contornos, hierarquia = cv2.findContours(s, modo, metodo)
maiorArea = 0
circularidade = 0
area = 0
pupila = None
for c in contornos:
if (int(len(c) > 5)):
area = cv2.contourArea(c)
perimetro = cv2.arcLength(c, True)
circularidade = (4 * np.pi * area) / (perimetro * perimetro)
if ((area > maiorArea) and (circularidade > 0.50) and (area > 3000) and (area < 7000)):
maiorArea = area
pupila = c
else:
print("Elipse muito pequena")
final = matriz.copy()
width, height = final.shape[:2]
if (pupila is None):
centroX = 0
centroY = 0
else:
ellipse = cv2.fitEllipse(pupila)
cv2.ellipse(final, ellipse, (0, 0, 255), line)
centroX = int(ellipse[0][0])
centroY = int(ellipse[0][1])
# vertical line through the pupil centre
cv2.line(final, (centroX, 0), (centroX, width), (0, 0, 255), line)
# horizontal line through the pupil centre
cv2.line(final, (0, centroY), (height, centroY), (0, 0, 255), line)
angulo = 1
return area, circularidade, centroX, centroY, angulo, final
def convertToExibe(frame, x=0, y=0):
if ((x == 0) or (y == 0)):
y = np.size(frame, 0)
x = np.size(frame, 1)
frame = cv2.resize(frame, (int(x), int(y)))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = ImageTk.PhotoImage(image=Image.fromarray(frame))
return frame
def setPaciente(paciente):
diretorio = os.getcwd() + "/pacientes/" + paciente["nome"]
with open(diretorio + '/paciente.json', 'w') as outfile:
json.dump(paciente, outfile)
def getPaciente(nomePaciente):
global paciente
diretorio = os.getcwd() + "/pacientes/" + nomePaciente + '/paciente.json'
with open(diretorio) as json_file:
paciente = json.load(json_file)
return paciente
def setExame(exame, dir):
with open(dir + '/anamnese.json', 'w') as outfile:
json.dump(exame, outfile)
def getExame(dir):
global exame
dir = dir + '/anamnese.json'
with open(dir) as json_file:
exame = json.load(json_file)
return exame
def getConfig():
global config
with open('configs.json') as json_file:
config = json.load(json_file)
return config
def setConfig(config):
with open('configs.json', 'w') as outfile:
json.dump(config, outfile)
def getExam():
global exam
with open('exam.json') as json_file:
exam = json.load(json_file)
return exam
def setExam(exam):
with open('exam.json', 'w') as outfile:
json.dump(exam, outfile)
def setDados(location, dados):
with open(location + 'dados.json', 'w') as outfile:
json.dump(dados, outfile)
def getDados(location):
global dados
with open(location+'/dados.json') as json_file:
dados = json.load(json_file)
return dados
def validateInt(value):
try:
v = int(value)
return True
except ValueError:
return False
def validateFloat(value):
try:
v = float(value)
return value
except ValueError:
return None
| true |
| 16c9b3c13109619e80fb37eea9d02583a5ae963f | Python | clean-exe/coctail-monaco | /main.py | UTF-8 | 5,024 | 3.75 | 4 | ["MIT"] | permissive |
#!/usr/bin/python3
import random
""" This is a simple program that simulate a coctail monaco game.
Enter a list of players, and the program will get 1 out per time. """
# global person_id
class Person:
"""Simple person class with First and Second name."""
def __init__(self, uid, first_name, family_name):
self.uid = uid
self.first_name = first_name
self.family_name = family_name
def print(self):
print(self.first_name, self.family_name)
class Game:
"""docstring for Game"""
person_id = 0
bid_id = 0
def __init__(self, ):
self.bids = []
self.persons = []
self.persons_out = {}
def add_person(self, first_name, family_name=''):
self.person_id += 1
person = Person(self.person_id, first_name, family_name)
self.persons.append(person)
print("Person added : %s %s %s" %(self.person_id, first_name, family_name))
def get_person(self, first_name=None, family_name=None):
results1 = []
results2 = []
if first_name is not None:
results1 = [x for x in self.persons if x.first_name == first_name]
if family_name is not None:
results2 = [x for x in self.persons if x.family_name == family_name]
results = results1 + results2
if len(results) == 0:
print("no results found")
return None
if len(results) == 1:
return results[0]
if len(results) > 1:
print("too many results refine search")
return None
def add_bid(self, ):
self.bid_id += 1
person1_name = input("Enter the name of the first person >>" )
person1_family_name = input("Enter the family name of the first person >>")
person1 = self.get_person(first_name=person1_name, family_name=person1_family_name)
person2_name = input("Enter the name of the second person >>")
person2_family_name = input("Enter the family name of the second person >>")
person2 = self.get_person(first_name=person2_name, family_name=person2_family_name)
amount = float(input("What is the amount? >>"))
bid = Bid(self.bid_id, person1, person2, amount)
self.bids.append(bid)
print("Bid added between %s %s and %s %s of %s$" %(person1.first_name, person1.family_name, person2.first_name, person2.family_name, amount))
def shuffle_persons(self):
random.shuffle(self.persons)
def get_one(self, position):
the_one = self.persons.pop(0)
self.persons_out[position] = the_one
the_one.print()
def play(self):
for k in range(len(self.persons)):
value = input("Ready to pick one person? >>")
self.get_one(k+1)
# print(self.persons_out)
def print_persons_out(self):
count = 0
for key in self.persons_out:
print("%3s %s %s" %(key, self.persons_out[key].first_name, self.persons_out[key].family_name))
if not ((count+1) % 5):
print(" ")
count += 1
class Bid:
"""docstring for Bid"""
def __init__(self, uid, person1, person2, amount):
self.person1 = person1
self.person2 = person2
self.amount = amount
def add(self, person1, person2, amount):
self.person1 = person1
self.person2 = person2
self.amount = amount
class Editor(Game):
def __init__(self):
self.game = game = Game()
game.add_person('Peter', 'Quill')
game.add_person('Yondu', 'Udonta')
game.add_person('Gamora')
game.add_person('Korath')
game.add_person('Rocket')
game.add_person('Groot')
game.add_person('Drax')
self.menu_map = {
"bid": game.add_bid,
"test": self.test,
"change": self.change,
"quit": self.quit,
}
def is_permitted(self, action):
# stub: no real authorization is implemented here
return True
def test(self):
if self.is_permitted("test program"):
print("Testing program now...")
def change(self):
if self.is_permitted("change program"):
print("Changing program now...")
def quit(self):
raise SystemExit()
def menu(self):
try:
answer = ""
while True:
print(
"""
Please enter a command:
\tbid\tAdd bid
\ttest\tTest the program
\tchange\tChange the program
\tquit\tQuit
"""
)
answer = input("enter a command: ").lower()
try:
print("ok")
func = self.menu_map[answer]
except KeyError:
print("{} is not a valid option".format(answer))
else:
func()
finally:
print("Thank you for testing the auth module")
if __name__ == "__main__":
editor = Editor()
try:
editor.menu()
except SystemExit:
pass
editor.game.shuffle_persons()
editor.game.play()
editor.game.print_persons_out()
| true |
| a3d47f333bb0f79164b2df4c670ccdfc5480d6e5 | Python | Iceman1590/AT-TVectorAirgig | /Project Files/Basic Navigation/Distance2.py | UTF-8 | 691 | 2.578125 | 3 | ["Apache-2.0"] | permissive |
import anki_vector
from anki_vector.util import degrees, distance_mm, speed_mmps
import time
args = anki_vector.util.parse_command_args()
with anki_vector.Robot() as robot:
for _ in range(10):
if robot.proximity.last_sensor_reading:
distance = robot.proximity.last_sensor_reading.distance
prox = distance.distance_mm
print("=====================================================================")
print(prox)
print("=====================================================================")
time.sleep(1.0)
if ((prox) < 100.0):
robot.behavior.turn_in_place(degrees(-90))
else:
robot.behavior.drive_straight(distance_mm(50), speed_mmps(100))
| true |
| 4b55ba88e94c14aada58799d9ea2e6db07c59836 | Python | pixelsomatic/python-notes | /teste_operador.py | UTF-8 | 618 | 4.46875 | 4 | [] | no_license |
import math
# Predecessor and successor
num = int(input('Digita um número aí: '))
ant = num - 1
suc = num + 1
print('O número antes de {} é {} e o depois dele é {}'.format(num, ant, suc))
# Double, triple and square root
n = int(input('Manda um número: '))
d = n * 2
t = n * 3
r = math.sqrt(n)
# print('O dobro de {} é {}'.format(n, d))
# print('O triplo de {} é {}'.format(n, t))
# print('A raiz quadrada de {} é {:.3f}'.format(n, r))
print('O dobro de {} vale {}.'.format(n, (n*2)))
print('O triplo de {} vale {}. \n A raiz quadrada de {} vale {:.2f}'.format(n, (n*3), n, pow(n, (1/2))))  # pow(base, exponent)
| true |
| 1bbf880bcd02e5634b53fe4ef7952cca15022580 | Python | recepsirin/djforeingkeys | /src/cars/models.py | UTF-8 | 1,799 | 2.53125 | 3 | [] | no_license |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
User = settings.AUTH_USER_MODEL # 'auth.User'
def set_delete_user():
user_inner = get_user_model()
return user_inner.objects.get_or_create(username='deleted')[0] # get_or_create --> (obj, bool)
def limit_car_choices_to():
# return {'is_staff': True}
Q = models.Q
return Q(username__icontains='x') | Q(username__icontains='e')
class Car(models.Model):
user = models.ForeignKey(User,
on_delete=models.SET(set_delete_user),
limit_choices_to=limit_car_choices_to
)
updated_by = models.ForeignKey(User, related_name='updated_car_user', null=True, blank=True, on_delete=models.SET_NULL)
# on_delete=models.SET_DEFAULT, default=1
# user = models.ForeignKey(User)
# drivers = models.ManyToManyField(User)
# first_owner = models.OneToOneField(User)
# passengers = models.ManyToManyField(User)
name = models.CharField(max_length=120)
def __str__(self):
return self.name
# ForeignKey = ManyToOneField() # Many users can have any car, car can only have one user
# car_obj = Car.objects.first()
# car_obj.user # notation here
#
# User = car_obj.user.__class__
#
# abc = User.objects.all().last() # filter queryset
#
# # below query sets are doing same thing
# user_cars = abc.car_set.all() # reverse relationship
# user_cars_qs = Car.objects.filter(user=abc) # forward relationship
#
#
# class Comment(models.Model):
# user = models.ForeignKey(User)
# content = models.CharField(max_length=120)
#
#
# comments = abc.comment_set.all()
# comments_qs = Comment.objects.filter(user=abc)
| true |
| 54656694fcf829a734f32fc3d6b81c60dddb2647 | Python | gscho74/ImageProcessing | /중간고사/Ex3.py | UTF-8 | 1,995 | 2.828125 | 3 | [] | no_license |
import numpy as np
from scipy import signal, misc
import matplotlib.pyplot as plt
from scipy import ndimage
from mpl_toolkits.mplot3d import Axes3D
sigma = 30
x=np.arange(-128,127,1.0)
y=np.arange(-128,127,1.0)
X,Y=np.meshgrid(x,y)
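# Laplacian of Gaussian: LoG(x,y) = -(1/(pi*sigma^4)) * (1 - (x^2+y^2)/(2*sigma^2)) * exp(-(x^2+y^2)/(2*sigma^2))
# Below, a = -(x^2+y^2)/(2*sigma^2), so (1+a) is the bracketed factor.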
s=1/(np.pi*pow(sigma,4))
a=-(pow(X,2)+pow(Y,2))/(2*pow(sigma,2))
g=-s*(1+a)*np.exp(a)
#a
plt.imshow(g)
plt.gray()
plt.title('LoG(x,y)')
plt.axis('off')
plt.show()
#b
fig=plt.figure()
ax=Axes3D(fig)
ax.plot_surface(X,Y,g)
plt.show()
#c
# 9x9 LoG filter
def LoG_FIlter(sigma,Filter_Size):
g = np.zeros(shape=(Filter_Size,Filter_Size), dtype=float)
half = Filter_Size//2
for y in range(-half,half+1):
for x in range(-half,half+1):
s=1/(np.pi*pow(sigma,4))
a=-(pow(x,2)+pow(y,2))/(2*pow(sigma,2))
p=-s*(1+a)*np.exp(a)
g[y+half,x+half]=p
return g
Filter_Size = 9
print(LoG_FIlter(0.8,Filter_Size))
#d
def im_filtering(im, Filter, FilterSize):
row, col = im.shape
padding=int(FilterSize/2)
Image_Buffer = np.zeros(shape=(row+2*padding,col+2*padding), dtype=np.uint8)
Image_Buffer[padding:row+padding, padding:col+padding] = im[:,:]
Image_New = np.zeros(shape=(row,col), dtype=np.uint8)
for y in range(padding,row+padding):
for x in range(padding,col+padding):
buff = Image_Buffer[y-padding:y+padding+1,x-padding:x+padding+1]
pixel = np.sum(buff * Filter)
pixel = np.uint8(np.where(pixel>255,255,np.where(pixel<0,0,pixel)))
Image_New[y-padding,x-padding] = pixel
return Image_New
lena = misc.imread('image/lena_256.bmp')
filtering_img = im_filtering(lena, LoG_FIlter(0.8,Filter_Size), Filter_Size)
# image display helper
def image_print(img, title, print_num, current_num):
plt.subplot(print_num[0],print_num[1],current_num)
plt.title(title)
plt.gray()
plt.imshow(img)
plt.axis('off')
print_num = [1,2]
image_print(lena, "InputImage", print_num, 1)
image_print(filtering_img, "Log Filtering Image", print_num, 2)
plt.show()
| true |
| 1f1e90bfc00e17f42c9ad4a4e6b88ff6ea6b0a19 | Python | sauln/pyjanitor | /tests/io/test_read_csvs.py | UTF-8 | 3,953 | 3.109375 | 3 | ["MIT"] | permissive |
import glob
import os
import pandas as pd
import pytest
from janitor import io
CSV_FILE_PATH = "my_test_csv_for_read_csvs_{}.csv"
def create_csv_file(number_of_files, col_names=None):
for i in range(number_of_files):
filename = CSV_FILE_PATH.format(i)
df = pd.DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]])
df.to_csv(filename, index=False)
def remove_csv_files():
# Get a list of all the file paths matching pattern in specified directory
fileList = glob.glob(CSV_FILE_PATH.format("*"))
# Iterate over the list of filepaths & remove each file.
for filePath in fileList:
os.remove(filePath)
@pytest.mark.functions
def test_read_csvs_one_csv_path():
# Setup
# When a CSV file with 3 cols and 4 rows is on disk
number_of_files = 1
create_csv_file(number_of_files)
# If the csv file is read into DataFrame
df = io.read_csvs(CSV_FILE_PATH.format("*"))
# Then the dataframe has 3 cols and 4 rows
try:
assert len(df.columns) == 3
assert len(df) == 4
finally:
# Cleanup
remove_csv_files()
@pytest.mark.functions
def test_read_csvs_zero_csv_path():
# Setup
# When no CSV files are on disk
# When reading, the function should raise ValueError.
try:
io.read_csvs("nofilesondisk.csv")
raise Exception
except ValueError:
pass
finally:
remove_csv_files()
@pytest.mark.functions
def test_read_csvs_three_csv_path():
# Setup
# When a CSV file with 3 cols and 4 rows is on disk
number_of_files = 3
create_csv_file(number_of_files)
# If the csv file is read into DataFrame
df = io.read_csvs(CSV_FILE_PATH.format("*"))
# Then the dataframe has 3 cols and 12 rows
try:
assert len(df.columns) == 3
assert len(df) == 4 * number_of_files
finally:
# Cleanup
remove_csv_files()
@pytest.mark.functions
def test_read_csvs_three_separated_csv_path():
# Setup
# When a CSV file with 3 cols and 4 rows is on disk
number_of_files = 3
create_csv_file(number_of_files)
# If the csv file is read into DataFrame
dfs_dict = io.read_csvs(CSV_FILE_PATH.format("*"), separate_df=True)
# Then the dataframe list has 3 dataframes
try:
assert len(dfs_dict) == number_of_files
for df in dfs_dict.values(): # noqa: PD011
assert len(df) == 4
assert len(df.columns) == 3
finally:
# Cleanup
remove_csv_files()
@pytest.mark.functions
def test_read_csvs_two_unmatching_csv_files():
# Setup
# When two csv files do not have same column names
df = pd.DataFrame(
[[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["a", "b", "c"]
)
df.to_csv(CSV_FILE_PATH.format(0), index=False)
df = pd.DataFrame(
[[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["d", "e", "f"]
)
df.to_csv(CSV_FILE_PATH.format(1), index=False)
# If the csv files are read into DataFrame
try:
io.read_csvs(CSV_FILE_PATH.format("*"))
# if read_csvs accepts the mismatched files, fail the test
raise ValueError
except ValueError:
# If the read raises an exception it is ok
pass
finally:
remove_csv_files()
@pytest.mark.functions
def test_read_csvs_lists():
# Setup
# When a CSV file with 3 cols and 4 rows is on disk
number_of_files = 3
create_csv_file(number_of_files)
csvs_list = [CSV_FILE_PATH.format(i) for i in range(number_of_files)]
# If the list of csv files is read into DataFrame
dfs_list = io.read_csvs(files_path=csvs_list, separate_df=True)
# Then the dataframe list has 3 dataframes
try:
assert len(dfs_list) == number_of_files
for df in dfs_list.values(): # noqa: PD011
assert len(df) == 4
assert len(df.columns) == 3
finally:
# Cleanup
remove_csv_files()
| true |
| 833c5c511902809b809cd9e409968122089f8171 | Python | danielobmann/desyre | /imports/util.py | UTF-8 | 3,215 | 2.78125 | 3 | [] | no_license |
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import *
class Util:
def __init__(self):
pass
@staticmethod
def cosine_decay(epoch, total, initial=1e-3):
return initial / 2. * (1 + np.cos(np.pi * epoch / total))
@staticmethod
def project(x):
return np.clip(x, 0, 1)
@staticmethod
def psnr(y_true, y_pred):
return tf.image.psnr(y_true, y_pred, max_val=1.0)
@staticmethod
def nmse(y_true, y_pred):
m = tf.reduce_mean(tf.squared_difference(y_true, y_pred))
n = tf.reduce_mean(tf.squared_difference(y_true, 0))
return m / n
@staticmethod
def psnr_numpy(x, xhat, maxvalue=1.):
return 10 * np.log10(maxvalue / np.mean((x - xhat) ** 2))
@staticmethod
def nmse_numpy(x, x_hat):
error = np.mean((x - x_hat) ** 2)
normalizer = np.mean(x ** 2)
return error / normalizer
@staticmethod
def _mark_inset(parent_axes, inset_axes, **kwargs):
# This code is copied from the matplotlib source code and slightly modified.
# This is done to avoid the 'connection lines'.
rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
if 'fill' in kwargs:
pp = BboxPatch(rect, **kwargs)
else:
fill = bool({'fc', 'facecolor', 'color'}.intersection(kwargs))
pp = BboxPatch(rect, fill=fill, **kwargs)
parent_axes.add_patch(pp)
p1 = BboxConnector(inset_axes.bbox, rect, loc1=1, **kwargs)
p1.set_clip_on(False)
p2 = BboxConnector(inset_axes.bbox, rect, loc1=1, **kwargs)
p2.set_clip_on(False)
return pp, p1, p2
def zoomed_plot(self, x, xlim, ylim, zoom=2, text=None, textloc=[], fsize=18, cmap='bone'):
# This function allows one to create plots with "zoomed in" windows.
# The rectangle where one desires to zoom in is given using the xlim and ylim arguments.
# xlim and ylim should contain pixel values, e.g. if we haven an image of size 512 x 512 then
# xlim = [100, 150] and ylim = [100, 150] shows a zoomed in version of the pixels at locations in xlim and ylim.
color = 'orange'
fig, ax = plt.subplots()
ax.imshow(np.flipud(x), cmap=cmap, vmin=0.0, vmax=1.0, origin="lower")
ax.axis('off')
axins = zoomed_inset_axes(ax, zoom, loc=4)
axins.set_xlim(xlim[0], xlim[1])
axins.set_ylim(ylim[0], ylim[1])
self._mark_inset(ax, axins, fc='none', ec=color)
axins.imshow(np.flipud(x), cmap=cmap, vmin=0.0, vmax=1.0, origin="lower")
axins.patch.set_edgecolor(color)
axins.patch.set_linewidth(3)
axins.set_xticks([], [])
axins.set_yticks([], [])
# axins.axis('off')
if not (text is None):
ax.text(textloc[0], textloc[1], text, color=color, fontdict={'size': fsize}, transform=ax.transAxes)
pass
@staticmethod
def setup_path(path, verbose=0):
if not os.path.exists(path):
os.mkdir(path)
if verbose:
print("Created new path %s." % path)
| true |
| e8e361978849a99143b5fec93063b920de2ba0f5 | Python | andrezzadede/Curso_Python_Guanabara_Mundo_1 | /Exercicios/1Exercicio.py | UTF-8 | 244 | 3.390625 | 3 | ["MIT"] | permissive |
print ('Script Aula 1 - Desafio 1')
print ('Crie um script python que leia o nome de uma pessoa e mostra uma mensagemde boas vindas de acordo com o valor digitado')
nome = input ('Qual seu nome?')
print ('Seja bem vindo gafanhoto', nome)
| true |
| 6a2e32d90d8c3127007d8bbd935e043ddca0ef06 | Python | JoaoPedroBarros/exercicios-antigos-de-python | /Exercícios/Exercícios Mundo 1/ex009.py | UTF-8 | 488 | 3.46875 | 3 | ["MIT"] | permissive |
i = int(input('Digite um número:'))
print('A tabuada de {} é a seguinte:'.format(i))
print('\033[1;40m{}\033[m'.format(i*1))
print('\033[1;41m{}\033[m'.format(i*2))
print('\033[1;42m{}\033[m'.format(i*3))
print('\033[1;43m{}\033[m'.format(i*4))
print('\033[1;44m{}\033[m'.format(i*5))
print('\033[1;45m{}\033[m'.format(i*6))
print('\033[1;46m{}\033[m'.format(i*7))
print('\033[1;47m{}\033[m'.format(i*8))
print('\033[1;40m{}\033[m'.format(i*9))
print('\033[1;41m{}\033[m'.format(i*10))
| true |
| 0693d7d9c6d8cefafed41398c30c25afbef91a8a | Python | sophiepopow/ASD-AGH | /Graphs/TopologicalSorting.py | UTF-8 | 635 | 3.859375 | 4 | [] | no_license |
# Algorithm using DFS
def topologicalDFS(graph, vertex,visited, sortedNodesStack):
visited[vertex] = True
for neighbour in graph[vertex]:
if not visited[neighbour]:
topologicalDFS(graph,neighbour, visited, sortedNodesStack)
sortedNodesStack.insert(0,vertex)
def topologicalSort(graph):
sortedNodesStack = []
visited = [False]*len(graph)
for vertex in range(len(graph)):
if not visited[vertex]:
topologicalDFS(graph, vertex,visited,sortedNodesStack)
return sortedNodesStack
graph = [[2], [0,2], [], [0,1,4], [1,2], [0,4]]
print(topologicalSort(graph))
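# For this graph the printed order is [5, 3, 4, 1, 0, 2].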
| true |
| 507f9fbc65f16fc67fd844fde94824b571dca09b | Python | hyoseok-bang/leetcode | /215_kth_largest_element_in_an_array.py | UTF-8 | 801 | 3.4375 | 3 | [] | no_license |
import heapq
class Solution(object):
def findklargest_push(self, nums, k):
# Use heappush
heap = []
for n in nums:
heapq.heappush(heap, -n)
for _ in range(1,k):
heapq.heappop(heap)
return -heapq.heappop(heap)
def findklargest_heapify(self, nums, k):
# Use heapify
heapq.heapify(nums)
for _ in range(len(nums) - k): # Since heapq module is min-heap, pop n-kth element form the heap
heapq.heappop(nums)
return heapq.heappop(nums)
def findklargest_nlargest(self, nums, k): # Return 1 ~ k largest values from the array nums
return heapq.nlargest(k, nums)[-1]
def findklargest_sort(self, nums, k):
return sorted(nums, reverse=True)[k-1]
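# Illustrative check of the four variants (the sample array is an assumption, not from the problem):
nums_demo = [3, 2, 1, 5, 6, 4]
s = Solution()
print(s.findklargest_push(list(nums_demo), 2))      # 5
print(s.findklargest_heapify(list(nums_demo), 2))   # 5
print(s.findklargest_nlargest(list(nums_demo), 2))  # 5
print(s.findklargest_sort(list(nums_demo), 2))      # 5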
| true |
| 219360782b0d3e3910a10f7739c1249858025b7d | Python | jiravani/PythonProjects | /Project/driverscanner/Volume.py | UTF-8 | 708 | 3.078125 | 3 | [] | no_license |
class Volume:
total_volumes = 0
file_system = ""
def __init__(self, name, volume_name):
self.name = name
self.volume_name = volume_name
Volume.total_volumes += 1
print self.name + " " + "{:>10}".format(volume_name)
def get_volume_name(self):
print self.volume_name
def set_file_system(self, file_system):
Volume.file_system = file_system
def get_file_system(self):
return Volume.file_system
def get_total_volumes(self):
return Volume.total_volumes
| true |
| c9749a5159200f24aefcdc763978539476a4fddd | Python | torebre/essentia_test | /python/MicrophoneInput.py | UTF-8 | 827 | 2.546875 | 3 | [] | no_license |
import pyaudio
import wave
CHUNK = 256
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 10
WAVE_OUTPUT_FILENAME = 'output3.wav'
p = pyaudio.PyAudio()
print("Default input: ", p.get_default_input_device_info())
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
input_device_index=20,
frames_per_buffer=CHUNK)
print("Recording")
frames = []
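# read RATE/CHUNK buffers per second, for RECORD_SECONDS seconds in total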
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
print("Finished")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
| true |
| da87090e80b9157e9de7272813d053a5049715d2 | Python | meera-ramesh19/codewars | /homework/pycaptestanswers/pycaptest.py | UTF-8 | 1,169 | 4.3125 | 4 | [] | no_license |
print(2 ** 3 ** 2 ** 1)
a = 0
b = a ** 0
if b < a + 1:
c = 1
elif b == 1:
c = 2
else:
c = 3
print(a + b + c)
for i in range(1, 4, 2):
print("*")
# Example 2
for i in range(1, 4, 2):
print("*", end="")
for i in range(1, 4, 2):
print("*", end="**")
print("\n")
for i in range(1, 4, 2):
print("*", end="**")
print("***")
s = "Hello, Python!"
print(len(s),s[-14:15])
lst = [[c for c in range(r)] for r in range(3)]
print(lst)
for x in lst:
for y in x:
if y < 2:
print('*', end='')
def fun(a, b=0, c=5, d=1):
return a ** b ** c
print(fun(b=2, a=2, c=3))
# Example 1
x = 1
y = 0
z = x % y
print(z)
# Example 2
x = 1
y = 0
z = x / y
print(z)
x = 0
try:
print(x)
print(1 / x)
except ZeroDivisionError:
print("ERROR MESSAGE")
finally:
print(x + 1)
print(x + 2)
"""
class A:
def a(self):
print("A", end='')
class B(A):
def a(self):
print("B", end='')
class C(B):
def b(self):
print("B", end='')
a = A()
b = B()
c = C()
a.a()
b.a()
c.b()
try:
print("Hello")
raise Exception
print(1/0)
except Exception as e:
print(e)
"""
| true |
| 0820faa61dad8e69bb8e390ed1c38aae10641949 | Python | nthiery/sage-semigroups | /sage_semigroups/monoids/free_partially_commutative_left_regular_band.py | UTF-8 | 13,659 | 2.6875 | 3 | ["LicenseRef-scancode-warranty-disclaimer"] | no_license |
r"""
Free partially commutative left regular band
EXAMPLES::
sage: import sage_semigroups
Loading sage-semigroups and patching its features into Sage's library: ...
"""
from functools import reduce
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.structure.element_wrapper import ElementWrapper
from sage.misc.cachefunc import cached_method
from sage.graphs.graph import Graph
from sage.graphs.digraph import DiGraph
class FreePartiallyCommutativeLeftRegularBand(UniqueRepresentation, Parent):
r"""
TESTS::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: n = 6
sage: C = graphs.CycleGraph(n)
sage: M = FreePartiallyCommutativeLeftRegularBand(C)
sage: M.cardinality()
721
"""
@staticmethod
def __classcall__(cls, graph):
r"""
Normalize the input: convert vertices to instances of ``str`` and
delete edge labels.
"""
if isinstance(graph, Graph):
graph = graph.relabel(str, inplace=False)
vertices = tuple(graph.vertices())
edges = tuple((u, v) for (u, v, l) in graph.edges())
elif isinstance(graph, tuple) and len(graph) == 2:
vertices, edges = graph
else:
raise ValueError("incorrect input to __classcall__")
return super(FreePartiallyCommutativeLeftRegularBand, cls).__classcall__(cls, (vertices, edges))
def __init__(self, args):
r"""
The free partially commutative left regular band associated to the
(undirected) graph ``graph``.
This is the left regular band generated by the vertices of the graph
and relations `xy = yx` for every edge `(x,y)` of the graph.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({0:[],1:[],2:[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G); S
Free partially commutative left regular band on Graph on 3 vertices
sage: K = graphs.CompleteGraph(4)
sage: S = FreePartiallyCommutativeLeftRegularBand(K); S
Free partially commutative left regular band on Graph on 4 vertices
sage: TestSuite(S).run(skip=["_test_elements", "_test_pickling"])
"""
(vertices, edges) = args
graph = Graph()
graph.add_vertices(vertices)
graph.add_edges(edges)
self._graph = graph
from sage_semigroups.categories.finite_left_regular_bands import FiniteLeftRegularBands
Parent.__init__(self, category=FiniteLeftRegularBands().FinitelyGenerated())
def __iter__(self):
from sage.combinat.backtrack import TransitiveIdeal
return TransitiveIdeal(self.succ_generators(side="right"), [self.one()]).__iter__()
def associated_graph(self):
return self._graph
def _repr_(self):
return "Free partially commutative left regular band on %s" % (repr(self.associated_graph()),)
@cached_method
def one(self):
r"""
Returns the one of the monoid, as per :meth:`Monoids.ParentMethods.one`.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G)
sage: S.one()
''
"""
return self("")
@cached_method
def semigroup_generators(self):
r"""
Returns the generators of the semigroup.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G)
sage: S.semigroup_generators()
Family ('a', 'b', 'c', 'd')
"""
from sage.sets.family import Family
return Family([self(i) for i in self.associated_graph().vertices()])
def an_element(self):
r"""
Returns an element of the semigroup.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G)
sage: S.an_element()
'a'
sage: K = graphs.CompleteGraph(3)
sage: S = FreePartiallyCommutativeLeftRegularBand(K)
sage: S.an_element()
'0'
"""
return self.semigroup_generators()[0]
def product(self, x, y):
r"""
Returns the product of two elements of the semigroup.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G)
sage: S('a') * S('b')
'ab'
sage: S('a') * S('b') * S('a')
'ab'
sage: S('a') * S('a')
'a'
"""
return self._cached_product(x.value, y.value)
@cached_method
def _cached_product(self, x, y):
r"""
Returns the product of two elements of the semigroup.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G)
sage: S('a') * S('b')
'ab'
sage: S('a') * S('b') * S('a')
'ab'
sage: S('a') * S('a')
'a'
"""
xy = x + ''.join(c for c in y if c not in x)
return self.normal_form(xy)
@cached_method
def normal_form(self, w):
r"""
Map a word to its Foata-Cartier normal form.
TESTS::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G); S
Free partially commutative left regular band on Graph on 4 vertices
sage: S.normal_form(S('cdab'))
'cbda'
sage: S.normal_form(S('dab'))
'bda'
"""
return self.element_class(self, self._normalize_word(w))
def _normalize_word(self, w):
if isinstance(w, self.element_class):
w = w.value
F = self.vertex_sequence(w)
return ''.join(''.join(sorted(Fj)) for Fj in F)
def vertex_sequence(self, w):
r"""
Return the Foata-Cartier *V-sequence* for the word `w`. It is uniquely
defined.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: n = 4
sage: C = graphs.CycleGraph(n)
sage: M = FreePartiallyCommutativeLeftRegularBand(C)
sage: M.vertex_sequence('0123')
({'1', '0'}, {'3', '2'})
"""
if isinstance(w, self.element_class):
w = w.value
return reduce(self._vertex_sequence_action_by_letter, w, ())
def _vertex_sequence_action_by_letter(self, F, z):
r"""
DEFINITION: Suppose `F = (F_0, \dots, F_r)`.
(1) If `z` is connected to `F_r`, then `F \cdot z = (F_0, \dots, F_r, \{z\})`.
(2) Otherwise, let `j` be the smallest index such that `z` is not
connected to any set `F_j, F_{j+1}, \dots, F_r`, and define
`F \cdot z = (F_0, \dots, F_{j-1}, F_j \cup \{z\}, F_{j+1}, \dots)`.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: n = 4
sage: C = graphs.CycleGraph(n)
sage: M = FreePartiallyCommutativeLeftRegularBand(C)
sage: M.vertex_sequence('0123')
({'1', '0'}, {'3', '2'})
sage: F = ()
sage: for z in '0123':
....: F = M._vertex_sequence_action_by_letter(F, z)
....: print map(sorted, F)
[['0']]
[['0', '1']]
[['0', '1'], ['2']]
[['0', '1'], ['2', '3']]
"""
from sage.sets.set import Set
if len(F) == 0:
return (Set([z]),)
j = len(F) - 1
while j >= 0:
if self._is_connected(z, F[j]):
break
else:
j -= 1
if j + 1 == len(F):
return F + (Set([z]),)
else:
return F[:j + 1] + (F[j + 1].union(Set([z])),) + F[j + 2:]
def _is_connected(self, z, F_j):
r"""
Return whether `z` is connected to the set `F_j`.
"""
return z in F_j or any(not self._graph.has_edge(x, z) for x in F_j)
def _element_constructor_(self, x):
if isinstance(x, str):
return self.normal_form(x)
else:
return super(FreePartiallyCommutativeLeftRegularBand, self)._element_constructor_(x)
def quiver_v2(self):
# if hasattr(self, "_quiver_cache"):
# return self._quiver_cache
from sage.combinat.subset import Subsets
from sage.graphs.digraph import DiGraph
Q = DiGraph(multiedges=True)
Q.add_vertices(self.j_transversal())
g = self.associated_graph()
for U in Subsets(g.vertices()):
for W in Subsets(U):
h = g.subgraph(U.difference(W))
n = h.connected_components_number() - 1
if n > 0:
u = self.j_class_representative(self.j_class_index(self(''.join(U))))
w = self.j_class_representative(self.j_class_index(self(''.join(W))))
for i in range(n):
Q.add_edge(w, u, i)
return Q
# miscellaneous methods
def iter_from_free_lrb(self):
r"""
Iterate through elements of the semigroup by projection elements of the
free left regular band on the given generators.
"""
from free_left_regular_band import FreeLeftRegularBand
F = FreeLeftRegularBand(alphabet=tuple(x.value for x in self.semigroup_generators()))
seen = {}
for w in F:
x = self.normal_form(w)
if x not in seen:
seen[x] = True
yield x
def induced_orientation(self, w):
r"""
The induced subgraph of the complement of the underlying graph with an
orientation determined by `w`: an edge `(x,y)` is directed from `x` to
`y` if `x` comes before `y` in `w`.
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G); S
Free partially commutative left regular band on Graph on 4 vertices
sage: w = S('cdab')
sage: H = S.induced_orientation(w)
sage: H.vertices()
['a', 'b', 'c', 'd']
sage: H.edges()
[('c', 'a', None), ('c', 'b', None), ('c', 'd', None), ('d', 'a', None)]
sage: w = S('dab')
sage: H = S.induced_orientation(w)
sage: H.vertices()
['a', 'b', 'd']
sage: H.edges()
[('d', 'a', None)]
"""
pos = {wi: i for i, wi in enumerate(w.value)}
D = DiGraph()
D.add_vertices(pos)
for (u, v, l) in self.associated_graph().complement().edges():
if u in pos and v in pos:
if pos[u] < pos[v]:
D.add_edge(u, v)
else:
D.add_edge(v, u)
return D
class Element (ElementWrapper):
wrapped_class = str
__lt__ = ElementWrapper._lt_by_value
def __eq__(self, other):
r"""
EXAMPLES::
sage: from sage_semigroups.monoids.free_partially_commutative_left_regular_band import FreePartiallyCommutativeLeftRegularBand
sage: G = Graph({'a':['b'],'b':['d'],'c':[],'d':[]})
sage: S = FreePartiallyCommutativeLeftRegularBand(G)
sage: w, u = S('cdab'), S('cbda')
sage: w == w
True
sage: u == u
True
sage: w == u
True
sage: a, b = S('dab'), S('dba')
sage: a == b
True
sage: a == w
False
"""
return (self.__class__ is other.__class__ and
self.parent() == other.parent() and
self.value == other.value)
def length(self):
return len(self.value)
FPCLRB = FreePartiallyCommutativeLeftRegularBand
| true |
| 6e453f8488772fb30e002a5ba1e321c5c874d470 | Python | hyunjun/practice | /python/problem-string/determine_if_string_halves_are_alike.py | UTF-8 | 1,237 | 3.953125 | 4 | [] | no_license |
# https://leetcode.com/problems/determine-if-string-halves-are-alike
class Solution:
# runtime: 36 ms, 65.55%
# memory: 14.3 MB, 68.01%
def halvesAreAlike0(self, s: str) -> bool:
m, s, vowels, c = len(s) // 2, s.lower(), set(['a', 'e', 'i', 'o', 'u']), 0
for i in range(m):
if s[i] in vowels:
c += 1
for i in range(m, len(s)):
if s[i] in vowels:
c -= 1
return c == 0
# handle both cases in vowels instead of calling lower()
# runtime: 32 ms, 85.83%
# memory Usage: 14.5 MB, 9.61%
def halvesAreAlike(self, s: str) -> bool:
m, vowels, c = len(s) // 2, set(['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']), 0
for i in range(m):
if s[i] in vowels:
c += 1
for i in range(m, len(s)):
if s[i] in vowels:
c -= 1
return c == 0
solution = Solution()
data = [("book", True),
("textbook", False),
("MerryChristmas", False),
("AbCdEfGh", True),
]
for s, expect in data:
real = solution.halvesAreAlike(s)
print(f'{s} expect {expect} real {real} result {expect == real}')
| true |
| b8b5345bea1566f1420fecca0ba76c194ac9ba3b | Python | fox-io/udemy-100-days-of-python | /day_018.py | UTF-8 | 1,999 | 3.734375 | 4 | [] | no_license |
"""
-----
Day 18 Project: Turtle
-----
(c)2021 John Mann <gitlab.fox-io@foxdata.io>
"""
from turtle import Turtle, Screen
import random
# Shapes with 3-10 sides, random colors
# def main():
# t = Turtle()
# for sides in range(3, 11):
# t.color((random.random(), random.random(), random.random()))
# for _ in range(0, sides):
# t.forward(100)
# t.right(360/sides)
#
# screen = Screen()
# screen.exitonclick()
# # Random Walk
# def main():
# t = Turtle()
# t.width(5)
# t.speed(0)
#
# # Do 100 walks
# for walk_num in range(200):
#
# # Face random direction
# turns = random.randint(1, 4)
# # print(f"{walk_num}/100: Turning {turns} times.")
# for _ in range(turns):
# t.right(90)
#
# # Pick random color
# t.color((random.random(), random.random(), random.random()))
#
# # Draw walk
# t.forward(20)
#
# s = Screen()
# s.exitonclick()
# # Spirograph
# def main():
# t = Turtle()
# t.speed(0)
#
# # Make 36 circles, 10 degrees
# for deg in range(0, 360, 5):
# t.color((random.random(), random.random(), random.random()))
# t.setheading(float(deg))
# t.circle(100)
#
# s = Screen()
# s.exitonclick()
# Hirst Painting
def main():
start_row = 300
start_col = 300
circle_size = 10
circle_distance = 40
t = Turtle()
t.speed(0)
for row in range(10):
for col in range(10):
t.penup()
t.goto(((row + 1) * circle_distance) - start_row, ((col + 1) * circle_distance) - start_col)
r_color = (random.random(), random.random(), random.random())
t.color(r_color)
t.fillcolor(r_color)
t.setheading(180.0)
t.pendown()
t.begin_fill()
t.circle(circle_size)
t.end_fill()
t.penup()
s = Screen()
s.exitonclick()
if __name__ == "__main__":
main()
| true | 601683f955403de094466e36cbd659e9e15a09a3 | Python | mahagony/Pi-DigiAMP | /iqmute.py | UTF-8 | 1,153 | 2.8125 | 3 | [] | no_license |
#!/usr/bin/env python3
import argparse
import pigpio
class IQaudIO:
def __init__(self):
self.port = 22
self.pi = pigpio.pi()
self.pi.set_mode(self.port, pigpio.OUTPUT)
def output(self, value):
self.pi.write(self.port, value)
def mute(self):
self.output(0)
def unmute(self):
self.output(1)
def show(self):
if self.pi.read(self.port):
print("Pi-DigiAMP+ is in UNMUTE state")
else:
print("PI-DigiAMP+ is in MUTE state")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="mute/unmute IQAudIO Pi-DigiAMP+")
parser.add_argument("--mute", action="store_true", help="mute Pi-DigiAMP")
parser.add_argument("--unmute", action="store_true", help="unmute Pi-DigiAMP")
parser.add_argument("--show", action="store_true", help="show status")
args = parser.parse_args()
if args.show:
IQaudIO().show()
exit()
if args.unmute:
IQaudIO().unmute()
exit()
if args.mute:
IQaudIO().mute()
exit()
else:
IQaudIO().show()
exit()
| true | be86b8c745f240cba43fd5306935b6573cb58b9f | Python | awesomepotato2016/applied-cs | /Lab03.py | UTF-8 | 1,887 | 3.421875 | 3 | [] | no_license |
#Name: Karthik and Vivian
#Date: 10/04/2019
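# Simulates symmetric random walks on states 1..m (m = 2n+1), starting at the
# center state n+1, for n = 1..25 with 10000 trials each; records how often an
# early event (first step, or first edge reached) matches the side on which
# the walk eventually exits.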
from random import random
inp = int(raw_input("Enter 1 or 2: "))
# 1 gives the percent of Trials where First Step matches Final Direction
# 2 gives the percent of Trials where First Edge matches Final Direction
if inp == 1:
matchnum = []
for n in range(1,26):
matches = 0
for trial in range(10000):
m = 2*n +1
j = n+1
steps = 0
while 1<=j<=m:
r = random()
if r < 0.5:
j+=1
else:
j-=1
if steps==0:
veryFirstStep=(j-(n+1))
steps += 1
if veryFirstStep== 1 and j==m+1:
matches+=1
if veryFirstStep==-1 and j==0:
matches+=1
matchnum.append((100.0*matches)/10000)
md = open('matchdirection.txt','w')
for i in range(1,26):
md.write(str(i) + " "+str(matchnum[i-1]) + '\n')
md.close()
print 'Option 1 selected'
elif inp== 2:
matchnum = []
for n in range(1,26):
matches = 0
for trial in range(10000):
m = 2*n +1
j = n+1
times = 0
while 1<=j<=m:
r = random()
if r < 0.5:
j+=1
else:
j-=1
if (j==m or j ==1) and (times == 0):
firstEdge = j
times += 1
if firstEdge == m and j==m+1:
matches+=1
if firstEdge==1 and j==0:
matches+=1
matchnum.append((100.0*matches)/10000)
md = open('matchEdge.txt','w')
for i in range(1,26):
md.write(str(i) + " "+str(matchnum[i-1]) + '\n')
md.close()
print 'Option 2 selected'
| true | c551cb75cf1efb226c4570332abb6293a331ddea | Python | sjlee4108/robot-deliverer | /scripts/deleted.py | UTF-8 | 3,141 | 3 | 3 | [] | no_license |
# IGNORE: deleted files
# updates robot packages and weight accordingly
def add_package(self, package):
# adds package to robot and adds to robot weight
self.robot_packages.add(package)
self.robot_weight += self.package2weight[package]
def remove_package(self, package):
# removes package from robot and decreases robot weight
self.robot_packages.remove(package)
self.robot_weight -= self.package2weight[package]
# picks up packages from warehouse and updates information accordingly
def pickup(self, packages):
# store pertinent information to undo later if needed
warehouse = self.robot_location
picked_packages = set()
# add packages to robot, remove from warehouse
for package in packages:
self.add_package(package)
self.warehouse2packages[warehouse].remove(package)
picked_packages.add(package)
return warehouse, picked_packages
# moves robot to specified location and updates information accordingly
def travel(self, location):
# store pertinent information to undo later if needed
prev_location = self.robot_location
dropped_packages = set()
self.robot_location = location
if self.robot_location in self.recipient2packages:
# assumes that robot drops off all packages that can be dropped off if travelling to recipient
packages_to_drop = self.recipient2packages[location]
for package in packages_to_drop:
# drops off package if robot is carrying
if package in self.robot_packages:
self.remove_package(package)
dropped_packages.add(package)
return prev_location, dropped_packages
# undoes travel to new location
def undo_travel(self, old_location, dropped_packages):
# change location
self.robot_location = old_location
# robot "picks up" packages from where it dropped them off
for package in dropped_packages:
self.add_package(package)
# undoes package pickup from warehouse
def undo_pickup(self, warehouse, packages):
# add packages back to warehouse and remove from robot
for package in packages:
self.warehouse2packages[warehouse].add(package)
self.remove_package(package)
# pushes specified action
def push_action(self, action, parameter):
if action == PICKUP:
location, packages = self.pickup(parameter)
else:
location, packages = self.travel(parameter)
return location, packages
# pops (undoes) specified action
def pop_action(self, action, location, packages):
if action == PICKUP:
self.undo_pickup(location, packages)
else:
self.undo_travel(location, packages)
| true | 1465d4630d106f306cf54fb7958fc7cbada3fd15 | Python | tnyng/dnn | /sheet1/layers.py | UTF-8 | 738 | 3.109375 | 3 | [] | no_license |
import numpy
class Sequential:
def __init__(self,layers): self.layers = layers
def forward(self,Q):
for l in self.layers: Q = l.forward(Q)
return Q
def backward(self,DQ):
for l in self.layers[::-1]: DQ = l.backward(DQ)
return DQ
class Linear:
def __init__(self,W,B): self.W,self.B = W,B
def forward(self,A):
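        # keep a copy of the input for the backward pass (A*1 copies the array)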
self.A = A*1;
return A.dot(self.W)+self.B
def backward(self,DZ):
self.DW = numpy.dot(self.A.T,DZ)/len(self.A)
self.DB = DZ.mean(axis=0)
return DZ.dot(self.W.T)
class Tanh:
def forward(self,Z): self.A = numpy.tanh(Z); return self.A
def backward(self,DA): return DA*(1-self.A**2)
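
# Minimal usage sketch (not part of the original exercise file): build a tiny
# network and run one forward/backward pass. All shapes here are assumptions.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    W = rng.randn(4, 3) * 0.1              # weights for a 4 -> 3 linear layer
    B = numpy.zeros(3)                     # biases
    net = Sequential([Linear(W, B), Tanh()])
    X = rng.randn(5, 4)                    # batch of 5 examples, 4 features each
    Y = net.forward(X)                     # shape (5, 3)
    DX = net.backward(numpy.ones_like(Y))  # backpropagate a dummy gradient
    print(Y.shape, DX.shape)               # (5, 3) (5, 4)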
| true | 4f861ef88bd341e90107ee10438395d4d0620548 | Python | sebbacon/bbc-radio-schedules | /bbcradio/cli.py | UTF-8 | 2,415 | 3.0625 | 3 | ["MIT"] | permissive |
#!/usr/bin/env python3
# encoding: utf-8
"""
bbcradio.cli
------------
This module implements a CLI using the unofficial bbcradio API.
Copyright (c) 2021 Steven Maude
Licensed under the MIT License, see LICENSE.
"""
import argparse
import sys
import bbcradio
import requests
def list_stations():
"""Retrieves a list of radio stations and prints them.
Arguments:
None.
Returns:
None.
"""
stations = bbcradio.Stations()
for name, url in stations.urls.items():
print(f"{name} {url}")
def retrieve_schedule(station_name, date):
"""Retrieves and prints a schedule for a station on a given date.
Arguments:
station_name: string, radio station name.
date: string, date in YYYY-MM-DD format.
Returns:
None.
"""
stations = bbcradio.Stations()
station = stations.select(station_name)
schedule = bbcradio.Schedule(station, date)
try:
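        # accessing .programmes triggers the HTTP fetch, so errors surface here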
schedule.programmes
except (requests.exceptions.HTTPError, ValueError):
print(f"Unable to retrieve schedule for {station_name} on {date}.")
sys.exit(1)
print(f"Schedule for {schedule.station.name} on {schedule.date}")
for programme in schedule.programmes:
p = programme.info
print("*")
print(p["start_date"])
print(
"|".join(
[
p["series_name"] or "<No series name found>",
p["name"] or "<No programme name found>",
p["description"] or "<No programme description found>",
]
)
)
print(p["url"])
def main():
parser = argparse.ArgumentParser(prog="bbcradio_cli")
subparsers = parser.add_subparsers(
dest="subparser_name", help="sub-command help"
)
subparsers.add_parser("stations", help="list stations")
schedule_parser = subparsers.add_parser(
"schedule", help="retrieve a schedule"
)
schedule_parser.add_argument(
"station_name", help="name of a station, e.g. BBC Radio 1", type=str
)
schedule_parser.add_argument(
"date", help="date in YYYY-MM-DD format", type=str
)
args = parser.parse_args()
if args.subparser_name == "stations":
list_stations()
elif args.subparser_name == "schedule":
retrieve_schedule(args.station_name, args.date)
if __name__ == "__main__":
main()
| true | fc39cfc2ced142c49d507c0608f794e035a50215 | Python | haohaiwei/hhw | /code/python/pygame/game_functions.py | UTF-8 | 1,785 | 2.984375 | 3 | [] | no_license |
import sys
import pygame
from bullet import Bullet
'''def check_events():
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()'''
'''def check_events(ship):
for event in pygame.event.get():
if pygame.KEYDOWN==event.type:
if event.key == pygame.K_RIGHT:
ship.moving_right = True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_RIGHT:
ship.moving_right = False
elif event.key == pygame.K_LEFT:
ship.moving_left = False'''
def check_keydown_events(event, ai_settings, screen, ship,bullets):
if event.key == pygame.K_d:
ship.moving_right = True
elif event.key == pygame.K_a:
ship.moving_left = True
    elif event.key == pygame.K_s:
new_bullet = Bullet(ai_settings, screen, ship)
bullets.add(new_bullet)
def check_keyup_events(event, ship):
if event.key == pygame.K_d:
ship.moving_right = False
elif event.key == pygame.K_a:
ship.moving_left = False
def check_events(ai_settings,screen,ship,bullets):
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
check_keydown_events(event, ai_settings,screen,ship,bullets)
elif event.type == pygame.KEYUP:
check_keyup_events(event, ship)
def update_bullets(bullets):
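    # advance all bullets, then remove any that have left the top of the screen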
bullets.update()
for bullet in bullets.copy():
if bullet.rect.bottom <= 0:
bullets.remove(bullet)
def update_screen(ai_settings,screen, ship,bullets):
screen.fill(ai_settings.bg_color)
for bullet in bullets.sprites():
bullet.draw_bullet()
ship.blitme()
pygame.display.flip()
| true | f2dbcd98c9e4a15391c836cb89422e6b7e7108f7 | Python | franciscoquinones/Python | /clase4/clase/main.py | UTF-8 | 492 | 2.875 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 17 16:52:09 2016
@author: Josue
"""
# Importing .py files lets us use or reuse
# the functions or classes they define
import texto
import saludo
import primo
# Instantiate the saludo class from the texto module
juan=texto.saludo()
# This condition runs its block only when this file is
# executed directly; it does not run when the file
# is imported by another .py file
if __name__=="__main__":
print "yo"
| true | 2917236c53af22dd621316f71d969a1af9f8ab25 | Python | skylin008/uPython-switch-control | /relay.py | UTF-8 | 1,109 | 2.75 | 3 | ["MIT"] | permissive |
# Micropython PIR Switch Control
# Erni Tron ernitron@gmail.com
# Copyright (c) 2016
import time
from machine import Pin
# The Relay Switch Class
class Relay():
# D8 GPIO15 Pin(15)
# D5 GPIO14 Pin(14)
# D0 GPIO0 Pin(0)
def __init__(self, p=14, sensor='relay', place='nowhere', server=''):
self.pin = Pin(p, Pin.OUT)
self.pin.value(0)
self.status = 0
self.count = 0
self.sensor = sensor
self.place = place
self.server = server
def get(self):
return self.status
def set(self, position):
if position != self.status:
self.pin.value(position)
self.status = position
self.count += 1
return (self.status, self.count)
def toggle(self):
self.set(1 - self.status)
    def get_status(self):
        # note: the instance attribute self.status assigned in __init__
        # would shadow a method named "status", so use a distinct name
T = {}
T['place'] = self.place
T['server'] = self.server
T['switch'] = self.status
T['temp'] = str(self.status)
T['count'] = self.count
T['sensor'] = self.sensor
T['date'] = time.time()
return T
# Initialize
relay = None
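
# Example usage (a sketch; pin 14 and the defaults follow the class above):
#   relay = Relay(p=14, sensor='relay', place='kitchen')
#   relay.set(1)        # close the relay
#   relay.toggle()      # open it again
#   print(relay.get())  # -> 0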
| true | a3de0405b680edc6c64ee56b8945e0253681d762 | Python | alisonkozol/CellProfiler | /cellprofiler/modules/loaddata.py | UTF-8 | 67,790 | 3.3125 | 3 | ["BSD-3-Clause", "BSD-2-Clause"] | permissive |
'''<b>Load Data</b> loads text or numerical data to be associated with images, and
can also load images specified by file names.
<hr>
This module loads a file that supplies text or numerical data associated with
the images to be processed, e.g., sample names, plate names, well
identifiers, or even a list of image filenames to be processed in the analysis run.
<p><i>Disclaimer:</i> Please note that the Input modules (i.e., <b>Images</b>, <b>Metadata</b>, <b>NamesAndTypes</b>
and <b>Groups</b>) largely supersede this module. However, old pipelines loaded into
CellProfiler that contain this module will provide the option of preserving them;
these pipelines will operate exactly as before.</p>
<p>The module currently reads files in CSV (comma-separated values) format.
These files can be produced by saving a spreadsheet from Excel as
"Windows Comma Separated Values" file format.
The lines of the file represent the rows, and each field in a row is
separated by a comma. Text values may be optionally enclosed by double
quotes. The <b>LoadData</b> module uses the first row of the file as a header.
The fields in this row provide the labels for each column of data. Subsequent rows
provide the values for each image cycle.</p>
<p>There are many reasons why you might want to prepare a CSV file and load it
via <b>LoadData</b>. Below, we describe how the column nomenclature allows for special
functionality for some downstream modules:
<ul>
<li><i>Columns with any name:</i> Any data loaded via <b>LoadData</b> will be exported
as a per-image measurement along with CellProfiler-calculated data. This is a
convenient way for you to add data from your own sources to the files exported by
CellProfiler.</li>
<li><i>Columns whose name begins with Image_FileName or Image_PathName:</i>
A column whose name begins with "Image_FileName" or "Image_PathName" can be used to
supply the file name and path name (relative to the base folder) of an image that you want to load.
The image's name within CellProfiler appears afterward. For instance,
"Image_FileName_CY3" would supply the file name for the CY3-stained image, and
choosing the <i>Load images based on this data?</i> option allows the CY3 images to be
selected later in the pipeline. "Image_PathName_CY3" would supply the path names
for the CY3-stained images. The path name column is optional; if all image files are in the base
folder, this column is not needed.</li>
<li><i>Columns whose name begins with Image_ObjectsFileName or Image_ObjectsPathName:</i>
The behavior of these columns is identical to that of "Image_FileName" or "Image_PathName"
except that they are used to specify an image that you want to load as objects.</li>
<li><i>Columns whose name begins with Metadata:</i> A column whose name begins with
"Metadata" can be used to group or associate files loaded by <b>LoadData</b>.
<p>For instance, an experiment might require that images created on the same day
use an illumination correction function calculated from all images from that day,
and furthermore, that the date be captured in the file names for the individual image
sets and in a CSV file specifying the illumination correction functions.
<p>In this case, if the illumination correction images are loaded with the
<b>LoadData</b> module, the file should have a "Metadata_Date"
column which contains the date metadata tags. Similarly, if the individual images
are loaded using the <b>LoadImages</b> module, <b>LoadImages</b> should be set to extract the
<Date> metadata tag from the file names (see <b>LoadImages</b> for more details
on how to do so). The pipeline will then match the individual image with
their corresponding illumination correction functions based on matching
"Metadata_Date" tags. This is useful if the same data is associated with several
images (for example, multiple images obtained from a single well).</li>
<li><i>Columns whose name begins with Series or Frame:</i> A column whose name begins
with "Series" or "Frame" refers to CSVs containing information about image stacks or movies.
The name of the image within CellProfiler appears after an underscore character. For
example, "Frame_DNA" would supply the frame number for the movie/image stack file specified
by the "Image_FileName_DNA" and "Image_PathName_DNA" columns.
<p>Using a CSV for loading frames and/or series from a movie/image stack gives you more
flexibility in assembling image sets for operations that would be difficult or impossible
using the Input modules alone. For example, if you wanted to analyze a movie of 1,000 frames
by computing the difference between frames, you could create two
image columns in a CSV, one for loading frames 1,2,...,999, and the other for loading
frames 2,3,...,1000. In this case, CellProfiler would load the frame and its predecessor
for each cycle and <b>ImageMath</b> could be used to create the difference image for
downstream use; a minimal example CSV is shown after this list.</p>
</li>
<li><i>Columns that contain dose-response or positive/negative control information:</i>
The <b>CalculateStatistics</b> module can calculate metrics of assay quality for
an experiment if provided with information about which images represent positive
and negative controls and/or what dose of treatment has been used for which images.
This information is provided to <b>CalculateStatistics</b> via the <b>LoadData</b>
module, using particular formats described in the help for <b>CalculateStatistics</b>.
Again, using <b>LoadData</b> is useful if the same data is associated with several
images (for example, multiple images obtained from a single well).</li>
</ul>
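<p>For the movie-frames example above, a minimal CSV (the file and image names
here are hypothetical) might pair each frame with its predecessor:</p>
<tt><table border="0">
<tr><td>Image_FileName_Current,</td><td>Frame_Current,</td><td>Image_FileName_Previous,</td><td>Frame_Previous</td></tr>
<tr><td>"movie.tif",</td><td>2,</td><td>"movie.tif",</td><td>1</td></tr>
<tr><td>"movie.tif",</td><td>3,</td><td>"movie.tif",</td><td>2</td></tr>
</table></tt>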
<h5>Example CSV file:</h5>
<tt><table border="0">
<tr><td>Image_FileName_FITC,</td><td>Image_PathName_FITC,</td><td>Metadata_Plate,</td><td>Titration_NaCl_uM</td></tr><br>
<tr><td>"04923_d1.tif",</td><td>"2009-07-08",</td><td>"P-12345",</td><td>750</td></tr>
<tr><td>"51265_d1.tif",</td><td>"2009-07-09",</td><td>"P-12345",</td><td>2750</td></tr>
</table></tt>
After the first row of header information (the column names), the first
image-specific row specifies the file, "2009-07-08/04923_d1.tif" for the FITC
image (2009-07-08 is the name of the subfolder that contains the image,
relative to the Default Input Folder). The plate metadata is "P-12345" and
the NaCl titration used in the well is 750 uM. The second image-specific row
has the values "2009-07-09/51265_d1.tif", "P-12345" and 2750 uM. The NaCl
titration for the image is available for modules that use numeric metadata,
such as <b>CalculateStatistics</b>; "Titration" will be the category and "NaCl_uM"
will be the measurement.
<h5>Using metadata in LoadData</h5>
<p>If you would like to use the metadata-specific settings, please see <i>Help > General help > Using
metadata in CellProfiler</i> for more details on metadata usage and syntax. Briefly, <b>LoadData</b> can
use metadata provided by the input CSV file for grouping similar images together for the
analysis run and for metadata-specfic options in other modules; see the settings help for
<i>Group images by metadata</i> and, if that setting is selected, <i>Select metadata tags for grouping</i>
for details.</p>
<h6>Using MetaXpress-acquired images in CellProfiler</h6>
<p>To produce a CSV file containing image location and metadata from a <a href=
"http://www.moleculardevices.com/Products/Software/High-Content-Analysis/MetaXpress.html">MetaXpress</a>
imaging run, do the following:
<ul>
<li>Collect image locations from all files that match the string <i>.tif</i> in the desired image folder,
one row per image.</li>
<li>Split up the image pathname and filename into separate data columns for <b>LoadData</b> to read.</li>
<li>Remove data rows corresponding to:
<ul>
<li>Thumbnail images (do not contain imaging data)</li>
<li>Duplicate images (will cause metadata mismatching)</li>
<li>Corrupt files (will cause failure on image loading) </li>
</ul></li>
<li>The image data table may be linked to metadata contained in plate maps. These plate maps should
be stored as flat files, and may be updated periodically via queries to a laboratory information
management system (LIMS) database. </li>
<li>The complete image location and metadata is written to a CSV file where the headers can easily
be formatted to match <b>LoadData</b>'s input requirements (see column descriptions above). Single
plates split across multiple directories (which often occurs in MetaXpress) are written
to separate files and then merged, thereby removing the discontinuity.</li>
</ul>
For a GUI-based approach to performing this task, we suggest using <a href="http://accelrys.com/products/pipeline-pilot/">Pipeline
Pilot</a>.
<p>For more details on configuring CellProfiler
(and LoadData in particular) for a LIMS environment, please see our
<a href="https://github.com/CellProfiler/CellProfiler/wiki/Adapting-CellProfiler-to-a-LIMS-environment">wiki</a> on the subject.</p>
<h4>Available measurements</h4>
<ul>
<li><i>Pathname, Filename:</i> The full path and the filename of each image, if
image loading was requested by the user.</li>
<li>Per-image information obtained from the input file provided by the user.</li>
<li><i>Scaling:</i> The maximum possible intensity value for the image format.</li>
<li><i>Height, Width:</i> The height and width of the current image.</li>
</ul>
See also the <b>Input</b> modules, <b>LoadImages</b> and <b>CalculateStatistics</b>.
'''
import csv
import hashlib
import logging
import os
import sys
import numpy as np
logger = logging.getLogger(__name__)
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
import matplotlib.mlab
import cellprofiler.cpmodule as cpm
import cellprofiler.objects as cpo
import cellprofiler.measurements as cpmeas
import cellprofiler.settings as cps
from cellprofiler.settings import YES, NO
import cellprofiler.preferences as cpprefs
import identify as I
from cellprofiler.modules.loadimages import LoadImagesImageProvider
from cellprofiler.modules.loadimages import C_FILE_NAME, C_PATH_NAME, C_URL
from cellprofiler.modules.loadimages import C_SERIES, C_FRAME
from cellprofiler.modules.loadimages import C_OBJECTS_FILE_NAME
from cellprofiler.modules.loadimages import C_OBJECTS_PATH_NAME
from cellprofiler.modules.loadimages import C_OBJECTS_URL
from cellprofiler.measurements import C_OBJECTS_SERIES, C_OBJECTS_FRAME
from cellprofiler.modules.loadimages import C_MD5_DIGEST, C_SCALING
from cellprofiler.modules.loadimages import C_HEIGHT, C_WIDTH
from cellprofiler.modules.loadimages import bad_sizes_warning
from cellprofiler.modules.loadimages import convert_image_to_objects
from cellprofiler.modules.loadimages import pathname2url, url2pathname
from cellprofiler.preferences import standardize_default_folder_names, \
DEFAULT_INPUT_FOLDER_NAME, DEFAULT_OUTPUT_FOLDER_NAME, NO_FOLDER_NAME, \
ABSOLUTE_FOLDER_NAME, IO_FOLDER_CHOICE_HELP_TEXT
IMAGE_CATEGORIES = (C_URL, C_FILE_NAME, C_PATH_NAME)
OBJECTS_CATEGORIES = (C_OBJECTS_URL, C_OBJECTS_FILE_NAME, C_OBJECTS_PATH_NAME)
DIR_NONE = 'None'
DIR_OTHER = 'Elsewhere...'
DIR_ALL = [DEFAULT_INPUT_FOLDER_NAME, DEFAULT_OUTPUT_FOLDER_NAME,
NO_FOLDER_NAME, ABSOLUTE_FOLDER_NAME]
'''Reserve extra space in pathnames for batch processing name rewrites'''
PATH_PADDING = 20
'''Cache of header columns for files'''
header_cache = {}
###################################################################
#
# Helper functions for the header columns, Image_FileName_<image-name>
# and Image_PathName_<image-name>
#
# These need to be converted to FileName_<image-name> and
# PathName_<image-name> internally.
###################################################################
def header_to_column(field):
'''Convert the field name in the header to a column name
This function converts Image_FileName to FileName and
Image_PathName to PathName so that the output column names
in the database will be Image_FileName and Image_PathName
'''
for name in (C_PATH_NAME, C_FILE_NAME, C_URL,
C_OBJECTS_FILE_NAME, C_OBJECTS_PATH_NAME, C_OBJECTS_URL):
if field.startswith(cpmeas.IMAGE + '_' + name + '_'):
return field[len(cpmeas.IMAGE) + 1:]
return field
def is_path_name_feature(feature):
'''Return true if the feature name is a path name'''
return feature.startswith(C_PATH_NAME + '_')
def is_file_name_feature(feature):
'''Return true if the feature name is a file name'''
return feature.startswith(C_FILE_NAME + '_')
def is_url_name_feature(feature):
return feature.startswith(C_URL + "_")
def is_objects_path_name_feature(feature):
'''Return true if the feature name is the path to a labels file'''
return feature.startswith(C_OBJECTS_PATH_NAME + "_")
def is_objects_file_name_feature(feature):
'''Return true if the feature name is a labels file name'''
return feature.startswith(C_OBJECTS_FILE_NAME + "_")
def is_objects_url_name_feature(feature):
return feature.startswith(C_OBJECTS_URL + "_")
def get_image_name(feature):
'''Extract the image name from a feature name'''
if is_path_name_feature(feature):
return feature[len(C_PATH_NAME + '_'):]
if is_file_name_feature(feature):
return feature[len(C_FILE_NAME + '_'):]
if is_url_name_feature(feature):
return feature[len(C_URL + '_'):]
raise ValueError('"%s" is not a path feature or file name feature' % feature)
def get_objects_name(feature):
'''Extract the objects name from a feature name'''
if is_objects_path_name_feature(feature):
return feature[len(C_OBJECTS_PATH_NAME + "_"):]
if is_objects_file_name_feature(feature):
return feature[len(C_OBJECTS_FILE_NAME + "_"):]
if is_objects_url_name_feature(feature):
return feature[len(C_OBJECTS_URL + "_"):]
raise ValueError('"%s" is not a objects path feature or file name feature' % feature)
def make_path_name_feature(image):
'''Return the path name feature, given an image name
The path name feature is the name of the measurement that stores
the image's path name.
'''
return C_PATH_NAME + '_' + image
def make_file_name_feature(image):
'''Return the file name feature, given an image name
The file name feature is the name of the measurement that stores
the image's file name.
'''
return C_FILE_NAME + '_' + image
def make_objects_path_name_feature(objects_name):
'''Return the path name feature, given an object name
The path name feature is the name of the measurement that stores
the objects file path name.
'''
return C_OBJECTS_PATH_NAME + '_' + objects_name
def make_objects_file_name_feature(objects_name):
'''Return the file name feature, given an object name
The file name feature is the name of the measurement that stores
the objects file name.
'''
return C_OBJECTS_FILE_NAME + '_' + objects_name
class LoadData(cpm.CPModule):
module_name = "LoadData"
category = 'File Processing'
variable_revision_number = 6
def create_settings(self):
self.csv_directory = cps.DirectoryPath(
"Input data file location", allow_metadata=False, support_urls=True,
doc="""Select the folder containing the CSV file to be loaded.
%(IO_FOLDER_CHOICE_HELP_TEXT)s
<p>An additional option is the following:
<ul>
<li><i>URL</i>: Use the path part of a URL. For instance, an example .CSV file
is hosted at <i>http://cellprofiler.org/svnmirror/ExampleImages/ExampleSBSImages/1049_Metadata.csv</i>
To access this file, you would choose <i>URL</i> and enter
<i>http://cellprofiler.org/svnmirror/ExampleImages/ExampleSBSImages</i>
as the path location.</li>
</ul></p>""" % globals())
def get_directory_fn():
'''Get the directory for the CSV file name'''
return self.csv_directory.get_absolute_path()
def set_directory_fn(path):
dir_choice, custom_path = self.csv_directory.get_parts_from_path(path)
self.csv_directory.join_parts(dir_choice, custom_path)
self.csv_file_name = cps.FilenameText(
"Name of the file",
cps.NONE, doc="""
Provide the file name of the CSV file containing the data.""",
get_directory_fn=get_directory_fn,
set_directory_fn=set_directory_fn,
browse_msg="Choose CSV file",
exts=[("Data file (*.csv)", "*.csv"), ("All files (*.*)", "*.*")]
)
self.browse_csv_button = cps.DoSomething(
"Press to view CSV file contents", "View...", self.browse_csv)
self.wants_images = cps.Binary("Load images based on this data?", True, doc="""
Select <i>%(YES)s</i> to have <b>LoadData</b> load images using the <i>Image_FileName</i> field and the
<i>Image_PathName</i> fields (the latter is optional).""" % globals())
self.rescale = cps.Binary(
"Rescale intensities?", True, doc="""
This option determines whether image metadata should be
used to rescale the image's intensities. Some image formats
save the maximum possible intensity value along with the pixel data.
For instance, a microscope might acquire images using a 12-bit
A/D converter which outputs intensity values between zero and 4095,
but stores the values in a field that can take values up to 65535.
<p>Select <i>%(YES)s</i> to rescale the image intensity so that
saturated values are rescaled to 1.0 by dividing all pixels
in the image by the maximum possible intensity value. </p>
<p>Select <i>%(NO)s</i> to ignore the image metadata and rescale the image
to 0 – 1.0 by dividing by 255 or 65535, depending on the number
of bits used to store the image.</p>""" % globals())
self.image_directory = cps.DirectoryPath(
"Base image location",
dir_choices=DIR_ALL, allow_metadata=False, doc="""
The parent (base) folder where images are located. If images are
contained in subfolders, then the file you load with this module should
contain a column with path names relative to the base image folder (see
the general help for this module for more details). You can choose among the following options:
<ul>
<li><i>Default Input Folder:</i> Use the Default Input Folder.</li>
<li><i>Default Output Folder:</i> Use the Default Output Folder.</li>
<li><i>None:</i> You have an <i>Image_PathName</i> field that supplies an absolute path.</li>
<li><i>Elsewhere...</i>: Use a particular folder you specify.</li>
</ul>""")
self.wants_image_groupings = cps.Binary(
"Group images by metadata?", False, doc="""
Select <i>%(YES)s</i> to break the image sets in an experiment into groups
that can be processed by different nodes on a computing cluster. Each set of
files that share your selected metadata tags will be processed
together. See <b>CreateBatchFiles</b> for details on submitting a
CellProfiler pipeline to a computing cluster for processing.""" % globals())
self.metadata_fields = cps.MultiChoice(
"Select metadata tags for grouping", None, doc="""
<i>(Used only if images are to be grouped by metadata)</i><br>
Select the tags by which you want to group the image files here. You can select multiple tags. For
example, if a set of images had metadata for "Run", "Plate", "Well", and
"Site", selecting <i>Run</i> and <i>Plate</i> will create groups containing
images that share the same [<i>Run</i>,<i>Plate</i>] pair of tags.""")
self.wants_rows = cps.Binary(
"Process just a range of rows?",
False, doc="""
Select <i>%(YES)s</i> if you want to process a subset of the rows in the CSV file.
Rows are numbered starting at 1 (but do not count the header line).
<b>LoadData</b> will process up to and including the end row.""" % globals())
self.row_range = cps.IntegerRange(
"Rows to process",
(1, 100000), 1, doc="""
<i>(Used only if a range of rows is to be specified)</i><br>
Enter the row numbers of the first and last row to be processed.""")
def do_reload():
global header_cache
header_cache = {}
try:
self.open_csv()
except:
pass
self.clear_cache_button = cps.DoSomething(
"Reload cached information", "Reload", do_reload, doc="""
Press this button to reload header information saved inside
CellProfiler. <b>LoadData</b> caches information about
your .csv file in its memory for efficiency. The
information is reloaded if a modification is detected.
<b>LoadData</b> might fail to detect a modification on a
file accessed over the network and will fail to detect
modifications on files accessed through HTTP or FTP. In
this case, you will have to use this button to reload the
header information after changing the file.
<p>This button will never destroy any information on
disk. It is always safe to press it.</p>
""")
def settings(self):
return [self.csv_directory,
self.csv_file_name, self.wants_images, self.image_directory,
self.wants_rows,
self.row_range, self.wants_image_groupings,
self.metadata_fields, self.rescale]
def validate_module(self, pipeline):
csv_path = self.csv_path
if self.csv_directory.dir_choice != cps.URL_FOLDER_NAME:
if not os.path.isfile(csv_path):
raise cps.ValidationError("No such CSV file: %s" % csv_path,
self.csv_file_name)
try:
self.open_csv()
except IOError, e:
import errno
if e.errno == errno.EWOULDBLOCK:
raise cps.ValidationError("Another program (Excel?) is locking the CSV file %s." %
self.csv_path, self.csv_file_name)
else:
raise cps.ValidationError("Could not open CSV file %s (error: %s)" %
(self.csv_path, e), self.csv_file_name)
try:
self.get_header()
except Exception, e:
raise cps.ValidationError(
"The CSV file, %s, is not in the proper format. See this module's help for details on CSV format. (error: %s)" %
(self.csv_path, e), self.csv_file_name)
def validate_module_warnings(self, pipeline):
'''Check for potentially dangerous settings
The best practice is to have a single LoadImages or LoadData module.
'''
from cellprofiler.modules.loadimages import LoadImages
for module in pipeline.modules():
if id(module) == id(self):
return
if isinstance(module, LoadData):
raise cps.ValidationError(
"Your pipeline has two or more LoadData modules.\n"
"The best practice is to have only one LoadData module.\n"
"Consider combining the CSV files from all of your\n"
"LoadData modules into one and using only a single\n"
"LoadData module", self.csv_file_name)
if isinstance(module, LoadImages):
raise cps.ValidationError(
"Your pipeline has a LoadImages and LoadData module.\n"
"The best practice is to have only a single LoadImages\n"
"or LoadData module. This LoadData module will match its\n"
"metadata against that of the previous LoadImages module\n"
"in an attempt to reconcile the two modules' image\n"
"set lists and this can result in image sets with\n"
"missing images or metadata.", self.csv_file_name)
# check that user has selected fields for grouping if grouping is turned on
if self.wants_image_groupings.value and (len(self.metadata_fields.selections) == 0):
raise cps.ValidationError("Group images by metadata is True, but no metadata "
"tags have been chosen for grouping.",
self.metadata_fields)
def visible_settings(self):
result = [self.csv_directory, self.csv_file_name,
self.browse_csv_button]
if self.csv_directory.dir_choice == cps.URL_FOLDER_NAME:
result += [self.clear_cache_button]
self.csv_file_name.text = "URL of the file"
self.csv_file_name.set_browsable(False)
else:
self.csv_file_name.text = "Name of the file"
self.csv_file_name.set_browsable(True)
result += [self.wants_images]
if self.wants_images.value:
result += [self.rescale, self.image_directory,
self.wants_image_groupings]
if self.wants_image_groupings.value:
result += [self.metadata_fields]
try:
fields = [field[len("Metadata_"):]
for field in self.get_header()
if field.startswith("Metadata_")]
if self.has_synthetic_well_metadata():
fields += [cpmeas.FTR_WELL]
self.metadata_fields.choices = fields
except:
self.metadata_fields.choices = ["No CSV file"]
result += [self.wants_rows]
if self.wants_rows.value:
result += [self.row_range]
return result
def convert(self):
data = matplotlib.mlab.csv2rec(self.csv_path)
src_dsc = data['source_description']
def uniquewaves(seq):
output = []
for x in seq:
if x not in output:
output.append(x)
return output
waves = uniquewaves(src_dsc)
pathname = []
filename = []
wave_pnames = []
wave_fnames = []
for i in range(len(waves)):
mask = data['source_description'] == waves[i]
pathname.append(data[mask]['file_path'])
filename.append(data[mask]['file_name'])
wave_pnames.append('PathName_%s' % (waves[i].strip('"')))
wave_fnames.append('FileName_%s' % (waves[i].strip('"')))
for i in range(len(waves)):
if len(filename[i]) != len(filename[0]):
raise RuntimeError("Image %s has %d files, but image %s has %d files" %
(wave_fnames[i], len(filename[i]), wave_fnames[0], len(filename[0])))
def metadatacols(header):
output = []
for h in header:
if not h.startswith('file_'):
if isinstance(h, unicode):
output.append(h.encode("utf-8"))
else:
output.append(h)
return output
def data_for_one_wave(data):
mask = data['source_description'] == waves[0]
data_onewave = data[mask]
return data_onewave
header = data.dtype.names
metadata_names = metadatacols(header)
data_onewave = data_for_one_wave(data)
strdate = []
for date in data_onewave['date_created']:
strdate += [str(date)]
metadata_names.remove('source_description')
metadata_names.remove('date_created')
data_onewave_nofilepaths = matplotlib.mlab.rec_keep_fields(data_onewave, metadata_names)
metadata_names = ['Metadata_' + m for m in metadata_names]
data_onewave_nofilepaths.dtype.names = metadata_names
final_data = data_onewave_nofilepaths
final_data = matplotlib.mlab.rec_append_fields(final_data, 'Metadata_date_created', strdate)
for i in range(len(waves)):
final_data = matplotlib.mlab.rec_append_fields(final_data, wave_pnames[i], pathname[i])
final_data = matplotlib.mlab.rec_append_fields(final_data, wave_fnames[i], filename[i])
return final_data
@property
def csv_path(self):
'''The path and file name of the CSV file to be loaded'''
if cpprefs.get_data_file() is not None:
return cpprefs.get_data_file()
if self.csv_directory.dir_choice == cps.URL_FOLDER_NAME:
return self.csv_file_name.value
path = self.csv_directory.get_absolute_path()
return os.path.join(path, self.csv_file_name.value)
@property
def image_path(self):
return self.image_directory.get_absolute_path()
@property
def legacy_field_key(self):
'''The key to use to retrieve the metadata from the image set list'''
return 'LoadTextMetadata_%d' % self.module_num
def get_cache_info(self):
'''Get the cached information for the data file'''
global header_cache
entry = header_cache.get(self.csv_path, dict(ctime=0))
if cpprefs.is_url_path(self.csv_path):
if not header_cache.has_key(self.csv_path):
header_cache[self.csv_path] = entry
return entry
ctime = os.stat(self.csv_path).st_ctime
if ctime > entry["ctime"]:
entry = header_cache[self.csv_path] = {}
entry["ctime"] = ctime
return entry
def open_csv(self, do_not_cache=False):
'''Open the csv file or URL, returning a file descriptor'''
global header_cache
if cpprefs.is_url_path(self.csv_path):
if not header_cache.has_key(self.csv_path):
header_cache[self.csv_path] = {}
entry = header_cache[self.csv_path]
if entry.has_key("URLEXCEPTION"):
raise entry["URLEXCEPTION"]
if entry.has_key("URLDATA"):
fd = StringIO(entry["URLDATA"])
else:
if do_not_cache:
raise RuntimeError('Need to fetch URL manually.')
import urllib2
try:
url_fd = urllib2.urlopen(self.csv_path)
except Exception, e:
entry["URLEXCEPTION"] = e
raise e
fd = StringIO()
while True:
text = url_fd.read()
if len(text) == 0:
break
fd.write(text)
fd.seek(0)
entry["URLDATA"] = fd.getvalue()
return fd
else:
return open(self.csv_path, 'rb')
def browse_csv(self):
import wx
from cellprofiler.gui import get_cp_icon
try:
fd = self.open_csv()
except:
wx.MessageBox("Could not read %s" % self.csv_path)
return
reader = csv.reader(fd)
header = reader.next()
frame = wx.Frame(wx.GetApp().frame, title=self.csv_path)
sizer = wx.BoxSizer(wx.VERTICAL)
frame.SetSizer(sizer)
list_ctl = wx.ListCtrl(frame, style=wx.LC_REPORT)
sizer.Add(list_ctl, 1, wx.EXPAND)
for i, field in enumerate(header):
list_ctl.InsertColumn(i, field)
for line in reader:
list_ctl.Append([unicode(s, 'utf8') if isinstance(s, str) else s
for s in line[:len(header)]])
frame.SetMinSize((640, 480))
frame.SetIcon(get_cp_icon())
frame.Fit()
frame.Show()
def get_header(self, do_not_cache=False):
'''Read the header fields from the csv file
Open the csv file indicated by the settings and read the fields
of its first line. These should be the measurement columns.
'''
entry = self.get_cache_info()
if entry.has_key("header"):
return entry["header"]
fd = self.open_csv(do_not_cache=do_not_cache)
reader = csv.reader(fd)
header = reader.next()
fd.close()
if header[0].startswith('ELN_RUN_ID'):
try:
data = self.convert()
except Exception, e:
raise RuntimeError("%s" % e)
header = data.dtype.names
entry["header"] = [header_to_column(column) for column in header]
return entry["header"]
def get_image_names(self, do_not_cache=False):
header = self.get_header(do_not_cache=do_not_cache)
image_names = set([
get_image_name(field)
for field in header
if is_file_name_feature(field) or is_url_name_feature(field)])
return list(image_names)
def get_object_names(self, do_not_cache=False):
header = self.get_header(do_not_cache=do_not_cache)
object_names = set([get_objects_name(field)
for field in header
if is_objects_file_name_feature(field) or
is_objects_url_name_feature(field)])
return list(object_names)
def other_providers(self, group):
'''Get name providers from the CSV header'''
if group == 'imagegroup' and self.wants_images.value:
try:
# do not load URLs automatically
return self.get_image_names(do_not_cache=True)
except Exception, e:
return []
elif group == 'objectgroup' and self.wants_images:
try:
# do not load URLs automatically
return self.get_object_names(do_not_cache=True)
except Exception, e:
return []
return []
def is_image_from_file(self, image_name):
'''Return True if LoadData provides the given image name'''
providers = self.other_providers('imagegroup')
return image_name in providers
def is_load_module(self):
'''LoadData can make image sets so it's a load module'''
return True
def prepare_run(self, workspace):
        '''Load the CSV file at the outset and populate the image set list'''
        pipeline = workspace.pipeline
        m = workspace.measurements
        assert isinstance(m, cpmeas.Measurements)
if pipeline.in_batch_mode():
return True
fd = self.open_csv()
reader = csv.reader(fd)
header = [header_to_column(column) for column in reader.next()]
if header[0].startswith('ELN_RUN_ID'):
reader = self.convert()
header = list(reader.dtype.names)
if self.wants_rows.value:
# skip initial rows
rows = []
for idx, row in enumerate(reader):
if idx + 1 < self.row_range.min:
continue
if idx + 1 > self.row_range.max:
break
if len(row) == 0:
continue
row = [unicode(s, 'utf8') if isinstance(s, str) else s
for s in row]
if len(row) != len(header):
raise ValueError("Row # %d has the wrong number of elements: %d. Expected %d" %
(i, len(row), len(header)))
rows.append(row)
else:
rows = [[unicode(s, 'utf8') if isinstance(s, str) else s
for s in row] for row in reader
if len(row) > 0]
fd.close()
#
# Check for correct # of columns
#
n_fields = len(header)
for i, row in enumerate(rows):
if len(row) < n_fields:
text = ('Error on line %d of %s.\n'
'\n"%s"\n'
                        '%d fields found, expected %d') % (
i + 2, self.csv_file_name.value,
','.join(row),
len(row), n_fields)
raise ValueError(text)
elif len(row) > n_fields:
del row[n_fields:]
#
# Find the metadata, object_name and image_name columns
#
metadata_columns = {}
object_columns = {}
image_columns = {}
well_row_column = well_column_column = well_well_column = None
for i, column in enumerate(header):
if column.find("_") == -1:
category = ""
feature = column
else:
category, feature = column.split("_", 1)
if category in IMAGE_CATEGORIES:
if not image_columns.has_key(feature):
image_columns[feature] = []
image_columns[feature].append(i)
elif category in OBJECTS_CATEGORIES:
if not object_columns.has_key(feature):
object_columns[feature] = []
object_columns[feature].append(i)
else:
metadata_columns[column] = i
if category == cpmeas.C_METADATA:
if feature.lower() == cpmeas.FTR_WELL.lower():
well_well_column = i
elif cpmeas.is_well_row_token(feature):
well_row_column = i
elif cpmeas.is_well_column_token(feature):
well_column_column = i
if (well_row_column is not None and well_column_column is not None and
well_well_column is None):
# add a synthetic well column
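            # e.g. row "A" and column "01" combine into well "A01"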
metadata_columns[cpmeas.M_WELL] = len(header)
header.append(cpmeas.M_WELL)
for row in rows:
row.append(row[well_row_column] + row[well_column_column])
if self.wants_images:
#
# Add synthetic object and image columns
#
if self.image_directory.dir_choice == cps.NO_FOLDER_NAME:
path_base = ""
else:
path_base = self.image_path
for d, url_category, file_name_category, path_name_category in (
(image_columns, C_URL, C_FILE_NAME, C_PATH_NAME),
(object_columns, C_OBJECTS_URL, C_OBJECTS_FILE_NAME,
C_OBJECTS_PATH_NAME)):
for name in d.keys():
url_column = file_name_column = path_name_column = None
for k in d[name]:
if header[k].startswith(url_category):
url_column = k
elif header[k].startswith(file_name_category):
file_name_column = k
elif header[k].startswith(path_name_category):
path_name_column = k
if url_column is None:
if file_name_column is None:
raise ValueError(
("LoadData needs a %s_%s column to match the "
"%s_%s column") % (file_name_category, name,
path_name_category, name))
#
# Add URL column
#
d[name].append(len(header))
url_feature = "_".join((url_category, name))
header.append(url_feature)
for row in rows:
if path_name_column is None:
fullname = os.path.join(path_base,
row[file_name_column])
else:
row_path_name = os.path.join(
path_base, row[path_name_column])
fullname = os.path.join(
row_path_name, row[file_name_column])
row[path_name_column] = row_path_name
url = pathname2url(fullname)
row.append(url)
if path_name_column is None:
#
# Add path column
#
d[name].append(len(header))
path_feature = "_".join((path_name_category, name))
header.append(path_feature)
for row in rows:
row.append(path_base)
elif path_name_column is None and file_name_column is None:
#
# If the .csv just has URLs, break the URL into
# path and file names
#
path_feature = "_".join((path_name_category, name))
path_name_column = len(header)
header.append(path_feature)
file_name_feature = "_".join((file_name_category, name))
file_name_column = len(header)
header.append(file_name_feature)
for row in rows:
url = row[url_column]
idx = url.rfind("/")
if idx == -1:
idx = url.rfind(":")
if idx == -1:
row += ["", url]
else:
row += [url[:(idx + 1)], url[(idx + 1):]]
else:
row += [url[:idx], url[(idx + 1):]]
column_type = {}
for column in self.get_measurement_columns(pipeline):
column_type[column[1]] = column[2]
previous_column_types = dict([
(c[1], c[2]) for c in pipeline.get_measurement_columns(self)
if c[0] == cpmeas.IMAGE])
#
# Arrange the metadata into columns
#
columns = {}
for index, feature in enumerate(header):
c = []
columns[feature] = c
for row in rows:
value = row[index]
if column_type.has_key(feature):
datatype = column_type[feature]
else:
datatype = previous_column_types[feature]
if datatype == cpmeas.COLTYPE_INTEGER:
value = int(value)
elif datatype == cpmeas.COLTYPE_FLOAT:
value = float(value)
c.append(value)
if len(metadata_columns) > 0:
# Reorder the rows by matching metadata against previous metadata
# (for instance, to assign metadata values to images from
# loadimages)
#
image_numbers = m.match_metadata(
metadata_columns.keys(),
[columns[k] for k in metadata_columns.keys()])
image_numbers = np.array(image_numbers, int).flatten()
max_image_number = np.max(image_numbers)
new_columns = {}
for key, values in columns.iteritems():
new_values = [None] * max_image_number
for image_number, value in zip(image_numbers, values):
new_values[image_number - 1] = value
new_columns[key] = new_values
columns = new_columns
for feature, values in columns.iteritems():
m.add_all_measurements(cpmeas.IMAGE, feature, values)
if self.wants_image_groupings and \
len(self.metadata_fields.selections) > 0:
keys = ["_".join((cpmeas.C_METADATA, k))
for k in self.metadata_fields.selections]
m.set_grouping_tags(keys)
return True
def prepare_to_create_batch(self, workspace, fn_alter_path):
'''Prepare to create a batch file
This function is called when CellProfiler is about to create a
file for batch processing. It will pickle the image set list's
"legacy_fields" dictionary. This callback lets a module prepare for
saving.
pipeline - the pipeline to be saved
image_set_list - the image set list to be saved
fn_alter_path - this is a function that takes a pathname on the local
host and returns a pathname on the remote host. It
handles issues such as replacing backslashes and
mapping mountpoints. It should be called for every
pathname stored in the settings or legacy fields.
'''
if self.wants_images:
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
image_numbers = m.get_image_numbers()
all_image_features = m.get_feature_names(cpmeas.IMAGE)
for url_category, file_category, path_category, names in (
(C_URL, C_FILE_NAME, C_PATH_NAME, self.get_image_names()),
(C_OBJECTS_URL, C_OBJECTS_FILE_NAME, C_OBJECTS_PATH_NAME,
self.get_object_names())):
for name in names:
url_feature = "_".join((url_category, name))
path_feature = "_".join((path_category, name))
if path_feature not in all_image_features:
path_feature = None
file_feature = "_".join((file_category, name))
if file_feature not in all_image_features:
file_feature = None
urls = m.get_measurement(cpmeas.IMAGE,
url_feature,
image_set_number=image_numbers)
for image_number, url in zip(image_numbers, urls):
url = url.encode("utf-8")
if url.lower().startswith("file:"):
fullname = url2pathname(url)
fullname = fn_alter_path(fullname)
path, filename = os.path.split(fullname)
url = unicode(pathname2url(fullname), "utf-8")
m.add_measurement(cpmeas.IMAGE, url_feature, url,
image_set_number=image_number)
if file_feature is not None:
m.add_measurement(
cpmeas.IMAGE, file_feature,
filename,
image_set_number=image_number)
if path_feature is not None:
m.add_measurement(
cpmeas.IMAGE, path_feature,
path, image_set_number=image_number)
self.csv_directory.alter_for_create_batch_files(fn_alter_path)
self.image_directory.alter_for_create_batch_files(fn_alter_path)
return True
def fetch_provider(self, name, measurements, is_image_name=True):
path_base = self.image_path
if is_image_name:
url_feature = C_URL + "_" + name
series_feature = C_SERIES + "_" + name
frame_feature = C_FRAME + "_" + name
else:
url_feature = C_OBJECTS_URL + "_" + name
series_feature = C_OBJECTS_SERIES + "_" + name
frame_feature = C_OBJECTS_FRAME + "_" + name
url = measurements.get_measurement(cpmeas.IMAGE, url_feature)
url = url.encode('utf-8')
full_filename = url2pathname(url)
path, filename = os.path.split(full_filename)
if measurements.has_feature(cpmeas.IMAGE, series_feature):
series = measurements[cpmeas.IMAGE, series_feature]
else:
series = None
if measurements.has_feature(cpmeas.IMAGE, frame_feature):
frame = measurements[cpmeas.IMAGE, frame_feature]
else:
frame = None
return LoadImagesImageProvider(
name, path, filename,
rescale=self.rescale.value and is_image_name,
series=series,
index=frame)
def run(self, workspace):
'''Populate the images and objects'''
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
image_set = workspace.image_set
object_set = workspace.object_set
statistics = []
features = [x[1] for x in
self.get_measurement_columns(workspace.pipeline)
if x[0] == cpmeas.IMAGE]
if self.wants_images:
#
# Load the image. Calculate the MD5 hash of every image
#
image_size = None
for image_name in self.other_providers('imagegroup'):
provider = self.fetch_provider(image_name, m)
image_set.get_providers().append(provider)
image = image_set.get_image(image_name)
pixel_data = image.pixel_data
m.add_image_measurement("_".join((C_MD5_DIGEST, image_name)),
provider.get_md5_hash(m))
m.add_image_measurement("_".join((C_SCALING, image_name)),
image.scale)
m.add_image_measurement("_".join((C_HEIGHT, image_name)),
int(pixel_data.shape[0]))
m.add_image_measurement("_".join((C_WIDTH, image_name)),
int(pixel_data.shape[1]))
if image_size is None:
image_size = tuple(pixel_data.shape[:2])
first_filename = image.file_name
elif tuple(pixel_data.shape[:2]) != image_size:
warning = bad_sizes_warning(image_size, first_filename,
pixel_data.shape, image.file_name)
if self.show_window:
workspace.display_data.warning = warning
else:
print warning
#
# Process any object tags
#
objects_names = self.get_object_names()
for objects_name in objects_names:
provider = self.fetch_provider(
objects_name, m, is_image_name=False)
image = provider.provide_image(workspace.image_set)
pixel_data = convert_image_to_objects(image.pixel_data)
o = cpo.Objects()
o.segmented = pixel_data
object_set.add_objects(o, objects_name)
I.add_object_count_measurements(m, objects_name, o.count)
I.add_object_location_measurements(m, objects_name, pixel_data)
for feature_name in sorted(features):
value = m.get_measurement(cpmeas.IMAGE, feature_name)
statistics.append((feature_name, value))
if self.show_window:
workspace.display_data.statistics = statistics
def display(self, workspace, figure):
if hasattr(workspace.display_data, "warning"):
from cellprofiler.gui.errordialog import show_warning
show_warning("Images have different sizes",
workspace.display_data.warning,
cpprefs.get_show_report_bad_sizes_dlg,
cpprefs.set_show_report_bad_sizes_dlg)
figure.set_subplots((1, 1))
figure.subplot_table(0, 0, workspace.display_data.statistics)
def get_groupings(self, workspace):
'''Return the image groupings of the image sets
See CPModule for documentation
'''
if (self.wants_images.value and
self.wants_image_groupings.value and
len(self.metadata_fields.selections) > 0):
keys = ["_".join((cpmeas.C_METADATA, k))
for k in self.metadata_fields.selections]
if len(keys) == 0:
return None
m = workspace.measurements
assert isinstance(m, cpmeas.Measurements)
return keys, m.get_groupings(keys)
return None
def get_measurement_columns(self, pipeline):
'''Return column definitions for measurements output by this module'''
entry = None
try:
entry = self.get_cache_info()
if entry.has_key("measurement_columns"):
return entry["measurement_columns"]
fd = self.open_csv()
reader = csv.reader(fd)
header = [header_to_column(x) for x in reader.next()]
if header[0].startswith('ELN_RUN_ID'):
reader = self.convert()
header = reader.dtype.names
except:
if entry is not None:
entry["measurement_columns"] = []
return []
previous_columns = pipeline.get_measurement_columns(self)
previous_fields = set([x[1] for x in previous_columns
if x[0] == cpmeas.IMAGE])
already_output = [x in previous_fields for x in header]
coltypes = [cpmeas.COLTYPE_INTEGER] * len(header)
#
# Make sure the well_column column type is a string
#
for i in range(len(header)):
if (header[i].startswith(cpmeas.C_METADATA + "_") and
cpmeas.is_well_column_token(header[i].split("_")[1])):
coltypes[i] = cpmeas.COLTYPE_VARCHAR
if any([header[i].startswith(x)
for x in (C_PATH_NAME, C_FILE_NAME, C_OBJECTS_FILE_NAME,
C_OBJECTS_PATH_NAME, C_URL, C_OBJECTS_URL)]):
coltypes[i] = cpmeas.COLTYPE_VARCHAR
collen = [0] * len(header)
key_is_path_or_file_name = [
(key.startswith(C_PATH_NAME) or
key.startswith(C_FILE_NAME) or
key.startswith(C_OBJECTS_FILE_NAME) or
key.startswith(C_OBJECTS_PATH_NAME)) for key in header]
key_is_path_or_url = [
(key.startswith(C_PATH_NAME) or
key.startswith(C_OBJECTS_PATH_NAME) or
key.startswith(C_URL) or
key.startswith(C_OBJECTS_URL)) for key in header]
for row in reader:
if len(row) > len(header):
row = row[:len(header)]
for index, field in enumerate(row):
if already_output[index]:
continue
if (not self.wants_images) and key_is_path_or_file_name[index]:
continue
try:
len_field = len(field)
except TypeError:
field = str(field)
len_field = len(field)
if key_is_path_or_url[index]:
# Account for possible rewrite of the pathname
# in batch data
len_field = max(cpmeas.PATH_NAME_LENGTH,
len_field + PATH_PADDING)
if coltypes[index] != cpmeas.COLTYPE_VARCHAR:
ldtype = get_loaddata_type(field)
if coltypes[index] == cpmeas.COLTYPE_INTEGER:
coltypes[index] = ldtype
elif (coltypes[index] == cpmeas.COLTYPE_FLOAT and
ldtype != cpmeas.COLTYPE_INTEGER):
coltypes[index] = ldtype
if collen[index] < len(field):
collen[index] = len(field)
for index in range(len(header)):
if coltypes[index] == cpmeas.COLTYPE_VARCHAR:
coltypes[index] = cpmeas.COLTYPE_VARCHAR_FORMAT % collen[index]
image_names = self.other_providers('imagegroup')
result = [(cpmeas.IMAGE, colname, coltype)
for colname, coltype in zip(header, coltypes)
if colname not in previous_fields]
if self.wants_images:
for feature, coltype in (
(C_URL, cpmeas.COLTYPE_VARCHAR_PATH_NAME),
(C_PATH_NAME, cpmeas.COLTYPE_VARCHAR_PATH_NAME),
(C_FILE_NAME, cpmeas.COLTYPE_VARCHAR_FILE_NAME),
(C_MD5_DIGEST, cpmeas.COLTYPE_VARCHAR_FORMAT % 32),
(C_SCALING, cpmeas.COLTYPE_FLOAT),
(C_HEIGHT, cpmeas.COLTYPE_INTEGER),
(C_WIDTH, cpmeas.COLTYPE_INTEGER)):
for image_name in image_names:
measurement = feature + '_' + image_name
if not any([measurement == c[1] for c in result]):
result.append((cpmeas.IMAGE, measurement, coltype))
#
# Add the object features
#
for object_name in self.get_object_names():
result += I.get_object_measurement_columns(object_name)
for feature, coltype in (
(C_OBJECTS_URL, cpmeas.COLTYPE_VARCHAR_PATH_NAME),
(C_OBJECTS_PATH_NAME, cpmeas.COLTYPE_VARCHAR_PATH_NAME),
(C_OBJECTS_FILE_NAME, cpmeas.COLTYPE_VARCHAR_FILE_NAME)):
                mname = feature + "_" + object_name
                result.append((cpmeas.IMAGE, mname, coltype))
#
# Try to make a well column out of well row and well column
#
well_column = None
well_row_column = None
well_col_column = None
for column in result:
if not column[1].startswith(cpmeas.C_METADATA + "_"):
continue
category, feature = column[1].split('_', 1)
if cpmeas.is_well_column_token(feature):
well_col_column = column
elif cpmeas.is_well_row_token(feature):
well_row_column = column
elif feature.lower() == cpmeas.FTR_WELL.lower():
well_column = column
if (well_column is None and well_row_column is not None and
well_col_column is not None):
length = cpmeas.get_length_from_varchar(well_row_column[2])
length += cpmeas.get_length_from_varchar(well_col_column[2])
result += [(cpmeas.IMAGE,
'_'.join((cpmeas.C_METADATA, cpmeas.FTR_WELL)),
cpmeas.COLTYPE_VARCHAR_FORMAT % length)]
entry["measurement_columns"] = result
return result
def has_synthetic_well_metadata(self):
'''Determine if we should synthesize a well metadata feature
'''
fields = self.get_header()
has_well_col = False
has_well_row = False
for field in fields:
if not field.startswith(cpmeas.C_METADATA + "_"):
continue
category, feature = field.split('_', 1)
if cpmeas.is_well_column_token(feature):
has_well_col = True
elif cpmeas.is_well_row_token(feature):
has_well_row = True
elif feature.lower() == cpmeas.FTR_WELL.lower():
return False
return has_well_col and has_well_row
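    # Illustration (hypothetical CSV): columns Metadata_WellRow = "A" and
    # Metadata_WellColumn = "01" with no Metadata_Well make this return True,
    # and a combined well feature such as "A01" is then synthesized.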
def get_categories(self, pipeline, object_name):
try:
columns = self.get_measurement_columns(pipeline)
result = set([column[1].split('_')[0] for column in columns
if column[0] == object_name])
return list(result)
except:
return []
def get_measurements(self, pipeline, object_name, category):
columns = self.get_measurement_columns(pipeline)
return [feature for c, feature in
[column[1].split('_', 1) for column in columns
if column[0] == object_name
and column[1].startswith(category + "_")]]
def change_causes_prepare_run(self, setting):
'''Check to see if changing the given setting means you have to restart
Some settings, esp in modules like LoadImages, affect more than
the current image set when changed. For instance, if you change
the name specification for files, you have to reload your image_set_list.
Override this and return True if changing the given setting means
that you'll have to do "prepare_run".
'''
if self.wants_images or setting == self.wants_images:
return True
return False
def upgrade_settings(self, setting_values, variable_revision_number,
module_name, from_matlab):
DIR_DEFAULT_IMAGE = 'Default input folder'
DIR_DEFAULT_OUTPUT = 'Default Output Folder'
if from_matlab and variable_revision_number == 2:
logging.warning(
"Warning: the format and purpose of LoadText "
"has changed substantially.")
text_file_name = setting_values[0]
field_name = setting_values[1]
path_name = setting_values[2]
if path_name == '.':
path_choice = DIR_DEFAULT_IMAGE
elif path_name == '&':
path_choice = DIR_DEFAULT_OUTPUT
else:
path_choice = DIR_OTHER
setting_values = [path_choice, path_name, text_file_name,
cps.NO, DIR_DEFAULT_IMAGE, '.',
cps.NO, "1,100000"]
from_matlab = False
variable_revision_number = 1
module_name = self.module_name
if (not from_matlab) and variable_revision_number == 1:
setting_values = setting_values + [cps.NO, ""]
variable_revision_number = 2
if variable_revision_number == 2 and (not from_matlab):
if setting_values[0].startswith("Default Image"):
setting_values = [DIR_DEFAULT_IMAGE] + setting_values[1:]
elif setting_values[0].startswith("Default Output"):
setting_values = [DIR_DEFAULT_OUTPUT] + setting_values[1:]
if setting_values[4].startswith("Default Image"):
setting_values = (setting_values[:4] + [DIR_DEFAULT_IMAGE] +
setting_values[5:])
elif setting_values[4].startswith("Default Output"):
setting_values = (setting_values[:4] + [DIR_DEFAULT_OUTPUT] +
setting_values[5:])
variable_revision_number = 3
if variable_revision_number == 3 and (not from_matlab):
module_name = self.module_name
if variable_revision_number == 3 and (not from_matlab):
# directory choice, custom directory merged
# input_directory_choice, custom_input_directory merged
csv_directory_choice, csv_custom_directory, \
csv_file_name, wants_images, image_directory_choice, \
image_custom_directory, wants_rows, \
row_range, wants_image_groupings, \
metadata_fields = setting_values
csv_directory = cps.DirectoryPath.static_join_string(
csv_directory_choice, csv_custom_directory)
image_directory = cps.DirectoryPath.static_join_string(
image_directory_choice, image_custom_directory)
setting_values = [
csv_directory, csv_file_name, wants_images,
image_directory, wants_rows, row_range, wants_image_groupings,
metadata_fields]
variable_revision_number = 4
# Standardize input/output directory name references
setting_values = list(setting_values)
for index in (0, 3):
setting_values[index] = cps.DirectoryPath.upgrade_setting(
setting_values[index])
if variable_revision_number == 4 and (not from_matlab):
csv_directory, csv_file_name, wants_images, \
image_directory, wants_rows, row_range, wants_image_groupings, \
metadata_fields = setting_values
dir_choice, custom_dir = cps.DirectoryPath.split_string(csv_directory)
if dir_choice == cps.URL_FOLDER_NAME:
csv_file_name = custom_dir + '/' + csv_file_name
csv_directory = cps.DirectoryPath.static_join_string(dir_choice, '')
setting_values = [
csv_directory, csv_file_name, wants_images,
image_directory, wants_rows, row_range, wants_image_groupings,
metadata_fields]
variable_revision_number = 5
if variable_revision_number == 5 and (not from_matlab):
# Added rescaling option
setting_values = setting_values + [cps.YES]
variable_revision_number = 6
return setting_values, variable_revision_number, from_matlab
LoadText = LoadData
def best_cast(sequence, coltype=None):
'''Return the best cast (integer, float or string) of the sequence
sequence - a sequence of strings
Try casting all elements to integer and float, returning a numpy
array of values. If all fail, return a numpy array of strings.
'''
if (isinstance(coltype, (str, unicode)) and
coltype.startswith(cpmeas.COLTYPE_VARCHAR)):
# Cast columns already defined as strings as same
return np.array(sequence)
def fn(x, y):
if cpmeas.COLTYPE_VARCHAR in (x, y):
return cpmeas.COLTYPE_VARCHAR
if cpmeas.COLTYPE_FLOAT in (x, y):
return cpmeas.COLTYPE_FLOAT
return cpmeas.COLTYPE_INTEGER
ldtype = reduce(fn, [get_loaddata_type(x) for x in sequence],
cpmeas.COLTYPE_INTEGER)
if ldtype == cpmeas.COLTYPE_VARCHAR:
return np.array(sequence)
elif ldtype == cpmeas.COLTYPE_FLOAT:
return np.array(sequence, np.float64)
else:
return np.array(sequence, np.int32)
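# A few illustrative calls, traced by hand from the logic above (not from tests):
#   best_cast(["1", "2", "3"])  -> np.array([1, 2, 3], np.int32)
#   best_cast(["1.5", "2"])     -> np.array([1.5, 2.0], np.float64)
#   best_cast(["a", "1"])       -> np.array(["a", "1"])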
int32_max = np.iinfo(np.int32).max
int32_min = np.iinfo(np.int32).min
def get_loaddata_type(x):
'''Return the type to use to represent x
If x is a 32-bit integer, return cpmeas.COLTYPE_INTEGER.
If x cannot be represented in 32 bits but is an integer,
return cpmeas.COLTYPE_VARCHAR
If x can be represented as a float, return COLTYPE_FLOAT
'''
global int32_max, int32_min
try:
iv = int(x)
if iv > int32_max:
return cpmeas.COLTYPE_VARCHAR
if iv < int32_min:
return cpmeas.COLTYPE_VARCHAR
return cpmeas.COLTYPE_INTEGER
except:
try:
fv = float(x)
return cpmeas.COLTYPE_FLOAT
except:
return cpmeas.COLTYPE_VARCHAR
| download_success: true |
| blob_id: 9d2a97330a0eca83f1a863dc9cd560042391433e | language: Python | repo_name: Erik0x42/Netscape-Bookmarks-File-Parser | path: /NetscapeBookmarksFileParser/__init__.py | src_encoding: UTF-8 | length_bytes: 5577 | score: 3.078125 | int_score: 3 | detected_licenses: ["MIT"] | license_type: permissive |
from dataclasses import dataclass
non_parsed = dict() # lines not parsed
@dataclass
class BookmarkItem:
"""
    Represents an item in the bookmarks. An item can be a folder
    or a shortcut (it can also be a feed or a web slice, but those are rare nowadays).
"""
num: int = 0 # the position of the item in the folder it's in
add_date_unix: int = 0 # the creation date of the item in unix time
    last_modified_unix: int = 0  # the last-modified date of the item in unix time
parent = None # the parent folder of the item. Just the root folder have this equal None
name: str = '' # name of the item
@dataclass
class BookmarkFolder(BookmarkItem):
"""
Represents a folder in the bookmarks
"""
personal_toolbar: bool = False # true if the folder is the bookmarks toolbar
items = None # list that contains all items inside this folder
children = None # list that contains all subfolders inside this folder
shortcuts = None # list that contains all shortcuts inside this folder
def __post_init__(self):
self.items = []
self.children = []
self.shortcuts = []
def sync_items(self, recursive=True):
"""
sync the folder item list with children and shortcut lists.
The item list is cleaned and populated with the items from
children and shortcut lists
:param recursive: if subfolders should have their items synced too
:return: nothing
"""
self.items = []
self.items.extend(self.children)
self.items.extend(self.shortcuts)
if recursive:
for child in self.children:
child.sync_items()
def split_items(self, recursive=True):
"""
splits the items list into children and shortcuts
        :param self: folder whose items are to be split
        :param recursive: if subfolders should have their items split too
:return: nothing
"""
for item in self.items:
if isinstance(item, BookmarkShortcut):
self.shortcuts.append(item)
elif isinstance(item, BookmarkFolder):
self.children.append(item)
if recursive:
item.split_items()
def sort_items(self, recursive=True):
"""
sort the items list by the num of each item.
        split_items() is run before sorting happens
:param recursive: if subfolders will have their items sorted too
:return: nothing
"""
def sort_by_number(e):
return e.num
self.items.sort(key=sort_by_number)
self.children.sort(key=sort_by_number)
self.shortcuts.sort(key=sort_by_number)
self.split_items(recursive)
if recursive:
for child in self.children:
child.sort_items()
def sort_children_and_shortcuts(self, recursive=True):
"""
sort the children and shortcuts lists by the num of each item.
        sync_items() is run before sorting happens
:param recursive: if subfolders will have their children and shortcuts sorted too
:return: nothing
"""
def sort_by_number(e):
return e.num
self.children.sort(key=sort_by_number)
self.shortcuts.sort(key=sort_by_number)
self.sync_items(recursive)
if recursive:
for child in self.children:
child.sort_children_and_shortcuts()
@dataclass
class BookmarkShortcut(BookmarkItem):
"""
Represents a shortcut in the bookmarks
"""
href: str = "" # link to the web page (or anything alike) of the shortcut
last_visit_unix: int = 0 # date when the web paged was last visited, in unix time
private: int = 0 # equals to the PRIVATE attribute
tags = None # tags of this shortcut, if present
icon_url_fake: bool = False # true if the ICON_URI attribute start with fake-favicon-uri.
icon_url: str = "" # the favicon url if icon_url_fake is false and the attribute ICON_URI is present
icon_base64: str = "" # the favicon encoded in base64. Commonly is a png image. The string here can be really big
feed: bool = False # true if the attribute FEED is present. Legacy support for feeds
web_slice: bool = False # true if the attribute WEBSLICE is present. Legacy support for web slices
comment: str = "" # comment of the shortcut if present
def __post_init__(self):
self.tags = []
@dataclass
class BookmarkFeed(BookmarkShortcut):
"""
Represents a Feed in the bookmarks
"""
feed: bool = True
feed_url: str = "" # feed url
@dataclass
class BookmarkWebSlice(BookmarkShortcut):
"""
Represents an Web Slice in the bookmarks
"""
web_slice: bool = True
is_live_preview: bool = False # value of the attribute ISLIVEPREVIEW
preview_size: str = "" # value of the attribute PREVIEWSIZE.
class NetscapeBookmarksFile(object):
"""
Represents the Netscape Bookmark File
"""
def __init__(self, bookmarks=""):
self.html: str = ""
if hasattr(bookmarks, 'read'):
self.html = bookmarks.read()
elif isinstance(bookmarks, str):
self.html = bookmarks
self.doc_type = ""
self.http_equiv_meta = ""
self.content_meta = ""
self.title = ""
self.bookmarks = BookmarkFolder()
global non_parsed
self.non_parsed = non_parsed
def __str__(self):
return "NetscapeBookmarkFile(bookmarks: {0})".format(str(self.bookmarks))
| download_success: true |
| blob_id: 5d281540d41f3f9f3a073db55f0bb2441f363951 | language: Python | repo_name: yongtal/CS6381 | path: /project/Top_method/mr_mapworker.py | src_encoding: UTF-8 | length_bytes: 5565 | score: 3.140625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
#!/usr/bin/python
#
# Vanderbilt University, Computer Science
# CS4287-5287: Principles of Cloud Computing
# Author: Aniruddha Gokhale
# Created: Nov 2016
#
#
# Purpose:
# This code runs the wordcount map task. It runs inside the worker process. Since the
# worker gets commands from a master and sends result back to the master, we use
# ZeroMQ as a way to get this communication part done.
import os
import sys
import time
import re
import zmq
import json
import argparse # argument parser
# @NOTE@: You will need to make appropriate changes
# to this logic. You can maintain the overall structure
# but the logic of the map function has to change to
# suit the needs of the assignment
#
# I do not think you need to change the class variables
# but you may need additional ones. The key change
# will be in do_work
# ------------------------------------------------
# Main map worker class
class MR_Map ():
""" The map worker class """
def __init__ (self, args):
""" constructor """
self.id = args.id
self.master_ip = args.masterip
self.master_port = args.masterport
self.receiver = None # connection to master
self.sender = None # connection to map barrier
#------------------------------------------
def init_worker (self):
""" Word count map worker initialization """
print "initializing map worker with id: ", self.id, " in directory: ", os.getcwd ()
context = zmq.Context()
# Socket to receive messages on. Worker uses PULL from the master
self.receiver = context.socket(zmq.PULL)
connect_addr = "tcp://"+ self.master_ip + ":" + str(self.master_port)
print "Using PULL, map worker connecting to ", connect_addr
self.receiver.connect(connect_addr)
# Socket to send messages to. In our case, the map worker will push an event
# to the map barrier indicating two things.
# First, it tells that it is up and running.
# Second, it tells it has completed the map task.
# Note that the port number of the map barrier is 2 more than the
# port of the master
self.sender = context.socket(zmq.PUSH)
connect_addr = "tcp://" + self.master_ip + ":" + str(self.master_port+2)
print "Using PUSH, map worker connecting to barrier at ", connect_addr
self.sender.connect(connect_addr)
# now send an ACK to the barrier to let it know that we are up
self.sender.send(b'0')
#------------------------------------------
def do_work (self):
""" Word count map function """
print "starting work: map worker with id: ", self.id
# recall that the master broadcasts the map or reduce message via the PUSH.
# It can be received by both the map and reduce workers. So it is the job of
# the map and reduce worker to make sure the message was meant for it.
# Else ignore it.
# In our case, we do only one task and our job in life is done :-)
json_obj = self.receiver.recv_json()
print "received message = ", json.dumps(json_obj)
# now parse the json object and do the work
# We use our id to index into the array of workers in the received message to
# find the position in the file we want to read from
datafile = open(json_obj['datafile'],'r')
datafile.seek(json_obj['start'], 0)
content = datafile.read (json_obj['size'])
datafile.close()
# Each map task saves its intermediate results in a file
map_file = open("Map"+str(self.id)+".csv", "w")
letters = (re.sub(r'[^a-zA-Z]+', '', content)).lower()
for ch in letters:
map_file.write (ch + ", 1\n")
map_file.close()
# trigger the map barrier by sending a dummy byte
        self.sender.send (b'0') # second send: the one in init_worker signalled start-up, this one signals map-task completion to the barrier
print "map worker with ID: ", self.id, " exiting"
##################################
# Command line parsing
##################################
def parseCmdLineArgs ():
# parse the command line
parser = argparse.ArgumentParser ()
# add positional arguments in that order
parser.add_argument ("id", type=int, help="worker number")
parser.add_argument ("masterip", help="IP addr of master")
parser.add_argument ("masterport", type=int, help="Port number of master")
# parse the args
args = parser.parse_args ()
return args
#---------------------------------------------------------------------------------------------------------------
# main function
def main ():
""" Main program for Map worker """
print "MapReduce Map Worker program"
parsed_args = parseCmdLineArgs ()
# now invoke the mapreduce framework. Notice we have slightly changed the way the
# constructor works and the arguments it takes.
mapobj = MR_Map(parsed_args)
# this is a hack for the purposes of coordination. We need to have the servers
# ready for us to connect. So sleep for a few secs to make sure the push and sink
# servers are up.
time.sleep (2)
# initialize the map worker network connections
mapobj.init_worker ()
# invoke the map process
mapobj.do_work ()
#----------------------------------------------
if __name__ == '__main__':
main ()
| download_success: true |
| blob_id: 5a44cf718a1037c61af7ff6d7c178ceed4c87c18 | language: Python | repo_name: XuShaoming/CompVision_ImageProc | path: /project1/code/mycv.py | src_encoding: UTF-8 | length_bytes: 487 | score: 3.53125 | int_score: 4 | detected_licenses: [] | license_type: no_license |
def resize_shrink(matrix, fx, fy):
"""
Purpose:
shrink a matrix given fx and fy.
Input:
fx: resize on column
fy: resize on row
Output:
shrink matrix list
"""
fx_inv = int(1 / fx)
fy_inv = int(1 / fy)
res = []
for i in range(0, len(matrix), fy_inv):
res_row = []
for j in range(0, len(matrix[i]), fx_inv):
res_row.append(matrix[i][j])
res.append(res_row)
return res
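# Hand-traced example: with fx = fy = 0.5 every other row and column is kept, so
# resize_shrink([[1, 2, 3, 4], [5, 6, 7, 8]], 0.5, 0.5) returns [[1, 3]]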
| download_success: true |
| blob_id: f4de79a5d92973ae9c2985a277467e12681e1e17 | language: Python | repo_name: arwaahmedf/tasks | path: /mass.py | src_encoding: UTF-8 | length_bytes: 613 | score: 2.75 | int_score: 3 | detected_licenses: [] | license_type: no_license |
#!/usr/bin/env python
# coding: utf-8
# In[5]:
from pyopenms import *
seq = AASequence.fromString("VAKA")
# NOTE: getMonoWeight() on the whole AASequence returns the mass of the full
# peptide, so the per-residue masses are taken by iterating over the sequence
V_weight, A1_weight, K_weight, A2_weight = [aa.getMonoWeight() for aa in seq]
print("Monoisotopic mass of residue [V] is ", V_weight)
print("Monoisotopic mass of residue [A] is ", A1_weight)
print("Monoisotopic mass of residue [K] is ", K_weight)
print("Monoisotopic mass of residue [A] is ", A2_weight)
print ("The piptide", str(seq), "consists of the following amino acids:")
for aa in seq:
print(aa.getName(), ":", aa.getMonoWeight())
# In[ ]:
# In[ ]:
| download_success: true |
| blob_id: 4b49ae86aec50a383ed20bb3ba56bb5107f58f90 | language: Python | repo_name: 18bcs6526/Python | path: /fbbonaci.py | src_encoding: UTF-8 | length_bytes: 113 | score: 3.546875 | int_score: 4 | detected_licenses: [] | license_type: no_license |
a = 0
b = 1
x = int(input("enter the number"))
print(a)
for i in range(x):
    # print the next Fibonacci number, then advance the pair
    # (the original printed 0 1 2 3 5 ..., skipping the second 1)
    print(b)
    a, b = b, a + b
| download_success: true |
| blob_id: 93a5205b2167481c4725605629813b2c04fa2821 | language: Python | repo_name: realpython/materials | path: /python-311/units.py | src_encoding: UTF-8 | length_bytes: 714 | score: 3.015625 | int_score: 3 | detected_licenses: ["MIT"] | license_type: permissive |
import pathlib
import tomllib
with pathlib.Path("units.toml").open(mode="rb") as file:
base_units = tomllib.load(file)
units = {}
for unit, unit_info in base_units.items():
units[unit] = unit_info
for alias in unit_info["aliases"]:
units[alias] = unit_info
def to_baseunit(value, from_unit):
from_info = units[from_unit]
if "multiplier" not in from_info:
return (
value,
from_info["label"]["singular" if value == 1 else "plural"],
)
return to_baseunit(value * from_info["multiplier"], from_info["to_unit"])
print(to_baseunit(7, "s"))
print(to_baseunit(3.11, "minutes"))
print(to_baseunit(14, "days"))
print(to_baseunit(1 / 12, "yr"))
| download_success: true |
| blob_id: de4cd82d9d1402b819fe0bbfa234f1ebc62d3e60 | language: Python | repo_name: Fredy/UCSP-Bioinspirada | path: /lab_3/lab_3.py | src_encoding: UTF-8 | length_bytes: 5823 | score: 2.984375 | int_score: 3 | detected_licenses: ["MIT"] | license_type: permissive |
"""Lab 3: Genetic Algorithms"""
from random import random, randrange, sample, choice
from sys import argv
from copy import deepcopy
from math import sin, sqrt
import numpy as np
from fitness import calc_fitnesses, linear_normalization
from operators import crossovers, mutation
from selection import selections, elitism
from representation import Cromosome
from charts import draw_chart
def canonical(optfunc, population_len, limits, precisions, epochs,
crossover, selection, mutation, pc, pm, use_elitism,
use_normalization, minv=None, maxv=None, save_bests=False, save_pops=False):
if use_normalization and (minv is None or maxv is None):
raise TypeError(
'If use_normalization is true, minv and maxv must be specified'
)
bests = [] if save_bests else None
pops = [] if save_pops else None
population = np.array([Cromosome(limits, precisions)
for i in range(population_len)])
fitnesses = calc_fitnesses(population, optfunc)
if use_normalization:
normalized = linear_normalization(fitnesses, minv, maxv)
population = population[[i[0] for i in normalized]]
fitnesses = [i[1] for i in normalized]
if save_bests:
idx = np.argmax(fitnesses)
bests.append(population[idx].get_real_val())
if save_pops:
pops.append([c.get_real_val() for c in population])
for i in range(epochs - 1):
new_pop = selection(population, fitnesses)
if use_elitism:
prev_best = elitism(population, fitnesses)
operate(new_pop, crossover, mutation, pc, pm)
fitnesses = calc_fitnesses(new_pop, optfunc)
if use_elitism:
idx = randrange(len(new_pop))
new_pop[idx] = deepcopy(prev_best[0])
fitnesses[idx] = prev_best[1]
if use_normalization:
normalized = linear_normalization(fitnesses, minv, maxv)
population = deepcopy(new_pop[[i[0] for i in normalized]])
fitnesses = [i[1] for i in normalized]
else:
population = new_pop
if save_bests:
idx = np.argmax(fitnesses)
bests.append(population[idx].get_real_val())
if save_pops:
pops.append([c.get_real_val() for c in population])
return population, bests, pops
def operate(population, crossover, mutation, pc, pm):
length = len(population)
for i in range(length):
if random() < pc:
samp = sample(list(population), 2)
bin_reprs = [i.bin_value for i in samp]
crossover(bin_reprs[0], bin_reprs[1])
if random() < pm:
bin_rep = choice(population).bin_value
mutation(bin_rep)
def optfunc(x):
# −100 ≤ x1 ≤ 100
# −100 ≤ x2 ≤ 100
xsqr = x[0] ** 2 + x[1] ** 2
tmp1 = sin(sqrt(xsqr))**2 - 0.5
tmp2 = (1 + 0.001 * (xsqr)) ** 2
return 0.5 - tmp1 / tmp2
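# Note: this is 1 minus Schaffer's F6 function, so its global maximum of 1.0
# sits at x = (0, 0) -- a handy sanity check for the GA's best individuals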
if __name__ == "__main__":
if len(argv) < 10:
print(
'Usage python lab_3.py population_length epochs experiments crossover selection pc pm use_elitism use_norm',
'- population_length: length of the population',
'- epochs: number of epochs',
'- experiments: number of experiments',
'- crossover: one of {}'.format(list(crossovers.keys())),
'- selection: one of {}'.format(list(selections.keys())),
'- pc: cross probability',
'- pm: mutation probaility',
'- use_elitism: true or false',
'- use_norm: true or false',
'- if use_norm is true: two extra params must be specified: vmin and vmax',
sep='\n'
)
exit()
population_len = int(argv[1])
epochs = int(argv[2])
experiments = int(argv[3])
crossover = argv[4]
selection = argv[5]
pc = float(argv[6])
pm = float(argv[7])
use_elitism = argv[8] == 'true'
use_norm = argv[9] == 'true'
minv = None
maxv = None
if use_norm:
if len(argv) < 12:
print('If use_norm is true: two extra params must be specified: minv and maxv')
exit()
minv = float(argv[10])
maxv = float(argv[11])
crossover_func = crossovers[crossover]
selection_func = selections[selection]
bests_fitnesses = np.zeros(epochs)
population_fitnesses = np.zeros(epochs)
for i in range(experiments):
res, bests, pops = canonical(
optfunc=optfunc, population_len=population_len,
limits=((-100, 100), (-100, 100)),
precisions=(6, 6), epochs=epochs,
crossover=crossover_func,
selection=selection_func,
mutation=mutation,
            pc=pc, pm=pm,
use_elitism=use_elitism, use_normalization=use_norm,
minv=minv, maxv=maxv,
save_bests=True,
save_pops=True
)
fitnesses = calc_fitnesses(bests, optfunc)
bests_fitnesses += fitnesses
population_f = []
for i in pops:
tmp = np.average(calc_fitnesses(i, optfunc))
population_f.append(tmp)
population_fitnesses += population_f
res_fit = calc_fitnesses(res, optfunc)
for r, f in zip(res, res_fit):
print(r.get_real_val(), ' : ', f)
print('------')
bests_fitnesses /= experiments
population_fitnesses /= experiments
draw_chart(bests_fitnesses, population_fitnesses, '{} {} pc: {} pm: {} E: {} N: {}'.format(
selection, crossover, pc, pm, use_elitism, use_norm))
res_fitnesses = calc_fitnesses(res, optfunc)
best_idx = np.argmax(res_fitnesses)
print(res[best_idx].get_real_val(), ' -> ', res_fitnesses[best_idx])
print(bests_fitnesses)
| download_success: true |
| blob_id: 7ba8324637b222baa4334489f68834cfc5e13076 | language: Python | repo_name: alejandrosd/Ejercicio-Fibonacci | path: /fibonacci.py | src_encoding: UTF-8 | length_bytes: 805 | score: 3.609375 | int_score: 4 | detected_licenses: [] | license_type: no_license |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Estudiantes
#
# Created: 13/10/2017
# Copyright: (c) Estudiantes 2017
# Licence: <your licence>
#-------------------------------------------------------------------------------
def fibonacci(num):
    u = 0
    pu = 1
    for i in range(num):
        r = u + pu
        pu = u
        u = r
    return r
def Rfibonacci(num):
if(num>2):
return Rfibonacci(num-1)+Rfibonacci(num-2)
else:
return 1
def main():
pass
num = int(input("Enter a positive number\n"))
if num == 0:
    print("Indeterminate result")
else:
    print("Fibonacci =", fibonacci(num))
if __name__ == '__main__':
main()
| download_success: true |
| blob_id: 37a4e03e010ad6ecdc7f3442300e84abd9b77cd4 | language: Python | repo_name: Thelordofdream/Deep-Learning | path: /mnist in Attensive Reader/application.py | src_encoding: UTF-8 | length_bytes: 1856 | score: 2.515625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
# coding=utf-8
import os
os.chdir("../")
import tensorflow as tf
import model
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
def predict(model, pred_num, sess):
saver = tf.train.Saver()
correct = 0
row = int((pred_num - 1) / 5) + 1
fig, ax = plt.subplots(5, 3 * row)
for i in range(pred_num):
batch_x, batch_y = mnist.train.next_batch(model.batch_size)
batch_x = batch_x.reshape((model.batch_size, model.steps, model.inputs))
saver.restore(sess, "./mnist in Attensive Reader/model/model.ckpt")
pred, attention = sess.run([tf.argmax(model.output, 1), model.s], feed_dict={model.q: batch_x, model.a:batch_x, model.keep_prob_q: 1.0, model.keep_prob_a: 1.0})
label = sess.run(tf.argmax(batch_y, 1))
print "Sample %d------\nprediction %s\nreal label %s" % (i + 1, pred, label)
draw(i, ax, batch_x, attention)
if pred == label:
correct += 1
print "predict accuracy %g" % (correct * 1.0/ pred_num)
plt.show()
def draw(num, ax, image, attention):
origin = np.array(image[0].reshape([28, 28]) * 255, dtype="uint8")
ax[num % 5, int(num / 5) * 3].imshow(origin, cmap='gray')
max = np.max(attention)
attention /= max
att = range(0, 28)
for i in range(28):
origin[i] = origin[i] * attention[0][i][0]
att[27 - i] = attention[0][i][0]
ax[num % 5, int(num / 5) * 3 + 1].imshow(origin, cmap='gray')
ax[num % 5, int(num / 5) * 3 + 2].barh(range(0, 28), att)
if __name__ == "__main__":
my_network = model.Attensive_Reader(name="mnist", batch_size=1)
pred_num = 15
init = tf.global_variables_initializer()
with tf.Session() as sess:
predict(my_network, pred_num, sess)
| download_success: true |
| blob_id: 7f3f3f4e692bb307bdda07260475f8581e4946c6 | language: Python | repo_name: byAbaddon/Book-Introduction-to-Programming-with----JavaScript____and____Python | path: /Pyrhon - Introduction to Programming/8.2. Exam Preparation - Part II/06. Letters Combinations.py | src_encoding: UTF-8 | length_bytes: 362 | score: 3.25 | int_score: 3 | detected_licenses: [] | license_type: no_license |
n1, n2, n3 = [ord(input()) for _ in range(3)]
res = ''
count = 0
for i in range(n1, n2 + 1):
for j in range(n1,n2 + 1):
for k in range(n1,n2 + 1):
if i != n3 and j != n3 and k != n3:
res += chr(i) + chr(j) +chr(k) + ' '
count+= 1
print(f'{res}{count}')
'''
a
c
b
#aaa aac aca acc caa cac cca ccc 8
'''
| download_success: true |
| blob_id: 27a1a22845aeedddb72639fd27c3af0fc662def0 | language: Python | repo_name: chriskaravel/Python_Machine_Learning_Flight_Delay_Prediction | path: /test.py | src_encoding: UTF-8 | length_bytes: 2981 | score: 3.15625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
import pandas as pd
import numpy as np
import sklearn
from sklearn import linear_model
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import matplotlib.pyplot as plt
from matplotlib import style
import pickle
## NOTE : This is an example of delay prediction with pd.DataFrames
# load the data from the csv file; if the data were separated by semicolons we would need sep=";"
data = pd.read_csv("Lots_of_flight_data1.csv")
data = data[["CRS_DEP_TIME","DEP_TIME","DEP_DELAY","CRS_ARR_TIME","ARR_TIME","ARR_DELAY","CRS_ELAPSED_TIME","ACTUAL_ELAPSED_TIME","AIR_TIME","DISTANCE"]]
# data with no NaN values
data_no_nulls = data.dropna()
# X is our features we use to try and do our prediction
X = data_no_nulls.loc[:,["CRS_DEP_TIME","DEP_TIME","DEP_DELAY","CRS_ARR_TIME","ARR_TIME","CRS_ELAPSED_TIME","ACTUAL_ELAPSED_TIME","AIR_TIME","DISTANCE"]]
# y is the value we try to predict
y = data_no_nulls.loc[:,["ARR_DELAY"]]
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
# TRAIN MODEL MULTIPLE TIMES FOR BEST SCORE
best=0
for x in range(10):
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
regressor = LinearRegression()
# train the model using the training data
regressor.fit(x_train, y_train)
acc = regressor.score(x_test, y_test)
predictions = regressor.predict(x_test)
# print("accuracy: \n", acc)
# print("r^2: \n", metrics.r2_score(y_test, predictions))
if acc > best:
best=acc
# save our model with pickle
        # with hundreds of thousands of rows you don't want to retrain the model
        # every time, so save the model once it reaches a good accuracy
with open("flight_model.pickle", 'wb') as f:
pickle.dump(regressor, f)
#you load your saved model
pickle_in = open("flight_model.pickle","rb")
regressor=pickle.load(pickle_in)
# load the data from the csv file
data = pd.read_csv("Lots_of_flight_data2.csv")
data = data[["CRS_DEP_TIME","DEP_TIME","DEP_DELAY","CRS_ARR_TIME","ARR_TIME","ARR_DELAY","CRS_ELAPSED_TIME","ACTUAL_ELAPSED_TIME","AIR_TIME","DISTANCE"]]
# data with no NaN values
data_no_nulls = data.dropna()
# X is our features we use to try and do our prediction
X = data_no_nulls.loc[:,["CRS_DEP_TIME","DEP_TIME","DEP_DELAY","CRS_ARR_TIME","ARR_TIME","CRS_ELAPSED_TIME","ACTUAL_ELAPSED_TIME","AIR_TIME","DISTANCE"]]
# y is the value we try to predict
y = data_no_nulls.loc[:,["ARR_DELAY"]]
predictions = regressor.predict(X)
predictions_df = pd.DataFrame(predictions)
predictions_df.columns=['Predicted Delay']
# Reset the index values to the second dataframe appends properly
y_test_df = pd.DataFrame(y).reset_index(drop=True)
#Concat the two dataframes
merged_df = pd.concat([predictions_df, y_test_df],axis=1)
#show all rows
pd.set_option('display.max_rows', None)
print(merged_df)
| download_success: true |
| blob_id: 019be973d4ab6a70b82605be50b437ff701425df | language: Python | repo_name: Nom0ri/Pyton_snake_game | path: /pyton.py | src_encoding: UTF-8 | length_bytes: 3714 | score: 3.3125 | int_score: 3 | detected_licenses: [] | license_type: no_license |
import pygame
import time
import random
pygame.init()
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
#window size
win_y = 600
win_x = 800
window=pygame.display.set_mode((win_x,win_y))
pygame.display.update()
pygame.display.set_caption('Pyton by Nomori')
snek_size = 10
clock = pygame.time.Clock()
font_style = pygame.font.SysFont("comicsansms", 40)
score_style = pygame.font.SysFont("comicsansms", 35)
def show_score(score):
value = score_style.render("Score: " + str(score), True, white)
window.blit(value, [0, 0])
def snek(snek_size, snek_list):
for x in snek_list:
pygame.draw.rect(window, white, [x[0], x[1], snek_size, snek_size])
def message(msg,color):
text = font_style.render(msg, True, color)
window.blit(text, [win_x/4,win_y/2])
def game(): #Main game function
game_over = False
game_close = False
x1 = win_x/2
y1 = win_y/2
x1_upd = 0
y1_upd = 0
foodx = round(random.randrange(0, win_x - snek_size) / 10.0) * 10.0
foody = round(random.randrange(0, win_y - snek_size) / 10.0) * 10.0
snek_list=[]
snek_len=1
while not game_over:
while game_close == True:
window.fill(black)
message("Q - exit or E - try again", white)
show_score(snek_len-1)
pygame.display.update()
for event in pygame.event.get():
if event.type==pygame.QUIT: #React to close button
game_over = True
game_close = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
game_over = True
game_close = False
if event.key == pygame.K_e:
game()
for event in pygame.event.get():
if event.type==pygame.QUIT: #React to close button
game_over=True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT or event.key == pygame.K_a:
x1_upd = -snek_size
y1_upd = 0
elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:
x1_upd = snek_size
y1_upd = 0
elif event.key == pygame.K_UP or event.key == pygame.K_w:
y1_upd = -snek_size
x1_upd = 0
elif event.key == pygame.K_DOWN or event.key == pygame.K_s:
y1_upd = snek_size
x1_upd = 0
if x1 >= win_x or x1 < 0 or y1 >= win_y or y1 < 0:
game_close = True
x1 += x1_upd
y1 += y1_upd
window.fill(black)
        # cap the frame rate at the snake speed (reuse the module-level clock
        # instead of creating a new Clock object on every frame)
        speed = 15
        clock.tick(speed)
pygame.draw.rect(window, red, [foodx, foody, snek_size, snek_size])
snek_head = []
snek_head.append(x1)
snek_head.append(y1)
snek_list.append(snek_head)
if len(snek_list) > snek_len:
del snek_list[0]
for x in snek_list[:-1]:
if x == snek_head:
game_close = True
snek(snek_size, snek_list)
show_score(snek_len-1)
pygame.display.update()
if x1 == foodx and y1 == foody:
foodx = round(random.randrange(0, win_x - snek_size) / 10.0) * 10.0
foody = round(random.randrange(0, win_y - snek_size) / 10.0) * 10.0
snek_len += 1
pygame.display.update()
pygame.quit()
quit()
game()
| download_success: true |
| blob_id: 6aa7f2a8eafa6415c6bf5b86d62bf55d4360388f | language: Python | repo_name: Masum-Osman/pythonista | path: /ZKM/ds2/tree.py | src_encoding: UTF-8 | length_bytes: 188 | score: 3 | int_score: 3 | detected_licenses: [] | license_type: no_license |
class TreeNode:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
class BinaryTree:
    def __init__(self):
        super().__init__()
        self.root = None  # root of the tree; None until nodes are attached
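# Minimal usage sketch (nodes wired by hand, since no insert method is defined):
#   tree = BinaryTree()
#   tree.root = TreeNode(1)
#   tree.root.left = TreeNode(2)
#   tree.root.right = TreeNode(3)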
| download_success: true |
| blob_id: f5705678ce0a8401ab438f1a49c68e1d4ec79ff3 | language: Python | repo_name: AdamJozwiak/PBL_Endless_Project | path: /Executable/convert-unity.py | src_encoding: UTF-8 | length_bytes: 2084 | score: 2.859375 | int_score: 3 | detected_licenses: [] | license_type: no_license |
# Imports
import sys
import pathlib
# Read program arguments
arguments = None
if len(sys.argv) == 1:
arguments = ["."]
else:
arguments = sys.argv[1:]
# Transform arguments into paths
input_paths = [pathlib.Path(argument) for argument in arguments]
# Make a list of all files to convert
filenames = []
for input_path in input_paths:
if input_path.is_dir():
for extension in ["*.unity", "*.prefab", "*.mat"]:
filenames += [str(path) for path in input_path.rglob(extension)]
else:
filenames.append(str(input_path))
# Convert files
for filename in filenames:
# Read file's contents
input_lines = []
with open(filename, "r") as input_file:
input_lines = [line for line in input_file]
# Overwrite contents in-place with converted version
with open(filename, "w") as output_file:
for i in range(len(input_lines)):
# Print conversion status
print(
"|",
round((i * 10) / len(input_lines)) * "-",
(10 - round((i * 10) / len(input_lines))) * " ",
"| ",
filename,
sep="",
end="\r",
)
# Remove "stripped" keyword
if input_lines[i].find("---") != -1:
stripped_location = input_lines[i].find("stripped")
if stripped_location != -1:
input_lines[i] = input_lines[i][:stripped_location].strip() + "\n"
# Write corrected line
output_file.write(input_lines[i])
# Skip this line if conversion is not applicable
if (
i < 1
or i >= len(input_lines) - 1
or input_lines[i - 1].find("---") == -1
or input_lines[i + 1].find(" id: ") != -1
):
continue
# Add a new line with identifier
output_file.write(
" id: " + input_lines[i - 1][input_lines[i - 1].find("&") + 1 :]
)
# Print new line
print()
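# Illustration with a made-up .prefab fragment: a header line such as
#   --- !u!1 &123456 stripped
# is rewritten to "--- !u!1 &123456", and "  id: 123456" is inserted right after
# the following type line (e.g. "GameObject:") unless an "  id: " line already exists.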
| download_success: true |
| blob_id: 38402d4c2a9a8605fba48e9049aed69a5f7b14ee | language: Python | repo_name: serban-hartular/OnlineParser | path: /cgi-bin/gram/rule_builder.py | src_encoding: UTF-8 | length_bytes: 5768 | score: 2.703125 | int_score: 3 | detected_licenses: [] | license_type: no_license |
# format:
# "VP[mod=Ind head=verb] -> subj:NP[nr=@ pers=@ case=N] , verb:V"
import re
from constraints import Constraint, OverwriteConstraint
from nodes import Monomial
from rules import *
DEFAULT_ERROR_SCORE = 1.0
EQUALS = '='
OVERWRITE = '~'
NONE_NOT_OK = '=='
def constraint_from_string(string : str, l_deprel:str = None) -> Constraint:
""" string format 'cas=N' or 'num=@' or 'gen=subj.gen' or 'num = verb.num'
@ will be replaced with the lval
over-write rule: num/3 means is_bare will be yes no matter what it was before, no error"""
if EQUALS in string: separator = EQUALS
elif OVERWRITE in string: separator = OVERWRITE
else: raise Exception('Invalid constraint %s, no separator' % string)
is_none_ok = (NONE_NOT_OK not in string)
string = string.replace(NONE_NOT_OK, EQUALS)
is_overwrite = (separator == OVERWRITE)
(lstr, rstr) = string.split(separator)
lstr = lstr.strip()
rstr = rstr.strip()
if rstr[-1] == '!': # mandatory condition
score = float('Inf')
rstr = rstr[:-1]
elif '?' in rstr:
(rstr, score) = rstr.split('?')
score = float(score) if score else 0.5
else:
score = DEFAULT_ERROR_SCORE
# do lexp
lstr = lstr.split('.') # split by period -- ie, subj.gen
lexp = [l_deprel] if l_deprel else []
lexp = lexp + lstr
# for item in lstr:
# lexp.append(item)
# do rexp
rexpr = []
if rstr == '@':
rexpr = lstr
elif '.' in rstr:
rstr = rstr.split('.')
for item in rstr:
rexpr.append(item)
else: # plain string
rexpr = rstr
if is_overwrite:
return OverwriteConstraint(lexp, rexpr)
else:
return Constraint(lexp, rexpr, score, is_none_ok)
def constraint_list_from_string(string : str, l_deprel:str = None):
# string will look like 'case=2 num=q gen = bizzarre '
# to make whitespace a separator, replace ' = ' with '='
string = re.sub('\s*=\s*', '=', string)
string = string.strip()
strings = string.split() # whitespace
return [constraint_from_string(s, l_deprel) for s in strings]
def type_and_constraint_list_from_string(string:str, l_deprel:str = None) -> tuple:
# string has form 'NP[case=Nom nr=@ pers=@]'
string = string.strip()
if '[' not in string: # it's a singleton
if ' ' in string: # bad
raise Exception('Bad name ' + string)
return(string, [])
try:
(name, constraint_string) = string.split('[')
except:
raise Exception('Error splitting ' + string)
if constraint_string[-1] != ']':
raise Exception('%s lacks ]', string)
constraint_string = constraint_string[:-1] # elim last char
constraints = constraint_list_from_string(constraint_string, l_deprel)
return (name.strip(), constraints)
def deprel_type_from_string(string: str) -> tuple:
string = string.strip()
try:
(deprel, type_string) = string.split(':')
except:
raise Exception('Missing deprel:item in item %s' % string )
deprel = deprel.strip()
(type_name, constraints) = type_and_constraint_list_from_string(type_string, deprel)
return (deprel, type_name, constraints)
def _get_head_phrase(parent_constraints : list, head_str = 'head') -> str:
head_name = ''
for constraint in parent_constraints:
if constraint.lexpr == [head_str]: # this is a constraint of form 'head=blah'
head_name = constraint.rexpr # ie, blah
break
if head_name:
parent_constraints.remove(constraint)
return head_name
def _get_type_constraint(deprel:str, type_name : str, error_score = float('inf')) -> Constraint:
if deprel:
return Constraint([deprel, Monomial.CATEGORY], type_name, error_score)
else:
return Constraint([Monomial.CATEGORY], type_name, error_score)
def rule_from_string(string: str, head_separator = '->', append_separator = '+=',
child_separator = ',') -> Rule:
# "VP[mod=Ind head=verb] -> subj:NP[nr=@ pers=@ case=N] , verb:V"
# VP += iobj:NP[case=Dat]
separator = None
for s in [head_separator, append_separator]:
if s in string:
separator = s
break
if not separator: raise Exception('No valid rule separator found in %s' % string)
try:
(parent, children) = string.split(separator)
except:
raise Exception('Error splitting "%s" by %s' % (string, separator))
parent = parent.strip()
(parent_name, constraints) = type_and_constraint_list_from_string(parent)
# get head_name from constraint eg "head=verb". fn removes this constraint if found
head_name = _get_head_phrase(constraints)
if separator == append_separator and head_name:
raise Exception('Cannot append and set phrase head in rule %s' % string)
deprel_list = list()
for child_str in children.split(child_separator):
child_str = child_str.strip()
(deprel, type_name, child_constraints) = deprel_type_from_string(child_str)
deprel_list.append(deprel)
constraints.insert(0, _get_type_constraint(deprel, type_name)) # add constraint that child is of type eg 'NP'
constraints = constraints + child_constraints
if head_name:
return Headed_Rule(parent_name, deprel_list, head_name, None, constraints, string)
elif separator == append_separator:
deprel_list.insert(0, AppendRule.SELF) # insert 'self' deprel
constraints.insert(0, _get_type_constraint('', parent_name)) # add constraint that child is of type eg 'NP'
return AppendRule(parent_name, deprel_list, constraints, string)
else:
return Rule(parent_name, deprel_list, constraints, string)
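# Minimal usage sketch (hypothetical grammar fragment, assuming the imports above):
#   r = rule_from_string("VP[mod=Ind head=verb] -> subj:NP[nr=@ pers=@ case=N] , verb:V")
#   # r is a Headed_Rule whose constraints require subj to be an NP in case N that
#   # agrees with the VP in nr and pers, with verb (a V) as the head of the phrase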
| download_success: true |
| blob_id: 1b9f2a27b6962d0fd61d0037e0e87abef01ef3b2 | language: Python | repo_name: sevenhe716/LeetCode | path: /HashTable/q049_group_anagrams.py | src_encoding: UTF-8 | length_bytes: 2069 | score: 3.609375 | int_score: 4 | detected_licenses: [] | license_type: no_license |
# Time: O(n)
# Space: O(1)
# Solution notes:
# One approach exploits position-independence (e.g. a sum-based hash) as a first
# pass, then uses Counter to split hash collisions into true groups
# Another approach hashes in one step, but needs a 5*26-bit big integer and
# requires every letter to occur fewer than 32 times
# Optimization: no hash is actually needed -- the sorted string itself can be
# the key, and a map groups the anagrams directly
class Solution:
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
import itertools
from operator import itemgetter
hashs = [0] * len(strs)
lst = []
for i, s in enumerate(strs):
for c in s:
hashs[i] += 1 << (ord(c) - ord('a')) * 5
lst.append({'hash': hashs[i], 'value': s})
lst.sort(key=itemgetter('hash'))
# lst.sort(lambda x : x['hash'])
lstg = itertools.groupby(lst, itemgetter('hash'))
ans = []
for key, group in lstg:
ans.append([g['value'] for g in group])
return ans
class Solution1(object):
# Categorize by Sorted String
def groupAnagrams1(self, strs):
import collections
ans = collections.defaultdict(list)
for s in strs:
ans[tuple(sorted(s))].append(s)
return ans.values()
# Categorize by Count
def groupAnagrams(self, strs):
import collections
ans = collections.defaultdict(list)
for s in strs:
count = [0] * 26
for c in s:
count[ord(c) - ord('a')] += 1
ans[tuple(count)].append(s)
return list(ans.values())
class SolutionF:
def groupAnagrams(self, strs):
"""
:type strs: List[str]
:rtype: List[List[str]]
"""
d = {}
for s in strs:
ss = ''.join(sorted(s))
if ss not in d:
d[ss] = [s]
else:
d[ss].append(s)
ans = []
for key in d:
ans.append(d[key])
return ans
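# Quick sanity check (the classic LeetCode sample; group order may vary):
#   Solution1().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
#   -> [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]]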
| download_success: true |
| blob_id: 8875eb68f04d67372b4e7956d6826fcbab4e6c25 | language: Python | repo_name: ymink716/PS | path: /BOJ/BaaarkingDog/0x11_그리디/2847.py | src_encoding: UTF-8 | length_bytes: 462 | score: 3.296875 | int_score: 3 | detected_licenses: [] | license_type: no_license |
# Dongjun Who Made a Game
# https://www.acmicpc.net/problem/2847
n = int(input())
scores = []
for _ in range(n):
scores.append(int(input()))
answer = 0
# traverse from the back
for i in range(n - 1, 0, -1):
    # the score at i-1 must end up below the score at i
    if scores[i - 1] >= scores[i]:
        cnt = scores[i - 1] - scores[i] + 1  # number of decrements needed in this span
        scores[i - 1] -= cnt  # lower the score at i-1 by cnt
answer += cnt
print(answer)
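# Hand-worked example: scores [1, 4, 4] -> at i = 2, 4 >= 4 gives cnt = 1 and the
# scores become [1, 3, 4]; nothing else changes, so the printed answer is 1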
| download_success: true |
| blob_id: 60f37cbb20cb76ef905068fa06d64ce8d6b7870c | language: Python | repo_name: Aasthaengg/IBMdataset | path: /Python_codes/p03096/s312768742.py | src_encoding: UTF-8 | length_bytes: 455 | score: 2.703125 | int_score: 3 | detected_licenses: [] | license_type: no_license |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
def main():
MAX = 10**9 + 7
n = int(input())
lc = [int(input()) for _ in range(n)]
dp = [0] * (n)
last_is = [-1]*(2*10**5+1)
dp[0] = 1
last_is[lc[0]] = 0
for i,c in enumerate(lc[1:], 1):
last_i = last_is[c]
dp[i] = dp[i-1]
if last_i != -1 and last_i != i-1:
dp[i] += dp[last_i]
dp[i] %= MAX
last_is[c] = i
print(dp[-1])
if __name__=='__main__':
main()
| download_success: true |
| blob_id: c902b835a7454f4aed41e0312545d9517f603c22 | language: Python | repo_name: axxsxbxx/SSAFY5-Algorithm | path: /week2_3_23/BOJ_2212_수빈.py | src_encoding: UTF-8 | length_bytes: 1841 | score: 3.46875 | int_score: 3 | detected_licenses: [] | license_type: no_license |
'''
2212. Sensors
The Korea Expressway Corporation has installed N sensors on a highway to make it "ubiquitous".
The task is to build concentrator stations that collect and analyse the sensors' data; for budget reasons, at most K concentrators can be built on the highway.
Each concentrator can adjust its reception range, which appears as a connected interval on the highway.
All N sensors must be able to communicate with at least one concentrator, and because of maintenance costs the total length of the reception ranges must be minimised.
For convenience, assume the highway is a straight line in the plane and that the sensors lie at integer distances from the origin on this line,
so each sensor's coordinate is a single integer. Write a program that finds the minimum possible sum of the reception-range lengths.
Note that a reception range may have length 0 or more, and the sensor coordinates need not all be distinct.
'''
import sys
input = sys.stdin.readline
# number of sensors, N
N = int(input())
# number of concentrator stations, K
K = int(input())
# sensor coordinates
sensors = list(map(int, input().split()))
sensors.sort()
# gaps between consecutive sensors
sensor_dist = []
for i in range(N-1):
    sensor_dist.append(sensors[i+1] - sensors[i])
# cutting the coverage at the largest gaps, widest first, minimises the total range length
sensor_dist.sort()
if sensor_dist:
    for _ in range(min(K - 1, len(sensor_dist))):
        sensor_dist.pop()
print(sum(sensor_dist))
else:
print(0)
'''
[Input]
6
2
1 6 9 3 6 7
[Output]
5
'''
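# Checking the sample by hand: sorted sensors 1 3 6 6 7 9 give gaps 2 3 0 1 2;
# K = 2 allows cutting at the K-1 = 1 largest gap (3), leaving 2+0+1+2 = 5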
| download_success: true |
| blob_id: fa0e09eac4d6132f18ad7a0081ba5e5e1638c099 | language: Python | repo_name: pythoncpp/Python01 | path: /day_16/page9.py | src_encoding: UTF-8 | length_bytes: 302 | score: 2.5625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
import pandas as pd
df = pd.read_csv('/Volumes/Data/Sunbeam/2019/August/workshops/Python01/day_16/temp.csv')
print(df.describe())
print()
print(df.info())
print()
df['expected'] = df.high + 10
print(df.info())
df.to_csv('/Volumes/Data/Sunbeam/2019/August/workshops/Python01/day_16/temp_modified.csv')
| download_success: true |
| blob_id: 448c31c7098f97ae049fa600938578b69f0a148c | language: Python | repo_name: shwang0416/Jungle_week03 | path: /basic/BFS/BOJ2589_보물섬.py | src_encoding: UTF-8 | length_bytes: 1331 | score: 3.140625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
# [Baekjoon] https://www.acmicpc.net/problem/2589 Treasure Island
# find the largest among the shortest distances between one 'L' and another 'L'
# solved with BFS
import sys
# input
sys.stdin = open('BOJ2589.txt')
row, col = list(map(int, sys.stdin.readline().split()))
board = []  # the map, read row by row as a 2D list of characters (bfs builds its own visited)
cnt = 0
max_value = 0
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
for i in range(row):
    # one input line per row of the map
    tmp = list(sys.stdin.readline().strip())
    board.append(tmp)
que = []
def bfs(y, x):
global cnt
que.append([y,x])
visited = [[0] * col for _ in range(row)]
visited[y][x] = 1
while len(que) != 0:
y, x = que.pop(0)
for i in range(4):
cx = x + dx[i]
cy = y + dy[i]
if cy >= 0 and cy < row and cx >= 0 and cx < col:
if board[cy][cx] == 'L' and visited[cy][cx] == 0:
que.append([cy, cx])
visited[cy][cx] = visited[y][x] + 1
cnt = max(cnt, visited[cy][cx])
return cnt
for i in range(row):
for j in range(col):
if board[i][j] == 'L':
cnt = bfs(i, j)
max_value = max(max_value, cnt)
cnt = 0
print(max_value-1)
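# Complexity note: BFS is launched from every 'L' cell, so the worst case is
# O((row*col)^2); with this problem's grid limit of 50x50 that is easily fast enough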
| download_success: true |
| blob_id: 846221564fb045b2dcd32c13dc7e854e6175d6ce | language: Python | repo_name: Aasthaengg/IBMdataset | path: /Python_codes/p03101/s270242628.py | src_encoding: UTF-8 | length_bytes: 502 | score: 2.625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
# 2019-11-12 22:11:12(JST)
import sys
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# from scipy.misc import comb # float
# import numpy as np
def main():
H, W, h, w = [int(x) for x in sys.stdin.read().split()]
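    # The h chosen rows cover h*W cells and the w chosen columns cover w cells in
    # each of the remaining H-h rows, so the unpainted count below is independent
    # of which rows and columns are actually picked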
ans = H * W - (h * W + (H - h) * w)
print(ans)
if __name__ == "__main__":
main()
| download_success: true |
| blob_id: 71dd56564d52c8db6fd528314ed89a72e7d262bb | language: Python | repo_name: WEgeophysics/watex | path: /examples/view/plot_phase_tensor_2d.py | src_encoding: UTF-8 | length_bytes: 818 | score: 3.015625 | int_score: 3 | detected_licenses: ["BSD-3-Clause"] | license_type: permissive |
"""
================================================
Plot two dimensional phase tensors
================================================
Gives a quick visualization of phase tensors for the
component 'yx'.
"""
# Author: L.Kouadio
# Licence: BSD-3-clause
#%%
from watex.view.plot import TPlot
from watex.datasets import load_edis
# get some 12 samples of EDI for demo
edi_data = load_edis (return_data =True, samples =12)
# customize plot by adding plot_kws
plot_kws = dict( ylabel = '$Log_{10}Frequency [Hz]$',
xlabel = '$Distance(m)$',
cb_label= '$Phase [\degree]$' ,
fig_size =(6, 3),
font_size =7.,
)
t= TPlot(component='yx', **plot_kws).fit(edi_data)
# plot the phase tensor in 2D, using a log10 scale
t.plot_tensor2d( tensor ='phase', to_log10=True)
| download_success: true |
| blob_id: 2f9a3b9601fb412d48077089653c820f2327cbd2 | language: Python | repo_name: michaelwozniak/web_scraping | path: /project_selenium/justjoinit_scraper.py | src_encoding: UTF-8 | length_bytes: 14543 | score: 2.90625 | int_score: 3 | detected_licenses: [] | license_type: no_license |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys #selenium features for keys from keyboard
from selenium.webdriver import ActionChains #selenium features for mouse movements
from selenium.webdriver.common.by import By #selenium features By
from selenium.webdriver.support.ui import WebDriverWait #selenium features for waiting
from selenium.webdriver.support import expected_conditions as EC #selenium features for waiting
import time
import datetime
import os
from os import path
import pandas as pd
import matplotlib.pyplot as plt #plots
import logging #library for logging
import re
def clean_html(raw_html):
"""Function removing html tags from string
Args:
String with html code
Returns:
Cleaned string
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
##################################################### Author: Michał Wrzesiński ############################################################
class Scraper():
log_file_name = "logs/log_" + str(datetime.datetime.now()).replace(":","_").replace("-","_").replace(" ","_") + ".txt" #name for log file
logging.basicConfig(
filename=log_file_name,
format='%(levelname)s: %(message)s',
level=logging.INFO
) # logging configuration; logs are available in logs folder
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
# declaring options
def __init__(self, headless_mode = True):
self.headless_mode = headless_mode
# path of geckodriver
        gecko_path = path.join(os.path.dirname(os.path.abspath(__file__)), 'geckodriver')
url = 'https://justjoin.it/'
options = webdriver.firefox.options.Options()
# headless mode depending on the initial choice of user
if headless_mode == True:
options.headless = True
else:
options.headless = False
self.driver = webdriver.Firefox(options = options, executable_path = gecko_path)
self.driver.get(url)
"""Constructor - declaration of scraper configuration"""
print("==========================================")
print("Please, configure scraper!")
print("==========================================")
# Page limit handling
pages_100_bool = input("Do you want to set the page limit to 100? [T/F]: \t") in {"T","True","TRUE","Y","yes","YES"}
if pages_100_bool == True:
self.number = 100
else:
self.number = 999999
element = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.XPATH, '//div[@class="css-son5n9"][text() = "offers with salary"]')))
element.click()
# Salary choice handling
        salary_expectations_bool = input("Do you want to provide salary boundaries (min and/or max)? [T/F]: \t") \
            in {"T","True","TRUE","Y","yes","YES"}
if salary_expectations_bool == True:
self.salary()
# Location choice handling
        self.localization_choice_bool = input("Do you want to choose the location of the offers? [T/F]: \t") \
            in {"T","True","TRUE",'Y',"yes","YES"}
if self.localization_choice_bool == True:
self.location()
# Location
# - if user chose location from all available - website will filter it,
# - if not - website close the window with cities and there will be prompt in console that: 'There is no such location. You will have offers with all possibile cities.'
def location(self):
element = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, '//span[@class="MuiButton-label"][text() = "Location"]')))
element.click()
# user input
self.choose_location = input("Please type, Which city are you interested in?: 'Białystok', 'Bielsko-Biała', 'Bydgoszcz', 'Częstochowa', 'Gliwice', 'Katowice', 'Kielce', 'Kraków', 'Lublin', 'Olsztyn', 'Opole', 'Poznań', 'Rzeszów', 'Szczecin', 'Toruń', 'Trójmiasto', 'Warszawa', 'Wrocław', 'Zielona Góra', 'Łódź': \t")
if (self.choose_location in ['Białystok', 'Bielsko-Biała', 'Bydgoszcz', 'Częstochowa', 'Gliwice', 'Katowice', 'Kielce', 'Kraków', 'Lublin', 'Olsztyn', 'Opole', 'Poznań', 'Rzeszów', 'Szczecin', 'Toruń', 'Trójmiasto', 'Warszawa', 'Wrocław', 'Zielona Góra', 'Łódź']):
element = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, f'//span[@class="MuiButton-label"][text() = "{self.choose_location}"]')))
element.click()
else:
            print('There is no such location. You will get offers from all possible cities.')
element = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, '//button[@class="MuiButtonBase-root MuiIconButton-root css-tze5xj"]')))
element.click()
# Salary
    # This part relies on ActionChains from Selenium.
    # The scraper clicks a point on the site and drags it to the desired position:
    # it clicks the element found by xpath, then calls move_by_offset with
    # (11 * min_salary / 1000, 0) for the minimum and (11 * (max_salary - 50000) / 1000, 0) for the maximum.
    # The factor of 11 per 1000 comes from the site's default horizontal-slider geometry.
    # For example, 0 is the default minimum salary: if the user enters 10000, the scraper drags the
    # left thumb from 0 to 10000 horizontally [swipe from left to right]; the maximum works the same
    # way, e.g. from the default 50000 down to 30000 [swipe from right to left].
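    # Numeric illustration of the slider arithmetic above (pixel values follow
    # from the assumed 11 px per 1000 PLN geometry): min_salary = 10000 moves the
    # left thumb 11*10000/1000 = 110 px right, and max_salary = 30000 moves the
    # right thumb 11*(30000-50000)/1000 = -220 px, i.e. 220 px to the left.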
def salary(self):
element = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, '//span[text() = "More filters"]')))
element.click()
# user inputs
min_salary = int(input('Choose minimum salary expectations:\n'))
max_salary = int(input('Choose maximum salary expectations:\n'))
en = self.driver.find_element_by_xpath('//span[@class="MuiSlider-thumb MuiSlider-thumbColorSecondary"][@data-index="0"]')
# swipe horizontal slider due to user input (left-hand edge)
move_left = ActionChains(self.driver)
move_left.click_and_hold(en).move_by_offset(11 * min_salary / 1000, 0).release().perform()
en = self.driver.find_element_by_xpath('//span[@class="MuiSlider-thumb MuiSlider-thumbColorSecondary"][@data-index="1"]')
# swipe horizontal slider due to user input (right-hand edge)
move_right = ActionChains(self.driver)
move_right.click_and_hold(en).move_by_offset(11 * (max_salary - 50000) / 1000, 0).release().perform()
element = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, '//span[@class="MuiButton-label"][text() = "Show offers"]')))
element.click()
# Offers
# This part involves gathering links to pages
    # A while loop appends links until the desired number of pages is reached or the bottom of the website is hit.
    # Looping over the two lists (elements and locations) is necessary because of the page's default behaviour:
    # when there are only a few offers from the chosen city, the website also displays offers from other cities,
    # so checking the location list guarantees that we gather links only for the desired city.
    # After each iteration the Page Down key scrolls further down; on every pass we
    # gather links into the list,
    # skip any link that is already present (no duplicates),
    # and stop appending when no new links appear (i.e. the bottom of the page was reached).
def offers(self):
# crating list of links of offers to further scraping
links = []
# while loop to reach destined number of pages
while(len(links) < self.number):
# lists of elements and locations before 'Page Down'
elements = self.driver.find_elements_by_css_selector("a.css-18rtd1e")
locations = self.driver.find_elements_by_xpath("//div[@class='css-1ihx907']")
# checking length of links before loop
check_before = len(links)
# for loop for elements and location
for element, location in zip(elements, locations):
if (self.localization_choice_bool == True and self.choose_location in ['Białystok', 'Bielsko-Biała', 'Bydgoszcz', 'Częstochowa', 'Gliwice', 'Katowice', 'Kielce', 'Kraków', 'Lublin', 'Olsztyn', 'Opole', 'Poznań', 'Rzeszów', 'Szczecin', 'Toruń', 'Trójmiasto', 'Warszawa', 'Wrocław', 'Zielona Góra', 'Łódź']):
# solving problem with displaying offers for other cities
if(location.text == self.choose_location):
link = element.get_attribute("href")
# if link exists in list of links - continue
if(link in links):
continue
else:
# append links
links.append(link)
# if length of links is >= predefined number of pages - break
if (len(links)>= self.number):
break
else:
break
else:
link = element.get_attribute("href")
# if link exists in list of links - continue
if(link in links):
continue
else:
# append links
links.append(link)
# if length of links is >= predefined number of pages - break
if (len(links)>= self.number):
break
# checking length of links after loop
check_after = len(links)
# press 'Page Down' key to get next list of offers
elements[-1].send_keys(Keys.PAGE_DOWN)
time.sleep(5)
# if length of links after loop and before loop are the same - bottom of the page - break the loop
# if not - continue
if check_before == check_after:
break
else:
continue
return links
########################################## End of Michał Wrzesiński part ##############################################
########################################## Author: Rafał Rysiejko ####################################################
def link_opener(self):
#Call offers method to fetch url links to filtered offers.
links = self.offers()
#Create placeholders for output
offer_link_list=[]
offer_title_list=[]
company_name_list=[]
company_size_list=[]
empoyment_type_list=[]
experience_lvl_list=[]
salary_list=[]
place_list=[]
tech_stack_list=[]
company_page_list=[]
direct_apply_list=[]
offer_description_list=[]
#Iterate through each offer link.
for link in links:
#Call driver to open a given url.
self.driver.get(link)
            #Scrape the appropriate items from the opened site.
offer_link = link
offer_title = self.driver.find_element_by_xpath("//span[@class='css-1v15eia']").text
company_name = self.driver.find_element_by_xpath("//a[@class='css-l4opor']").text
company_size = self.driver.find_element_by_xpath("//div[2]/div[@class='css-1ji7bvd']").text
empoyment_type = self.driver.find_element_by_xpath("//div[3]/div[@class='css-1ji7bvd']").text
experience_lvl = self.driver.find_element_by_xpath("//div[4]/div[@class='css-1ji7bvd']").text
salary = self.driver.find_element_by_xpath("//span[@class='css-8cywu8']").text
place = self.driver.find_element_by_xpath("//div[@class='css-1d6wmgf']").text
tech_stack = [{i.text:j.text} for i,j in zip (self.driver.find_elements_by_xpath("//div[@class='css-1eroaug']"),self.driver.find_elements_by_xpath("//div[@class='css-19mz16e']"))]
direct_apply = True if len(self.driver.find_element_by_xpath("//button[@class='MuiButtonBase-root MuiButton-root MuiButton-text css-im43rs']").text) !=0 else False
company_page = self.driver.find_element_by_xpath("//a[@class='css-l4opor']").get_attribute("href")
offer_description = clean_html(self.driver.find_element_by_xpath("//div[@class='css-u2qsbz']").text)
            #Append newly scraped elements to their corresponding lists
offer_link_list.append(offer_link)
offer_title_list.append(offer_title)
company_name_list.append(company_name)
company_size_list.append(company_size)
empoyment_type_list.append(empoyment_type)
experience_lvl_list.append(experience_lvl)
salary_list.append(salary)
place_list.append(place)
tech_stack_list.append(tech_stack)
company_page_list.append(company_page)
direct_apply_list.append(direct_apply)
offer_description_list.append(offer_description)
#Save output to a Pandas data.frame object.
output = pd.DataFrame(list(zip(offer_link_list,
offer_title_list,
company_name_list,
company_size_list,
empoyment_type_list,
experience_lvl_list,
salary_list,
place_list,
tech_stack_list,
direct_apply_list,
company_page_list,
offer_description_list)), columns=['offer_link', 'offer_title', 'company_name','company_size','empoyment_type','experience_lvl','salary','place','tech_stack','direct_apply','company_page','offer_description_list'])
        #Return the data.frame object
return output
# Destructor
def __del__(self):
self.driver.quit()
if __name__ == '__main__':
c = Scraper()
links = c.link_opener()
links.to_csv('output.csv', encoding='utf-8')
c.__del__()
| true
|
bd4fb09616d5b2f891e555952362f8cbc9bfbfc0
|
Python
|
MarinaFirefly/Python_homeworks
|
/6/homework6/lists_max.py
|
UTF-8
| 1,246
| 4.40625
| 4
|
[] |
no_license
|
#function find_max_dif finds the maximal difference in length between the elements of 2 lists and prints it together with the position where it occurs.
#the lists should have the same length; otherwise the zip function will truncate to the shortest list
list1 = [12,34,565]
list2 = [123123,67,78,12444]
str1 = "Is this the real life?"
str2 = "Is this just fantazy?"
list3 = str1.split(" ")
list4 = str2.split(" ")
#function find_max_dif takes 2 lists as parameters
def find_max_dif(l1,l2):
    #check that both arguments are lists. Otherwise the function prints the message "At least one of the arguments isn't list!"
if type(l1) != list or type(l2) != list:
return (print("At least one of the arguments isn't list!"))
else:
new_list = []
for i, j in zip(l1,l2):
            #add differences in length of elements to new_list. Values in the list are always positive because abs() is used
new_list.append(abs(len(str(i)) - len(str(j))))
        #print a string containing the maximal value from new_list and its position in the list, starting from 0
return print("Maximal difference in length is " + str(max(new_list)) + " between " + str(new_list.index((max(new_list)))) + " elements!")
find_max_dif(list1,list2)
find_max_dif(list3,list4)
find_max_dif("sad",list4)
| true
|
b7ed99236f1c1295efa83737369ee5ca156a9e95
|
Python
|
ChristoffenOSWorks/PandaCat
|
/cairo_coordinates.py
|
UTF-8
| 652
| 3.296875
| 3
|
[] |
no_license
|
number_of_times = int(raw_input("Please enter the number of pairs you want drawn"))
time_current = 0
while (time_current < number_of_times):
print " Please enter X value of the first pair"
point_x1 = float(raw_input(" >> "))
print " Please enter Y value of the first pair"
point_y1 = float(raw_input(" >> "))
time_current += 1
with open('out.txt', 'a') as f:
print >> f, "cairo_line_to(cr, " + str(point_x1) + ", " + str(point_y1) + ");"
print >> f, "cairo_close_path(cr);"
f.close()
print "cairo_line_to(cr, " + str(point_x1) + ", " + str(point_y1) + ");"
print "cairo_close_path(cr);"
| true
|
17c3962d6d0e8688d9f700acc2f436612548ccd1
|
Python
|
CaioOliveiraOFC/Sockets-em-python
|
/TCPServer.py
|
UTF-8
| 2,213
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3.9
#Import the socket module and the time module
from socket import *
from time import sleep
#Assign the port to the server and create the socket
serverPort = 12000
print('This server will use port {} for the connection'.format(serverPort))
sleep(5)
print('Creating the socket I will use for this application...')
serverSocket = socket(AF_INET, SOCK_STREAM)
sleep(10)
#Bind an IP and a port to the socket; '' means the kernel will assign the IP for us (the local IP is used when testing on the same machine)
print('Binding my IP address and the port to the socket I just created...')
serverSocket.bind(('', serverPort))
sleep(10)
print('I am ready and waiting for connections...')
print('You can now run the TCPClient.py file')
# The server is waiting for the connection; .listen(1) listens, and 1 is the maximum number of queued connections
serverSocket.listen(1)
#After listening it enters an infinite loop; there are ways to break this loop, but in this case I will leave it infinite.
while True:
    # NOTE: THERE IS A PARTICULARITY HERE (TCP): our serverSocket is only an entry point for the connection, it is what performs the handshake;
    # connectionSocket and addr inherit the parameters that the .accept() method passes to them, respectively (see the documentation)
    # When the client knocks on the '.listen()' door, control drops straight into this loop, which creates a connection just for the client that is trying to connect; that is how we get multiple connections on a TCP server.
    connectionSocket, addr = serverSocket.accept()
    print('I created the temporary socket I will use to talk to the client: {}; this is the socket we will use to exchange information'.format(addr))
    # From here on it works like the other code
    msg = connectionSocket.recv(1024)
    print('I received the packet...')
    sleep(5)
    msg = msg.decode()
    print('I decoded the packet...')
    sleep(5)
    msg = msg.upper()
    print('I converted the message to upper case...')
    sleep(5)
    print('Getting ready to send the message...')
    sleep(10)
    connectionSocket.send(msg.encode())
    print('Message SENT!!')
    #close the connection
    connectionSocket.close()
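# A minimal sketch of a matching client, kept as comments (the real
# TCPClient.py may differ):
#   from socket import *
#   clientSocket = socket(AF_INET, SOCK_STREAM)
#   clientSocket.connect(('127.0.0.1', 12000))
#   clientSocket.send('hello server'.encode())
#   print(clientSocket.recv(1024).decode())  # -> 'HELLO SERVER'
#   clientSocket.close()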
| true
|
80f46d0a286a5bdf17869a03588d9af89a7840f4
|
Python
|
MudretsovaSV/Python
|
/footbolGurls10to12.py
|
WINDOWS-1251
| 328
| 4.125
| 4
|
[] |
no_license
|
gender = raw_input("Who is the player - m or f? (m - male, f - female) ")
if gender == "m":
    print "Sorry, this team is for girls only."
elif gender == "f":
    age = float(raw_input("How old are you? "))
    if 10 <= age <= 12:
        print "You are accepted to the team"
    else:
        print "You are not accepted to the team"
| true
|
cd8de4d800845e6fccbd1315dd01c51fe672f42b
|
Python
|
parthjalan37/Timetable-Generation
|
/main_gui.py
|
UTF-8
| 3,752
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
from tkinter import *
from new_main import semaphore_algo
window = Tk()
window.title("Timetable Generation OS Project")
class ProvideException(object):
def __init__(self, func):
self._func = func
def __call__(self, *args):
try:
return self._func(*args)
except ValueError:
text7 = Label(window, text="Please enter integer values only")
text7.grid(row=7, column=0)
except KeyboardInterrupt:
text7 = Label(window, text="You hit a interrupt key like ' ctrl+c' or 'ctrl+v'. Please rerun the code.")
text7.grid(row=7, column=0)
@ProvideException
def set_values():
list_1 = [label3_1.get(), label3_2.get(), label3_3.get(), label3_4.get()] #Batch 1
list_2 = [label4_1.get(), label4_2.get(), label4_3.get(), label4_4.get()] #Batch 2
list_3 = [label5_1.get(), label5_2.get(), label5_3.get(), label5_4.get()] #Batch 3
list_4 = [label6_1.get(), label6_2.get(), label6_3.get(), label6_4.get()] #Batch 4
final_list = [list_1, list_2, list_3, list_4]
print(list_1)
print(list_2)
print(list_3)
print(list_4)
print(final_list)
fac_list_1 = [] # Number of lectures by each batch
fac_list_2 = []
fac_list_3 = []
fac_list_4 = []
for faculty_no in range(0, 4):
x = int(final_list[faculty_no][0])
for hour_cnt in range(0, x):
fac_list_1.append(faculty_no)
x1 = int(final_list[faculty_no][1])
for hour_cnt in range(0, x1):
fac_list_2.append(faculty_no)
x2 = int(final_list[faculty_no][2])
for hour_cnt in range(0, x2):
fac_list_3.append(faculty_no)
x3 = int(final_list[faculty_no][3])
for hour_cnt in range(0, x3):
fac_list_4.append(faculty_no)
print(fac_list_1)
print(fac_list_2)
print(fac_list_3)
print(fac_list_4)
semaphore_algo(fac_list_1, fac_list_2, fac_list_3, fac_list_4)
text1 = Label(window, text="Enter the faculty hours required for each branch")
text1.grid(row=0)
text2 = Label(window, text="Branch Name")
text2_1 = Label(window, text="Faculty 1")
text2_2 = Label(window, text="Faculty 2")
text2_3 = Label(window, text="Faculty 3")
text2_4 = Label(window, text="Faculty 4")
text2.grid(row=1, column=0)
text2_1.grid(row=1, column=1)
text2_2.grid(row=1, column=2)
text2_3.grid(row=1, column=3)
text2_4.grid(row=1, column=4)
text3 = Label(window, text="B.Tech CS")
label3_1 = Entry(window)
label3_2 = Entry(window)
label3_3 = Entry(window)
label3_4 = Entry(window)
text3.grid(row=2, column=0)
label3_1.grid(row=2, column=1)
label3_2.grid(row=2, column=2)
label3_3.grid(row=2, column=3)
label3_4.grid(row=2, column=4)
text4 = Label(window, text="B.Tech IT")
label4_1 = Entry(window)
label4_2 = Entry(window)
label4_3 = Entry(window)
label4_4 = Entry(window)
text4.grid(row=3, column=0)
label4_1.grid(row=3, column=1)
label4_2.grid(row=3, column=2)
label4_3.grid(row=3, column=3)
label4_4.grid(row=3, column=4)
text5 = Label(window, text="MBA.Tech CS")
label5_1 = Entry(window)
label5_2 = Entry(window)
label5_3 = Entry(window)
label5_4 = Entry(window)
text5.grid(row=4, column=0)
label5_1.grid(row=4, column=1)
label5_2.grid(row=4, column=2)
label5_3.grid(row=4, column=3)
label5_4.grid(row=4, column=4)
text6 = Label(window, text="MBA.Tech IT")
label6_1 = Entry(window)
label6_2 = Entry(window)
label6_3 = Entry(window)
label6_4 = Entry(window)
text6.grid(row=5, column=0)
label6_1.grid(row=5, column=1)
label6_2.grid(row=5, column=2)
label6_3.grid(row=5, column=3)
label6_4.grid(row=5, column=4)
button1 = Button(window, text="Submit Request", command=set_values)
button1.grid(row=6, column=2)
window.mainloop()
| true
|
8a49e0a6498a8fc3b7b5b968e06e16e63e3ecef9
|
Python
|
Skillz619/CS-180
|
/Python/Roman-decimal.py
|
UTF-8
| 646
| 4.0625
| 4
|
[] |
no_license
|
#This program converts Roman numerals to decimal integers
# using Python dictionaries
x = {"I":1,"V":5,"X":10,"L":50,"C":100,"D":500,"M":1000}
value = input("Enter a roman numeral: ")
value = value.upper()
total = 0
try:
    for i in range(len(value)):
        if i+1 < len(value):
            if x[value[i]] >= x[value[i+1]]: #if the next value is not greater, we add
                total += x[value[i]]
            else:
                total -= x[value[i]] #if the next value is greater, we subtract
        else: #the last numeral is always added
            total += x[value[i]]
except:
    print("Your input was wrong")
print(total)
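# Worked example: "MCMXCIV"
#   M(1000) >= C -> +1000 ; C(100) < M -> -100 ; M(1000) >= X -> +1000
#   X(10) < C -> -10 ; C(100) >= I -> +100 ; I(1) < V -> -1 ; V(5) is last -> +5
#   total = 1994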
| true
|
cb0403875926fa9d7ca383eb6f74cb94d4703d3a
|
Python
|
stanpython/Python-Scripts
|
/SSP_createAppendix.py
|
UTF-8
| 3,368
| 2.78125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 15:36:17 2020
@author: stanleyhuang2
"""
import pandas as pd
import numpy as np
import datetime as dt
import tkinter as tk
from tkinter import filedialog
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 200, height = 200, bg = 'lightblue1')
canvas1.pack()
def getExcel ():
global df
import_file_path = filedialog.askopenfilename()
df = pd.read_excel (import_file_path)
root.destroy()
if __name__ == '__main__':
browseButton_Excel = tk.Button(root, text='Click to select Data Collection (Cleaned) file', wraplength=80, command=getExcel, bg='green', fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(100, 100, window=browseButton_Excel)
root.mainloop()
root2= tk.Tk()
canvas1 = tk.Canvas(root2, width = 200, height = 200, bg = 'lightblue1')
canvas1.pack()
def getExcel ():
global df2
import_file_path = filedialog.askopenfilename()
df2 = pd.read_excel (import_file_path)
root2.destroy()
browseButton_Excel = tk.Button(root2, text='Click to select Application Findings (Cleaned) file', wraplength=90, command=getExcel, bg='red', fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(100, 100, window=browseButton_Excel)
root2.mainloop()
#df = pd.read_excel('data_collection-cleaned.xlsx')
#df2 = pd.read_excel('app_findings-cleaned.xlsx')
str = ['No']
high = ['High']
#Create result sets for different columns needed for appendix
result = df[['PPA Record', 'Name of the Application', 'Business Group', 'Hosting Location', 'Last/Latest Test Date', 'Comments for delay/non compliance', 'In Compliance (Yes/No)', 'CTO']]
result2 = df2[['Finding ID', 'Helper - Project Name', 'Finding', 'Risk Rating', 'Practice', 'Open Date', 'Days Open', 'CTO', 'Response']]
result3 = df2[['Pre - Production Application', 'Finding ID', 'Helper - Project Name', 'Finding', 'Risk Rating', 'Group Name','Business Owner','Risk Acceptance Reasoning', 'Response', 'Open Date']]
#Filter the dataframes above
not_compliant = result[result['In Compliance (Yes/No)'].isin(str)]
high = result2[(result2['Risk Rating']=='High') & (result2['Response']=='Remediate Risk') & (result2['Helper - Project Name']!='KPMG Foundation')]
med = result2[(result2['Risk Rating']=='Medium') & (result2['Response']=='Remediate Risk') & (result2['Helper - Project Name']!='KPMG Foundation')]
acceptrisk = result3[(result3['Response']=='Accept Risk') & (result3['Helper - Project Name']!='KPMG Foundation')]
#Sort each sheet
high = high.sort_values(by='Days Open', ascending=False)
med = med.sort_values(by='Days Open', ascending=False)
acceptrisk = acceptrisk.sort_values('Risk Rating')
#Reset index column (A)
not_compliant.reset_index(inplace=True, drop=True)
not_compliant.index += 1
high.reset_index(inplace=True, drop=True)
high.index += 1
med.reset_index(inplace=True, drop=True)
med.index += 1
acceptrisk.reset_index(inplace=True, drop=True)
acceptrisk.index += 1
#Export to excel
writer = pd.ExcelWriter(dt.datetime.today().strftime("%Y%m%d") + '_appendix.xlsx', engine = 'openpyxl')
not_compliant.to_excel(writer, sheet_name = 'Not Compliant', index=True)
high.to_excel(writer, sheet_name = 'Very high & High')
med.to_excel(writer, sheet_name = 'Medium')
acceptrisk.to_excel(writer, sheet_name = 'Risk Accepted')
writer.save()
writer.close()
| true
|
33457d93ff9148c9cbb56b2b172de14f2ad05398
|
Python
|
gspetillo/pythagorean-calculator-api
|
/main.py
|
UTF-8
| 2,939
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
from flask import Flask, request
from flask_restful import Resource , Api
import math
app = Flask(__name__)
api = Api(app)
class Hypotenuse(Resource):
def get(self):
args = request.args
if('sideA' in args and 'sideB' in args):
sideA = float(args['sideA'])
sideB = float(args['sideB'])
hypotenuse = math.sqrt( pow(sideA,2) + pow(sideB,2) )
return {
'status': 200,
'data': hypotenuse
},200
else:
return {
'status': 422,
'data': 'Invalid parameters'
},422
class Side(Resource):
def get(self):
args = request.args
if('side' in args and 'hypotenuse' in args):
side = float(args['side'])
hypotenuse = float(args['hypotenuse'])
if(hypotenuse>side):
side = math.sqrt(pow(hypotenuse,2) - pow(side,2))
return {
'status': 200,
'data': side
},200
else:
return {
'status': 200,
                    'data': 'Side can\'t be greater than or equal to hypotenuse'
},200
else:
return {
'status': 422,
'data': 'Invalid parameters'
},422
class PythagorasCalculator(Resource):
def get(self):
return {
'about':'Welcome to PythagorasCalculator API',
'description': 'Use routes to return values of hypotenuse or side',
'routes': {
'/hypotenuse': [{
'methods':[
'GET'
],
'args':{
'sideA':{
'type': 'float',
'required': True
},
'sideB':{
'type': 'float',
'required': True
},
},
}],
'/side': [{
'methods':[
'GET'
],
'args':{
'side':{
'type': 'float',
'required': True
},
'hypotenuse':{
'type': 'float',
'required': True
},
}
}],
}
}
api.add_resource(PythagorasCalculator, '/')
api.add_resource(Hypotenuse, '/hypotenuse')
api.add_resource(Side, '/side')
if __name__ == '__main__':
app.run(threaded=True, port=5000)
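# Example requests against a local run (Flask defaults to http://127.0.0.1:5000):
#   GET /hypotenuse?sideA=3&sideB=4 -> {"status": 200, "data": 5.0}
#   GET /side?side=3&hypotenuse=5   -> {"status": 200, "data": 4.0}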
| true
|
cf5f6732ae47ffcbe551c023de903ee45b152439
|
Python
|
RPGroup-PBoC/human_impacts
|
/code/figures/barnyard_number/cattle_production.py
|
UTF-8
| 2,584
| 2.9375
| 3
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import anthro.io
import anthro.viz
colors = anthro.viz.plotting_style()
# Load the FAO data
data = pd.read_csv('../../../data/agriculture/FAOSTAT_livestock_product_produced/processed/FAOSTAT_livestock_and_product.csv')
cattle = data[data['category']=='cattle'].copy()
cattle.drop(columns=['category'], inplace=True)
# Compute the total and append.
tot = cattle.groupby(['year']).sum().reset_index()
tot['subcategory'] = 'total'
merged = pd.concat([cattle, tot], sort=False)
# Rescale the units to the estimate unit.
merged['kg_mass'] = merged['mass_produced_Mt'].values * 1E9 / 1E11
#%%
fig, ax = plt.subplots(1, 1, figsize=(3, 1.7))
ax.xaxis.set_tick_params(labelsize=6)
ax.yaxis.set_tick_params(labelsize=6)
ax.set_xlim([1961, 2018])
ax.set_ylim([0, 10.5])
ax.set_yticks([0, 2, 4, 6, 8, 10])
ax.set_xlabel('year', fontsize=6)
ax.set_ylabel('mass of cattle product [10$^{11}$ kg]', fontsize=6)
beef = merged[merged['subcategory']=='beef']
dairy = merged[merged['subcategory']=='dairy (milk)']
total = merged[merged['subcategory']=='total']
ax.hlines(10, 1961, 2018, 'k', linestyle='--', lw=0.75, label='estimate')
ax.plot(beef['year'], beef['kg_mass'], '-o', ms=1, lw=0.5, label='beef',
color=colors['red'])
ax.plot(dairy['year'], dairy['kg_mass'], '-o', ms=1, lw=0.5, label='dairy (milk)',
color='white')
ax.plot(total['year'], total['kg_mass'], '-o', ms=1, lw=0.5, label='total',
color=colors['dark_green'])
ax.legend(fontsize=6, handlelength=0.75, loc='upper left')
plt.savefig('../../../figures/barnyard_number/cattle_product_mass.svg')
# %%
# load the data of livestock populations and examine only cattle
livestock_pop = pd.read_csv('../../../data/agriculture/FAOSTAT_livestock_population/processed/FAOSTAT_Livestock_population.csv')
cattle = livestock_pop[livestock_pop['animal']=='cattle'].copy()
# Adjust the units
cattle['pop_bhd'] = cattle['population_Mhd'] * 1E6 / 1E9
fig, ax = plt.subplots(1, 1, figsize=(3, 2))
ax.xaxis.set_tick_params(labelsize=6)
ax.yaxis.set_tick_params(labelsize=6)
ax.set_xlabel('year', fontsize=6)
ax.set_ylabel('standing population [billions]', fontsize=6)
ax.set_ylim([0, 2])
ax.set_yticks([0.0, 0.5, 1.0, 1.5, 2])
ax.set_xlim([1961, 2018])
ax.hlines(1.3, 1961, 2018, 'k', linestyle='--', lw=0.75,
label='estimate')
ax.plot(cattle['year'], cattle['pop_bhd'], '-o', color=colors['blue'],
ms=1, lw=0.5, label='total population')
ax.legend(fontsize=6)
plt.savefig('../../../figures/barnyard_number/cattle_population.svg')
# %%
| true
|
088c777c0bdf812d69fb45b3f08a37a932a5622a
|
Python
|
sdvillal/happysad
|
/happysad.py
|
UTF-8
| 13,389
| 2.921875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# coding=utf-8
"""
Black magic metaprogramming to redefine descriptors in python instances.
You should never lie; avoid using this if possible.
When using it, you should really understand what you are doing.
You will probably also need paracetamol.
These patched objects have two personalities, or more concretely, two classes.
One is their original class, when exposing it objects are "sad".
The other one is an instance specific subclass, when exposing it objects are "happy".
(just funny API)
TODO... write proper doc and tests
Pickling
--------
Mixing liars dark magic with object serialization / pickling is not a good idea.
You can either use dill or temporarily pickle using
>>> import pickle
>>> inst = 2
>>> with forget_synthetic_class(inst):
... pickle.dumps(inst)
In our use case, serialization was handled by pickle only after
storing the important stuff in a dictionary.
Using these functions you can control access to members of objects when
you do not want to, or cannot, touch their code, and overriding or simple
attribute setting would not be enough.
We use this magic at loopbio to modify the behavior of layers in deep neural networks
from (heavily designed) frameworks, hoping for minimal maintenance costs on our
side. Thanks to them we are able to correct performance deficits and bugs in these
frameworks.
"""
from __future__ import print_function, division
from contextlib import contextmanager
__author__ = 'Santi Villalba'
__version__ = '0.1.0'
__license__ = '3-clause BSD'
__all__ = ['happy', 'make_happy', 'maybe_happy',
'sad', 'make_sad',
'saddest',
'RetrievableDescriptor', 'MemberView', 'ControlledSetter',
'take_happy_pills', 'create_with_joy']
# --- Synthetic/Original classes swapping
# noinspection PyProtectedMember
def _original_class(inst):
"""Returns `inst` original class, without any class swapping."""
try:
return inst._Xoriginal_classX
except AttributeError:
return inst.__class__
# noinspection PyProtectedMember
def _synthetic_class(inst):
"""Returns `inst` synthetic class (can be None), without any swapping."""
try:
return inst._Xsynthetic_classX
except AttributeError:
return None
def _bookkept_attrs(inst):
"""Returns the dictionary of synthetic class bookkept attributes."""
return _synthetic_class(inst).bookeeping
def _delete_old_attrs(inst):
"""Deletes the synthetic class bookkept attributes from the instance."""
if inst.__class__ == _original_class(inst):
bookkept = _bookkept_attrs(inst)
for attr in bookkept:
try:
bookkept[attr] = getattr(inst, attr)
delattr(inst, attr)
except AttributeError:
pass
def _set_synthetic(inst):
"""
Mutates the instance to be of the synthetic class.
Takes care of storing away the bookkept attributes.
"""
_delete_old_attrs(inst)
inst.__class__ = _synthetic_class(inst)
def _reset_old_attrs(inst):
"""Sets the synthetic class bookkept attributes in the instance."""
if inst.__class__ == _original_class(inst):
for attr, val in _bookkept_attrs(inst).items():
setattr(inst, attr, val)
def _set_original(inst):
"""
Mutates the instance to be of the original class.
Takes care of restoring the bookkept attributes.
"""
inst.__class__ = _original_class(inst)
_reset_old_attrs(inst)
def _create_synthetic_class(cls):
"""Creates a synthetic subclass of cls, adding a few attributes."""
# Python 2, old style classes support
if not isinstance(cls, type):
cls = type(cls.__name__, (cls, object), {})
# Create the subclass
return type(cls.__name__, (cls,), {'XsyntheticX': True,
'bookeeping': {}})
# noinspection PyProtectedMember,PyTypeChecker
def force_synthetic_class(inst):
"""
Derives a synthetic class from `inst` class and assigns it to `inst.__class__`.
If inst already has a synthetic class in `inst._Xsynthetic_classX`,
it is used instead of creating a new one.
In this way any manipulation to the instance class will be local to `inst`.
The original class can be retrieved by `inst._Xoriginal_classX`.
The synthetic class has provision for storing old values in the original
instance by providing a "bookeeping" dictionary. It can be used to provide
"undo" / "redo" abilities to other monkey-patching pals.
Parameters
----------
inst : object
Any object we want to make its class local to.
Returns
-------
The synthetic class of the object (i.e. its current class, for fluency).
"""
if not hasattr(inst, '_Xsynthetic_classX'):
inst._Xsynthetic_classX = _create_synthetic_class(type(inst))
inst._Xoriginal_classX = inst.__class__
inst.__class__ = inst._Xsynthetic_classX
_set_synthetic(inst)
return inst.__class__
def maybe_synthetic_class(inst):
"""
Attributes inst to its synthetic class if it exists, otherwise does nothing.
Returns the current class for the instance for fluency.
"""
try:
_set_synthetic(inst)
except AttributeError:
pass
return inst.__class__
def force_original_class(inst):
"""
Forces an instance to use its original class.
See `force_synthetic_class`.
Returns the current class for the instance for fluency.
"""
try:
_set_original(inst)
except AttributeError:
pass
return inst.__class__
def forget_synthetic_class(inst):
try:
force_original_class(inst)
delattr(inst, '_Xsynthetic_classX')
except AttributeError:
pass
return inst.__class__
def _original_class_contextmanager_factory(forget):
"""
Generate context managers for setting the original class.
If forget is False, `force_original_class` is called,
simply ensuring the object is of the original class in the context.
If forget is True, `forget_synthetic_class` is called,
ensuring the object is of the original class in the context
and temporarily removing the synthetic class attribute.
This is specially useful to ensure (de)serialization does not
fail because of the generated classes.
"""
to_original = force_original_class if not forget else forget_synthetic_class
@contextmanager
def cm(inst, *insts):
insts = (inst,) + insts
current_classes = [inst.__class__ for inst in insts]
synth_classes = [_synthetic_class(inst) for inst in insts]
if len(insts) == 1:
yield to_original(insts[0])
else:
yield tuple(to_original(inst) for inst in insts)
for current_class, synth_class, inst in zip(current_classes, synth_classes, insts):
if synth_class is not None:
inst._Xsynthetic_classX = synth_class
if current_class == _synthetic_class(inst):
force_synthetic_class(inst)
cm.__name__ = 'original_class' if not forget else 'no_synthetic_class'
cm.__doc__ = ('Call `%s` in a context manager.' %
('force_original_class' if not forget else 'forget_synthetic_class'))
return cm
original_class = _original_class_contextmanager_factory(forget=False)
no_synthetic_class = _original_class_contextmanager_factory(forget=True)
@contextmanager
def synthetic_class(inst, *insts):
"""Call `force_synthetic_class` in a context manager."""
insts = (inst,) + insts
classes = [inst.__class__ for inst in insts]
if len(insts) == 1:
yield force_synthetic_class(insts[0])
else:
yield tuple(force_synthetic_class(inst) for inst in insts)
for cls, inst in zip(classes, insts):
if cls == _original_class(inst):
force_original_class(inst)
# --- Descriptors
class RetrievableDescriptor(object):
"""
An abstract descriptor which allows to retrieve itself and control setting policies.
Ideally, you will need to override `_get_hook` and `set_hook` in subclasses.
Parameters
----------
on_set: one of ('pass', 'fail', 'set')
What to do with the descriptor when set is called.
If pass: do nothing
If fail: raise an exception
If set: call hook method _set_hook()
"""
def __init__(self, on_set='pass'):
super(RetrievableDescriptor, self).__init__()
valid_on_set = 'pass', 'fail', 'set'
if on_set not in valid_on_set:
raise ValueError('on_set must be one of %r' % (valid_on_set,))
self.on_set = on_set
def __get__(self, instance, owner):
# Allow to access the descriptor itself via the class
if instance is None:
return self
return self._get_hook(instance, owner)
def _get_hook(self, instance, owner):
"""Actual implementation of __get__ when it is called on the instance, instead of on the class."""
raise NotImplementedError()
def __set__(self, instance, value):
if self.on_set == 'fail':
raise Exception('Trying to set a read only constant')
elif self.on_set == 'set':
self._set_hook(instance, value)
def _set_hook(self, instance, value):
"""Actual implementation of __set__ when `self.on_set == 'set'`."""
raise NotImplementedError()
class MemberView(RetrievableDescriptor):
"""A descriptor that acts as a view to another object member."""
def __init__(self, viewed_object, parameter, on_set='pass'):
super(MemberView, self).__init__(on_set=on_set)
self.viewed_object = viewed_object
self.parameter = parameter
def _get_hook(self, _, owner):
return getattr(self.viewed_object, self.parameter)
def _set_hook(self, _, value):
setattr(self.viewed_object, self.parameter, value)
class ControlledSetter(RetrievableDescriptor):
"""A descriptor that can (dis)allow setting and always returns a private variable."""
def __init__(self, val=None, on_set='pass'):
super(ControlledSetter, self).__init__(on_set=on_set)
self.val = val
def _get_hook(self, *_):
return self.val
def _set_hook(self, _, value):
self.val = value
# Some useful descriptors
AlwaysNone = ControlledSetter(val=None, on_set='pass')
StrictAlwaysNone = ControlledSetter(val=None, on_set='fail')
def add_descriptors(inst, bookkeep_attrs=False, **descriptors):
"""
Adds descriptors to an object instance class.
`inst' is forced to have a local synthetic class first, so the original
class is untouched (see `force_synthetic_class`). As a side effect, inst
is mutated to be of the synthetic class.
Any attribute already in the instance will be deleted. They can be
    saved by setting `bookkeep_attrs` to True. In this case, they will be re-established
and deleted each time `force_synthetic_class` and `force_original_class` are
used to cycle through inst synthetic and original classes.
Returns inst itself for fluency.
Examples
--------
>>> class Mango(object):
... def __init__(self, price=2):
... super(Mango, self).__init__()
... self.price = price
>>> mango = Mango()
>>> mango.price
2
>>> mango = add_descriptors(mango, bookkeep_attrs=True, price=ControlledSetter(5))
>>> mango.price
5
>>> mango.price = 7
>>> mango.price
5
>>> mango = add_descriptors(mango, price=ControlledSetter(5, on_set='fail'))
>>> mango.price = 7
Traceback (most recent call last):
...
Exception: Trying to set a read only constant
>>> with sad(mango):
... print('Old original price:', mango.price)
... mango.price = 2.5
... print('New original price:', mango.price)
Old original price: 2
New original price: 2.5
>>> mango.price
5
>>> with sad(mango):
... print('Old original price:', mango.price)
Old original price: 2.5
>>> with happy(mango):
... mango.price
5
"""
cls = force_synthetic_class(inst)
for name, descriptor in descriptors.items():
try:
if bookkeep_attrs:
_bookkept_attrs(inst)[name] = getattr(inst, name)
delattr(inst, name)
except AttributeError:
pass
setattr(cls, name, descriptor)
return inst
def class_with_descriptors(cls, **descriptors):
"""Creates a subclass from cls and adds some descriptors to it."""
# Derive a new class, with the given descriptors
cls = _create_synthetic_class(cls)
for name, descriptor in descriptors.items():
setattr(cls, name, descriptor)
return cls
def intercept_creation(cls, descriptors, *args, **kwargs):
"""Intercepts attribute access upon instance creation."""
synthetic = class_with_descriptors(cls, **descriptors)
inst = synthetic(*args, **kwargs)
inst._Xsynthetic_classX = synthetic
inst._Xoriginal_classX = cls
return inst
# --- Happy/Sad API
make_happy = force_synthetic_class
happy = synthetic_class
maybe_happy = maybe_synthetic_class
make_sad = force_original_class
sad = original_class
make_saddest = forget_synthetic_class
saddest = no_synthetic_class
take_happy_pills = add_descriptors
create_with_joy = intercept_creation
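# A minimal usage sketch (illustrative only), reusing the Mango class from the
# add_descriptors docstring above:
#   mango = create_with_joy(Mango, {'price': ControlledSetter(9)}, price=2)
#   mango.price  # -> 9: the descriptor intercepts the attribute, including
#                #    the `self.price = 2` assignment made in __init__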
| true
|
f3b355274d540c8887235e4fad86109dfe6885d0
|
Python
|
robotics-4-all/tektrain-robot-sw
|
/tests/test_mc23x17.py
|
UTF-8
| 838
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
import unittest
import time
from pidevices.mcp23x17 import MCP23x17
class TestMCP23x17(unittest.TestCase):
def test_get_chunk(self):
device = MCP23x17()
address, number = device._get_chunk_number("A_2")
self.assertEqual(address, "A", "It should be A")
self.assertEqual(number, 2, "It should be 2")
with self.assertRaises(TypeError):
device._get_chunk_number(12)
with self.assertRaises(TypeError):
device._get_chunk_number("A_c")
with self.assertRaises(ValueError):
device._get_chunk_number("A_12")
with self.assertRaises(ValueError):
device._get_chunk_number("C_12")
with self.assertRaises(ValueError):
device._get_chunk_number("a_12")
if __name__ == "__main__":
unittest.main()
| true
|
bf795532c68fb7fac905aff571fa2314e110cda6
|
Python
|
Code-Wen/LeetCode_Notes
|
/179.largest-number.py
|
UTF-8
| 1,031
| 3.203125
| 3
|
[] |
no_license
|
#
# @lc app=leetcode id=179 lang=python3
#
# [179] Largest Number
#
# https://leetcode.com/problems/largest-number/description/
#
# algorithms
# Medium (29.09%)
# Likes: 2333
# Dislikes: 260
# Total Accepted: 205.8K
# Total Submissions: 698.5K
# Testcase Example: '[10,2]'
#
# Given a list of non negative integers, arrange them such that they form the
# largest number.
#
# Example 1:
#
#
# Input: [10,2]
# Output: "210"
#
# Example 2:
#
#
# Input: [3,30,34,5,9]
# Output: "9534330"
#
#
# Note: The result may be very large, so you need to return a string instead of
# an integer.
#
#
# @lc code=start
class Solution:
def largestNumber(self, nums: List[int]) -> str:
nums = [str(n) for n in nums]
for i in range(1, len(nums)):
j = i - 1
while j >= 0 and int(nums[j+1]+nums[j]) > int(nums[j]+nums[j+1]):
nums[j+1], nums[j] = nums[j], nums[j+1]
j -= 1
res = ''.join(nums)
return res if res[0]!='0' else '0'
# @lc code=end
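# Worked comparison from Example 2: for "3" and "30", int("330") > int("303"),
# so "3" is kept in front of "30", which is why the answer ends in "...330".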
| true
|
ba5eacc413a99891ff57c20905da7d7b780910a8
|
Python
|
nick0121/python_practice
|
/Part_2/practice_game/rocket.py
|
UTF-8
| 1,733
| 2.953125
| 3
|
[] |
no_license
|
import sys
import pygame as pg
from setting import Settings
from ship import Ship
class Rocket:
def __init__(self):
pg.init()
self.settings = Settings()
self.screen = pg.display.set_mode((1200, 800))
self.screen_width = self.screen.get_rect().width
self.screen_height = self.screen.get_rect().height
pg.display.set_caption('Rocket')
self.ship = Ship(self)
def run_game(self):
while True:
self.check_events()
self.screen.fill((230, 230, 230))
self.ship.update()
self.ship.blit_me()
pg.display.flip()
def check_events(self):
for event in pg.event.get():
if event.type == pg.QUIT:
sys.exit()
elif event.type == pg.KEYDOWN:
if event.key == pg.K_RIGHT:
self.ship.moving_right = True
elif event.key == pg.K_LEFT:
self.ship.moving_left = True
elif event.key == pg.K_UP:
self.ship.moving_up = True
elif event.key == pg.K_DOWN:
self.ship.moving_down = True
elif event.type == pg.KEYUP:
if event.key == pg.K_RIGHT:
self.ship.moving_right = False
elif event.key == pg.K_LEFT:
self.ship.moving_left = False
elif event.key == pg.K_UP:
self.ship.moving_up = False
elif event.key == pg.K_DOWN:
self.ship.moving_down = False
if __name__ == '__main__':
ai = Rocket()
ai.run_game()
| true
|
06497822c9674420ce2d8344c4dc6d1d8a004db7
|
Python
|
GJAI-School/GJAI-Algorithm
|
/queue.py
|
UTF-8
| 924
| 3.265625
| 3
|
[] |
no_license
|
# import sys
# input = sys.stdin.readline
def process_queue(queue_list, f_idx, r_idx, command):
cmd = command[0]
if cmd == "push":
queue_list[r_idx] = command[1]
r_idx += 1
elif cmd == "pop":
if f_idx == r_idx:
print(-1)
else:
print(queue_list[f_idx])
f_idx += 1
elif cmd == "size":
print(r_idx-f_idx)
elif cmd == "empty":
print(int(r_idx == f_idx))
elif cmd == "front":
if f_idx == r_idx:
print(-1)
else:
print(queue_list[f_idx])
elif cmd == "back":
if f_idx == r_idx:
print(-1)
else:
print(queue_list[r_idx-1])
return [f_idx, r_idx]
n = int(input())
queue_list = [0 for _ in range(n)]
f_idx = 0
r_idx = 0
for _ in range(n):
command = input().split()
f_idx, r_idx = process_queue(queue_list, f_idx, r_idx, command)
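# Example session (first input n = 5, then one command per line):
#   push 1 -> (no output)    push 2 -> (no output)
#   front  -> 1              pop    -> 1            size -> 1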
| true
|
37cb2b15dbd6fc6aaf55c4ebe741234df2db895b
|
Python
|
JosephLevinthal/Research-projects
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4178/codes/1644_1055.py
|
UTF-8
| 197
| 3.3125
| 3
|
[] |
no_license
|
from math import *
v = float(input("Initial velocity: "))
a = float(input("Angle of the vector (degrees): "))
g = float(input("Acceleration of gravity: "))
xx = radians(a)  # the angle must be converted to radians before calling sin()
r = (v)**2 * (sin(2*xx))/g
print(r)
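# Worked example: v = 10, a = 45 (degrees), g = 9.8
#   r = 10**2 * sin(radians(90)) / 9.8 = 100 / 9.8 ~= 10.204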
| true
|
18a1542e93eada0c053812c564b227b3adc27e2f
|
Python
|
mohsr/scribe
|
/scribe
|
UTF-8
| 1,117
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
import datetime
import os
import sys
# Write a message to a given filepath and a backup path
def scribe(text, path, backup_path):
# Gather formatted time string
time = datetime.datetime.now().strftime("%I:%M%p on %A, %B %d, %Y")
text = "-----\n" + time + ":\n" + text.strip() + "\n"
# Write the data and backup if requested
with open(path, "a+") as file:
file.write(text)
print("scribe wrote to %s" % path)
if backup_path:
with open(backup_path, "a+") as file:
file.write(text)
print("scribe wrote to %s" % backup_path)
if __name__ == "__main__":
# If SCRIBE_PATH is not set, default to ~/scribe.txt
path = os.environ.get('SCRIBE_PATH')
if path is None:
path = os.path.expanduser("~") + "/scribe.txt"
backup_path = os.environ.get('SCRIBE_BACKUP')
# Launch in interactive mode or argument mode
if len(sys.argv) == 1:
sys.stdout.write("> ")
sys.stdout.flush()
scribe(sys.stdin.read(), path, backup_path)
else:
scribe(sys.argv[1], path, backup_path)
| true
|
9c16315b47e948422470420209c0cb3d885ddad8
|
Python
|
asishraz/banka_sir_notes
|
/ch_3/44.py
|
UTF-8
| 745
| 4.4375
| 4
|
[] |
no_license
|
#wap to print the perfect numbers between A and B
'''
6 => 1+2+3 = 6 (sum of proper factors equals the number)
'''
#function perfect_number prints every perfect number in the range [a, b]
def perfect_number(a, b):
    for n in range(a, b + 1):
        fact = 0
        for i in range(1, n):
            if n % i == 0:
                fact += i
        if fact == n:
            print(str(n) + " is a perfect number")

perfect_number(2, 30)
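# Expected output for perfect_number(2, 30):
#   6 is a perfect number     (1 + 2 + 3 = 6)
#   28 is a perfect number    (1 + 2 + 4 + 7 + 14 = 28)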
| true
|
67aa63d55fff88acaaf853e654ed8eda1923bf05
|
Python
|
kmad1729/python_notes
|
/gen_progs/are_anagram.py
|
UTF-8
| 432
| 3.53125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env python3
from collections import Counter
def are_anagrams(*args):
'return True if args are anagrams'
if len(args) < 2:
raise TypeError("expected 2 or more arguments")
c = Counter(args[0])
return all(c == Counter(a) for a in args[1:])
arg1 = "appel apple aplep leapp".split()
#print("check if {} are anagrams".format(arg1))
print("are_anagrams {} ? {} ".format(arg1, are_anagrams(*arg1)))
| true
|
20e62a735a45f7765cfff19b8b6329875edd8616
|
Python
|
kansald006/GoogleSearch
|
/CSVSeabrn.py
|
UTF-8
| 798
| 2.984375
| 3
|
[] |
no_license
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df=pd.read_csv("Fulldata.csv")
# print(df)
# print(df.head(5))
#
# plt.figure(figsize=(30,20))
# # plt.savefig("")
#
# sns.countplot(y=df.Nationality, palette="Set2")
# plt.show()
#
# plt.figure(figsize=(30, 20))
# sns.countplot(x="Age", palette="Set2")
a=0.5
b=1
c=1.5
d=2
e=3
df['GK_Shot-Stopper']=(a*df.GK_Positioning+b*df.GK_Diving+c*df.GK_Kicking+d*df.GK_Handling+e*df.GK_Reflexes)
print(df['GK_Shot-Stopper'])
sortedDF= df.sort_values('GK_Shot-Stopper')
top5=sortedDF.tail(5)
print(top5)
X=np.array(list(top5['Name']))
Y=np.array(list(top5['GK_Shot-Stopper']))
# df1=df[['GK_Handling','GK_Diving']]
# print(df1)
sns.barplot(X, Y, palette="colorblind")
plt.ylabel("Shot Stopper Score")
plt.show()
| true
|
0ff8a8c9c4b7a51c42534985a91581b58cb55fe7
|
Python
|
damanraaj/SummerGeeks2020SDE
|
/summergeeks2020assignment/way2smsApiCreateSenderId.py
|
UTF-8
| 740
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
import requests
import json
URL = 'https://www.way2sms.com/api/v1/createSenderId'
# post request
def sendPostRequest(reqUrl, apiKey, secretKey, useType, senderId):
req_params = {
'apikey':apiKey,
'secret':secretKey,
'usetype':useType,
'senderid':senderId
}
return requests.post(reqUrl, req_params)
# get response
print("Enter API Key : ",end="")
APIKEY=input()
print("Enter Secret Key : ",end="")
SECRET=input()
print('Enter Sender ID : ',end="")
SENDERID=input()
response = sendPostRequest(URL, APIKEY, SECRET, 'prod', SENDERID)
"""
Note:-
you must provide apikey, secretkey, usetype and senderid values
and then send the request to the API
"""
# print response if you want
print (response.text)
| true
|
3b072685303cecf675253495f3881b7ab391c10b
|
Python
|
Mestway/falx-artifact
|
/artifact/output/plot_script_1.py
|
UTF-8
| 3,456
| 2.921875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
import argparse
import json
import os
import pandas as pd
from pprint import pprint
import numpy as np
import sys
# default directories
OUTPUT_DIR = os.path.join(".")
MAX_TIME = 600
def parse_log_content(exp_id, data_id, lines):
"""parse a log file"""
status = {
"exp_id": exp_id,
"data_id": data_id,
"num_candidates": [],
"table_prog": None,
"vis_spec": None,
"time": MAX_TIME
}
for i, l in enumerate(lines):
if l.startswith("# candidates before getting the correct solution: "):
status["num_candidates"].append(int(l.split(":")[-1].strip()) + 1)
if l.startswith("# time used (s): "):
status["time"] = float(l.split(":")[-1].strip())
if l.startswith("# table_prog:") and len(lines) > i + 1:
#status["table_prog"] = lines[i + 1]
pass
if l.startswith("# vis_spec:") and len(lines) > i + 1:
#status["vis_spec"] = lines[i + 1]
pass
status["solved"] = False if status["time"] >= MAX_TIME else True
status["num_explored"] = sum(status["num_candidates"])
status.pop("num_candidates")
return status
def read_log_result_list(log_dir_list, titles=None):
all_result = []
for i, log_dir in enumerate(log_dir_list):
for fname in os.listdir(log_dir):
if not fname.endswith(".log"): continue
fpath = os.path.join(log_dir, fname)
title = log_dir if titles is None else titles[i]
with open(fpath) as f:
status = parse_log_content(title, fname.split(".")[0], f.readlines())
all_result.append(status)
all_result.sort(key=lambda x:x["time"])
return all_result
def plot_solving_time(log_dir):
log_dir_list = [log_dir]
titiles = log_dir_list
all_result = read_log_result_list(log_dir_list, titiles)
plot_data = []
for i in [1, 10, 60, 600]:
cnt = {}
for r in all_result:
if r["exp_id"] not in cnt:
cnt[r["exp_id"]] = 0
if r["solved"] and r["time"] > 0 and r["time"] < i:
cnt[r["exp_id"]] += 1
for exp_id in cnt:
plot_data.append({"time": i, "cnt": cnt[exp_id], "exp_id": exp_id })
print("---")
for d in plot_data:
print(" # caes solved within {} second(s): {}".format(d["time"], d["cnt"]))
def plot_num_candidates(log_dir):
log_dir_list = [log_dir]
titles = [log_dir]
all_result = read_log_result_list(log_dir_list, titles)
df = pd.DataFrame.from_dict(all_result)
df = df[df["solved"] == True]
for t in titles:
cases_solved_within_top_5 = []
print("{}".format(t))
dft = df[df["exp_id"]==t]
#print("# cases solved within top 5:")
#print(list(dft[dft["num_explored"] <= 5]["data_id"]))
print(" # cases solved solved within top 1: {}".format(len(dft[dft["num_explored"] <= 1])))
print(" # cases solved solved within top 5: {}".format(len(dft[dft["num_explored"] <= 3])))
print(" # cases solved solved within top 10: {}".format(len(dft[dft["num_explored"] <= 5])))
print(" # cases solved within time limit: {}".format(len(dft)))
if __name__ == '__main__':
# python plot_script_1.py exp_falx_4 exp_falx_6 exp_falx_8
num_arguments = len(sys.argv) - 1
plot_num_candidates(sys.argv[1])
plot_solving_time(sys.argv[1])
| true
|
36f489aceeb26414dadc7591b9b3fc4c39af5e1c
|
Python
|
LoganW94/Text-Adventure
|
/player.py
|
UTF-8
| 480
| 3.4375
| 3
|
[] |
no_license
|
class Player:
__inventory = {"picture":
"In the Picture there is a Boy and a Girl. They are sitting on a park bench on a sunny fall day",
"sword":
"A cheap sword. Probably a toy"
}
__credits = 0
__name = ""
__location = 0
def __init__(self):
self.__name = "Default"
def printInventory(self):
for i in self.__inventory:
print(i)
def printCredits(self):
print("You have %d credits" % self.__credits)
def loadPlayer(self):
print("loading")
| true
|
aff5f763746e306b276398d72dd19d1d1eecc5f2
|
Python
|
ppilcher22/PythonBeginnerProjects
|
/__pycache__/OOP-_Tutorials/OOP_Tut_1.py
|
UTF-8
| 389
| 3.8125
| 4
|
[] |
no_license
|
class Person(object):
def __init__(self, name, age):
self.name = name
self.age = age
class Child(Person):
def __init__(self, name, age, mother, father):
super().__init__(name, age)
self.mother = mother
self.father = father
pers1 = Person('Homer', 33)
kid = Child('Charlie', 7, 'Mum', 'Papa')
print(pers1.name)
print(kid.father, kid.name)
| true
|
7e5b49671f642e0e55dec89347259790c02d9895
|
Python
|
lookfiresu123/Interacive_python
|
/dollors_cents.py
|
UTF-8
| 1,565
| 3.890625
| 4
|
[] |
no_license
|
"""
# string literals
s1 = "chensu's funny"
s2 = 'chensu"s funny'
# print s1
# print s2
# print s1 + s2
print s1[0]
print len(s1)
# [0th, 7th), just like [0th, 6th]
print s1[0:7]
print s1[:10]
s1 = "0123456789"
il = int(s1[:10])
print il + 1000000
"""
# import module
import simpleguitk as simplegui
# initialize globals
value = 3.12
# define helper functions which consist of event handlers
# handle single quantity
def convert_units(val, name):
result = str(val) + " " + name
if val > 1:
result += "s"
return result
# convert xx.yy to xx dollars and yy cents
def convert(val):
# split into dollars and cents
dollars = int(val)
cents = int(round(100 * (val - dollars)))
# convert to strings
dollars_string = convert_units(dollars, "dollar")
cents_string = convert_units(cents, "cent")
# return composite string
if dollars == 0 and cents == 0:
return "broke!"
elif dollars != 0 and cents == 0:
return dollars_string
elif dollars == 0 and cents != 0:
return cents_string
else:
return dollars_string + " and " + cents_string;
# define event handlers
# define draw handler
def draw(canvas):
canvas.draw_text(convert(value), [50, 100], 20, "White")
# define an input field handler
def input_handler(text):
global value
value = float(text)
# create frame
frame = simplegui.create_frame("Converter", 400, 200)
# register event handlers into frame
frame.set_draw_handler(draw)
frame.add_input("Input", input_handler, 100)
# start frame
frame.start()
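# Example conversions handled by convert():
#   convert(3.12) -> "3 dollars and 12 cents"
#   convert(1.01) -> "1 dollar and 1 cent"
#   convert(0)    -> "broke!"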
| true
|
6e09253ba6d311233470a1bbd07d5ebe8c1547e8
|
Python
|
Recursing/SlidingPuzzleSolver
|
/klotski.py
|
UTF-8
| 4,599
| 2.96875
| 3
|
[] |
no_license
|
from sliding_game import SlidingGame
import board_utils
class Klotski(SlidingGame):
def __init__(
self,
width=4,
height=5,
start_board=(2, 6, 6, 2, 3, 6, 6, 3, 2, 4, 5, 2, 3, 1, 1, 3, 1, 0, 0, 1),
goals=(17, 18),
):
super().__init__(width, height, start_board)
self.goals = goals
def move(self, board, spaces, space, target):
other_space = spaces[0] if spaces[0] != space else spaces[1]
assert board[other_space] == board[space] == 0
cell_type = board[target]
if cell_type == 1: # small square
return board_utils.swap(board, target, space), (target, other_space)
ABOVE = -self.width
BELOW = self.width
LEFT = -1
RIGHT = +1
if cell_type == 2: # upper part of vertical rectangle
if target - space == BELOW:
return (
board_utils.rotate(board, space, target, target + BELOW),
(target + BELOW, other_space),
)
elif other_space - space == BELOW:
return (
board_utils.double_swap(
board, space, other_space, target, target + BELOW
),
(target, target + BELOW),
)
elif cell_type == 3: # lower part of vertical rectangle
assert board[target + ABOVE] == 2
if target - space == ABOVE:
return (
board_utils.rotate(board, space, target, target + ABOVE),
(target + ABOVE, other_space),
)
elif other_space - space == ABOVE:
return (
board_utils.double_swap(
board, space, other_space, target, target + ABOVE
),
(target, target + ABOVE),
)
elif cell_type == 4: # left part of horizontal rectangle
assert board[target + RIGHT] == 5
if target - space == RIGHT:
return (
board_utils.rotate(board, space, target, target + RIGHT),
(target + RIGHT, other_space),
)
elif other_space - space == RIGHT:
return (
board_utils.double_swap(
board, space, other_space, target, target + RIGHT
),
(target, target + RIGHT),
)
elif cell_type == 5: # right part of horizontal rectangle
assert board[target + LEFT] == 4
if target - space == LEFT:
return (
board_utils.rotate(board, space, target, target + LEFT),
(target + LEFT, other_space),
)
elif other_space - space == LEFT:
return (
board_utils.double_swap(
board, space, other_space, target, target + LEFT
),
(target, target + LEFT),
)
elif cell_type == 6: # any part of big square
direction = target - space
if (
0 <= other_space + direction < self.width * self.height
and board[other_space + direction] == 6
):
new_board = board_utils.double_swap(
board,
space,
other_space,
space + direction * 2,
other_space + direction * 2,
)
return new_board, (space + direction * 2, other_space + direction * 2)
def is_goal(self, board):
return all(board[goal] == 6 for goal in self.goals)
def pretty_print(self, boards):
print("-" * (self.width * 2))
ENDC = "\x1b[0m"
colors = [
"",
"\x1b[0;30;41m",
"\x1b[0;30;42m",
"\x1b[0;30;42m",
"\x1b[0;30;42m",
"\x1b[0;30;42m",
"\x1b[1;37;43m",
"\x1b[0;30;40m",
]
for line_num in range(self.height):
lines = [
board[line_num * self.width : (line_num + 1) * self.width]
for board in boards
]
print(
" ".join(
"".join(
"{}{} {}".format(colors[value], value, ENDC) for value in line
)
for line in lines
)
)
| true
|
494e06ffde26eb30016899083aa4a3101f69fbe7
|
Python
|
egyptai/Python
|
/calculation20210531.py
|
UTF-8
| 202
| 3.265625
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 31 22:53:46 2021
@author: dms10
"""
print("7+4 = ", 7+4)
print("7*4 = ", 7*4)
print("7/4 = ", 7/4)
print("2**3 = ", 2**3)
print("5%3 = ", 5%3)
| true
|
998ad791113fc9afb6256ae1ee8eccc59c2884da
|
Python
|
bhuynh1103/breakout
|
/ball.py
|
UTF-8
| 1,712
| 3.125
| 3
|
[] |
no_license
|
from pygame.draw import *
from constants import *
from random import uniform
class Ball:
def __init__(self):
self.w = screenSize * .02
self.x = screenSize // 2 - self.w // 2
self.y = self.x + GUISize
self.speed = 7.5
self.xspeed = 0
self.yspeed = -1
self.released = False
def draw(self, window):
rect(window, gray(150), (self.x, self.y, self.w, self.w))
if not self.released:
line(window, white, (self.x + self.w // 2, self.y - self.w), (self.x + self.w // 2, self.y + self.w // 2), 1)
def move(self, paddle):
if not self.released:
self.x = paddle.x + paddle.w // 2 - self.w // 2
self.y = paddle.y - self.w * 1.5
else:
self.x += self.xspeed * self.speed
self.y += self.yspeed * self.speed
def edgeBounce(self):
if self.x < 0 or self.x + self.w > screenSize:
self.xspeed *= -1
elif self.y < GUISize: # or self.y + self.w > screenSize + GUISize:
self.yspeed *= - 1
def bounce(self, xspeed):
self.xspeed = xspeed
self.yspeed = -1
def collide(self, other):
myTop = self.y
myRight = self.x + self.w
myBottom = self.y + self.w
myLeft = self.x
otherTop = other.y
otherRight = other.x + other.w
otherBottom = other.y + other.h
otherLeft = other.x
if myTop > otherBottom:
return False
elif myRight < otherLeft:
return False
elif myBottom < otherTop:
return False
elif myLeft > otherRight:
return False
else:
return True
| true
|
614cdf6267889af4860b8dd5201743c1ad92dbb5
|
Python
|
K-Phoen/runner
|
/scripts/runner-edit
|
UTF-8
| 1,090
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
import argparse
from runner import dump_to_file, parse_from_file, TimeEditor
def configure_common_args(parser):
parser.add_argument(
'-i', '--input', type=str, required=True,
help='File to read from.',
)
parser.add_argument(
'-o', '--output', type=str, required=True,
help='File to write the output to.',
)
def parse_args():
parser = argparse.ArgumentParser(
description='Edit FIT and TCX files'
)
# add editors parsers
subparsers = parser.add_subparsers()
# time editor
time_parser = subparsers.add_parser('time', help='Edit time entries')
configure_common_args(time_parser)
TimeEditor.configure_args_parser(time_parser)
return parser.parse_args()
def main():
options = parse_args()
editor = options.editor()
# read the original file
activity = parse_from_file(options.input)
# edit the activity
editor.edit(activity, options)
# write the edited activity
dump_to_file(activity, options.output)
if __name__ == '__main__':
main()
| true
|
44f23ea333e54c7dd788deb063a2a3d380180972
|
Python
|
mariuscmorar/AutomationScripts
|
/IP_Processing/validateIP.py
|
UTF-8
| 192
| 2.953125
| 3
|
[] |
no_license
|
import socket
original_list = [ip.strip() for ip in open('ip_list.csv', 'r').readlines()]
i=0
for a in original_list:
i+=1
try:
socket.inet_aton(a)
except socket.error:
print(i," ",a)
| true
|
e168de7292c77ac1acaf49d219555613f8fe7188
|
Python
|
SushilPudke/PythonTest
|
/demopattern.py
|
UTF-8
| 95
| 2.828125
| 3
|
[] |
no_license
|
# demo pattern
for r in range(6) :
for c in range(r):
print(r,end=" ")
print()
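# Expected output (the r = 0 row prints only a blank line):
#
# 1
# 2 2
# 3 3 3
# 4 4 4 4
# 5 5 5 5 5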
| true
|
9e05a85e6fcd808ed99bb3b9ff30e71ef4741191
|
Python
|
thc2125/csclassifier
|
/test/test_utils.py
|
UTF-8
| 3,122
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/python3
import unittest
import csv
import numpy as np
import random
import utils
from collections import defaultdict
from collections import Counter
from pathlib import Path
word_col = 1
dl = ','
class UtilsTestCase(unittest.TestCase):
def setUp(self):
self.corpora_filenames = ['Corpus_corpus_de+ar.csv',
'Corpus_corpus_fr+ar.csv']
pass
def tearDown(self):
pass
'''
def test_randomly_read_CS_Langs_Corpus_comb(self):
train_corpus = Corpus_CS_Langs(train=True)
test_corpus = Corpus_CS_Langs()
comb_corpus = Corpus_CS_Langs()
for corpus in self.corpora_filepaths:
utils.randomly_read_Corpus_CS_Langs(corpus, train_corpus, test_corpus)
temp_corpus = Corpus_CS_Langs()
temp_corpus.read_corpus(corpus, dl=',')
comb_corpus += temp_corpus
self.assertEqual(len(train_corpus.sentences)
+ len(test_corpus.sentences), len(comb_corpus.sentences))
def test_randomly_read_CS_Langs_Corpus_split(self):
train_corpus = Corpus_CS_Langs(train=True)
test_corpus = Corpus_CS_Langs()
for corpus in self.corpora_filepaths:
utils.randomly_read_Corpus_CS_Langs(corpus, train_corpus, test_corpus)
self.assertAlmostEqual(len(train_corpus.sentences) , 9, delta=2)
self.assertAlmostEqual(len(test_corpus.sentences) , 1, delta=2)
'''
def test_deduce_cs_langs_str(self):
expected_langs = ('en', 'es')
test_langs = utils.deduce_cs_langs('test_corpus_name_en+es')
self.assertEqual(expected_langs, test_langs)
def test_deduce_cs_langs_filenames0(self):
expected_langs = ('de','ar')
test_langs = utils.deduce_cs_langs(self.corpora_filenames[0])
self.assertEqual(expected_langs, test_langs)
def test_deduce_cs_langs_filenames1(self):
expected_langs = ('fr','ar')
test_langs = utils.deduce_cs_langs(self.corpora_filenames[1])
self.assertEqual(expected_langs, test_langs)
'''
def test_randomly_split_corpus_len_sentences(self):
train_corpus, test_corpus = self.corpus1.randomly_split_corpus()
self.assertEqual(len(self.corpus1.sentences), len(train_corpus.sentences) + len(test_corpus.sentences))
def test_randomly_split_corpus_len_labels(self):
train_corpus, test_corpus = self.corpus1.randomly_split_corpus()
print(len(train_corpus.labels))
print(len(test_corpus.labels))
self.assertEqual(len(self.corpus1.labels), len(train_corpus.labels) + len(test_corpus.labels))
def test_randomly_split_corpus_reconstitute_labels(self):
train_corpus, test_corpus = self.corpus1.randomly_split_corpus()
self.assertEqual(sorted(self.corpus1.labels),
sorted(train_corpus.labels + test_corpus.labels))
def test_randomly_split_corpus_reconstitute_sentences(self):
train_corpus, test_corpus = self.corpus1.randomly_split_corpus()
self.assertEqual(sorted(self.corpus1.sentences),
sorted(train_corpus.sentences + test_corpus.sentences))
'''
| true
|
fffbd0a25b0fa49b398b67ad3d9b820afcb4ad22
|
Python
|
cfc424/NGS
|
/binTranscriptome.py
|
UTF-8
| 4,295
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#from __future__ import division, with_statement
'''
Copyright 2013, 陈同 (chentong_biology@163.com).
===========================================================
'''
__author__ = 'chentong & ct586[9]'
__author_email__ = 'chentong_biology@163.com'
#=========================================================
'''
Functional description
This is designed to bin a transcriptome for coverage analysis.
Input file:
chr18 36526168 36526187 NM_001081365_31.UTR5 0 +
chr18 36552962 36553024 NM_001081365_31.UTR3 0 +
chr17 26012444 26012474 NM_026686_32.UTR5 0 +
chr17 26013942 26014108 NM_026686_32.UTR3 0 +
chr18 36526187 36526395 NM_001081365_31.Coding_exon.1 0 +
chr18 36552849 36552962 NM_001081365_31.Coding_exon.2 0 +
chr17 26012474 26012671 NM_026686_32.Coding_exon.1 0 +
chr17 26012904 26013067 NM_026686_32.Coding_exon.2 0 +
chr17 26013164 26013224 NM_026686_32.Coding_exon.3 0 +
chr17 26013439 26013507 NM_026686_32.Coding_exon.4 0 +
chr17 26013605 26013684 NM_026686_32.Coding_exon.5 0 +
chr17 26013894 26013942 NM_026686_32.Coding_exon.6 0 +
Output file:
Same bed lines, with each region split into roughly equal bins and the
name suffixed with "__<total_bins>__<bin_index>", e.g.
chr18 36526168 36526187 NM_001081365_31.UTR5__1__1 0 +
'''
import sys
import os
from time import localtime, strftime
timeformat = "%Y-%m-%d %H:%M:%S"
from optparse import OptionParser as OP
def cmdparameter(argv):
if len(argv) == 1:
cmd = 'python ' + argv[0] + ' -h'
os.system(cmd)
sys.exit(1)
desc = ""
usages = "%prog -i file"
parser = OP(usage=usages)
parser.add_option("-i", "--input-file", dest="filein",
metavar="FILEIN", help="Usually a bed file containing UTR5, \
Coding exon or UTR3.")
parser.add_option("-l", "--length-of-bin", dest="len_bin",
default=25, metavar=25, help="The expected bin length. \
Not exact, but roughly the number given here. The program will pick \
a suitable length to avoid a very short last bin.")
parser.add_option("-v", "--verbose", dest="verbose",
default=0, help="Show process information")
parser.add_option("-d", "--debug", dest="debug",
default=False, help="Debug the program")
(options, args) = parser.parse_args(argv[1:])
assert options.filein != None, "A filename needed for -i"
return (options, args)
#--------------------------------------------------------------------
def main():
options, args = cmdparameter(sys.argv)
#-----------------------------------
file = options.filein
verbose = options.verbose
debug = options.debug
len_bin = int(options.len_bin)
#-----------------------------------
if file == '-':
fh = sys.stdin
else:
fh = open(file)
#--------------------------------
for line in fh:
lineL = line.split()
start = int(lineL[1])
end = int(lineL[2])
name = lineL[3]
len_r = end - start
time = len_r / len_bin
#for regions with length smaller than expected bin
if time == 0:
lineL[3] = '__'.join([name, '1', '1'])
print '\t'.join(lineL)
continue
real_len = len_r / time
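# integer division (Python 2): each bin gets len_r // time bases; the last bin absorbs the remainder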
for i in range(time):
lineL[1] = str(start + i * real_len)
if i+1 == time:
lineL[2] = str(end)
else:
lineL[2] = str(start + (i+1) * real_len)
lineL[3] = '__'.join([name, str(time), str(i+1)])
print '\t'.join(lineL)
#----------The last one---------------
#-------------END reading file----------
#----close file handle for files-----
if file != '-':
fh.close()
#-----------end close fh-----------
if verbose:
print >>sys.stderr,\
"--Successful %s" % strftime(timeformat, localtime())
if __name__ == '__main__':
startTime = strftime(timeformat, localtime())
main()
endTime = strftime(timeformat, localtime())
fh = open('python.log', 'a')
print >>fh, "%s\n\tRun time : %s - %s " % \
(' '.join(sys.argv), startTime, endTime)
fh.close()
| true
|
75af6f3bc0a69a6276ed0148b693d992758a7d5c
|
Python
|
sm7eca/dmr-dreambox
|
/eim-service/docker/eim-core/src/db/mongodb.py
|
UTF-8
| 8,681
| 2.578125
| 3
|
[] |
no_license
|
import os
import sys
import re
from urllib.parse import quote_plus
from pymongo import MongoClient
from pymongo.database import Database
from pymongo.errors import ConnectionFailure
from pymongo.collection import Collection
from common.logger import get_logger
from common.definitions import Repeater, RepeaterItem, DmrUser
from typing import List, Optional, Dict
from pydantic import BaseModel
from datetime import datetime
logger = get_logger("mongodb", log_level=os.getenv("EIM_LOG_LEVEL", "INFO"))
class MongoDbError(BaseException):
def __init__(self, msg):
self.msg = msg
class MongoDB:
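# Thin wrapper around the "eim" MongoDB database; requires EIM_DB_USER,
# EIM_DB_PASSWORD, EIM_DB_HOST and EIM_DB_PORT in the environment.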
def __init__(self, db_name: str = "eim"):
"""Instantiate a DB entity"""
self._repeater_log = {}
if "EIM_DB_USER" not in os.environ.keys():
raise MongoDbError("missing ENV variable EIM_DB_USER")
if "EIM_DB_PASSWORD" not in os.environ.keys():
raise MongoDbError("missing ENV variable EIM_DB_PASSWORD")
if "EIM_DB_HOST" not in os.environ.keys():
raise MongoDbError("missing ENV variable EIM_DB_HOST")
if "EIM_DB_PORT" not in os.environ.keys():
raise MongoDbError("missing ENV variable EIM_DB_PORT")
user = quote_plus(os.getenv("EIM_DB_USER"))
password = quote_plus(os.getenv("EIM_DB_PASSWORD"))
host = os.getenv("EIM_DB_HOST")
port = os.getenv("EIM_DB_PORT")
uri = f"mongodb://{user}:{password}@{host}:{port}/?authSource=admin"
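# authSource=admin: credentials are validated against the admin database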
client = MongoClient(uri)
# ensure that we have connectivity
try:
client.admin.command("ismaster")
except ConnectionFailure as ex:
sys.exit(f"Failed to connect to MongoDB at {host}:{port} => {ex}")
logger.info(f"successfully connected to MongoDB at {host}:{port}")
self._db = Database(client, name=db_name)
@staticmethod
def _translate_db_2_repeater(db_entry: Dict) -> Repeater:
r_object = {
"dmr_id": db_entry["repeaterid"],
"tx": float(db_entry["tx"]) * 1e6,
"rx": float(db_entry["rx"]) * 1e6,
"cc": int(db_entry["colorcode"]),
"max_ts": 0,
"name": db_entry["callsign"],
"location": f"{db_entry['lng']},{db_entry['lat']}",
"city": db_entry.get("city", "unknown") or "unknown",
"num_tg": 0
}
r = Repeater(**r_object)
logger.debug(f"item translated: {repr(r)}")
return r
@staticmethod
def _translate_db_2_repeater_item(db_entry: Dict) -> RepeaterItem:
"""
Translate into a shorter RepeaterItem; the data can be reused
to make additional calls that retrieve detailed information
via the unique DMR ID.
"""
ri_object = {
"dmr_id": db_entry["repeaterid"],
"tx": float(db_entry["tx"]) * 1e6,
"rx": float(db_entry["rx"]) * 1e6,
"cc": int(db_entry["colorcode"]),
"name": db_entry["callsign"],
"location": f"{db_entry['lng']},{db_entry['lat']}",
"city": db_entry.get("city", "unknown") or "unknown"
}
ri = RepeaterItem(**ri_object)
logger.debug(f"RepeaterItem translated: {repr(ri)}")
return ri
@staticmethod
def _translate_user_2_dmr_user(user_entry: Dict) -> DmrUser:
"""
Translate a user object received from DB into DmrUser
"""
user = DmrUser(**user_entry)
return user
def get_repeater_by_master(self, master_id: int) -> Optional[List[RepeaterItem]]:
"""
Return a list of RepeaterItem objects for a given master ID.
:param master_id: DMR master ID
:return: List[RepeaterItem]
"""
col = self._db.get_collection("repeater")
timestamp_1week_ago = int(datetime.now().timestamp()) - 604800
# try to find all repeaters (status == 3) for a given master, updated within the last week
query = {
"lastKnownMaster": str(master_id),
"status": "3",
"last_updated_ts": {"$gt": timestamp_1week_ago}
}
docs = col.find(filter=query, limit=0).sort("callsign")
logger.debug(f"received {docs.count()} repeater from DB")
list_repeater = [self._translate_db_2_repeater_item(record) for record in docs]
return list_repeater
def get_repeater_by_callsign(self, call_sign) -> Optional[List[RepeaterItem]]:
col = self._db.get_collection("repeater")
timestamp_1week_ago = int(datetime.now().timestamp()) - 604800
query = {
"status": "3",
"callsign": {"$regex": re.escape(call_sign)},
"last_updated_ts": {"$gt": timestamp_1week_ago}
}
docs = col.find(filter=query, limit=0).sort("callsign")
logger.debug(f"received {docs.count()} repeater for callsign={call_sign} from DB")
list_repeater = [self._translate_db_2_repeater_item(record) for record in docs]
return list_repeater
def get_repeater_by_dmrid(self, dmr_id: int) -> Optional[List[Repeater]]:
"""
Here we are looking for a detailed list for each repeater, including talk groups.
The talk groups are fetched using additional requests to the REST API and
cached in the database
"""
col = self._db.get_collection("repeater")
# we are looking for both repeater ("3") and hotspots ("4")
query = {
"status": {"$in": ["3", "4"]},
"repeaterid": str(dmr_id)
}
logger.debug(f"query: {repr(query)}")
docs = col.find(filter=query, limit=0)
logger.debug(f"received {docs.count()} repeater for repeater_id={dmr_id} from DB")
list_repeater = [self._translate_db_2_repeater(record) for record in docs]
return list_repeater
def get_hotspot(self, call_sign: str) -> Optional[List[RepeaterItem]]:
"""
Return a list of RepeaterItem objects for a given callsign
- filter for status == 4
- no filter for updated recently
"""
logger.debug(f"==> Received hotspot request, callsign: {call_sign}")
col = self._db.get_collection("repeater")
query = {
"callsign": call_sign,
"status": "4"
}
docs = col.find(filter=query, limit=0).sort("repeaterid")
logger.debug(f"received {docs.count()} hotspots from DB")
list_hotspots = []
for record in docs:
list_hotspots.append(self._translate_db_2_repeater_item(record))
return list_hotspots
def count_docs(self, collection: str) -> int:
"""
Return the number of documents for a given collection
- throw an exception if collection doesn't exist
"""
if collection not in self._db.list_collection_names():
return 0
col: Collection = self._db.get_collection(collection)
num_docs = col.count_documents(filter={})
return num_docs
def get_repeater_by_location(self, long: float, lat: float, distance_km: int) -> Optional[List[RepeaterItem]]:
"""
Based on valid location data received from BM, query for 2d near-maxDistance
"""
col: Collection = self._db.get_collection("repeater")
timestamp_1week_ago = int(datetime.now().timestamp()) - 604800
query = {
"status": "3",
"loc_valid": True,
"loc": {"$near": {"$geometry": {"type": "Point", "coordinates": [long, lat]},
"$maxDistance": distance_km * 1000}},
"last_updated_ts": {"$gt": timestamp_1week_ago}
}
docs = col.find(filter=query, limit=0)
logger.debug(f"received {docs.count()} repeater for {[long, lat]}, distance: {distance_km} km")
list_repeater = []
for record in docs:
list_repeater.append(self._translate_db_2_repeater_item(record))
return list_repeater
def get_user_by_dmrid(self, dmr_id: int) -> Optional[DmrUser]:
logger.debug(f"find user for ID: {dmr_id}")
col: Collection = self._db.get_collection("dmr_user")
num_users: int = col.count_documents(filter={})
logger.debug(f"found {num_users} in collection {col.name}")
query = {
"dmr_id": dmr_id
}
doc = col.find_one(filter=query)
if not doc:
logger.debug(f"no doc found for {dmr_id}")
return None
else:
return self._translate_user_2_dmr_user(doc)
| true
|
7474799f69aaf16b33205db0333b97449d294140
|
Python
|
lianxiaolei/Ginormica
|
/tech/algo/arrays/image_rotation.py
|
UTF-8
| 535
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
def image_rotation(a):
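# rotate the square matrix 90 degrees clockwise in place:
# transpose, then reverse each row (mirror horizontally)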
n = len(a)
for i in range(n - 1):
for j in range(i + 1, n):
tmp = a[i, j]
a[i, j] = a[j, i]
a[j, i] = tmp
for i in range(n):
for j in range(n // 2):
tmp = a[i, j]
a[i, j] = a[i, n - 1 - j]
a[i, n - 1 - j] = tmp
return a
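# one-liner alternative for nested lists: reverse the rows, then zip columns (yields tuples in Python 3)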
rotate_image_awsome = lambda a: zip(*a[::-1])
if __name__ == '__main__':
import numpy as np
a = np.linspace(1, 16, 16).reshape(4, 4)
print(image_rotation(a))
| true
|
9d6d6073e3abfcb9887cbb2d0c6fa138aa0057ad
|
Python
|
justinorjt/bnb-blog-flask
|
/scrapeKitCollections.py
|
UTF-8
| 1,144
| 2.609375
| 3
|
[] |
no_license
|
# Pull in Kit Collections
from bs4 import BeautifulSoup as bsoup
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import selenium
def getKits():
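# scrape the kit.co profile page for collection links using headless Chrome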
config = selenium.webdriver.ChromeOptions()
config.add_argument('headless')
browser = webdriver.Chrome(options=config)
theUrl = 'https://kit.co/rakidzich'
browser.get(theUrl)
# get the html and the link
# wait = browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return document.body.innerHTML;")
# time.sleep(.7)
try:
WebDriverWait(browser, 5).until(
EC.presence_of_element_located((By.CSS_SELECTOR, ".collection-card"))
)
page = browser.page_source
finally:
browser.quit()
soup = bsoup(page, 'html.parser')
cards = soup.find_all('a', attrs={'class':'collection-card'})
kits = []
for card in cards:
link = card.get('href')
kits.append({"link":link})
# print (kits)
return kits
# getKits()
| true
|
f5ec3c3722f5b8869b42a41613ebbcb2bd0001d6
|
Python
|
abcapo/um-programacion-i-2020
|
/58089-CAPO-AGUSTINA/TP1/8.py
|
UTF-8
| 545
| 3.84375
| 4
|
[] |
no_license
|
class Curso():
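# simple gradebook: read five grades from stdin and print subject/grade pairs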
def __init__(self):
self.materias = ["Mathematics", "Physics", "Chemistry", "History", "Language"]
self.notas = []
def ingreso(self):
for i in range(5):
print("Ingrese la nota de "+self.materias[i]+":")
self.notas.append(input())
return self.notas
def imprimir(self):
self.ingreso()
for j in range(5):
print(self.materias[j] + ": " + self.notas[j])
def main():
N = Curso()
N.imprimir()
if __name__ == "__main__":
main()
| true
|
74403ae7661fe5f815f6209a4fd6a4763b7331c5
|
Python
|
guidolingip1/Project-Euler
|
/4.py
|
UTF-8
| 476
| 3.671875
| 4
|
[] |
no_license
|
#Find the largest palindrome made from the product of two 3-digit numbers.
def reverte(numero):
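# reverse the decimal digits of a non-negative integer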
revertido = 0
while (numero > 0):
resto = numero % 10
revertido = (revertido * 10) + resto
numero = numero // 10
return revertido
maior = 0
for i in range(999, 99, -1):
for j in range(999, 99, -1):
soma = i * j
if reverte(soma) == soma:
if soma > maior:
maior = soma
print(maior)
| true
|
218574330e73907e99908832de3b3e37cad9424f
|
Python
|
Nam-Seung-Woo/tensorflow_practice
|
/준표문제.py
|
UTF-8
| 520
| 2.9375
| 3
|
[] |
no_license
|
import tensorflow as tf
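# NOTE: TensorFlow 1.x API (tf.Session, tf.train.GradientDescentOptimizer); under TF2 use tf.compat.v1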
x_value=[1,2,3,4,5,6,7,8,9,10]
y_value=[3,5,7,9,11,13,15,17,19,21]
W=tf.Variable(tf.random_normal([1]))
b=tf.Variable(tf.random_normal([1]))
hypothesis=x_value*W+b
cost=tf.reduce_mean(tf.square(hypothesis-y_value))
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01)
train=optimizer.minimize(cost)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(20001):
sess.run(train)
if i%100==0:
print(i, sess.run(cost), sess.run(W), sess.run(b))
| true
|