blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 5
133
| path
stringlengths 2
333
| src_encoding
stringclasses 30
values | length_bytes
int64 18
5.47M
| score
float64 2.52
5.81
| int_score
int64 3
5
| detected_licenses
listlengths 0
67
| license_type
stringclasses 2
values | text
stringlengths 12
5.47M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
797a419a87d47d61a08d610a97ecec200a732124
|
Python
|
ivanistheone/cs231n
|
/assignment1/cs231n/classifiers/softmax.py
|
UTF-8
| 3,551
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
from builtins import range
import numpy as np
from random import shuffle
from past.builtins import xrange
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops)
    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.
    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength
    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    num_classes = W.shape[1]
    num_train = X.shape[0]
    # Initialize the loss and gradient to zero.
    loss = 0.0
    dW = np.zeros_like(W)
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    for i in range(num_train):
        scores = X[i].dot(W)
        # Shift by the max score for numerical stability
        # (softmax is invariant to a constant shift of the scores).
        scores -= np.max(scores)
        exp_scores = np.exp(scores)
        probs = exp_scores / np.sum(exp_scores)
        # Cross-entropy contribution of example i: -log p(correct class).
        loss += -np.log(probs[y[i]])
        # Gradient: dL/dW[:, j] = (p_j - 1{j == y[i]}) * x_i
        for j in range(num_classes):
            dW[:, j] += probs[j] * X[i]
            if j == y[i]:
                dW[:, j] -= X[i]
    # Average over the batch, then add the L2 regularization term.
    loss /= num_train
    loss += reg * np.sum(W * W)
    dW /= num_train
    # Bug fix: the regularization gradient was `dW += 2*W`, missing the
    # `reg` factor; d(reg * sum(W*W))/dW = 2 * reg * W, matching the
    # vectorized implementation below.
    dW += 2 * reg * W
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.
    Inputs and outputs are the same as softmax_loss_naive.
    """
    num_train = X.shape[0]
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # Scores, shifted per row for numerical stability.
    scores = X.dot(W)
    scores -= scores.max(axis=1, keepdims=True)
    exp_scores = np.exp(scores)
    probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)
    # Pick the probability of the correct class for every example.
    rows = np.arange(num_train)
    correct_probs = probs[rows, y]
    # Mean cross-entropy plus L2 regularization.
    loss = -np.sum(np.log(correct_probs)) / num_train + reg * np.sum(W * W)
    # Gradient: subtract 1 at the correct-class entries, then backprop
    # through the linear scores; add the regularization gradient.
    dscores = probs.copy()
    dscores[rows, y] -= 1.0
    dW = X.T.dot(dscores) / num_train + reg * 2 * W
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, dW
| true
|
a9396e80d2eaa6594ddbb69fe36f6da7a3cefaa1
|
Python
|
DuttaSejuti/HackerRank-Python
|
/mutation.py
|
UTF-8
| 124
| 2.734375
| 3
|
[] |
no_license
|
if __name__ == '__main__':
    # Read a string, then an index and a character; print the string with
    # the character at that index replaced.
    original = input()
    index_str, replacement = input().split()
    index = int(index_str)
    replacement = str(replacement)
    print(original[:index] + replacement + original[index + 1:])
| true
|
246756226d7639c17e7f5ecf37002a6fabf011ed
|
Python
|
coder5492/WineQualityPrediction
|
/wineQualityDataset.py
|
UTF-8
| 1,911
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 11 09:43:03 2018

@author: sangeeth

Predict red-wine quality with Gaussian Naive Bayes, comparing a model
trained on the standardised raw features against models trained on
1-9 PCA components.
"""
# Bug fix: the "Created on ..." line originally sat BEFORE the opening
# triple quote, making it a bare statement and therefore a SyntaxError;
# it now lives inside the module docstring.
import pandas as pd
from sklearn import preprocessing
from sklearn.decomposition import PCA
import pylab as pl
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing  # NOTE: duplicate import, kept as harmless
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
# Importing the data (semicolon-separated UCI CSV)
data = pd.read_csv(url, sep=";")
# Declaring X (all feature columns) and Y (the quality column)
X = data.iloc[:, :-1]
Y = data.iloc[:, 11:12]
# Standardising the data
sc = preprocessing.StandardScaler()
X = sc.fit(X).transform(X)
# Splitting the observations to test set and train set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=42)
# Predicting the output using actual variables
bayes_real_model = GaussianNB()
bayes_real_model.fit(X_train, Y_train)
real_prediction = bayes_real_model.predict(X_test)
# Calculation of Accuracy Score and Confusion Matrix
from sklearn.metrics import accuracy_score
real_score = accuracy_score(Y_test, real_prediction)
print("The accuracy with Real data is " + str(real_score))
from sklearn.metrics import confusion_matrix
# trace() = total correctly classified samples on the diagonal
print(confusion_matrix(Y_test, real_prediction).trace())
# Predicting with different models with different PCA components
for i in range(1, 10):
    model = PCA(n_components=i)
    model.fit(X)
    X_PCA = model.transform(X)
    X_train_PCA, X_test_PCA, Y_train_PCA, Y_test_PCA = train_test_split(X_PCA, Y, test_size=0.33, random_state=42)
    bayes_PCA_model = GaussianNB()
    bayes_PCA_model.fit(X_train_PCA, Y_train)
    PCA_prediction = bayes_PCA_model.predict(X_test_PCA)
    PCA_score = accuracy_score(Y_test, PCA_prediction)
    print("The accuracy with PCA data with " + str(i) + " components is " + str(PCA_score))
    print(confusion_matrix(Y_test, PCA_prediction).trace())
| true
|
ad2018deab75f496911bb3599eab481de8d925f3
|
Python
|
Thalisson01/Python
|
/Exercício Python #092 - Cadastro de Trabalhador em Python.py
|
UTF-8
| 693
| 3.515625
| 4
|
[] |
no_license
|
from datetime import date

# Current year, used to derive the age and the retirement estimate.
current_year = date.today().year

# Collect worker data into a dictionary keyed by (Portuguese) field names.
worker = dict()
worker['nome'] = str(input('Digite seu nome: '))
worker['idade'] = current_year - int(input('Digite a sua data de nascimento: '))
ctps_number = int(input('Digite a sua carteira de trabalho. [0] caso não tenha: '))
if ctps_number != 0:
    worker['CTPS'] = ctps_number
    worker['ano de contratação'] = int(input('Qual foi o ano de contratação? '))
    worker['salário'] = float(input('Qual é o seu salário: R$'))
    # Retirement after 35 years of service, counted from the hiring year.
    years_until_retirement = 35 - (current_year - worker['ano de contratação'])
    worker['aposentádoria'] = worker['idade'] + years_until_retirement
else:
    worker['CTPS'] = 'Empty'
for k, v in worker.items():
    print(f'{k} tem o valor: {v}')
| true
|
86d3e7f84cfd2a9e7af722876e6a2c88c80320e3
|
Python
|
franklintra/template-project
|
/main.py
|
UTF-8
| 827
| 2.9375
| 3
|
[] |
no_license
|
import math as m
import numpy as np
import sympy as sp
import statistics as s
import sys
import os
import argparse
import random as r
import pprint
def loop_list(n: int, f: callable, *args) -> object:
    """Call f(*args) n times and collect the results in a list.

    :param n: number of calls to make
    :param f: callable invoked on every iteration
    :param args: positional arguments forwarded to f
    :rtype: list
    """
    results = []
    for _ in range(n):
        results.append(f(*args))
    return results
def loop(n: int, f: callable, *args) -> object:
    """Call f(*args) n times for its side effects; results are discarded.

    :param n: number of calls to make
    :param f: callable invoked on every iteration
    :param args: positional arguments forwarded to f
    :rtype: None
    """
    count = 0
    while count < n:
        f(*args)
        count += 1
def main():
    """Entry-point placeholder; intentionally does nothing yet."""
    pass
def add(f, liste):
    """Append f to liste in place; returns None (the result of list.append)."""
    appended = liste.append(f)
    return appended
if __name__ == '__main__':
    # Build 100 lists of 10 random 0/1 draws (nested use of loop_list),
    # then print the mean of the per-list means (expected to hover near 0.5).
    liste = loop_list(100, loop_list, 10, r.randint, 0, 1)
    pprint.pprint(liste)
    average = []
    for i in liste:
        average.append(s.mean(i))
    pprint.pprint(s.mean(average))
    # NOTE(review): help() with no arguments opens the interactive help
    # prompt — presumably a leftover from debugging; confirm it is intended.
    help()
| true
|
63a890d056ac84054eaf3c1c1b7c057ccd96e269
|
Python
|
Dflybird/PlotDemo
|
/ocean_physics_exp/speed/speed_airboat.py
|
UTF-8
| 1,200
| 2.640625
| 3
|
[] |
no_license
|
# coding:utf-8
# Plot measured (real boat) vs. simulated unmanned-boat speed over time
# and save the comparison figure as a PNG.
import matplotlib.pyplot as plt
import numpy
import json
# Use a CJK-capable font so the Chinese axis labels render correctly,
# and keep the minus sign rendering intact.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
if __name__ == "__main__":
    plt.subplots(1, 1, figsize=(8, 4), frameon=True)
    plt.xlim((0, 14))
    plt.ylim((0, 0.8))
    plt.grid(True, linestyle='--', alpha=0.5)
    # Alternative (commented-out) title label, roughly: "Lutra Airboat vs.
    # simulated unmanned boat speed comparison, thrust 3.13 N".
    # plt.xlabel("Lutra Airboat和仿真的无人船速度对比,推进力为3.13N", size=12)
    plt.xlabel("时间 (单位:秒)", size=12)
    plt.ylabel("速度(单位:米/秒)", size=12)
    # NOTE(review): `labels` appears unused — the legend labels are passed
    # directly to plt.plot below; confirm before removing.
    labels = ["仿真无人船", "真实舰艇"]
    f = open("./lutra_airboat_data.json")
    # f = open("./lutra_prop_data.json")
    jsonData = json.load(f)
    realData = jsonData["speedData"]
    f = open("./sim_speed_data.json")
    jsonData = json.load(f)
    simData = jsonData["speed"]
    # linewidth=0 plus markers gives scatter-style traces; the time axes
    # assume 5 samples/s for the real data and 10 samples/s for the
    # simulation (len/5 with 0.2 s step, len/10 with 0.1 s step) — confirm.
    plt.plot(numpy.arange(0, len(realData) / 5, 0.2), realData, label="真实舰艇", linewidth=0, marker='s')
    plt.plot(numpy.arange(0, len(simData) / 10, 0.1), simData, label="仿真无人船", linewidth=0, marker='.')
    plt.legend(loc="lower right")
    plt.tight_layout()
    plt.savefig("./speed_lutra_airboat.png", dpi=300)
    # plt.show()
    plt.clf()
| true
|
daa23629d033c9c0e3e9c0ead97f361faf9f238d
|
Python
|
mengqingmeng/gcft_ant
|
/test_ant/ant/export_excel_test.py
|
UTF-8
| 2,555
| 2.578125
| 3
|
[] |
no_license
|
#coding=utf-8
'''
Created on 2016-08-18 (original note: 2016年8月18日)

@author: MQM

Read up to 600,000 per-record JSON files from disk and export selected
fields into one Excel worksheet via openpyxl.
'''
from openpyxl import Workbook
from openpyxl import load_workbook
import json
wb = Workbook() # create a new workbook
ws = wb.active
#workSheet = wb.create_sheet("sheet2") # create a new sheet named "sheet2"
#workSheet.title = "new sheet2" # rename a sheet
#ws3 = wb["New Title"]  # two ways to fetch a sheet by name:
#ws4 = wb.get_sheet_by_name("New Title")
# Header row; the column titles are intentionally Chinese report labels.
ws['A1'] = "id"
ws['B1'] = "名称"
ws['C1'] = "备案编号"
ws['D1'] = "备案日期"
ws['E1'] = "备案状态"
ws['F1'] = "企业名称"
ws['G1'] = "企业地址"
ws['H1'] = "卫生许可"
ws['I1'] = "说明"
ws['J1'] = "processid"
# One JSON file per record; row value+1 leaves row 1 for the header.
for value in range(1,600000):
    with open('F:\JTPData\detail\\'+str(value)+'_gcft.json' , 'r',encoding='utf-8',errors='ignore') as f:
        jsonData = json.load(f)
        name = jsonData["productname"]
        apply_sn = jsonData["apply_sn"]
        provinceConfirm = jsonData["provinceConfirm"] # filing date
        state = jsonData["state"]
        enterprise_name = jsonData["scqyUnitinfo"]["enterprise_name"]
        enterprise_address = jsonData["scqyUnitinfo"]["enterprise_address"]
        enterprise_healthpermits = jsonData["scqyUnitinfo"]["enterprise_healthpermits"]
        remark = jsonData["scqyUnitinfo"]["remark"]
        processid = jsonData["processid"]
        ws['A'+str(value+1)] = str(value)
        ws['B'+str(value+1)] = name
        ws['C'+str(value+1)] = apply_sn
        ws['D'+str(value+1)] = provinceConfirm
        ws['E'+str(value+1)] = state
        ws['F'+str(value+1)] = enterprise_name
        ws['G'+str(value+1)] = enterprise_address
        ws['H'+str(value+1)] = enterprise_healthpermits
        ws['I'+str(value+1)] = remark
        ws['J'+str(value+1)] = processid
        # Extra per-record list entries go into columns K, L, ... mapped by
        # ASCII arithmetic: single letters up to 'Z' (90), then the
        # 'A?'/'B?'/'C?'/'D?' double-letter ranges via the offsets below.
        tep =int(ord('J'))
        for cf in jsonData["pfList"]:
            tep = tep+1
            cowNum = ''
            if tep <=90:
                cowNum = chr(tep)
            if tep > 90:
                cowNum = 'A'+chr(tep-26)
            if tep >116:
                cowNum = 'B'+chr(tep-52)
            if tep>142:
                cowNum = 'C'+chr(tep-78)
            if tep>168:
                cowNum ='D'+chr(tep-104)
            try:
                ws[cowNum+str(value+1)] = cf["cname"]
            except:
                # More list entries than mapped columns (message: "row count exceeded")
                print("行数超出")
        # Progress marker every 1000 records.
        if value % 1000 == 0:
            print("value:",value)
wb.save("F:\JTPData\\test.xlsx")
#wb.save("F:\JTPData\\test.xlsx")
| true
|
3c952aec77460288fc4ef9537dd62dccc0bb9cef
|
Python
|
MenaceDenis/DojoWork
|
/Python/average.py
|
UTF-8
| 96
| 3.140625
| 3
|
[] |
no_license
|
# Compute and print the arithmetic mean of a fixed list of numbers.
a = [1, 2, 5, 10, 255, 3]
# Bug fix: the original used the Python 2 print statement (`print avg`),
# which is a SyntaxError under Python 3, and shadowed the built-in `sum`
# with a manual running total; use the built-in sum() and print() instead.
avg = sum(a) / len(a)
print(avg)
| true
|
aec58eff5161dbfcdb012f94039fa95a2cbbf2fe
|
Python
|
misskaseyann/candy-slayer
|
/candy_slayer/game_state/NeighborhoodScreen.py
|
UTF-8
| 4,425
| 3.0625
| 3
|
[] |
no_license
|
import os
import pygame
from candy_slayer.game_state.GameState import GameState
class NeighborhoodScreen(GameState):
    """
    Neighborhood game state object.
    Music credit: Visager @ https://soundcloud.com/visagermusic
    Font credit: Alagard @ https://www.dafont.com/alagard.font

    The neighborhood is a grid of houses; each grid cell is laid out
    100 px wide and 80 px tall around offsets near (320, 280).
    """
    def __init__(self, manager):
        """
        Initialize the neighborhood game state.
        :param manager: game object manager
        """
        super().__init__(manager)
        # Grid dimensions (houses across / houses down) from the model.
        self.neighborhoodx = self.manager.neighborhood.get_width()
        self.neighborhoody = self.manager.neighborhood.get_height()
        # Player pixel position, centered over the house grid.
        # NOTE(review): the 335/300 offsets here differ from the 320/280
        # offsets used in get_event() and draw() — confirm which is intended.
        self.playerx = (335 - ((self.neighborhoodx * 100) / 2))
        self.playery = (300 - ((self.neighborhoody * 80) / 2))
        # Grid coordinates of the house the player is currently standing on.
        self.housex = 0
        self.housey = 0
    def startup(self, persistent):
        """
        Called when a state resumes being active.
        Allows information to be passed between states.
        :param persistent: a dict passed from state to state
        """
        # Loop the neighborhood theme indefinitely until the state changes.
        pygame.mixer.music.load(os.path.join("candy_slayer/assets/", "eerieloop.wav"))
        pygame.mixer.music.play(-1)
        # Assets and the HUD status line are (re)built on every startup.
        self.font = pygame.font.Font(os.path.join("candy_slayer/assets/", "alagard.ttf"), 16)
        self.house_img = pygame.image.load(os.path.join("candy_slayer/assets/", "house.png")).convert_alpha()
        self.player_img = pygame.image.load(os.path.join("candy_slayer/assets/", "player.png")).convert_alpha()
        self.enemies_txt = self.font.render("Monsters: " + str(self.manager.get_population()) + " | Health: " +
                                            str(self.manager.get_player().get_currhp()) + "/" +
                                            str(self.manager.get_player().get_hpmax()) + " | Weapon: " +
                                            str(self.manager.get_player().get_currweapon().get_name()),
                                            True, (112, 89, 154))
    def get_event(self, event):
        """
        Handles events in the game.
        :param event: event to be handled
        """
        # If player exits, end the game.
        if event.type == pygame.QUIT:
            self.quit = True
        if event.type == pygame.KEYDOWN:
            # WASD movement: each keypress moves one grid cell (100 px
            # horizontally, 80 px vertically), clamped to the grid bounds.
            # 'd' key moving the player to the right.
            if event.key == pygame.K_d:
                if self.playerx < (((self.neighborhoodx - 1) * 100) + (320 - ((self.neighborhoodx * 100)/2))):
                    self.housex += 1
                    self.playerx += 100
            # 'a' key moving the player to the left.
            if event.key == pygame.K_a:
                if self.playerx > (320 - (((self.neighborhoodx - 1) * 100)/2)):
                    self.housex -= 1
                    self.playerx -= 100
            # 'w' key moving the player up.
            if event.key == pygame.K_w:
                if self.playery > (280 - (((self.neighborhoody - 1) * 80)/2)):
                    self.housey -= 1
                    self.playery -= 80
            # 's' key moving the player down.
            if event.key == pygame.K_s:
                if self.playery < (((self.neighborhoody - 1) * 80) + (280 - ((self.neighborhoody * 80)/2))):
                    self.housey += 1
                    self.playery += 80
            # Enter button selects the house the player is entering.
            if event.key == pygame.K_RETURN:
                # Switch to battle music, hand the selected house to the
                # player, and transition to the BATTLE state.
                pygame.mixer.music.load(os.path.join("candy_slayer/assets/", "battle.wav"))
                pygame.mixer.music.play(-1)
                self.manager.get_player().set_currhouse(
                    self.manager.get_neighborhood().get_house(self.housey, self.housex))
                self.next_state = "BATTLE"
                self.done = True
    def draw(self, surface):
        """
        Draw game objects on the screen.
        :param surface: game screen to be drawn on
        """
        surface.fill((255, 241, 235))
        # Draw the house grid, then the player sprite, then the HUD text.
        for h in range(0, self.neighborhoody):
            for w in range(0, self.neighborhoodx):
                surface.blit(pygame.transform.scale(self.house_img, (64, 64)),
                             ((w * 100) + (320 - ((self.neighborhoodx * 100)/2)),
                              (h * 80) + (280 - ((self.neighborhoody * 80)/2))))
        surface.blit(self.player_img, (self.playerx, self.playery))
        surface.blit(self.enemies_txt, (300 - self.enemies_txt.get_width() / 2, 10))
| true
|
def86977940f03af17c8e91505b5763e6a81113d
|
Python
|
Vchenhailong/my-notes-on-python
|
/basic_rules/chapter_9/__init__.py
|
UTF-8
| 1,444
| 3.671875
| 4
|
[] |
no_license
|
#! /usr/bin/python
# coding:utf-8
"""
Constructors, properties, iterators & generators (study notes).

To create objects that can act as a sequence or mapping, implement the
following special methods:
    __len__(self),
    __getitem__(self, key),
    __setitem__(self, key, value),
    __delitem__(self, key)
Note: immutable objects need to implement only the first 2 methods,
mutable objects need all 4.

property: see contractors.py
    @property: turns a method into an attribute getter.
    @staticmethod: static method; its definition has no self parameter
        and it can be called directly on the class.
    @classmethod: class method; its definition takes a self-like
        parameter, conventionally named cls.

iterator: any object that implements the __iter__ method (paired with
    a __next__ method).
generator: a function using the yield statement.

(The original file carried these notes duplicated in English and in
Chinese; they are consolidated into English here. The original English
text's "contractor" has been read as "constructor".)
"""
| true
|
1dbca8ddc241452d7c31fa58d253a891828a9325
|
Python
|
kh4r00n/Pet-Shop-Boys
|
/petshopboys_final.py
|
UTF-8
| 14,842
| 3.484375
| 3
|
[] |
no_license
|
# PRODUCT PRICE FUNCTION:
def val_prod(s, n):
    """Return the total price for n units of product code/name s.

    Accepts either the full name ('ração', 'ração_premium', 'brinquedo',
    'remédio') or the short code ('R', 'RP', 'BR', 'RM'); returns the
    string 'Erro' for anything else.
    """
    prices = {
        'ração': 199.90, 'R': 199.90,
        'ração_premium': 259.90, 'RP': 259.90,
        'brinquedo': 39.90, 'BR': 39.90,
        'remédio': 59.90, 'RM': 59.90,
    }
    if s in prices:
        return prices[s] * n
    return 'Erro'
# SERVICE PRICE FUNCTION:
def val_serv(s, n):
    """Return the total price for n occurrences of service code/name s.

    Accepts either the full name ('tosa', 'banho', 'passeio', 'hotel')
    or the short code ('T', 'B', 'P', 'H'); returns the string 'Erro'
    for anything else.
    """
    prices = {
        'tosa': 59.90, 'T': 59.90,
        'banho': 49.90, 'B': 49.90,
        'passeio': 39.90, 'P': 39.90,
        'hotel': 119.90, 'H': 119.90,
    }
    if s in prices:
        return prices[s] * n
    return 'Erro'
# FUNCTION: DISPLAY THE CART LIST
def exibir_lista(l):
    """Print one receipt line per cart entry [quantity, name, total]."""
    for entry in l:
        qnt = entry[0]
        prod = entry[1]
        val = entry[2]
        # Singular vs. plural unit word ("unidade"/"unidades").
        if qnt > 1:
            print(f'{qnt} unidades de {prod} = R$ {val:.2f}')
        else:
            print(f'{qnt} unidade de {prod} = R$ {val:.2f}')
# CHANGE (BANKNOTES) FUNCTION:
def troco(numero):
    """Break `numero` into 50/20/10/5/2 notes plus a 1-unit remainder
    and print the count for each denomination (greedy, largest first)."""
    counts = []
    for note in (50, 20, 10, 5, 2):
        qty = int(numero / note)
        counts.append(qty)
        numero = numero - (qty * note)
    cinquenta, vinte, dez, cinco, dois = counts
    um = numero
    print('Notas R$ 50,00 = ', cinquenta)
    print('Notas R$ 20,00 = ', vinte)
    print('Notas R$ 10,00 = ', dez)
    print('Notas R$ 5,00 = ', cinco)
    print('Notas R$ 2,00 = ', dois)
    print('Notas R$ 1,00 = ', um)
# FUNCTION: FIND THE INDEX OF THE ITEM TO REMOVE
def identifica(l, s):
    """Return the index of the first entry whose name (entry[1]) equals s,
    or None when no entry matches."""
    for idx, entry in enumerate(l):
        if entry[1] == s:
            return idx
    return None
# FUNCTION: EXPAND A CODE LETTER INTO ITS DISPLAY NAME
def adeq(l, s):
    """Return the display name (entry[0]) for code s found in table l,
    falling back to s itself when the code is absent.

    Bug fix: the original had `else: return s` INSIDE the loop, so it
    returned the raw code after checking only the FIRST table entry —
    every code except the first entry's was never expanded. The fallback
    now happens only after the whole table has been scanned.
    """
    for e in l:
        if e[1] == s:
            return e[0]
    return s
# FUNCTION: CLOSING MESSAGE AND RECEIPT
def bye(l, carrinho):
    """Print the farewell message plus an itemized receipt for cart `l`
    totalling `carrinho` (uses the module-level customer `name`)."""
    divider = '-' * 35
    print(f'\n\n\nSucesso, {name}! Sua compra já foi validada! :D')
    print('\n\nSegue sua notinha:')
    print(divider)
    print(' ' * 13 + 'NOTA FISCAL\n')
    exibir_lista(l)
    print(divider)
    print(f'Valor total: R$ {carrinho:.2f}')
    print(divider)
    print(f'\nA PetShopBoys agradece pela sua compra!\n\nVolte sempre, {name}!')
    print(
        f'\n\nPs.: Ei, não fui eu quem te disse, mas tá aqui um cupom de desconto para você compartilhar com a galera: {name}TemOPetMaisLindo\n\n')
# PAYMENT METHODS FUNCTION:
def pagamento(s, tot, l):
    """Run the checkout flow for payment method `s` — cash ('dinheiro'/'DR'),
    credit card ('cartão_de_crédito'/'CC'), debit card ('cartão_de_débito'/'CD')
    or 'PIX'/'pix' — charging total `tot` for cart `l`.

    Every recognised branch ends by printing the receipt via bye(); an
    unrecognised method does nothing (the caller's menu loop handles it).
    Uses the module-level customer `name` inside its prompts.
    """
    if s == 'dinheiro' or s == 'DR':
        print(f'\nValor da compra: R$ {tot:.2f}')
        val = float(input(f'\nDeseja troco para quanto?\n> '))
        # Re-prompt until the tendered amount covers the purchase.
        while val < tot:
            val = float(input(f'\nValor inválido. Deseja troco para quanto?\n> '))
        # Change is truncated to whole currency units (floor division by 1).
        val_troco = (val - tot) // 1
        print(f'\nSeu troco será de: R$ {val_troco:.2f}\n')
        troco(val_troco)
        bye(l, tot)
    elif s == 'cartão_de_crédito' or s == 'CC':
        print(f'\nValor da compra: R$ {tot:.2f}\n')
        # Installments: 1 to 5, interest-free per the prompt text.
        qnt_parc = int(input(f'Gostaria de parcelar em quantas vezes?\nPodemos fazer em até 5 vezes sem juros!\n> '))
        while qnt_parc > 5 or qnt_parc < 1:
            qnt_parc = int(input('Ops! Nº de parcelas inválido. Tenta outro:\n> '))
        parM = tot / qnt_parc
        print(f'\nValor da parcela: R$ {parM:.2f}')
        input(
            f'\nColoca aqui dados do cartão:\nAh, {name}, pode ficar relax que ninguém vai ter acesso a esses dados, tá?\n> ')
        bye(l, tot)
    elif s == 'cartão_de_débito' or s == 'CD':
        print(f'\nValor da compra: R$ {tot:.2f}')
        input(
            f'\nColoca aqui os dados do cartão:\nAh, {name}, pode ficar relax que ninguém vai ter acesso a esses dados, tá?\n> ')
        bye(l, tot)
    elif s == 'PIX' or s == 'pix':
        print(f'\nValor da compra: R$ {tot:.2f}\n')
        print('Pronto! Agora é fazer o PIX para o e-mail: professores.bcw4@soulcodeacademy.com')
        input('\nColoca aqui pra mim o comprovante da transferência:\n> ')
        bye(l, tot)
# ______________________________________________________________________________________________________________________________________________________________
# VARIABLE DECLARATIONS:
# list_adeq maps short item codes to their display names (used by adeq()).
list_adeq = [['Ração(R)', 'R'], ['Ração Premium(RP)', 'RP'], ['Brinquedo(BR)', 'BR'], ['Remédio(RM)', 'RM'],
             ['Tosa(T)', 'T'], ['Banho(B)', 'B'], ['Passeio(P)', 'P'], ['Hotel(H)', 'H']]
# Catalog texts (products and services with prices) shown to the customer.
list_prod = [
    'Os produtos em estoque são:\n\n- Ração(R): R$ 199.90\n- Ração_Premium(RP): R$ 259.90\n- Brinquedo(BR): R$ 39.90\n- Remédio(RM): R$ 59.90']
list_serv = [
    'Os serviços disponíveis são:\n\n- Tosa(T): R$ 59.90\n- Banho(B): R$ 49.90\n- Passeio(P): R$ 39.90\n- Hotel(H): R$ 119.90']
# Cart entries ([qty, name, total]) and the running cart total.
list_main = []
carrinho = 0
# Customer-number seed shown in the greeting.
y = 13457
# ______________________________________________________________________________________________________________________________________________________________
# MAIN PROGRAM START
name = input(f'Olá, cliente nº: {y}!\n\nPor favor, digite seu primeiro nome:\n> ')
y += 1
print(f'\nAgora sim, {name}!\nBem-vindo(a) a PetShopBoys: O parque de diversões dos "Pais de Pet"!')
while True:
    continuar = ' ' # input('\n\nEscolha Acessar(A) nosso menu ou Finalizar(F) o atendimento:\n> ').strip().upper()[0]
    # Loop until the customer answers A (access menu) or F (finish).
    while continuar not in 'AF':
        continuar = input('\n\nEscolha Acessar(A) nosso menu ou Finalizar(F) o atendimento:\n> ').strip().upper()[0]
    # 'A': open the main menu for the customer.
    if continuar == 'A':
        call = input('\nPosso te ajudar com:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
        # NOTE(review): the membership test against the literal 'P,SV,C,S'
        # also accepts substrings like ',' or 'V' — presumably unintended.
        while call not in 'P,SV,C,S':
            call = input(
                '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
        resp = ''
        while (resp != 'S'):
            # Products branch.
            if call == 'produtos' or call == 'PRODUTOS' or call == 'P':
                print()
                print(*list_prod, sep='')
                prod = input('\nPõe aqui o código do produto que você escolheu: ').upper()
                while prod not in 'R, RP, BR, RM':
                    prod = input('\nPõe aqui o código do produto que você escolheu: ').upper()
                # Ask for a quantity until it is a valid positive integer.
                while True:
                    try:
                        qnt = int(input('Escolha a quantidade: '))
                        while qnt < 1:
                            qnt = int(input('Ops! Quantidade inválida, tente novamente: '))
                        break
                    except:
                        print('Número inválido')
                # Price the item, expand its name, and add it to the cart.
                val = val_prod(prod, qnt)
                adequação = adeq(list_adeq, prod)
                list_sub = [qnt, adequação, val]
                list_main.append(list_sub)
                carrinho += val
                print(f'\n> Adicionei {qnt} unidades de {adequação} no seu carrinho.\n>> Total: R$ {val:.2f}')
                call = input(
                    '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
                while call not in 'P,SV,C,S':
                    call = input(
                        '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
            # Services branch.
            elif (call == 'serviços' or call == 'SERVIÇOS' or call == 'SV'):
                print()
                print(*list_serv, sep='')
                serv = input('\nPõe aqui o código do serviço desejado: ').upper()
                while serv not in 'T, B, P, H':
                    serv = input('\nPõe aqui o código do serviço desejado: ').upper()
                # Ask for a count until it is a valid positive integer.
                while True:
                    try:
                        qnt = int(input('Escolha a quantidade de vezes: '))
                        while qnt < 1:
                            qnt = int(input('Ops! Quantidade inválida, tente novamente: '))
                        break
                    except:
                        print('Número inválido')
                # Price the service, expand its name, and add it to the cart.
                val = val_serv(serv, qnt)
                adequação = adeq(list_adeq, serv)
                list_sub = [qnt, adequação, val]
                list_main.append(list_sub)
                carrinho += val
                print(f'\n> Adicionei {qnt} vezes o serviço {adequação} no seu carrinho.\n>> Total: R$ {val:.2f}')
                call = input(
                    '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
                while call not in 'P,SV,C,S':
                    call = input(
                        '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
            # Cart branch.
            elif (call == 'carrinho' or call == 'CARRINHO' or call == 'C'):
                # Empty cart: send the customer back to the menu.
                if carrinho == 0:
                    print('\nOps! Seu carrinho ainda está vazio! Vamos às compras!')
                    call = input(
                        '\nPosso te ajudar com:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
                    while call not in 'P,SV,C,S':
                        call = input(
                            '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
                # Otherwise show the cart contents (list_main).
                else:
                    print()
                    exibir_lista(list_main)
                    print('-' * 35)
                    print(f'Valor total: {carrinho:.2f}')
                    # Offer to remove an item from the cart.
                    q = str(input(f'\n{name}, quer remover algum intruso dessa lista?\n(S/N): ')).strip().upper()[0]
                    while q not in 'SN':
                        q = \
                            str(input(f'\n{name}, quer remover algum intruso dessa lista?\n(S/N): ')).strip().upper()[0]
                    if q == 'S':
                        r = input('\nFala pra mim o código do item intruso:\n> ').upper()
                        while r not in 'R, RP, BR, RM, T, B, P, H':
                            r = input('\nFala pra mim o código do item intruso:\n> ').upper()
                        # Expand the code, find its cart index, and drop it.
                        r = adeq(list_adeq, r)
                        i = identifica(list_main, r)
                        carrinho -= list_main[i][2]
                        list_main.pop(i)
                        print('\nProduto removido com sucesso!')
                    # Checkout confirmation.
                    checkout = input('\nTudo certo para finalizarmos a compra?\n(S/N): ').strip().upper()[0]
                    while checkout not in 'SN':
                        checkout = input('\nTudo certo para finalizarmos a compra?\n(S/N): ').strip().upper()[0]
                    if checkout == 'S':
                        metodo = input(
                            f'\n{name}, qual a forma de pagamento que você prefere?\n\nDinheiro(DR), PIX, Cartão de Crédito(CC), Cartão de Débito(CD) ou Desistir da Compra(D)?\n> ').upper()
                        resp = 'S'
                        # Purchase-abandonment path.
                        if metodo == 'D' or metodo == 'DESISTIR':
                            print(
                                f'\nCOMOASSIM, {name}?! Você realmente vai abandonar seu carrinho (e a mim também ;-;)?\n')
                            exibir_lista(list_main)
                            desist = input('(S/N): ').strip().upper()[0]
                            while desist not in "SN":
                                desist = input('(S/N): ').strip().upper()[0]
                            # Abandonment confirmed.
                            if desist == 'S':
                                print(f'\nUma pena você estar indo embora, {name}.')
                            # Abandonment declined: pick a payment method.
                            else:
                                metodo = input(
                                    f'\nOba, você ficou! Qual forma de pagamento você prefere, {name}:\n\nDinheiro(DR), Cartão de Crédito(CC), Cartão de Débito(CD) ou PIX?\n> ').upper()
                                pagamento(metodo, carrinho, list_main)
                        # Customer chose a payment method directly.
                        else:
                            pagamento(metodo, carrinho, list_main)
                    # Checkout declined (or invalid answer): back to the menu.
                    else:
                        call = input(
                            '\nCerto! Vamos para onde então:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
                        while call not in 'P,SV,C,S':
                            call = input(
                                '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
            # Exit branch (customer typed S / SAIR).
            elif (call == 'sair' or call == 'SAIR' or call == 'S'):
                resp = input(f'\nTem certeza que deseja sair, {name}? ;-;\n(S/N): ').strip().upper()[0]
                while resp not in 'SN':
                    resp = input(f'\nTem certeza que deseja sair, {name}? ;-;\n(S/N): ').strip().upper()[0]
                # Customer decided to stay: back to the menu.
                if resp != 'S':
                    call = input(
                        '\nOba, você ficou! Vamos para onde então:\n\nProdutos(P), Serviços(SV) ou Carrinho(C)?\n> ').upper()
                    while call not in 'P,SV,C,S':
                        call = input(
                            '\nVamos para onde agora:\n\nProdutos(P), Serviços(SV), Carrinho(C) ou Sair(S)?\n> ').upper()
                # Customer confirms leaving the menu loop.
                else:
                    print(f'\nUma pena você estar indo embora, {name}. :(')
    # After leaving the menu loop the outer while restarts the session;
    # 'F' typed at the first prompt ends the program.
    if continuar == 'F':
        print(f'\n\nObrigado pela visita, {name}! \o/\nVolte sempre!')
        break
| true
|
ad72d5193218779432833d444fd5fa0a1749e037
|
Python
|
ScottLangridge/Shutdown-Command-Generator
|
/VirtualKeyboard.py
|
UTF-8
| 2,858
| 3.109375
| 3
|
[] |
no_license
|
from time import sleep
from code_dict import CODE_DICT
from code_dict import CUSTOM_CODES
import ctypes
class VirtualKeyboard:
    """Send synthetic keyboard input on Windows via user32.keybd_event.

    Key names are resolved through CODE_DICT; CUSTOM_CODES maps
    user-defined shortcut names to full key sequences.
    """
    #Set custom codes to matching definitions for user defined quick codes
    def __init__(self):
        # Windows-only: handle to the user32 DLL used to emit key events.
        self.user32 = ctypes.windll.user32
    #Returns code from key
    def decode(self, key):
        """Return the key code for `key` (case-insensitive CODE_DICT lookup)."""
        return CODE_DICT[key.lower()]
    #Presses down key
    def key_down(self, key):
        """Press and hold `key` (keybd_event flag 0 = key-down)."""
        self.user32.keybd_event(self.decode(key), 0, 0, 0)
        sleep(0.1)  # brief pause so the target application registers the event
    #Lets go of key
    def key_up(self, key):
        """Release `key` (keybd_event flag 2 = KEYEVENTF_KEYUP)."""
        self.user32.keybd_event(self.decode(key), 0, 2, 0)
        sleep(0.1)
    #Quickly presses then lets go of key
    def key_stroke(self, key):
        """Tap `key`: press immediately followed by release."""
        self.key_down(key)
        self.key_up(key)
    ## Type whole messages. Letters will be read as keys, except # which denotes
    ## the start of a multi-char key. EG: To write:
    ##
    ## hello
    ## world
    ##
    ## You should enter: "hello#enter##tab#world
    def type(self, keys):
        """Type the string `keys`, expanding #...# multi-character key
        names and whole-string CUSTOM_CODES shortcuts."""
        if keys in CUSTOM_CODES.keys():
            # The whole string is a user shortcut: expand it and retype.
            self.type(CUSTOM_CODES[keys])
        else:
            # State machine: characters between a '#' pair accumulate into
            # a multi-char key name; all other characters are typed directly.
            code = ''
            in_code = False
            for char in keys:
                if char == '#':
                    if in_code:
                        self.key_stroke(code)
                        code = ''
                        in_code = False
                    else:
                        in_code = True
                elif in_code:
                    code = code + str(char)
                else:
                    self.key_stroke(str(char))
    #Presses keys down one by one then releases together.
    #Good for things like "#alt##tab#" or "#ctrl##alt##delete#"
    def hold_keys(self, keys):
        """Press every key in `keys` (same #...# syntax as type()) without
        releasing, then release them in the same order."""
        if keys in CUSTOM_CODES.keys():
            self.hold_keys(CUSTOM_CODES[keys])
        else:
            code = ''
            in_code = False
            # First pass: press each key down, keeping all of them held.
            for char in keys:
                if char == '#':
                    if in_code:
                        self.key_down(code)
                        code = ''
                        in_code = False
                    else:
                        in_code = True
                elif in_code:
                    code = code + str(char)
                else:
                    self.key_down(str(char))
            # Second pass: walk the same sequence again, releasing each key.
            for char in keys:
                if char == '#':
                    if in_code:
                        self.key_up(code)
                        code = ''
                        in_code = False
                    else:
                        in_code = True
                elif in_code:
                    code = code + str(char)
                else:
                    self.key_up(str(char))
| true
|
fda77059f91015439cbfddc96e2473ffecaf27f6
|
Python
|
straga/micropython_littlefs_test
|
/data_esp/core/rpc/jsonrpc.py
|
UTF-8
| 2,606
| 2.53125
| 3
|
[] |
no_license
|
try:
import ujson as json
except Exception:
import json
pass
from core import logging
log = logging.getLogger("JSONRPC")
class JsonRpc:
    """JSON-RPC dispatcher: parses request strings and routes them to the
    core DB handler ("call_db") or an environment handler ("call_env")."""
    def __init__(self, core, mbus):
        self.core = core  # application core; provides uconf for DB calls
        self.mbus = mbus  # message bus; provides rpc.action for env calls
    @staticmethod
    def isgenerator(iterable):
        """Return True for iterator/generator-like objects: iterable but
        without a __len__ (lists/dicts/tuples have both, so return False)."""
        return hasattr(iterable, '__iter__') and not hasattr(iterable, '__len__')
    @staticmethod
    def query_params(params):
        """Normalize a params dict in place: 'args' becomes a tuple and
        'kwargs' a dict, each defaulting to empty. Returns the same dict."""
        if "args" in params:
            params["args"] = tuple(params["args"])
        else:
            params["args"] = tuple()
        if "kwargs" in params:
            params["kwargs"] = params["kwargs"]
        else:
            params["kwargs"] = dict()
        return params
    # DB
    async def call_db(self, params):
        """Invoke self.core.uconf.call(method, param, *args, **kwargs).

        Returns {'result': ...} on success or {'error': message} on failure.
        """
        response = {}
        try:
            # ACTION
            response["result"] = await self.core.uconf.call(
                params["method"],
                params["param"],
                *params["args"],
                **params["kwargs"]
            )
            # Materialize generators so the result can be JSON-serialized.
            if self.isgenerator(response["result"]):
                response["result"] = list(response["result"])
        except Exception as e:
            # Bug fix: was `"".format(e)`, which always produced an EMPTY
            # error string; format the exception text like call_env does.
            response["error"] = "{}".format(e)
            log.error("RPC-DB: {}".format(e))
        return response
    # ENV
    async def call_env(self, params):
        """Route an environment call (env name + path) through the message
        bus RPC. Returns {'result': ...} or {'error': message}."""
        response = {}
        try:
            _env = params["env"]
            _path = params["path"]
            # ACTION
            response["result"] = await self.mbus.rpc.action(env_name=_env, path=_path,
                                                            args=params["args"], kwargs=params["kwargs"])
        except Exception as e:
            response["error"] = "{}".format(e)
            log.error("RPC-ENV: {} : {}".format(e, params))
        return response
    # CALL
    async def call(self, rpc_string):
        """Parse a JSON-RPC request string, dispatch it by method name, and
        return the JSON-encoded response (always tagged with the request id,
        which defaults to 0 when parsing fails)."""
        response = {}
        rpc_id = 0
        method = None
        parse_params = None
        try:
            jsonrpc = json.loads(rpc_string)
            rpc_id = jsonrpc["id"]
            method = jsonrpc["method"]
            parse_params = self.query_params(jsonrpc["params"])
        except Exception as e:
            response["error"] = "{}".format(e)
            log.error("RPC-ENV: {} : {}".format(e, rpc_string))
        # Method dispatch; an unknown method falls through with whatever is
        # already in `response` (possibly just the parse error).
        if method == "call_db":
            response = await self.call_db(parse_params)
        if method == "call_env":
            response = await self.call_env(parse_params)
        response["id"] = rpc_id
        return json.dumps(response)
| true
|
6921c5494947346bf1d1b0fad9276610224b9166
|
Python
|
karimmakynch/PYTHON
|
/Encryption[py]/bak/c.py
|
UTF-8
| 455
| 2.578125
| 3
|
[] |
no_license
|
# Variables
# Shift amount shared by encode() and decode().
key = 200

def encode(msg):
    """Shift every character with ordinal below 0xff forward by `key`,
    wrapping modulo 0xff; characters at/above 0xff pass through unchanged."""
    result = ''
    for ch in msg:
        code = ord(ch)
        if code in range(0, 0xff):
            code = code + key
            if code >= 0xff:
                code -= 0xff
            if code < 0:
                code += 0xff
            result += chr(code)
        else:
            result += ch
    return result

def decode(msg):
    """Inverse of encode(): shift each in-range character backward by
    `key`, wrapping modulo 0xff."""
    result = ''
    for ch in msg:
        code = ord(ch)
        if code in range(0, 0xff):
            code = code - key
            if code >= 0xff:
                code -= 0xff
            if code < 0:
                code += 0xff
            result += chr(code)
        else:
            result += ch
    return result
| true
|
5ff597fb6d6375c577c80afdb57d950e7f66a985
|
Python
|
nitinverma99/Codeforces---800
|
/Two_Arrays_and_Swaps.py
|
UTF-8
| 416
| 2.703125
| 3
|
[] |
no_license
|
# For each test case: up to k times, swap the smallest element of the first
# array with the largest element of the second while that increases the sum;
# then print the first array's sum.
for _ in range(int(input())):
    n, k = list(map(int, input().split()))
    first = list(map(int, input().split()))
    second = list(map(int, input().split()))
    while k > 0:
        best = max(second)
        worst = min(first)
        if best <= worst:
            break
        first.remove(worst)
        first.append(best)
        second.remove(best)
        k -= 1
    print(sum(first))
| true
|
5160a5d6f5580ad0b42f3e831af7c3642ac3c38a
|
Python
|
jerry73204/ms-agv-car
|
/tf_openvino_source/movidius_video.py
|
UTF-8
| 4,671
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
#
# 本程式檔據隨附於套件的的「LICENSE.txt」檔案內容授權,感謝你遵守協議。
import argparse
import time
import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IEPlugin
def main():
    """Run a classification loop over a video stream on a Movidius/CPU device.

    Loads an OpenVINO IR model, reads frames from a file or camera, classifies
    each frame into left/right/stop/other, and prints scores and timing.
    """
    # Configure command-line arguments (help strings are user-facing and
    # intentionally left in Chinese).
    arg_parser = argparse.ArgumentParser(description='使用 Movidius 進行預測')
    arg_parser.add_argument(
        '--model-file',
        default='../tf_openvino_model/mo2_model/saved_model.xml',
        help='模型架構檔',
    )
    arg_parser.add_argument(
        '--weights-file',
        default='../tf_openvino_model/mo2_model/saved_model.bin',
        help='模型參數檔',
    )
    arg_parser.add_argument(
        '--video-type',
        choices=['file', 'camera'],
        default='file',
        help='影片類型',
    )
    arg_parser.add_argument(
        '--source',
        default='../sample_video/example_1.mp4',
        help='影片來源檔',
    )
    arg_parser.add_argument(
        '--input-width',
        type=int,
        default=48,
        help='模型輸入影像寬度',
    )
    arg_parser.add_argument(
        '--input-height',
        type=int,
        default=48,
        help='模型輸入影像高度',
    )
    arg_parser.add_argument(
        '--gui',
        action='store_true',
        help='啓用圖像界面',
    )
    arg_parser.add_argument(
        '--device',
        choices=['CPU', 'MYRIAD'],
        default='MYRIAD',
        help='計算裝置',
    )
    # Parse program arguments.
    args = arg_parser.parse_args()
    assert args.input_width > 0 and args.input_height > 0
    # Set up the inference device (MYRIAD = Movidius stick, or CPU).
    plugin = IEPlugin(device=args.device)
    # Load the IR model (network description + weights).
    net = IENetwork.from_ir(model=args.model_file, weights=args.weights_file)
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    exec_net = plugin.load(network=net)
    # Open the video source.
    if args.video_type == 'file':  # video file
        video_dev = cv2.VideoCapture(args.source)
        video_width = video_dev.get(cv2.CAP_PROP_FRAME_WIDTH)
        video_height = video_dev.get(cv2.CAP_PROP_FRAME_HEIGHT)
    elif args.video_type == 'camera':  # camera device 0
        video_dev = cv2.VideoCapture(0)
    # Main loop
    try:
        prev_timestamp = time.time()
        while True:
            ret, orig_image = video_dev.read()
            curr_time = time.localtime()
            # Detect end of stream.
            # NOTE(review): cv2.VideoCapture.read returns ret=False (not None)
            # on failure, so only the orig_image check terminates here — confirm.
            if ret is None or orig_image is None:
                break
            # Resize to the model's input size and scale pixels into 0..1.
            preprocessed_image = cv2.resize(
                orig_image.astype(np.float32),
                (args.input_width, args.input_height),
            ) / 255.0
            # Pack the image into a batch of size 1.
            batch = np.expand_dims(
                np.transpose(preprocessed_image, (2, 0 ,1)), # reorder NHWC -> NCHW
                0,
            )
            # Run inference (async request, then block for the result).
            request_handle = exec_net.start_async(
                request_id=0,
                inputs={input_blob: batch}
            )
            status = request_handle.wait()
            result_batch = request_handle.outputs[out_blob]
            result_onehot = result_batch[0]
            # Interpret the result: 4 class scores, argmax picks the label.
            left_score, right_score, stop_score, other_score = result_onehot
            class_id = np.argmax(result_onehot)
            if class_id == 0:
                class_str = 'left'
            elif class_id == 1:
                class_str = 'right'
            elif class_id == 2:
                class_str = 'stop'
            elif class_id == 3:
                class_str = 'other'
            # Measure elapsed time since the previous frame.
            recent_timestamp = time.time()
            period = recent_timestamp - prev_timestamp
            prev_timestamp = recent_timestamp
            print('時間:%02d:%02d:%02d ' % (curr_time.tm_hour, curr_time.tm_min, curr_time.tm_sec))
            print('輸出:%.2f %.2f %.2f %.2f' % (left_score, right_score, stop_score, other_score))
            print('類別:%s' % class_str)
            print('費時:%f' % period)
            print()
            # Optionally display the raw frame.
            if args.gui:
                cv2.imshow('', orig_image)
                cv2.waitKey(1)
    except KeyboardInterrupt:
        print('使用者中斷')
    # Release the video device.
    video_dev.release()


if __name__ == '__main__':
    main()
| true
|
b491ab1931f7f936e8f71e186daf2e4f57af5490
|
Python
|
akatkar/PythonLesson
|
/ThreadDemo.py
|
UTF-8
| 493
| 3.359375
| 3
|
[] |
no_license
|
from threading import *
from time import sleep
class MyThread(Thread):
    """Worker thread that sleeps ``delay`` seconds five times, then reports."""

    def __init__(self, name, delay=0.5):
        # Forward the thread name to the Thread base; keep the per-iteration delay.
        Thread.__init__(self, name=name)
        self.delay = delay

    def run(self):
        # Pause five times, then announce completion.
        for _ in range(5):
            sleep(self.delay)
        print(f"{self.name} finished")
# Launch both workers, then wait for them between two checkpoints.
workers = [MyThread("M1"), MyThread("M2", 0.4)]
for worker in workers:
    worker.start()
print("check point 1")
for worker in workers:
    worker.join()
print("check point 2")
| true
|
c413e55be7bf623a80bef6b0b897b4360be5d1f3
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_116/1221.py
|
UTF-8
| 1,169
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
import os
import time
import itertools
import collections
xs = range(4)
ys = range(4)
def solv(s, c):
if any([all([s[i][j]==c or s[i][j]=='T' for j in ys]) for i in xs]):
return True
if any([all([s[i][j]==c or s[i][j]=='T' for i in xs]) for j in ys]):
return True
if all([s[i][i] == c or s[i][i] == 'T' for i in xs]):
return True
if all([s[i][3-i] == c or s[i][3-i] == 'T' for i in xs]):
return True
def main():
    """Read 'Tic-Tac-Toe-Tomek' test cases from stdin and print each verdict.

    Python 2 script (raw_input/xrange/print statements). Each case is four
    4-char rows; cases are separated by a blank line.
    """
    tt = int(raw_input())
    #print "t=",tt
    s = ['' for i in range(4)]
    for t in xrange(tt):
        s[0] = raw_input()
        s[1] = raw_input()
        s[2] = raw_input()
        s[3] = raw_input()
        ss = ''.join(s)
        # Consume the blank separator line, except after the last case.
        if t!=tt-1:
            raw_input()
        #print s
        #print ss
        # Order matters: a win beats "not completed" even if '.' remains.
        if solv(s, 'X'):
            print "Case #%d: X won" % (t+1)
        elif solv(s, 'O'):
            print "Case #%d: O won" % (t+1)
        elif '.' in ss:
            print "Case #%d: Game has not completed" % (t+1)
        else:
            print "Case #%d: Draw" % (t+1)


if __name__ == '__main__':
    main()
| true
|
949f35a84828a28cfe65c20a561df2aea6a5209e
|
Python
|
jmnel/dromedary-welcome
|
/src/help_browser.py
|
UTF-8
| 3,187
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
import os
from PySide2.QtWidgets import (QWidget, QPushButton, QTextBrowser, QHBoxLayout, QVBoxLayout,
QDialog)
from PySide2.QtCore import Qt, Slot
class HelpBrowser(QWidget):
    """Singleton-style help window wrapping a QTextBrowser.

    Created on demand via :meth:`show_page`; a single instance is cached in
    the class variable and destroyed when its window (or the parent main
    window) closes.
    """

    instance = None # Class variable instance of help browser
    documentation_path = '' # Documentation path

    def __init__(self, parent=None):
        """Build the browser window (home/back/close buttons + text area).

        :param parent: widget whose ``is_closing`` signal should also close
            this window; NOT passed to the superclass (see comment below).
        """
        # def __init__(self, path, page, parent=None):
        # We don't pass parent to superclass, because we don't want help browser to be a child of
        # main window. We handle closing help browser when main window closes manually.
        super(HelpBrowser,self).__init__()
        # Set needed widget attributes. WA_DeleteOnClose is needed so that closing main window also
        # closes instance of help browser.
        self.setAttribute(Qt.WA_DeleteOnClose) # Destroy widget when window is closed.
        self.setAttribute(Qt.WA_GroupLeader)
        # Create home, back, and close buttons.
        self.home_button = QPushButton(self.tr('&Home'))
        self.back_button = QPushButton(self.tr('&Back'))
        self.close_button = QPushButton(self.tr('Close'))
        self.close_button.setShortcut(self.tr('Esc'))
        # Layout home, back, and close buttons.
        self.button_layout = QHBoxLayout()
        self.button_layout.addWidget(self.home_button)
        self.button_layout.addWidget(self.back_button)
        self.button_layout.addStretch()
        self.button_layout.addWidget(self.close_button)
        # Create basic layout containing QTextBrowser.
        self.text_browser = QTextBrowser()
        self.main_layout = QVBoxLayout()
        self.main_layout.addLayout(self.button_layout)
        self.main_layout.addWidget(self.text_browser)
        self.setLayout(self.main_layout)
        # Connect button signals
        self.home_button.clicked.connect(self.text_browser.home)
        self.back_button.clicked.connect(self.text_browser.backward)
        self.close_button.clicked.connect(self.close)
        # Calls static function to clear help browser instance reference.
        self.destroyed.connect(HelpBrowser.on_close)
        # Close help browser on parent is_closing signal.
        parent.is_closing.connect(self.close)

    # Navigates to page in documentation path.
    def goto_page(self, page):
        """Show *page* (a file name relative to ``documentation_path``)."""
        page_file_path = os.path.join(HelpBrowser.documentation_path, page)
        self.text_browser.setSource(page_file_path)

    # Sets documenation path.
    @staticmethod
    def set_documentation_path(path):
        """Set the class-wide root directory that pages are resolved against."""
        HelpBrowser.documentation_path = path

    # Unsets help browser instance reference. This gets called when help browser is destroyed.
    @staticmethod
    def on_close():
        if HelpBrowser.instance != None:
            HelpBrowser.instance = None

    # Creates and shows help browser window, stores instance in class variable, and navigates to
    # page in documentation path.
    @staticmethod
    def show_page(page, parent=None):
        """Open (or reuse) the singleton window and navigate it to *page*."""
        if HelpBrowser.instance == None:
            HelpBrowser.instance = HelpBrowser(parent)
            HelpBrowser.instance.resize(500,400)
            HelpBrowser.instance.show()
        HelpBrowser.instance.goto_page(page)
| true
|
7c8a1408c8e623011af7e781790890eac990ba0e
|
Python
|
COHRINT/cops-and-robots-2.0
|
/main.py
|
UTF-8
| 7,890
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
'''
Cops and Robots launchig file. Contains the main update loop in the __init__ function
'''
__author__ = ["LT"]
__copyright__ = "Copyright 2017, Cohrint"
__credits__ = ["Ian Loefgren","Sierra Williams","Matt Aiken","Nick Sweet"]
__license__ = "GPL"
__version__ = "2.2" # for CnR 2.0
__maintainer__ = "Luke Barbier"
__email__ = "luke.barbier@colorado.edu"
__status__ = "Development"
from pdb import set_trace
import sys
import os
import rospy
import yaml
from core.helpers.config import load_config
from core.robo_tools.cop import Cop
from core.robo_tools.robber import Robber
from core.robo_tools.gaussianMixtures import GM, Gaussian
from caught.msg import Caught
from std_msgs.msg import Bool
class MainTester(object):
    """
    Starts the CnR experiment

    Methods
    ----------
    1) __init__() : launches the experiment and contains the main loop
    2) init_cop_robber() : creates each robot as either a cop or robber
    3) update_cop_robber() : calls the robot.update() method of each robot
    4) end_experiment() : callback to the /caught_confirm topic and influences the self.running_experiment variable

    NOTE: Python 2 code (iteritems, print statement at the exception handler).
    """

    # Class-level experiment state/configuration (shared defaults).
    running_experiment = True
    experiment_runspeed_hz = 2;
    map_bounds = [-5, -2.5, 5, 2.5]  # [min_x, min_y, max_x, max_y] of the arena
    max_num_robots = 2 # Maximum number of robots our experiment is designed for

    # Related to Cop's belief
    # cop_initial_belief = GM() # cop x, cop y, rob x, rob y, then follow the rooms
    # cop_initial_belief.addNewG([0,0,-2,2],[[2,0,0,0],[0,2,0,0],[0,0,2,0],[0,0,0,2]],1) # kitchen
    # cop_initial_belief.addNewG([0,0,-5,0],[[2,0,0,0],[0,2,0,0],[0,0,2,0],[0,0,0,2]],1) # hallway
    # cop_initial_belief.addNewG([0,0,0,-2.5],[[2,0,0,0],[0,2,0,0],[0,0,2,0],[0,0,0,2]],1) # library
    # cop_initial_belief.addNewG([0,0,2,2.5],[[2,0,0,0],[0,2,0,0],[0,0,2,0],[0,0,0,2]],1) # billiards room
    # cop_initial_belief.addNewG([0,0,-5,-2],[[2,0,0,0],[0,2,0,0],[0,0,2,0],[0,0,0,2]],1) # study
    # cop_initial_belief.addNewG([0,0,-8,-2],[[2,0,0,0],[0,2,0,0],[0,0,2,0],[0,0,0,2]],1) # dining room
    # cop_initial_belief.normalizeWeights()
    delta = 0.1  # grid resolution passed to the Cop's belief

    def __init__(self, config_file='config/config.yaml'):
        """Start the ROS node, build the robots, and run the update loop."""
        print("Starting Cops and Robots")
        rospy.init_node("Python_Node")
        rospy.Subscriber('/caught_confirm', Caught, self.end_experiment)
        # caught_confirm topic
        # Create robots
        self.init_cop_robber(config_file)
        # Main Loop
        print("Entering Main Loop")
        r = rospy.Rate(self.experiment_runspeed_hz) # experiment_runspeed_hz (2 Hz)
        while self.running_experiment is True and not rospy.is_shutdown():
            self.update_cop_robber()
            r.sleep()
        # Experiment over: send every robot back to its starting position.
        for robot in self.robots:
            self.robots[robot].goal_planner.return_position()
        rospy.sleep(1)
        print("Experiment Finished")

    def init_cop_robber(self, config_file=None):
        """
        Initialize the cop and robber using the config file

        :param config_file: path to the experiment YAML; required in practice.
        NOTE(review): the bare `raise` statements below run OUTSIDE an except
        block, so they raise "no active exception" errors rather than a
        meaningful one — confirm intent / replace with explicit exceptions.
        """
        if config_file != None:
            cfg = load_config(config_file) #load the config file as a dictionary
        else:
            print("No Config File. Restart and pass the config file.")
            raise
        self.robots = {} # robot dictionary
        num_robots = 0
        try:
            for robot, kwargs in cfg['robots'].iteritems():
                if cfg['cop_rob'][robot] != 'no':
                    # check for bad config, too many robots selected
                    num_robots += 1
                    if num_robots > self.max_num_robots:
                        print("Bad config file, More robots selected than allowed")
                        print("Check config/config.yaml or run gui.py and reconfigure")
                        raise
                    # goal_planner string
                    goal_planner = cfg['robots'][robot]['goal_planner']
                    # Check cop or robber
                    # Initialize a cop
                    if cfg['cop_rob'][robot] == 'cop':
                        # Build the cop's initial belief: one Gaussian per room
                        # center, uniform weights, over (cop x, cop y, rob x, rob y).
                        with open('models/'+cfg['map']+'.yaml', 'r') as stream:
                            map_cfg = yaml.load(stream)
                        cop_initial_belief = GM()
                        for room in map_cfg['info']['rooms']:
                            max_x = map_cfg['info']['rooms'][room]['max_x']
                            max_y = map_cfg['info']['rooms'][room]['max_y']
                            min_x = map_cfg['info']['rooms'][room]['min_x']
                            min_y = map_cfg['info']['rooms'][room]['min_y']
                            cent_x = (max_x + min_x) / 2
                            cent_y = (max_y + min_y) / 2
                            cop_initial_belief.addG(Gaussian([0,0,cent_x,cent_y],[[0.5,0,0,0],[0,0.5,0,0],[0,0,0.5,0],[0,0,0,0.5]],1))
                        cop_initial_belief.normalizeWeights()
                        self.robots[robot] = Cop(cop_initial_belief,
                                                 self.delta,
                                                 self.map_bounds,
                                                 robot,
                                                 goal_planner)
                    # Initialize a robber
                    elif cfg['cop_rob'][robot] == 'rob':
                        self.robots[robot] = Robber(robot, goal_planner)
                    print("Added: " + str(robot) + " to the experiment")
        except TypeError as ex:
            print("***ERROR***, in config/config.yaml, add singe quotes (') around 'cop', 'rob' and 'no' ")
            raise
        except Exception as ex:
            template = "***ERROR*** An exception of type {0} occurred. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            print message
            raise
        print("COP AND ROBBER INITIALIZED")

    def update_cop_robber(self):
        """
        Updates the cop and robber: goal pose and belief (if cop)
        """
        # set_trace()
        for robot_name, robot in self.robots.iteritems():
            # print("UPDATING: : " + robot_name)
            robot.update() # calls Robot.update (in Robot.py)

    def end_experiment(self, msg):
        """Callback for /caught_confirm: stop the main loop once a robber is caught."""
        # NOTE(review): running_experiment is set False both inside and after
        # the if — the trailing assignment stops the loop even when
        # msg.confirm is False; confirm that is intended.
        if msg.confirm is True:
            self.running_experiment = False
            print("*****"+ msg.robber.upper() + " CAUGHT*****")
            print(" ENDING EXPERIMENT")
        self.running_experiment = False


if __name__ == '__main__':
    MainTester()
| true
|
9281eb55ef79475f37a1cb2fcf721f5203021385
|
Python
|
rajesh95cs/assignments
|
/1/substringmatchtuple.py
|
UTF-8
| 516
| 3.171875
| 3
|
[] |
no_license
|
import string
def countsubstringmatch(target, key):
    """Print every (possibly overlapping) position of *key* in *target*.

    Prints each match position, the match count, the tuple of positions, and
    a trailing "tuple=" label; also returns the tuple of positions so callers
    can use the result directly (backward compatible: the broken original
    returned nothing).

    Fixes the original implementation, which crashed with NameError (`alist`
    was sliced before ever being assigned) and relied on the Python-2-only
    `string.find` function.

    :param target: string to search in.
    :param key: substring to search for.
    :return: tuple of 0-based match positions, in increasing order.
    """
    positions = []
    start = 0
    while True:
        # str.find returns -1 once no match exists at or after `start`.
        hit = target.find(key, start)
        if hit == -1:
            break
        print("Position at ", hit)
        positions.append(hit)
        start = hit + 1  # advance one char so overlapping matches are found
    print(len(positions))
    atuple = tuple(positions)
    print(atuple)
    print("tuple=")
    return atuple


target = "atgacatgcacaagtatgcat"
key = "atgc"
countsubstringmatch(target, key)
| true
|
29468acd8f1a3cb76cabb1fbb0f62bad76bae661
|
Python
|
Klowner/8cic-encode
|
/8cic_encode.py
|
UTF-8
| 4,278
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env python
"""
Anything-That-PIL-Can-Read to Lightwall 8cic converter
Mark "Klowner" Riedesel (mark@klowner.com) / QCCoLab
------------------------------------------------------
version 2
Reads a series of image files (or a single animated GIF)
and converts them into lightwall ready format!
You can use a tool such as ffmpeg to convert any resize
a variety of videos available on the internets.
example:
$ ffmpeg -i giveyouup.flv -s 16x16 -o giveyouup_%05d.png
Then use this script to process the resulting series of
png image files.
Requires: Python Imaging Library (PIL)
"""
import struct
import glob
import sys
from PIL import Image
FORMAT_VERSION = 3
FORMAT_TYPE = 0


def dump_color_stream(c):
    """Pack an 8-pixel color column into 4 inverted bit-plane bytes.

    Each input value (0-255) is quantized to 4 bits; one output byte is
    emitted per bit-plane, most significant plane first, where bit *i* of a
    byte comes from pixel *i*.  Bytes are inverted (XOR 0xFF) for the
    lightwall hardware.

    :param c: iterable of 8 channel values in 0..255.
    :return: list of 4 ints in 0..255.
    """
    out = []
    # Bug fix: floor division (//) keeps the quantized values ints on
    # Python 3; on Python 2 it is identical to the original int '/'.
    c = [x // 16 for x in c]
    for bit in reversed(range(4)):
        byte = 0
        for i, color in enumerate(c):
            byte |= (color >> bit & 1) << i
        out.append(byte ^ 0xFF)
    return out
def process_pixel_column(pixels):
    """Yield bit-plane byte lists for the B, G, R channels of a pixel column.

    :param pixels: sequence of (r, g, b) tuples for one display column.
    """
    reds = [px[0] for px in pixels]
    greens = [px[1] for px in pixels]
    blues = [px[2] for px in pixels]
    # Channels are emitted in B, G, R order, each column scanned in reverse.
    for channel in (blues, greens, reds):
        yield dump_color_stream(reversed(channel))
def write_frame(image, size, ostream):
    """Write one frame of *image* to *ostream* in packed 8cic form.

    :param image: PIL RGB image exposing getpixel((x, y)).
    :param size: (width, height) of the target display; height is treated as
        a multiple of 8 (one group per 8-pixel panel row).
    :param ostream: binary file-like object the packed bytes are appended to.

    Py3 compatibility fixes (behavior unchanged on py2): '//' for the row
    count, range instead of xrange, list comprehension instead of map/lambda.
    """
    (w, h) = size
    rows = h // 8
    for row_groups in range(rows):
        for xpos in range(w):
            # Every other display row belongs to this group.
            # NOTE(review): the [row_groups::2] stride presumably targets a
            # 2-group (h=16) wall — verify for taller configurations.
            ypos = range(h)[row_groups::2]
            pixels = [image.getpixel((xpos, y)) for y in ypos]
            for x in process_pixel_column(pixels):
                ostream.write(struct.pack('BBBB', *x))
def write_header(target_size, delay, ostream):
    """Write the 8cic stream header to *ostream* and log it to stdout.

    Format version 3+ packs: version, type, w*rows (presumably the column
    payload size — confirm against the decoder), width, delay (uint16, ms).
    Older versions omit the width/delay fields.
    """
    (w, h) = target_size
    rows = h // 8  # '//' keeps this an int on py3; identical value on py2
    if FORMAT_VERSION > 2:
        ostream.write(struct.pack('BBBBH', FORMAT_VERSION, FORMAT_TYPE, w*rows, w, delay))
    else:
        ostream.write(struct.pack('BBB', FORMAT_VERSION, FORMAT_TYPE, w*rows))
    sys.stdout.write("HEADER [ 8cic version:%d target:%dx%d ]\n" % (FORMAT_VERSION, w,h))
def process_image_sequence(files, target_size, options, ostream):
    """Encode each image file as one frame, looping options.repeat times.

    :param files: ordered list of image file paths (any PIL-readable format).
    :param target_size: (width, height) passed through to write_frame.
    :param options: parsed command-line options (uses .repeat).
    :param ostream: binary output stream.
    """
    for _ in range(options.repeat):  # range: py3-compatible (was xrange)
        for filename in files:
            img = Image.open(filename)
            img = img.convert('RGB')
            write_frame(img, target_size, ostream)
            sys.stdout.write("FRAME [ %s ]\n" % filename)
def process_animated_gif(files, target_size, options, ostream):
    """Encode an animated GIF, looping the whole animation options.repeat times.

    Each GIF frame is emitted duration/delay times so playback speed is
    preserved at the wall's fixed inter-frame delay.

    :param files: list whose first entry is the GIF path (others ignored).
    """
    filename = files[0]
    gifimg = Image.open(filename)
    count = 0
    while count < options.repeat:
        # REPEATEDLY INSERT FRAME TO FILL REQUEST FRAME DURATION
        # ('//' matches the py2 int division of the original)
        for i in range(gifimg.info.get('duration') // options.delay):
            img = gifimg.convert('RGB')
            write_frame(img, target_size, ostream)
        try:
            gifimg.seek(gifimg.tell()+1)
        except EOFError:
            # Py3-compatible except syntax (original 'except EOFError, e' is a
            # SyntaxError on py3; the bound name was unused anyway).
            # End of animation: count one full pass and rewind.
            count += 1
            gifimg.seek(0)
def cmdline():
    """Parse command-line options and drive the image-to-8cic conversion."""
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('-i', '--input', dest="input_path", help="Source image(s) (glob) or single animated GIF")
    parser.add_option('-o', '--output', dest='output_file', default='wall.dat', help="Destination filename (default: wall.dat)")
    parser.add_option('-W', '--width', dest='width', default=0, type='int', help="Force width")
    parser.add_option('-H', '--height', dest='height', default=0, type='int', help="Force height")
    parser.add_option('-r', '--repeat', dest='repeat', default=1, type='int', help="Repeat N times (warning: makes file Nx larger)")
    parser.add_option('-d', '--delay', dest='delay', default=16, type='int', help="Inter-frame delay (mS, default: 16)")
    (options, args) = parser.parse_args()

    if not options.input_path:
        parser.print_help()
        return

    # Bug fix: open() replaces the py2-only file() builtin (removed in py3);
    # semantics are identical.
    ostream = open(options.output_file, 'wb')

    files = glob.glob(options.input_path)
    files.sort()

    if files:
        img = Image.open(files[0])
        target_size = img.size
        # Explicit -W/-H values override the first image's dimensions.
        target_size = (options.width or target_size[0], options.height or target_size[1])
        write_header(target_size, options.delay, ostream)
        # A single GIF is treated as an animation; anything else as a sequence.
        if len(files) == 1 and img.format == 'GIF':
            process_animated_gif(files, target_size, options, ostream)
        else:
            process_image_sequence(files, target_size, options, ostream)

    ostream.close()
    sys.stdout.write("Done.\n")


if __name__ == '__main__':
    cmdline()
| true
|
11bcc3080344a710d50e26e9f4196ac7379391fc
|
Python
|
stanfordnlp/color-describer
|
/third-party/stanza/stanza/text/vocab.py
|
UTF-8
| 13,262
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
"""
Vocabulary module for conversion between word tokens and numerical indices.
"""
__author__ = 'victor'
from collections import Counter, namedtuple, OrderedDict
from itertools import izip
import numpy as np
from copy import deepcopy
import zipfile
from ..util.resource import get_data_or_download
class Vocab(object):
    """A mapping between words and numerical indices. This class is used to facilitate the creation of word embedding matrices.

    Example:

    .. code-block:: python

        v = Vocab('***UNK***')
        indices = v.update("I'm a list of words".split())
        print('indices')

    NOTE: UNK is always represented by the 0 index.
    NOTE: Python 2 code (iteritems/iterkeys methods, xrange in from_dict).
    """

    def __init__(self, unk):
        """Construct a Vocab object.

        :param unk: string to represent the unknown word (UNK). It is always represented by the 0 index.
        """
        # OrderedDict: insertion order IS the index order (see indices2words).
        self._word2index = OrderedDict()
        self._counts = Counter()
        self._unk = unk

        # assign an index for UNK
        self.add(self._unk, count=0)

    def clear(self):
        """
        Resets all mappings and counts. The unk token is retained.
        """
        self._word2index.clear()
        self._counts.clear()
        self.add(self._unk, count=0)

    def __iter__(self):
        """
        :return: An iterator over the words in the vocabulary
        (fixed doc: iterating the underlying dict yields keys, not pairs).
        """
        return iter(self._word2index)

    def iteritems(self):
        """
        :return: An iterator over the (word, index) tuples in the vocabulary
        """
        return self._word2index.iteritems()

    def items(self):
        """
        :return: A list of (word, index) pairs from the vocabulary.
        """
        return self._word2index.items()

    def keys(self):
        """
        :return: A list of words in the vocabulary.
        """
        return self._word2index.keys()

    def iterkeys(self):
        """
        :return: An iterator over the words in the vocabulary.
        """
        return self._word2index.iterkeys()

    def __repr__(self):
        """Represent Vocab as a dictionary from words to indices."""
        return str(self._word2index)

    def __str__(self):
        return 'Vocab(%d words)' % len(self._word2index)

    def __len__(self):
        """Get total number of entries in vocab (including UNK)."""
        return len(self._word2index)

    def __getitem__(self, word):
        """Get the index for a word.

        If the word is unknown, the index for UNK (0) is returned.
        """
        return self._word2index.get(word, 0)

    def __contains__(self, word):
        """
        :return: whether word is in the vocabulary
        """
        return word in self._word2index

    def add(self, word, count=1):
        """Add a word to the vocabulary and return its index.

        :param word: word to add to the dictionary.

        :param count: how many times to add the word.

        :return: index of the added word.

        WARNING: this function assumes that if the Vocab currently has N words, then
        there is a perfect bijection between these N words and the integers 0 through N-1.
        """
        if word not in self._word2index:
            self._word2index[word] = len(self._word2index)
        self._counts[word] += count
        return self._word2index[word]

    def update(self, words):
        """
        Add an iterable of words to the Vocabulary.

        :param words: an iterable of words to add. Each word will be added once.

        :return: the corresponding list of indices for each word.
        """
        return [self.add(w) for w in words]

    def words2indices(self, words):
        """
        Convert a list of words into a list of indices.

        :param words: an iterable of words to map to indices.

        :return: the corresponding indices for each word. If a word is not found in the vocabulary then the unknown index will be returned for it.
        """
        return [self[w] for w in words]

    def indices2words(self, indices):
        """
        Converts a list of indices into a list of words.

        :param indices: indices for which to retrieve words.

        :return: a list of words corresponding to each index.
        """
        # NOTE(review): py2-only — on py3, keys() returns a view that is not
        # indexable; wrap in list() when porting.
        index2word = self._word2index.keys() # works because word2index is an OrderedDict
        return [index2word[i] for i in indices]

    @property
    def counts(self):
        """
        :return: a counter containing the number of occurrences of each word.
        """
        return self._counts

    def prune_rares(self, cutoff=2):
        """
        returns a **new** `Vocab` object that is similar to this one but with rare words removed. Note that the indices in the new `Vocab` will be remapped (because rare words will have been removed).

        :param cutoff: words occuring less than this number of times are removed from the vocabulary.

        :return: A new, pruned, vocabulary.

        NOTE: UNK is never pruned.
        """
        # make a deep copy and reset its contents
        v = deepcopy(self)
        v.clear()
        for w in self._word2index:
            if self._counts[w] >= cutoff or w == self._unk: # don't remove unk
                v.add(w, count=self._counts[w])
        return v

    def sort_by_decreasing_count(self):
        """Return a **new** `Vocab` object that is ordered by decreasing count.

        The word at index 1 will be most common, the word at index 2 will be
        next most common, and so on.

        :return: A new vocabulary sorted by decreasing count.

        NOTE: UNK will remain at index 0, regardless of its frequency.
        """
        v = self.__class__(unk=self._unk) # use __class__ to support subclasses
        # UNK gets index 0
        v.add(self._unk, count=self._counts[self._unk])
        for word, count in self._counts.most_common():
            if word != self._unk:
                v.add(word, count=count)
        return v

    def clear_counts(self):
        """
        Removes counts for all tokens.

        :return: the vocabulary object.
        """
        # TODO: this removes the entries too, rather than setting them to 0
        self._counts.clear()
        return self

    @classmethod
    def from_dict(cls, word2index, unk):
        """Create Vocab from an existing string to integer dictionary.

        All counts are set to 0.

        :param word2index: a dictionary representing a bijection from N words to the integers 0 through N-1.
        UNK must be assigned the 0 index.

        :param unk: the string representing unk in word2index.

        :return: a created vocab object.
        """
        try:
            if word2index[unk] != 0:
                raise ValueError('unk must be assigned index 0')
        except KeyError:
            raise ValueError('word2index must have an entry for unk.')

        # check that word2index is a bijection
        vals = set(word2index.values()) # unique indices
        n = len(vals)
        bijection = (len(word2index) == n) and (vals == set(range(n)))
        if not bijection:
            raise ValueError('word2index is not a bijection between N words and the integers 0 through N-1.')

        # reverse the dictionary
        index2word = {idx: word for word, idx in word2index.iteritems()}

        vocab = cls(unk=unk)
        for i in xrange(n):
            vocab.add(index2word[i])
        return vocab
class EmbeddedVocab(Vocab):
    """Vocab that can materialize an embedding matrix for its words."""

    def get_embeddings(self):
        """Return the embedding matrix for this vocabulary (subclass hook)."""
        raise NotImplementedError()

    def backfill_unk_emb(self, E, filled_words):
        """Overwrite the rows of words absent from *filled_words* with UNK's row.

        :param E: embedding matrix of shape `(vocab_size, emb_dim)`; modified in place.
        :param filled_words: collection of words whose rows are left untouched.

        NOTE: this function is for internal use.
        """
        unk_row = E[self[self._unk]]
        for idx, token in enumerate(self):
            if token not in filled_words:
                E[idx] = unk_row
class SennaVocab(EmbeddedVocab):
    """
    Vocab object with initialization from Senna by Collobert et al.

    Reference: http://ronan.collobert.com/senna
    """

    embeddings_url = 'https://github.com/baojie/senna/raw/master/embeddings/embeddings.txt'
    words_url = 'https://raw.githubusercontent.com/baojie/senna/master/hash/words.lst'
    n_dim = 50

    def __init__(self, unk='UNKNOWN'):
        super(SennaVocab, self).__init__(unk=unk)

    @classmethod
    def gen_word_list(cls, fname):
        """Yield one word per line of *fname* (trailing newline characters stripped)."""
        with open(fname) as f:
            for line in f:
                yield line.rstrip("\n\r")

    @classmethod
    def gen_embeddings(cls, fname):
        """Yield one embedding vector (numpy array) per line of *fname*."""
        with open(fname) as f:
            for line in f:
                yield np.fromstring(line, sep=' ')

    def get_embeddings(self, rand=None, dtype='float32'):
        """
        Retrieves the embeddings for the vocabulary.

        :param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.

        :param dtype: Type of the matrix.

        :return: embeddings corresponding to the vocab instance.

        NOTE: this function will download potentially very large binary dumps the first time it is called.
        """
        rand = rand if rand else lambda shape: np.random.uniform(-0.1, 0.1, size=shape)
        embeddings = get_data_or_download('senna', 'embeddings.txt', self.embeddings_url)
        words = get_data_or_download('senna', 'words.lst', self.words_url)

        E = rand((len(self), self.n_dim)).astype(dtype)

        seen = []
        # Bug fix: builtin zip replaces the py2-only itertools.izip so this
        # also runs on Python 3; the produced pairs are identical (zip is
        # merely eager rather than lazy on py2).
        for w, e in zip(self.gen_word_list(words), self.gen_embeddings(embeddings)):
            if w in self:
                seen += [w]
                E[self[w]] = e
        # Words never seen in the Senna files fall back to the UNK embedding.
        self.backfill_unk_emb(E, set(seen))
        return E
class GloveVocab(EmbeddedVocab):
    """
    Vocab object with initialization from GloVe by Pennington et al.

    Reference: http://nlp.stanford.edu/projects/glove
    """

    GloveSetting = namedtuple('GloveSetting', ['url', 'n_dims', 'size', 'description'])
    settings = {
        'common_crawl_48': GloveSetting('http://nlp.stanford.edu/data/glove.42B.300d.zip',
                                        [300], '1.75GB', '48B token common crawl'),
        'common_crawl_840': GloveSetting('http://nlp.stanford.edu/data/glove.840B.300d.zip',
                                         [300], '2.03GB', '840B token common crawl'),
        'twitter': GloveSetting('http://nlp.stanford.edu/data/glove.twitter.27B.zip',
                                [25, 50, 100, 200], '1.42GB', '27B token twitter'),
        'wikipedia_gigaword': GloveSetting('http://nlp.stanford.edu/data/glove.6B.zip',
                                           [50, 100, 200, 300], '822MB', '6B token wikipedia 2014 + gigaword 5'),
    }

    def __init__(self, unk='UNKNOWN'):
        super(GloveVocab, self).__init__(unk=unk)

    def get_embeddings(self, rand=None, dtype='float32', corpus='common_crawl_48', n_dim=300):
        """
        Retrieves the embeddings for the vocabulary.

        :param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.

        :param dtype: Type of the matrix.

        :param corpus: Corpus to use. Please see `GloveVocab.settings` for available corpus.

        :param n_dim: dimension of vectors to use. Please see `GloveVocab.settings` for available corpus.

        :return: embeddings corresponding to the vocab instance.

        NOTE: this function will download potentially very large binary dumps the first time it is called.
        """
        assert corpus in self.settings, '{} not in supported corpus {}'.format(corpus, self.settings.keys())
        self.n_dim, self.corpus, self.setting = n_dim, corpus, self.settings[corpus]
        assert n_dim in self.setting.n_dims, '{} not in supported dimensions {}'.format(n_dim, self.setting.n_dims)

        rand = rand if rand else lambda shape: np.random.uniform(-0.1, 0.1, size=shape)
        zip_file = get_data_or_download('glove', '{}.zip'.format(self.corpus), self.setting.url, size=self.setting.size)

        E = rand((len(self), self.n_dim)).astype(dtype)
        n_dim = str(self.n_dim)

        # Bug fix: pass the path so ZipFile opens the archive itself in binary
        # mode.  The original wrapped it in a text-mode open(), which breaks on
        # Python 3, corrupts reads on Windows py2, and leaked the file handle.
        with zipfile.ZipFile(zip_file) as zf:
            # should be only 1 txt file
            names = [info.filename for info in zf.infolist() if info.filename.endswith('.txt') and n_dim in info.filename]
            if not names:
                s = 'no .txt files found in zip file that matches {}-dim!'.format(n_dim)
                s += '\n available files: {}'.format(names)
                raise IOError(s)
            name = names[0]

            seen = []
            # NOTE(review): on py3, zf.open yields bytes, so the rstrip/split
            # below assume py2 str semantics — decode before a py3 port.
            with zf.open(name) as f:
                for line in f:
                    toks = line.rstrip().split(' ')
                    word = toks[0]
                    if word in self:
                        seen += [word]
                        E[self[word]] = np.array([float(w) for w in toks[1:]], dtype=dtype)
            self.backfill_unk_emb(E, set(seen))
            return E
| true
|
828a8c41783e8ea8b98c4c15d3a7f252288b4698
|
Python
|
xiaoxiaomeng0/python_projects
|
/Day39_flight-deals-start/flight_search.py
|
UTF-8
| 5,492
| 2.671875
| 3
|
[] |
no_license
|
# from datetime import datetime, timedelta
# import requests
# import data_manager
# from dotenv import load_dotenv
# import os
#
# load_dotenv()
#
# flight_search_endpoint = "https://tequila-api.kiwi.com/v2/search"
#
# class FlightSearch:
# #This class is responsible for talking to the Flight Search API.
# def __init__(self, list:data_manager, range=30):
# self.list = list
# self.now = datetime.now()
# self.range = range
# self.future = None
# self.cheap_flight = []
# # self.stop_overs = stop_overs
# # self.via_city = via_city
#
# def day_range_cal(self):
# self.future = self.now + timedelta(days=self.range)
# self.now = self.now.strftime("%d/%m/%Y")
# self.future = self.future.strftime("%d/%m/%Y")
#
# def flight_request(self):
# self.day_range_cal()
# for data in self.list:
# search_params = {
# "fly_from": "BOS",
# "fly_to": data["iataCode"],
# "dateFrom": self.now,
# "dateTo": self.future,
# # "max_stopovers": self.stop_overs,
# "flight_type": "round",
# # "curr": "EUR",
# # "select_stop_airport": self.via_city
# }
# headers = {
# "content-encoding": "gzip",
# "apikey": os.environ.get("FLIGHT_APIKEY"),
# }
# response = requests.get(url=flight_search_endpoint, params=search_params, headers=headers)
# # try:
# cur_lowest = response.json()["data"][0]
# print(cur_lowest)
# # except IndexError:
# # print(f"No flights found for {data['iataCode']}")
#
# # else:
# if cur_lowest["price"] < int(data["lowestPrice"]):
# flight = {
# "price": cur_lowest["price"],
# "flyFrom": cur_lowest["flyFrom"],
# "flyTo": cur_lowest["flyTo"],
# "local_arrival": cur_lowest["local_arrival"],
# "local_departure": cur_lowest["local_departure"],
# }
# self.cheap_flight.append(flight)
#
# return self.cheap_flight
#
import os
from dotenv import load_dotenv
import requests
from flight_data import FlightData
from pprint import pprint
load_dotenv()
TEQUILA_ENDPOINT = "https://tequila-api.kiwi.com"
class FlightSearch:
    """Talks to the Kiwi Tequila flight API: city-code lookup and flight search.

    NOTE(review): the two methods read DIFFERENT API-key environment variables
    (FLIGHT_APIKEY vs TEQUILA_API_KEY) — confirm which one the deployment
    actually defines and unify.
    """

    def __init__(self):
        # Accumulates IATA codes across get_destination_codes calls.
        self.city_codes = []

    def get_destination_codes(self, city_names):
        """Resolve each city name to its IATA code via the /locations endpoint.

        :param city_names: iterable of city name strings.
        :return: self.city_codes with the newly resolved codes appended.
        """
        print("get destination codes triggered")
        location_endpoint = f"{TEQUILA_ENDPOINT}/locations/query"
        headers = {"apikey": os.environ.get("FLIGHT_APIKEY")}
        for city in city_names:
            query = {"term": city, "location_types": "city"}
            response = requests.get(url=location_endpoint, headers=headers, params=query)
            results = response.json()["locations"]
            # Take the top-ranked match for the search term.
            code = results[0]["code"]
            self.city_codes.append(code)
        return self.city_codes

    def check_flights(self, origin_city_code, destination_city_code, from_time, to_time):
        """Find the cheapest round trip; retry allowing 1 stopover if none direct.

        :param origin_city_code: IATA code to fly from.
        :param destination_city_code: IATA code to fly to.
        :param from_time: earliest departure date (datetime).
        :param to_time: latest departure date (datetime).
        :return: a FlightData for the cheapest matching itinerary.
        """
        print(f"Check flights triggered for {destination_city_code}")
        headers = {"apikey": os.environ["TEQUILA_API_KEY"]}
        query = {
            "fly_from": origin_city_code,
            "fly_to": destination_city_code,
            "date_from": from_time.strftime("%d/%m/%Y"),
            "date_to": to_time.strftime("%d/%m/%Y"),
            "nights_in_dst_from": 7,
            "nights_in_dst_to": 30,
            "flight_type": "round",
            "one_for_city": 1,
            "max_stopovers": 0,
            "curr": "GBP"
        }
        response = requests.get(
            url=f"{TEQUILA_ENDPOINT}/v2/search",
            headers=headers,
            params=query,
        )

        try:
            data = response.json()["data"][0]
        except IndexError:
            # No direct flight found: retry the same search with one stopover.
            ##########################
            query["max_stopovers"] = 1
            response = requests.get(
                url=f"{TEQUILA_ENDPOINT}/v2/search",
                headers=headers,
                params=query,
            )
            data = response.json()["data"][0]
            pprint(data)
            # One-stop itinerary: destination data lives in route[1]; the
            # return leg's departure is route[2].
            # NOTE(review): this indexing assumes exactly one outbound stop
            # and raises IndexError if the retry also finds nothing — confirm.
            flight_data = FlightData(
                price=data["price"],
                origin_city=data["route"][0]["cityFrom"],
                origin_airport=data["route"][0]["flyFrom"],
                destination_city=data["route"][1]["cityTo"],
                destination_airport=data["route"][1]["flyTo"],
                out_date=data["route"][0]["local_departure"].split("T")[0],
                return_date=data["route"][2]["local_departure"].split("T")[0],
                stop_overs=1,
                via_city=data["route"][0]["cityTo"]
            )
            return flight_data
            ###########################
        else:
            # Direct itinerary: outbound is route[0], return leg is route[1].
            flight_data = FlightData(
                price=data["price"],
                origin_city=data["route"][0]["cityFrom"],
                origin_airport=data["route"][0]["flyFrom"],
                destination_city=data["route"][0]["cityTo"],
                destination_airport=data["route"][0]["flyTo"],
                out_date=data["route"][0]["local_departure"].split("T")[0],
                return_date=data["route"][1]["local_departure"].split("T")[0]
            )
            return flight_data
| true
|
8e910d9e8666850dbe6805b6b5bfa58b58c35fa8
|
Python
|
nayankshingnapure/Nayank-Shingnapure
|
/Model 1 (1).py
|
UTF-8
| 3,183
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# coding: utf-8

# Exploratory analysis of Nifty50 daily OHLCV data (exported from Jupyter;
# the "# In[n]:" markers are the original notebook cell boundaries).

# In[1]:
import pandas as pd
import numpy as np

# In[2]:
stock=pd.read_csv('c:\\Nifty50.csv')

# In[3]:
print(stock)

# In[4]:
stock.head()

# In[5]:
stock.tail()

# In[6]:
stock.describe()

# In[7]:
# Last ~90 trading days and their column-wise min / max / mean.
stock.tail(90)

# In[8]:
stock.tail(90).min(axis=0)

# In[9]:
stock.tail(90).max(axis=0)

# In[10]:
stock.tail(90).mean(axis=0)

# In[11]:
stock.info()

# In[12]:
# Parse the Date column so month/year can be extracted below.
stock['Date']=pd.to_datetime(stock['Date'])

# In[13]:
stock.info()

# In[14]:
stock['Month']=pd.DatetimeIndex(stock['Date']).month

# In[15]:
stock['Year']=pd.DatetimeIndex(stock['Date']).year

# In[16]:
# NOTE(review): this groupby is never used afterwards.
gym=stock.groupby(['Month','Year'])

# In[17]:
# NOTE(review): VWAP is normally cumsum(price*volume)/cumsum(volume); the
# denominator here is cumsum(Close) — confirm the intended formula.
stock['vwap'] = (np.cumsum(stock['Shares Traded'] * stock['Close']) / np.cumsum(stock['Close'])).astype(int)

# In[18]:
stock

# In[19]:
# Rolling N-day average of the close; N is read interactively from stdin.
N=int(input(""))
stock["avg"]=stock['Close'].rolling(window=N).mean()

# In[20]:
stock.head(1)

# In[21]:
def profit_loss_pct(N):
    """Print the profit/loss percentage for day index N of the global `stock`.

    Side effect: adds/overwrites the 'Profit/Loss' column (daily fractional
    change of Close; first row is NaN).

    NOTE(review): `calc_pnl` (the summed change over the last N days) is
    computed but never used — the prints below report only the single day at
    positional index N. Confirm which of the two was intended.
    """
    stock['Profit/Loss'] = (stock['Close'] - stock['Close'].shift(1)) / stock['Close']
    total_days = len(stock['Profit/Loss'])
    calc_pnl = stock['Profit/Loss'][total_days-N:].sum()
    if stock["Profit/Loss"][N] < 0:
        print("Loss pct is: {:5.2f}%". format(stock["Profit/Loss"][N]*100));
    else:
        print("Profit pct is : {:5.2f}%". format(stock["Profit/Loss"][N]*100));
    return
# In[22]:
profit_loss_pct(365)

# In[23]:
# NOTE(review): fillna(0) returns a new DataFrame which is discarded here
# (and again below) — the NaNs in `stock` are never actually replaced.
stock.fillna(0)

# In[24]:
# Daily fractional change of the close (NaN in the first row).
stock['Day_Perc_Change']=stock['Close'].pct_change()

# In[25]:
stock

# In[26]:
stock.fillna(0)

# In[27]:
# Classify each day's percentage move into a named trend bucket and attach
# it to `stock` as the 'Trend' column.
#
# BUG FIX: every condition previously used `or` (e.g. `c > -0.5 or c < 0.5`),
# which is always true, so every row was labelled "Slight or No change".
# The bands below use proper interval tests. Rows whose change is NaN (the
# first row from pct_change) match no band and stay NaN, as before.
#
# NOTE(review): Day_Perc_Change is a *fraction* (pct_change), while these
# thresholds read like percentages — confirm whether the column should be
# multiplied by 100 first.
trend = pd.Series(dtype=object)  # dtype given explicitly: pd.Series([]) is deprecated
for i in range(len(stock)):
    change = stock['Day_Perc_Change'][i]
    if -0.5 < change < 0.5:
        trend[i] = "Slight or No change"
    elif 0.5 <= change < 1:
        trend[i] = "Slight positive"
    elif -1 < change <= -0.5:
        trend[i] = "Slight negative"
    elif 1 <= change < 3:
        trend[i] = "Positive"
    elif -3 < change <= -1:
        trend[i] = "Negative"
    elif 3 <= change < 7:
        trend[i] = "Among top gainers"
    elif -7 < change <= -3:
        trend[i] = "Among top losers"
    elif change >= 7:
        trend[i] = "Bull run"
    elif change <= -7:
        trend[i] = "Bear drop"
stock.insert(10,"Trend",trend)
# In[28]:
stock

# In[29]:
# NOTE(review): result discarded — see earlier fillna note.
stock.fillna(0)

# In[30]:
stock.head(30)

# In[31]:
gtrend=stock.groupby(['Trend'])

# In[32]:
# Alias the volume column under the name used by the stats below.
stock['Total Traded Quantity']=stock['Shares Traded']

# In[33]:
stock

# In[34]:
# Average traded quantity per trend bucket.
stock_trend = stock.groupby(["Trend"])
average_trades = stock_trend["Total Traded Quantity"].mean()
print("The average traded quantity is: ", average_trades)

# In[35]:
stock.groupby(stock.Trend).mean()['Total Traded Quantity'].astype(int)

# In[36]:
stock.groupby(stock.Trend).median()['Total Traded Quantity']

# In[ ]:

# In[ ]:
| true
|
30c28fbb7c5875a089b520bef2f9c21cf7ac52c3
|
Python
|
JitenKumar/Data-Science-Practice-With-Python
|
/Cleaning Data in Python/duplicates.py
|
UTF-8
| 781
| 3.515625
| 4
|
[] |
no_license
|
# Data-cleaning exercise: de-duplicate the billboard tracks, fill missing
# Ozone readings with the column mean, and sanity-check the ebola frame.
# (Relies on pre-loaded DataFrames: billboard, airquality, ebola.)

# Create the new DataFrame: tracks
tracks = billboard[['year', 'artist', 'track','time']]

# Print info of tracks
print(tracks.info())

# Drop the duplicates: tracks_no_duplicates
tracks_no_duplicates = tracks.drop_duplicates()

# Print info of tracks
print(tracks_no_duplicates.info())

#-----------------------------
# fill missing values
# Calculate the mean of the Ozone column: oz_mean
oz_mean = airquality.Ozone.mean()

# BUG FIX: fill only the Ozone column. The original assigned
# `airquality.fillna(oz_mean)` — a whole DataFrame — to a single column,
# which raises "Wrong number of items passed" instead of filling the NaNs.
airquality['Ozone'] = airquality['Ozone'].fillna(oz_mean)

# Print the info of airquality
print(airquality.info())

#---- assert -------------------
# Assert that there are no missing values
assert pd.notnull(ebola).all().all()

# Assert that all values are >= 0
assert (ebola >= 0).all().all()
| true
|
a235af6ed810cccb07ef2a0a7eaee8980a1e984d
|
Python
|
scanner/tesla-powerwall-play
|
/tesla-api-play.py
|
UTF-8
| 8,614
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
#
# File: $Id$
#
"""
Try using the tesla-api instead of talking to the powerwall directly
"""
# system imports
#
import os
import asyncio
import pprint
import time
from pathlib import Path
from collections import defaultdict
from datetime import datetime, date, timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import hvac
from tesla_api import TeslaApiClient
# Plot-line colours, one per charted series (cycled by index).
COLORS = ["red", "blue", "green", "yellow", "orange", "cyan", "magenta"]

# Where the Tesla OAuth token and the Vault token are cached on disk.
TESLA_API_TOKEN_FILE = Path("~/.tesla-api-token").expanduser()
VAULT_TOKEN_FILE = Path("~/.vault-token").expanduser()

# BUG FIX: was `os.getenev(...)` — a typo that raises AttributeError the
# moment this module is imported. os.getenv returns None when unset.
VAULT_SECRETS_PATH = os.getenv("VAULT_SECRETS_PATH")

# Which power series from the site history/live data we chart.
CHARTS = [
    "battery_power",
    # "generator_power",
    "grid_power",
    # "grid_services_power",
    "solar_power",
]
####################################################################
#
def get_hvac_client():
    """
    Return a connection to our hashicorp vault server so we can get login
    credentials for the site we are going to download things from.

    Raises a RuntimeError if we are not able to authenticate to the vault
    server, or if no vault token can be found at all.
    """
    if "VAULT_ADDR" not in os.environ:
        raise RuntimeError('"VAULT_ADDR" not in environment')
    vault_addr = os.environ["VAULT_ADDR"]

    if "VAULT_TOKEN" in os.environ:
        vault_token = os.environ["VAULT_TOKEN"]
    elif VAULT_TOKEN_FILE.exists():
        with open(VAULT_TOKEN_FILE) as f:
            vault_token = f.read()
    else:
        # BUG FIX: previously vault_token was left unbound on this path,
        # producing a confusing UnboundLocalError instead of a clear failure.
        raise RuntimeError(
            f"No vault token in $VAULT_TOKEN or {VAULT_TOKEN_FILE}"
        )

    hvac_client = hvac.Client(url=vault_addr, token=vault_token)
    if not hvac_client.is_authenticated():
        raise RuntimeError(f"Can not authenticate with token to {vault_addr}")
    return hvac_client
####################################################################
#
def get_login_credentials(hvac_client):
    """
    Go to vault, get our login credentials and return a dict properly
    formatted for authenticating with the web site.
    """
    secret = hvac_client.secrets.kv.v1.read_secret(VAULT_SECRETS_PATH)
    return secret["data"]
#############################################################################
#
async def save_token(token):
    """
    Save the oauth token for re-use instead of logging in again. The file
    is created with mode 0o600 so only the owner can read it.
    """
    # BUG FIX: the umask was previously set to 0 and never restored, leaking
    # a permissive umask to everything else this process creates.
    old_umask = os.umask(0)
    try:
        # BUG FIX: O_TRUNC added — without it, a new token shorter than the
        # old one left stale trailing bytes in the file.
        with open(
            os.open(
                TESLA_API_TOKEN_FILE,
                os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
                0o600,
            ),
            "w",
        ) as fh:
            fh.write(token)
    finally:
        os.umask(old_umask)
####################################################################
#
def read_token():
    """
    Reads the token from the token file. Returns None if file does not
    exist.
    """
    if not TESLA_API_TOKEN_FILE.exists():
        return None
    # BUG FIX: the file handle was previously opened and never closed;
    # Path.read_text() opens, reads and closes in one call.
    return TESLA_API_TOKEN_FILE.read_text()
####################################################################
#
def tg_plot_history_power(ts):
    """
    Write the power time-series to "termgraph.dat" for plotting by termgraph.

    Keyword Arguments:
    ts -- list of dicts. Each dict contains the keys: 'battery_power',
          'generator_power', 'grid_power', 'grid_services_power',
          'solar_power', 'timestamp'
          'timestamp' is of the format: : '2020-10-25T00:00:00-07:00'
          All of the other values are floats (presummably in watts?)
    """
    with open("termgraph.dat", "w") as fh:
        # Header: a comment with the first timestamp, then the column names.
        fh.write(f"# Tesla energy graph starting {ts[0]['timestamp']}\n")
        fh.write(f"@ {','.join(CHARTS)}\n")
        for entry in ts:
            # Row label is the HH:MM part of the timestamp, followed by the
            # absolute value of each charted series, in header order.
            fields = [entry["timestamp"][11:16]]
            fields.extend(str(abs(entry[name])) for name in CHARTS)
            fh.write(",".join(fields) + "\n")
####################################################################
#
def write_blessed_datafile(ts):
    """
    Write a javascript file that can be used by `blessed` to write an
    ascii chart

    Keyword Arguments:
    ts -- list of dicts. Each dict contains the keys: 'battery_power',
          'generator_power', 'grid_power', 'grid_services_power',
          'solar_power', 'timestamp'
          'timestamp' is of the format: : '2020-10-25T00:00:00-07:00'
          All of the other values are floats (presummably in watts?)
    """
    # Track the global y-range across all series so the chart's minY fits,
    # and collect the HH:MM x-labels plus per-series y-values.
    min_y = 0
    max_y = 0
    timestamps = []
    series = defaultdict(list)
    for ts_d in ts:
        timestamps.append(f"\"{ts_d['timestamp'][11:16]}\"")
        for c in CHARTS:
            min_y = min(min_y, ts_d[c])
            max_y = max(max_y, ts_d[c])
            series[c].append(str(ts_d[c]))

    # Open our output data file we are generating for termgraph and
    # define in it what data columns we are writing.
    #
    with open("tesla-blessed.js", "w") as fh:
        # Boilerplate: create the blessed screen and the line-chart widget.
        # Doubled braces {{ }} below are literal braces in the f-string.
        fh.write(
            f"""
var blessed = require('blessed')
  , contrib = require('../index')
  , screen = blessed.screen()
  , line = contrib.line(
     {{ width: 164
     , height: 24
     , xPadding: 5
     , minY: {min_y}
     , showLegend: true
     , legend: {{width: 12}}
     , wholeNumbersOnly: false //true=do not show fraction in y axis
     , label: 'Power data'}});
"""
        )
        # One JS series object per charted power series, colour by index.
        series_names = []
        for idx, c in enumerate(CHARTS):
            series_name = f"series{idx}"
            series_names.append(series_name)
            fh.write(f"var {series_name} = {{\n")
            fh.write(f"     title: '{c}',\n")
            fh.write(f"     x: [{','.join(timestamps)}],\n")
            fh.write(f"     y: [{','.join(series[c])}],\n")
            fh.write(f"     style: {{line: '{COLORS[idx]}'}}\n")
            fh.write("   };\n")
        fh.write("screen.append(line); //must append before setting data\n")
        set_data = ", ".join(series_names)
        fh.write(f"line.setData([{set_data}]);\n")
        # Quit keys and final render call.
        fh.write(
            """
screen.key(['escape', 'q', 'C-c'], function(ch, key) {
  return process.exit(0);
});

screen.render();
"""
        )
#############################################################################
#
async def main():
    """
    Log in to the Tesla API (re-using a cached OAuth token when one exists,
    otherwise fetching credentials from Vault), find our single energy site,
    print its configuration, then poll and print the live status forever.
    """
    pp = pprint.PrettyPrinter(indent=2)
    email = password = None
    token = read_token()
    if token is None:
        # No cached token: pull login credentials from Vault for a fresh login.
        hvac_client = get_hvac_client()
        creds = get_login_credentials(hvac_client)
        email = creds["username"]
        password = creds["password"]

    async with TeslaApiClient(
        email, password, token, on_new_token=save_token
    ) as client:
        energy_sites = await client.list_energy_sites()
        print(f"Number of energy sites = {len(energy_sites)}")

        # We only expect there to be a single site for our home
        # (Apricot Systematic)
        #
        assert len(energy_sites) == 1
        site_as01 = energy_sites[0]

        reserve = await site_as01.get_backup_reserve_percent()
        print(f"Backup reserve percent = {reserve}")
        operating_mode = await site_as01.get_operating_mode()
        print(f"Operating mode: {operating_mode}")
        version = await site_as01.get_version()
        print(f"Version: {version}")
        battery_count = await site_as01.get_battery_count()
        print(f"Battery count: {battery_count}")

        # Poll the live status every 150 seconds, forever.
        while True:
            live_status = await site_as01.get_energy_site_live_status()
            print(f"Site live status:\n{pp.pformat(live_status)}")
            # BUG FIX: was time.sleep(150), which blocks the entire asyncio
            # event loop; asyncio.sleep yields control while waiting.
            await asyncio.sleep(150)
############################################################################
############################################################################
#
# Here is where it all starts
#
# Script entry point: run the async main() to completion (it loops forever).
if __name__ == "__main__":
    asyncio.run(main())
#
############################################################################
############################################################################
| true
|
bc6f92dee367b78b68847d32706a70bfc28bb2af
|
Python
|
mmangione/open
|
/open/core/betterself/fixtures/demo_constants.py
|
UTF-8
| 5,804
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
# Demo activity names used when seeding fixture/demo data.
ACTIVITY_NAMES = [
    "Run",
    "Job",
    "Eat",
    "Jumping Jacks",
    "Drinking Tea",
    "Playing Video Games",
    "Juggling",
    "Hug Dog",
    "Hug Spouse",
    "Call Parents",
    "Meditate",
    "Poop",
    "Exercise",
    "Got A Period",
    "Online Shopping",
    "Play Guitar",
    "Sex",
    "Listened to THE BEST SONG EVER",
    "Proposed",
    "Proposed To Love of my Life - Got Rejected",
    "Not Have Sex",
    "Buy A Dog",
    "Power Nap",
    "Video Chat",
    "Laughing",
    "BIG POOP",
    "Sprint",
    "Lazily Stare at Workout Equipment",
    "Read",
    "Fall Asleep Reading Mathematics Textbook",
    "Watch YouTube",
    "Watch Motivational Videos",
    "Listen to Podcast",
    "Binge on Food",
    "Binge on Alcohol",
]
# Demo food names used when seeding fixture/demo data.
FOOD_NAMES = [
    "Cheeseburger",
    "Beefburger",
    "Beef",
    "Steak",
    "Chicken",
    "Tomato",
    "Carrot",
    "Cheese",
    "Pizza",
    "Big Steak",
    "Onions",
    "Rice",
    "Garlic",
    "Soup",
    "Bland Soup",
    "Mom's Cooking",
    "Chinese Takeout",
    "Italian Food",
    "Chips",
    "Cape Cod",
    "Lays Chips",
    "Taco",
    "Guacamole",
    "Big Cheese",
    "Tasty Food",
    "Not Tasty Food",
    "Awful Salad",
    "Beef Salad",
    "Chicken Salad",
    "Chicken Water Salad",
    "Chicken of the Sea",
    "Air",
    "Generic Food Name",
    "Chinese Food",
    "Asian Food",
    "Thai Food",
    "Seamless",
    "GrubHub",
    "Delivery",
]
# Demo free-form note texts used when seeding fixture/demo data.
# BUG FIX: a missing comma after the "...YouTube, sigh." entry caused Python's
# implicit string concatenation to silently merge it with the following
# "If I spend another 15 minutes on Reddit..." entry into one string.
GENERIC_NOTES_TO_USE = [
    "Happy as a dog",
    "SO PUMPED IM GETTING A PUPPY",
    "Feel Good",
    "Sad",
    "Need to poop",
    "Thirsty",
    "Felt strong today",
    "Super motivated",
    "A little tired",
    "Have a hangover",
    "Sad that I lost my wallet",
    "Happy that I found my wallet",
    "Anxious",
    "Happy I finally cleaned my apartment",
    "Angry that I played video games",
    "Excited spouse is visiting",
    "Excited to propose",
    "Stomach hurts, annoyed",
    "Super happy",
    "Coffee feels awesome today",
    "Super productive",
    "Productive",
    "Lazy",
    "Feel like shit",
    "Jog felt good",
    "Crushing it at work",
    "Not crushing it at work",
    "Distracted",
    "Want to watch movies",
    "Super lazy",
    "Super sleepy",
    "Furious about morning",
    "Constipated",
    "Lethargic",
    "ENERGIZED",
    "I feel like I can do anything",
    "Dont wanna be sad no more",
    "Cramped",
    "Period Hurts",
    "Motivated af",
    "I AM THE BEST",
    "I AM THE WORST",
    "Ugh....",
    "I want coffee",
    "I want a nap",
    "I want to scream",
    "Shit feels tedious",
    "Laughing",
    "Gotta poop, gotta poop",
    "DOG MAKES ME SO HAPPY",
    "Feeling happy I called Mom",
    "Angry people make me angry",
    "Gotta meditate, gotta meditate",
    "ANGRY I PLAYED VIDEO GAMES, WTF",
    "FOOD POISONING, THE PAIN, OW OW OW",
    "I'm going to crush it one day, but right now I feel like shit",
    "I haven't been this productive in a while, I'm super pleased how much progress I made today",
    "I felt really tired after the afternoon nap ... ",
    "In hindsight, I drank way too much coffee earlier - it's been hard to sleep",
    "Definitely been feeling a bit more anxious than normal, hoping this should fade by the end of the day",
    "Trying to be a more productive person these days is quite an up hill battle. 1 step of hard work followed by 2 steps of YouTube, sigh.",
    "If I spend another 15 minutes on Reddit, I'm going to be so angry at myself",
    "I will continue eating the right foods to focus better ....",
    "I'm definitely allergic to cheese. I can't stop farting",
    "Having that hour long meeting really drained me ... ",
    "Today is really important for me to sleep the proper amount of hours",
    "NO SOCIAL MEDIA AGAIN",
    "I feel bad I haven't gone to the gym",
    "I want a cheese burger ...",
]
# Longer-form demo productivity journal entries for seeding demo data.
PRODUCTIVITY_NOTES_TO_USE = [
    "Been super productive for the whole day today. Started off on the right rhythm and made sure I didn't go on Reddit.",
    "Didn't do as much as I wanted today - was feeling a bit tired from my hangover and the food from last night.",
    "Been hard to focus, I started off the day reading news ... it didn't help, made me keep on checking those dopamine sites for new updates. BAD.",
    "Been pretty decent - I've gotten a lot of work done. Coworkers are pushing me to finish this project.",
    "Today I got a lot of work done because the deadlines were very soon.",
    "Felt super lethargic, couldn't get as much work done as I wanted to today.",
    "Today was a bad day, I wanted to go shopping and it distracted me really hard.",
    "I had to do a lot of studying today, as a result ... I didn't do any studying. Bad day, but hopefully next week I'll be much better.",
    "Thank god it's Friday, finally done with all my chores. I'll make sure I'm super productive in the next weeks to come.",
    "Been trying to fight off some of my addictions ... as a result , I just could never get into a good rhythm that I was proud enough of what I could get done.",
    "Been reading motivational quotes all throughout the day to keep me motivated. As a result, I've gotten a pretty good day of work done. Happy about that.",
    "Some crappy fights w/spouse today, I couldn't focus throughout the day.",
    "I ate a lot of junk food the night before, and so I was a little bit sluggish throughout the day.",
    "I've been eating only clean foods the last few weeks, so I've been able to get so much done today and feel great doing so!",
]
# most notes should be empty ... no one would write this much, so create a bunch of empty notes too
EMPTY_SPACES_NOTES = [""] * len(GENERIC_NOTES_TO_USE)

# make it 50% filled, 50% blank
NOTES_TO_USE_WITH_EMPTY_SPACES = GENERIC_NOTES_TO_USE + EMPTY_SPACES_NOTES
| true
|
67d2e9141b5b3e80fa76f3c0bc4b0993df28cfdc
|
Python
|
leonmbauer/Advent-Of-Code-2020
|
/day12/day12part2.py
|
UTF-8
| 1,661
| 3.59375
| 4
|
[] |
no_license
|
# Puzzle link: https://adventofcode.com/2020/day/12
# Read one instruction per line (e.g. "F10", "R90") into `directions`.
# NOTE(review): the file handle is never closed, and the path relies on
# "\c", "\A", "\d" not being escape sequences — prefer a raw string.
inputfile = open("D:\coding\Advent of Code 2020\day12\day12input.txt", "r")
lines = inputfile.readlines()

directions = []
for value in lines:
    directions.append(value.strip("\n"))
def day12(directions):
    """Advent of Code 2020 day 12 part 2: steer a ship via a waypoint.

    The waypoint starts at (10 east, 1 north) relative to the ship.
    N/S/E/W move the waypoint; F moves the ship toward the waypoint
    `value` times; L/R rotate the waypoint about the ship in 90-degree
    increments.

    :param directions: iterable of instruction strings like "F10", "R90".
    :return: Manhattan distance of the ship from its start.
    """
    x = y = 0          # ship position
    wx, wy = 10, 1     # waypoint offset from the ship

    for instruction in directions:
        action, value = instruction[0], int(instruction[1:])
        if action == "N":
            wy += value
        elif action == "S":
            wy -= value
        elif action == "E":
            wx += value
        elif action == "W":
            wx -= value
        elif action == "F":
            x += wx * value
            y += wy * value
        elif action in ("L", "R"):
            # Collapse the rotation cases: apply one quarter turn at a time.
            # R90 maps (wx, wy) -> (wy, -wx); L90 maps (wx, wy) -> (-wy, wx).
            for _ in range((value // 90) % 4):
                if action == "R":
                    wx, wy = wy, -wx
                else:
                    wx, wy = -wy, wx

    return abs(x) + abs(y)
# Solve part 2 for the puzzle input and print the Manhattan distance.
print(day12(directions))
| true
|
880fb5f5569d86dd71d6988ad0464ad1be022130
|
Python
|
weed478/asd1
|
/offline/zad3.py
|
UTF-8
| 1,485
| 3.109375
| 3
|
[] |
no_license
|
from random import randint, shuffle, seed
def partition(A, p, r):
    """Lomuto partition of A[p:r] around the pivot value A[r-1].

    Rearranges the range in place so elements <= pivot precede it, and
    returns the pivot's final index.
    """
    boundary = p - 1
    for idx in range(p, r):
        if A[idx] <= A[r - 1]:
            boundary += 1
            A[boundary], A[idx] = A[idx], A[boundary]
    return boundary
def insertion_sort(A, p, r):
    """Sort the slice A[p:r] in place by insertion; elements outside the
    range are untouched."""
    for i in range(p + 1, r):
        # Bubble A[i] left until it sits after a smaller-or-equal element.
        j = i
        while j > p and A[j - 1] > A[j]:
            A[j - 1], A[j] = A[j], A[j - 1]
            j -= 1
def pivot5(A, p, r):
    """Sort the small range A[p:r] (at most 5 elements) in place and
    return the index of its median element."""
    insertion_sort(A, p, r)
    return (p + r) // 2
def median_of_medians(A, p, r):
    """Return an index near the median of A[p:r] via the groups-of-5 scheme.

    Side effect: reorders A so the medians of each 5-element group occupy
    A[p : p + ceil(n/5)], then selects the median of those medians.
    """
    n = r - p
    bins = (n + 4) // 5  # ceil(n / 5) groups
    for i in range(bins):
        left = p + i * 5
        right = min(left + 5, r)  # last group may be shorter than 5
        median = pivot5(A, left, right)
        # Collect each group's median at the front of the range.
        A[p + i], A[median] = A[median], A[p + i]
    # Select the median of the collected group medians.
    return linear_select(A, p, p + bins, p + bins // 2)
def pivot(A, p, r):
    """Choose a pivot index for A[p:r]: sort-and-take-middle for tiny
    ranges, median-of-medians otherwise."""
    if r - p <= 5:
        return pivot5(A, p, r)
    return median_of_medians(A, p, r)
def linear_select(A, p, r, k):
    """Return the index where the k-th smallest element (k is a global,
    0-based rank) of the slice A[p:r] ends up; A is reordered in place.

    Iterative quickselect with median-of-medians pivots — worst-case O(n).
    """
    while r - p > 1:
        # Choose a good pivot, move it to the end of the range, partition.
        x = pivot(A, p, r)
        A[x], A[r - 1] = A[r - 1], A[x]
        q = partition(A, p, r)
        # Narrow the search to the side containing rank k.
        if k < q:
            r = q
        elif q < k:
            p = q + 1
        else:
            return q
    return p
def linearselect(A, k):
    """Return the final index of the k-th smallest element of A
    (A is partially reordered in place)."""
    return linear_select(A, 0, len(A), k)
seed(42)

# Smoke test: for each rank i in [0, n), shuffle 0..n-1 and check that
# linearselect locates value i (which equals its sorted-order rank).
n = 11
for i in range(n):
    A = list(range(n))
    shuffle(A)
    print(A)
    x = linearselect(A, i)
    if x != i:
        print("Blad podczas wyszukiwania liczby", i)
        # BUG FIX: was exit(0), which signalled success to the caller even
        # though the self-test failed.
        exit(1)
print("OK")
| true
|
d8af9ff3d9d0e1a5d9911979d397a75c05ec9d09
|
Python
|
versigtig/code
|
/python/pygame_basically.py
|
UTF-8
| 915
| 3.25
| 3
|
[] |
no_license
|
# Import
import pygame

# Initialize game engine
pygame.init()

# Open window
window_size = (640, 480)
screen = pygame.display.set_mode(window_size)
pygame.display.set_caption("The Quest")

WHITE = (255, 255, 255)
RED = (255, 0, 0)

done = False
clock = pygame.time.Clock()

# PERF FIX: create the font and render the (static) text once, before the
# frame loop — SysFont/render were previously re-run 60 times per second.
font = pygame.font.SysFont('Calibri', 25, True, False)
text = font.render("Anal Seepage", True, RED)

# MAIN GAME LOOP
while not done:

    # EVENTS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True

    # GAME LOGIC

    # WIPE SCREEN
    screen.fill(WHITE)

    # DRAWING
    # Draw a row of small X marks along y=90..100. (Removed the unused
    # `offset = 0` variable that preceded this loop.)
    for x_offset in range(30, 300, 30):
        pygame.draw.line(screen, RED, [x_offset, 100], [x_offset - 10, 90], 2)
        pygame.draw.line(screen, RED, [x_offset, 90], [x_offset - 10, 100], 2)

    screen.blit(text, [250, 250])

    # UPDATE SCREEN
    pygame.display.flip()
    clock.tick(60)  # cap at 60 FPS

pygame.quit()
| true
|
0f580bf33aa4471a83d6aa68387ead34c46e990d
|
Python
|
BUEC500C1/twitter-summarizer-rest-service-lqi25
|
/queue_mul.py
|
UTF-8
| 1,463
| 2.53125
| 3
|
[] |
no_license
|
import Twitter2Video
import queue
import threading
import multiprocessing
import os
# Shared work queue and worker-thread registry.
# NOTE(review): both are module-level and are extended/reused across calls,
# so calling Mul_Threads more than once per process shares state — confirm
# it is only ever invoked once.
q = queue.Queue()
threads = []

def Mul_Threads(item_list, num):
    """Render a Twitter-to-video for each handle in item_list using `num`
    worker threads; returns a human-readable status string.

    :param item_list: list of Twitter handles to process.
    :param num: number of worker threads to start (must be > 0).
    """
    if item_list == []:
        return "No twitter names entered"
    if num == 0:
        return "The num should be bigger than 0"
    #nn = num.copy()

    def Thread():
        # Worker: pull handles off the queue until a None sentinel arrives.
        while True:
            #if q.empty():
                #break
            item = q.get()
            if item is None: break
            print("Thread {} is processing".format(item))
            Twitter2Video.tweet2image(item)
            Twitter2Video.image2video(item)
            print("Thread {} has completed".format(item))
            q.task_done()
            #if q.empty():
                #break

    # Start the workers first, then feed them the handles.
    for i in range(num):
        t = threading.Thread(target = Thread)
        t.start()
        threads.append(t)

    for item in item_list:
        q.put(item)

    # Wait until every queued handle has been processed, then unblock each
    # worker with one None sentinel and join them all.
    q.join()
    for i in range(num):
        q.put(None)
    for t in threads:
        t.join()

    print()
    print("All threads have completed")
    return "All threads have completed!"
#item_list = ['BU_Tweets','CNN','Nike','mfaboston']
#item_list_2 = ['BU_Tweets', 'CNN', 'Nike', 'mfaboston', 'BU_ece', 'BostonDynamics', 'realDonaldTrump', 'WHO', 'TIME']
#Mul_Threads(item_list,4)
#Mul_Threads(item_list_2,6)
'''
print(os.path.exists('BU_Tweets.avi') == True)
print(os.path.exists('CNN.avi') == True)
print(os.path.exists('Nike.avi') == True)
print(os.path.exists('mfaboston.avi') == True)
print(os.path.exists('tttttt.avi') == True)
'''
| true
|
065d5a27bd08c429bb639bce66c8bb6ff8da3380
|
Python
|
Navid2zp/django-challenge
|
/volleyball/matches/api/serializers.py
|
UTF-8
| 3,377
| 2.890625
| 3
|
[] |
no_license
|
from rest_framework.exceptions import ValidationError
from rest_framework.fields import IntegerField, SerializerMethodField
from rest_framework.serializers import ModelSerializer, Serializer
from matches.models import Match, MatchSeat
from stadiums.api.serializers import StadiumSerializer
from stadiums.models import StadiumSeatRow
class MatchSerializer(ModelSerializer):
    """ Serializer responsible for generating matches list and creating one"""

    # Nested read of the match's stadium.
    stadium = StadiumSerializer()

    class Meta:
        model = Match
        # BUG FIX: 'stadium' was listed twice in this tuple; the duplicate
        # entry has been removed.
        fields = (
            'id', 'stadium', 'team_a', 'team_b', 'start_time',
        )
        read_only_fields = ('id',)
class MatchSeatSerializer(ModelSerializer):
    """Serializer exposing a single match seat (id, row, seat number, price)."""

    class Meta:
        model = MatchSeat
        fields = (
            'id', 'row', 'seat_number', 'price',
        )
class AddSeatSerializer(Serializer):
    """
    We'll accept a range for each row of the stadium to generate the seats for the match.
    Using range will allow us to create bulk seats as well as creating them one by one.
    """
    row = IntegerField(required=True, help_text="seat row")
    from_column = IntegerField(required=True, help_text="starting column range")
    to_column = IntegerField(required=True, help_text="ending column range")
    price = IntegerField(required=True, help_text="ticket price")

    def get_match(self) -> Match:
        """Return the Match instance supplied via the serializer context."""
        return self.context.get("match")

    def validate_row(self, value) -> StadiumSeatRow:
        """
        Check if row exists in the stadium of the match
        :param value: int - row number
        :return: StadiumSeatRow - row instance
        """
        match = self.get_match()
        if match.stadium.row_count < value:
            raise ValidationError("row doesn't exists")
        try:
            return match.stadium.rows.get(row_number=value)
        except StadiumSeatRow.DoesNotExist:
            # BUG FIX: was a bare `except:` which also swallowed unrelated
            # errors (programming bugs, KeyboardInterrupt, ...).
            raise ValidationError("row doesn't exists")

    def validate_from_column(self, value):
        """
        Check if the start column is in range.
        """
        if self.get_match().stadium.seat_in_row < value:
            raise ValidationError("seat out of range")
        return value

    def validate_to_column(self, value):
        """
        Check if the end column is in range.
        """
        if self.get_match().stadium.seat_in_row < value:
            raise ValidationError("seat out of range")
        return value

    @staticmethod
    def validate_price(value):
        """
        Ticket price must be strictly positive.
        """
        # BUG FIX: was `value < 0`, which accepted a price of 0 even though
        # the error message (and docstring) require a price greater than 0.
        if value <= 0:
            raise ValidationError("ticket price must be greater than 0")
        return value

    def create(self, validated_data):
        """
        Create seats in range [from_column, to_column] (from and to column included).
        Example: range 1-5 will generate 5 seats with seat numbers: [1, 2, 3, 4, 5]
        """
        match = self.get_match()
        seats = []
        for i in range(validated_data["from_column"], validated_data["to_column"] + 1):
            seats.append(MatchSeat(seat_number=i, match=match, row=validated_data["row"]))

        # Generate seats all together.
        # No reason to create them one by one.
        MatchSeat.objects.bulk_create(seats)
        return {"status": "ok", "message": "seats added"}
| true
|
492001d64a03a80a34111d769b8d29fae113f310
|
Python
|
Kunal-Kumar-Sahoo/Hand-Gesture-Based-Painting
|
/e-Paint.py
|
UTF-8
| 2,882
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# Hand-gesture painting app: index finger draws, index+middle select colours
# from a header bar; the drawing persists on a separate canvas that is
# composited onto the live camera frame each iteration.
import cv2
import mediapipe as mp
import os
import numpy as np
import HandTrackingModule as htm

################################################
#Configurable variables
brushThickness = 15
eraserThickness = 100
################################################

folderPath = "Header Files"
myList = os.listdir(folderPath)
# print(myList) Output: ['4.png', '1.png', '3.png', '2.png']
overlayList = []
# Load every header-bar image (the colour/eraser selector strip) up front.
for imgPath in myList:
    image = cv2.imread(f"{folderPath}/{imgPath}")
    overlayList.append(image)
# print(overlayList)
header = overlayList[1]
drawColour = (255, 0, 255)

# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(2)
cap.set(3, 1280)  # frame width
cap.set(4, 720)   # frame height

detector = htm.HandDetector(detectionConf=0.85)
# Previous pen position; (0, 0) means "no previous point yet".
xp, yp = 0, 0
imageCanvas = np.zeros((720, 1280, 3), np.uint8)

while True:
    # Import Image
    _, image = cap.read()
    image = cv2.flip(image, 1)  # mirror so drawing feels natural

    # Find Hand Landmarks
    image = detector.findHands(image)
    landmarkList = detector.findPosition(image, draw=False)

    if len(landmarkList) != 0:
        # print(landmarkList)

        # Tip of index and middle finger
        x1, y1 = landmarkList[8][1:]
        x2, y2 = landmarkList[12][1:]

        # Check which fingers are up ?
        fingers = detector.fingersUp()
        # print(fingers)

        # Selection mode : 2 fingers are up
        if fingers[1] and fingers[2]:
            # print("Selection mode")
            xp, yp = 0, 0
            # Finger inside the header bar: pick a colour or the eraser
            # based on the x position of the index fingertip.
            if y1 < 125:
                if 250 < x1 < 450:
                    header = overlayList[1]
                    drawColour = (255, 0, 255)  # Purple
                elif 550 < x1 < 750:
                    header = overlayList[3]
                    drawColour = (255, 0, 0)  # Blue
                elif 800 < x1 < 950:
                    header = overlayList[2]
                    drawColour = (0, 255, 0)  # Green
                elif 1050 < x1 < 1200:
                    header = overlayList[0]
                    drawColour = (0, 0, 0)  # Black
            cv2.rectangle(image, (x1, y1-15), (x2, y2+15), drawColour, cv2.FILLED)

        # Drawing mode : Index finger is up
        if fingers[1] and not fingers[2]:
            cv2.circle(image, (x1, y1), 15, drawColour, cv2.FILLED)
            # print("Drawing mode")
            if xp == 0 and yp == 0:
                xp, yp = x1, y1
            # NOTE(review): when erasing (black), the eraser-thickness lines
            # are drawn here AND the brush-thickness lines below still run
            # (there is no `else`) — visually harmless since the thick line
            # covers the thin one, but confirm that is intended.
            if drawColour == (0, 0, 0):
                cv2.line(image, (xp, yp), (x1, y1), drawColour, eraserThickness)
                cv2.line(imageCanvas, (xp, yp), (x1, y1), drawColour, eraserThickness)
            cv2.line(image, (xp, yp), (x1, y1), drawColour, brushThickness)
            cv2.line(imageCanvas, (xp, yp), (x1, y1), drawColour, brushThickness)
            xp, yp = x1, y1

    # Composite the persistent canvas onto the live camera frame: black out
    # the drawn pixels on the frame, then OR in the coloured strokes.
    imageGray = cv2.cvtColor(imageCanvas, cv2.COLOR_BGR2GRAY)
    _, imageInverse = cv2.threshold(imageGray, 50, 255, cv2.THRESH_BINARY_INV)
    imageInverse = cv2.cvtColor(imageInverse, cv2.COLOR_GRAY2BGR)
    image = cv2.bitwise_and(image, imageInverse)
    image = cv2.bitwise_or(image, imageCanvas)

    image[0:125, 0:1280] = header  # Parcing the image as it is a matrix
    # image = cv2.addWeighted(image, 0.5, imageCanvas, 0.5, 0)
    cv2.imshow("Frontend Canvas", image)
    # cv2.imshow("Backend Canvas", imageCanvas)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| true
|
84e3c7833958a923cf3c3aea9c576da037abb83f
|
Python
|
Liahm/CS-265
|
/Labs/Lab 4/s1.py
|
UTF-8
| 566
| 3.3125
| 3
|
[] |
no_license
|
# Python 2 script: for each line of the "students" file
# ("<name> <score> <score> ..."), print the name followed by the rounded
# average of the scores.
import sys
import math

# NOTE(review): argv is checked but never used — fName is hard-coded below;
# confirm whether sys.argv[1] was meant to supply the file name.
if len( sys.argv ) < 2 : # no file name
    print 'ERROR:'
    sys.exit()
else :
    fName = "students"

# NOTE(review): the file handle is never closed.
f = open (fName, "r") #open file for reading
l = f.readline()
while l :
    l = l.strip (' \t\n' ) #Remove whitespaces
    s = l.split() #split strings into "chars"
    length = len(s[1:])
    i = 1
    total = 0 #Value of student scores
    # NOTE(review): `while i<length` covers s[1]..s[length-1] only, so the
    # last score s[length] is never added although the sum is divided by
    # `length` — likely an off-by-one (should be i <= length).
    while i<length :
        total += float(s[i]) #+1 total
        i += 1
    total = int(round(total/length))#Average
    print '{0}{1}'.format(s[0], total) #Prints formatted output
    l = f.readline() #Next line
|
136e0ee1d344a340d81a78fa2df0eb379455fa51
|
Python
|
memicq/ProgrammingContestAnswers
|
/aizu/lectures/computational_geometry/segments_lines/parallel_orthogonal.py
|
UTF-8
| 841
| 3.6875
| 4
|
[] |
no_license
|
#! python3
# parallel_orthogonal.py
class Point():
    """A 2-D point with coordinates x and y."""
    def __init__(self, x, y):
        self.x, self.y = x, y


class Line():
    """A line through the two points (x1, y1) and (x2, y2)."""
    def __init__(self, x1, y1, x2, y2):
        self.p1 = Point(x1, y1)
        self.p2 = Point(x2, y2)

    def get_slope(self):
        """Return the slope dy/dx, or float('inf') for a vertical line."""
        dx = self.p2.x - self.p1.x
        if dx == 0:
            return float('inf')
        return (self.p2.y - self.p1.y) / dx
# For each of q queries (two lines given by two points each), print:
#   2 if the lines are parallel, 1 if orthogonal, 0 otherwise.
q = int(input())
for i in range(q):
    x0, y0, x1, y1, x2, y2, x3, y3 = list(map(int, input().split(' ')))
    line1, line2 = Line(x0, y0, x1, y1), Line(x2, y2, x3, y3)
    a1, a2 = line1.get_slope(), line2.get_slope()
    if a1 == a2: # 平行 (parallel: equal slopes, incl. both vertical)
        print('2')
    elif round(a1*a2, 8) == -1.0:
        # Orthogonal: slope product is -1 (rounded to absorb float error).
        print('1')
    elif (a1 == float('inf') and a2 == 0) or (a1 == 0 and a2 == float('inf')):
        # Orthogonal vertical/horizontal pair (inf * 0 is nan, so the
        # product test above cannot catch this case).
        print('1')
    else:
        print('0')
| true
|
e6cdc91d6a85e778af9e03ad70d475526d8af64d
|
Python
|
gone2808/IACV_background_subtractor
|
/optical_flow.py
|
UTF-8
| 2,311
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
# Background subtraction experiment: mark pixels as foreground when their
# optical-flow magnitude stays above a threshold for several consecutive
# frames.
import cv2
import numpy as np

FLOW_MAG_THRESHOLD = 0.7        # minimum flow magnitude to count as motion
CONSECUTIVE_FLOW_FRAMES = 6     # frames of sustained motion required
# FLOW_ANGLE_THRESHOLD = 0.3

cap = cv2.VideoCapture('dataset/Jackson_Hole_Wyoming/out0.mov')
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)

# HSV scratch image used by the (commented-out) flow visualisation below.
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
hsv[...,2] = 255

# avg_flow_angle = np.zeros(frame1.shape[:2], dtype=float)
# avg_mag = np.zeros_like(avg_flow_angle)
# Accumulated foreground mask (255 = has shown sustained motion).
scores = np.zeros(frame1.shape[:2], dtype=np.uint8)
# Sliding window of per-frame "hot pixel" masks.
flow_mag_history = []
def update_flow(mag, scores):
    """Fold this frame's flow magnitudes into the persistent motion mask.

    Pixels with magnitude above FLOW_MAG_THRESHOLD are marked hot; a pixel
    is added to `scores` only when it was hot in every frame of the recent
    history window (bitwise AND across the window).

    :param mag: per-pixel optical-flow magnitude array.
    :param scores: current uint8 motion mask (0 or 255 per pixel).
    :return: updated mask, or an all-zero mask while the history is warming up.
    """
    indices = mag > FLOW_MAG_THRESHOLD
    hot = np.zeros(mag.shape, dtype=np.uint8)
    hot[indices] = 255
    flow_mag_history.append(hot)
    # NOTE(review): pop happens after append, so the window that gets ANDed
    # holds CONSECUTIVE_FLOW_FRAMES - 1 frames — confirm whether a full
    # CONSECUTIVE_FLOW_FRAMES window was intended.
    if len(flow_mag_history) >= CONSECUTIVE_FLOW_FRAMES:
        flow_mag_history.pop(0)
        total_hot = np.ones(mag.shape, dtype=np.uint8) * 255
        for m in flow_mag_history:
            total_hot = cv2.bitwise_and(total_hot, m)
        scores = cv2.bitwise_or(scores, total_hot)
        return scores
    else:
        return np.zeros(mag.shape, dtype=np.uint8)
n = 1
while(1):
    # NOTE(review): `ret` is never checked — frame2 is None at end of
    # stream, so cvtColor will raise instead of exiting cleanly.
    ret, frame2 = cap.read()
    cv2.imshow('frame', frame2)
    next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)
    # Dense Farneback optical flow between the previous and current frame.
    flow = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])

    # threshold
    # indices = mag > FLOW_MAG_THRESHOLD
    # hot = np.zeros(mag.shape, dtype=np.uint8)
    # hot[indices] = 255
    # cv2.imshow('mag', hot)

    scores = update_flow(mag, scores)
    cv2.imshow('mag', scores)

    # show optical flow
    # hsv[...,0] = ang*180/np.pi/2
    # hsv[...,1] = 0
    # hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    # bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
    # cv2.imshow('flow',bgr)

    if cv2.waitKey(25) & 0xff == ord('q'): break
    prvs = next
    n += 1

cap.release()
cv2.destroyAllWindows()
# update average flow direction
# def flow_with_angle():
# ang[ang >= np.pi] -= np.pi
# flow_indices = mag > FLOW_MAG_THRESHOLD
# avg_flow_angle[flow_indices] += (ang[flow_indices] - avg_flow_angle[flow_indices]) / n
# differences = np.abs(avg_flow_angle - ang)
# scores[(differences < FLOW_ANGLE_THRESHOLD) & (flow_indices) & (scores < 255)] += 1
# cv2.imshow('scores', scores)
| true
|
1da4ae51674a11c93391583262a73ee86f69ca95
|
Python
|
lapotolo/Smart_ELF
|
/SmartApp.GNLP/acronyms.py
|
UTF-8
| 828
| 2.765625
| 3
|
[] |
no_license
|
import json
import spacy
import en_core_web_sm
# Load the lecture catalogue produced upstream.
with open('all_lectures.json') as f:
    all_lectures = json.load(f)
# Abbreviation -> full name for the days of the week.
days = {
    "Mon" : "Monday",
    "Tue" : "Tuesday",
    "Wed" : "Wednesday",
    "Thu" : "Thursday",
    "Fri" : "Friday",
    "Sat" : "Saturday",
    "Sun" : "Sunday"
}
parser = en_core_web_sm.load()
# POS tags skipped when building acronyms: prepositions (IN), "to" (TO),
# determiners (DT) and coordinating conjunctions (CC).
_SKIPPED_TAGS = ("IN", "TO", "DT", "CC")
# Acronym -> full lecture name, one entry per catalogue record.
lectures = {}
for entry in all_lectures:
    lecture = all_lectures[entry]["name"]
    acronym = "".join(
        token.text[0].upper()
        for token in parser(lecture)
        if token.tag_ not in _SKIPPED_TAGS
    )
    lectures[acronym] = lecture
with open('lectures_acr.json', 'w') as fp:
    json.dump(lectures, fp)
with open('days_acr.json', 'w') as fp:
    json.dump(days, fp)
| true
|
2b4266bf96f1ac79e91be13b7d41d77b738eace6
|
Python
|
parksjsj9368/TIL
|
/ALGORITHM/BAEKJOON/SOURCE/07. Sort(정렬)/15. 시리얼 번호.py
|
UTF-8
| 337
| 3.453125
| 3
|
[] |
no_license
|
# Read n serial numbers, one per line, as lists of characters.
n = int(input())
data = []
for _ in range(n) :
    data.append(list(input()))
def data_sum(x) :
    """Return the sum of the digit characters in the sequence *x*.

    Non-digit characters are ignored; a sequence with no digits sums to 0.
    """
    # Generator expression avoids shadowing the builtin `sum`, which the
    # original implementation used as its local accumulator name.
    return sum(int(ch) for ch in x if ch.isnumeric())
# Sort by length first, then by digit sum, then lexicographically.
answer = sorted(data, key = lambda x : (len(x), data_sum(x), x))
for i in range(len(answer)) :
    print(''.join(answer[i]))
| true
|
22c04f7c9842e43421bc465545b05c654090fc33
|
Python
|
tderensis/digital_control
|
/inverted_pendulum.py
|
UTF-8
| 3,157
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
"""
Design of a state space controller for an inverted pendulum driven by stepper motor.
"""
import control_plot, control_sim, control_design, control_optimize, control_eval, control_poles
from scipy import signal
import numpy as np
import math
# System Clasification Results
# motor position low pass filter (bessel with 1 sec settling time)
b_1 = 21.9
b_0 = 8.106
b_g = 21.9
g = 9.81
w0 = 4.008 # natural frequency
d = 0.0718 # damping
a_1 = w0**2
a_2 = a_1/g
# State Space Equations
"""
x = | x | - motor position (m)
| vel | - motor velocity (m/s)
| theta | - pendulum position (rad)
| theta_dot | - pendulum velocity (rad/s)
u = | x_d | - desired motor position (m)
"""
A = np.matrix([
[ 0, 1, 0, 0],
[-b_1, -b_0, 0, 0],
[ 0, 0, 0, 1],
[-b_1*a_2, -b_0*a_2, a_1, -d]
])
B = np.matrix([
[0],
[b_g],
[0],
[b_g*a_2]
])
C = np.matrix([
[1, 0, 0, 0],
[0, 0, 1, 0]
])
D = np.matrix([
[0],
[0]
])
sys_c_ol = signal.StateSpace(A, B, C, D)
print(sys_c_ol)
T = 0.05 # sampling time
Ts = 1.2 # settling time
Tso = Ts/6
print("Using T =", T, "Ts =", Ts, "Tso = ", Tso)
spoles = [
(-4.053+2.34j), (-4.053-2.34j), (-4.044060776465936+0j), (-3.9722607764659337+0j)
]
(sys_d_ol, L, K) = control_design.design_regob(sys_c_ol, T, Ts, Tso, spoles)
phi = sys_d_ol.A
gamma = sys_d_ol.B
print("phi =\n", phi)
print("gamma =\n", gamma)
print("L =\n", L)
print("K =\n", K)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_regsf(sys_d_ol, L)
print("Stability assuming all states are measured")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_regob(sys_d_ol, L, K)
print("Stability using a full order observer")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
x0 = np.zeros((1, 4))
x0[0,1] = 20/math.pi
(t, u, x) = control_sim.sim_regsf(phi, gamma, L, T, x0, Ts*2)
print("reg settling time = ", control_eval.settling_time(t, x))
control_plot.plot_regsf(t, u, x)
(t, u, x, xhat, y) = control_sim.sim_regob(phi, gamma, C, L, K, T, x0, Ts*2)
print("fob settling time = ", control_eval.settling_time(t, y))
control_plot.plot_regob(t, u, x, xhat, y)
# Add a pole for the tracking system
spoles = spoles + control_poles.bessel_spoles(1, Ts)
# Only position is tracked
Ca = np.matrix([ 1, 0, 0, 0 ])
(sys_d_ol, phia, gammaa, L1, L2, K) = control_design.design_tsob(sys_c_ol, Ca, T, Ts, Tso, spoles)
print("phia = ", phia)
print("gammaa = ", gammaa)
print("L1 = ", L1)
print("L2 = ", L2)
print("K =\n", K)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_tssf(sys_d_ol, phia, gammaa, Ca, L1, L2)
print("Stability using a tracking system with full state feedback.")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
(phi_ltf, gamma_ltf, c_ltf) = control_eval.ltf_tsob(sys_d_ol, phia, gammaa, Ca, L1, L2, K)
print("Stability using a tracking system with full order observer")
control_eval.print_stability_margins(phi_ltf, gamma_ltf, c_ltf)
| true
|
7144f8d25e213d5df479984d2e8af851082379a7
|
Python
|
asihacker/python3_bookmark
|
/python笔记/aaa基础内置/类相关/多父类调用顺序.py
|
UTF-8
| 1,259
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/21 16:01
# @Author : AsiHacker
# @File : 多父类调用顺序.py
# @Software: PyCharm
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/16 16:08
# @Author : AsiHacker
# @Site :
# @File : 自定义异常.py
# @Software: PyCharm
class Displayer():
    """Terminal class of the mixin chain: writes a message to stdout."""

    def display(self, message):
        print(message)
class LoggerMixin():
    """Mixin that appends each displayed message to a log file."""

    def log(self, message, filename='logfile.txt'):
        # BUGFIX: the original opened the file with mode 'bet', which is not
        # a valid mode string (and binary mode could not take str data
        # anyway), so every call raised ValueError.  Append-text ('at')
        # matches the obvious logging intent.
        with open(filename, 'at') as fh:
            fh.write(message)

    def display(self, message):
        # Cooperative multiple inheritance: delegate the actual display to
        # the next class in the MRO, then record the message.
        super().display(message)
        self.log(message)
class MySubClass(LoggerMixin, Displayer):
    """Displays via the MRO chain while logging to a dedicated file."""

    def log(self, message):
        # Pin the destination file, then defer to LoggerMixin.log.
        super().log(message, filename='subclasslog.txt')
subclass = MySubClass()
subclass.display("This string will be shown and logged in subclasslog.txt")
# Summary: if the explanation above feels hard to follow, just remember that
# self.method() first looks for method() on the current class and, failing
# that, walks the inheritance chain left to right (the MRO) until method()
# is found.  super().method() behaves the same way, except it skips the
# current class.
| true
|
1d88678bcd96a394ad346e4759efcbb62a8e0006
|
Python
|
rabramley/advent_of_code_2015
|
/4a/solution.py
|
UTF-8
| 254
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python3
"""Advent of Code 2015, day 4: find the smallest positive integer i such
that the MD5 hex digest of 'yzbqklnj' + str(i) starts with five zeros."""
import hashlib
import itertools

for i in itertools.count(1):
    tohash = 'yzbqklnj' + str(i)
    # Renamed from `hash` to avoid shadowing the builtin of the same name.
    digest = hashlib.md5(tohash.encode('utf-8')).hexdigest()
    if digest[0:5] == "00000":
        print(i, digest)
        break
| true
|
e04ff3fffa38274d486fc2e7b35b5a2eec87969b
|
Python
|
TaeYeon-kim-ai/Pytorch
|
/object_detection/YOLOv3/train.py
|
UTF-8
| 2,427
| 2.515625
| 3
|
[] |
no_license
|
import config
import torch
import torch.optim as optim
from model import YOLOv3
from tqdm import tqdm
from utils import (
mean_average_precision,
cells_to_bboxes, # 이미지에 대한 상대적 경계상자
get_evaluation_bboxes,
save_checkpoint,
load_checkpoint,
check_class_accuracy,
get_loaders,
plot_couple_examples
)
from loss import YoloLoss
# Let cuDNN auto-tune convolution algorithms (good for fixed input sizes).
torch.backends.cudnn.benchmark = True
def train_fn(train_loader, model, optimizer, loss_fn, scaler, scaled_anchors) :
    """Run one mixed-precision training epoch over `train_loader`.

    Args:
        train_loader: iterable of (images, (target_s0, target_s1, target_s2)).
        model: YOLOv3 network producing predictions at three scales.
        optimizer: optimizer updating the model parameters.
        loss_fn: YoloLoss applied once per prediction scale.
        scaler: torch.cuda.amp.GradScaler driving the AMP updates.
        scaled_anchors: anchors already scaled to each scale's grid size.
    """
    loop = tqdm(train_loader, leave = True)
    losses = []
    for batch_idx, (x, y) in enumerate(loop) :
        x = x.to(config.DEVICE)
        y0, y1, y2 = (
            y[0].to(config.DEVICE),
            y[1].to(config.DEVICE),
            y[2].to(config.DEVICE)
        )
        # BUGFIX: `torch.cida` was a typo for `torch.cuda`.
        with torch.cuda.amp.autocast():
            out = model(x)
            # BUGFIX: the original had a stray comma after the first term,
            # which made `loss` a 2-tuple instead of a summed tensor and
            # broke loss.item() / scaler.scale(loss).backward() below.
            loss = (
                loss_fn(out[0], y0, scaled_anchors[0])
                + loss_fn(out[1], y1, scaled_anchors[1])
                + loss_fn(out[2], y2, scaled_anchors[2])
            )
        losses.append(loss.item())
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        # Update progress bar with the running mean loss.
        mean_loss = sum(losses) / len(losses)
        loop.set_postfix(loss = mean_loss)
def main() :
    """Build the YOLOv3 model, optimizer and loaders, then train."""
    model = YOLOv3(num_classes=config.NUM_CLASSES).to(config.DEVICE)
    optimizer = optim.Adam(
        model.parameters(), lr = config.LEARNING_RATE, weight_decay=config.WEIGHT_DECAY,
    )
    loss_fn = YoloLoss()
    scaler = torch.cuda.amp.GradScaler()
    # NOTE(review): train and test loaders both read 100examples.csv here --
    # presumably a smoke-test setup; confirm before real training.
    train_loader, test_loader, train_eval_loder = get_loaders(
        train_csv_path = config.DATASET+"/100examples.csv", test_csv_path=config.DATASET+"/100examples.csv"
    )
    if config.LOAD_MODEL :
        load_checkpoint(
            config.CHECKPOINT_FILE, model, optimizer, config.LEARNING_RATE,
        )
    # Scale the anchors by the per-scale values in config.S (presumably the
    # three grid sizes) -- TODO confirm against the config module.
    scaled_anchors = (
        torch.tensor(config.ANCHORS)
        *torch.tensor(config.S).unsqueeze(1).unsqueeze(2).repeat(1, 3, 2)
    ).to(config.DEVICE)
    # NOTE(review): the epoch count uses config.NUM_CLASSES and training runs
    # on test_loader -- both look like mistakes (NUM_EPOCHS / train_loader?);
    # left unchanged in this documentation-only pass.
    for epoch in range(config.NUM_CLASSES) :
        train_fn(test_loader, model, optimizer, loss_fn, scaler, scaled_anchors)
        #train_fn(train_loader, model, optimizer, loss_fn, scaler, scaled_anchors)
        if config.SAVE_MODEL :
            save_checkpoint(model, optimizer)
if __name__ == "__main__" :
    main()
| true
|
bb0859718d03b418127e834c096304b265388641
|
Python
|
Kmmanki/bit_seoul
|
/AE/a07_nosie2_CAE.py
|
UTF-8
| 2,369
| 2.578125
| 3
|
[] |
no_license
|
import numpy as np
from tensorflow.keras.datasets import mnist
# Load MNIST and scale pixels to [0, 1]; the labels are unused here.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(60000,28,28,1).astype('float32')/255.
x_test = x_test.reshape(10000,28,28,1).astype('float32')/255.
# Add Gaussian noise (sigma=0.1), then clip back into the valid pixel range.
x_train_noised = x_train + np.random.normal(0, 0.1, size=x_train.shape)
x_test_noised = x_test + np.random.normal(0, 0.1, size= x_test.shape)
x_train_noised = np.clip(x_train_noised, a_min=0, a_max=1)
x_test_noised = np.clip(x_test_noised, a_min=0, a_max=1)
# print(x_train[0])
# print(x_test[0])
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Conv2D, Flatten
def autoencoder (hidden_layer_size):
    """Build a convolutional denoising autoencoder for 28x28x1 inputs.

    Args:
        hidden_layer_size: number of filters in the first Conv2D layer.

    Returns:
        An uncompiled Sequential model ending in a flat 784-unit sigmoid
        output (one unit per pixel).
    """
    model = Sequential()
    # BUGFIX: the first layer hard-coded 154 filters and silently ignored
    # the hidden_layer_size argument; the parameter is now honoured (the
    # only call site passes 154, so behaviour there is unchanged).
    model.add(Conv2D(hidden_layer_size, (3,3), strides=(1,1),
                     padding='valid', input_shape=(28,28,1)
    ))
    model.add(Conv2D(128, padding='valid',kernel_size= (3,3) ) )
    model.add(Conv2D(64, padding='valid',kernel_size= (3,3) ) )
    model.add(Flatten())
    model.add(Dense(units=784, activation='sigmoid'))
    return model
# PCA showed the smallest reconstruction loss at 154 components, hence 154.
model = autoencoder(hidden_layer_size=154)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# Train to reconstruct the CLEAN images from their noised versions; the
# target is flattened to 784 to match the Dense output layer.
model.fit(x_train_noised, x_train.reshape(60000,784,), epochs=10, batch_size=512)
outputs = model.predict(x_test_noised)
from matplotlib import pyplot as plt
import random
# 3x5 grid: clean input (top), noised input (middle), reconstruction (bottom).
fig, ((ax1,ax2,ax3,ax4,ax5), (ax6,ax7,ax8,ax9,ax10), (ax11, ax12, ax13, ax14, ax15)) = \
    plt.subplots(3,5,figsize=(20,7))
random_images = random.sample(range(outputs.shape[0]), 5)
for i, ax in enumerate([ax1, ax2, ax3, ax4, ax5]):
    ax.imshow(x_test[random_images[i]].reshape(28,28), cmap='gray')
    if i == 0:
        ax.set_ylabel("input", size=20)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
for i, ax in enumerate([ax6,ax7,ax8,ax9,ax10]):
    ax.imshow(x_test_noised[random_images[i]].reshape(28,28), cmap='gray')
    if i == 0:
        ax.set_ylabel("noised", size=20)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
for i, ax in enumerate([ax11, ax12, ax13, ax14, ax15]):
    ax.imshow(outputs[random_images[i]].reshape(28,28), cmap='gray')
    if i == 0:
        ax.set_ylabel("output", size=20)
    ax.grid(False)
    ax.set_xticks([])
    ax.set_yticks([])
plt.tight_layout()
plt.show()
| true
|
a83d5ec8c70b75269545900f7b447a5e14c61fd5
|
Python
|
SemyonSinchenko/RandomGraphModels
|
/utils/plot_report.py
|
UTF-8
| 5,117
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import Counter
import logging
from pathlib import Path
from random import choice, random
import igraph as ig
import matplotlib.pylab as plt
import numpy as np
def plot_all_distributions(g: ig.Graph, path_prefix: Path) -> None:
    """Render every graph-statistics plot for *g* under *path_prefix*.

    Creates the output directory if needed, then produces the degree
    distribution, clustering, shortest-path and degree-correlation figures,
    logging progress around each stage.
    """
    path_prefix.mkdir(parents=True, exist_ok=True)
    logger = logging.getLogger("Plotter")
    # (stage label, plotting function) pairs, run in order.  The
    # "clsutering" typo is kept so the emitted log text is unchanged.
    stages = (
        ("degree distribution", _plot_degree_distribution),
        ("clsutering", _plot_clustering_coeff),
        ("paths", _plot_shortest_paths),
        ("correlations", _plot_degrees_correlations),
    )
    for stage_name, plot_fn in stages:
        logger.info("\tEstimate %s..." % stage_name)
        plot_fn(g, path_prefix)
        logger.info("\tDone.")
def _plot_degree_distribution(g: ig.Graph, path_prefix: Path) -> None:
    """Plot the log-log degree distribution of *g* with a linear fit and
    save it to <path_prefix>/degrees.png."""
    file_path = path_prefix.joinpath("degrees.png")
    # Count degrees
    degrees = g.vs.degree()
    degree_distr = Counter(degrees)
    log_deg = np.zeros(len(degree_distr), dtype=np.float32)
    log_cnt = np.zeros(len(degree_distr), dtype=np.float32)
    # NOTE(review): degree 0 is mapped to log value 0 (same as degree 1),
    # which slightly distorts the fit when isolated vertices exist.
    for i, kv in enumerate(degree_distr.items()):
        log_deg[i] = np.log(kv[0]) if kv[0] > 0 else 0
        log_cnt[i] = np.log(kv[1])
    # Fit linear regression
    # Only points with log-count > 1.5 are used, trimming the sparse tail.
    solution = np.polyfit(log_deg[log_cnt > 1.5], log_cnt[log_cnt > 1.5], deg=1)
    # Plot distributions
    plt.style.use("ggplot")
    f: plt.Figure = plt.Figure(figsize=(8, 5), dpi=150)
    ax: plt.Axes = f.add_subplot()
    ax.plot(log_deg, log_cnt, ".", label="Real Degrees")
    ax.plot(
        log_deg,
        log_deg * solution[0] + solution[1],
        "-",
        label="Linear fit: {:.4f}x + {:.2f}".format(solution[0], solution[1]),
    )
    ax.legend()
    ax.set_xlabel("Log degree")
    ax.set_ylabel("Log count degrees")
    ax.set_ylim(bottom=np.min(log_cnt) - 1e-5)
    f.savefig(str(file_path.absolute()))
    plt.close(f)
def _plot_clustering_coeff(g: ig.Graph, path_prefix: Path) -> None:
    """Scatter local clustering coefficient against degree for a random
    ~50% vertex sample and save it to <path_prefix>/clustcoeff.png."""
    file_path = path_prefix.joinpath("clustcoeff.png")
    # Work on a simplified copy (no self-loops / multi-edges).
    g = g.copy().simplify()
    # Get clustering coefficient based on 50% of vertices
    subset = [vidx for vidx in g.vs.indices if random() <= 0.5]
    # mode="zero" reports 0 (rather than NaN) for degree < 2 vertices.
    lcc = g.transitivity_local_undirected(vertices=subset, mode="zero")
    degrees = g.degree(vertices=subset)
    avg_clustering_coeff = np.mean(lcc)
    # Plot
    plt.style.use("ggplot")
    f: plt.Figure = plt.Figure(figsize=(8, 5), dpi=150)
    ax: plt.Axes = f.add_subplot()
    ax.plot(
        degrees, lcc, ".", label="ClustCoeff. Avg = {:.2e}".format(avg_clustering_coeff)
    )
    ax.legend()
    ax.set_xlabel("Degree")
    ax.set_ylabel("Avg Clustering of Degree")
    f.savefig(str(file_path.absolute()))
    plt.close(f)
def _plot_shortest_paths(g: ig.Graph, path_prefix: Path) -> None:
    """Plot the distribution of shortest-path lengths rooted at vertex 0
    and save it to <path_prefix>/shortest_paths.png.

    NOTE(review): only paths from vertex 0 are considered, and len(path)
    counts vertices (edges + 1), so "diameter" here is an approximation
    based on one vertex's eccentricity -- confirm this is intended.
    """
    file_path = path_prefix.joinpath("shortest_paths.png")
    # Estimate diameter and paths
    paths = g.get_all_shortest_paths(0, mode=ig.ALL)
    path_lens = list(map(len, paths))
    diam = max(path_lens)
    # Effective diameter: 90th percentile of the path-length sample.
    eff_diam = np.percentile(path_lens, 90)
    cnts = Counter(path_lens)
    # Plot
    plt.style.use("ggplot")
    f: plt.Figure = plt.Figure(figsize=(8, 5), dpi=150)
    ax: plt.Axes = f.add_subplot()
    ax.plot(
        cnts.keys(),
        cnts.values(),
        ".",
        label="Path.Len.Distr.\nDiam.: {:d}\nEff.Diam.: {:.2f}".format(diam, eff_diam),
    )
    ax.legend()
    ax.set_xlabel("Path. length")
    ax.set_ylabel("Count paths")
    f.savefig(str(file_path.absolute()))
    plt.close(f)
def _plot_degrees_correlations(g: ig.Graph, path_prefix: Path) -> None:
    """Scatter node degree against mean neighbour degree for a 35% random
    vertex sample, with a linear fit and the graph's assortativity, and
    save it to <path_prefix>/degree_correlations.png."""
    file_path = path_prefix.joinpath("degree_correlations.png")
    # Compute correlations
    num_nodes = int(g.vcount() * 0.35)
    visited = set()
    corrs = np.zeros((num_nodes, 2), dtype=np.float32)
    # Compute coefficient
    assortativity = g.assortativity_degree(directed=False)
    for i in range(num_nodes):
        # Rejection-sample a vertex not chosen before.
        while True:
            rnd_id = choice(g.vs)
            if rnd_id.index not in visited:
                visited.add(rnd_id.index)
                break
        deg = rnd_id.degree()
        # Accumulates sum(neighbour degree) / deg == mean neighbour degree.
        # NOTE(review): a sampled vertex with degree 0 would divide by zero.
        for nbh in rnd_id.neighbors():
            corrs[i, 1] += nbh.degree() / deg
        corrs[i, 0] = deg
    # Drop heavy tail
    corrs = corrs[np.argsort(corrs[:, 0])[:-50], :]
    # Fit line
    solution = np.polyfit(corrs[:, 0], corrs[:, 1], deg=1)
    # Plot scatter
    plt.style.use("ggplot")
    f: plt.Figure = plt.Figure(figsize=(8, 5), dpi=150)
    ax: plt.Axes = f.add_subplot()
    ax.plot(corrs[:, 0], corrs[:, 1], ".", label="Real data")
    ax.plot(
        corrs[np.argsort(corrs[:, 0]), 0],
        corrs[np.argsort(corrs[:, 0]), 0] * solution[0] + solution[1],
        "-",
        label="Fitted line: {:2f}x + {:2f}\nAssortativity coeff.: {:.3f}".format(
            solution[0], solution[1], assortativity
        ),
    )
    ax.legend()
    ax.set_xlabel("Node degree")
    ax.set_ylabel("Neighborhood avg degree")
    f.savefig(str(file_path.absolute()))
    plt.close(f)
| true
|
56d9008d3653b886f4a7c0489242ea1484408e85
|
Python
|
jonnylee719/actividad_practica_iic2233_2
|
/AC10-201516_2-metaclase/trial_2.py
|
UTF-8
| 1,409
| 3.671875
| 4
|
[] |
no_license
|
def create_property(name):
    """Return a write-once property backed by the instance __dict__.

    The setter stores a value only while the current value is None (i.e.
    unset); any later assignment raises AttributeError.  Note that a stored
    value of None can therefore be overwritten later.
    """
    def _set(self, value):
        if self.__dict__.get(name) is not None:
            raise AttributeError
        self.__dict__[name] = value

    def _get(self):
        return self.__dict__.get(name)

    return property(_get, _set)
class RestrictedAccess(type):
    """Metaclass turning the names in `attributes` into write-once properties.

    At class creation, every name listed in the class-level `attributes`
    list is replaced by a write-once property (see create_property).  At
    instantiation, the positional arguments are assigned to those
    attributes in order, after which the `attributes` list is deleted from
    the class.
    """

    def __new__(cls, name, bases, attrs):
        for attr_name in attrs['attributes']:
            attrs[attr_name] = create_property(attr_name)
        return super().__new__(cls, name, bases, attrs)

    def __call__(self, *args, **kwargs):
        instance = super().__call__(*args, **kwargs)
        # Positional args map onto the declared attributes in order; too few
        # args raises IndexError, matching the original behaviour.
        for idx, attr_name in enumerate(instance.attributes):
            setattr(instance, attr_name, args[idx])
        del instance.__class__.attributes
        return instance
class Persona(metaclass=RestrictedAccess):
    # Consumed by the metaclass: each name becomes a write-once property,
    # filled from the positional constructor arguments in this order.
    attributes = ['name', 'lastname', 'alias']
    def __init__(self, *args):
        # Attribute assignment happens in RestrictedAccess.__call__, not here.
        pass
class Singleton(type):
    """Metaclass caching the first instance of each class that uses it.

    The first constructor call stores the instance as an attribute on the
    concrete class (shadowing this metaclass-level default); every later
    call returns that same object and ignores its arguments.
    """
    instance = None

    def __call__(self, *args, **kwargs):
        if self.instance is not None:
            return self.instance
        self.instance = super().__call__(*args, **kwargs)
        return self.instance
class A(metaclass=Singleton):
    # Demo class: only the first A(...) call's value survives.
    def __init__(self, value):
        self.val = value
if __name__ == '__main__':
    # Write-once attribute demo via the RestrictedAccess metaclass.
    p1 = Persona('Bruce', 'Wayne', 'Batman')
    print(p1.name , p1. lastname , "es", p1.alias , "!")
    print(p1.__dict__)
    # Singleton demo: the second constructor call is ignored.
    a = A(10)
    b = A(20)
    print(a.val, b.val)
    print(a is b)
| true
|
5139240eb2aa77a26b14075b198ad54fbc71737a
|
Python
|
RagnvaldArnulfson/TIPE-Game-Of-War
|
/BacktrackPredeterminedWeight.py
|
UTF-8
| 3,565
| 3.09375
| 3
|
[] |
no_license
|
# -*- coding: cp1252 -*-
import random as rd
jeu52Cartes = [(i%13)+1 for i in range(13*4)]
# total deck weight: 364
# minimum weight of a half (26 cards drawn from it): 98
# maximum weight: 266 (=364-98)
jeu32Cartes = [(i%8)+1 for i in range(8*4)]
# total deck weight: 144
# minimum weight of a half (16 cards drawn from it): 40
# maximum: 104 (=144-40)
# See the NB at the bottom of the file for notes on this version.
# `aleatoire` is a boolean requesting a randomised deal.
# Beware: it significantly increases run time for balanced weight targets
# when aleatoire == True.
def genererDistribution(jeuComplet,poidsSouhaite,aleatoire = False):
    """Split *jeuComplet* into two equal-sized packets, the first of which
    has total weight *poidsSouhaite*.

    Args:
        jeuComplet: list of card values (the full deck).
        poidsSouhaite: desired total weight of the first packet.
        aleatoire: when True, the search order and returned packets are
            randomised (noticeably slower for balanced targets).

    Returns:
        [packet1, packet2] partitioning the deck, with sum(packet1) ==
        poidsSouhaite; an empty first packet means no solution exists.
    """
    jeuComplet = sorted(jeuComplet)  # genererPaquet requires a sorted deck
    poidsTotal = sum(jeuComplet)
    # Symmetry trick: searching for the lighter complementary weight is
    # faster, so solve for min(poids, total-poids) and swap halves at the end.
    jeuContraire = False
    if poidsSouhaite > poidsTotal//2:
        poidsSouhaite = poidsTotal-poidsSouhaite
        jeuContraire = True
    paquet1 = genererPaquet(jeuComplet,len(jeuComplet)//2,poidsSouhaite,aleatoire)
    paquet2 = jeuComplet[:]
    for carte in paquet1:
        paquet2.remove(carte)
    if aleatoire:
        # BUGFIX: lists have no .shuffle() method -- the original
        # `paquet2.shuffle()` raised AttributeError whenever aleatoire=True.
        rd.shuffle(paquet2)
    return [paquet2,paquet1] if jeuContraire else [paquet1,paquet2]
# cartesDispo must be sorted at every level of the recursion
def genererPaquet(cartesDispo,nombreDeCartesSouhaitees,poidsSouhaite,aleatoire = False,paquetActuel = None):
    """Backtracking search for a sub-packet with an exact size and weight.

    Args:
        cartesDispo: remaining cards, sorted ascending (required by the
            min/max-weight pruning below).
        nombreDeCartesSouhaitees: target packet size.
        poidsSouhaite: target packet weight.
        aleatoire: randomise the order in which candidate cards are tried.
        paquetActuel: cards already chosen (internal recursion parameter).

    Returns:
        A list of cards meeting both constraints, or [] if none exists.
    """
    # Avoid the mutable-default-argument pitfall of the original signature.
    if paquetActuel is None:
        paquetActuel = []
    nombreDeCartesDispo = len(cartesDispo)
    nombreDeCartesActuel = len(paquetActuel)
    poidsActuel = sum(paquetActuel)
    nombreDeCartesATirer = nombreDeCartesSouhaitees-nombreDeCartesActuel
    # Because cartesDispo is sorted, the lightest/heaviest possible
    # completions are its first/last nombreDeCartesATirer cards.
    poidsMin = poidsActuel + sum(cartesDispo[:nombreDeCartesATirer])
    poidsMax = poidsActuel + sum(cartesDispo[(nombreDeCartesDispo-nombreDeCartesATirer):])
    if nombreDeCartesActuel == nombreDeCartesSouhaitees and poidsActuel == poidsSouhaite:
        return paquetActuel
    elif nombreDeCartesActuel == nombreDeCartesSouhaitees \
         or poidsMin > poidsSouhaite or poidsMax < poidsSouhaite:
        return []
    dejaVu = set()  # O(1) membership; the original used a list
    piocheAlea = cartesDispo[:]
    if aleatoire:
        rd.shuffle(piocheAlea)
    for cartePioche in piocheAlea:
        if cartePioche not in dejaVu:
            # Equal card values lead to identical subtrees: try each
            # distinct value only once at this level.
            dejaVu.add(cartePioche)
            nouveauDispo = cartesDispo[:]
            nouveauDispo.remove(cartePioche)
            # nouveauDispo stays sorted (one element removed from a sorted list)
            solution = genererPaquet(nouveauDispo,nombreDeCartesSouhaitees,poidsSouhaite,aleatoire,paquetActuel+[cartePioche])
            if solution != []:
                return solution
    return []
# Proof that even when the draw is randomised, the run time for extreme
# weight targets stays low (conveniently, that is exactly the case we
# care about).
# NOTE(review): genererDistribution calls paquet2.shuffle() when
# aleatoire=True, but Python lists have no shuffle method, so as written
# this demo loop raises AttributeError -- verify before relying on it.
for i in range(100):
    print(genererDistribution(jeu52Cartes,100,True)[0])
# NB:
# A stopping condition symmetric to the old ajoutMin-based one was added.
# It was made tighter by observing that, if cartesDispo is kept sorted at
# every recursion level, the true poidsMin and poidsMax of the current
# packet can be computed (rather than a mere lower bound).
# Reversing the problem (jeuContraire) remains worthwhile because, with
# cartesDispo sorted ascending, low-weight solutions are found first.
| true
|
15b73ffb7feea81118dff6e36d68d185b88087d0
|
Python
|
arielsl/LynBot
|
/urlhelper.py
|
UTF-8
| 4,699
| 2.984375
| 3
|
[] |
no_license
|
"""
A simple class that will handle URL requests
"""
from urllib.request import Request, urlopen
import urllib.error
import re
import help_messages
from bs4 import *
"""
Checks if the given url exists by reading the response code
"""
def url_exits(url):
code = 0
try:
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req)
code = webpage.getcode()
except urllib.error.HTTPError as e:
pass
if code == 200:
return True
else:
return False
"""
Finds the card's image url
"""
def get_card_image(url, cardname):
imgpostfix = None
try:
req = Request(url+"png", headers={'User-Agent': 'Mozilla/5.0'})
source = urlopen(req).read()
soup = BeautifulSoup(source, "html.parser")
imgpostfix = soup.find("img").get("src")
return help_messages.card_img_prefix + imgpostfix
except urllib.error.HTTPError as e:
pass
try:
req = Request(url+"jpg", headers={'User-Agent': 'Mozilla/5.0'})
source = urlopen(req).read()
soup = BeautifulSoup(source, "html.parser")
imgpostfix = soup.find("img").get("src")
return help_messages.card_img_prefix + imgpostfix
except urllib.error.HTTPError as e:
pass
try:
req = Request(url+"jpeg", headers={'User-Agent': 'Mozilla/5.0'})
source = urlopen(req).read()
soup = BeautifulSoup(source, "html.parser")
imgpostfix = soup.find("img").get("src")
return help_messages.card_img_prefix + imgpostfix
except urllib.error.HTTPError as e:
pass
return None
"""
Find the game's info
"""
def game_info(game_url):
info = []
try:
req = Request(game_url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
except urllib.error.HTTPError as e:
pass
soup = BeautifulSoup(webpage, "html.parser")
for h in soup.find_all("h2",{"class" : "page-title"}):
info.append(h.string)
info.append(soup.p.get_text(" ", strip=True))
want = True
counter = 0
for heading in soup.find_all("td"):
if want and counter < 3:
info.append(heading.get_text(" ", strip=True))
want = False
counter += 1
else:
want = True
info.append(soup.img.get('src'))
return info
"""
Find the booster's info
"""
def booster_info(booster_url):
info = []
try:
req = Request(booster_url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
except urllib.error.HTTPError as e:
pass
soup = BeautifulSoup(webpage, "html.parser")
info.append(soup.find_all("h1",{"class":"page-header__title"})[0].get_text(" ", strip=True))
info.append(soup.find_all("p")[0].get_text(" ", strip=True))
info.append(soup.find_all("p")[1].get_text(" ", strip=True))
info.append(soup.find_all('div', id='mw-content-text')[0].ul.get_text(" ", strip=True))
link = soup.find_all("a",{"class":"image-thumbnail"})[0]
info.append(link.get("href"))
info.append(soup.find_all("div",{"class":"pi-data-value"})[4].get_text(" ", strip=True))
return info
"""
Find the deck info
"""
def deck_info(deck_url):
info = []
try:
req = Request(deck_url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
except urllib.error.HTTPError as e:
pass
soup = BeautifulSoup(webpage, "html.parser")
info.append(soup.find_all("h1",{"class":"page-header__title"})[0].get_text(" ", strip=True))
info.append(soup.find_all("p")[0].get_text(" ", strip=True))
info.append(soup.find_all("p")[1].get_text(" ", strip=True))
info.append(soup.find_all('div', id='mw-content-text')[0].ul.get_text(" ", strip=True))
link = soup.find_all("a",{"class":"image-thumbnail"})[0]
info.append(link.get("href"))
info.append(soup.find_all("div",{"class":"pi-data-value"})[4].get_text(" ", strip=True))
return info
"""
Find the color info
"""
def get_color_info(color_url):
info = []
color_data = []
try:
req = Request(color_url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
except urllib.error.HTTPError as e:
pass
soup = BeautifulSoup(webpage, "html.parser")
paragraphs = soup.find_all("p")
for p in paragraphs:
info.append(p.get_text(" ", strip=True))
if len(info) == 2:
color_data.append(info[0])
else:
color_data.append(info[0])
if len(info[1]) > 997:
info[1] = info[1][:997]
info[1] = info[1] + "..."
color_data.append(info[1])
return color_data
| true
|
b60dcbf78326fdc9809d7278780485fda6820088
|
Python
|
Dhaval2110/Python
|
/Regex_Parser/main_method.py
|
UTF-8
| 8,359
| 2.953125
| 3
|
[] |
no_license
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 19 00:10:40 2022
@author: Dhaval
"""
# global imports
import os
import re
import logging
import warnings
#local imports
from argparser import argParsing
# global/local variables
l=[]
#==============================================================================
# main class to perform patternmatching
#==============================================================================
class PatternMatch():
    """Search a regex pattern in files (or in the raw CLI argument string)
    and print matches plainly, underlined, coloured or machine-readable,
    mirroring console output to output.log."""
    #--------------------------------------------------------------------------
    # Name        : constructor method
    # Inputs      : None
    # Description : Argparse return value , logging module initilization
    #               and color code definition is defined in this consturctor
    # Output      : None
    #--------------------------------------------------------------------------
    def __init__(self):
        self.sVal = argParsing() # calling method from pattern_find.py to fetch the cli inputs
        logging.basicConfig(filename='output.log', level=logging.DEBUG, # Initializing logging file to save the output logs printed on console
                            format='%(message)s',filemode='w')
        self.CYAN = '\033[96m' # Color code for CYAN
        self.END = '\033[0m' # Color code for END
    #--------------------------------------------------------------------------
    # Name: argsaves method
    # Input : None
    # Description : The class method to read the file from commandline, process
    #               file line by line and match the pattern passed in command
    #               line or match pattern to command line string
    #               print them in differnent formats based on
    #               optional parameters which are mutually exclusive.
    #               Logging console output to log file 'output.txt'
    # Output : Output.txt file and console output
    #--------------------------------------------------------------------------
    def argSaves(self):
        try:
            if self.sVal.sFile: # If file is passed as an argument
                for files in self.sVal.sFile: # Iterate for multiple files if passed
                    #----------------------------------------------------------
                    # Case 1 : If file is not passed as command line STDIN as
                    # input
                    #----------------------------------------------------------
                    if not os.path.exists(files): # Check if file is valid path or not
                        warnings.warn("File is not passed???")
                        match = re.search(self.sVal.sPattern,files) #Check if regex pattern is matches with line
                        if match != None:
                            if self.sVal.underline: #If underline is passed to command line print "^" to line on termina
                                print("{} matches in line {}"
                                      .format(match.group(0),files))
                                print(" " ,"^" * len(files) )
                            # NOTE(review): this branch appends only self.END
                            # (no self.CYAN prefix), so nothing is actually
                            # coloured here -- confirm intent.
                            if self.sVal.color: # Print color on console output when color argument is passed to a file
                                print("{} matches in line {}"
                                      .format(match.group(0),files) + self.END)
                            if self.sVal.machine: # print a machine readable format matched string to pattern as it does not have lines
                                print("{}:{}".format((files),match.group(0)),
                                      end="")
                            else:
                                print("{} matches in line {}"
                                      .format(match.group(0),files))
                    #----------------------------------------------------------
                    # Case 2: If files are passed as command line , filenames
                    # are as inputs
                    #----------------------------------------------------------
                    else:
                        with open(files,'r') as fd: # Open a file one by one to read
                            l = fd.readlines() # Save the content lines of files into a list
                        for i in range(0,len(l)) : # Iterate over list elements
                            match = re.search(self.sVal.sPattern,l[i]) # Check if regex pattern is matches with line
                            if match != None: # In order to print the lines with matching pattern , ignore if no patten match
                                if self.sVal.underline: # If underline is passed to command line print "^" to line on terminal as well as in log file
                                    print("{} matches at line {} in file {}"
                                          .format(match.group(0),i+1,files))
                                    print(" " ,"^" * len(l[i]) )
                                    logging.info("{} matches at line {} in"
                                                 " file {}"
                                                 .format(match.group(0),
                                                         i+1,files))
                                    logging.info(" ^" )
                                if self.sVal.color: # Print color on console output when color argument is passed to a file
                                    print("{} matches at line {} in file {}"
                                          .format(match.group(0),i+1,files) +
                                          self.CYAN + l[i] + self.END)
                                    logging.info("{} matches at line {} "
                                                 " in file {}"
                                                 .format(match.group(0),
                                                         i+1,files) +
                                                 self.CYAN + l[i] + self.END)
                                if self.sVal.machine: # which generate machine-readable output , format: file_name:no_line:start_pos:matched_text
                                    print("{}:{}:{}".format(files, i+1,
                                                            l[i]), end="")
                                else: # if in case no optional arguments are passed
                                    print("{} matches at line {} in file "
                                          "{}".format(match.group(0),i+1,
                                                      files))
                                    logging.info("{} matches at line {} in "
                                                 "file {}"
                                                 .format(match.group(0),i+1,
                                                         files))
        # NOTE(review): this broad silent except hides real failures
        # (permission errors, decode errors) as well as the intended
        # missing-file case -- consider narrowing.
        except Exception : # Ignore case if file does not exists but use STDIN as input of -f
            pass
#------------------------------------------------------------------------------
# main function
#------------------------------------------------------------------------------
if __name__ == '__main__':
    # CLI parsing happens inside the constructor; then run the matcher.
    c=PatternMatch()
    c.argSaves()
| true
|
dbca189b795c5dff81afc83652af2bc2837d06da
|
Python
|
JancisWang/-offer_python
|
/剪绳子.py
|
UTF-8
| 853
| 3.40625
| 3
|
[] |
no_license
|
'''
Cut-the-rope problem:
Given a rope of integer length n, cut it into m integer-length pieces
(m and n are integers, n > 1 and m > 1) with piece lengths
k[0], k[1], ..., k[m].  Maximise the product k[0] x k[1] x ... x k[m].
For example, with a rope of length 8 the best cut is 2, 3, 3, giving a
maximum product of 18.

Input: one integer n (2 <= n <= 60).
Output: the answer.

Example: input 8 -> output 18.
'''
# -*- coding:utf-8 -*-
class Solution:
    def cutRope(self, number):
        """Return the maximum product obtainable by cutting a rope of
        length *number* into at least two integer-length pieces.

        best[i] holds the optimum for a rope of length i; max(best[c], c)
        lets a sub-piece stay uncut whenever that is more profitable.
        """
        best = [1] * (number + 1)
        best[1] = 1
        best[2] = 1
        for length in range(2, number + 1):
            best[length] = max(
                max(best[cut], cut) * max(best[length - cut], length - cut)
                for cut in range(1, length)
            )
        return best[-1]
| true
|
8971a16b298c6c344a97d8ec8a060d16d15e314b
|
Python
|
SnapshotSerengetiScienceTeam/Scripts
|
/metadatadb-scripts/import_clean_season_metadata_into_database.py
|
UTF-8
| 9,830
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/python
import MySQLdb
import sys
import snapshotDB
import datetime
import csv
# format of the authentication file should be three lines:
# MSI user name
# database password
# full name
# format of the season file should be one or more comma-separated line(s)
# with fields:
# season: integer
# start date: date in format 'YYYY-MM-DD'
# end date: date in format 'YYYY-MM-DD'
# comments: quoted string of up to 500 characters
# ---
# format of the clean season metadata file should be:
# COLUMNS:
# rownum: integer
# season: integer
# site: 3-character alphanumeric
# roll: integer
# capture: integer
# image: integer
# path: character string
# newtime: datetime in format 'YYYY-MM-DD HH:MM:SS'
# oldtime: datetime in format 'YYYY:MM:DD HH:MM:SS'
# invalid: integer
# include: 1 for send to Zooniverse, 0 for don't
# make sure we have 4 arguments
if len(sys.argv) < 5 :
    print ("format: import_clean_season_metadata_into_database <authentication file> <season file> <metadata file> <output dir>")
    exit(1)
authfilename = sys.argv[1]
sfilename = sys.argv[2]
infilename = sys.argv[3]
outdirname = sys.argv[4]
# Auth file layout (see header comment): username / password / full name.
# NOTE(review): the file is opened in binary mode ('rb'), so these values
# are bytes on Python 3 -- confirm the MySQLdb calls downstream accept that.
with open(authfilename,'rb') as afile:
    username = afile.readline().strip()
    password = afile.readline().strip()
    fullname = afile.readline().strip()
try:
# connect to the database
db = MySQLdb.connect(host="mysql.msi.umn.edu",
user=username,
passwd=password,
db="packerc_snapshot_serengeti")
# use the database
with db:
snapshotDB.cur = db.cursor()
print "Validating Season file\n"
# add the season information
with open(sfilename,'rb') as sfile:
# use CSV reader
sreader = csv.reader(sfile,delimiter=',',quotechar='"')
# usually just 1 file for 1 season, but could handle more
for row in sreader:
season = row[0]
startdate = row[1]
enddate = row[2]
comments = row[3]
# see if this season is already in the DB
if not snapshotDB.seasonExists(season):
snapshotDB.addSeason(season,startdate,enddate,comments)
# make a note in the log
snapshotDB.log(fullname,"Added new season " + season)
# now the metadata
lastseason = "0"
lastsite = "0"
print "Validating the season and site values in the metadata file\n"
# Go through the file once to check seasons and sites
# We will not calculate roll start and stop times, as this is easy
# to do once the data are loaded, and makes this script cleaner.
with open(infilename,'rb') as infile:
# use CSV reader
freader = csv.reader(infile,delimiter=',',quotechar='"')
# ignore header
freader.next()
# for each image
for row in freader:
season = row[0]
site = row[1]
newtime = row[6]
# verify season
if season != lastseason:
# make sure the season is already in the database
if (lastseason!="0" and
not snapshotDB.seasonExists(season)):
print "Season " + season + " is not in the database."
print "Please create and import a Season file before uploading metadata for that season."
print "Metadata import ABORTED. No metadata imported."
exit(1)
lastseason = season
# verify site
if site != lastsite:
# make sure sites are already in the database
if (lastsite!="0" and
not snapshotDB.siteExists(site)):
print "Site " + site + " is not in the database."
print "This Site will need to be created in the database before uploading metadata for it."
print "Metadata import ABORTED. No metadata imported."
exit(1)
lastsite = site
# record start and stop date
#rsskey = season+site
#if rsskey not in rollstartstop:
# add start date
# rollstartstop[rsskey] = [newtime[0:10],None]
# add stop date
#rollstartstop[rsskey][1] = newtime[0:10]
print "Adding rolls from metadata file\n"
# season(s) and sites are okay
# now go through and add the rolls, creating two new files in the process
# for capture and image imports
outfilename1 = outdirname + "temp_captures.csv"
outfilename2 = outdirname + "temp_images1.csv"
# ugly with python 2.6, but that's what's running at MSI
with open(infilename,'rb') as infile:
with open(outfilename1,'wb') as outfile1:
with open(outfilename2,'wb') as outfile2:
# CSV readers
freader = csv.reader(infile,delimiter=',',quotechar='"')
fwriter1 = csv.writer(outfile1,delimiter=',',quotechar='"')
fwriter2 = csv.writer(outfile2,delimiter=',',quotechar='"')
# remove header line
freader.next()
# write header lines
fwriter1.writerow(["idRoll","capture","newtime","invalid","zoon_status"])
fwriter2.writerow(["idSeason","idSite","idRoll","capture",
"image","path","newtime","oldtime"])
lastsite = "0"
lastroll = "0"
for row in freader:
season = row[0]
site = row[1]
roll = row[2]
capture = row[3]
image = row[4]
path = row[5]
newtime = row[6]
oldtime = row[7]
invalid = row[8]
include = row[9]
# look up the site
if site!=lastsite:
siteID = snapshotDB.getSite(site)
lastsite = site
# create (or look up) roll
rollcombo = season+site+roll
if rollcombo!=lastroll:
print "adding roll " + season + ", " + site + ", " + roll
rollID = snapshotDB.addRoll(season,site,roll,None,None)
lastroll = rollcombo
# write to captures file using data for first image
if image=="1":
fwriter1.writerow([rollID,capture,newtime,invalid,include])
# write to images file using all data
fwriter2.writerow([season,siteID,rollID,capture,
image,path,newtime,oldtime])
# make a note in the log
snapshotDB.log(fullname,"Added rolls for new season from file " + infilename)
# now rolls is created
# create capture events with temp file
snapshotDB.addCaptures(outfilename1)
# make a note in the log
snapshotDB.log(fullname,"Added capture events for new season from file " + infilename)
# modify the image import file with capture event ID numbers
outfilename3 = outdirname + "temp_images2.csv"
with open(outfilename2,'rb') as infile:
with open(outfilename3,'wb') as outfile:
# CSV readers
freader = csv.reader(infile,delimiter=',',quotechar='"')
fwriter = csv.writer(outfile,delimiter=',',quotechar='"')
# remove header line
freader.next()
# write header line
fwriter.writerow(["idSeason","idSite","idRoll","idCapture",
"image","path","newtime","oldtime"])
lastcombo = "0"
for row in freader:
season = row[0]
siteID = row[1]
rollID = row[2]
capture = row[3]
image = row[4]
path = row[5]
newtime = row[6]
ot = row[7]
# convert the oldtime
oldtime = ot[0:4]+"-"+ot[5:7]+"-"+ot[8:]
# look up capture if necessary
combo = season + siteID + rollID + capture
if combo!=lastcombo:
captureID = snapshotDB.getCaptureEvent(rollID,capture)
lastcombo=combo
# write to new file with capture ID instead of capture number
fwriter.writerow([captureID,image,path,newtime,oldtime])
# import the images using this temp file
snapshotDB.addImages(outfilename3)
# make a note in the log
snapshotDB.log(fullname,"Added images for new season from file " + infilename)
# catch errors
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
# close connection to the database
finally:
if db:
db.close()
| true
|
4dce3bb4972b7a9f8759f8ebaf3fc0571a40fb54
|
Python
|
pmirallesr/Abeona
|
/transfer/mod_problems/direct_pl2pl_mod.py
|
UTF-8
| 8,925
| 2.515625
| 3
|
[] |
no_license
|
from pykep.trajopt._direct import _direct_base
import pykep as pk
import numpy as np
from utils.pow_to_mass import pow_to_mass as pw2m
from utils.pow_to_mass import min_panel_mass
import time
# Default problem options: objective weights (mass vs. time of flight) and the
# propulsion efficiency used to derive power from thrust/isp when not given.
defaults = {
    "w_mass": 0.5, "w_tof":0.5,
    "prop_eff": 0.5}
class direct_pl2pl_mod(_direct_base):
    """Represents a direct transcription transfer between solar system planets.

    This problem works by manipulating the starting epoch t0, the transfer
    time T, the final mass mf and the controls.
    The decision vector is::

        z = [t0, T, mf, Vxi, Vyi, Vzi, Vxf, Vyf, Vzf, controls]
    """

    def __init__(self,
                 p0="earth",
                 pf="mars",
                 mass=1000,
                 thrust=0.3,
                 isp=3000,
                 power=None,
                 nseg=20,
                 t0=[500, 1000],
                 tof=[200, 500],
                 vinf_dep=1e-3,
                 vinf_arr=1e-3,
                 hf=False,
                 **kwargs):
        """Initialises a direct transcription orbit to orbit problem.

        Args:
            - p0 (``str``): Departure planet name. (will be used to construct a planet.jpl_lp object)
            - pf (``str``): Arrival planet name. (will be used to construct a planet.jpl_lp object)
            - mass (``float``, ``int``): Spacecraft wet mass [kg].
            - thrust (``float``, ``int``): Spacecraft maximum thrust [N].
            - isp (``float``, ``int``): Spacecraft specific impulse [s].
            - power (``float`` or ``None``): available power; when None it is
              derived as 0.5 * thrust * isp / prop_eff.
            - nseg (``int``): Number of colocation nodes.
            - t0 (``list``): Launch epochs bounds [mjd2000].
            - tof (``list``): Transfer time bounds [days].
            - vinf_dep (``float``): allowed launch DV [km/s]
            - vinf_arr (``float``): allowed arrival DV [km/s]
            - hf (``bool``): High-fidelity. Activates a continuous representation for the thrust.
            - kwargs: optional overrides for the module-level ``defaults``
              options (w_mass, w_tof, prop_eff).
        """
        # Init args
        self.i=0
        # NOTE(review): self.args aliases the module-level ``defaults`` dict,
        # so kwargs overrides mutate it for every later instance — confirm
        # this sharing is intended.
        self.args = defaults
        for arg in self.args:
            if arg in kwargs:
                self.args[arg] = kwargs[arg]
        # Init optim weights
        self.w_mass = self.args["w_mass"]
        self.w_tof = self.args["w_tof"]
        # Init power
        if power:
            self.power = power
        else:
            self.power = 0.5*thrust*isp/self.args["prop_eff"]
        # initialise base
        _direct_base.__init__(self, mass, thrust, isp, nseg, pk.MU_SUN, hf)
        # planets
        if all([isinstance(pl, str) for pl in [p0, pf]]):
            self.p0 = pk.planet.jpl_lp(p0)
            self.pf = pk.planet.jpl_lp(pf)
        else:
            raise TypeError("Planet names must be supplied as str.")
        # bounds
        assert t0[1] - t0[0] >= tof[0]
        assert all(t > 0 for t in tof)
        assert tof[1] > tof[0]
        self.t0 = t0
        self.tof = tof
        # boundary conditions on velocity
        self.vinf_dep = vinf_dep * 1000  # (in m)
        self.vinf_arr = vinf_arr * 1000  # (in m)
        # The class is built around solar system planets hence mu is always the
        # SUN
        self.mu = pk.MU_SUN

    def fitness(self, z):
        """Objective followed by equality and inequality constraints (pygmo)."""
        # z = t0, tof, mf, [vinf_0], [vinf_f], [u]
        # epochs (mjd2000)
        t0 = pk.epoch(z[0])
        tf = pk.epoch(z[0] + z[1])
        # final mass
        mf = z[2]
        # controls: 3*nseg element vector, containing ux, uy, uz for each segment
        u = z[9:]
        # compute Cartesian states of planets
        r0, v0 = self.p0.eph(t0)
        rf, vf = self.pf.eph(tf)
        # add the vinfs from the chromosome
        v0 = [a + b for a, b in zip(v0, z[3:6])]
        vf = [a + b for a, b in zip(vf, z[6:9])]
        # spacecraft states
        x0 = pk.sims_flanagan.sc_state(r0, v0, self.sc.mass)
        xf = pk.sims_flanagan.sc_state(rf, vf, mf)
        # set leg
        self.leg.set(t0, x0, u, tf, xf)
        # compute equality constraints
        ceq = np.asarray(self.leg.mismatch_constraints(), np.float64)
        # nondimensionalise equality constraints
        ceq[0:3] /= pk.AU
        ceq[3:6] /= pk.EARTH_VELOCITY
        ceq[6] /= self.sc.mass
        # compute inequality constraints
        cineq = np.asarray(self.leg.throttles_constraints(), np.float64)
        # compute inequality constraints on departure and arrival velocities
        v_dep_con = (z[3] ** 2 + z[4] ** 2 + z[5] ** 2 - self.vinf_dep ** 2)
        v_arr_con = (z[6] ** 2 + z[7] ** 2 + z[8] ** 2 - self.vinf_arr ** 2)
        # nondimensionalize inequality constraints
        v_dep_con /= pk.EARTH_VELOCITY ** 2
        v_arr_con /= pk.EARTH_VELOCITY ** 2
        return np.hstack(([self.obj_func(z)], ceq, cineq, [v_dep_con, v_arr_con]))

    def obj_func(self, z):
        """Weighted sum of normalised time-of-flight and transfer-mass scores."""
        self.i+=1
        tof, mf = z[1:3]
        u = z[9:]
        # power demanded per segment: proportional to throttle magnitude
        pwr = [self.power*(u[i]**2 + u[i+1]**2 + u[i+2]**2)**0.5 for i in range(0,len(u),3)]
        # get states
        # x = list(self.leg.get_states())[2] # <-- Big delay! And really costly. Let's use an approximate method
        # # remove matchpoint duplicate
        # x.pop(self.nseg)
        # # convert to numpy.ndarray
        # x = np.asarray(x, np.float64)
        # x.reshape((self.nseg * 2 + 1, 3))
        # r = [(x[i][0]**2 + x[i][1]**2 + x[i][2]**2)**0.5/pk.AU for i in range(0,len(x),3)]
        # We approximate distance as a piecewise function to accelerate the opt procedure
        # (1 AU for most of the leg, 1.6 AU for the final six segments —
        #  presumably tuned for an Earth->Mars transfer; confirm for other pairs)
        r = [1 for _ in pwr[:-6]] + [1.6 for _ in pwr[-6:]]
        masses = [pw2m(pwr[i], tof, r[i]) for i in range(len(r))]
        mpow = min(200,max(masses))
        mprop = self.sc.mass - mf
        transfer_mass = mprop + mpow - min_panel_mass # We don't penalize the essential panel mass
        # 0 if we arrive in min time, 1 if we arrive in max time
        weighted_tof_score = self.w_tof*(tof-self.tof[0])/(self.tof[1]-self.tof[0])
        # 0 if all mass is dry, 1 if all mass is prop or power
        weighted_mass_score = self.w_mass*transfer_mass/self.sc.mass
        return weighted_tof_score + weighted_mass_score

    def get_nic(self):
        # two extra inequality constraints: departure and arrival vinf bounds
        return super().get_nic() + 2

    def get_bounds(self):
        """Box bounds for [t0, tof, mf, vinf_dep(3), vinf_arr(3), throttles]."""
        lb = [self.t0[0], self.tof[0], self.sc.mass * 0.1] + \
            [-self.vinf_dep] * 3 + [-self.vinf_arr] * 3 + \
            [-1, -1, -1] * self.nseg
        ub = [self.t0[1], self.tof[1], self.sc.mass] + \
            [self.vinf_dep] * 3 + [self.vinf_arr] * 3 + \
            [1, 1, 1] * self.nseg
        return (lb, ub)

    def _plot_traj(self, z, axis, units):
        """Plot the Keplerian orbits of the departure and arrival planets."""
        # times
        t0 = pk.epoch(z[0])
        tf = pk.epoch(z[0] + z[1])
        # plot Keplerian
        pk.orbit_plots.plot_planet(
            self.p0, t0, units=units, color=(0.8, 0.8, 0.8), axes=axis)
        pk.orbit_plots.plot_planet(
            self.pf, tf, units=units, color=(0.8, 0.8, 0.8), axes=axis)

    def pretty(self, z):
        """
        pretty(x)

        Args:
            - x (``list``, ``tuple``, ``numpy.ndarray``): Decision chromosome, e.g. (``pygmo.population.champion_x``).

        Returns human readable information on the trajectory represented by
        the decision vector x as a string.
        """
        data = self.get_traj(z)
        result = self._pretty(z)
        # NOTE(review): despite the names, these are the spacecraft's *final*
        # state entries from the trajectory table, not the Sun's.
        sun_pos = (data[-1, 1], data[-1, 2], data[-1, 3])
        sun_speed = (data[-1, 4], data[-1, 5], data[-1, 6])
        result += ("\nSpacecraft Initial Mass (kg) : {!r}".format(data[0, 7]))
        result += ("\nSpacecraft Final Mass (kg)   : {!r}".format(data[-1, 7]))
        result += ("\nSpacecraft Initial Position (m) : [{!r}, {!r}, {!r}]".format(
            data[0, 1], data[0, 2], data[0, 3]))
        result += ("\nSpacecraft Initial Velocity (m/s): [{!r}, {!r}, {!r}]".format(
            data[0, 4], data[0, 5], data[0, 6]))
        result += ("\nSpacecraft Final Position (m) : [{!r}, {!r}, {!r}]".format(
            *sun_pos))
        result += ("\nSpacecraft Final Velocity (m/s): [{!r}, {!r}, {!r}]".format(
            *sun_speed))
        return result

    def _pretty(self, z):
        """Build the transfer-summary header shared by pretty()."""
        result = ""
        result += ("\nLow-thrust NEP transfer from " +
                   self.p0.name + " to " + self.pf.name)
        result += ("\nLaunch epoch: {!r} MJD2000, a.k.a. {!r}".format(
            z[0], pk.epoch(z[0])))
        result += ("\nArrival epoch: {!r} MJD2000, a.k.a. {!r}".format(
            z[0] + z[1], pk.epoch(z[0] + z[1])))
        result += ("\nTime of flight (days): {!r} ".format(z[1]))
        result += ("\nLaunch DV (km/s) {!r} - [{!r},{!r},{!r}]".format(np.sqrt(
            z[3]**2 + z[4]**2 + z[5]**2) / 1000, z[3] / 1000, z[4] / 1000, z[5] / 1000))
        result += ("\nArrival DV (km/s) {!r} - [{!r},{!r},{!r}]".format(np.sqrt(
            z[6]**2 + z[7]**2 + z[8]**2) / 1000, z[6] / 1000, z[7] / 1000, z[8] / 1000))
        return result

    @staticmethod
    def _get_controls(z):
        # throttle portion of the chromosome (everything after the 9 scalars)
        return z[9:]
| true
|
d299e332ce29e79ae0f5bb5817701dd2db7e1fed
|
Python
|
weecology/retriever
|
/retriever/engines/jsonengine.py
|
UTF-8
| 5,520
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
"""Engine for writing data to a JSON file"""
import json
import os
from collections import OrderedDict
from retriever.lib.defaults import DATA_DIR
from retriever.lib.dummy import DummyConnection
from retriever.lib.models import Engine
from retriever.lib.tools import open_fr, open_fw
from retriever.lib.engine_tools import json2csv, sort_csv
class engine(Engine):
    """Engine instance for writing data to a JSON file."""

    name = "JSON"
    abbreviation = "json"
    # running counter used to synthesize auto-increment primary keys
    auto_column_number = 0
    datatypes = {
        "auto": "INTEGER",
        "int": "INTEGER",
        "bigint": "INTEGER",
        "double": "REAL",
        "decimal": "REAL",
        "char": "TEXT",
        "bool": "INTEGER",
    }
    insert_limit = 1000
    required_opts = [
        ("table_name", "Format of table name", "{db}_{table}.json"),
        ("data_dir", "Install directory", DATA_DIR),
    ]
    # (file object, path) pairs for every table file opened in this run
    table_names = []

    def create_db(self):
        """Override create_db since there is no database just a JSON file"""
        return None

    def create_table(self):
        """Create the table by creating an empty json file"""
        table_path = os.path.join(self.opts["data_dir"], self.table_name())
        self.output_file = open_fw(table_path, encoding=self.encoding)
        # open the JSON array; disconnect() writes the matching closer
        self.output_file.write("[")
        self.table_names.append((self.output_file, table_path))
        self.auto_column_number = 1
        # Register all tables created to enable
        # testing python files having custom download function
        if self.script.name not in self.script_table_registry:
            self.script_table_registry[self.script.name] = []
        self.script_table_registry[self.script.name].append(
            (self.table_name(), self.table))

    def disconnect(self):
        """Close out the JSON with a `\\n]` and close the file.

        Close all the file objects that have been created.
        Re-write the files stripping off the last comma and then close with a `\\n]`.
        """
        if self.table_names:
            for output_file_i, file_name in self.table_names:
                output_file_i.close()
                current_input_file = open_fr(file_name, encoding=self.encoding)
                file_contents = current_input_file.readlines()
                current_input_file.close()
                # drop the trailing comma left by the last inserted row
                file_contents[-1] = file_contents[-1].strip(',\n')
                current_output_file = open_fw(file_name, encoding=self.encoding)
                current_output_file.writelines(file_contents)
                current_output_file.writelines(['\n]'])
                current_output_file.close()
            self.table_names = []

    def execute(self, statement, commit=True):
        """Write a line to the output file"""
        self.output_file.writelines(statement)

    def executemany(self, statement, values, commit=True):
        """Write a line to the output file"""
        self.output_file.writelines(statement)

    def format_insert_value(self, value, datatype):
        """Formats a value for an insert statement"""
        v = Engine.format_insert_value(self, value, datatype)
        if v == 'null':
            return ""
        try:
            # re-quote SQL-style 'strings' with double quotes for JSON output
            if len(v) > 1 and v[0] == v[-1] == "'":
                v = '"%s"' % v[1:-1]
        except BaseException:
            # non-string values (no len/indexing) pass through unchanged
            pass
        return v

    def insert_statement(self, values):
        """Return one JSON-encoded row per value list, each ending in a comma."""
        if not hasattr(self, 'auto_column_number'):
            self.auto_column_number = 1
        keys = self.table.get_insert_columns(join=False, create=True)
        # prepend a synthetic auto-increment id when the first column is 'auto'
        if self.table.columns[0][1][0][3:] == 'auto':
            newrows = []
            for rows in values:
                insert_stmt = [self.auto_column_number] + rows
                newrows.append(insert_stmt)
                self.auto_column_number += 1
        else:
            newrows = values
        json_dumps = []
        # optional human-readable output via the "pretty" option
        pretty = bool("pretty" in self.opts and self.opts["pretty"])
        for line_data in newrows:
            tuples = (zip(keys, line_data))
            write_data = OrderedDict(tuples)
            if not pretty:
                json_dumps.append(json.dumps(write_data, ensure_ascii=False) + ",")
            else:
                json_dumps.append(
                    json.dumps(write_data, ensure_ascii=False, indent=2) + ",")
        return json_dumps

    def table_exists(self, dbname, tablename):
        """Check to see if the data file currently exists"""
        tablename = self.table_name(name=tablename, dbname=dbname)
        tabledir = self.opts["data_dir"]
        table_name = os.path.join(tabledir, tablename)
        return os.path.exists(table_name)

    def to_csv(self, sort=True, path=None, select_columns=None):
        """Export table from json engine to CSV file"""
        for table_item in self.script_table_registry[self.script.name]:
            header = table_item[1].get_insert_columns(join=False, create=True)
            outputfile = os.path.normpath(
                os.path.join(
                    path if path else '',
                    os.path.splitext(os.path.basename(table_item[0]))[0] + '.csv'))
            csv_outfile = json2csv(table_item[0],
                                   output_file=outputfile,
                                   header_values=header,
                                   encoding=self.encoding)
            sort_csv(csv_outfile, encoding=self.encoding)

    def get_connection(self):
        """Gets the db connection."""
        self.get_input()
        return DummyConnection()
| true
|
d7cfc0c6b9a94503d675faf62da8f3b59d0466ef
|
Python
|
ashirwadsangwan/Python
|
/OOPs/classMethods.py
|
UTF-8
| 1,140
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
class Employee:
    """Simple employee record demonstrating class variables and classmethods."""

    # class-level state shared by all instances
    number_of_employees = 0
    raise_amount = 1.04

    def __init__(self, first, last, pay):  ## self is the instance here
        self.first = first
        self.last = last
        self.pay = pay
        self.email = first + "." + last + "@company.com"
        Employee.number_of_employees += 1

    def fullName(self):
        """Return the employee's full name, e.g. "First Last"."""
        return "{} {}".format(self.first, self.last)

    def applyRaise(self):
        """Multiply pay by the raise factor (class or instance attribute)."""
        self.pay = int(
            self.pay * self.raise_amount
        )  ## we'll have to use this through instance or class variable

    @classmethod
    def setRaiseAmount(cls, amount):
        """Set the raise factor for the whole class."""
        cls.raise_amount = amount

    @classmethod
    def fromString(cls, emp_str):
        """Alternate constructor parsing a "First-Last-Pay" string.

        Bug fix: split() yields pay as a *string*, which broke applyRaise()
        (str * float raises TypeError); convert it to int here.
        """
        first, last, pay = emp_str.split("-")
        return cls(first, last, int(pay))
"""
You can also use instances instead of class in classmethods and it'll still work.
"""
emp1 = Employee("Ashirwad", "Sangwan", 50000)
emp2 = Employee("Test", "User", 70000)
print(Employee.raise_amount)
Employee.setRaiseAmount(1.05)
print(Employee.raise_amount)
str_emp1 = "Joe-Root-40000"
new_emp1 = Employee.fromString(str_emp1)
print(new_emp1.email)
| true
|
bf207a48790bd22e1b41c59eb129bf3181410938
|
Python
|
gl-coding/OpenBaseCode
|
/NlpBase/utils.py
|
UTF-8
| 764
| 3.484375
| 3
|
[] |
no_license
|
import time
def print_help(res):
if isinstance(res, list):
for item in res[:min(10,len(res))]:
print item
elif isinstance(res, dict):
for k, v in res:
print k, v
else:
print res
def deco(f):
    # Timing decorator: prints the wrapped function's result via print_help
    # and the elapsed wall time in ms.
    def wrapper(*args, **kwargs):
        start_time = time.time()
        res = f(*args, **kwargs)
        end_time = time.time()
        execution_time = (end_time - start_time)*1000
        print_help(res)
        print("time is %d ms" %execution_time)
        # bug fix: the wrapped function's return value was previously dropped
        return res
    return wrapper
@deco
def f(a,b):
    # Demo function: announce, pause one second, then report the sum.
    print("be on")
    time.sleep(1)
    total = a + b
    print("result is %d" % total)
@deco
def f2(a,b,c):
    # Demo function: announce, pause one second, then report the 3-way sum.
    print("be on")
    time.sleep(1)
    total = a + b + c
    print("result is %d" % total)
if __name__ == '__main__':
    # Exercise the timing decorator on both demo functions.
    f2(3,4,5)
    f(3,4)
| true
|
821ce992b6ad817c97407d49095f9768705c0cf2
|
Python
|
lesteve/markdown-it-py
|
/markdown_it/common/entities.py
|
UTF-8
| 450
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
"""HTML5 entities map: { name -> characters }."""
import html
class _Entities:
    """Read-only facade over the module-level DATA mapping.

    Supports attribute access, item access and membership tests on
    HTML5 entity names.
    """

    def __getattr__(self, name):
        if name in DATA:
            return DATA[name]
        raise AttributeError(name)

    def __getitem__(self, name):
        return DATA[name]

    def __contains__(self, name):
        return name in DATA
# Module-level singleton; DATA is defined below but only looked up at call time.
entities = _Entities()
# entity name (trailing ";" stripped) -> replacement character(s)
DATA = {name.rstrip(";"): chars for name, chars in html.entities.html5.items()}
| true
|
36222ece45752befa30ccd7f342645775a3f258e
|
Python
|
zhengminhui/leetcode-python
|
/src/convertToTitle.py
|
UTF-8
| 205
| 3.640625
| 4
|
[] |
no_license
|
def convertToTitle(n):
    """
    Convert a positive column number to its Excel column title
    (1 -> 'A', 26 -> 'Z', 27 -> 'AA', ...).

    :type n: int
    :rtype: str
    """
    s = ''
    while n > 0:
        # shift to 0-based so 26 maps to 'Z' instead of rolling over
        n -= 1
        s = chr(n%26 + 65) + s
        # bug fix: use floor division; plain '/' produces a float on
        # Python 3 and the digits come out wrong (identical on Python 2)
        n //= 26
    return s
# Demo (Python 2 print statement): column 27 -> "AA"
print convertToTitle(27)
| true
|
45a4c0c19fdf58be842f86c0914d48b33626b1d0
|
Python
|
mlbudda/Checkio
|
/o_reilly/remove_all_after.py
|
UTF-8
| 685
| 3.734375
| 4
|
[] |
no_license
|
# Remove all after
from typing import Iterable
def remove_all_after(items: list, border: int) -> Iterable:
    """Return items truncated just after the first occurrence of border.

    If border does not occur in items, the list is returned unchanged.
    """
    if border not in items:
        return items
    return items[:items.index(border) + 1]
# Running some tests... (each line should print True)
print(list(remove_all_after([1, 2, 3, 4, 5], 3)) == [1, 2, 3])
print(list(remove_all_after([1, 1, 2, 2, 3, 3], 2)) == [1, 1, 2])
print(list(remove_all_after([1, 1, 2, 4, 2, 3, 4], 2)) == [1, 1, 2])
print(list(remove_all_after([1, 1, 5, 6, 7], 2)) == [1, 1, 5, 6, 7])
print(list(remove_all_after([], 0)) == [])
print(list(remove_all_after([7, 7, 7, 7, 7, 7, 7, 7, 7], 7)) == [7])
| true
|
a6b796b00215f3417c1520862d774d13f347742d
|
Python
|
murex971/cryptopals
|
/Set-1/Solution-2.py
|
UTF-8
| 228
| 3.140625
| 3
|
[] |
no_license
|
from binascii import unhexlify,hexlify
def XORfunc(str1, str2):
    # XOR two hex-encoded strings byte-by-byte and print the result as hex.
    # NOTE(review): Python 2 only — relies on unhexlify() returning str so
    # a[x] is a 1-char string suitable for ord(), and on the print statement.
    # Assumes len(str1) == len(str2); shorter str2 raises IndexError — TODO
    # confirm callers always pass equal-length inputs.
    a = unhexlify(str1)
    b = unhexlify(str2)
    output = ""
    for x in range(0,len(a)):
        output+= (chr(ord(a[x])^ord(b[x])))
    print hexlify(output)
| true
|
afca42f40349f960a28a981e7e8c04e08c9c1fa3
|
Python
|
shanti-uva/mms_image_import
|
/file-import.py
|
UTF-8
| 544
| 2.765625
| 3
|
[] |
no_license
|
import sys
import subprocess
import os
# Batch driver: read one media ID per line from the file named on the command
# line and shell out to import.py for each, importing into collection 346.
if len(sys.argv) > 1:
    fpath = sys.argv[1]
    print("File path is: {}".format(fpath))
    with open(fpath, 'r') as inf:
        for ln in inf:
            print(ln)
            try:
                # blank or non-numeric lines raise ValueError and are logged
                mid = int(ln.strip())
                cmd = "python import.py -coll 346 -i {}".format(mid)
                os.system(cmd)
                # subprocess.call(cmd, Shell=True)
            except Exception as e:
                print("Exception: {}".format(e))
            print("--------------------\n")
| true
|
65ea14706e29e771c91514560afddb5c2642fd9e
|
Python
|
ching-yi-hsu/practice_python
|
/8_Rock_Paper_Scissors/rock_paper_scissors.py
|
UTF-8
| 911
| 3.6875
| 4
|
[] |
no_license
|
def RPS_game():
    """Play one round of rock-paper-scissors between two prompted players.

    Reads each player's choice from stdin (1=rock, 2=paper, 3=scissors)
    and prints the outcome. Robustness fix: the original printed nothing
    at all for out-of-range input; now an error message is shown.
    """
    rock = 1
    paper = 2
    scissors = 3
    player_1 = int(input("you are play1 , rock = 1, paper = 2, scissors = 3, please enter a number : "))
    player_2 = int(input("you are play2 , rock = 1, paper = 2, scissors = 3, please enter a number : "))
    if not (rock <= player_1 <= scissors and rock <= player_2 <= scissors):
        print("invalid input, please enter 1, 2 or 3")
        return
    if player_1 == player_2 :
        print("this game is even")
    elif player_1 <= paper and player_2 <= paper :
        # rock vs paper only: the higher number wins
        if player_1 > player_2 :
            print(" Play1 won !")
        else :
            print("play2 won !")
    else :
        # exactly one player threw scissors (equal choices handled above)
        if player_1 == rock :
            print("Play1 won !")
        elif player_2 == paper :
            print("play1 Won!")
        else:
            print("Play2 won!")
# Replay loop: keep playing while the user answers Y (case-insensitive on Y/y).
play_game = str(input("would you like to start the game ? Y/N ? "))
while play_game == "Y" or play_game == "y" :
    RPS_game()
    play_game = input("would you like to start the game again ? Y/N ? ")
| true
|
b614d30d3186c88376489272c35db5aabb86b102
|
Python
|
kids-first/kf-lib-data-ingest
|
/kf_lib_data_ingest/validation/hierarchy.py
|
UTF-8
| 1,648
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
from math import inf
from graph import Graph # https://github.com/root-11/graph-theory
from kf_lib_data_ingest.validation.default_hierarchy import DEFAULT_HIERARCHY
def get_full_hierarchy(H=None):
    """Precompute lookup structures for a validation hierarchy graph.

    :param H: optional ``graph.Graph`` hierarchy; defaults to DEFAULT_HIERARCHY.
    :returns: tuple (HIERARCHY, HIERARCHY_ORDER, HIERARCHY_PATHS, ANCESTOR_LOOKUP)
        - HIERARCHY_ORDER: breadth-first node ordering starting from a node
          whose ANCESTOR_LOOKUP entry is empty (used for display ordering only)
        - HIERARCHY_PATHS: source -> {dest: positive finite cost} for all
          reachable pairs
        - ANCESTOR_LOOKUP: node -> HIERARCHY.nodes(node); presumably the
          node's ancestors per the graph-theory package — confirm semantics.
    """
    HIERARCHY = H if H is not None else DEFAULT_HIERARCHY

    # Make a faster lookup than HIERARCHY.nodes()
    ANCESTOR_LOOKUP = {n: HIERARCHY.nodes(n) for n in HIERARCHY.nodes()}

    # Find a hierarchy node without ancestors to use as a starting point, and
    # breadth-first crawl the hierarchy from there to create a decent ordering
    # for listing node counts
    #
    # (This is optional and could just be removed as unimportant later)
    _bidirectional = Graph()
    for a, b, _ in HIERARCHY.edges():
        _bidirectional.add_edge(a, b, bidirectional=True)
    top = None
    for n, v in ANCESTOR_LOOKUP.items():
        if not v:
            top = n
            break
    HIERARCHY_ORDER = list(_bidirectional.breadth_first_walk(top))

    # Make a faster lookup than H.is_connected (also bakes in distance costs)
    #
    # (It doesn't matter that the edge weights in the hierarchy graph are arbitrary
    # cardinality enumerations and not actual distances. As long as the values are
    # all positive, the "cost" here will monotonically increase along the
    # hierarchy, which is all we care about.)
    HIERARCHY_PATHS = {
        source: {
            dest: cost
            for dest, cost in connected.items()
            if cost != inf and cost != 0
        }
        for source, connected in HIERARCHY.all_pairs_shortest_paths().items()
    }

    return HIERARCHY, HIERARCHY_ORDER, HIERARCHY_PATHS, ANCESTOR_LOOKUP
| true
|
44756735423df33703d4d7bff915bb2270e80541
|
Python
|
MarianDanaila/Competitive-Programming
|
/LeetCode_30days_challenge/2020/June/Power of Two.py
|
UTF-8
| 187
| 3.296875
| 3
|
[] |
no_license
|
def isPowerOfTwo(n):
    """Return 1 if n is a positive power of two, else 0.

    Bug fix: n == 0 previously looped forever (0 % 2 == 0 and 0 // 2 == 0);
    non-positive inputs now return 0 immediately.
    """
    if n <= 0:
        return 0
    while n % 2 == 0:
        n //= 2
    if n == 1:
        return 1
    else:
        return 0
"""
# with bit manipulation
return n > 0 and (n & (n-1) == 0)
"""
| true
|
c6e6c39e05e1919ff8baf104a83bf0b12dd2fcac
|
Python
|
phamous2day/rpg_PythonExercises
|
/bubblesort.py
|
UTF-8
| 486
| 3.578125
| 4
|
[] |
no_license
|
def bubbleSort(my_array):
    """Sort my_array in place (ascending) with bubble sort, printing a trace.

    Fix: print statements are parenthesized so the code also runs on
    Python 3; the single-argument form produces identical output on Python 2.
    """
    for pass_number in range(len(my_array)-1,0,-1):
        print("This is pass_number, round: %r" % pass_number)
        for i in range(pass_number):
            if my_array[i]>my_array[i+1]:
                # swap adjacent out-of-order elements (tuple swap, no temp)
                my_array[i], my_array[i+1] = my_array[i+1], my_array[i]
                print("my_array[i] is %r" %(my_array[i]))
# Demo: sort a sample list in place and show the result.
my_array = [54,26,93,17,77,31,44,55,20]
bubbleSort(my_array)
print(my_array)
| true
|
4175c3e5992bef93a800da4b0546b73c74d1ab47
|
Python
|
lunkaleung/HackerRank
|
/Practice/Python/Arithmetic Operators/arithmetic_operators.py
|
UTF-8
| 194
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
if __name__ == '__main__':
    # HackerRank "Arithmetic Operators": read two ints and print their
    # sum, difference and product when both satisfy 1 <= x <= 10**10.
    a = int(input())
    b = int(input())
    # idiom: chained comparisons replace the and-ed range checks
    if 1 <= a <= 10**10 and 1 <= b <= 10**10:
        print(a + b)
        print(a - b)
        print(a * b)
|
e6568f0dd0820e6faac88113d925e30ed7ba7bdc
|
Python
|
AWildDevAppears/Porcupine
|
/constants/config.py
|
UTF-8
| 548
| 2.609375
| 3
|
[] |
no_license
|
from tkinter import filedialog
from yaml import load
class Config:
    """Dataset input/output paths, optionally overridden from a YAML file."""

    CONFIG_ROOT_PATH = './Dataset'
    CONFIG_OUT_PATH = './out'

    @staticmethod
    def load_config():
        """Prompt for a .yml config file and apply any recognised keys.

        Fixes: ``filetypes`` must be a sequence of (label, pattern) tuples
        (a bare tuple raises TclError), and ``askopenfile`` returns a file
        object — not a path — which the original then passed to ``open``;
        ``askopenfilename`` returns the path we actually need. The file is
        now also closed via a context manager.
        """
        filename = filedialog.askopenfilename(filetypes=[('Config file', '*.yml')])
        if filename:
            with open(filename, 'r') as config_file:
                # NOTE(review): yaml.load without an explicit Loader is
                # deprecated and unsafe on untrusted files — consider
                # switching to yaml.safe_load.
                config = load(config_file)
            if 'CONFIG_ROOT_PATH' in config:
                Config.CONFIG_ROOT_PATH = config['CONFIG_ROOT_PATH']
            if 'CONFIG_OUT_PATH' in config:
                Config.CONFIG_OUT_PATH = config['CONFIG_OUT_PATH']
| true
|
7664d3a5141dd1804e63a583e9cd6f31ab420a64
|
Python
|
jlopez1423/python-crash-course-tutorials
|
/chapter-4-exercises/animals.py
|
UTF-8
| 302
| 4.6875
| 5
|
[] |
no_license
|
# Animals: pick three animals with a common characteristic and print them
# with a for loop.
animals = ['wolves', 'foxes', 'dogs']
for animal in animals:
    print("A {} would make a great pet.".format(animal))
print("Any of these animals would make a great pet")
print("I think, maybe not.")
| true
|
35869cfd0a88c49baf8cd8ace010564daad9a52f
|
Python
|
talhaHavadar/ProjectChelonoidis
|
/sensorkit/motorcontrol_test.py
|
UTF-8
| 1,359
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
from RPi import GPIO as io
from sensorkit.UltrasonicHCSR04 import UltrasonicHCSR04
from sensorkit.MotorControl_L298N import MotorControl_L298N
# L298N motor driver on BCM pins 17/27 (direction) and 22 (PWM enable).
mcontrol = MotorControl_L298N(input_pin0=17, input_pin1=27, enable_pin=22)

# Interactive test loop (Python 2: raw_input). "stop" halts the motor and exits.
while(True):
    command = raw_input("Give a command(up, down, stop, forward, backward): ")
    if command == "up":
        # NOTE(review): setSpeed appears to take an absolute value
        # (presumably a duty-cycle percentage), not an increment — confirm
        # against MotorControl_L298N.
        mcontrol.setSpeed(100)
    elif command == "down":
        mcontrol.setSpeed(50)
    elif command == "forward":
        mcontrol.forward()
    elif command == "backward":
        mcontrol.backward()
    elif command == "stop":
        mcontrol.stop()
        break;
mcontrol.cleanup()

# Earlier raw-GPIO implementation kept for reference in this string (not run):
"""
import time
import threading
input0 = 17
input1 = 27
enable = 22
io.setmode(io.BCM)
io.setup(input0, io.OUT)
io.setup(input1, io.OUT)
io.setup(enable, io.OUT)
pwm = io.PWM(enable, 100)
dc = 30.0
pwm.start(30)
while(True):
    command = raw_input("Give a command(up, down, stop, forward, backward): ")
    if command == "up":
        dc += 10
        pwm.ChangeDutyCycle(dc)
    elif command == "down":
        dc -= 10
        pwm.ChangeDutyCycle(dc)
    elif command == "forward":
        io.output(input0, True)
        io.output(input1, False)
    elif command == "backward":
        io.output(input0, False)
        io.output(input1, True)
    elif command == "stop":
        break;
pwm.stop()
io.cleanup()
"""
| true
|
79abb3ac17b67272d36723bf3139c2972f78b1ee
|
Python
|
RensZ/thesis2
|
/Python/SEPPartialUnitTest.py
|
UTF-8
| 4,902
| 2.75
| 3
|
[] |
no_license
|
def CentralGravity(mu, position):
    """Point-mass gravitational acceleration -mu * r / |r|^3 (Decimal-safe)."""
    distance = np.linalg.norm(position)
    return -mu * position / (distance ** Decimal(3.0))
def SEPcorrection(mu, r_1, r_2, dr):
    """Change in the central-gravity acceleration of body 2 caused by
    displacing body 1 by dr (the SEP-induced offset)."""
    r_12 = r_2 - r_1
    r_12_c = r_2 - (r_1 + dr)
    accel = -mu * r_12 / (np.linalg.norm(r_12) ** Decimal(3.0))
    accel_c = -mu * r_12_c / (np.linalg.norm(r_12_c) ** Decimal(3.0))
    return accel_c - accel
def PartialWrtPosition(mu, r_1, r_2, dr):
    """Analytic partial of the SEP correction w.r.t. the position of body 1.

    Returns a 3x3 matrix (Decimal entries); verified numerically against
    CentralDifferenceWrtPos in the script below.
    """
    r_1_c = r_1 + dr       # displaced position of body 1
    r_12 = r_2 - r_1       # nominal separation
    r_12_c = r_2 - r_1_c   # displaced separation
    d_12 = np.linalg.norm(r_12)
    d_12_c = np.linalg.norm(r_12_c)
    p1 = Decimal(3.0) * np.outer(r_12, r_12_c.T) / d_12_c**5
    p2 = Decimal(-3.0) * np.outer(dr, r_12_c.T) / d_12_c**5
    p3 = Decimal(-3.0) * np.outer(r_12, r_12.T) / d_12**5
    # diagonal term: difference of the inverse-cube distances
    p4 = np.dot(( Decimal(1.0) / d_12**3 - Decimal(1.0) / d_12_c**3 ), np.identity(3, dtype=Decimal))
    return -mu*(p1+p2+p3+p4)
def PartialWrtMu(mu, r_1, r_2, dr, acc):
    """Analytic partial of the SEP correction w.r.t. mu.

    acc is the current SEP correction (output of SEPcorrection). The second
    term appears to assume dr scales as 1/mu (cf. the dr rescaling in
    CentralDifferenceWrtMu) — confirm the underlying model.
    """
    r_1_c = r_1 + dr
    r_12 = r_2 - r_1
    r_12_c = r_2 - r_1_c
    d_12 = np.linalg.norm(r_12)   # computed but unused below
    d_12_c = np.linalg.norm(r_12_c)
    p1 = acc/mu
    p2a = np.identity(3, dtype=Decimal) / (d_12_c**3)
    p2b = Decimal(3.0) * np.outer(r_12_c, r_12_c.T) / (d_12_c**5)
    p2 = np.matmul((p2a - p2b), dr)
    # print("p1 \n", p1)
    # print("p2a \n", p2a)
    # print("p2b \n", p2b)
    # print("p2 \n", p2)
    return p1 - p2
    # Earlier formulation kept for reference (unreachable):
    # c = dr / mu
    #
    # p1 = r_12_c / (d_12_c**3)
    # p2 = Decimal(3.0) * np.dot(r_12_c.T, r_12_c) * c / (mu * d_12_c**5)
    # p3 = c / (mu * d_12_c**3)
    # p4 = r_12 / d_12**3
    # return -p1 + p2 - p3 + p4
def PartialWrtEta(mu, r_1, r_2, dr, n):
    """Analytic partial of the SEP correction w.r.t. the scale parameter n (eta).

    Assumes dr is linear in n (dr = c*n, so c = dr/n), giving
    mu * (I/d^3 - 3 r r^T / d^5) dr / n.
    """
    c = dr / n
    r_1_c = r_1 + c*n          # equals r_1 + dr by construction
    r_12 = r_2 - r_1
    r_12_c = r_2 - r_1_c
    d_12 = np.linalg.norm(r_12)   # computed but unused below
    d_12_c = np.linalg.norm(r_12_c)
    # p1 = -mu * c / (d_12_c**3)
    # p2 = Decimal(3.0) * mu * np.dot(r_12_c.T, r_12_c) * c / (d_12_c**5)
    brackets1 = np.identity(3, dtype=Decimal) / (d_12_c**3)
    brackets2 = Decimal(3.0) * np.outer(r_12_c, r_12_c.T) / (d_12_c**5)
    partial = mu * np.matmul( (brackets1 - brackets2) , dr) / n
    return partial
def CentralDifferenceWrtPos(p, mu, r_1, r_2, dr):
    """Central finite difference of SEPcorrection w.r.t. the position r_1.

    :param p: perturbation step size (Decimal).
    Returns a 3x3 array stacking the x-, y- and z-direction differences.

    Bug fix: the backward evaluations used the module-level global ``mu_S``
    instead of the ``mu`` argument — harmless while callers pass mu_S, but
    wrong for any other mu.
    """
    zero = Decimal(0.0)
    p_x = np.asarray([p, zero, zero])
    p_y = np.asarray([zero, p, zero])
    p_z = np.asarray([zero, zero, p])
    two_p = Decimal(2.0) * p
    cd_x = (SEPcorrection(mu, r_1 + p_x, r_2, dr) - SEPcorrection(mu, r_1 - p_x, r_2, dr)) / two_p
    cd_y = (SEPcorrection(mu, r_1 + p_y, r_2, dr) - SEPcorrection(mu, r_1 - p_y, r_2, dr)) / two_p
    cd_z = (SEPcorrection(mu, r_1 + p_z, r_2, dr) - SEPcorrection(mu, r_1 - p_z, r_2, dr)) / two_p
    return np.vstack([cd_x, cd_y, cd_z])
def CentralDifferenceWrtMu(p, mu, r_1, r_2, dr):
    """Central finite difference of SEPcorrection w.r.t. mu.

    dr is rescaled by mu/(mu +/- p) at each evaluation, i.e. dr is treated
    as proportional to 1/mu.

    Bug fix: the backward evaluation used the global ``mu_S`` instead of
    the ``mu`` argument.
    """
    dr_up = dr*mu/(mu+p)
    dr_down = dr*mu/(mu-p)
    cd = (SEPcorrection(mu + p, r_1, r_2, dr_up) - SEPcorrection(mu - p, r_1, r_2, dr_down)) / (
            Decimal(2.0) * p)
    return cd
def CentralDifferenceWrtEta(p, mu, r_1, r_2, dr, n):
    """Central finite difference of SEPcorrection w.r.t. the scale parameter n (eta).

    dr is rescaled by (n +/- p)/n at each evaluation, i.e. dr is treated as
    linear in n.

    Bug fix: the backward evaluation used the global ``mu_S`` instead of
    the ``mu`` argument.
    """
    dr_up = dr*(n+p)/n
    dr_down = dr*(n-p)/n
    cd = (SEPcorrection(mu, r_1, r_2, dr_up) - SEPcorrection(mu, r_1, r_2, dr_down)) / (
            Decimal(2.0) * p)
    return cd
import numpy as np
from decimal import *
# High-precision Decimal arithmetic for the finite-difference comparison.
getcontext().prec = 33

# Sun gravitational parameter [m^3/s^2]; mu_test is the plain-float version
# (currently unused below).
mu_test = 1.32712440041939e+20
mu_S = Decimal(1.32712440041939e+20)
# Position vectors [m]; names suggest Sun (r_S) and Mercury (r_M) — TODO
# confirm bodies and reference frame.
r_S = np.asarray(([Decimal(-1058202435.85883), Decimal(-407616171.803058) , Decimal(-143292503.024126) ]))
r_M = np.asarray([Decimal(17776989161.8444), Decimal(-56861189168.2378), Decimal(-32252099174.0247) ])
# SEP displacement vectors for the two Nordtvedt-flag cases; nvfalse is used.
dr_SEP_nvfalse = np.asarray([Decimal(2.92724249666266), Decimal(2.33826700072565), Decimal(0.898587531648646) ])
dr_SEP_nvtrue = np.asarray([Decimal(0.390298999555446), Decimal(0.311768933430425), Decimal(0.119811670886616) ])
dr_SEP = dr_SEP_nvfalse

da = SEPcorrection(mu_S, r_S, r_M, dr_SEP)

# Compare the analytic position partial against a central difference.
p_pos = Decimal(1000.0)
cd_pos = CentralDifferenceWrtPos(p_pos, mu_S, r_S, r_M, dr_SEP)
partial_pos = PartialWrtPosition(mu_S, r_S, r_M, dr_SEP)

print("central difference wrt position: \n", cd_pos)
print("partial wrt position: \n", partial_pos)
print("partial - central difference wrt position: \n", partial_pos-cd_pos)

# The mu and eta checks below are kept for reference but currently disabled.
# p_mu = Decimal(1E19)
# cd_mu = CentralDifferenceWrtMu(p_mu, mu_S, r_S, r_M, dr_SEP)
# partial_mu = PartialWrtMu(mu_S, r_S, r_M, dr_SEP, da)
#
# print("central difference wrt mu: \n", cd_mu)
# print("partial wrt mu: \n", partial_mu)
# print("partial - central difference wrt mu: \n", partial_mu-cd_mu)

# p_eta = Decimal(1.0E-4)
# eta = Decimal(1.0E-3)
# cd_eta = CentralDifferenceWrtEta(p_eta, mu_S, r_S, r_M, dr_SEP, eta)
# partial_eta = PartialWrtEta(mu_S, r_S, r_M, dr_SEP, eta)
#
# print("central difference wrt eta: \n", cd_eta)
# print("partial wrt eta: \n", partial_eta)
# print("partial - central difference wrt eta: \n", partial_eta-cd_eta)
| true
|
8d09a4f09ed64fab80ca676606caab3914f1b3fa
|
Python
|
shakyasaijal/Google-Foobar-Challenge
|
/re-id.py
|
UTF-8
| 316
| 3.203125
| 3
|
[] |
no_license
|
def solution(n):
    """Return the five digits of the concatenated-primes string starting at index n.

    The string is "2357111317192329..." (all primes concatenated). Returns 0
    for out-of-range n (n < 0 or n > 10000), as in the original interface.

    Bug fix: the original generated primes only below n + 100, which yields
    far fewer than n + 5 digits for large n (roughly 4700 digits at
    n = 10000), so the returned slice was short or empty. Primes are now
    generated until the digit string is provably long enough.
    """
    if n < 0 or n > 10000:
        return 0
    digits = []
    total_len = 0
    candidate = 2
    # accumulate prime digits until indices n..n+4 are guaranteed to exist
    while total_len < n + 5:
        if _is_prime(candidate):
            text = str(candidate)
            digits.append(text)
            total_len += len(text)
        candidate += 1
    return "".join(digits)[n:n + 5]


def _is_prime(m):
    # Trial division primality test (odd divisors up to sqrt(m)).
    if m < 2:
        return False
    if m % 2 == 0:
        return m == 2
    divisor = 3
    while divisor * divisor <= m:
        if m % divisor == 0:
            return False
        divisor += 2
    return True
| true
|
d2771a78f19f1519e906b8e00897f4316c67d0a0
|
Python
|
Torlinski/football-modelling
|
/soccermatics/tracking/lecture4_5.py
|
UTF-8
| 1,387
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# coding: utf-8
# Soccermatics tracking lectures 4-5: load Metrica sample match 2, convert
# units to metres, normalise the playing direction, and save a movie clip of
# the second home-team goal.
# NOTE(review): depends on the Metrica_* helper modules and on the sample
# data living at <project root>/data/metrica/data.
# In[1]:
import Metrica_IO as mio
import Metrica_Viz as mviz
import Metrica_Velocities as mvel
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
# In[2]:
# set up initial path to data
WORKING_DIR = os.getcwd()
# Two directory levels up from the working directory is the project root.
BASE_DIR = os.path.dirname(os.path.dirname(WORKING_DIR))
DATA_DIR = os.path.join(BASE_DIR, 'data/metrica/data')
DATADIR = DATA_DIR
game_id = 2 # let's look at sample match 2
# read in the event data
events = mio.read_event_data(DATADIR,game_id)
# read in tracking data
tracking_home = mio.tracking_data(DATADIR,game_id,'Home')
tracking_away = mio.tracking_data(DATADIR,game_id,'Away')
# Convert positions from metrica units to meters (note change in Metrica's coordinate system since the last lesson)
tracking_home = mio.to_metric_coordinates(tracking_home)
tracking_away = mio.to_metric_coordinates(tracking_away)
events = mio.to_metric_coordinates(events)
# reverse direction of play in the second half so that home team is always attacking from right->left
tracking_home,tracking_away,events = mio.to_single_playing_direction(tracking_home,tracking_away,events)
# In[4]:
# Making a movie of the second home team goal
# (frames 73600-74099; player velocity arrows disabled)
PLOTDIR = DATADIR
mviz.save_match_clip(tracking_home.iloc[73600:73600+500],tracking_away.iloc[73600:73600+500],PLOTDIR,fname='home_goal_2',include_player_velocities=False)
| true
|
24e4e957f336cb5ade09877013db83792dcdafbf
|
Python
|
MirandaKoubi/School_Projects
|
/A_Star_Project/mrk3865_astar_project.py
|
UTF-8
| 9,929
| 3.9375
| 4
|
[] |
no_license
|
#Name: Miranda Koubi
#CLID: mrk3865
#Class: CMPS 420 Spring 2015
#Due Date: February 20, 2015 at 10:00 am
#Project: #1 8-Puzzle Solver using A* Algorithm
#Assignment:
#Implement a program that solves an 8-puzzle using the A* shortest path algorithm.
import heapq
import time
startInput = " "
goalInput = " "
class Board:
    """One 8-puzzle configuration, usable as an A* search node.

    ``state`` is a 3x3 grid of single-character strings where "0" marks the
    blank.  The class depends on a module-level ``goal`` grid (same shape)
    for its Manhattan-distance heuristic, and orders nodes by
    f = g (moves from start) + h (heuristic) so instances can live in a
    ``heapq`` frontier.
    """

    # Blank-tile moves in the original expansion order: up, down, left, right.
    _MOVES = ((-1, 0), (1, 0), (0, -1), (0, 1))

    def __init__(self, state, parent):
        # Deep-copy the 3x3 grid so later swaps never alias the caller's data.
        self.state = [row[:] for row in state]
        self.manhattan()        # sets self.h
        self.setParent(parent)  # sets self.g and self.f (requires self.h)
        self.name = self.toString()

    def toString(self):
        """Flatten the grid into a 9-character string (used for equality)."""
        return "".join("".join(row) for row in self.state)

    def setParent(self, parent):
        """Attach *parent* and recompute path cost g and priority f = g + h."""
        self.g = 0
        self.f = 0
        self.parent = parent
        # Each move from the parent costs exactly 1.
        if parent:
            self.g = parent.g + 1
        self.f = self.h + self.g

    def manhattan(self):
        """Set self.h to the summed Manhattan distance of all tiles from goal."""
        distance = 0
        for i in range(3):
            for j in range(3):
                tile = goal[i][j]
                if tile != "0":  # the blank does not count toward h
                    row, col = self.getTile(tile)
                    distance += abs(row - i) + abs(col - j)
        self.h = distance

    def getTile(self, tile):
        """Return the (row, col) position of *tile* in this board."""
        for i in range(3):
            for j in range(3):
                if self.state[i][j] == tile:
                    return (i, j)

    # Boards compare equal by layout (name) and order by priority f, so
    # heapq and list membership tests both behave as the search expects.
    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        return self.f < other.f

    def __gt__(self, other):
        return self.f > other.f

    def __le__(self, other):
        return self.f <= other.f

    def __ge__(self, other):
        return self.f >= other.f

    def _shifted(self, a, b, na, nb):
        """Return the child Board with the blank moved from (a, b) to (na, nb)."""
        grid = [row[:] for row in self.state]
        grid[a][b], grid[na][nb] = grid[na][nb], grid[a][b]
        return Board(grid, self)

    def getNeighbors(self):
        """Return all Boards reachable by sliding one tile into the blank.

        Replaces the four nearly identical copy/swap branches of the original
        with a single loop over the legal blank moves; the expansion order
        (up, down, left, right) is unchanged.
        """
        a, b = self.getTile("0")
        neighbors = []
        for da, db in self._MOVES:
            na, nb = a + da, b + db
            if 0 <= na <= 2 and 0 <= nb <= 2:
                neighbors.append(self._shifted(a, b, na, nb))
        return neighbors

    def printBoard(self):
        """Pretty-print the grid, rendering the blank ("0") as a space."""
        for row in self.state:
            for column in row:
                print(column if column != "0" else " ", end=" ")
            print(end="\n")
        print(" ")
def isSolvable(startInput, goalInput):
    """Return True when the start layout can reach the goal layout.

    Two 8-puzzle layouts are mutually reachable exactly when the tile
    permutation between them (ignoring the blank) has an even number of
    inversions, so this counts inversions and checks the parity.
    """
    src = list(startInput)
    dst = list(goalInput)
    # The blank does not participate in the parity argument.
    src.remove("0")
    dst.remove("0")
    inversions = 0
    for idx in range(8):
        # An inversion is a tile placed before position idx in the start
        # layout that the goal layout wants *after* the current tile.
        goal_pos = dst.index(src[idx])
        before_in_start = src[:idx]
        after_in_goal = dst[goal_pos + 1:8]
        inversions += len(set(before_in_start) & set(after_in_goal))
    # Even parity means solvable.
    return inversions % 2 == 0
#A* algorithm
def puzzleMaster():
    """A* search from the global ``start`` grid to the global ``goal`` grid.

    Maintains a heap-ordered frontier and a list of expanded states,
    re-parenting nodes when a cheaper route is found.  Prints the solution
    path plus statistics and returns the path (list of Boards), or None if
    the frontier empties without reaching the goal.
    """
    startTime = time.time()
    startState = Board(start, None)
    goalState = Board(goal, None)
    # FIX: renamed from `open`/`closed` to stop shadowing the `open` builtin.
    open_list = []    # frontier, kept as a heap ordered by f = g + h
    closed_list = []  # fully expanded states
    path = []
    # puts the start state into the heap
    heapq.heappush(open_list, startState)
    count = 0
    while (open_list != []):
        currentState = heapq.heappop(open_list)
        if currentState == goalState:
            # Reconstruct the path from start to goal by walking parents.
            path.append(currentState)
            while currentState.parent:
                path.append(currentState.parent)
                currentState = currentState.parent
            path.reverse()
            for i in path:
                i.printBoard()
            print("Solution Found!")
            print(" ")
            print("Solution was found in " + str(count) + " state examinations.")
            print("Number of moves: " + str(len(path) - 1))
            print("Solution found in about " + str(int(time.time() - startTime)) + " seconds.")
            print(" ")
            return path
        else:
            children = currentState.getNeighbors()
            for child in children:
                if child in open_list:
                    # Already on the frontier: keep whichever path is shorter.
                    openState = open_list[open_list.index(child)]
                    if currentState.g + 1 < openState.g:
                        openState.setParent(currentState)
                        # A sorted list satisfies the heap invariant, so
                        # sorting restores heap order after f changed.
                        open_list.sort()
                elif child in closed_list:
                    # Re-open a closed state if a cheaper route was found.
                    closedIndex = closed_list.index(child)
                    closedState = closed_list[closedIndex]
                    if currentState.g + 1 < closedState.g:
                        closed_list.pop(closedIndex)
                        heapq.heappush(open_list, child)
                else:
                    # Brand-new state: count it and add it to the frontier.
                    count += 1
                    heapq.heappush(open_list, child)
            closed_list.append(currentState)
    return None
def main():
    """Prompt for start/goal layouts, validate them, and run the A* solver."""
    print("This is a program that solves the 8-puzzle problem. The input is two strings of non-repeating numbers. One for the start state, and one for the goal state. 0 represents the open space in the puzzle.")
    print("example:012345678 is a possible start state, and 876543210 is a possible goal state. Not every puzzle will have a solution.")
    print(" ")
    correctInput = False
    # checks to make sure input is valid
    while (correctInput != True):
        # BUG FIX: reset both flags every pass.  Previously a flag set on an
        # earlier iteration stayed True, so e.g. a goal that was valid once
        # followed by an invalid goal on the retry was still accepted.
        correctStart = False
        correctGoal = False
        startInput = input("Start State (numbers 0-8, 0 for empty space): ")
        goalInput = input("Goal State (numbers 0-8, 0 for empty space): ")
        # A state must be exactly 9 unique digits drawn from 0-8.
        if startInput.isnumeric() == False or '0' not in startInput or '9' in startInput or len(startInput) != 9 or len(startInput) != len(set(startInput)):
            print ("Invalid Start State.")
        else:
            correctStart = True
        if goalInput.isnumeric() == False or '0' not in goalInput or '9' in goalInput or len(goalInput) != 9 or len(goalInput) != len(set(goalInput)):
            print ("Invalid Goal State.")
        else:
            correctGoal = True
        if correctStart == True and correctGoal == True:
            correctInput = True
    global start
    global goal
    # Reshape the flat 9-character strings into 3x3 grids.
    start = [[startInput[0], startInput[1], startInput[2]], [startInput[3], startInput[4], startInput[5]], [startInput[6], startInput[7], startInput[8]]]
    goal = [[goalInput[0], goalInput[1], goalInput[2]], [goalInput[3], goalInput[4], goalInput[5]], [goalInput[6], goalInput[7], goalInput[8]]]
    # if the board given is valid and the puzzle is solvable, runs the puzzle solver
    if isSolvable(startInput, goalInput):
        puzzleMaster()
    else:
        print("Puzzle is not solvable.")
    input('Press ENTER to exit')


# Guard the entry point so importing this module no longer starts the prompt;
# running the file as a script behaves exactly as before.
if __name__ == "__main__":
    main()
| true
|
179e0f43caceff1928c95f82a21534568e1d92c3
|
Python
|
nv-jeff/pytorch
|
/torch/utils/data/webdataset/bench.py
|
UTF-8
| 3,102
| 2.765625
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
import argparse
import time
from collections import Counter
import itertools as itt
from . import dataset, loader, filters
# Help text shown by argparse (RawDescriptionHelpFormatter preserves layout).
# FIX: corrected the user-facing typo "optinos" -> "options".
description = """Check validity of dataset and measure I/O speed.
This command lets you quickly check whether a WebDataset is valid (i.e., has no
repeated keys, which happens if you accidentally store the files unsorted).
It will also compute I/O speeds in terms of samples per second and bytes
per second, letting you check and compare loader speeds for different storage
devices and preprocessing options.
For testing I/O pipeline performance, using the `-l` option, you can load
an arbitrary `.py` file that contains a function definition for `make_dataset(url)`.
By default, this only loads up to 1000 samples; you can adjust this number with
the `-c` argument; `-c -1` means loading all samples in every shard.
Examples:
    python -m torch.utils.data.webdataset.bench -c 100 'pipe:gsutil cat gs://nvdata-ytsamples/yt8m-clips-000000.tar'
    python -m torch.utils.data.webdataset.bench -c 100 'pipe:curl -s -L https://storage.googleapis.com/nvdata-ytsamples/yt8m-clips-000000.tar'
"""
class TotalSize:
    """Accumulates how many samples passed through and their total byte size."""

    def __init__(self):
        self.count = 0  # number of samples seen so far
        self.total = 0  # cumulative length of all sample values

    def __call__(self, sample):
        """Tally one sample and return it unchanged (pipeline-map friendly)."""
        self.count = self.count + 1
        size = 0
        for value in sample.values():
            size += len(value)
        self.total += size
        return sample
def main(args):
    """Validate each shard and report its I/O throughput.

    For every shard URL in ``args.shards``: build a dataset (either via a
    user-supplied ``make_dataset`` module given with ``-l`` or the default
    ``dataset.Dataset``), stream up to ``args.count`` samples through a
    ``TotalSize`` accumulator, and print samples/sec, bytes/sec and the
    distribution of sample field layouts.
    """
    for shard in args.shards:
        print()
        print("===", shard)
        totals = TotalSize()
        if args.load != "":
            # User-provided pipeline: load the module and splice the size
            # accumulator in right after the first pipeline stage.
            dsmod = loader.load_file("dsmod", args.load)
            ds = dsmod.make_dataset(shard)
            ds.pipeline = ds.pipeline[:1] + [filters.map(totals)] + ds.pipeline[1:]
        else:
            ds = dataset.Dataset(shard)
            ds.map(totals)
        keys = set()
        skeys = Counter()
        delta = None
        start = None
        # NOTE(review): islice starts at 1, so the first sample is skipped by
        # this loop (TotalSize still counts it); timing starts at the second
        # sample, presumably to exclude pipeline warm-up.
        for i, sample in itt.islice(enumerate(ds), 1, 1 + args.count):
            # NOTE(review): `keys` is rebound below to the current sample's
            # sorted *field names*, so from the second iteration on this
            # assert compares __key__ against field names rather than against
            # previously seen keys -- the duplicate-key detection looks
            # broken; verify against upstream webdataset.
            assert sample["__key__"] not in keys, "bad shard: detected duplicate keys"
            if i == 1:
                start = time.time()
            keys = tuple(sorted(set(sample.keys())))
            skeys.update([keys])
        delta = time.time() - start
        print()
        print(f"#samples/sec: {totals.count/delta:15.2f}")
        print(f"#bytes/sec: {totals.total/delta:15.2f}")
        print()
        print("sample types:")
        stats = list(skeys.most_common())
        for key, count in stats:
            print(f"{count:9d} {key}")
        if len(stats) > 1:
            print()
            print("WARNING: multiple different sample types found")
        print()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=description, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    # -c/--count: max samples to read per shard; a negative value means "all"
    # (mapped to a large sentinel below).
    parser.add_argument("-c", "--count", type=int, default=1000)
    # -l/--load: optional .py file that defines make_dataset(url).
    parser.add_argument("-l", "--load", default="")
    parser.add_argument("shards", nargs="+")
    args = parser.parse_args()
    if args.count < 0:
        args.count = 999999999
    main(args)
| true
|
753c58d288e7e7af809ee50b07f428ec106df23c
|
Python
|
marmute360/Python-Exercises
|
/calculando lanches.py
|
UTF-8
| 522
| 3.390625
| 3
|
[] |
no_license
|
print("==============================================")
print("=== Calculando lanches ===")
print("==============================================")
# Read the quantity sold of each product, then print the order total using
# the fixed unit prices (R$ 5.30, R$ 6.00 and R$ 3.20).
# NOTE(review): quantities are read as float, so fractional amounts are
# accepted; int() may be the intended conversion.
Cod100 = float(input("PRODUTO CÓD. 100 (Preço Unitário: R$ 5,30), informe a quantidade: "))
Cod101 = float(input("PRODUTO CÓD. 101 (Preço Unitário: R$ 6,00), informe a quantidade: "))
Cod102 = float(input("PRODUTO CÓD. 102 (Preço Unitário: R$ 3,20), informe a quantidade: "))
print("Total do lanche: ",(Cod100*5.3)+(Cod101*6)+(Cod102*3.2))
| true
|
17448f77bdf640daf166670dda0ebe83c0ee2f6c
|
Python
|
gzgdouru/python_study
|
/python3_cookbook/chapter06/demo09.py
|
UTF-8
| 261
| 3.203125
| 3
|
[] |
no_license
|
'''
Encode and decode hexadecimal representations of binary data
(module docstring translated from the original Chinese: 编码和解码十六进制数).
'''
import binascii
import base64
if __name__ == "__main__":
    s = b"hello"
    # binascii round trip: bytes -> lowercase hex bytes -> original bytes.
    h = binascii.b2a_hex(s)
    print(h)
    print(binascii.a2b_hex(h))
    # base64.b16encode produces uppercase hex; b16decode reverses it.
    h = base64.b16encode(s)
    print(h)
    print(base64.b16decode(h))
| true
|
f72d148b55fce2aad537160681650a40aec6d7d5
|
Python
|
softmaxhuanchen/optimal-double-execution
|
/src/data/gbm.py
|
UTF-8
| 1,045
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
import numpy as np
from .base import Price
class GBM(Price):
    """Two independent geometric-Brownian-motion price paths on [0, T].

    ``generate()`` returns an (n, 3) array whose columns are
    [time, price_1, price_2]; each price series starts at s1 / s2.
    """

    def __init__(self, T=1., sigma1=0.02, sigma2=0.01, s1=1., s2=1.,
                 drift1=0., drift2=0., n=100):
        # Per-asset volatility, drift and initial price, plus the horizon T
        # and the number of samples n along the path.
        self.sigma1 = sigma1
        self.sigma2 = sigma2
        self.drift1 = drift1
        self.drift2 = drift2
        self.n = n
        self.s1 = s1
        self.s2 = s2
        self.T = T

    def generate(self):
        """Simulate one joint path and return it as an (n, 3) ndarray."""
        # Variance of a single Brownian increment for each asset.
        step1 = self.sigma1 ** 2 * self.T / self.n
        step2 = self.sigma2 ** 2 * self.T / self.n
        # Scaled cumulative-sum random walks, pinned to 0 at t = 0.
        walk1 = np.r_[[0.], np.sqrt(step1) * np.random.randn(self.n - 1).cumsum()]
        walk2 = np.r_[[0.], np.sqrt(step2) * np.random.randn(self.n - 1).cumsum()]
        path = np.c_[np.linspace(0, self.T, self.n), walk1, walk2]
        t = path[:, 0]
        # Exponentiate with the usual Ito drift correction, then rescale to
        # the initial prices.
        path[:, 1] = self.s1 * np.exp((self.drift1 - self.sigma1 ** 2 / 2.) * t + self.sigma1 * path[:, 1])
        path[:, 2] = self.s2 * np.exp((self.drift2 - self.sigma2 ** 2 / 2.) * t + self.sigma2 * path[:, 2])
        return path
| true
|
188e8b1f947bd45afa4564af5dffbc3f575bf165
|
Python
|
raywiis/advent-of-code-2020
|
/src/day-17.py
|
UTF-8
| 2,975
| 3.171875
| 3
|
[] |
no_license
|
from itertools import product
from collections import defaultdict
# Load the puzzle input: one list of single-character cells per line.
# NOTE(review): the file handle is never closed; a `with` block would be safer.
f = open('./day-17-problem.txt')
start = [list(line) for line in f.read().splitlines()]
def count_activated_around(a, pos, directions):
    """Count the active ('#') cells among the 26 3-D neighbours of *pos*."""
    x, y, z = pos
    active = 0
    for dx, dy, dz in directions:
        if a[(x + dx, y + dy, z + dz)] == '#':
            active += 1
    return active


def show(a, dx, dy, dz):
    """Print the grid layer by layer, one z-slice per block."""
    for z in range(-dz, dz + 1):
        print('\n\n', z)
        for y in range(-dy, dy + 1):
            row = ''.join(a[(x, y, z)] for x in range(-dx, dx + 1))
            print(row)


def iterate(start, to, dx, dy, dz, directions):
    """Advance one Conway-cubes step, reading *start* and writing *to*.

    A cell is active next step with exactly 3 active neighbours, or when it
    is already active and has exactly 2; otherwise it becomes inactive.
    """
    for x in range(-dx, dx + 1):
        for y in range(-dy, dy + 1):
            for z in range(-dz, dz + 1):
                cell = (x, y, z)
                n = count_activated_around(start, cell, directions)
                if n == 3 or (n == 2 and start[cell] == '#'):
                    to[cell] = '#'
                else:
                    to[cell] = '.'


def count_activated_around_4(a, pos, directions):
    """Count the active ('#') cells among the 80 4-D neighbours of *pos*."""
    x, y, z, w = pos
    active = 0
    for dx, dy, dz, dw in directions:
        if a[(x + dx, y + dy, z + dz, w + dw)] == '#':
            active += 1
    return active


def iterate_4(start, to, dx, dy, dz, dw, directions):
    """4-D analogue of iterate(): one hypercube step from *start* into *to*."""
    for x in range(-dx, dx + 1):
        for y in range(-dy, dy + 1):
            for z in range(-dz, dz + 1):
                for w in range(-dw, dw + 1):
                    cell = (x, y, z, w)
                    n = count_activated_around_4(start, cell, directions)
                    if n == 3 or (n == 2 and start[cell] == '#'):
                        to[cell] = '#'
                    else:
                        to[cell] = '.'


def part_1(start):
    """Run six 3-D steps from the seed grid; return the active-cube count."""
    directions = list(product([0, -1, 1], repeat=3))[1:]  # drop the zero offset
    cur = defaultdict(lambda: '.')
    nxt = defaultdict(lambda: '.')
    # Seed the z = 0 plane from the 2-D input grid.
    for y, row in enumerate(start):
        for x, ch in enumerate(row):
            cur[(x, y, 0)] = ch
    dx, dy, dz = len(start[0]), len(start), 1
    for _ in range(6):
        iterate(cur, nxt, dx, dy, dz, directions)
        cur, nxt = nxt, cur
        # Grow the scanned region by one cell per step in every direction.
        dx, dy, dz = dx + 1, dy + 1, dz + 1
    return sum(1 for v in cur.values() if v == '#')


def part_2(start):
    """Run six 4-D steps from the seed grid; return the active-cube count."""
    directions = list(product([0, -1, 1], repeat=4))[1:]
    cur = defaultdict(lambda: '.')
    nxt = defaultdict(lambda: '.')
    for y, row in enumerate(start):
        for x, ch in enumerate(row):
            cur[(x, y, 0, 0)] = ch
    dx, dy, dz, dw = len(start[0]), len(start), 1, 1
    for _ in range(6):
        iterate_4(cur, nxt, dx, dy, dz, dw, directions)
        cur, nxt = nxt, cur
        dx, dy, dz, dw = dx + 1, dy + 1, dz + 1, dw + 1
    return sum(1 for v in cur.values() if v == '#')
# Print the answers for both puzzle parts.
print(part_1(start))
print(part_2(start))
| true
|
d8bf2d28d4e731fd1af23e40cdfd27fee2740480
|
Python
|
lizenghui1121/DS_algorithms
|
/剑指offer/09.链表中倒数第k个节点.py
|
UTF-8
| 798
| 3.3125
| 3
|
[] |
no_license
|
"""
输出链表中倒数第K个节点
@Author: Li Zenghui
@Date: 2020-03-03 16:06
"""
class Solution:
    """Length-then-offset approach with a two-pointer second pass."""

    def FindKthToTail(self, head, k):
        """Return the k-th node from the tail, or None when out of range."""
        # An empty list or k == 0 has no answer.
        if head is None or k == 0:
            return None
        # First pass: count the nodes.
        size = 0
        probe = head
        while probe:
            probe = probe.next
            size += 1
        # k beyond the list length has no answer either.
        if k > size:
            return None
        # Second pass: keep two cursors k-1 links apart; when the leader
        # reaches the last node, the trailer is the k-th from the end.
        trailer = head
        leader = head
        for _ in range(k - 1):
            leader = leader.next
        while leader.next is not None:
            trailer = trailer.next
            leader = leader.next
        return trailer
class Solution2:
    """Array-based approach: collect every node, then index from the back."""

    def FindKthToTail(self, head, k):
        """Return the k-th node from the tail, or None when out of range."""
        nodes = []
        cursor = head
        while cursor is not None:
            nodes.append(cursor)
            cursor = cursor.next
        # Reject out-of-range k (covers k < 1 and the empty list).
        if k > len(nodes) or k < 1:
            return None
        return nodes[-k]
| true
|
135122eef70e3b207a1f2799128447ead6103565
|
Python
|
smachage2019/edoc-stuff
|
/time-freq.py
|
UTF-8
| 8,270
| 3.4375
| 3
|
[] |
no_license
|
# # Analysis of harmonics
#
# The idea behind this script is to explore the data series for seasonal trends, using a
# fourier decomposition of the time series, aggregated by climatic seasons (resampled and sliced
# every 3 months).
#
# The analysis goes as follows:
#
# - Loading the data
# - Filling the gaps
# - Resampling at different scales (3 months, 6 months, 1 and 4 years)
# - Slicing the data by seasons
# - Applying the Fourier transform over each of the seasonal data
# - Filtering out minor frequencies (below 500 in the power spectrum)
# - Recomposing the original series only with the major frequency
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from numpy.fft import fft, ifft
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# Import data (monthly global land temperatures, indexed by observation date).
global_temp = pd.read_csv('../input/GlobalTemperatures.csv', index_col='dt', parse_dates=True)
# Fill the gaps in the series.
# BUG FIX: DataFrame.fillna returns a *new* frame; the original call
# discarded the result, so the gaps were never actually filled.  Assign
# the filled frame back.
global_temp = global_temp.fillna(method='ffill')
# Skip the first years and start the series at the beginning of spring,
# so seasonal variations can be captured
global_temp = global_temp['1753-03-21':]
# Plot initial data
plt.figure(figsize=(15,4))
global_temp['LandAverageTemperature'].plot()
plt.grid()
plt.show()
# # Data resampling
# Resample the series and visualise at different scales.
# FIX: pandas removed the resample(rule, how='mean') keyword; the equivalent
# .resample(rule).mean() chain below behaves identically and works on both
# old and current pandas versions.
plt.figure(figsize=(15,16))
# Seasonal (3-month means)
seasonal_temp = global_temp.resample('3M').mean()
plt.subplot(4,1,1)
seasonal_temp['LandAverageTemperature'].plot()
plt.ylim([0,18])
plt.grid()
# half year
bi_seasonal_temp = global_temp.resample('6M').mean()
plt.subplot(4,1,2)
bi_seasonal_temp['LandAverageTemperature'].plot()
plt.ylim([0,18])
plt.grid()
# Yearly
year_temp = global_temp.resample('A').mean()
plt.subplot(4,1,3)
year_temp['LandAverageTemperature'].plot()
plt.ylim([0,18])
plt.grid()
# 4-Yearly
year_4_temp = global_temp.resample('4A').mean()
plt.subplot(4,1,4)
year_4_temp['LandAverageTemperature'].plot()
plt.ylim([0,18])
plt.grid()
plt.show()
# # Explore autocorrelation of the time series (at monthly scale)
## explore the autocorrelation of temperature data
lat = np.array(global_temp['LandAverageTemperature'])
# detrend the seasonal data by removing the average
det_lat = lat - np.average(lat)
# Get correlogram for 24 seasons (2 years)
# (lag-0 correlation is 1 by definition; lags 1..24 are computed pairwise)
seasonal_correlogram = [1.0, ]
seasonal_correlogram.extend([np.corrcoef(det_lat[:-i], det_lat[i:])[0, 1] for i in range(1, 25)])
plt.plot(seasonal_correlogram)
plt.grid()
plt.xlabel('Periods [Months]')
plt.ylabel('Correlation')
plt.title('Autocorrelation')
plt.show()
## Therefore cold winters are followed by hot summers, or hot summer followed by cold winters
# # Slicing data into seasons
# Analysing seasonal changes over time
seasonal_lat = np.array(seasonal_temp['LandAverageTemperature'])
# Parse into seasons: every 4th 3-month sample, offset by one per season.
spring = seasonal_lat[::4]
summer = seasonal_lat[1::4]
fall = seasonal_lat[2::4]
winter = seasonal_lat[3::4]
plt.figure(figsize=(12,3))
ax = plt.subplot(1,1,1)
box = ax.get_position()
# Shrink the axes so the legend fits outside on the right.
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
plt.plot(spring, label='Spring')
plt.plot(summer, label='Summer')
plt.plot(fall, label='Fall')
plt.plot(winter, label='Winter')
plt.xlim([0, len(summer)])
plt.grid()
plt.xlabel('Year')
plt.ylabel('Average Temperature [C]')
plt.legend(bbox_to_anchor=(1.18, 1.04))
# # Preparing data for trend analysis over each season
#
# The data is detrended using a mean filter (basically removing the mean value for each of the series),
# to later apply a fft transform of the detrended data.
# Seasonal analysis
seasons = [spring, summer, fall, winter]
seasons_string = ['spring', 'summer', 'fall', 'winter']
# Detrend for each of the seasons
seasons_average = [np.average(season) for season in seasons]
seasons_det = [seasons[i] - seasons_average[i] for i in range(len(seasons))]
plt.figure(figsize=[12,6])
plt.subplot(2,1,1)
# NOTE(review): the bare list comprehensions below are used purely for their
# plotting side effects; plain for-loops would read better.
[plt.plot(seasons_det[i], label=seasons_string[i]) for i in range(len(seasons))]
plt.ylabel('Centered Temperature')
plt.grid()
plt.xlim([0, len(seasons_det[0])])
## do the regression analysis
# Get the fourier coefficients
seasons_fft = [fft(season) for season in seasons_det]
# Get the power spectrum (squared magnitude of each Fourier coefficient)
seasons_ps = [np.abs(season)**2 for season in seasons_fft]
plt.subplot(2,1,2)
[plt.plot(seasons_ps[i], label=seasons_string[i]) for i in range(len(seasons))]
plt.xlabel('Frequency [Months]')
plt.ylabel('Power spectrum')
plt.xlim([0, 30])
plt.grid()
plt.show()
# # Filter frequencies in the low part of the power spectrum and re-construct the series
#
# A filter in the value of 500 of the power spectrum was set. In other words, if the value
# of the power spectrum is below this threshold, it will be set to 0. This will allow us to focus
# on the signal of the data, instead of on the fluctuations that come from the randomness of
# the process and the measurements.
#
# The selection of the 500 threshold was arbitrary and of course is open for debate.
## Keep only the Fourier coefficients whose power spectrum exceeds 500.
clean_seasons_ps = seasons_ps[:]
clean_seasons_ps = [[seasons_fft[season_i][year_i] if seasons_ps[season_i][year_i] > 500 else 0
                    for year_i in range(len(seasons_fft[0]))] for season_i in range(len(seasons_ps))]
plt.figure(figsize=[12,9])
plt.subplot(3,1,1)
plt.plot(np.transpose(clean_seasons_ps))
plt.xlim([0, 30])
plt.grid()
## redraw the series only with significant harmonics
seasons_series_clean = [np.real(ifft(serie)) for serie in clean_seasons_ps]
plt.subplot(3,1,2)
[plt.plot(seasons_series_clean[i], label=seasons_string[i]) for i in range(len(seasons))]
plt.xlim([0, len(seasons_det[0])])
plt.legend(bbox_to_anchor=(1.18, 1.04))
plt.grid()
## put the trend (the removed seasonal mean) back into the dataset
seasonal_trends = [seasons_series_clean[i] + seasons_average[i] for i in range(len(seasons))]
plt.subplot(3,1,3)
[plt.plot(seasonal_trends[i], label=seasons_string[i]) for i in range(len(seasons))]
plt.xlim([0, len(seasons_det[0])])
plt.legend(bbox_to_anchor=(1.18, 1.04))
plt.grid()
plt.show()
# # Results and conclusions
#
# From the analysis it can be seen that there does indeed seem to be a trend
# over the last 150 years (from year 100 onward) of increasing average
# temperature in each season.
#
# It seems that the average temperature in spring is more variable than in
# the rest of the seasons; however, one of the main harmonics of the series
# seems to reveal that the large temperature fluctuations in winter are
# consistent with the main variations of winter temperature. These appear to
# occur in 25-30 year cycles at the beginning of the series, and in 18-20
# year cycles at the end of the series (industrialisation perhaps?).
#
# On the contrary, oscillations in the fall are far more stable, indicating
# more stable patterns. Therefore, we might consider using the average fall
# temperature as an indicator of the Land Average Temperature (LAT) when
# detecting long-term variations of temperature.
#
# It is also interesting to see how the trends between winter and summer
# appear to have changed in the latter 150 years. In the first period, summer
# and winter appear to follow similar trends, with cold winters leading to
# cold summers; however, this trend seems to change in the second period,
# especially towards the end, where an inversion is found whereby cold
# winters seem to be paired with warm summers. It looks like the weather may
# indeed be changing.
|
d42822f15489a3931726cf50f055ea095e0654bd
|
Python
|
one3chens/cloudpunch
|
/cloudpunch/slave/ping.py
|
UTF-8
| 2,813
| 2.546875
| 3
|
[
"GPL-2.0-only",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
import re
import subprocess
import logging
import collections
import time
from threading import Thread
class CloudPunchTest(Thread):
    """Worker thread that runs the system `ping` command and records latency.

    Results land in ``self.final_results``: a list of per-ping dicts in
    over-time mode, a one-entry summary dict otherwise, or an error string
    if the test raised.

    NOTE(review): this file uses Python 2 idioms (``dict.iteritems``,
    ``e.message``, ``collections.Mapping``); it will not run unmodified
    under Python 3.
    """

    def __init__(self, config):
        # Caller-supplied configuration; merged over the defaults in run().
        self.config = config
        self.final_results = []
        super(CloudPunchTest, self).__init__()

    def run(self):
        # Thread entry point: apply default config values, then execute.
        try:
            default_config = {
                'ping': {
                    'target': 'google.com',
                    'duration': 10
                }
            }
            self.merge_configs(default_config, self.config)
            self.config = default_config
            self.runtest()
        except Exception as e:
            # Send exceptions back to master
            logging.error('%s: %s', type(e).__name__, e.message)
            self.final_results = '%s: %s' % (type(e).__name__, e.message)

    def runtest(self):
        # Configuration setup
        # In server/client mode ping the paired instance; otherwise ping the
        # configured target host.
        target = self.config['match_ip'] if self.config['server_client_mode'] else self.config['ping']['target']
        # `ping -c N` sends roughly one echo request per second, so the
        # duration doubles as the packet count.
        duration = str(self.config['ping']['duration'])
        results = []
        logging.info('Starting ping command to server %s for %s seconds', target, duration)
        ping = subprocess.Popen(['ping', '-c', duration, target],
                                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        for line in iter(ping.stdout.readline, ''):
            # Each successful reply line contains "time=<ms>".
            latency = re.findall(r'time=(\d+\.\d+)', line)
            now = time.time()
            if latency:
                latency = float(latency[0])
                # Over time results
                if self.config['overtime_results']:
                    self.final_results.append({
                        'time': now,
                        'latency': latency
                    })
                # Summary results
                else:
                    results.append(latency)
            # Ping failed: record a zero-latency sample (over-time mode only)
            elif 'Request timeout' in line and self.config['overtime_results']:
                self.final_results.append({
                    'time': now,
                    'latency': 0
                })
        ping.stdout.close()
        # Send back summary if not over time
        if not self.config['overtime_results']:
            try:
                self.final_results = {
                    'latency': sum(results) / len(results)
                }
            except ZeroDivisionError:
                # No successful pings at all; -1 marks "no data".
                self.final_results = {
                    'latency': -1
                }

    def merge_configs(self, default, new):
        # Recursively overlay `new` onto `default` in place; nested dicts
        # are merged rather than replaced wholesale.
        for key, value in new.iteritems():
            if (key in default and isinstance(default[key], dict) and
                    isinstance(new[key], collections.Mapping)):
                self.merge_configs(default[key], new[key])
            else:
                default[key] = new[key]
| true
|
4fbfc79d1b48d651282d6a6d513ec28182620be3
|
Python
|
KotR9001/sqlalchemy-challenge
|
/Python Files/app.py
|
UTF-8
| 13,184
| 3.03125
| 3
|
[] |
no_license
|
#########Climate App
#####Re-Enter Code From Jupyter Notebook
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
import time
###Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
#Method to Resolve Thread Issue Found on https://stackoverflow.com/questions/48218065/programmingerror-sqlite-objects-created-in-a-thread-can-only-be-used-in-that-sa/51147168
# NOTE(review): the absolute Windows path below ties this script to one
# machine; a relative path would make it portable.
engine = create_engine("sqlite:///C:/Users/bjros/OneDrive/Desktop/KU_Data_Analytics_Boot_Camp/Homework Assignments/Homework Week 10/sqlalchemy-challenge/SQLite File/hawaii.sqlite", connect_args={'check_same_thread': False})
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
session
###Exploratory Climate Analysis
##Precipitation
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Display the Data Types
# NOTE(review): data_types is computed but never used afterwards.
data_types = type(session.query(Measurement).first())
# Display the date of the last data point in the database
dates = session.query(Measurement.date).all()
last_date = dates[-1]
# Calculate the date 1 year ago from the last data point in the database
dates = session.query(Measurement.date).all()
last_date = dates[-1]
last_date1 = pd.to_datetime(last_date)
year_ago = last_date1 - dt.timedelta(days=365)
#Convert the Dates to Timestamps
# NOTE(review): the two lines below overwrite the values just computed with
# hard-coded dates, making the timedelta arithmetic above dead code.
last_date1 = dt.datetime.strptime('08/23/2017', "%m/%d/%Y")
year_ago = dt.datetime.strptime('08/23/2016', "%m/%d/%Y")
# Perform a query to retrieve the date and precipitation scores
# (each row comes back as a 1-tuple; the tuples are unpacked further below)
yearly_dates = [row for row in session.query(Measurement.date).filter(func.date(Measurement.date)>=year_ago).filter(func.date(Measurement.date)<=last_date1).all()]
yearly_prcp = [row for row in session.query(Measurement.prcp).filter(func.date(Measurement.date)>=year_ago).filter(func.date(Measurement.date)<=last_date1).all()]
# Save the query results as a Pandas DataFrame and set the index to the date column
#Found method to save query results as Pandas DataFrame from https://stackoverflow.com/questions/35937579/pandas-read-sql-columns-not-working-when-using-index-col-returns-all-columns-i
precipitation_df = pd.DataFrame({'Precipitation':yearly_prcp}, index=yearly_dates)
#Rename the Index Column
#Method Found at https://stackoverflow.com/questions/19851005/rename-pandas-dataframe-index
precipitation_df = precipitation_df.rename_axis("Date")
#Sort the Data and Make the Date Column Accessible
precipitation_df = precipitation_df.sort_index().reset_index()
#Pull Values Out of Tuples in Columns
#Method Found at https://stackoverflow.com/questions/29550414/how-to-split-column-of-tuples-in-pandas-dataframe
precipitation_df['Date'] = pd.DataFrame(precipitation_df['Date'].tolist())
precipitation_df['Precipitation'] = pd.DataFrame(precipitation_df['Precipitation'].tolist())
##Stations
# Design a query to show how many stations are available in this dataset?
num_stations = session.query(Measurement.station).group_by(Measurement.station).count()
num_stations
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
stations = [row for row in session.query(Measurement.station).group_by(Measurement.station).all()]
station_counts = [row for row in session.query(func.count(Measurement.station)).group_by(Measurement.station).all()]
#Put the Data in a DataFrame
stations_df = pd.DataFrame({'Station':stations, 'Station Activity':station_counts})
#Take the Values Out of Tuples
#Method to Take Values Out of Tuples Found at https://stackoverflow.com/questions/16296643/convert-tuple-to-list-and-back
stations_df['Station'] = pd.DataFrame(stations_df['Station'].tolist())
stations_df['Station Activity'] = pd.DataFrame(map(list, stations_df['Station Activity']))
#Sort the Values by Station Activity Counts
stations_df = stations_df.sort_values('Station Activity', ascending=False)
##Temperatures
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
# NOTE(review): the station id 'USC00519281' is hard-coded; deriving it from
# stations_df would keep this in sync with the activity query above.
low_temp = session.query(func.min(Measurement.tobs)).group_by(Measurement.station).filter(Measurement.station=='USC00519281').all()
high_temp = session.query(func.max(Measurement.tobs)).group_by(Measurement.station).filter(Measurement.station=='USC00519281').all()
avg_temp = session.query(func.round(func.avg(Measurement.tobs))).group_by(Measurement.station).filter(Measurement.station=='USC00519281').all()
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
#Determine the last date
last_date_station = [row for row in session.query(Measurement.date).filter(Measurement.station=='USC00519281').order_by(Measurement.date.desc()).first()]
#Convert to DateTime
last_date_station = pd.to_datetime(last_date_station)
print(last_date_station)
#Calculate the Date from a Year Ago
start_date_station = last_date_station - dt.timedelta(days=365)
print(start_date_station)
#Convert the Dates to Timestamps
last_date_station = dt.datetime.strptime('08/18/2017', "%m/%d/%Y")
start_date_station = dt.datetime.strptime('08/18/2016', "%m/%d/%Y")
#Perform Query
yearly_dates = [row for row in session.query(Measurement.date).filter(Measurement.station=='USC00519281').filter(func.date(Measurement.date)>=start_date_station).filter(func.date(Measurement.date)<=last_date_station).all()]
yearly_temps = [row for row in session.query(Measurement.tobs).filter(Measurement.station=='USC00519281').filter(func.date(Measurement.date)>=start_date_station).filter(func.date(Measurement.date)<=last_date_station).all()]
# Save the query results as a Pandas DataFrame and set the index to the date column
#Found method to save query results as Pandas DataFrame from https://stackoverflow.com/questions/35937579/pandas-read-sql-columns-not-working-when-using-index-col-returns-all-columns-i
temp_df = pd.DataFrame({'Temperature':yearly_temps}, index=yearly_dates)
#Rename the Index Column
#Method Found at https://stackoverflow.com/questions/19851005/rename-pandas-dataframe-index
temp_df = temp_df.rename_axis("Date")
#Sort the Data and Make the Date Column Accessible
temp_df = temp_df.sort_index().reset_index()
#Pull Values Out of Tuples in Columns
#Method Found at https://stackoverflow.com/questions/29550414/how-to-split-column-of-tuples-in-pandas-dataframe
temp_df['Date'] = pd.DataFrame(temp_df['Date'].tolist())
temp_df['Temperature'] = pd.DataFrame(temp_df['Temperature'].tolist())
#Close All Existing Sessions
#Method Found at https://docs.sqlalchemy.org/en/13/orm/session_api.html
#session.close_all()
###Create New Code for the Climate App
#Import Flask & climate_starter notebook
#Method to Allow Import of Python Files Found at https://stackoverflow.com/questions/4142151/how-to-import-the-class-within-the-same-directory-or-sub-directory
from flask import Flask, jsonify, Response
#Create App
app = Flask(__name__)
#Define the Start & End Dates
start = dt.datetime.strptime('3/18/2012', '%m/%d/%Y')
end = dt.datetime.strptime('3/18/2013', '%m/%d/%Y')
#Create the Homepage
@app.route("/")
def home():
    """Site index: lists every available API route as one long string.

    NOTE(review): "</br>" is not valid HTML (should be "<br/>"), and these
    f-strings contain no placeholders -- left untouched because the literal
    response text is runtime behavior.
    """
    return(
    f"Here is the homepage</br>."
    f"---------------------------------</br>"
    f"Here is the directory of routes</br>."
    f"-----------------------------------------</br>"
    f"Here is the page with precipitation data</br>"
    f"/api/v1.0/precipitation</br>"
    f"----------------------------------------</br>"
    f"Here is the page with the stations list</br>"
    f"/api/v1.0/stations</br>"
    f"-----------------------------------------------------------------------------------------------</br>"
    f"Here is the page with the temperature data from the most active station from the previous year</br>"
    f"/api/v1.0/tobs</br>"
    f"------------------------------------------------------------------------------------------------------------------------------</br>"
    f"Here is the list of minimum, average, and maximum temperature values from the specified start date to the last available date</br>"
    f"/api/v1.0/start</br>"
    f"------------------------------------------------------------------------------------------------------------------------------</br>"
    f"Here is the list of minimum, average, and maximum temperature values from the specified start date to the specified end date</br>"
    f"/api/v1.0/start/end"
    )
#Create the Precipitation Page
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return every (date, prcp) row of Measurement as a JSON list of dicts.

    Uses the module-level `session` opened at import time (the per-request
    session lines are commented out below).
    """
    #Create the Precipitation List
    prcp_list = []
    #session = Session(engine)
    for date, prcp in session.query(Measurement.date, Measurement.prcp).all():
        prcp_dict = {}
        prcp_dict["date"] = date
        prcp_dict["prcp"] = prcp
        prcp_list.append(prcp_dict)
    #session.close()
    return jsonify(prcp_list)
#Create the Stations Page
@app.route("/api/v1.0/stations")
def station():
    """Return the distinct station ids found in Measurement as JSON.

    NOTE(review): the loop variable `station` shadows this view function's
    name; also each dict value is a one-element Row tuple, not a bare string.
    """
    #Create the Stations List
    stations_list = []
    #session = Session(engine)
    for station in session.query(Measurement.station).group_by(Measurement.station).all():
        stations_dict = {}
        stations_dict["station"] = station
        stations_list.append(stations_dict)
    #session.close()
    return jsonify(stations_list)
#Create the Temperature Page for the Most Active Station from the Last Year
@app.route("/api/v1.0/tobs")
def tobs():
    """Return (date, tobs) rows as JSON.

    NOTE(review): the comment below promises "most active station from the
    last year", but the query has no station or date filter -- it returns
    every temperature observation in the table. Confirm intent before use.
    """
    #Create the Temperature List for the Most Active Station from the Last Year
    temp_list = []
    #session = Session(engine)
    for date, tobs in session.query(Measurement.date, Measurement.tobs).all():
        temp_dict = {}
        temp_dict["date"] = date
        temp_dict["tobs"] = tobs
        temp_list.append(temp_dict)
    #session.close()
    return jsonify(temp_list)
#Create the List of Minimum, Average, and Maximum Temperature Values from the Specified Start Date Where the End Date is Not Specified
@app.route("/api/v1.0/start")
def extremes1():
    """Min/avg/max temperature from the hard-coded `start` date onward.

    NOTE(review): despite the route text, `start` and `last_date1` are
    module-level constants, not URL parameters; the console prints say "oC"
    while the HTTP response says "oF" -- one of the two must be wrong.
    """
    #session = Session(engine)
    #Perform Queries for Minimum, Average, & Maximum Temperature Values where the End Date is not Specified in the URL
    low_temp1 = session.query(func.min(Measurement.tobs)).filter(func.date(Measurement.date)>=start).filter(func.date(Measurement.date)<=last_date1).all()
    print(f"The lowest recorded temperature at the most active station was {low_temp1}oC.")
    high_temp1 = session.query(func.max(Measurement.tobs)).filter(func.date(Measurement.date)>=start).filter(func.date(Measurement.date)<=last_date1).all()
    print(f"The highest recorded temperature at the most active station was {high_temp1}oC.")
    avg_temp1 = session.query(func.round(func.avg(Measurement.tobs))).filter(func.date(Measurement.date)>=start).filter(func.date(Measurement.date)<=last_date1).all()
    print(f"The average temperature at the most active station was {avg_temp1}oC.")
    #session.close()
    return(f"The minimum temperature in the date range is: {low_temp1}oF.</br>"
           f"The average temperature in the date range is: {avg_temp1}oF.</br>"
           f"The maximum temperature in the date range is: {high_temp1}oF.</br>")
#Create the List of Minimum, Average, and Maximum Temperature Values from the Specified Start Date to the Specified End Date
@app.route("/api/v1.0/start/end")
def extremes2():
    """Min/avg/max temperature between the hard-coded `start` and `end` dates.

    NOTE(review): as with extremes1, the dates are module-level constants --
    the route does not actually accept <start>/<end> URL parameters, and the
    printed unit (oC) disagrees with the returned unit (oF).
    """
    #session = Session(engine)
    #Perform Queries for Minimum, Average, & Maximum Temperature Values where the End Date is Specified in the URL
    low_temp2 = session.query(func.min(Measurement.tobs)).filter(func.date(Measurement.date)>=start).filter(func.date(Measurement.date)<=end).all()
    print(f"The lowest recorded temperature at the most active station was {low_temp2}oC.")
    high_temp2 = session.query(func.max(Measurement.tobs)).filter(func.date(Measurement.date)>=start).filter(func.date(Measurement.date)<=end).all()
    print(f"The highest recorded temperature at the most active station was {high_temp2}oC.")
    avg_temp2 = session.query(func.round(func.avg(Measurement.tobs))).filter(func.date(Measurement.date)>=start).filter(func.date(Measurement.date)<=end).all()
    print(f"The average temperature at the most active station was {avg_temp2}oC.")
    #session.close()
    return (f"The minimum temperature in the date range is: {low_temp2}oF.</br>"
            f"The average temperature in the date range is: {avg_temp2}oF.</br>"
            f"The maximum temperature in the date range is: {high_temp2}oF.</br>")
#Create the URL
if __name__ == "__main__":
app.run(debug=True)
| true
|
227c59bb246cb920265502f866e52e1c4669e290
|
Python
|
xiongmi39/leetcode
|
/py/stack/503nextGreaterElements.py
|
UTF-8
| 650
| 3.171875
| 3
|
[] |
no_license
|
from typing import List
class Solution:
    def nextGreaterElements(self, nums: List[int]) -> List[int]:
        """For each element of the circular array *nums*, return the next
        strictly greater element scanning forward (with wrap-around),
        or -1 when no such element exists."""
        n = len(nums)
        result = [-1] * n
        # Monotonic stack of indices into nums. Index 0 acts as a sentinel
        # that is never inspected (guarded by len(stack) > 1 below).
        stack = [0]
        # Walk a virtually doubled array right-to-left so that each position
        # also "sees" the elements that follow it after wrapping.
        for pos in range(2 * n - 1, -1, -1):
            current = nums[pos % n]
            while len(stack) > 1 and current >= nums[stack[-1]]:
                stack.pop()
            if len(stack) > 1:
                result[pos % n] = nums[stack[-1]]
            stack.append(pos % n)
        return result
solution = Solution()
ans = solution.nextGreaterElements([3,4,2,1,6,7,-1,-5,10,-9])
print(ans)
| true
|
c2e8680c9bdbada014005d0277d3ea7aee04c406
|
Python
|
eduardopds/Programming-Lab1
|
/mediafinal/mediafinal.py
|
UTF-8
| 253
| 3.453125
| 3
|
[] |
no_license
|
# coding: utf-8
# final grade ("media final")
# Eduardo Pereira / Programming 1 (course originally "Programação 1")
# Python 2 script: reads two grades from stdin and prints their weighted
# average (weights 60 and 40).
nota_1 = float(raw_input())
nota_2 = float(raw_input())
peso_1 = 60
peso_2 = 40
# Weighted mean: (n1*w1 + n2*w2) / (w1 + w2)
media_final = ((nota_1 * peso_1) + (nota_2 * peso_2)) / (peso_1 + peso_2)
print 'Média final: %.1f' % media_final
| true
|
00949fd7089d26747f4e1fea3ec301898957f3a3
|
Python
|
evgenykurbatov/kb21-hotjup-migration-adv
|
/config_xray_min.py
|
UTF-8
| 2,260
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
# -*- coding: utf-8 -*-
##
## Photoevaporation model
##
import numpy as np
from numpy import pi, sqrt, exp, sin, cos, tan, log, log10
import scipy as sp
import scipy.integrate
import const
from aux import *
path = 'xray'
##
## The X-ray luminosity evolution based on two papers:
## i) Louden T, Wheatley P J, Briggs K. Reconstructing the high-energy irradiation of the evaporating hot Jupiter HD 209458b
## https://ui.adsabs.harvard.edu/abs/2017MNRAS.464.2396L
## log10(L_X) = 27.08 +/- 0.07
## ii) Tu L et al. The extreme ultraviolet and X-ray Sun in Time: High-energy evolutionary tracks of a solar-like star
## https://ui.adsabs.harvard.edu/abs/2015A%26A...577L...3T
## L_X ~ t^{-1.42}
L_X = lambda t : 0.5 * 10**(27.08) * (t/t_age)**(-1.42)
##
## The model of photoevaporation
## Owen J E, Clarke C J, Ercolano B, 2012, MNRAS, 422, 1880
## https://ui.adsabs.harvard.edu/abs/2012MNRAS.422.1880O
##
from dataclasses import dataclass
@dataclass
class Photoevaporation:
    """Empty namespace; the fit profile f, normalisation C_y and rate
    constant C are attached to the single `pe` instance below."""
    pass
pe = Photoevaporation()
def tmpfn(y):
    """Radial mass-loss profile fit (Owen, Clarke & Ercolano 2012): a sum of
    three weighted exponentials, smoothly truncated beyond y ~ 57.
    Accepts a scalar or a numpy array."""
    coeffs = (
        (-0.438226, -0.10658387),
        (0.5699464, 0.010732277),
        (-0.131809597, -1.32285709),
    )
    core = sum(amp * rate * exp(rate * y) for amp, rate in coeffs)
    # The steep (y/57)**10 factor cuts the profile off outside the wind region.
    return core * exp(-(y / 57) ** 10)
pe.f = tmpfn
y = np.linspace(0, 500, 5000)
pe.C_y = sp.integrate.simps(tmpfn(y), y, even='avg')
print("pe.C_y =", pe.C_y)
pe.C = 4.8e-9 * const.M_sol/const.yr / (2*pi/0.95 * pe.C_y * const.AU**2)
print("pe.C = %.2e [g cm-2 s-1]" % pe.C)
def dotSigma_pe(t, r, r_0):
    """Local photoevaporative mass-loss rate per unit area [g cm-2 s-1].

    t   -- time, passed through to L_X(t)
    r   -- radius [cm], scalar or ndarray
    r_0 -- inner truncation radius [cm]
    Relies on module-level M_s and r_g -- assumed defined earlier in the
    file (not visible in this chunk); TODO confirm.
    """
    m = M_s/const.M_sol
    # Dimensionless radial coordinate of the Owen+2012 profile fit.
    y = 0.95/m * (r - r_0)/const.AU
    # Mass loss only outside both the truncation and gravitational radii.
    cond = (r >= r_0) & (r >= r_g)
    return pe.C * m**(-1.148) * (L_X(t)/1e30)**1.14 / (r/const.AU) \
        * np.where(cond, pe.f(y), np.zeros_like(r))
def dotM_pe(t, r_0):
    """Total photoevaporative mass-loss rate [g s-1]: the Owen+2012 profile
    integrated from max(r_g, r_0) outward, normalised by pe.C_y.

    Relies on module-level M_s and r_g defined earlier in the file
    (not visible here); TODO confirm.
    """
    m = M_s/const.M_sol
    # Start the integral at r_g when the disc inner edge lies inside it.
    y_min = np.where(r_g >= r_0, 0.95/m * (r_g - r_0)/const.AU, 0)
    y = np.linspace(y_min, 500, 5000).T
    integral = sp.integrate.simps(pe.f(y), y, even='avg')
    return 4.8e-9 * const.M_sol/const.yr * m**(-0.148) * (L_X(t)/1e30)**1.14 * integral/pe.C_y
##
## Ride S K, Walker A B C Jr. Absorption of X-rays in the interstellar medium
## https://ui.adsabs.harvard.edu/abs/1977A%26A....61..339R
##
## Opacity for the solar sbundance for ~ 1 keV photons:
kappa_X = 2e-22 / const.m_p
print("kappa_X = %.2e [cm^2/g]" % kappa_X)
| true
|
ed83ff4e611549d97bc8b2a50ce2bbef7b530cd1
|
Python
|
dewhurstwill/simulate_coin_flip_python
|
/main.py
|
UTF-8
| 1,542
| 4.46875
| 4
|
[
"MIT"
] |
permissive
|
######################################################
# #
# A simple program to simulate the flipping a coin #
# multiple times. #
# #
######################################################
# Importing the random module, this will be used to
# simulate a coin flip, random number between 1-2
import random
# Function for flipping coin x times
def flip_coins(number_of_flips):
    """Simulate *number_of_flips* fair coin flips and print the outcome.

    For more than one flip the heads/tails tallies are printed; for a single
    flip (or zero flips) a heads/tails sentence is printed, exactly as before.

    Improvement: the counts are now also returned as a (heads, tails) tuple
    so callers and tests can inspect the result -- the original returned
    None, which is backward-compatible with ignoring the return value.
    """
    number_of_heads = 0
    number_of_tails = 0
    for _ in range(number_of_flips):
        # Flip coin: 1 == heads, 2 == tails.
        if random.randint(1, 2) == 1:
            number_of_heads += 1
        else:
            number_of_tails += 1
    if number_of_flips > 1:
        # Probability mode: report both tallies.
        print("Number of heads: ", number_of_heads)
        print("Number of tails: ", number_of_tails)
    else:
        # Single-flip mode: report which side came up.
        if number_of_heads == 1:
            print("It was heads")
        else:
            print("It was tails")
    return number_of_heads, number_of_tails
# Set to 1 for a head/tails tool
# Set to anyhing greater than 1 to calculate the probability
times_to_flip = 10
# Run the simulation
flip_coins(times_to_flip)
| true
|
fe129fdfc319851bf95852a862d9bb1a07fbd578
|
Python
|
PribhuPatel/python-training-programs
|
/day6/6-2.py
|
UTF-8
| 778
| 2.953125
| 3
|
[] |
no_license
|
"""Try serializing file handler object."""
import io
import json
try:
file = open("6-1.json", "r")
except FileNotFoundError:
print("File not Found")
exit(1)
file_json = {
'name': file.name,
'data': file.read()
}
file.close()
with open("6-2.json", "w") as f:
json.dump(file_json, f)
with open("6-2.json", "r") as f:
loaded_json = json.load(f)
file2 = io.BytesIO(bytes(loaded_json["data"], encoding="utf-8"))
file2.name = loaded_json["name"]
file2 = io.TextIOWrapper(file2)
print(file2.read())
print(file2.name)
print(file2.__class__)
print(file2.closed)
file2.close()
print(file2.closed)
"""Pickle cannot Serialize all type of objects. There are limitations.
It cannot serialize Objects wich are temporary with program runtime."""
| true
|
f171d65781b92fef5e4e6ec97e6aa92937d9dd95
|
Python
|
CandyTt20/Notes
|
/algorithm/add2.py
|
UTF-8
| 1,549
| 3.625
| 4
|
[] |
no_license
|
class ListNode(object):
    """Singly linked list node (used here to hold one decimal digit)."""
    def __init__(self, x):
        self.val = x  # node payload
        self.next = None  # following node, or None at the tail
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Both lists store a non-negative integer least-significant digit
        first; the result list holds their sum in the same layout.
        """
        def to_digits(node):
            # Collect the digits from head to tail (least significant first).
            digits = []
            while node is not None:
                digits.append(node.val)
                node = node.next
            return digits

        d1 = to_digits(l1)
        d2 = to_digits(l2)
        # Pad the shorter operand with zeros so the columns line up.
        width = max(len(d1), len(d2))
        d1 += [0] * (width - len(d1))
        d2 += [0] * (width - len(d2))
        # Column-wise addition with carry propagation.
        out = []
        carry = 0
        for a, b in zip(d1, d2):
            carry, digit = divmod(a + b + carry, 10)
            out.append(digit)
        if carry:
            out.append(carry)
        # Rebuild a linked list from the digit列表, head first.
        head = ListNode(out[0])
        tail = head
        for value in out[1:]:
            tail.next = ListNode(value)
            tail = tail.next
        return head
def appendList(l):
    """Build a linked list from the (non-empty) python list *l* and return its head."""
    head = ListNode(l[0])
    cursor = head
    for value in l[1:]:
        cursor.next = ListNode(value)
        cursor = cursor.next
    return head
def showList(head):
    """Collect the node values from *head* to the tail into a python list."""
    values = []
    node = head
    while node is not None:
        values.append(node.val)
        node = node.next
    return values
x = appendList([2,4,3])
y = appendList([5,6,4])
z = Solution().addTwoNumbers(x, y)
print(showList(z))
| true
|
3b38b2ea189b27d480b41ea5261259b123ae7dda
|
Python
|
brent-harrison/CS660Assignment1
|
/bandits/controller.py
|
UTF-8
| 1,914
| 2.75
| 3
|
[] |
no_license
|
import numpy as np
import sys
import copy
import time
import random
from randAgent import randomAgent
from epsGreedyAgent import epsGreedyAgent
from UCBAgent import UCBAgent
from thompsonAgent import thompsonAgent
import argparse
######################################################
AGENTS_MAP = {'randomAgent' : randomAgent,
'epsGreedyAgent' : epsGreedyAgent,
'UCBAgent': UCBAgent,
'thompsonAgent': thompsonAgent }
class bandit:
    """A k-armed Bernoulli bandit loaded from a text file.

    File format: the first line is skipped (header); every following line is
    one arm's success probability as a float.
    """
    def __init__(self, file):
        # Fix: the original opened the file and never closed it; a with-block
        # guarantees the handle is released.
        with open(file, "r") as f:
            lines = [line.rstrip("\n") for line in f]
        # Skip the header line; the rest are per-arm reward probabilities.
        self.arms = [float(line) for line in lines[1:]]

    def pull_arm(self, arm):
        """Pull *arm* (0-based); return 1 with that arm's probability, else 0."""
        prob = self.arms[arm]
        return 1 if random.random() <= prob else 0

    def getNumArms(self):
        """Number of arms in this bandit."""
        return len(self.arms)
parser = argparse.ArgumentParser(description='Define bandit problem and agents.')
parser.add_argument('--input', choices=['input/test0.txt', 'input/test1.txt'], default='input/test1.txt', help='The input file, can be input/test0.txt or input/test1.txt')
parser.add_argument('--agent', choices=AGENTS_MAP.keys(), default='randomAgent', help='The bandit AI. Can be randomAgent, epsGreedyAgent, UCBAgent, or thompsonAgent')
parser.add_argument('--num_plays', type=int, default = 10000, help='The number of pulls an agent has.')
args = parser.parse_args()
testBandit = bandit(args.input)
agent = AGENTS_MAP[args.agent]()
history = []
cumulative_reward = 0
for numRuns in range(args.num_plays):
testArm = agent.recommendArm(testBandit, history)
reward = testBandit.pull_arm(testArm)
cumulative_reward += reward
history.append((testArm, reward))
print(cumulative_reward)
| true
|
8de5996d4c8a20644e70d03956bf5d49331d979a
|
Python
|
VVeremjova/physiopsychotest
|
/admin/saveInDB.py
|
UTF-8
| 1,553
| 2.9375
| 3
|
[] |
no_license
|
import sqlite3
class SaveInDB:
    """Thin sqlite3 wrapper persisting test subjects (Users) and Results."""

    def __init__(self, filename='example.db'):
        # Fix: dropped the unused local `db_file` the original bound here.
        self.conn = sqlite3.connect(filename)

    def createDB(self):
        """Create the Users and Results tables (raises if they already exist)."""
        c = self.conn.cursor()
        c.execute('''CREATE TABLE Users
             (id_ text, name text, gender text, date_age text,
              occupation text, work_experience text,
             last_work_experience text,
              comments text)''')
        c.execute('''CREATE TABLE Results
             (name text, total_errors int, correct_answers int)''')
        self.conn.commit()

    def addNewClient(self, params):
        """Insert one Users row; *params* is an 8-tuple matching the column order."""
        c = self.conn.cursor()
        c.execute("INSERT INTO Users VALUES (?,?, ?, ?, ?,? ,? , ?)", params)
        # Save (commit) the changes
        self.conn.commit()

    def searchClient(self, user_name):
        """Return the first Users row whose name matches, or None.

        Fix: removed the dead `params` tuple and unused cursor alias the
        original built but never used.
        """
        c = self.conn.cursor()
        c.execute('SELECT * FROM Users WHERE name=?', [user_name])
        return c.fetchone()

    def updateClient(self, params):
        """Update a Users row by name; *params* = (id_, gender, date_age,
        occupation, work_experience, last_work_experience, comments, name)."""
        c = self.conn.cursor()
        c.execute("UPDATE Users SET id_ = ?, gender = ?,date_age = ?,occupation =?,work_experience=?,last_work_experience=?, comments = ? WHERE name = ?;", params)
        self.conn.commit()

    def close(self):
        """Close the connection; commit first or pending changes are lost."""
        self.conn.close()
| true
|
3f28514c3fbf693ac85c3a8ad3a661da6605580d
|
Python
|
JustasJJ/D16-Hangman
|
/main.py
|
UTF-8
| 5,338
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
import os
import random
from flask import (
Flask,
session,
render_template,
redirect,
url_for,
request
)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'vavavava'
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page; wipes any previous game state from the session."""
    session.clear()
    return render_template("Hangman.html", score=0)
@app.route('/guess_input', methods=['GET', 'POST'])
def guess_input():
    """Page where player one types the secret word for player two."""
    return render_template("guess_input.html", score=0)
@app.route('/game', methods=['GET', 'POST'])
def game():
    """Start a new game with the word submitted from the guess_input form."""
    session['answer'] = request.form['answer']
    # Puzzle as string ('s') and as mutable char list ('puzzle'), all blanks.
    session['s'] = '_'*len(session['answer'])
    session['puzzle'] = list(session['s'])
    session['score'] = 0
    session['h'] = [] # history of guesses as a list
    session['question'] = ""
    if request.method == 'POST':
        return render_template("Game.html",
                               answer=session['answer'],
                               puzzle=session['puzzle'],
                               puzzle_st=session['s'],
                               score=0,
                               question=session['question']
                               )
    else:
        return redirect(url_for('index'))
@app.route('/guess_random', methods=['GET', 'POST'])
def guess_random():
    """Start a new game with a random word from words.txt.

    NOTE(review): the open() handle for words.txt is never closed.
    """
    words = [line.rstrip().lower() for line in open("words.txt")]
    word = random.choice(words)
    session['answer'] = word
    session['s'] = '_'*len(word)
    session['puzzle'] = list('_'*len(word))
    session['score'] = 0
    session['h'] = [] # history of guesses as a list
    session['question'] = ""
    return render_template("Game.html",
                           answer=session['answer'],
                           puzzle=session['puzzle'],
                           puzzle_st=session['s'],
                           score=0,
                           question=session['question']
                           )
@app.route('/guess_riddle', methods=['GET', 'POST'])
def guess_riddle():
    """Page where the player picks a riddle category."""
    return render_template("guess_riddle.html", score=0)
@app.route('/riddle_game', methods=['GET', 'POST'])
def riddle_game():
    """Start a game whose answer is a random riddle from riddles.txt in the
    category chosen on the guess_riddle page.

    NOTE(review): on a GET request this view falls off the end and returns
    None (HTTP 500); the riddle-file parsing also assumes exactly one '?'
    per riddle line.
    """
    ctg = request.form['category']
    dct = {}
    f = open("riddles.txt", "r")
    # Scan forward to the '*<category>*' marker line.
    for line in f:
        if line.strip() == '*' + ctg + '*': # start reading file
            break
    # Collect "question? answer" lines until the next marker/blank-ish line.
    for line in f:
        if "*" not in line and len(line) > 3: # populate dict with riddles
            (key, val) = line.split("?")
            dct[key+"?"] = val[1:-1]
        else:
            break # ends reading file
    f.close()
    riddle = random.choice(list(dct.keys())) # select random riddle
    session['answer'] = dct[riddle]
    session['s'] = '_'*len(session['answer'])
    session['puzzle'] = list(session['s'])
    session['score'] = 0
    session['h'] = [] # history of guesses as a list
    session['question'] = riddle[:-1] + ". Kas?"
    if request.method == 'POST':
        return render_template("Game.html",
                               answer=session['answer'],
                               puzzle=session['puzzle'],
                               puzzle_st=session['s'],
                               score=0,
                               question=session['question']
                               )
@app.route('/guess', methods=['GET', 'POST'])
def guess():
    """Apply one letter guess to the current game and re-render the board.

    Delegates all rule logic to check_guesses(); the session keys are
    'c' -> score (wrong guesses), 's' -> puzzle string, 'p' -> puzzle list,
    'h' -> guess history.
    """
    results = {}
    answer = session['answer']
    session['guess'] = request.form['guess']
    g = session['guess']
    results = check_guesses(answer,
                            g,
                            session['score'],
                            session['puzzle'],
                            session['s'],
                            session['h']
                            )
    session['score'] = results["c"]
    session['s'] = results["s"]
    session['puzzle'] = results["p"]
    session['h'] = results["h"]
    # DEBUG
    # print("answer",answer)
    # print("guess",g)
    # print("puzzle", session['puzzle'])
    # print("s", session['s'])
    # print("h",session['h'])
    # print("score",session['score'])
    if request.method == 'POST':
        return render_template("Game.html",
                               guess=g,
                               answer=answer,
                               puzzle=session['puzzle'],
                               history=session['h'],
                               score=session['score'],
                               puzzle_st=session['s'],
                               question=session['question'])
    else:
        return redirect(url_for('index'))
# functions for guessing algorithm
def word_to_list(w):
    """Split the word *w* into a list of its characters: "wheel" -> ['w','h','e','e','l']."""
    return list(w)
def puzzle_to_string(p):
    """Join a list of characters back into one string: ['w','_','l'] -> "w_l"."""
    return "".join(p)
def check_guesses(answer, g, c, p, s, h):
    """Apply one guess *g* to the game state and return the updated state.

    answer -- lowercase secret word; c -- wrong-guess count; p -- puzzle as a
    char list (mutated in place); s -- puzzle as a string; h -- guess history
    (mutated in place). Repeated guesses are ignored; correct letters are
    revealed uppercased; on the 6th wrong guess (or a solved board) the full
    answer is revealed. Returns {"p", "h", "c", "s"} with "s" lowercased.
    """
    if g not in h:
        h.append(g)
        if g in answer:
            # Reveal every occurrence of the guessed letter, uppercased.
            idx = answer.find(g)
            while idx >= 0:
                p[idx] = g.upper()
                idx = answer.find(g, idx + 1)
            s = "".join(p)
        else:
            c += 1
    # Game over (6 misses) or board solved: expose the whole answer.
    if c - 6 == 0 or s == answer:
        p = list(answer.upper())
    return {"p": p, "h": h, "c": c, "s": s.lower()}
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| true
|
b5b4c7d2a0e719279f2d3cd706e2f3a26b838bf8
|
Python
|
Voljega/BestArcade
|
/conf.py
|
UTF-8
| 416
| 3.046875
| 3
|
[] |
no_license
|
def cleanString(string):
    """Trim trailing newline/CR/space characters and any leading whitespace."""
    trimmed = string.rstrip('\n\r ')
    return trimmed.lstrip()
def loadConf(confFile):
    """Parse *confFile* ("key=value" lines, '#' comments) into a dict.

    Lines starting with '#', and lines that do not contain exactly one '=',
    are ignored. Keys and values are trimmed with cleanString().
    """
    conf = dict()
    # Fix: use a context manager so the handle is closed even if parsing
    # raises (the original leaked it on error), and iterate the file lazily
    # instead of materialising readlines().
    with open(confFile, 'r', encoding="utf-8") as fh:
        for line in fh:
            if not line.startswith('#'):
                confLine = line.split("=")
                if len(confLine) == 2:
                    conf[cleanString(confLine[0])] = cleanString(confLine[1])
    return conf
| true
|
de4438e5df82fff40baacf0ea8b16ad2c207bacc
|
Python
|
manojgudi/yelp
|
/function_testing/regexp.py
|
UTF-8
| 1,195
| 3.1875
| 3
|
[] |
no_license
|
#script for detecting functions defined but not used in scilab
import re
file_name = 'C:\Users\kushal\Documents\python programs\scilab.txt'
file_obj = open(file_name, 'r')
file_data = file_obj.read()
file_obj.close()
def useless(file_data):
    """Return names of scilab functions declared in *file_data* but never
    referenced anywhere else (the name occurs exactly once: its declaration)."""
    unused = []
    # A declaration looks like 'function <ret> = <name>(...)'; capture
    # everything up to the opening parenthesis.
    for decl in re.findall('function [^(]*', file_data):
        name = re.findall("=[^(]*", decl)[0][1:].strip()
        # Exactly one occurrence in the whole source == only the declaration.
        if file_data.count(name) == 1:
            unused.append(name)
    return unused
def var(file_data):
    """Return [{'var_name': ..., 'line_no': ...}] for variables assigned
    exactly once in the scilab source *file_data* (declared, never reused).

    NOTE(review): the occurrence test uses substring count(), so a name that
    is a substring of another identifier is counted too; line matching below
    is likewise substring-based.
    """
    ret=[]
    temp_names=[]
    # An assignment target: identifier followed by optional spaces and '='.
    var_declaration = '[a-zA-Z_][a-zA-Z0-9_]* *='
    results = re.findall(var_declaration, file_data)
    for result in results:
        name = result.replace("=","").strip()
        count = file_data.count(name)
        if count==1:
            temp_names.append(name)
    # Second pass: locate the (single) line each unused name appears on.
    line_no=1
    for line in file_data.split("\n"):
        for name in temp_names:
            if name in line:
                ret.append({'var_name':name,'line_no':line_no})
        line_no+=1
    return ret
print 'Unused variables :',var(file_data)
print '\nUnused functions :',useless(file_data)
| true
|
a14903aa0d29f522f160f5139e5015403fa3a396
|
Python
|
2014arki/Assignments
|
/untitled/Lesson_18_02_16.py
|
UTF-8
| 873
| 2.84375
| 3
|
[] |
no_license
|
#! /usr/bin/env python
"""
"""
from __future__ import division, print_function
def fasta_reader(fp):
    """Parse FASTA records from the open file handle *fp*.

    NOTE(review): unimplemented stub -- intended behavior inferred from the
    name only; confirm before relying on it.

    :param fp: open file object (presumably positioned at FASTA data)
    :return: not implemented (always None)
    """
    pass
def hamming(seq1, seq2):
    """Return the Hamming distance between two equal-length sequences.

    :type seq1: str
    :param seq1: first sequence
    :type seq2: str
    :param seq2: second sequence
    :raises ValueError: if the sequences differ in length
    :return: number of positions at which the sequences differ
    """
    if len(seq1) != len(seq2):
        # Fix: the original raised ValueError("") with an empty, unhelpful
        # message; say what went wrong.
        raise ValueError("sequences must have equal length: %d != %d"
                         % (len(seq1), len(seq2)))
    return sum(a != b for a, b in zip(seq1, seq2))
def matrix_mul(matr1, matr2):
    """Multiply two matrices given as lists of row-lists.

    NOTE(review): incomplete -- this validates shapes and allocates a result
    container but never computes the product and implicitly returns None.
    Note also that both comprehension clauses reuse the loop variable ``i``,
    producing a flat list of nrow_matr1 * ncol_matr2 cells rather than a
    grid, and that ``xrange`` makes this Python 2 only.
    """
    nrow_matr1, ncol_matr1 = len(matr1), len(matr1[0])
    nrow_matr2, ncol_matr2 = len(matr2), len(matr2[0])
    # Inner dimensions must agree for the product to exist.
    if ncol_matr1 != nrow_matr2:
        raise ValueError("")
    matrix_mult_res = [[None] for i in xrange(ncol_matr2) #list comprehension nested inside a list comprehension
                       for i in xrange(nrow_matr1)]
    # Helper: extract column j of a row-major matrix.
    def get_col(matr, j):
        return [row[j] for row in matr]
| true
|
02bcecef95303dab9509cd37fd935dd3bcaca57f
|
Python
|
ixxra/projecteuler
|
/problem040/problem40.py
|
UTF-8
| 333
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env python2
def seq():
    """Yield successive decimal digits of the concatenation 1 2 3 ... :
    1, 2, ..., 9, 1, 0, 1, 1, 1, 2, ... (Champernowne digit stream)."""
    n = 1
    while True:
        for ch in str(n):
            yield int(ch)
        n += 1
prod = 1
for i, s in enumerate(seq()):
if i in (0, 9, 99, 999, 9999, 99999):
prod *= s
if i == 999999:
prod *= s
break
print prod
| true
|
32c3fc8607a234c9327d3daa036684461d034807
|
Python
|
80pctsols/breaker
|
/breaker/test_breaker.py
|
UTF-8
| 2,774
| 3.15625
| 3
|
[] |
no_license
|
from .breaker import (
CircuitBreaker,
CircuitBreakerException
)
import datetime
import time
# Functions used in tests
def func(x1, x2, x3=10):
    """Toy payload for the breaker tests: sum of the three arguments."""
    total = x1 + x2 + x3
    return total
def error_func():
    """Deliberately raise IndexError by indexing past the end of an empty list."""
    empty = []
    return empty[10]
def error2_func():
    """Deliberately raise a bare Exception (second failure mode for tests)."""
    raise Exception
# Class used to test cb use with class
class MyClassTester(object):
    """Demonstrates wrapping instance methods with per-method CircuitBreakers."""
    def __init__(self):
        # Maps method name -> its CircuitBreaker instance.
        self.cbs = {}
    def _func(self):
        # Trivial payload protected by the breaker.
        return 10 * 10
    def func(self):
        """Public entry point: call _func through its circuit breaker."""
        return self.get_cb(self._func).call()
    def get_cb(self, function):
        """Return (lazily creating) the CircuitBreaker for *function*."""
        func_name = function.__name__
        if func_name in self.cbs:
            return self.cbs[func_name]
        self.cbs[func_name] = CircuitBreaker(function)
        return self.cbs[func_name]
# Test functions
def test_call():
    # call() must forward positional args (and defaults) to the wrapped function.
    cb = CircuitBreaker(func)
    assert cb.call(1, 2) == func(1, 2)
    assert cb.call(10, 20, 30) == func(10, 20, 30)
def test_reset():
    # reset() must zero the failure counter.
    cb = CircuitBreaker(func)
    cb.num_failures = 10
    cb.reset()
    assert cb.num_failures == 0
def test_failure():
    # failure() must increment the failure counter by exactly one.
    cb = CircuitBreaker(func)
    old_failures = cb.num_failures
    cb.failure()
    assert old_failures + 1 == cb.num_failures
def test_closed_breaker():
    # Once failures exceed the allowance, call() raises instead of calling through.
    cb = CircuitBreaker(func)
    cb.num_failures = cb.failures_allowed + 1
    try:
        cb.call(1, 2)
        assert False
    except CircuitBreakerException:
        assert True
def test_error_functions():
    # The wrapped IndexError propagates until the breaker opens (after the
    # 11th attempt here); afterwards CircuitBreakerException is raised instead.
    cb = CircuitBreaker(error_func)
    for i in range(12):
        try:
            cb.call()
        except CircuitBreakerException:
            assert i > 10
        except IndexError:
            assert i <= 10
def test_not_half_open():
    # A fresh breaker must not report half-open.
    cb = CircuitBreaker(func)
    assert cb.is_half_open() is False
def test_half_open():
    # An open breaker whose last failure predates the timeout is half-open.
    cb = CircuitBreaker(error_func)
    cb.num_failures = 20
    double_ago = past(2 * cb.timeout)
    cb.last_failure = double_ago
    assert cb.is_half_open() is True
def test_half_open_close():
    # After the timeout a half-open breaker retries the real call once; the
    # retry's failure re-opens it.
    cb = CircuitBreaker(error_func, 1, .5)
    for i in range(2):
        try:
            cb.call()
            assert False
        except Exception:
            assert True
    time.sleep(1)
    assert cb.is_half_open() is True
    # Should try the call again if its half open
    try:
        cb.call()
        assert False
    except IndexError:
        assert True
    except CircuitBreakerException:
        assert False
    assert cb.state() == cb.OPEN
    assert cb.is_half_open() is False
def test_class_use():
    # Breakers attached to bound methods behave like those wrapping functions.
    myclass = MyClassTester()
    assert myclass.func() == 100
    myclass.cbs['_func'].num_failures = 20
    try:
        myclass.func()
        assert False
    except CircuitBreakerException:
        assert True
def past(seconds):
    """Return the (naive local) datetime *seconds* seconds before now."""
    delta = datetime.timedelta(seconds=seconds)
    return datetime.datetime.now() - delta
| true
|
32b8e29b705f724b702ee4a1139637e4d676f1c9
|
Python
|
kevinhaube/LoLDataVisualizer
|
/Evaluation.py
|
UTF-8
| 1,680
| 3.421875
| 3
|
[] |
no_license
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def clear_false_index(*data_frames):
    """Drops the phantom first column Pandas creates when a frame was saved
    to CSV together with its index.
    WARNING: ONLY USE ONCE PER DATA FRAME.

    :param data_frames: Data Frames (modified in place)
    :return: None
    """
    for frame in data_frames:
        first_col = frame.columns[0]
        frame.drop(first_col, axis=1, inplace=True)
def make_sub_set(keys, data_frame):
    """Creates a subset frame containing only the columns named in *keys*.

    :param keys: List of String Keys
    :param data_frame: Data Frame
    :return: Data Frame
    """
    subset = data_frame[keys]
    # Re-assert the column labels explicitly, as the original did.
    subset.columns = keys
    return subset
def simple_feature_scale(key, data_frame):
    """Normalizes column *key* via Simple Feature Scaling: Col / Col.max().

    :param key: String Key
    :param data_frame: Data Frame
    :return: Data Frame Column
    """
    column = data_frame[key]
    return column / column.max()
def create_bins(key, data_frame, div):
    """Evenly spaced bin edges spanning column *key*, *div* edges in total.

    :param key: String Key
    :param data_frame: Data Frame
    :param div: Integer
    :return: Numpy Linspace
    """
    lo = min(data_frame[key])
    hi = max(data_frame[key])
    return np.linspace(lo, hi, div)
def create_binned_column(key, bin_names, data_frame):
    """Creates a binned column
    :param key: String Key
    :param bin_names: List of String Keys
    :param data_frame: Data Frame
    :return: Data Frame Column
    """
    # len(bin_names) + 1 edges yield exactly one interval per label;
    # include_lowest closes the first interval on the left so the column
    # minimum is binned too.
    return pd.cut(data_frame[key],
                  create_bins(key, data_frame, len(bin_names) + 1),
                  labels=bin_names, include_lowest=True)
| true
|
4b5c377a7553581dace3792912de3de252a98911
|
Python
|
arifwc/JDIH_JABAR
|
/scraping_py/pandangarankab_scraping.py
|
UTF-8
| 815
| 2.53125
| 3
|
[] |
no_license
|
import json
import requests
import re
urls=['https://web.pangandarankab.go.id/public/jdih/dokumen/list/1/peraturan-daerah?_=1583154735707',
'https://web.pangandarankab.go.id/public/jdih/dokumen/list/4/keputusan-bupati?_=1583154402551',
'https://web.pangandarankab.go.id/public/jdih/dokumen/list/3/peraturan-bupati?_=1583153982973']
peraturan=[]
for url in urls:
r = requests.get(url)
jsondata = json.loads(r.content)
peraturan += jsondata['data']
data_file = open('csv/pangandarankab.csv', 'w')
count=0
for perda in peraturan:
if count==0:
data_file.write('peraturan\n')
count+=1
if perda[3]:
data_file.write("{}\n".format(perda[3]))
else:
data_file.write("{}\n".format(re.findall(r"[^\s*].*[^\s*]",perda[4])))
data_file.close()
| true
|
c8e68e4929b56a5ba5e1d25091b01e4ee442926c
|
Python
|
dr-dos-ok/Code_Jam_Webscraper
|
/solutions_python/Problem_116/688.py
|
UTF-8
| 3,211
| 3.015625
| 3
|
[] |
no_license
|
'''
Created on 13/04/13
Code Jam 2013 Qualification Round A
@author: manolo
'''
import sys
# All puzzle input is read from stdin; redirect the .in file when running.
ifile = sys.stdin
def r():
    """Read one line from the input stream, dropping its final character
    (the trailing newline)."""
    line = ifile.readline()
    return line[:-1]
# Output file that accumulates one answer line per test case.
ofile = open('./a-large.out', 'w')
def w(what):
    """Append *what* plus a newline to the answer file."""
    ofile.write(''.join((what, '\n')))
def check_line(l):
    """Classify one 4-cell tic-tac-toe line.

    'T' is a wildcard that counts for both players.

    :param l: iterable of 4 cells, each 'X', 'O', 'T' or '.'
    :return: 'X' or 'O' if that player wins the line, '.' if the line
             still contains an empty cell, or None (implicit) if the
             line is complete but won by nobody.
    :raises ValueError: if both players would win the same line, which
             is impossible on a legal board.
    """
    x_wins = True
    o_wins = True
    for c in l:
        if c == '.':
            # Any empty cell means the line is undecided.
            return '.'
        x_wins = x_wins and (c == 'X' or c == 'T')
        o_wins = o_wins and (c == 'O' or c == 'T')
    if x_wins and o_wins:
        # The original used a bare `raise` with no active exception,
        # which itself crashes with RuntimeError; raise explicitly.
        raise ValueError('line {!r} claims a win for both X and O'.format(l))
    if x_wins:
        return 'X'
    if o_wins:
        return 'O'
# Driver: for each of t cases read a 4x4 board, test every row, column
# and both diagonals with check_line, and report the verdict.
t = int(r())
for c in range(1, t+1):
    line = [None] * 4
    someone_won = False
    game_not_completed = False
    # Read the 4 rows of the board.
    for i in range(4):
        line[i] = list(r())
    # Rows.
    for i in range(4):
        res = check_line(line[i])
        if res == 'X':
            w('Case #' + str(c) + ': X won')
            someone_won = True
            break
        if res == 'O':
            w('Case #' + str(c) + ': O won')
            someone_won = True
            break
        if res == '.':
            game_not_completed = True
    # Columns (only if no row already decided the game).
    if not someone_won:
        for i in range(4):
            col = [line[0][i], line[1][i], line[2][i], line[3][i]]
            res = check_line(col)
            if res == 'X':
                w('Case #' + str(c) + ': X won')
                someone_won = True
                break
            if res == 'O':
                w('Case #' + str(c) + ': O won')
                someone_won = True
                break
            if res == '.':
                game_not_completed = True
    # Main diagonal.
    if not someone_won:
        diag1 = [line[0][0], line[1][1], line[2][2], line[3][3]]
        res = check_line(diag1)
        if res == 'X':
            w('Case #' + str(c) + ': X won')
            someone_won = True
        if res == 'O':
            w('Case #' + str(c) + ': O won')
            someone_won = True
        if res == '.':
            game_not_completed = True
        # Anti-diagonal (still nested: skipped if diag1 decided it).
        if not someone_won:
            diag2 = [line[0][3], line[1][2], line[2][1], line[3][0]]
            res = check_line(diag2)
            if res == 'X':
                w('Case #' + str(c) + ': X won')
                someone_won = True
            if res == 'O':
                w('Case #' + str(c) + ': O won')
                someone_won = True
            if res == '.':
                game_not_completed = True
    # No winner anywhere: either still in progress or a draw.
    if not someone_won:
        if game_not_completed:
            w('Case #' + str(c) + ': Game has not completed')
        else:
            w('Case #' + str(c) + ': Draw')
    # Consume the blank separator line between cases.
    trash = r()
# BUG FIX: the original said `ofile.close` (no parens), which evaluates
# the bound method and discards it — the file was never explicitly closed.
ofile.close()
| true
|
80a8a4e39cd502a519deb60fb2fd705048089f68
|
Python
|
wagllgaw/Email
|
/flask_app/app.py
|
UTF-8
| 4,879
| 2.546875
| 3
|
[] |
no_license
|
import json
import requests
import socket
import time
import cPickle as pickle
import pandas as pd
import re
import StringIO
from HTMLParser import HTMLParser
from flask import Flask, request, render_template
from processor import Processor
app = Flask(__name__)
## Welcome to the Email Rank flask app. This runs a persistent flask app that takes
## email input from users and returns predictions based on the models trained in the
## Ipython notebooks found in this directory
## Please refer to the 'emailRank_home.html' for the base page that is modified here
## Global variables
PORT = 8080        # port the development server listens on
MODELTO = None     # recipient-title classifier, loaded in the __main__ block
MODELFROM = None   # sender-title classifier, loaded in the __main__ block
PROCESSOR = None   # text -> feature-matrix transformer, loaded in the __main__ block
VERBOSE = True     # when True, predict() echoes its input to stdout
## Home directory, contains a page that asks for email & a submit box
@app.route('/')
def index():
    """Render the landing page: a title, an email textarea, and a submit
    button, injected into the shared emailRank_home.html template."""
    # BUG FIX: the original opened the <form> tag twice in a row,
    # producing invalid nested-form HTML; keep a single opening tag.
    data = '''
    <h1>Welcome to Email Rank <br> </h1></h2>This tool will help you gauge the authority of your email </h2>
    <form action="/predict" method='POST' >
    <textarea name="user_input" cols="80" rows="20" >Please insert email text here...</textarea>
    <br><input type="submit" />
    '''
    return render_template('emailRank_home.html').format(data)
## Predict method used, receives data from the index method and returns a page with predictions
@app.route('/predict', methods=['POST'])
def predict():
    # Score the submitted email text with both title models and render
    # the results page.  Reads the POSTed 'user_input' field, coerces it
    # to a plain Python 2 byte string, vectorizes it with the
    # module-level PROCESSOR, and runs the MODELTO / MODELFROM
    # classifiers loaded at startup.
    text = request.form['user_input']
    # Round-trip through decode/encode to drop any bytes that are not
    # valid UTF-8 before handing the text to the feature processor.
    text = str(text.decode(errors='ignore').encode('utf-8', errors='ignore'))
    if VERBOSE:
        print 'predict called on:'
        print text
        print type(text)
    # PROCESSOR.transform expects an iterable of documents, hence the
    # one-element Series.
    df = pd.Series([text])
    X = PROCESSOR.transform(df)
    resultTO = MODELTO.predict(X)      # predicted title of the recipient
    resultFROM = MODELFROM.predict(X)  # predicted title of the sender
    data = '''
    <h2>Prediction model results:<br> <h2>Sent from: {0}</h2>
    <img src='/Employee.jpg'>
    <br> <h2>Sent to: {1}</h2>
    <img src='/Employee.jpg'></h2><br>
    <form action="/predict" method='POST' >
    <textarea name="user_input" cols="80" rows="15" >{2}</textarea>
    <br><input type="submit" />
    <br><br><br>
    '''.format(resultFROM, resultTO, text)
    return render_template('emailRank_home.html').format(data)
## About page
@app.route('/about')
def about():
    """Render the static About page describing what Email Rank does."""
    blurb = '''Email Rank is a tool to help better adjust our email to the expectations and norms of the corporate world.
    The app uses the public Enron email database of over 100k emails to predict the corporate title of the sender and sendee of any email based on its text.
    Using the prediction and importance scores, the app provides insight into why the email ranks the way is does and how you can improve the way people will perceive you.
    '''
    page = render_template('emailRank_home.html')
    return page.format(blurb)
## Contact page
@app.route('/contact')
def contact():
    """Render the static Contact page with the author's bio and links."""
    bio = ''' Created by Alex Bergin as a capstone project for <a href="http://www.galvanize.com/courses/data-science/">Galvanize Data Science.</a><br>
    <img src='/Alex.jpg' height="400" width="400"><br>
    Alex has 5+ years of experience at a premier global business consulting firm. There he developed a reputation for extensive quantitative skills combined with a history of successful client relationships.
    He has been recognized as a leader in pricing/contracting analysis having lead teams working on predictive analysis for reimbursement rates and optimization of contract terms.
    Alex is focused on continuing to applying his quantitative skills to improve the solutions to complex business problems.
    <br>
    <a href="https://docs.google.com/document/d/1-VUpr-vOjXB8WmOJQY0qX6n0_ZoWFlZK1k9p6gQqF84/edit?usp=sharing">Resume</a>
    <br><span class="email">atbergin (at) gmail (dot) com</span>
    '''
    page = render_template('emailRank_home.html')
    return page.format(bio)
## Picture fetching code
@app.route('/Alex.jpg')
def alex():
    """Serve the author's profile photo as raw JPEG bytes."""
    # BUG FIX: JPEG data is binary, so open with 'rb' — the default text
    # mode corrupts the bytes on platforms that translate newlines.
    with open('data/images/alex.jpg', 'rb') as pic:
        return pic.read(), 200, {'Content-Type': 'image/jpg'}
@app.route('/Employee.jpg')
def Employee():
    """Serve the generic employee image as raw JPEG bytes."""
    # BUG FIX: binary file — open with 'rb' (text mode corrupts JPEG
    # bytes on newline-translating platforms).
    with open('data/images/Employee.jpg', 'rb') as pic:
        return pic.read(), 200, {'Content-Type': 'image/jpg'}
## Loads the model pickles for use in the app
if __name__ == '__main__':
    # Script entry point: unpickle the text processor and the two
    # trained classifiers into the module-level globals, then start the
    # development server.
    print '#################### SERVER START ##############################'
    with open('data/model_pickles/processor.pkl') as f:
        PROCESSOR = pickle.load(f)
    #PROCESSOR = None
    with open('data/model_pickles/modelTO.pkl') as f:
        MODELTO = pickle.load(f)
    with open('data/model_pickles/modelFROM.pkl') as f:
        MODELFROM = pickle.load(f)
    print 'Model Pickles Loaded Successfully'
    # Start Flask app
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network — confirm this never runs in production.
    app.run(host='0.0.0.0', port=PORT, debug=True)
| true
|
e64cda17aab916d64ef21aceb64a49479422fd9a
|
Python
|
mashago/study
|
/python/print.py
|
UTF-8
| 50
| 2.640625
| 3
|
[] |
no_license
|
a = 100;
if (a >= 0): print a
else: print -a
| true
|