| repo_name (string, 6-97 chars) | path (string, 3-341 chars) | text (string, 8-1.02M chars) |
|---|---|---|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex044_Gerenciador de Pagamentos.py
|
<gh_stars>1-10
print('|================= <NAME> =================|')
preco = float(input(' Informe o preço das suas compras R$: '))
print('''|================================================|
| Formas de pagamentos |
| |
| [1] à vista dinheiro/cheque: 10% de desconto |
| [2] à vista no cartão: 5% de desconto |
| [3] em até 2x no cartão: preço normal |
| [4] 3x ou mais no cartão: 20% de juros |
==================================================''')
opcao = int(input(''))
if opcao == 1:
valorfinal = preco - (preco / 100 * 10)
elif opcao == 2:
valorfinal = preco - (preco / 100 * 5)
elif opcao == 3:
valorfinal = preco
elif opcao == 4:
parcela = int(input('Em quantas parcelas? '))
valorfinal = preco + (preco / 100 * 20)  # 20% interest on the full price
valorparcela = valorfinal / parcela  # each installment includes the interest
print('O valor de sua parcela é de R$:{:.2f}'.format(valorparcela))
print('O valor de sua compra é de R$:{:.2f}'.format(valorfinal))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex006.py
|
n1 = int(input('digite um numero:'))
print('o dobro de {} vale {}'.format(n1, (n1*2)))
print('o triplo de {} vale {}'.format(n1, (n1*3)))
print('A raiz quadrada de {} vale {:.2f}'.format(n1, (n1**(1/2))))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex009.py
|
vetnuminpar = []
vetnumpar = []
intervalo = int(input('Informe o numero do intervalo: '))
for numero in range(intervalo):
if numero % 2 == 0:
vetnumpar.append(numero)
else:
vetnuminpar.append(numero)
print('-=-'*45)
print('do intervalo os numeros pares são: {}'.format(vetnumpar))
print('')
print('do intervalo os numeros impares são: {}'.format(vetnuminpar))
print('-=-'*45)
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex012.py
|
<gh_stars>1-10
opcao = 'S'
while opcao == 'S':
preco = float(input('Informe o valor do carro: '))
ano = int(input('Informe o ano do carro: '))
if ano <= 2000:
novopreco = (preco / 100 * -12) + preco
elif ano > 2000:
novopreco = (preco / 100 * -7) + preco
print('O valor do carro a ser pago será de R$:{}{:.2f}{}'.format('\033[31m', novopreco, '\033[m'))
print('')
opcao = str(input('Deseja continuar calculando os descontos? (S)sim e (N)não: ')).upper().strip()
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex004b.py
|
a = input('digite algo: ')
print('Qual é o tipo primitivo? {}'.format(type(a)))
print('só tem espaços? {}'.format(a.isspace()))
print('Ele é alfabético? {}'.format(a.isalpha()))
print('Ele é numerico? {}'.format(a.isnumeric()))
print('Ele é alfanumerico? {}'.format(a.isalnum()))
print('Ele está em minúsculas? {}'.format(a.islower()))
print('Ele está em maiúsculas? {}'.format(a.isupper()))
print('Ele está capitalizada? {}'.format(a.istitle()))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex004.py
|
a = input('Digite algo: ')
print('o tipo primitivo é \033[31m {} \033[m'.format(type(a)))
print('só tem espaços? \033[31m {} \033[m'.format(a.isspace()))
print('É numerico? \033[31m {} \033[m'.format(a.isnumeric()))
print('É alfabético? \033[31m {} \033[m'.format(a.isalpha()))
print('É alfanumerico? \033[31m {} \033[m'.format(a.isalnum()))
print('Está em maiúscula? \033[31m {} \033[m'.format(a.isupper()))
print('Está em minúsculas? \033[31m {} \033[m'.format(a.islower()))
print('Está capitalizada? \033[31m {} \033[m'.format(a.istitle()))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex008.py
|
<filename>Exercicios/ex008.py
n1 = float(input('Digite uma distancia em metros: '))
print('a distancia {}m corresponde a:'.format(n1))
print('{:.3f}km'.format(n1/1000))
print('{}cm'.format(n1*100))
print('{}mm'.format(n1*1000))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex011.py
|
l = float(input('qual é a largura da sua parede? '))
h = float(input('qual é a altura da sua parede? '))
a = l * h
print('a sua parede {}X{} tem {}m² de area!'.format(l, h, a))
print('será necessario {}L de tinta para pintar essa parede'.format(a/2))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex016_Desafio final.py
|
<filename>SATC_exercicios/ex016_Desafio final.py
import math
# Build a calculator with [+] addition, [-] subtraction, [*] multiplication, [/] division, [**] exponentiation,
# [raiz] square root and [%] percentage. It should only stop when the user asks; use the math library (if you like).
# github test
def raiz():
try:  # error handling; the result is computed separately
numero1 = float(input('Informe o numero para descobrir sua raiz: '))
resultado = math.sqrt(numero1)
return resultado
except ValueError:
print('Somente numeros')
def porcentagem():
try:  # error handling; the result is computed separately
numero1 = float(input('Informe a porcentagem: '))
numero2 = float(input('Informe o numero que deseja saber a porcentagem: '))
resultado = (numero2 / 100) * numero1
return resultado
except ValueError:
print('Somente numeros')
def adicao():
resultado = numero1 + numero2
return resultado
def subtracao():
resultado = numero1 - numero2
return resultado
def multiplicacao():
resultado = numero1 * numero2
return resultado
def divisao():
resultado = numero1 / numero2
return resultado
def potenciacao():
resultado = math.pow(numero1, numero2)
return resultado
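# NOTE: adicao/subtracao/multiplicacao/divisao/potenciacao read numero1 and
# numero2 as globals; the main loop below assigns them before calling.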
while True:
print(f''' |{' Operação ':=^30}|
| [+] Adição |
| [-] Subtração |
| [*] Multiplicação |
| [/] Divisão |
| [**] Potenciação |
| [raiz] Raiz Quadrada |
| [%] Porcentagem |
| [S] Sair |
|{'=' * 30}|''')
operacao = str(input()).strip()
if operacao == 'raiz':
print('{:.1f}'.format(raiz()))
elif operacao == '%':
print(porcentagem())
elif operacao.upper() == 'S':
print('{: ^30}'.format('Fim do Programa'))
break
else:  # validate the operation
if (operacao == '+') or (operacao == '-') or (operacao == '*') or (operacao == '/') or (operacao == '**'):
try:  # error handling
numero1 = float(input('Informe o 1° numero: '))
numero2 = float(input('Informe o 2° numero: '))
if operacao == '+':  # dispatch to the functions
print(adicao())
elif operacao == '-':
print(subtracao())
elif operacao == '*':
print(multiplicacao())
elif operacao == '/':
print(divisao())
elif operacao == '**':
print(potenciacao())
except ValueError:
print('{: ^36}'.format('Somente numeros'))
else:
print('{: ^36}'.format('Operação invalida'))
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex008.py
|
custo = float(input('Custo do produto: '))
porcento = float(input('Informe o porcento a ser adicionado ao valor: '))
total = custo / 100 * porcento + custo
print('')
print('O produto que custou R$:{} com o acrescimo de {}%. Valor de venda R$:{}{:.2f}{}'.format(custo, porcento, '\033[31m', total, '\033[m'))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex024.py
|
cit = str(input('Em que cidade você nasceu? ')).strip()
print(cit[:5].upper() == 'SANTO')
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex029.py
|
<gh_stars>1-10
velocidade = int(input('Informe sua atual velocidade!: '))
if velocidade > 80:
print('Você ultrapassou a velocidade maxima permitida!!')
print('O valor da sua multa foi de R$:{}'.format((velocidade-80)*7))
else:
print('Você está dentro da velocidade permitida -> 80km/h')
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex011.py
|
numero = int(input('Informe um numero: '))
if 100 <= numero <= 200:
print('O numero {} está dentro do intervalo de 100 a 200'.format(numero))
else:
print('O numero {} não esta no intervalo entre 100 e 200'.format(numero))
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex039_Alistamento Militar.py
|
from datetime import date
sexo = str(input('Informe seu sexo (Feminino) ou (Masculino): ')).lower().strip()
print('')
if sexo == 'masculino':
nacimento = int(input('Informe o ano de nascimento: '))
data = date.today().year
idade = data - nacimento
print('Nasceu em {} possui {} ano(s) no ano de {}'.format(nacimento, idade, data))
print('')
if idade < 18:
print('Ainda faltam {} anos para o alistamento'.format(18 - idade))
print('O ano de alistamento é {}'.format(data + (18 - idade)))
elif idade > 18:
print('Você deve se alistar imediatamente')
print('Você já deveria ter se alistado, se passaram {} anos'.format(idade - 18))
print('Ano do alistamento foi {}'.format(data - (idade - 18)))
elif idade == 18:
print('Você deve se alistar imediatamente')
else:
print('Você é mulher, não precisa fazer alistamento!!')
|
GabrielMazzuchello/Curso-Em-Video
|
SATC_exercicios/ex006.py
|
<gh_stars>1-10
celcius = float(input('Informe a temperatura em graus C° que deseja converter para F°: '))
print('A sua temperatura corresponde a \033[31m{}\033[m F°'.format((celcius * 9 / 5) + 32))
|
GabrielMazzuchello/Curso-Em-Video
|
Aulas/aula06a.py
|
n1 = int(input('Digite um numero:'))
n2 = int(input('Digite outro numero:'))
s = n1 + n2
print('a soma entre {} e {} é {}'.format(n1, n2, s))
print(f'a soma é: {n1+n2}')
|
GabrielMazzuchello/Curso-Em-Video
|
Exercicios/ex033_Maior e menor valores.py
|
num1 = int(input('Primeiro valor: '))
num2 = int(input('Segundo valor: '))
num3 = int(input('Terceiro valor: '))
# find the smallest number
menor = num3
if num2 < num3 and num2 < num1:
menor = num2
if num1 < num3 and num1 < num2:
menor = num1
# find the largest number
maior = num3
if num2 > num1 and num2 > num3:
maior = num2
if num1 > num2 and num1 > num3:
maior = num1
print('O menor numero é {}'.format(menor))
print('O maior numero é {}'.format(maior))
|
chiww/HotDog
|
server/parser.py
|
<filename>server/parser.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parsing and processing.
"""
from __future__ import absolute_import
from __future__ import print_function
import os, sys
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, PROJECT_PATH)
import re
import time
from datetime import date, timedelta
import base64
import traceback
class Parser(object):
def __init__(self):
"""
work for data, not for stdout.
"""
self.core = ParserCore()
def _get_parser_func(self, method):
try:
return getattr(self.core, method)
except Exception as e:
print("Can not find parser function, use common function!")
return self.core.common
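# run(): base64-decode the payloads, look up the parser method by name, apply
# it to each data item's content, and rebuild dataset['data'] from the results.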
def run(self, dataset, method, *args, **kwargs):
parser_func = self._get_parser_func(method)
dataset = self._b64decode(dataset)
kwargs.update({'dataset': dataset})
result = []
for data in dataset.pop('data'):
_from = data['from']
_content = data['content']
kwargs.update({'data': data})
for parser_result in parser_func(_content, *args, **kwargs):
result.append({'from': _from, 'content': parser_result})
dataset['data'] = result
return dataset
def _b64decode(self, dataset):
"""
Restore the data (base64 decode).
:param dataset:
:return: <list>
return example: [{'from': <DATA FROM:string>, 'content': <DETAIL:(string|dict|list)>}]
"""
if not dataset['data']:
dataset['data'] = []
assert isinstance(dataset['data'], list), "data['data'] must be <list>, but <%s>" % type(dataset['data'])
if self._is_b64encode_by_rule(dataset):
for data in dataset['data']:
data['content'] = base64.b64decode(data['content'])
try:
data['content'] = data['content'].decode('utf-8')
except Exception as e:
print(traceback.format_exc())
return dataset
@staticmethod
def _is_b64encode_by_rule(dataset):
# only stdout and filestrings is the b64encode, so, if then, decode by base64:
_rule_b64_method = ['stdout', 'filestrings']
_action_method = dataset['source']['action']['method']
_format_method = dataset['source'].get('format', {}).get('method', '')
if _action_method in _rule_b64_method or _format_method in _rule_b64_method:
return True
return False
class ParserCore(object):
@staticmethod
def common(cmd_stdout, *args, **kwargs):
return [cmd_stdout]
def filestats(self, cmd_stdout, *args, **kwargs):
"""
Parse file-status output such as:
os.stat_result(st_mode=33261, st_ino=2990121, st_dev=64768, st_nlink=1, st_uid=0, st_gid=0, st_size=43408,
st_atime=1616555896, st_mtime=1585714781, st_ctime=1614931154)
:param cmd_stdout:
:return:
"""
_data = []
pattern = r'os.stat_result\(st_mode=(?P<st_mode>\d+), st_ino=(?P<st_ino>\d+), st_dev=(?P<st_dev>\d+), ' \
r'st_nlink=(?P<st_nlink>\d+), st_uid=(?P<st_uid>\d+), st_gid=(?P<st_gid>\d+), ' \
r'st_size=(?P<st_size>\d+), st_atime=(?P<st_atime>\d+), st_mtime=(?P<st_mtime>\d+), ' \
r'st_ctime=(?P<st_ctime>\d+)'
_parser_data = re.match(pattern, cmd_stdout, re.I).groupdict()
_parser_data['st_atime'] = self.timestamp_to_string(int(_parser_data['st_atime']))
_parser_data['st_mtime'] = self.timestamp_to_string(int(_parser_data['st_mtime']))
_parser_data['st_ctime'] = self.timestamp_to_string(int(_parser_data['st_ctime']))
return [_parser_data]
@staticmethod
def lsof(cmd_stdout, *args, **kwargs):
"""
cmd must have -F param, for example: lsof -p 4050 -F
These are the fields that lsof will produce. The single
character listed first is the field identifier.
a file access mode
c process command name (all characters from proc or
user structure)
C file structure share count
d file's device character code
D file's major/minor device number (0x<hexadecimal>)
f file descriptor (always selected)
F file structure address (0x<hexadecimal>)
G file flaGs (0x<hexadecimal>; names if +fg follows)
g process group ID
i file's inode number
K tasK ID
k link count
l file's lock status
L process login name
m marker between repeated output
M the task comMand name
n file name, comment, Internet address
N node identifier (ox<hexadecimal>
o file's offset (decimal)
p process ID (always selected)
P protocol name
r raw device number (0x<hexadecimal>)
R parent process ID
s file's size (decimal)
S file's stream identification
t file's type
T TCP/TPI information, identified by prefixes (the
`=' is part of the prefix):
QR=<read queue size>
QS=<send queue size>
SO=<socket options and values> (not all dialects)
SS=<socket states> (not all dialects)
ST=<connection state>
TF=<TCP flags and values> (not all dialects)
WR=<window read size> (not all dialects)
WW=<window write size> (not all dialects)
(TCP/TPI information isn't reported for all supported
UNIX dialects. The -h or -? help output for the
-T option will show what TCP/TPI reporting can be
requested.)
u process user ID
z Solaris 10 and higher zone name
Z SELinux security context (inhibited when SELinux is disabled)
0 use NUL field terminator character in place of NL
1-9 dialect-specific field identifiers (The output
of -F? identifies the information to be found
in dialect-specific fields.)
:param cmd_stdout:
:return:
"""
identifier = {
'a': 'access',
'c': 'command',
'p': 'pid',
'u': 'uid',
'f': 'fd',
't': 'type',
's': 'size',
'P': 'protocol',
'T': 'TCP',
'n': 'name',
'L': 'user',
'R': 'ppid',
'g': 'gid',
'S': 'stream'
}
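# Illustrative sketch (made-up values) of the `lsof -F` stream this loop walks:
# each line is a one-letter field identifier followed by its value; a 'p' line
# starts a process record and an 'f' line starts a file record:
#   p1234
#   csshd
#   u0
#   f3
#   tREG
#   n/var/log/messages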
opfile = []
_f = dict()
_p = dict()
in_proc = False
outline = cmd_stdout.split('\n')
for of in outline:
try:
of = of.strip()
if len(of) == 0:
continue
elif len(of) == 1:
character = of
field = ""
else:
character, field = of[0], of[1:]
if character not in identifier.keys():
continue
if character == 'p':
in_proc = True
_p = dict()
_p['pid'] = field
continue
if character == 'f':
opfile.append(_f)
in_proc = False
_f = dict()
_f.update(_p)
_f['fd'] = field
continue
if in_proc:
_p[identifier[character]] = field
continue
else:
if character == 'T':
if field.startswith('ST'):
_f[identifier[character]] = field.split('=')[1]
else:
continue
else:
_f[identifier[character]] = field
continue
except Exception as e:
print(traceback.format_exc())
else:
opfile.append(_f)
return opfile
@staticmethod
def process(cmd_stdout, *args, **kwargs):
"""
Parse process information (ps -ef output), e.g.:
UID PID PPID C STIME TTY TIME CMD
root 1 0 0 04:27 ? 00:00:05 /usr/lib/systemd/systemd --switched-root --system --deserialize 21
root 2 0 0 04:27 ? 00:00:00 [kthreadd]
root 820 1 0 04:27 ? 00:00:00 /usr/sbin/gssproxy -D
rpc 823 1 0 04:27 ? 00:00:00 /sbin/rpcbind -w
dbus 829 1 0 04:27 ? 00:00:00 /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation
libstor+ 835 1 0 04:27 ? 00:00:00 /usr/bin/lsmd -d
root 836 1 0 04:27 ? 00:00:00 /usr/sbin/smartd -n -q never
:param cmd_stdout:
:return:
"""
fields = ['user', 'pid', 'ppid', 'c', 'stime', 'tty', 'time', 'cmd']
def row_parse(values, row):
# number of fixed fields before the final 'cmd' field
n = 7
if len(values) == n:
values.append(row)
return
val, o_val = row.split(' ', 1)
if val:
values.append(val)
row_parse(values, o_val)
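# row_parse recursively peels off the first 7 whitespace-separated fields;
# whatever remains (which may itself contain spaces) becomes the 'cmd' value.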
# skip empty output
if not cmd_stdout:
return None
proc = list()
c = 0
for content in cmd_stdout.split('\n'):
if c == 0:
c += 1
continue
if not content:
continue
values = list()
row_parse(values, content)
tmp = dict(zip(fields, values))
proc.append(tmp)
return proc
@staticmethod
def wlogin(cmd_stdout, *args, **kwargs):
"""
w - Show who is logged on and what they are doing.
[root@localhost collect]# w -i
17:43:57 up 37 min, 3 users, load average: 0.00, 0.03, 0.12
USER TTY FROM LOGIN@ IDLE JCPU PCPU WHAT
root :0 :0 17:15 ?xdm? 1:10 0.28s /usr/libexec/gnome-session-binary --session gnome-classic
root pts/0 :0 17:15 2:37 0.07s 0.07s bash
root pts/1 10.0.0.28 17:17 5.00s 0.27s 0.02s w -i
:param cmd_stdout:
:return:
"""
# skip empty output
if not cmd_stdout:
return None
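# NOTE: the fixed slice offsets below assume the default column layout of `w`;
# longer usernames or a different w version may shift the columns.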
login = list()
c = 0
for content in cmd_stdout.split('\n'):
if c <= 1:
c += 1
continue
if not content:
continue
tmp = dict()
tmp['user'] = content[0:8].strip()
tmp['tty'] = content[9:17].strip()
tmp['from'] = content[18:34].strip()
tmp['login_time'] = content[35:42].strip()
tmp['idle'] = content[43:50].strip()
tmp['jcpu'] = content[51:57].strip()
tmp['pcpu'] = content[58:63].strip()
tmp['what'] = content[64:].strip()
login.append(tmp)
return login
@staticmethod
def wtmp(cmd_stdout, *args, **kwargs):
"""
[root@localhost ~]# who /var/log/wtmp
root :0 2021-03-09 10:39 (:0)
root pts/0 2021-03-09 10:41 (:0)
root pts/1 2021-03-09 10:47 (172.16.58.3)
root pts/2 2021-03-09 11:07 (172.16.58.3)
root :0 2021-03-10 17:15 (:0)
root pts/1 2021-03-10 17:17 (sf0001390586la)
root pts/0 2021-03-11 09:39 (:0)
root pts/1 2021-03-11 09:40 (172.16.58.3)
root pts/2 2021-03-11 17:14 (172.16.58.3)
root :0 2021-03-11 23:19 (:0)
root pts/0 2021-03-11 23:20 (:0)
root pts/1 2021-03-11 23:22 (sf0001390586la)
root pts/2 2021-03-12 00:04 (sf0001390586la)
root pts/4 2021-03-12 09:40 (172.16.58.3)
:param output:
:return:
"""
# skip empty output
if not cmd_stdout:
return None
wt = list()
for content in cmd_stdout.split('\n'):
if not content:
continue
tmp = dict()
tmp['user'] = content[0:8].strip()
tmp['line'] = content[9:21].strip()
tmp['time'] = content[22:38].strip()
tmp['comment'] = content[39:].strip()
wt.append(tmp)
return wt
@staticmethod
def shadow(cmd_stdout, *args, **kwargs):
# skip empty output
if not cmd_stdout:
return None
sh = list()
for content in cmd_stdout.split('\n'):
if not content:
continue
tmp = dict(
zip(['username', 'password', 'last_change', 'min_change', 'max_change', 'warm',
'failed_expire', 'expiration', 'reserved'], content.split(':')))
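# the last_change field of /etc/shadow is a day count since the Unix epoch (1970-01-01)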
tmp['last_change'] = (date(1970, 1, 1) + timedelta(days=int(tmp['last_change']))).strftime('%Y-%m-%d')
sh.append(tmp)
return sh
@staticmethod
def password(cmd_stdout, *args, **kwargs):
# skip empty output
if not cmd_stdout:
return None
pa = list()
for content in cmd_stdout.split('\n'):
if not content:
continue
tmp = dict(
zip(['username', 'password', 'uid', 'gid', 'allname', 'homedir', 'shell'], content.split(':'))
)
pa.append(tmp)
return pa
@staticmethod
def netstat(cmd_stdout, *args, **kwargs):
"""
[root@localhost net]# netstat -tlunpa 2>/dev/null
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 1086/sshd
tcp 0 0 127.0.0.1:631 0.0.0.0:* LISTEN 1087/cupsd
tcp 0 0 192.168.122.1:53 0.0.0.0:* LISTEN 1447/dnsmasq
tcp 0 0 172.16.17.32:22 172.16.58.3:55766 ESTABLISHED 6218/sshd: root@pts
tcp6 0 0 :::22 :::* LISTEN 1086/sshd
tcp6 0 0 :::111 :::* LISTEN 685/rpcbind
udp 0 0 0.0.0.0:111 0.0.0.0:* 685/rpcbind
udp6 0 0 ::1:323 :::* 696/chronyd
udp6 0 0 :::856 :::* 685/rpcbind
:return:
"""
# skip empty output
if not cmd_stdout:
return None
ne = list()
c = 0
for content in cmd_stdout.split('\n'):
if not content:
continue
if c < 1:
c += 1
continue
tmp = dict()
tmp['proto'] = content[0:5].strip()
tmp['recvq'] = content[6:12].strip()
tmp['sendq'] = content[13:19].strip()
tmp['local'] = content[20:43].strip()
tmp['remote'] = content[44:67].strip()
tmp['state'] = content[68:79].strip()
try:
tmp['pid'], tmp['program'] = content[80:].strip().split("/")
except Exception as e:
tmp['pid'] = '0'
tmp['program'] = "-"
ne.append(tmp)
return ne
@staticmethod
def ipaddress(cmd_stdout, *args, **kwargs):
"""
[root@localhost collect]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 08:00:27:61:9e:bd brd ff:ff:ff:ff:ff:ff
inet 172.16.17.32/22 brd 172.16.17.32 scope global noprefixroute dynamic enp0s3
valid_lft 28158sec preferred_lft 28158sec
inet6 fe80::50bc:382e:c298:3e2/64 scope link noprefixroute
valid_lft forever preferred_lft forever
3: virbr0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default qlen 1000
link/ether 52:54:00:e0:e0:7f brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: <BROADCAST,MULTICAST> mtu 1500 qdisc pfifo_fast master virbr0 state DOWN group default qlen 1000
link/ether 52:54:00:e0:e0:7f brd ff:ff:ff:ff:ff:ff
:return:
"""
# skip empty output
if not cmd_stdout:
return None
ad = list()
st = dict()
for content in cmd_stdout.split('\n'):
if not content:
continue
if re.match(r'^\d+.*', content, re.I):
if st:
ad.append(st)
st = dict()
parser = re.match(r'^(?P<num>\d+):\s(?P<name>.*?):\s<(?P<dest>.*?)>\s(?P<options>.*)', content, re.I).groupdict()
options = parser.pop('options').split(" ")
a, b = list(), list()
for r in range(len(options)):
if r % 2:
b.append(options[r])
else:
a.append(options[r])
st.update(parser)
st.update(dict(zip(a, b)))
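# the interface flag line ("... mtu 1500 qdisc noqueue state UP ...") alternates
# key/value tokens, so the even/odd split above reassembles them into a dict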
if re.match(r'\s+link/', content, re.I):
st.update(re.match(r'^\s+link/(?P<type>.*?)\s(?P<mac>.*?)\s.*', content, re.I).groupdict())
if re.match(r'^\s+inet.*', content, re.I):
st.update(re.match(r'^\s+inet(?P<ipv>\d?)?\s(?P<addr>.*?)\s.*', content, re.I).groupdict())
if st:
ad.append(st)
return ad
@staticmethod
def systemctl(cmd_stdout, *args, **kwargs):
# skip empty output
if not cmd_stdout:
return None
sy = list()
c = 0
for content in cmd_stdout.split('\n'):
if not content:
continue
if c < 1:
c += 1
continue
tmp = dict(zip(['unit', 'state'], [i for i in content.split(" ") if i]))
sy.append(tmp)
return sy
@staticmethod
def find(cmd_stdout, *args, **kwargs):
# skip empty output
if not cmd_stdout:
return None
fi = list()
c = 0
for content in cmd_stdout.split('\n'):
if not content:
continue
if c < 1:
c += 1
continue
fi.append(content)
return fi
@staticmethod
def timestamp_to_string(timestamp):
"""
Convert a timestamp to a formatted date-time string.
:param timestamp:
:return:
"""
time_struct = time.localtime(timestamp)
return time.strftime('%Y-%m-%d %H:%M:%S', time_struct)
def process_exe_filestats(self, cmd_stdout, *args, **kwargs):
dataset = kwargs.pop('dataset')
data = kwargs.pop('data')
pid = data['from'].split('/')[2]
print(pid)
result = []
for d in self.filestats(cmd_stdout, *args, **kwargs):
d['pid'] = pid
result.append(d)
return result
def command_stat(self, cmd_stdout, *args, **kwargs):
result = list()
f = dict()
li01 = re.compile(r'^File:\s(?P<file>.*?)$')
li02 = re.compile(r'^Size:\s(?P<size>\d+)\s+\tBlocks:\s(?P<blocks>\d+)\s+IO\sBlock:\s(?P<io_block>\d+)\s+(?P<file_type>.*?)$')
li03 = re.compile(r'^Device:\s(?P<device>.*?)\tInode:\s(?P<inode>\d+)\s+Links:\s(?P<link>\d+)$')
li04 = re.compile(r'^Access:\s\((?P<access>.*?)\)\s+Uid:\s\((?P<uid>.*?)\)\s+Gid:\s\((?P<gid>.*?)\)$')
li05 = re.compile(r'^Context:\s(?P<context>.*?)$')
li06 = re.compile(r'^Access:\s(?P<atime>.*?)$')
li07 = re.compile(r'^Modify:\s(?P<mtime>.*?)$')
li08 = re.compile(r'^Change:\s(?P<ctime>.*?)$')
for line in cmd_stdout.split('\n'):
line = line.strip()
if line.startswith("File"):
if f:
result.append(f)
f = dict()
line = line.replace('‘', '').replace('’', '')
for pattern in [li01, li02, li03, li04, li05, li06, li07, li08]:
match = pattern.match(line)
if match:
f.update(match.groupdict())
else:
result.append(f)
return result
if __name__ == '__main__':
p = ParserCore()
import os
cmd = os.popen('ps -efwww')
for item in p.process(cmd.read()):
print(item)
|
chiww/HotDog
|
endpoint/upload.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
author: <EMAIL>
Upload results.
"""
import json
import traceback
import socket
import http.client
class Upload(object):
def syslog(self, body, target):
"""
Push via syslog.
:param body:
:param target:
:return:
"""
host, port = target.split(":")
try:
set_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_sock.connect((host, int(port)))
ss = (json.dumps(body) + '\n').encode('raw-unicode-escape')
try:
set_sock.send(ss)
except (AttributeError, socket.error) as e:
pass
set_sock.close()
except socket.error as e:
print("socket connect error: %s" % str(e))
print(traceback.format_exc())
def post(self, body, target="127.0.0.1:5566"):
"""
:param body:
:param target:
:return:
"""
try:
headers = {'Content-type': 'application/json'}
conn = http.client.HTTPConnection(target)
conn.request("POST", "/", json.dumps(body), headers)
response = conn.getresponse()
print(response.status, response.reason)
data1 = response.read()
print(data1.decode())
except Exception as e:
print("post error: %s" % str(e))
print(traceback.format_exc())
if __name__ == '__main__':
d = {"a": "1", "b": "2", "c": "3"}
upload = Upload()
upload.post(d)
|
chiww/HotDog
|
server/forward.py
|
<reponame>chiww/HotDog
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import traceback
import copy
import socket
import json
class Splunk(object):
def __init__(self, connect_string):
self.protocol, self.host, self.port = connect_string.split(":")
def push(self, data):
for u in self.unwind(data):
getattr(self, self.protocol)(u)
def unwind(self, data):
"""
"""
if isinstance(data['data'], list):
contents = data.pop('data')
for c in contents:
_t = copy.copy(data)  # shallow-copy so each yielded record keeps its own 'data'
_t['data'] = c
yield _t
elif isinstance(data['data'], dict):
yield data
else:
yield data
def tcp(self, data):
"""
"""
try:
set_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_sock.connect((self.host, int(self.port)))
ss = (json.dumps(data) + '\n').encode('raw-unicode-escape')
try:
set_sock.send(ss)
except (AttributeError, socket.error) as e:
pass
set_sock.close()
return True
except socket.error as e:
print("socket connect error: %s" % str(e))
return False
|
chiww/HotDog
|
endpoint/main.py
|
<reponame>chiww/HotDog<gh_stars>1-10
#!/usr/bin/python
import sys
import os
import json
from collect import Collect
from upload import Upload
from rule import load_rule
upload = Upload()
upload_target = '172.16.1.60:5566'
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, PROJECT_PATH)
DEFAULT_RULE_PATH = "/endpoint/rule/BASE.yml"
def collect(rules=''):
"""
USAGE: rules = [
{
"id": "C0015",
"name": "nginx",
"description": "nginx",
"category": "process",
"source":{
"action":{
"method":"stdout",
"from": "ps -efwww"
}
}
}
]
:param rules:
:return:
"""
rule_file = os.path.dirname(os.path.abspath(PROJECT_PATH)) + DEFAULT_RULE_PATH
if rules == '' or rules == "[]":
rules = []
if not rules:
rules = load_rule(rule_file)
else:
try:
rules = json.loads(rules)
except Exception as e:
return 1, "rules非json格式,请校验; Error: %s 输入的内容是: %s" % (str(e), str(rules))
c = Collect(rules)
result = list()
for rule, data in c.run():
upload.post(data, target=upload_target)
result.append({'rule': rule})
return 0, result
if __name__ == '__main__':
ru = [
{'id': 'C0037',
'name': 'Process executable file status',
'description': 'Get the file status of /proc/*/exe',
'category': 'sysenv',
'source': {
"action": {"method": "filewalk", "from": "/proc", "filter": "/proc/\d+/exe"},
},
'parser': {'name': 'foo', 'args': '', 'kwargs': ''}
# 'upload': {'method': 'tcp', 'uri': 'tcp:127.0.0.1:1516'}
}]
print(collect())
|
chiww/HotDog
|
endpoint/collect.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
author: <EMAIL>
Collect information.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import platform
import time
import re
import traceback
import sys
import base64
class Collect(object):
def __init__(self, rules: list):
self.hostname = platform.node()
self.version = platform.platform()
self.time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
self.rules = rules
try:
self._check_rule_validity()
except Exception as e:
print(e)
sys.exit(1)
self.host = self.host_info()
self.task_id = "{timestamp}_{hostname}".format(**self.host)
@staticmethod
def host_info():
"""
Get basic system information.
:return:
"""
def get_ips():
_ips = []
for i in os.popen('ip address').read().splitlines():
_ips.extend(re.findall(r'\s+inet\s(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/\d{1,2})', i))
return _ips
info = dict()
info['hostname'] = platform.node()
info['system'] = platform.platform()
info['datetime'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
info['timestamp'] = int(time.time())
info['ips'] = get_ips()
return info
def _check_rule_validity(self):
"""
Check that the rules are valid.
:return:
"""
assert isinstance(self.rules, list), "Rules must be type <list>."
for rule in self.rules:
assert "id" in rule.keys(), "Miss <id> in rule, please check rule. rule: %s" % str(rule)
assert "name" in rule.keys(), "Miss <name> in rule, please check rule. rule: %s" % str(rule)
assert "category" in rule.keys(), "Miss <category> in rule, please check rule. rule: %s" % str(rule)
assert "source" in rule.keys(), "Miss <source> in rule, please check rule. rule: %s" % str(rule)
assert isinstance(rule['source'], dict), "<source> field must be <dict>, please check."
assert "action" in rule['source'].keys(), "Miss <source> in rule[source], please check rule. rule: %s" % str(rule)
assert isinstance(rule['source']['action'], dict), "<action> field must be <dict>, please check."
assert "method" in rule['source']['action'].keys(), "Miss <method> in rule[source], " \
"please check rule. rule: %s" % str(rule)
assert "from" in rule['source']['action'].keys(), "Miss <from> in rule, " \
"please check rule. rule: %s" % str(rule)
if rule['source']['action']['method'] == "stdout":
assert not self._is_danger_cmd(rule['source']['action']['from']), "Rule: %s contain danger command, " \
"illegal and would not run!" % str(rule)
# TODO:
# 1. upload
# 2. parser
@staticmethod
def _is_danger_cmd(command):
"""
Block execution of dangerous commands on the command line.
:param command:
:return:
"""
danger_exe = ['reboot', 'shutdown', 'halt', 'du', 'bash',
'python', 'php', 'java', 'perl',
'vim', 'sudo', 'su']  # nb: the comma after 'perl' is required, or 'perl' and 'vim' silently concatenate to 'perlvim'
for exe in danger_exe:
if exe in command:
return True
return False
@staticmethod
def _is_match(string, regex):
"""
Simple regex match.
:param string:
:param regex:
:return: <bool>
"""
if not regex:
return True
if re.match(regex, string, re.I):
return True
return False
def stdout(self, command: str, filter_re: str = None):
"""
Return the raw command-line output.
:param command
:param filter_re
:return:
{'from': <command>, 'content': <result>, 'filter_re': }
"""
if self._is_danger_cmd(command):
print("Error: <%s> contain danger command, can not run, please check!" % command)
return ""
p = os.popen("%s 2>/dev/null" % command)
return [{'from': command, 'content': base64.b64encode(p.read().encode('utf-8')).decode('utf-8')}]
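# The content is base64-encoded here; on the server side, Parser._b64decode
# reverses this for the 'stdout' and 'filestrings' methods.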
@staticmethod
def filestrings(file, filter_re: str = None):
"""
Raw strings(1) dump of a file.
:param file:
:param filter_re
:return:
"""
def _get(_f):
if os.path.exists(_f):
print("strings %s 2>/dev/null" % (_f + _grep))
p = os.popen("strings %s 2>/dev/null" % (_f + _grep))
_stdout = p.read().encode('utf-8')
else:
_stdout = "No such file".encode('utf-8')
return _stdout
# filter the matched strings (note: piped through sed, not grep)
if filter_re:
_grep = "| sed '%s'" % filter_re
else:
_grep = ""
if isinstance(file, list):
result = []
for f in file:
if isinstance(f, dict) and 'from' in f:
file_path = f['from']
else:
file_path = f
result.append({'from': file_path, 'content': base64.b64encode(_get(file_path)).decode('utf-8')})
elif isinstance(file, str):
result = [{'from': file, 'content': base64.b64encode(_get(file)).decode('utf-8')}]
else:
result = [{'from': file, 'content': ''}]
print('Error: error type in filestrings args[0]')
return result
def filewalk(self, directory: str, filter_re: str = None):
"""
Walk a directory (or single file) and collect file paths.
:param directory:
:param filter_re:
:return:
"""
def walk(path):
if os.path.isdir(path):
for f in os.listdir(path):
p = os.path.join(path, f)
if not os.path.exists(p):
continue
if self._is_match(p, '^/proc/self.*'):
continue
if self._is_match(p, '^/proc/\d+/cwd.*'):
continue
if self._is_match(p, '^/proc/\d+/task.*'):
continue
if self._is_match(p, '^/proc/\d+/root.*'):
continue
for ff in walk(p):
yield ff
else:
yield path
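# /proc/self and each PID's cwd/task/root entries are skipped above,
# presumably to avoid following recursive procfs symlinks.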
result = []
for file in walk(directory):
if os.path.exists(file) and self._is_match(file, filter_re):
try:
result.append({'from': file, 'content': os.readlink(file)})
except OSError as e:
result.append({'from': file, 'content': file})
# result.append({'from': file, 'content': file})
return result
def filestats(self, path: list, filter_re: str = None):
"""
Get file status values.
:param path: [{'from': '/etc/passwd', 'content': '/etc/passwd'}]
:param filter_re
:return:
"""
try:
data = []
for p in path:
if not os.path.exists(p['content']):
continue
try:
st = {'from': p['from'], 'content': ''}
_stat = os.stat(p['content'])
st['content'] = _stat.__repr__()
data.append(st)
except Exception as e:
print(p)
print(e)
return data
except Exception as e:
print(e)
def run(self):
for rule in self.rules:
# initialize the result record
data = dict()
data['rule_id'] = rule['id']
data['name'] = rule['name']
data['category'] = rule['category']
data['host'] = self.host
data['source'] = rule['source']
data['parser'] = rule.get('parser', None)
data['task_id'] = self.task_id
data['data'] = None
action = rule['source']['action']
formater = rule['source'].get('format', None)
try:
raw = getattr(self, action['method'])(action['from'], filter_re=action.get('filter', None))
if formater:
output = getattr(self, formater['method'])(raw, filter_re=formater.get('filter', None))
if output:
data['data'] = output
else:
data['data'] = raw
except Exception as e:
print(rule, str(e))
traceback.print_exc()
yield rule, data
if __name__ == '__main__':
import pprint
def test_stdout():
c = Collect([{
"id": "C0001",
"name": "进程状态",
"description": "获取进程状态信息",
"category": "user",
"source": {
"action": {"method": "stdout", "from": "shutdown"},
# "format": {"method": "readlines"}
}
}])
print(next(c.run()))  # run() is a generator; take the first yielded (rule, data) pair
def test_filestrings():
c = Collect([{
"id": "C0001",
"name": "账号信息",
"description": "获取/etc/passwd信息",
"category": "user",
"source": {
"action": {"method": "filestrings", "from": "/etc/passwd"},
"format": {"method": "readlines"}
}
}])
print(next(c.run()))
def test_filewalk():
c = Collect([{
"id": "C0001",
"name": "进程执行文件",
"description": "获取/proc/*/exe信息",
"category": "user",
"source": {
"action": {"method": "filewalk", "from": "/proc", "filter": "/proc/\d+/exe"},
"format": {"method": "filestats"}
}
}])
pprint.pprint(next(c.run()))
test_stdout()
#test_filestrings()
# test_filewalk()
|
chiww/HotDog
|
endpoint/rule/__init__.py
|
<reponame>chiww/HotDog<filename>endpoint/rule/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collection rules.
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
package = PROJECT_PATH + '/package'
sys.path.insert(0, package)
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError as e:
from yaml import Loader, Dumper
def load_rule(rule_file):
with open(rule_file, 'r', encoding="utf-8") as yml:
rule_yaml = load(yml, Loader=Loader)
return rule_yaml
|
chiww/HotDog
|
server/main.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import argparse
import traceback
from http.server import HTTPServer, SimpleHTTPRequestHandler
from socketserver import ThreadingMixIn
import sys
import io
import json
import os
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, PROJECT_PATH)
from parser import Parser
from forward import Splunk
splunk_connect = "tcp:127.0.0.1:2021"
def update_endpoint():
os.system('rm -rf endpoint.tar.gz')
os.popen('tar -zcf endpoint.tar.gz ../endpoint')  # NOTE: os.popen returns immediately; tar may still be running
print("Updated endpoint.tar.gz!")
def handler(host, data):
data['host']['ip'] = host[0]
p = data['parser']
if not p:
p = {'method': 'common', 'args': [], 'kwargs': {}}
data['parser'] = p
try:
parser_data = Parser().run(data, p['method'], *p.get('args', []), **p.get('kwargs', {}))
print(parser_data['rule_id'], parser_data['name'], type(parser_data['data']), str(parser_data['source']),
len(parser_data['data']))
Splunk(splunk_connect).push(parser_data)
except Exception as e:
print(traceback.format_exc())
class HotDogHandler(SimpleHTTPRequestHandler):
def do_POST(self):
try:
_length = int(self.headers['Content-Length'])
_data = self.rfile.read(_length)
data = _data.decode('utf-8')
except Exception as e:
data = json.dumps({'error': traceback.format_exc()})
handler(self.client_address, json.loads(data))
self.send_response(200)
self.send_header('Content-Type',
'application/json; charset=utf-8')
self.end_headers()
out = io.TextIOWrapper(
self.wfile,
encoding='utf-8',
line_buffering=False,
write_through=True,
)
out.write(data)
out.detach()
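# the handler echoes the received JSON back to the client as the response body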
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""在一个新的线程中处理请求。"""
def run(HandlerClass=HotDogHandler,
ServerClass=ThreadedHTTPServer, protocol="HTTP/1.0", port=8000, bind=""):
"""Test the HTTP request handler class.
This runs an HTTP server on port 8000 (or the port argument).
"""
server_address = (bind, port)
HandlerClass.protocol_version = protocol
with ServerClass(server_address, HandlerClass) as httpd:
sa = httpd.socket.getsockname()
serve_message = "HotDog Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
print(serve_message.format(host=sa[0], port=sa[1]))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("\nKeyboard interrupt received, exiting.")
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bind', '-b', default='', metavar='ADDRESS',
help='Specify alternate bind address '
'[default: all interfaces]')
parser.add_argument('port', action='store',
default=5566, type=int,
nargs='?',
help='Specify alternate port [default: 5566]')
args = parser.parse_args()
update_endpoint()
run(HandlerClass=HotDogHandler, port=args.port, bind=args.bind)
|
realraum/RoomInvader
|
app.py
|
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, json, jsonify
from urlparse import urlparse, parse_qs
import urllib2
import subprocess
app = Flask(__name__)
nowplaying = []
queue = []
def getYouTubeID(videoURL):
app.logger.debug('getting videoID: ' + videoURL)
return parse_qs(urlparse(videoURL).query)['v'][0]
def getYouTubeTitle(videoId):
app.logger.debug('getting title for videoId: ' + videoId)
response = urllib2.urlopen("http://youtube.com/get_video_info?video_id=" + str(videoId))
query = urlparse("/?" + response.read()).query
return parse_qs(query)['title'][0]
def appendToQueue(record):
if len(nowplaying) == 0:
nowplaying.append(record)
else:
queue.append(record)
@app.route("/")
def main():
#flash('Hi')
return render_template('main.html')
@app.route('/queue', methods=['GET'])
def getQueue():
return jsonify(queue=queue)
@app.route('/np', methods=['GET'])
def np():
return jsonify(np=nowplaying)
@app.route('/enqueue/youtube', methods=['POST'])
def enqueue():
app.logger.debug('/enqueue/youtube ...')
# this is experimental, replace with mpv.py or ipc something:
#os.system("sudo -u realraum mpv --no-video %s" % request.form['url'])
#ret = subprocess.call(["sudo", "-u", "realraum", "mpv", "--no-video", request.form['url']])
# ret = subprocess.call(["mpv", "--no-video", request.form['url']])
videoID = getYouTubeID(request.form['url'])
title = getYouTubeTitle(videoID)
appendToQueue({ 'type': 'YT',
'title': title,
'artist': 'YouTube'})
return "OK: %d" % len(queue)
if __name__ == "__main__":
app.debug = True
app.run(host='0.0.0.0')
|
axlecrusher/hgengine3
|
Mercury3/scripts/blenderExport.py
|
import bpy
import struct
vertices = [];
uvs = [];
class MeshData:
def __init__(self):
self.offset = 0; #offset into the shared vertex buffer (set in getMeshData)
self.indices = [];
# self.uvs = [];
bl_info = {"name": "HgMDL Model Exporter", "category": "Import-Export"}
# Only needed if you want to add into a dynamic menu
def menu_func_export(self, context):
self.layout.operator(ExportToHgMDL.bl_idname, text=bl_info['name'])
def register():
bpy.utils.register_class(ExportToHgMDL)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_class(ExportToHgMDL)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
def getMeshData(o):
global vertices;
global uvs;
mesh = o.data;
md = MeshData();
md.offset = len(vertices);
vertices = vertices + mesh.vertices.values();
for face in mesh.polygons:
indices = face.vertices;
if len(indices) != 3:
raise ValueError('Faces must be triangles')
for x in indices:
md.indices.append(x+md.offset);
uv_layer = mesh.uv_layers[0]
uvs = [i.uv for i in uv_layer.data]
return md;
def UVtoInt16(u,v):
u = float(u)
v = float(v)
a = int(u * 65535);
b = int(v * 65535);
a = max(0, min(a,0xffff))
b = max(0, min(b,0xffff))
return (a,b);
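# UVs are quantized to unsigned 16-bit ints and clamped to [0, 0xffff]; the
# consumer is presumably expected to divide by 65535 to recover [0, 1] floats.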
class VertexUVCombiner:
def __init__(self,vIdx,u,v):
self.vertex_index = vIdx
self.u = u
self.v = v
def __hash__(self):
return hash((self.vertex_index, self.u, self.v))
def __eq__(self, o):
return (self.vertex_index, self.u, self.v) == (o.vertex_index, o.u, o.v)
def expandVertices(indices):
#there is one UV for each vertex index, so UVs share vertices;
#we cannot render this, so vertices need to be duplicated and assigned
#their unique UV coordinates
expandedVertices = []
newIndices = [] #index mapping into new expandedVertices list
vertexDict = dict() #for deduplication quick lookup
for i in range(len(indices)):
uv = uvs[i]
vIdx = indices[i]
key = VertexUVCombiner(vIdx, uv.x, uv.y)
if key in vertexDict:
idx = vertexDict[key]
newIndices.append(idx)
else:
idx = len(expandedVertices)
expandedVertices.append(key)
vertexDict[key] = idx
newIndices.append(idx)
return (expandedVertices,newIndices)
def write_some_data(context, filepath, use_some_setting):
global vertices;
selectedObjects = bpy.context.selected_objects;
meshData = 0;
for o in selectedObjects:
if o.type == 'MESH':
meshData = getMeshData(o);
break;
(expandedVerts, remapedIndices) = expandVertices(meshData.indices)
print("vertex count ", len(vertices))
print("uv count ", len(uvs))
print("indices count ", len(remapedIndices))
print("expanded vertex count ", len(expandedVerts))
output = open( filepath, 'wb')
output.write(struct.pack("<2I",len(expandedVerts),len(remapedIndices)))
for ev in expandedVerts:
vertex = vertices[ev.vertex_index]
co = vertex.co
n = vertex.normal
(uv_x,uv_y) = UVtoInt16(ev.u,ev.v);
output.write(struct.pack("<3f", co.x, co.y, co.z))
output.write(struct.pack("<3f", n.x, n.y, n.z))
output.write(struct.pack("<4f",0,0,0,0)) #tangent
output.write(struct.pack("<2H", uv_x, uv_y))
output.write(struct.pack('<'+'H'*len(remapedIndices),*remapedIndices))
output.close()
return {'FINISHED'}
# ExportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class ExportToHgMDL(Operator, ExportHelper):
"""Export meshes to HgMDL format"""
bl_idname = "export_hgmdl.data" # important since its how bpy.ops.export_hgmdl.data is constructed
bl_label = "Export"
# ExportHelper mixin class uses this
filename_ext = ".hgmdl"
filter_glob = StringProperty(
default="*.txt",
options={'HIDDEN'},
maxlen=255, # Max internal buffer length, longer would be clamped.
)
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
use_setting = BoolProperty(
name="Example Boolean",
description="Example Tooltip",
default=True,
)
type = EnumProperty(
name="Example Enum",
description="Choose between two items",
items=(('OPT_A', "First Option", "Description one"),
('OPT_B', "Second Option", "Description two")),
default='OPT_A',
)
def execute(self, context):
return write_some_data(context, self.filepath, self.use_setting)
if __name__ == "__main__":
register()
# test call
bpy.ops.export_hgmdl.data('INVOKE_DEFAULT')
|
axlecrusher/hgengine3
|
Mercury3/scripts/objFormatter.py
|
import struct
import sys
import math
class vertex:
def __init__(self,x,y,z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
z = self.z + other.z
return vertex(x,y,z)
def __sub__(self, other):
x = self.x - other.x
y = self.y - other.y
z = self.z - other.z
return vertex(x,y,z)
def normalize(self):
length = math.sqrt((self.x * self.x) + (self.y * self.y) + (self.z * self.z))
return vertex(self.x/length, self.y/length,self.z/length)
def write(self,file):
file.write(struct.pack("<3f",self.x,self.y,self.z))
def write4(self,file):
file.write(struct.pack("<4f",self.x,self.y,self.z,0))
# file.write(struct.pack("<4f",self.x,self.y,self.z,self.w))
def hex(self,file):
data = struct.pack("<3f",self.x,self.y,self.z)
file.write(",".join('0x%X'%x for x in struct.iter_unpack("I",data))+ ',')
def text(self,file):
file.write(', '.join((str(self.x),str(self.y),str(self.z))) + ', ' )
def dot(self, other):
# keep the sum in one expression; as separate bare lines the y and z
# terms would be discarded no-op statements
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(a,b):
r = vertex(0,0,0)
r.x = (a.y * b.z) - (a.z * b.y)
r.y = (a.z * b.x) - (a.x * b.z)
r.z = (a.x * b.y) - (a.y * b.x)
return r
class uv:
def __init__(self,x,y):
self.u = float(x)
self.v = float(y)
# clamp();
def toInt16(self):
# a = int( ((self.u+1)*0.5) * 65535 )
# b = int( ((self.v+1)*0.5) * 65535 )
a = int(self.u * 65535);
b = int(self.v * 65535);
a = max(0, min(a,0xffff))
b = max(0, min(b,0xffff))
return (a,b);
def write(self,file):
(a,b) = self.toInt16()
file.write(struct.pack("<2H",a,b))
def hex(self,file):
(a,b) = self.toInt16()
data = struct.pack("<2H",a,b)
file.write(",".join('0x%X'%x for x in struct.iter_unpack("I",data))+ ',')
def text(self,file):
(a,b) = self.toInt16()
file.write(', '.join((str(a),str(b))) + ', ' )
class normal:
def __init__(self,x,y,z):
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __mul__(self,f):
r = normal(self.x,self.y,self.z)
r.x *= f
r.y *= f
r.z *= f
return r
def write(self,file):
file.write(struct.pack("<3f",self.x,self.y,self.z))
def hex(self,file):
data=struct.pack("<3f",self.x,self.y,self.z)
file.write(",".join('0x%X'%x for x in struct.iter_unpack("I",data))+ ',')
def text(self,file):
file.write(', '.join((str(self.x),str(self.y),str(self.z))) + ', ' )
class packed_vertex:
def __init__(self,v,uv,n):
self.vertex = v
self.normal = n
self.uv = uv
self.tangent = vertex(0,0,0) #tangent needs to be its own data type
def write(self,file):
# print(self.vertex)
self.vertex.write(file)
if (self.normal != None):
self.normal.write(file)
self.tangent.write4(file)
if (self.uv != None):
self.uv.write(file)
def hex(self,file):
# print(self.vertex)
self.vertex.hex(file)
if (self.normal != None):
self.normal.hex(file)
self.tangent.hex(file)
if (self.uv != None):
self.uv.hex(file)
def text(self,file):
# print(self.vertex)
self.vertex.text(file)
if (self.normal != None):
self.normal.text(file)
self.tangent.text(file)
if (self.uv != None):
self.uv.text(file)
class triangle:
def __init__(self,i1,i2,i3):
self.index = [int(i1),int(i2),int(i3)]
#raw data. Index into them for faces
vertices = []
uv_coord = []
normals = []
packed_vertices = []
triangles = [] #built from indexing into packed_vertices
face_map = {}
indices = []
tangent = []
def CalculateTangentArray():
# Vector3D *tan1 = new Vector3D[vertexCount * 2];
# Vector3D *tan2 = tan1 + vertexCount;
# ZeroMemory(tan1, vertexCount * sizeof(Vector3D) * 2);
tan1 = []
tan2 = []
for v in packed_vertices:
tan1.append(vertex(0,0,0))
tan2.append(vertex(0,0,0))
for triangle in triangles:
i1 = triangle.index[0];
i2 = triangle.index[1];
i3 = triangle.index[2];
v1 = packed_vertices[i1].vertex;
v2 = packed_vertices[i2].vertex;
v3 = packed_vertices[i3].vertex;
w1 = packed_vertices[i1].uv;
w2 = packed_vertices[i2].uv;
w3 = packed_vertices[i3].uv;
x1 = v2.x - v1.x;
x2 = v3.x - v1.x;
y1 = v2.y - v1.y;
y2 = v3.y - v1.y;
z1 = v2.z - v1.z;
z2 = v3.z - v1.z;
s1 = w2.u - w1.u;
s2 = w3.u - w1.u;
t1 = w2.v - w1.v;
t2 = w3.v - w1.v;
d = (s1 * t2 - s2 * t1);
r = 1
if (d!=0):
r = 1.0 / d;
sdir = vertex((t2 * x1 - t1 * x2) * r, (t2 * y1 - t1 * y2) * r, (t2 * z1 - t1 * z2) * r);
tdir = vertex((s1 * x2 - s2 * x1) * r, (s1 * y2 - s2 * y1) * r, (s1 * z2 - s2 * z1) * r);
tan1[i1] += sdir;
tan1[i2] += sdir;
tan1[i3] += sdir;
tan2[i1] += tdir;
tan2[i2] += tdir;
tan2[i3] += tdir;
for a in range(0,len(packed_vertices)):
n = packed_vertices[a].normal;
t = tan1[a];
# Gram-Schmidt orthogonalize
packed_vertices[a].tangent = (t - n * t.dot(n)).normalize();
# print (packed_vertices[a].tangent.x, packed_vertices[a].tangent.y, packed_vertices[a].tangent.z)
# Calculate handedness
packed_vertices[a].tangent.w = 1.0;
if (dot(cross(n, t), tan2[a]) < 0.0):
packed_vertices[a].tangent.w = -1.0;
def push_index(token):
token = token.replace("//","/")
if token in face_map:
indices.append( face_map[token] )
else:
idx = len(packed_vertices);
face_map[token] = idx
indices.append(idx)
i = list(map(lambda x: int(x)-1,token.split("/")));
pv = ''
length = len(i)
if (length == 3):
pv = packed_vertex(vertices[i[0]],uv_coord[i[1]],normals[i[2]])
elif (length == 2):
# print(i[0], i[0])
pv = packed_vertex(vertices[i[0]],uv(0,0),normals[i[1]])
elif (length == 1):
pv = packed_vertex(vertices[i[0]],None,None)
else:
print("Unknown face format")
exit(1)
packed_vertices.append(pv)
return face_map[token]
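# push_index deduplicates OBJ face tokens: 'v', 'v//vn' (collapsed to 'v/vn'
# above) and 'v/vt/vn' forms each map to a single packed vertex.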
f = open( sys.argv[1], 'r')
currentLineNumber = 1
for line in f:
print("Processing Line {}".format(currentLineNumber))
currentLineNumber += 1
tokens = line.split();
if (tokens[0] == "v"):
vertices.append( vertex(tokens[1],tokens[2],tokens[3]) )
if (tokens[0] == "vt"):
uv_coord.append( uv(tokens[1],tokens[2]) )
if (tokens[0] == "vn"):
normals.append( normal(tokens[1],tokens[2],tokens[3]) )
if (tokens[0] == "f"):
if (len(tokens)!=4): #includes "f" token
print("Error: All faces must be triangles. ", len(tokens))
exit(1)
i1 = push_index(tokens[1])
i2 = push_index(tokens[2])
i3 = push_index(tokens[3])
print(i1,i2,i3)
triangles.append( triangle(i1,i2,i3) )
def output_binary():
output = open( sys.argv[1]+'.hgmdl', 'wb')
output.write(struct.pack("<2I",len(packed_vertices),len(indices)))
for pv in packed_vertices:
# print(pv.vertex.x,pv.vertex.y,pv.vertex.z)
pv.write(output);
output.write(struct.pack('<'+'H'*len(indices),*indices))
output.close()
def output_hex():
output = open( sys.argv[1]+'.hex', 'w')
for i in indices:
packed_vertices[i].hex(output);
output.close()
def output_text():
output = open( sys.argv[1]+'.txt', 'w')
for pv in packed_vertices:
pv.text(output);
output.write("\n")
output.write("\n")
for pv in packed_vertices:
pv.hex(output);
output.write("\n")
output.write(', '.join(str(x) for x in indices) + ', ' )
# output.text(struct.pack('<'+'H'*len(indices),*indices))
output.close()
#CalculateTangentArray()
output_binary()
output_hex()
output_text()
print ('vertices:', len(packed_vertices))
print ('indices:', len(indices))
|
2e0byo/pygallica-autobib
|
tests/test_parse_gallica.py
|
"""Test parsers for various things gallica returns.
Wouldn't it be nice if APIs returned machine-readable data?!
"""
from pathlib import Path
import pytest
test_tocs = ["toc-no-cells.xml", "toc-with-cells.xml", "mix.xml"]
@pytest.mark.parametrize("xml", test_tocs)
def test_parse_toc(data_regression, gallica_resource, xml):
with (Path("tests/test_parse_gallica") / xml).open() as f:
data_regression.check(gallica_resource.parse_gallica_toc(f.read().strip()))
|
2e0byo/pygallica-autobib
|
demo_image_processing.py
|
from bs4 import BeautifulSoup
import numpy as np
from PIL import ImageOps
from gallica_autobib.gallipy import Resource
from gallica_autobib.process import extract_image
from PyPDF4 import PdfFileReader
from io import BytesIO
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.patches import Rectangle
from collections import namedtuple
Point = namedtuple("Point", ["x", "y"])
Box = namedtuple("Box", ["upper", "lower"])
ark = "https://gallica.bnf.fr/ark:/12148/bpt6k65545564"
r = Resource(ark)
def fetch_stuff(pno):
pg = r.content_sync(startview=pno, nviews=1, mode="pdf").value
reader = PdfFileReader(BytesIO(pg))
data, type_ = extract_image(reader.getPage(2))
ocr = r.ocr_data_sync(view=pno).value
soup = BeautifulSoup(ocr.decode())
upper_bound = [0, 0]
lower_bound = [0, 0]
page = soup.find("page")
height, width = int(page.get("height")), int(page.get("width"))
xscale = data.height / height
yscale = data.width / width
height *= yscale
printspace = soup.find("printspace")
text_height = round(int(printspace.get("height")) * yscale)
text_width = round(int(printspace.get("width")) * xscale)
vpos = int(printspace.get("vpos")) * yscale
hpos = int(printspace.get("hpos")) * xscale
upper = Point(round(hpos), round(vpos))
return upper, text_height, text_width, data, height
def gen_doc_data():
pno = 128
upper, text_height, text_width, data, height = fetch_stuff(pno)
fig, ax = plt.subplots()
plt.imshow(data)
text_box = ax.add_patch(
Rectangle(
upper, text_width, text_height, edgecolor="red", facecolor="none", lw=2
)
)
fig.savefig(
"docs/img/content_box.svg", bbox_inches="tight", transparent=True, dpi=72
)
ax2 = ax.twiny()
a = np.array(ImageOps.grayscale(data))
mean = a.mean(axis=1)
ax2.plot(mean, range(len(mean)), label="mean")
gradient = np.gradient(mean) + 70
ax2.plot(gradient, range(len(gradient)), color="green", label="differential")
plt.legend()
fig.savefig("docs/img/mean.svg", bbox_inches="tight", transparent=True, dpi=72)
gstd = np.std(gradient)
gmean = gradient.mean()
ax2.vlines([gmean - 1.5 * gstd, gmean + 1.5 * gstd], 0, data.height, color="orange")
fig.savefig(
"docs/img/mean_bounds.svg", bbox_inches="tight", transparent=True, dpi=72
)
search = round(height * 0.05)
upper_bound = upper.y - search
search_height = text_height + 2 * search
search_upper = Point(upper.x, upper_bound)
search_box = ax.add_patch(
Rectangle(
search_upper,
text_width,
search_height,
edgecolor="green",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/search.svg", bbox_inches="tight", transparent=True, dpi=72)
upper_search = gradient[upper_bound : upper.y]
lower_search = gradient[upper.y + text_height : upper_bound + search_height]
lower_thresh = gmean - 1.5 * gstd
upper_thresh = gmean + 1.5 * gstd
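# Peak detection: walking outward from the text box, peaked becomes 1 when the
# gradient first exceeds mean + 1.5 sigma and 2 when it then drops below
# mean - 1.5 sigma, i.e. a full swing that marks a printed line.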
peaked = 0
for up, x in enumerate(reversed(upper_search)):
if not peaked and x >= upper_thresh:
peaked = 1
if peaked and x <= lower_thresh:
peaked = 2
print("Line above detected.")
break
up = up if peaked == 2 else 0
peaked = 0
for down, x in enumerate(lower_search):
if not peaked and x <= lower_thresh:
peaked = 1
if peaked and x >= upper_thresh:
peaked = 2
print("Line below detected.")
break
down = down if peaked == 2 else 0
final_upper = Point(upper.x, upper.y - up)
final_height = text_height + up + down
search_box = ax.add_patch(
Rectangle(
final_upper,
text_width,
final_height,
edgecolor="pink",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/searched.svg", bbox_inches="tight", transparent=True, dpi=72)
stretch = round(height * 0.005)
streched_upper = Point(final_upper[0] - stretch, final_upper[1] - 2 * stretch)
stretched_width = text_width + 2 * stretch
stretched_height = final_height + 4 * stretch
fig, ax = plt.subplots()
plt.imshow(data)
final_box = ax.add_patch(
Rectangle(
streched_upper,
stretched_width,
stretched_height,
edgecolor="black",
facecolor="none",
lw=1,
)
)
fig.savefig("docs/img/stretched.svg", bbox_inches="tight", transparent=True, dpi=72)
def process_page(pno):
upper, text_height, text_width, data, height = fetch_stuff(pno)
fig, ax = plt.subplots()
plt.imshow(data)
text_box = ax.add_patch(
Rectangle(
upper, text_width, text_height, edgecolor="red", facecolor="none", lw=2
)
)
ax2 = ax.twiny()
a = np.array(ImageOps.grayscale(data))
mean = a.mean(axis=1)
gradient = np.gradient(mean) + 70
ax2.plot(gradient, range(len(gradient)), color="green", label="differential")
gstd = np.std(gradient)
gmean = gradient.mean()
ax2.vlines([gmean - 1.5 * gstd, gmean + 1.5 * gstd], 0, data.height, color="orange")
search = round(height * 0.05)
upper_bound = upper.y - search
search_height = text_height + 2 * search
search_upper = Point(upper.x, upper_bound)
search_box = ax.add_patch(
Rectangle(
search_upper,
text_width,
search_height,
edgecolor="green",
facecolor="none",
lw=1,
)
)
upper_search = gradient[upper_bound : upper.y]
lower_search = gradient[upper.y + text_height : upper_bound + search_height]
lower_thresh = gmean - 1.5 * gstd
upper_thresh = gmean + 1.5 * gstd
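    # Same two-state rise-and-fall scan as in gen_doc_data() above.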
peaked = 0
for up, x in enumerate(reversed(upper_search)):
if not peaked and x >= upper_thresh:
peaked = 1
if peaked and x <= lower_thresh:
peaked = 2
print("Line above detected.")
break
up = up if peaked == 2 else 0
peaked = 0
for down, x in enumerate(lower_search):
if not peaked and x <= lower_thresh:
peaked = 1
if peaked and x >= upper_thresh:
peaked = 2
print("Line below detected.")
break
down = down if peaked == 2 else 0
final_upper = Point(upper.x, upper.y - up)
final_height = text_height + up + down
search_box = ax.add_patch(
Rectangle(
final_upper,
text_width,
final_height,
edgecolor="pink",
facecolor="none",
lw=1,
)
)
stretch = round(height * 0.005)
    stretched_upper = Point(final_upper[0] - stretch, final_upper[1] - 2 * stretch)
stretched_width = text_width + 2 * stretch
stretched_height = final_height + 4 * stretch
final_box = ax.add_patch(
Rectangle(
            stretched_upper,
stretched_width,
stretched_height,
edgecolor="black",
facecolor="none",
lw=1,
)
)
gen_doc_data()
# process_page(128)
# process_page(136)
# process_page(79)
|
2e0byo/pygallica-autobib
|
tests/test_module.py
|
<reponame>2e0byo/pygallica-autobib
import pytest
from gallica_autobib.models import Article, BibBase, Book, Collection, Journal
def test_article():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
)
assert isinstance(a, Article)
assert isinstance(a._source(), Journal)
assert (
a.generate_query()
== 'bib.publicationdate all "1930" and bib.title all "La vie spirituelle" and bib.recordtype all "per"'
)
assert a._source().translate()["title"] == "La vie spirituelle"
assert isinstance(a.pages, list)
assert isinstance(a.pages[0], str)
assert a.name() == "Pour lire saint Augustin (M.-D. Chenu)"
assert a.name(short=4) == "Pour (M.-D)"
ahash = a.__hash__()
assert ahash == hash(a)
a.publicationdate = 1940
assert ahash != hash(a)
def test_book():
a = Book(title="Title", publisher="Cerf", year=1901, author="me")
assert isinstance(a, Book)
assert a._source() is a
assert a._source().translate() == {
k: v for k, v in dict(a).items() if k != "editor"
}
def test_collection():
a = Collection(title="Title", publisher="Cerf", year=1901, author="me")
assert isinstance(a, Collection)
assert a._source() is a
assert a._source().translate() == {
k: v for k, v in dict(a).items() if k != "editor"
}
query_candidates = [
[
{"title": "La vie spirituelle", "recordtype": "per"},
'bib.title all "la vie spirituelle" and bib.recordtype all "per',
],
[{"title": "la vie spirituelle"}, 'bib.title all "la vie spirituelle'],
]
@pytest.mark.parametrize("kwargs,outstr", query_candidates)
def test_assemble_query(kwargs, outstr):
    assert BibBase.assemble_query(**kwargs)
def test_bibtex_render_article(file_regression):
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
volume=12,
number=1,
)
file_regression.check(a.bibtex(), extension=".bib")
@pytest.fixture
def article():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
)
yield a
|
2e0byo/pygallica-autobib
|
tests/test_real_queries.py
|
import pytest
from gallica_autobib.models import Article
from gallica_autobib.query import Query
def test_match_query():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="Daniélou",
year=1930,
)
q = Query(a)
resp = q.run()
assert resp.target
assert resp.candidate.journaltitle == "La Vie spirituelle, ascétique et mystique"
candidates = [
[
Article(
journaltitle="La Vie spirituelle",
author="<NAME>",
pages=list(range(547, 552)),
volume=7,
year=1923,
title="Ascèse et péché originel",
),
dict(ark="http://catalogue.bnf.fr/ark:/12148/cb34406663m"),
]
]
@pytest.mark.parametrize("candidate,params", candidates)
def test_queries(candidate, params):
q = Query(candidate)
resp = q.run()
assert resp.target
assert resp.candidate.ark == params["ark"]
|
2e0byo/pygallica-autobib
|
gallica_autobib/__init__.py
|
from importlib.metadata import version
__version__ = version(__name__)
|
2e0byo/pygallica-autobib
|
tests/test_gallica_resource.py
|
import pickle
from pathlib import Path
from re import search
from typing import Union
import pytest
from gallica_autobib.gallipy import Ark, Resource
from gallica_autobib.gallipy.ark import ArkParsingError
from gallica_autobib.models import Article, Book, Collection, Journal
from gallica_autobib.query import GallicaResource, Query
@pytest.fixture(scope="session")
def pages():
inf = Path("tests/test_gallica_resource/pages.pickle")
with inf.open("rb") as f:
yield pickle.load(f)
def test_ark(gallica_resource):
ark = gallica_resource.ark
assert get_ark(ark) == get_ark("ark:/12148/bpt6k9735634r")
def test_by_vol(gallica_resource):
gallica_resource.target.volume = 24
ark = gallica_resource.ark
assert get_ark(ark) == get_ark("ark:/12148/bpt6k9735634r")
def test_resource(gallica_resource):
res = gallica_resource.resource
assert isinstance(res, Resource)
def test_pnos(gallica_resource):
assert 141 == gallica_resource.start_p
assert 163 == gallica_resource.end_p
def test_generate_blocks(gallica_resource):
list(gallica_resource._generate_blocks(0, 20, 5))
def test_generate_short_block(gallica_resource):
expected = [(0, 4)]
res = list(gallica_resource._generate_blocks(0, 3, 100))
assert res == expected
def test_download_pdf(gallica_resource, file_regression, tmp_path, check_pdfs):
gallica_resource.ark # trigger search before we edit pages
gallica_resource.target.pages = gallica_resource.target.pages[:3]
outf = tmp_path / "test.pdf"
gallica_resource.download_pdf(outf)
with outf.open("rb") as f:
file_regression.check(
f.read(), binary=True, extension=".pdf", check_fn=check_pdfs
)
@pytest.mark.xfail
def test_book():
book = Book(title="t", author="s", editor="e")
GallicaResource(book, book)
@pytest.mark.xfail
def test_journal():
journal = Journal(journaltitle="j", year="1930")
GallicaResource(journal, journal)
@pytest.mark.xfail
def test_collection():
coll = Collection(title="t", author="a")
GallicaResource(coll, coll)
def test_invalid_ark():
source = Journal(journaltitle="j", year="1930", ark="notavalidark")
target = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 158)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
)
with pytest.raises(ArkParsingError):
GallicaResource(target, source)
def test_repr(gallica_resource, data_regression):
data_regression.check(str(gallica_resource))
candidates = [
[
Article(
journaltitle="La Vie spirituelle",
author="<NAME>",
pages=list(range(547, 552)),
volume=7,
year=1923,
title="Ascèse et péché originel",
),
dict(ark="ark:/12148/bpt6k97356214"),
],
[
Article(
journaltitle="La Vie spirituelle",
author="<NAME>",
year=1921,
title="La perfection de la charité",
volume=2,
pages=list(range(1, 21)),
),
dict(ark="ark:/12148/bpt6k9736026f"),
],
]
def get_ark(arkstr: Union[str, Ark]):
"""Reliable way of matching arks."""
return search(r".*(ark:/.*)", str(arkstr)).group(1)
@pytest.mark.parametrize("candidate, params", candidates)
def test_real_queries_no_toc(candidate, params):
source = Query(candidate).run().candidate
gallica_resource = GallicaResource(candidate, source)
gallica_resource.consider_toc = False
assert get_ark(gallica_resource.ark) == get_ark(params["ark"])
@pytest.mark.parametrize("candidate, params", candidates)
def test_real_queries_toc(candidate, params):
source = Query(candidate).run().candidate
gallica_resource = GallicaResource(candidate, source)
assert get_ark(gallica_resource.ark) == get_ark(params["ark"])
def test_parse_description_range(gallica_resource):
desc = "1922/10 (A4,T7,N37)-1923/03 (A4,T7,N42)."
resp = gallica_resource.parse_description(desc)
assert resp["year"] == [1922, 1923]
assert resp["volume"] == 7
assert resp["number"] == list(range(37, 43))
def test_parse_description_shorthand(gallica_resource):
desc = "1924/04 (A5,T10)-1924/09."
resp = gallica_resource.parse_description(desc)
assert resp["year"] == 1924
assert resp["volume"] == 10
assert resp["number"] is None
def test_parse_description_range_everywhere(gallica_resource):
desc = "1922/10 (A4,T7,N37)-1923/03 (A4,T8,N42)."
resp = gallica_resource.parse_description(desc)
assert resp["year"] == [1922, 1923]
assert resp["volume"] == [7, 8]
assert resp["number"] == list(range(37, 43))
def test_parse_description(gallica_resource):
desc = "1922/10 (A4,T7,N37)."
resp = gallica_resource.parse_description(desc)
assert resp["year"] == 1922
assert resp["volume"] == 7
assert resp["number"] == 37
def test_parse_no_description(gallica_resource):
desc = ""
resp = gallica_resource.parse_description(desc)
assert all(v is None for k, v in resp.items())
def test_parse_partial_description(gallica_resource):
desc = "1922/10 (T7)."
resp = gallica_resource.parse_description(desc)
assert resp["year"] == 1922
assert resp["volume"] == 7
assert resp["number"] is None
def test_physical_pno(gallica_resource, pages):
resp = gallica_resource.get_physical_pno("10", pages=pages)
assert resp == "16"
def test_last_pno(gallica_resource, pages):
resp = gallica_resource.get_last_pno(pages)
assert resp == "676"
@pytest.mark.xfail
def test_ocr_find_article_in_journal(gallica_resource):
ark = "ark:/12148/bpt6k9737289z"
target = Article(
title="La contemplation mystique requiert-elle des idées infuses ?",
journaltitle="La Vie spirituelle, ascétique et mystique (Supplément)",
year=1922,
pages=list(range(1, 22)),
author="<NAME>",
)
journal = Resource(ark)
pages = journal.pagination_sync().value
assert not gallica_resource.ocr_find_article_in_journal(journal, pages)
gallica_resource.target = target
assert gallica_resource.ocr_find_article_in_journal(journal, pages)
|
2e0byo/pygallica-autobib
|
gallica_autobib/models.py
|
import datetime
from hashlib import sha1
from typing import List, Optional, Tuple, Union
from pydantic import BaseModel, Field
from slugify import slugify
from .templating import env, latex_env
from .util import prettify, pretty_page_range
record_types = {
"Article": None,
"Journal": "per",
"Book": "mon",
"Collection": "col"
# "Collection:", ["rec", "col", "ens"]
}
VALID_QUERIES = (
"anywhere",
"author",
"title",
"subject",
"doctype",
"recordtype",
"status",
"recordid",
"persistentid",
"ean",
"isbn",
"issn",
"ismn",
"isrc",
"comref",
"otherid",
"abstract",
"authorRole",
"cote",
"date",
"dewey",
"digitized",
"FrenchNationalBibliography",
"fuzzyIsbn",
"isni",
"language",
"LegalDepositType",
"LegalDepositDate",
"local",
"publicationdate",
"publicationplace",
"publisher",
"serialtitle",
"set",
"technicaldata",
"unimarc:doctype",
"col2bib",
"ens2bib",
"rec2bib",
"author2bib",
"subject2bib",
"work2bib",
"creationdate",
"lastmodificationdate",
)
class BibBase(BaseModel):
"""Properties shared with all kinds of bibliographic items."""
publicationdate: Union[int, List[int]] = Field(None, alias="year")
publisher: Optional[str] = None
ark: Optional[str] = None
def __hash__(self) -> int:
return hash(repr(self))
def key(self) -> str:
return sha1(repr(self).encode()).hexdigest()
@staticmethod
def assemble_query(**kwargs: dict) -> str:
"""Put together an sru query from a dict."""
return " and ".join(f'{k} all "{v}"' for k, v in kwargs.items())
def _source(self) -> "RecordTypes":
return self # type: ignore
def translate(self) -> dict:
return self.dict(exclude={"editor"})
@staticmethod
def format_query_item(item: Union[list, str]) -> str:
if isinstance(item, list):
return " ".join(str(x) for x in item)
else:
return item
def generate_query(self) -> str:
"""Get query str"""
source = self._source()
data = source.translate()
data["recordtype"] = record_types[type(source).__name__]
data = {
f"bib.{k}": self.format_query_item(v)
for k, v in data.items()
if v and k in VALID_QUERIES
}
return self.assemble_query(**data)
@property
def omit(self) -> Union[Tuple[str], Tuple]:
return ()
def bibtex_transform(self) -> dict:
return {k: v for k, v in self.dict(by_alias=True).items() if k not in self.omit}
def bibtex(self) -> str:
props = self.bibtex_transform()
name = type(self).__name__.lower()
if "pages" in props.keys():
props["pages"] = pretty_page_range(props["pages"])
if "year" in props.keys() and isinstance(props["year"], list):
props["year"] = prettify(props["year"], True)
return latex_env.get_template(f"{name}.bib").render(rep=props, obj=self)
def ris(self) -> str:
args = {k: v for k, v in dict(self).items() if k not in self.omit}
args["name"] = type(self).__name__.lower()
return env.get_template(f"{args['name']}.ris").render(rep=args, obj=self)
class HasTitle(BibBase):
title: str
subtitle: Optional[str]
class AuthorTitleMixin:
def name(self, short: Optional[int] = None, slug: bool = False) -> str:
if short is None:
n = f"{self.title} ({self.author})" # type: ignore
else:
n = f"{self.title[:short]} ({self.author[:short]})" # type: ignore
if slug:
return slugify(n)
else:
return n
class HasPublisher(HasTitle):
publisher: Optional[str] = None
location: Optional[str] = None
page_count: Optional[int] = None
def bibtex_transform(self) -> dict:
props = super().bibtex_transform()
if self.page_count:
props["note"] = f"{self.page_count} pp."
return props
class Book(HasPublisher, AuthorTitleMixin):
author: str
editor: Optional[str] = None
class Collection(HasPublisher, AuthorTitleMixin):
author: str
editor: Optional[str] = None
class Journal(BibBase):
"""A Journal
args:
journaltitle: the title of the journal
year: Union[list, int]: the year(s) of publication
number: number
volume: vol
"""
journaltitle: str
publicationdate: Union[list, int] = Field(alias="year")
number: Optional[int] = None
volume: Optional[int] = None
def translate(self) -> dict:
data = self.dict(exclude={"journaltitle"})
data["title"] = self.journaltitle
return data
def name(self, short: Optional[int] = None, slug: bool = False) -> str:
if short is not None:
n = f"{self.journaltitle[:short]} {self.publicationdate}"
else:
n = f"{self.journaltitle} {self.publicationdate}"
n += f" vol. {self.volume}" if self.volume else ""
n += f" n. {self.number}" if self.number else ""
if slug:
return slugify(n)
else:
return n
class Article(HasTitle, AuthorTitleMixin):
"""An article."""
journaltitle: str
pages: List[str]
author: str
editor: Optional[str] = None
number: Union[None, int, List[int]] = None
volume: Union[None, int, List[int]] = None
physical_pages: Optional[List[int]] = None
def _source(self) -> Journal:
return Journal.parse_obj(self.dict(by_alias=True))
class NewspaperArticle(BibBase, AuthorTitleMixin):
"""A newspaper article, which has no page range.
This is actually a kind of journal."""
journaltitle: str
date: datetime.date
author: str
title: str
class GallicaBibObj(BaseModel):
"""Class to represent Gallica's response."""
ark: str
title: str
language: str
type: str
date: str
publisher: Optional[str] = None
@staticmethod
def safe_convert(thing: str) -> Optional[int]:
try:
return int(thing)
except ValueError:
return None
def convert(self) -> "RecordTypes":
"""Return the right kind of model."""
data = {
"ark": self.ark,
"title": self.title,
"journaltitle": self.title,
"publisher": self.publisher,
"year": [],
} # type: ignore
for r in self.date.split(","):
split = r.split("-")
if len(split) > 1:
if len(split) > 2:
raise Exception(f"Unable to handle date {r}")
start, end = (self.safe_convert(x) for x in split)
if not start and not end:
raise Exception(f"Unable to handle date {r}")
if start and end:
data["year"] += list(range(int(start), int(end) + 1)) # type: ignore
else:
data["year"] = start if start else end # type: ignore
else:
data["year"].append(int(r)) # type: ignore
return type_to_class[self.type].parse_obj(data)
type_to_class = {"publication en série imprimée": Journal}
RecordTypes = Union[Article, Collection, Book, Journal]
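# Minimal usage sketch (values adapted from the test suite; the author name is
# illustrative):
#   art = Article(journaltitle="La vie spirituelle", pages=["135"], year=1930,
#                 title="Pour lire saint Augustin", author="Chenu")
#   art.generate_query()
#   -> 'bib.publicationdate all "1930" and bib.title all "La vie spirituelle" and bib.recordtype all "per"'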
|
2e0byo/pygallica-autobib
|
tests/test_pipeline.py
|
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from devtools import debug
from gallica_autobib import pipeline
from gallica_autobib.pipeline import BibtexParser, InputParser, RisParser
@pytest.fixture()
def bibtex_parser():
"""A bibtex parser which downloads 1 page."""
with TemporaryDirectory() as tmp_path:
parser = BibtexParser(Path(tmp_path), fetch_only=1)
yield parser
def mock_download_pdf(self, path, blocksize=100, trials=3, fetch_only=None):
with (Path("tests/test_pdfs") / path.name).open("rb") as f:
with path.open("wb") as o:
o.write(f.read())
return True
def mock_ark(self):
    return "https://gallica.bnf.fr/ark:/12148/bpt6k9735634r"
@pytest.fixture()
def mock_bibtex_parser(tmp_path, mocker):
"""A bibtex parser for the pour-lire-augustin result which neither searches nor downloads."""
download_pdf = pipeline.GallicaResource.download_pdf
ark = pipeline.GallicaResource.ark
pipeline.GallicaResource.download_pdf = mock_download_pdf
    pipeline.GallicaResource.ark = property(mock_ark)
parser = pipeline.BibtexParser(tmp_path)
parser.suppress_cover_page = True
yield parser
pipeline.GallicaResource.download_pdf = download_pdf
pipeline.GallicaResource.ark = ark
def test_pipeline_attrs(bibtex_parser):
assert bibtex_parser.progress is None
test_bibliographies_bibtex = [
"""@Article{danielou30:_pour_augus,
author = {<NAME>},
title = {Pour lire saint Augustin},
journaltitle = {La Vie spirituelle},
year = 1930,
language = {french},
volume = 24,
pages = {135-57}}"""
]
ids = ["pour-lire-augustin"]
# downloads 1 page
@pytest.mark.parametrize("bibtex", test_bibliographies_bibtex, ids=ids)
def test_bibtex_parser(bibtex, file_regression, tmp_path, check_pdfs):
parser = BibtexParser(tmp_path, fetch_only=1, clean=False)
parser.read(bibtex)
no = len(parser.records)
assert not parser.progress
parser.run()
assert parser.progress == 1
assert len(parser.executing) == no, "Parser executed other stuff."
assert len(parser.results) == no, "Spurious entry in results."
assert len(parser.results) == no, "Spurious accumulation of results."
res = parser.results[0]
with res.processed.open("rb") as f:
file_regression.check(
f.read(), extension=".pdf", binary=True, check_fn=check_pdfs
)
assert str(parser.generate_outf(res.record.target)) == str(res.unprocessed).replace(
".pdf", "-1.pdf"
)
assert res.record.raw == bibtex
assert res.record.kind == "bibtex"
def test_bibtex_parser_single_thread_clean(
mock_bibtex_parser, file_regression, tmp_path, check_pdfs
):
"""Test the processing step only, mocking the others.
Neither downloads nor searches.
"""
parser = mock_bibtex_parser
assert parser.suppress_cover_page
parser.read(test_bibliographies_bibtex[0])
outf = parser.generate_outf(parser.records[0].target)
result = parser.process_record(
parser.records[0],
outf,
parser.process,
True,
fetch_only=parser.fetch_only,
process_args=parser.process_args,
download_args=parser.download_args,
suppress_cover_page=parser.suppress_cover_page,
)
assert not result.unprocessed
assert not outf.exists()
with result.processed.open("rb") as f:
file_regression.check(
f.read(), extension=".pdf", binary=True, check_fn=check_pdfs
)
def test_bibtex_parser_single_thread_no_clean(
mock_bibtex_parser, file_regression, tmp_path, check_pdfs
):
"""Test not cleaning.
Neither downloads nor searches."""
parser = mock_bibtex_parser
parser.read(test_bibliographies_bibtex[0])
outf = parser.generate_outf(parser.records[0].target)
result = parser.process_record(
parser.records[0],
outf,
parser.process,
False,
fetch_only=parser.fetch_only,
process_args=parser.process_args,
download_args=parser.download_args,
suppress_cover_page=parser.suppress_cover_page,
)
assert result.processed != result.unprocessed
assert result.unprocessed
assert outf.exists() # no need to regression check as same file as above.
def test_bibtex_parser_single_thread_no_process(
mock_bibtex_parser, file_regression, tmp_path, check_pdfs
):
"""Test not processing.
Neither downloads nor searches.
"""
parser = mock_bibtex_parser
parser.read(test_bibliographies_bibtex[0])
outf = parser.generate_outf(parser.records[0].target)
result = parser.process_record(
parser.records[0],
outf,
False,
parser.clean,
fetch_only=parser.fetch_only,
process_args=parser.process_args,
download_args=parser.download_args,
suppress_cover_page=parser.suppress_cover_page,
)
assert not result.processed
assert result.unprocessed
assert outf.exists()
sourcefile = Path("tests/test_pdfs") / outf.name
assert result.unprocessed.stat().st_size == sourcefile.stat().st_size
@pytest.fixture(scope="module")
def parser(fixed_tmp_path):
"""A parser which has loaded something but won't actually download it."""
tmpf = fixed_tmp_path / "pour-lire-saint-augustin-m-d-chenu.pdf"
outf = fixed_tmp_path / "processed-pour-lire-saint-augustin-m-d-chenu.pdf"
with outf.open("w") as f:
f.write("-")
with tmpf.open("w") as f:
f.write("-")
args = dict(skip_existing=True)
parser = BibtexParser(fixed_tmp_path, process_args=args, fetch_only=1)
parser.process_args = {"skip_existing": True}
parser.read(test_bibliographies_bibtex[0])
parser.run()
yield parser
report_types = ["txt", "org", "html"]
@pytest.mark.parametrize("template", report_types)
def test_templates(parser, template, file_regression, fixed_tmp_path):
"""Test templates. Runs queries to generate templates. Doesn't download."""
parser.output_template = template
report = parser.report()
file_regression.check(report)
test_bibliographies_ris = [
[
"""TY - JOUR
TI - Une opinion inconnue de l'école de Gilbert de la Porrée
AU - M.-D. Chenu
JO - Revue d'Histoire Ecclésiastique
VL - 26
IS - 2
SP - 347
EP - 353
SN - 0035-2381
PY - 1930
PB - Université catholique de Louvain.
ER -""",
False,
],
[
"""TY - JOUR
TI - La surnaturalisation des vertus
AU - M.-D. Chenu
T2 - Bulletin Thomiste
PY - 1932
SP - 93
EP - 96
ER -""",
False,
],
]
ris_ids = ["inconnue", "surnaturalisation"]
@pytest.mark.parametrize("ris, status", test_bibliographies_ris, ids=ris_ids)
def test_ris_parser(ris, status, file_regression, tmp_path, check_pdfs):
"""Would download if any matched---but they don't.
This fn can be replaced with an equivalence test on the generated objects between bibtex and ris."""
parser = RisParser(tmp_path, fetch_only=1)
parser.read(ris)
debug(parser.records)
report = parser.run()
if status:
assert "Processed:" in report
        with parser.results[0].processed.open("rb") as f:
file_regression.check(
f.read(), extension=".pdf", binary=True, check_fn=check_pdfs
)
else:
assert "Failed to match :(" in report
res = parser.results[0]
assert res.record.raw == ris
assert res.record.kind == "ris"
def test_base_parser():
parser = InputParser(Path("."))
with pytest.raises(NotImplementedError):
parser.read("Inputstr")
# downloads 1 page
@pytest.mark.asyncio
async def test_submit(mock_bibtex_parser, file_regression, check_pdfs):
mock_bibtex_parser.process = False
pool = ProcessPoolExecutor(1)
mock_bibtex_parser.read(test_bibliographies_bibtex[-1])
mock_bibtex_parser.pool(pool)
assert mock_bibtex_parser.progress is None
await mock_bibtex_parser.submit()
assert mock_bibtex_parser.progress
for result in mock_bibtex_parser.results:
outf = result.unprocessed
sourcef = Path("tests/test_pdfs") / outf.name
assert outf.stat().st_size == sourcef.stat().st_size
assert result.record.kind == "bibtex"
|
2e0byo/pygallica-autobib
|
tests/test_cache.py
|
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from gallica_autobib import cache
@pytest.fixture
def tmp_cache():
with TemporaryDirectory() as tmp_path:
cache.Cached.cachedir = Path(tmp_path) / "cache"
yield cache.Cached
def test_cache(tmp_cache):
print(tmp_cache.cachedir)
cache = tmp_cache("test")
print(cache.cachedir)
assert not cache[7]
cache[7] = "this"
assert cache[7] == "this"
cache[7] = dict(seven=7)
assert cache[7] == dict(seven=7)
del cache
cache = tmp_cache("test")
assert cache[7] == dict(seven=7)
|
2e0byo/pygallica-autobib
|
gallica_autobib/pipeline.py
|
<reponame>2e0byo/pygallica-autobib<gh_stars>0
"""Pipeline to match and convert."""
import asyncio
import logging
from concurrent.futures import Future, ProcessPoolExecutor
from pathlib import Path
from time import sleep
from typing import List, Literal, Optional, TextIO, Union
from urllib.error import URLError
from jinja2 import Template
from pydantic import BaseModel
from slugify import slugify
from .models import RecordTypes
from .parsers import parse_bibtex, parse_ris
from .process import process_pdf
from .query import (
DownloadError,
GallicaResource,
Match,
MatchingError,
Query,
source_match_cache,
)
from .templating import env
logger = logging.getLogger(__name__)
class Record(BaseModel):
"""Input"""
target: RecordTypes
raw: str
kind: Literal["bibtex", "ris"]
class Result(BaseModel):
"""Result of an pipeline run."""
record: Record
match: Optional[Match] = None
unprocessed: Optional[Path] = None
processed: Optional[Path] = None
errors: Optional[List[str]] = None
status: Optional[bool] = None
class Config:
arbitrary_types_allowed = True
class InputParser:
"""Class to parse input. This base class should be subclassed."""
def __init__(
self,
outdir: Path,
        output_template: Optional[Union[str, Path]] = None,
        process_args: Optional[dict] = None,
        download_args: Optional[dict] = None,
process: bool = True,
clean: bool = True,
fetch_only: Optional[int] = None,
ignore_cache: bool = False,
):
self.records: List[Record] = []
self.raw: List[str] = []
self.len_records: int = 0
self.process = process
self.process_args = process_args if process_args else {}
self.download_args = download_args if download_args else {}
self._outfs: List[Path] = []
self.outdir = outdir
self.clean = clean
        self._results: List[Result] = []
self.output_template: Template = output_template # type: ignore
self.fetch_only = fetch_only
self._pool: Optional[ProcessPoolExecutor] = None
self.processes = 6
self.executing: List[Future] = []
self.ignore_cache = ignore_cache
self.suppress_cover_page: bool = False
@property
def successful(self) -> int:
        return len([x for x in self.results if x.status])
@property
def total(self) -> int:
return len(self.results)
@property
    def results(self) -> List[Result]:
for x in self.executing:
if x.done():
res = x.result()
if res not in self._results:
self._results.append(res)
return self._results
@property
def output_template(self) -> Template:
return self._output_template
@output_template.setter
    def output_template(self, output_template: Optional[Union[str, Path]] = None) -> None:
if isinstance(output_template, str):
self._output_template = env.get_template(f"output.{output_template}")
elif isinstance(output_template, Path):
self._output_template = Template(output_template.open().read())
else:
self._output_template = env.get_template("output.txt")
@property
def progress(self) -> Optional[float]:
"""Progress in matching or failing."""
if not self.executing:
return None
return len([x for x in self.executing if x.done()]) / len(self.executing)
def read(self, stream: Union[TextIO, str]) -> None:
"""Read input data."""
raise NotImplementedError
def generate_outf(self, result: RecordTypes) -> Path:
        outf = self.outdir / (slugify(result.name()) + ".pdf")
i = 0
while outf in self._outfs:
i += 1
outf = self.outdir / (slugify(f"{result.name()} {i}") + ".pdf")
self._outfs.append(outf)
return outf
def pool(self, pool: Optional[ProcessPoolExecutor] = None) -> ProcessPoolExecutor:
"""Create or register pool, or return pool if extant."""
        if pool:
            if self._pool:
                self._pool.shutdown(wait=True)
            self._pool = pool
        else:
            self._pool = ProcessPoolExecutor(self.processes)
return self._pool # type: ignore
def run(self) -> str:
"""Run query, blocking until finished.
Returns:
Rendered report.
"""
logger.debug("Generating tasks.")
self.executing = self._send_records()
while self.progress < 1: # type: ignore
sleep(1)
return self.report()
def _send_records(self) -> List[Future]:
"""Send records to pool."""
return [
self.pool().submit(
self.process_record,
record,
self.generate_outf(record.target),
self.process,
self.clean,
fetch_only=self.fetch_only,
process_args=self.process_args,
download_args=self.download_args,
cache=not self.ignore_cache,
suppress_cover_page=self.suppress_cover_page,
)
for record in self.records
]
async def submit(self) -> str:
"""Submit query to pool.
This is designed for use in web servers which may wish to use a
centrally defined pool shared between n queries.
Returns:
Rendered report.
"""
logger.debug("Submitting tasks")
futures = self._send_records()
self.executing = futures
await asyncio.gather(*[asyncio.wrap_future(f) for f in futures])
return self.report()
def report(self) -> str:
return self.output_template.render(obj=self)
@staticmethod
def process_record(
record: Record,
outf: Path,
process: bool,
clean: bool,
        fetch_only: Optional[int] = None,
process_args: Optional[dict] = None,
download_args: Optional[dict] = None,
cache: bool = True,
suppress_cover_page: bool = False,
) -> Result:
"""
Run pipeline on item, returning a Result() object.
"""
key = record.target.key()
match = source_match_cache[key] if cache else None
if not match:
query = Query(record.target)
match = query.run()
args = dict(record=record) # type: ignore
if not process_args:
process_args = {}
if not download_args:
download_args = {}
if not match:
logger.info(f"No match found for {record.target.name}")
args["status"] = None # type: ignore
return Result.parse_obj(args)
logger.debug("Generating gallica resource.")
gallica_resource = GallicaResource(record.target, match.candidate, cache=cache)
gallica_resource.suppress_cover_page = suppress_cover_page
if not download_args:
download_args = {}
try:
logger.debug("Starting download.")
gallica_resource.download_pdf(outf, fetch_only=fetch_only, **download_args)
args["match"] = gallica_resource.match # type: ignore
except MatchingError as e:
logger.info(f"Failed to match. ({e})")
args["errors"] = [str(e)] # type: ignore
args["status"] = None # type: ignore
return Result.parse_obj(args)
except (URLError, DownloadError) as e:
logger.info(f"Failed to download. {e}")
args["errors"] = [str(e)] # type: ignore
args["status"] = False # type: ignore
return Result.parse_obj(args)
args["status"] = True # type: ignore
if process:
logger.debug("Processing...")
processed = process_pdf(
outf, has_cover_page=not suppress_cover_page, **process_args
)
args["processed"] = processed # type: ignore
if clean:
logger.debug("Deleting original file.")
outf.unlink()
else:
args["unprocessed"] = outf # type: ignore
return Result.parse_obj(args)
else:
args["unprocessed"] = outf # type: ignore
return Result.parse_obj(args)
class BibtexParser(InputParser):
"""Class to parse bibtex."""
def read(self, stream: Union[TextIO, str]) -> None:
"""Read a bibtex file-like object and convert to records.
Args:
stream: Union[TextIO: stream to read
str]: string to parse.
"""
records, raw = parse_bibtex(stream)
self.records = [
Record(target=records[i], raw=raw[i], kind="bibtex")
for i in range(len(records))
]
class RisParser(InputParser):
"""Class to parse ris."""
def read(self, stream: Union[TextIO, str]) -> None:
"""Read a ris file-like object and convert to records.
Args:
stream: Union[TextIO: stream to read
str]: string to parse.
"""
records, raw = parse_ris(stream)
self.records = [
Record(target=records[i], raw=raw[i], kind="ris")
for i in range(len(records))
]
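# Minimal usage sketch (paths are illustrative):
#   parser = BibtexParser(Path("out/"))
#   with open("refs.bib") as f:
#       parser.read(f)
#   print(parser.run())  # blocks until all records are processed, then reports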
|
2e0byo/pygallica-autobib
|
gallica_autobib/util.py
|
<reponame>2e0byo/pygallica-autobib
from typing import Union
import roman
def pretty_page_range(pages: list[str]) -> str:
"""Prettify a page range."""
ranges: list[dict] = []
try:
int(pages[0])
arabic = True
except ValueError:
arabic = False
pp = []
for p in pages:
if arabic:
try:
pp.append(int(p))
except ValueError:
ranges.append(dict(arabic=arabic, pages=pp))
arabic = False
pp = [roman.fromRoman(p.upper())]
else:
try:
pp.append(roman.fromRoman(p.upper()))
except roman.InvalidRomanNumeralError:
ranges.append(dict(arabic=arabic, pages=pp))
arabic = True
pp = [int(p)]
ranges.append(dict(arabic=arabic, pages=pp))
pretty = []
for r in ranges:
pp = [r["pages"][0]]
arabic = r["arabic"]
for pqr in r["pages"][1:]:
if pqr == pp[-1] + 1:
pp.append(pqr)
else:
pretty.append(prettify(pp, arabic))
pp = [pqr]
pretty.append(prettify(pp, arabic))
return ", ".join(pretty)
def prettify(pages: list[int], arabic: bool) -> str:
"""Pages is a continuous range of ints."""
if arabic:
start = str(pages[0])
end = str(pages[-1])
if len(start) == len(end):
end = "".join(end[i] for i in range(len(end)) if end[i] != start[i])
return f"{start}--{end}"
else:
# for now we don't do anything clever with roman numerals, although
# combining is possible.
return f"{roman.toRoman(pages[0]).lower()}--{roman.toRoman(pages[-1]).lower()}"
def deprettify(rangestr: Union[str, int]) -> Union[list[int], int, None]:
try:
return int(rangestr)
except ValueError:
pass
pages = []
ranges = rangestr.split(",") # type: ignore
for r in ranges:
try:
start, end = r.replace("--", "-").split("-")
ls, le = len(start), len(end)
if le < ls:
end = start[: ls - le] + end
pages += list(range(int(start), int(end) + 1))
except ValueError:
pages.append(int(r))
return pages if len(pages) > 1 else pages[0] if pages else None
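# Illustrative round trip (cf. the page ranges in the tests):
#   pretty_page_range([str(p) for p in range(135, 158)])  -> "135--57"
#   deprettify("135--57")  -> [135, 136, ..., 157]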
|
2e0byo/pygallica-autobib
|
tests/test_query.py
|
from copy import deepcopy
import pytest
from gallica_autobib.models import Article, Book, Journal
from gallica_autobib.query import GallicaSRU, Match, Query, make_string_boring
strings = [["asciitest", "asciitest"], [None, None]]
@pytest.mark.parametrize("inp,out", strings)
def test_boring_string(inp, out):
assert make_string_boring(inp) == out
def test_match_duplicate():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="Daniélou",
year=1930,
)
b = a.copy()
m = Match(a, b)
assert m.score == 1
def test_close_match():
a = Journal(journaltitle="La vie spirituelle", year=1930)
b = Journal(
journaltitle="La vie spirituelle, ascétique et mystique",
year=list(range(1920, 1950)),
)
m = Match(a, b)
assert m.score > 0.7
def test_match_repr(data_regression):
a = Journal(journaltitle="La vie spirituelle", year=1930)
b = Journal(
journaltitle="La vie spirituelle, ascétique et mystique",
year=list(range(1920, 1950)),
)
m = Match(a, b)
data_regression.check(repr(m))
def test_sort_match():
a = Journal(journaltitle="La vie spirituelle", year=1930)
c = Journal(journaltitle="La vie spirituelle", year=1931)
b = Journal(
journaltitle="La vie spirituelle, ascétique et mystique",
year=list(range(1920, 1950)),
)
m1 = Match(a, b)
m2 = Match(a, c)
assert m1 == deepcopy(m1)
assert m1 > m2
assert m2 < m1
def test_missing_editor():
a = Book(
title="My very long title",
year=1960,
publisher="Cerf",
author="me",
editor="you",
)
b = Book(title="My very long title", year=1960, publisher="Cerf", author="me")
m = Match(a, b)
assert m.score > 0.7
query_candidates = [
[
{"title": "La vie spirituelle", "recordtype": "per"},
'bib.title all "la vie spirituelle" and bib.recordtype all "per',
],
[{"title": "la vie spirituelle"}, 'bib.title all "la vie spirituelle'],
]
@pytest.fixture
def query():
a = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 138)),
title="Pour lire saint Augustin",
author="Daniélou",
year=1930,
)
q = Query(a)
yield q
def test_bibobj(query):
data = {
"schema": "dc",
"identifier": [
"http://catalogue.bnf.fr/ark:/12148/cb34406663m",
"ISSN 09882480",
],
"title": "La Vie spirituelle, ascétique et mystique",
"publisher": "Le Cerf (Paris)",
"date": "1919-1945",
"language": ["fre", "français"],
"type": [
{"lang": "fre", "text": "publication en série imprimée"},
{"lang": "eng", "text": "printed serial"},
{"lang": "eng", "text": "text"},
],
}
resp = query.resp_to_obj(data.copy())
assert isinstance(resp, Journal)
assert resp.ark == data["identifier"][0]
assert len(resp.ark) > 1
assert resp.journaltitle == data["title"]
assert resp.publisher == data["publisher"]
def test_get_at_str(query):
assert query.get_at_str("this") == "this"
assert query.get_at_str(["this"]) == "this"
assert query.get_at_str(["this", "that"]) == "this"
assert query.get_at_str(None) is None
assert query.get_at_str([None, "this"]) is None
    identifiers = [
        "http://catalogue.bnf.fr/ark:/12148/cb34406663m",
        "ISSN 09882480",
    ]
    assert query.get_at_str(identifiers) == "http://catalogue.bnf.fr/ark:/12148/cb34406663m"
def test_gallica_sru():
g = GallicaSRU()
assert [k for k, v in g.__repr_args__()] == ["client"]
|
2e0byo/pygallica-autobib
|
gallica_autobib/gallipy/helpers.py
|
<gh_stars>0
"""
Gallipy - Python wrapper for the Gallica APIs
Copyright (C) 2019 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
https://github.com/GeoHistoricalData/gallipy
"""
import json
import urllib.error
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
from .monadic import Either, Left
_BASE_PARTS = {"scheme": "https", "netloc": "gallica.bnf.fr"}
def fetch(url, timeout=30):
"""Fetches data from an URL
Fetch data from URL and wraps the unicode encoded response in an Either object.
Args:
url (str): An URL to fetch.
timeout (:obj:int, optional): Sets a timeout delay (Optional).
Returns:
Either[Exception Unicode]: The response content if everything went fine
and Exception otherwise.
"""
try:
with urllib.request.urlopen(url, timeout=timeout) as res:
content = res.read()
if content:
return Either.pure(content)
raise Exception("Empty response from {}".format(url))
except Exception as ex:
pattern = "Error while fetching URL {}\n{}"
err = urllib.error.URLError(pattern.format(url, str(ex)))
return Left(err)
def fetch_xml_html(url, parser="xml", timeout=30):
"""Fetches xml or html from an URL
Retrieves xml or html data from an URL and wraps it in an Either object.
The resulting data is a simple utf-8 string.
Args:
url (str): An URL to fetch.
parser (str): Any BeautifulSoup4 parser, e.g. 'html.parser'. Default: xml.
timeout (:obj:int, optional): Sets a timeout delay (Optional).
Returns:
Either[Exception String]: String if everything went fine, Exception
otherwise.
"""
try:
return fetch(url, timeout).map(lambda res: str(BeautifulSoup(res, parser)))
except urllib.error.URLError as ex:
pattern = "Error while fetching XML from {}\n{}"
err = urllib.error.URLError(pattern.format(url, str(ex)))
return Left(err)
def fetch_json(url, timeout=30):
"""Fetches json from an URL
Retrieves json data from an URL and wraps it in an Either object.
Args:
url (str): An URL to fetch.
timeout (:obj:int, optional): Sets a timeout delay (Optional).
Returns:
Either[Exception Unicode]: Unicode if everything went fine and
Exception otherwise.
"""
try:
        return fetch(url, timeout).map(json.loads)
except urllib.error.URLError as ex:
pattern = "Error while fetching JSON from {}\n{}"
err = urllib.error.URLError(pattern.format(url, str(ex)))
return Left(err)
def build_service_url(parts=None, service_name=""):
"""Creates an URL to access Gallica services
Given a dictionary of urllib URL parts and a service name,
builds a complete URL to reach and query this Web service on Gallica.
Args:
parts (dict): URL parts used to build the URL.
See the doc of urllib.parse
service_name (str): name of the service to query.
service_name will be ignored if parts has a key named 'path'.
Returns:
str: A string representation of the URL built.
"""
this_parts = {"path": "services/" + service_name}
all_parts = _BASE_PARTS.copy()
all_parts.update(this_parts)
    all_parts.update(parts or {})
return build_url(all_parts)
def build_base_url(parts=None, ark=None):
"""Creates the URL of a Gallica document from its ARK ID.
    Given an ARK ID and a dictionary of URL parts, builds a complete URL
    to reach the document identified by this ARK ID on Gallica.
Args:
parts (dict): URL parts used to build the URL. See the doc of urllib.parse
ark (str): ARK ID of the document. Parameter ark must be unset if parts['path']
already contains the ARK ID of the document.
Returns:
str: The URL to reach the document identified by ark, as a string.
"""
this_parts = {"path": str(ark)}
all_parts = _BASE_PARTS.copy()
all_parts.update(this_parts)
    all_parts.update(parts or {})
return build_url(all_parts)
def build_url(parts, quote_via=urllib.parse.quote_plus):
"""Creates a URL from a dictionary of parts.
    Creates a URL from a dictionary of urllib parts. See the documentation
    of urllib.parse.
Args:
parts (dict): The parts of the URL to build.
quote_via (function): A function to encode spaces and special characters.
                              Defaults to quote_plus. See the documentation of
urllib.parse.urlencode.
Returns:
str: The URL as a string.
"""
all_parts = parts.copy()
query = parts.get("query")
if query:
all_parts["query"] = urllib.parse.urlencode(query, quote_via=quote_via)
elements = ["scheme", "netloc", "path", "params", "query", "fragment"]
sorted_parts = [all_parts.get(key) for key in elements]
return urllib.parse.urlunparse(sorted_parts)
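# Illustrative sketch (service name and ark are examples):
#   build_service_url({"query": {"ark": "ark:/12148/bpt6k9735634r"}}, "Pagination")
#   -> "https://gallica.bnf.fr/services/Pagination?ark=ark%3A%2F12148%2Fbpt6k9735634r"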
|
2e0byo/pygallica-autobib
|
gallica_autobib/gallipy/monadic.py
|
# Shamelessly copy/pasted from this awesome article:
# https://www.toptal.com/javascript/option-maybe-either-future-monads-js
# by <NAME>
import threading
from functools import reduce
class Monad:
# pure :: a -> M a. Same as unit: a -> M a
@staticmethod
def pure(x):
raise Exception("pure method needs to be implemented")
# flat_map :: # M a -> (a -> M b) -> M b
def flat_map(self, f):
raise Exception("flat_map method needs to be implemented")
# map :: # M a -> (a -> b) -> M b
def map(self, f):
return self.flat_map(lambda x: self.pure(f(x)))
class Option(Monad):
# pure :: a -> Option a
@staticmethod
def pure(x):
return Some(x)
# flat_map :: # Option a -> (a -> Option b) -> Option b
def flat_map(self, f):
if self.defined:
return f(self.value)
else:
return nil
class Some(Option):
def __init__(self, value):
self.value = value
self.defined = True
class Nil(Option):
def __init__(self):
self.value = None
self.defined = False
nil = Nil()
class Either(Monad):
# pure :: a -> Either a
@staticmethod
def pure(value):
return Right(value)
# flat_map :: # Either a -> (a -> Either b) -> Either b
def flat_map(self, f):
if self.is_left:
return self
else:
return f(self.value)
class Left(Either):
def __init__(self, value):
self.value = value
self.is_left = True
class Right(Either):
def __init__(self, value):
self.value = value
self.is_left = False
class Future(Monad):
# __init__ :: ((Either err a -> void) -> void) -> Future (Either err a)
def __init__(self, f):
self.subscribers = []
self.cache = nil
self.semaphore = threading.BoundedSemaphore(1)
f(self.callback)
# pure :: a -> Future a
@staticmethod
def pure(value):
return Future(lambda cb: cb(Either.pure(value)))
    @staticmethod
    def exec(f, cb):
try:
data = f()
cb(Right(data))
except Exception as err:
cb(Left(err))
    @staticmethod
    def exec_on_thread(f, cb):
t = threading.Thread(target=Future.exec, args=[f, cb])
t.start()
    @staticmethod
    def asyn(f):
return Future(lambda cb: Future.exec_on_thread(f, cb))
# flat_map :: (a -> Future b) -> Future b
def flat_map(self, f):
return Future(
lambda cb: self.subscribe(
lambda value: cb(value)
if (value.is_left)
else f(value.value).subscribe(cb)
)
)
# traverse :: [a] -> (a -> Future b) -> Future [b]
    @staticmethod
    def traverse(arr):
return lambda f: reduce(
lambda acc, elem: acc.flat_map(
lambda values: f(elem).map(lambda value: values + [value])
),
arr,
Future.pure([]),
)
# callback :: Either err a -> void
def callback(self, value):
self.semaphore.acquire()
self.cache = Some(value)
while len(self.subscribers) > 0:
sub = self.subscribers.pop(0)
t = threading.Thread(target=sub, args=[value])
t.start()
self.semaphore.release()
# subscribe :: (Either err a -> void) -> void
def subscribe(self, subscriber):
self.semaphore.acquire()
if self.cache.defined:
self.semaphore.release()
subscriber(self.cache.value)
else:
self.subscribers.append(subscriber)
self.semaphore.release()
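# Minimal illustrative usage:
#   Either.pure(2).map(lambda x: x + 1)  # Right(3)
#   Left(ValueError()).map(lambda x: x)  # still the same Left: map skips Lefts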
|
2e0byo/pygallica-autobib
|
tests/test_process.py
|
from collections import namedtuple
from pathlib import Path
import pytest
from gallica_autobib.process import (
ExtractionError,
deanomalise,
detect_spine,
extract_image,
filter_algorithm_brute_force,
generate_filename,
get_crop_bounds,
prepare_img,
process_pdf,
)
from PIL import Image
from PyPDF4 import PdfFileReader
def test_extract_no_image():
with pytest.raises(ExtractionError, match=".*No image.*"):
reader = PdfFileReader("tests/test_process/test-blank.pdf")
page1 = reader.getPage(0)
extract_image(page1)
ImgTest = namedtuple("ImgTest", ("testfile", "type"))
test_image_pdf = [
ImgTest(Path("tests/test_process/test5.pdf"), "png"),
ImgTest(Path("tests/test_process/test2.pdf"), "jpg"),
ImgTest(Path("tests/test_process/test3.pdf"), "jp2"),
ImgTest(Path("tests/test_process/tiff.pdf"), "tiff"),
]
@pytest.mark.parametrize("img_test", test_image_pdf)
def test_extract_image(img_test, image_regression, tmp_path):
with img_test.testfile.open("rb") as f:
reader = PdfFileReader(f)
page1 = reader.getPage(0)
img, type_ = extract_image(page1)
assert type_ == img_test.type
outf = tmp_path / f"test.{type_}"
img.save(str(outf))
with outf.open("rb") as f:
image_regression.check(f.read())
def test_deanomalise():
assert deanomalise([0, 1, 1, 5]) == 1
assert deanomalise([1, 1, 1]) == 1
assert deanomalise([12]) == 12
def test_detect_spine():
inf = "tests/test_process/lh.jpg"
img = Image.open(inf)
img = prepare_img(img, 128)
assert detect_spine(img).lh_page
inf = "tests/test_process/rh.jpg"
img = Image.open(inf)
img = prepare_img(img, 128)
assert not detect_spine(img).lh_page
def test_crop_bounds_lh():
inf = "tests/test_process/lh.jpg"
img = Image.open(inf)
bbox = (46, 116, 841, 1393)
res = get_crop_bounds(img)
for i, val in enumerate(bbox):
assert abs(res[i] - val) < 7
def test_crop_bounds_rh():
inf = "tests/test_process/rh.jpg"
img = Image.open(inf)
bbox = (161, 158, 899, 1394)
res = get_crop_bounds(img)
for i, val in enumerate(bbox):
assert abs(res[i] - val) < 7
filter_tests = [
"tests/test_process/rh.jpg",
"tests/test_process/lh.jpg",
"tests/test_process/aug-000.jpg",
"tests/test_process/aug-001.jpg",
"tests/test_process/aug-002.jpg",
"tests/test_process/aug-020.jpg",
"tests/test_process/ascese-000.jpg",
"tests/test_process/ascese-001.jpg",
"tests/test_process/rais-003.jpg",
"tests/test_process/rais-004.jpg",
"tests/test_process/tiff-000.tif",
]
@pytest.mark.parametrize("inf", filter_tests)
def test_filter_brute_force(inf, image_regression, tmp_path):
img = Image.open(inf)
img = img.crop(get_crop_bounds(img))
if img.mode != "1":
img = filter_algorithm_brute_force(img)
img.save(f"{tmp_path}/test.jpg")
with (tmp_path / f"test.jpg").open("rb") as f:
image_regression.check(f.read())
def test_process_pdf_no_preserve(file_regression, tmp_path, check_pdfs):
inf = Path("tests/test_gallica_resource/test_download_pdf.pdf")
process_pdf(inf, tmp_path / "test1.pdf", has_cover_page=True)
with (tmp_path / "test1.pdf").open("rb") as f:
file_regression.check(
f.read(), extension=".pdf", binary=True, check_fn=check_pdfs
)
def test_process_pdf_preserve(file_regression, tmp_path, check_pdfs):
inf = Path("tests/test_gallica_resource/test_download_pdf.pdf")
with inf.open("rb") as i:
with (tmp_path / "test.pdf").open("wb") as f:
f.write(i.read())
inf = tmp_path / "test.pdf"
process_pdf(inf, preserve_text=True, has_cover_page=True)
with (tmp_path / "processed-test.pdf").open("rb") as f:
file_regression.check(
f.read(), extension=".pdf", binary=True, check_fn=check_pdfs
)
@pytest.mark.xfail
def test_process_pdf_equal_size(file_regression, tmp_path, check_pdfs):
inf = Path("tests/test_gallica_resource/test_download_pdf.pdf")
process_pdf(inf, tmp_path / "test1.pdf", equal_size=True, has_cover_page=True)
with (tmp_path / "test1.pdf").open("rb") as f:
file_regression.check(
f.read(), extension=".pdf", binary=True, check_fn=check_pdfs
)
def test_generate_filename(tmp_path):
start = tmp_path / "test-1.txt"
outf = generate_filename(start)
with outf.open("w") as f:
f.write("-")
outf = generate_filename(start)
assert outf == tmp_path / "test-1-0.txt"
with outf.open("w") as f:
f.write("!-")
outf = generate_filename(start)
assert outf == tmp_path / "test-1-1.txt"
start = tmp_path / "augustin.pdf"
outf = generate_filename(start)
with outf.open("w") as f:
f.write("=")
outf = generate_filename(start)
assert outf == tmp_path / "augustin-0.pdf"
|
2e0byo/pygallica-autobib
|
tests/test_parsers.py
|
<filename>tests/test_parsers.py
from io import StringIO
import pytest
from gallica_autobib.models import Article
from gallica_autobib.parsers import ParsingError, parse_bibtex, parse_ris
def test_invalid_bibtex():
with pytest.raises(ParsingError, match="Unable to parse"):
parse_bibtex("loadanonsensestring")
parse_bibtex(None)
def test_invalid_ris():
with pytest.raises(ParsingError, match="Unable to parse"):
parse_ris("loadanonsensestring")
parse_ris(None)
def test_bib_article():
bib = r"""
@Article{danielou30:_pour_augus,
author = {<NAME>},
title = {Pour lire saint Augustin},
journaltitle = {La Vie spirituelle},
year = 1930,
language = {french},
volume = 24,
pages = {135--57}}
"""
art = Article(
journaltitle="La Vie spirituelle",
pages=list(range(135, 158)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
language="french",
volume=24,
)
objs, raw = parse_bibtex("\n\n".join([bib * 3]))
assert objs[0] == objs[1]
assert objs[0] == art
assert len(objs) == len(raw)
assert raw[0] == "\n".join([x for x in bib.split("\n") if x.strip()])
assert parse_bibtex(StringIO(bib))[0][0] == art
def test_bib_article_one_page():
bib = """
@Article{danielou30:_pour_augus,
author = {<NAME>},
title = {Pour lire saint Augustin},
journaltitle = {La Vie spirituelle},
year = 1930,
language = {french},
volume = 24,
pages = 12}
"""
art = Article(
journaltitle="La Vie spirituelle",
pages=[12],
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
language="french",
volume=24,
)
assert parse_bibtex(bib)[0][0] == art
def test_bib_article_roman():
bib = """
@Article{danielou30:_pour_augus,
author = {<NAME>},
title = {Pour lire saint Augustin},
journaltitle = {La Vie spirituelle},
year = 1930,
language = {french},
volume = 24,
pages = {i-xi}}
"""
art = Article(
journaltitle="La Vie spirituelle",
pages=["i", "ii", "iii", "iv", "v", "vi", "vii", "viii", "ix", "x", "xi"],
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
language="french",
volume=24,
)
assert parse_bibtex(bib)[0][0] == art
def test_bib_inbook():
bib = """
@inbook{danielou30:_pour_augus,
author = {<NAME>},
title = {Pour lire saint Augustin},
journaltitle = {La Vie spirituelle},
year = 1930,
language = {french},
volume = 24,
pages = {i-xi},
url={http://nonsuch.org}
}
"""
with pytest.raises(ParsingError, match=".*Unsupported.*"):
parse_bibtex(bib)
def test_ris_article():
ris = """
TY - JOUR
TI - HENRI BREMOND E IL MODERNISMO
AU - Savignano, Armando
C1 - Full publication date: ottobre-dicembre 1982
DB - JSTOR
EP - 649
IS - 4
PB - Vita e Pensiero – Pubblicazioni dell’Università Cattolica del Sacro Cuore
PY - 1982
SN - 00356247, 18277926
SP - 627
T2 - Rivista di Filosofia Neo-Scolastica
UR - http://www.jstor.org/stable/43061043
VL - 74
Y2 - 2021/05/07/
ER -
"""
art = Article(
journaltitle="Rivista di Filosofia Neo-Scolastica",
volume=74,
pages=list(range(627, 650)),
title="HENRI BREMOND E IL MODERNISMO",
author="<NAME>",
year="1982",
publisher="Vita e Pensiero – Pubblicazioni dell’Università Cattolica del Sacro Cuore",
number=4,
)
objs, raw = parse_ris("\n".join([ris * 3]))
assert objs[0] == objs[1]
assert objs[0] == art
assert len(objs) == len(raw)
assert raw[0] == "\n".join([x for x in ris.split("\n") if x.strip()])
assert parse_ris(StringIO(ris))[0][0] == art
def test_ris_other():
ris = """
TY - ABST
TI - HENRI BREMOND E IL MODERNISMO
AU - Savignano, Armando
C1 - Full publication date: ottobre-dicembre 1982
DB - JSTOR
EP - 649
IS - 4
PB - Vita e Pensiero – Pubblicazioni dell’Università Cattolica del Sacro Cuore
PY - 1982
SN - 00356247, 18277926
SP - 627
T2 - Rivista di Filosofia Neo-Scolastica
UR - http://www.jstor.org/stable/43061043
VL - 74
Y2 - 2021/05/07/
ER -
"""
with pytest.raises(ParsingError, match=".*Unsupported.*"):
parse_ris(ris)
|
2e0byo/pygallica-autobib
|
gallica_autobib/cli.py
|
<reponame>2e0byo/pygallica-autobib
import logging
from pathlib import Path
from typing import Dict, Optional
import typer
from . import __version__
from .pipeline import BibtexParser, RisParser
logger = logging.getLogger(__name__)
class AutoBibError(Exception):
pass
log_level = [logging.NOTSET, logging.ERROR, logging.DEBUG]
app = typer.Typer()
def version_callback(value: bool) -> None:
if value:
typer.echo(f"Gallica-Autobib Version: {__version__}")
raise typer.Exit()
@app.command()
def process_bibliography(
bibfile: Path = typer.Argument(..., help="Bibliographic file to read."),
outdir: Path = typer.Argument(..., help="Output directory."),
version: Optional[bool] = typer.Option(
None, "--version", callback=version_callback
),
post_process: bool = typer.Option(True, help="Post-process download."),
preserve_text: bool = typer.Option(True, help="Preserve text in post processing."),
processes: int = typer.Option(
6,
help="Number of processes to run. We are largely network bound so > nproc might make sense.",
),
clean: bool = typer.Option(True, help="Clean up intermediate files."),
template: Path = typer.Option(None, help="Path to output template to use."),
template_format: str = typer.Option(
None,
help="Which internal template to use. Ignored if a template path is provided.",
),
verbosity: int = typer.Option(1, help="Verbosity between 0 and 2."),
out: Path = typer.Option(None, help="Output path for report. Default is STDOUT."),
ignore_cache: bool = typer.Option(
False,
help="Ignore cache and rematch. Note this will overwrite the cache with any matches.",
),
suppress_cover_page: bool = typer.Option(
False, help="Suppress Gallica's cover page."
),
) -> None:
"""
Process a bibliography file.
"""
process_args = {"preserve_text": preserve_text}
download_args: Dict[str, bool] = {}
logging.basicConfig(level=log_level[verbosity])
args = dict(
outdir=outdir,
process_args=process_args,
download_args=download_args,
process=post_process,
clean=clean,
output_template=template if template else template_format,
ignore_cache=ignore_cache,
)
if bibfile.suffix == ".bib":
logger.debug("Detected bibtex.")
parser = BibtexParser(**args) # type: ignore
elif bibfile.suffix == ".ris":
logger.debug("Detected ris.")
parser = RisParser(**args) # type: ignore
else:
raise AutoBibError("Input is not bibtex or ris.")
parser.processes = processes
parser.suppress_cover_page = suppress_cover_page
with bibfile.open() as f:
parser.read(f)
report = parser.run()
if out:
with out.open("w") as f:
f.write(report)
else:
print(report)
if __name__ == "__main__":
app()
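# Example invocation (paths and flags are illustrative):
#   python -m gallica_autobib.cli refs.bib out/ --no-post-process --verbosity 2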
|
2e0byo/pygallica-autobib
|
tests/conftest.py
|
<reponame>2e0byo/pygallica-autobib<gh_stars>0
import logging
import shutil
from pathlib import Path
import pytest
from diff_pdf_visually import pdfdiff
logging.basicConfig(level=logging.DEBUG)
from gallica_autobib.models import Article, Journal
from gallica_autobib.query import GallicaResource
@pytest.fixture
def check_pdfs():
def check(a, b):
assert pdfdiff(a, b, threshold=30), f"Pdf files {a} and {b} differ"
yield check
@pytest.fixture(scope="module")
def fixed_tmp_path():
path = Path("/tmp/pytest-template-tmpdir/")
if path.exists():
raise Exception("tmpdir exists")
path.mkdir()
yield path
shutil.rmtree(path)
@pytest.fixture
def gallica_resource():
target = Article(
journaltitle="La vie spirituelle",
pages=list(range(135, 158)),
title="Pour lire saint Augustin",
author="<NAME>",
year=1930,
)
source = Journal(
        year=list(range(1919, 1946)),
publisher="Le Cerf (Paris)",
ark="http://catalogue.bnf.fr/ark:/12148/cb34406663m",
journaltitle="La Vie spirituelle, ascétique et mystique",
number=None,
volume=None,
)
yield GallicaResource(target, source)
|
2e0byo/pygallica-autobib
|
gallica_autobib/templating.py
|
<reponame>2e0byo/pygallica-autobib
from jinja2 import Environment, PackageLoader, select_autoescape
latex_env = Environment(
block_start_string="\\BLOCK{",
block_end_string="}",
variable_start_string="\\VAR{",
variable_end_string="}",
comment_start_string="\\#{",
comment_end_string="}",
line_statement_prefix="%%",
line_comment_prefix="%#",
trim_blocks=True,
autoescape=False,
loader=PackageLoader("gallica_autobib", "templates"),
)
env = Environment(
loader=PackageLoader("gallica_autobib", "templates"),
autoescape=select_autoescape(["html", "xml"]),
)
|
2e0byo/pygallica-autobib
|
gallica_autobib/query.py
|
<reponame>2e0byo/pygallica-autobib
import imghdr
import logging
import unicodedata
from functools import total_ordering
from pathlib import Path
from re import search
from time import sleep
from traceback import print_exc
from typing import (
TYPE_CHECKING,
Any,
Generator,
List,
Optional,
OrderedDict,
Tuple,
Union,
)
import sruthi
from bs4 import BeautifulSoup
from fuzzysearch import find_near_matches
from fuzzywuzzy import fuzz
from pydantic.utils import Representation
from PyPDF4 import PageRange, PdfFileMerger
from requests_downloader import downloader
from sruthi.response import SearchRetrieveResponse
from .cache import Cached
from .gallipy import Ark, Resource
from .models import Article, Book, Collection, GallicaBibObj, Journal
if TYPE_CHECKING: # pragma: nocover
from pydantic.typing import ReprArgs # pragma: nocover
Pages = OrderedDict[str, OrderedDict[str, OrderedDict]]
ark_cache = Cached("ark")
source_match_cache = Cached("source_match")
class MatchingError(Exception):
pass
class DownloadError(Exception):
pass
def make_string_boring(unicodestr: str) -> Optional[str]:
"""Return unicode str as ascii for fuzzy matching."""
if not unicodestr:
return None
normal = unicodedata.normalize("NFKD", unicodestr)
return normal.lower().strip()
@total_ordering
class Match(
Representation,
):
"""Object representing a match."""
def __init__(self, target: Any, candidate: Any):
self.target = target
self.candidate = candidate
self._score: Optional[float] = None
@property
def score(self) -> float:
if not self._score:
self._score = self._calculate_score()
return self._score
@property
def confidence(self) -> str:
return f"{self.score*100:2.4} %"
def _calculate_score(self) -> float:
"""Calculate the score for a given match."""
vals = {}
candidate = self.candidate
for k, v in self.target.dict().items():
candidate_v = getattr(candidate, k)
            if v and not candidate_v:
                vals[k] = 0.5
                continue
if isinstance(v, str):
vals[k] = (
fuzz.ratio(make_string_boring(v), make_string_boring(candidate_v))
/ 100
)
if isinstance(v, int):
if not candidate_v:
vals[k] = 0.5
continue
if isinstance(candidate_v, int):
vals[k] = 1 if candidate_v == v else 0
elif isinstance(candidate_v, list):
vals[k] = 1 if v in candidate_v else 0
else:
raise NotImplementedError(v, candidate_v)
if isinstance(v, list):
matches = []
if isinstance(candidate_v, list):
matches = [1 if i in candidate_v else 0 for i in v]
elif candidate_v in v:
matches.append(1 / len(v)) # type: ignore
else:
matches.append(0)
vals[k] = sum(matches) / len(matches)
self._vals = vals
return sum(v for _, v in vals.items()) / len(vals)
def __lt__(self, other: "Match") -> bool:
return self.score < other.score
def __gt__(self, other: "Match") -> bool:
return self.score > other.score
def __eq__(self, other: object) -> bool:
if not isinstance(other, Match):
return NotImplemented
return self.score == other.score
def __repr_args__(self) -> "ReprArgs":
return self.__dict__.items() # type: ignore
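# A minimal sketch of how Match is used (target and candidate are assumed to be
# pydantic models such as two Article instances; anything with .dict() and
# matching attributes fits _calculate_score above):
#
#     m = Match(target_article, candidate_article)
#     m.score       # float in [0, 1], computed lazily and cached
#     m.confidence  # e.g. "85.0 %"
#     best = max([m, other_match])  # @total_ordering compares by score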
class GallicaSRU(
Representation,
):
"""Class to interact wtih Gallica"""
URL = "http://catalogue.bnf.fr/api/SRU"
def __init__(self) -> None:
self.client = sruthi.Client(url=self.URL, record_schema="dublincore")
def fetch_query(self, query: str) -> SearchRetrieveResponse:
return self.client.searchretrieve(query)
def __repr_args__(self) -> "ReprArgs":
return self.__dict__.items() # type: ignore
class Query(
GallicaSRU,
Representation,
):
"""Class to represent a query"""
def __init__(self, target: Union[Article, Journal, Book, Collection]) -> None:
super().__init__()
self.target = target._source()
self.fetcher = GallicaSRU()
self.logger = logging.getLogger(f"QU {target.name(short=6)}")
@staticmethod
def get_at_str(obj: Union[str, List[str]]) -> Optional[str]:
if not obj:
return None
if isinstance(obj, str):
return obj
else:
return obj[0]
def resp_to_obj(self, resp: dict) -> GallicaBibObj:
"""Convert resp to GallicaBibObj"""
resp["ark"] = self.get_at_str(resp["identifier"])
# could use a Language() obj to internationalise this
resp["language"] = resp["language"][1]
resp["type"] = resp["type"][0]["text"]
if "publisher" in resp.keys():
resp["publisher"] = self.get_at_str(resp["publisher"])
resp["title"] = self.get_at_str(resp["title"])
# resp["publisher"] = resp["publisher"][0]
# resp["title"] = resp["title"][0]
obj = GallicaBibObj.parse_obj(resp).convert()
return obj # type: ignore
def run(self, give_up: int = 50) -> Any:
"""Try to get best match."""
self.logger.debug("Generting query")
query = self.target.generate_query()
try:
self.logger.debug("Fetching query")
resps = self.fetcher.fetch_query(query)
except Exception:
print_exc()
return None
self.logger.debug(f"Got {len(list(resps))} candidates.")
matches = []
        for i, resp in enumerate(resps[:give_up]):
            candidate = self.resp_to_obj(resp)
            match = Match(self.target, candidate)
            matches.append(match)
            # Once we have at least three candidates, stop early if any of them
            # scores well (accessing .score computes and caches it).
            if i >= 3 and any(m.score > 0.7 for m in matches):
                break
if not matches:
self.logger.debug("Failed to match.")
return None
match = max(matches)
self.logger.debug(f"Matched. {repr(match)}")
return match
def __repr_args__(self) -> "ReprArgs":
return self.__dict__.items() # type: ignore
class GallicaResource(Representation):
"""A resource on Gallica."""
BASE_TIMEOUT = 60
def __init__(
self,
target: Union[Article, Book, Collection, Journal],
source: Union[Journal, Book, Collection],
cache: bool = True,
):
if any(isinstance(target, x) for x in (Book, Collection, Journal)):
raise NotImplementedError("We only handle article for now")
if any(isinstance(source, x) for x in (Book, Collection)):
raise NotImplementedError("We only handle fetching from journals")
self.logger = logging.getLogger(f"GR {target.name(short=6)}")
self.target = target
self.key = target.key()
self.source = source
a = Ark.parse(source.ark)
if a.is_left:
raise a.value
self.series_ark = a.value
self._ark = ark_cache[self.key] if cache else None
self.logger.debug(f"Ark is {self._ark}, {self.key}")
self._resource: Optional[Resource] = None # so we can pass resource around
self._start_p = None
self._end_p = None
self._pages: Optional[Pages] = None
self.consider_toc = True
self.source_match = source_match_cache[self.key] if cache else None
self.logger.debug(f"Source match is {self.source_match}")
self.minimum_confidence = 0.5
self.suppress_cover_page: bool = False
@property
def ark(self) -> Optional[Union[str, Ark]]:
"""Ark for the final target."""
if not self._ark:
if isinstance(self.source, Journal):
self.logger.debug("No ark, Finding best match.")
self.source_match = self.get_best_article_match()
source_match_cache[self.key] = self.source_match
if self.source_match:
self._ark = self.source_match.candidate.ark
else:
raise MatchingError("Unable to match.")
else:
self._ark = self.source.ark
ark_cache[self.key] = self._ark
self.logger.debug(f"Saving ark {self.key} = {ark_cache[self.key]}")
return self._ark
@property
def match(self) -> Optional[Match]:
"""The Match() object representing our choice.
This will trigger a match even if we use a cached download. (However if
we have a cached match and have not disabled caching, we will use
that.)
"""
if not self.source_match:
self.ark
source_match_cache[self.key] = self.source_match
return self.source_match
def get_possible_issues(self) -> List[OrderedDict]:
"""Get possible issues.
We go a year in each direction since sometimes collections of issues
are made for two years.
"""
self.logger.debug("Getting possible issues.")
source = self.target._source()
issues = []
years = []
if isinstance(source.publicationdate, list):
for year in source.publicationdate:
years += list(range(year - 1, year + 2))
else:
years = list(range(source.publicationdate - 1, source.publicationdate + 2))
for year in set(years):
issue = Resource(self.series_ark).issues_sync(str(year))
if not issue.is_left:
issues.append(issue.value)
else:
self.logger.debug(f"unable to fetch year {year}")
if not issues:
raise MatchingError("Failed to find any matching issues")
details = []
for year in issues:
detail = year["issues"]["issue"]
if isinstance(detail, list):
details += detail
else:
details.append(detail)
return details
@classmethod
def parse_description(cls, desc: str) -> dict:
"""Parse a dublincore description as retrieved from galllica."""
resp = dict(year=None, volume=None, number=None)
if "-" in desc:
start, end = [cls.parse_description(x) for x in desc.split("-")]
for k, v in start.items():
end_v = end[k]
if not end_v or v == end_v:
continue
else:
start[k] = list(range(v, end_v + 1))
return start
else:
resp["year"] = search(r"([0-9][0-9][0-9][0-9])", desc) # type: ignore
resp["volume"] = search(r"T([0-9]+)", desc) # type: ignore
resp["number"] = search(r"N([0-9]+)", desc) # type: ignore
resp.update({k: int(v.group(1)) for k, v in resp.items() if v}) # type: ignore
return resp
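    # Worked examples for parse_description (the description strings are hedged
    # guesses at Gallica's formats, but they exercise the regexes above):
    #
    #     parse_description("1930 T52 N1")
    #     -> {"year": 1930, "volume": 52, "number": 1}
    #     parse_description("1930-1932")
    #     -> {"year": [1930, 1931, 1932], "volume": None, "number": None}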
def ocr_find_article_in_journal(
self, journal: Resource, pages: OrderedDict
) -> bool:
"""Use ocr to find an article in a journal.
The algorithm:
1. determines the physical page number of the first page of the article
2. fetches this page as text (the physical page number = the view number)
3. does a fuzzy search on this page for the title
4. if it matches, looks for the author's name on the page
5. if not found, gets the view number of the last page of the article and fetches that
6. looks for the author's name on this page
This is a good deal simpler than trying to parse an ocrd text back into
individual articles, which is probably a non-starter.
"""
target: Article = self.target # type: ignore
start_p = self.get_physical_pno(target.pages[0], pages)
start_page = make_string_boring(self.fetch_text(journal, start_p))
if not start_page:
return False
title = make_string_boring(target.title)
author = make_string_boring(target.author)
matches = find_near_matches(title, start_page, max_l_dist=2)
if not matches:
self.logger.debug("Failed to find title on page")
return False
if matches := find_near_matches(author, start_page, max_l_dist=5):
if fuzz.ratio(matches[0].matched, author) > 80:
return True
else:
self.logger.debug("Failed to find author on first page.")
end_p = self.get_physical_pno(target.pages[-1], pages)
end_page = make_string_boring(self.fetch_text(journal, end_p))
if matches := find_near_matches(author, end_page, max_l_dist=5):
if fuzz.ratio(matches[0].matched, author) > 80:
return True
self.logger.debug("Failed to find author on last page.")
return False
@classmethod
def fetch_text(cls, resource: Resource, pno: int) -> str:
"""Fetch text from resource as str."""
either = resource.content_sync(startview=pno, nviews=1, mode="texteBrut")
if either.is_left:
raise either.value
soup = BeautifulSoup(either.value, "xml")
return " ".join(x.text for x in soup.hr.next_siblings if not x.name == "hr")
def toc_find_article_in_journal(
self, journal: Resource, toc: str, pages: Pages, data: dict
) -> List[Article]:
"""Find article in journal using journal's toc.
This is preferable to relying on fuzzy matching ocr, but not all
journals have tocs.
Currently we build _all possible articles_ and return them all. This is
probably redundant, and we could adopt the strategy of
`ocr_find_article_in_journal` i.e. predict and see if the prediction
holds.
"""
target: Article = self.target # type: ignore
target_start_p = str(target.pages[0])
entries = [x for x in self.parse_gallica_toc(toc) if x[0] == target_start_p]
entries.sort(key=lambda x: int(x[0]))
articles = []
for i, x in enumerate(entries):
args = data.copy()
start_p, title = x
try:
author, title = search(r"(.+?)\.* - (.+)", title).groups() # type: ignore
except AttributeError:
try:
author, title = search(r"(.+?)\. (.+)", title).groups() # type: ignore
except AttributeError:
self.logger.debug(f"Unable to parse toc entry {x[1]}")
author = ""
args["author"] = author.strip()
args["title"] = title.strip()
try:
end_p = int(entries[i + 1][0]) - 1
except IndexError:
end_p = self.get_last_pno(pages) # type: ignore
args["pages"] = list(range(int(start_p), int(end_p) + 1))
physical_start_p = self.get_physical_pno(start_p, pages)
physical_end_p = self.get_physical_pno(str(end_p), pages)
args["physical_pages"] = list(
range(int(physical_start_p), int(physical_end_p) + 1)
)
articles.append(Article.parse_obj(args))
return articles
@staticmethod
def parse_gallica_toc(xml: str) -> List[Tuple[str, str]]:
"""Parse Gallica' toc xml. There are, needless to say, *several* forms."""
soup = BeautifulSoup(xml, "xml")
toc = []
if soup.find("row"):
for row in soup.find_all("row"):
title = pno = None
if seg := row.find("seg"):
title = seg.text.strip()
if xref := row.find("xref"):
pno = xref.text.strip()
if title and pno:
toc.append((pno, title))
else:
for item in soup.find_all("item"):
if not item.find("seg"):
continue
toc.append((item.xref.text.strip(), item.seg.text.strip()))
return toc
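    # A hedged example of the "item" form this parser handles (the "row" form is
    # analogous, with <seg> holding the title and <xref> the page number):
    #
    #     parse_gallica_toc(
    #         "<TOC><item><seg>Pour lire saint Augustin</seg>"
    #         "<xref>135</xref></item></TOC>"
    #     )
    #     -> [("135", "Pour lire saint Augustin")]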
def get_article_candidates(self) -> List[Match]:
"""Generate match objs for each article in the corresponding issues.
Returns:
A list of Match() objects in order of decreasing score.
"""
self.logger.debug("Generating article candidates")
details = self.get_possible_issues()
matches = []
for detail in details:
self.logger.debug(f"Considering {detail['#text']}")
data = {}
ark = Ark(naan=self.series_ark.naan, name=detail["@ark"])
issue = Resource(ark)
either = issue.oairecord_sync()
if either.is_left:
raise either.value
oai = either.value
dublin = oai["results"]["notice"]["record"]["metadata"]["oai_dc:dc"]
description = dublin["dc:description"][1]
data.update(self.parse_description(description))
data["journaltitle"] = dublin["dc:title"]
data["publisher"] = dublin["dc:publisher"]
data["ark"] = dublin["dc:identifier"]
either = issue.pagination_sync()
if either.is_left:
raise either.value
pages = either.value
articles = []
if self.consider_toc and not (either := issue.toc_sync()).is_left:
articles = self.toc_find_article_in_journal(
issue, either.value, pages, data
)
matches += [Match(self.target, a) for a in articles]
if not articles:
if self.ocr_find_article_in_journal(issue, pages):
args = dict(self.target)
args.update(data)
matches.append(Match(self.target, Article.parse_obj(args)))
matches.sort(reverse=True)
if matches and matches[0].score > 0.7:
break
return matches[:5]
def get_best_article_match(self) -> Optional[Match]:
"""Get best available article match."""
matches = self.get_article_candidates()
for match in matches:
if match.score < self.minimum_confidence:
self.logger.debug(
f"Match score {match.score} below"
"minimum threshold {self.minimum_confidence}"
)
return None
res = Resource(match.candidate.ark)
either = res.content_sync(1, 1, "texteBrut")
if not either.is_left:
self._resource = res
return match
else:
self.logger.debug(f"Skipping unavailable match {match.candidate}")
return None
@property
def resource(self) -> Resource:
"""Resource()"""
if not self._resource:
self._resource = Resource(self.ark)
self._resource.timeout = self.BASE_TIMEOUT
return self._resource
@property
def timeout(self) -> int:
"Timeout in url requests."
return self.resource.timeout
@timeout.setter
def timeout(self, val: int) -> None:
self.resource.timeout = val
@property
def pages(self) -> Optional[Pages]:
if not self._pages:
either = self.resource.pagination_sync()
if either.is_left:
raise either.value
self._pages = either.value
return self._pages
    def get_physical_pno(self, logical_pno: str, pages: Optional[Pages] = None) -> int:
"""Get the physical pno for a logical pno."""
if not pages:
pages = self.pages
pnos = pages["livre"]["pages"]["page"] # type: ignore
# sadly we have to do it ourselves
for p in pnos:
if p["numero"] == logical_pno:
break
return p["ordre"]
@staticmethod
def get_last_pno(pages: Pages) -> str:
"""Get last page number of internal volume."""
pnos = pages["livre"]["pages"]["page"]
for p in reversed(pnos):
if p["pagination_type"] == "A":
break
return p["ordre"]
@property
def start_p(self) -> Optional[int]:
"""Physical page we start on."""
if not self._start_p:
self._start_p = int(self.get_physical_pno(self.target.pages[0])) # type: ignore
return self._start_p
@property
def end_p(self) -> Optional[int]:
"""Physical page we end on."""
if not self._end_p:
try:
self._end_p = int(self.get_physical_pno(self.target.pages[-1])) # type: ignore
except AttributeError:
pass
return self._end_p
def download_pdf(
self, path: Path, blocksize: int = 100, trials: int = 3, fetch_only: int = None
) -> bool:
"""Download a resource as a pdf in blocks to avoid timeout."""
partials = []
if path.exists():
return True
try:
if not self.start_p or not self.end_p:
raise Exception("No pages.")
end_p = (
self.start_p + fetch_only - 1 if fetch_only is not None else self.end_p
)
for i, (start, length) in enumerate(
self._generate_blocks(self.start_p, end_p, blocksize) # type: ignore
):
fn = path.with_suffix(f".pdf.{i}")
status = self._fetch_block(start, length, trials, fn)
if not status:
raise DownloadError("Failed to download.")
with fn.open("rb") as f:
with Path("/tmp/test.pdf").open("wb") as o:
o.write(f.read())
partials.append(fn)
self._merge_partials(path, partials)
finally:
for fn in partials:
fn.unlink()
assert partials
return False
@staticmethod
def _generate_blocks(start: int, end: int, size: int) -> Generator:
"""Generate Blocks"""
beginnings = range(start, end + 1, size)
for i in beginnings:
length = end - i + 1 if i + size > end else size # truncate last block
yield (i, length)
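    # Worked example: 250 views fetched in blocks of 100, the last block truncated:
    #
    #     list(GallicaResource._generate_blocks(1, 250, 100))
    #     -> [(1, 100), (101, 100), (201, 50)]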
def _merge_partials(self, path: Path, partials: List[Path]) -> None:
"""Merge partial files"""
merger = PdfFileMerger()
for i, fn in enumerate(partials):
if self.suppress_cover_page:
args = {"pages": PageRange("2:")} # if i else {}
else:
args = {"pages": PageRange("2:")} if i else {}
merger.append(str(fn.resolve()), **args)
with path.open("wb") as f:
merger.write(f)
def _fetch_block(self, startview: int, nviews: int, trials: int, fn: Path) -> bool:
"""Fetch block."""
url = self.resource.content_sync(
startview=startview, nviews=nviews, url_only=True
)
for i in range(trials):
status = downloader.download(
url,
download_file=str(fn.resolve()),
timeout=120,
)
if status:
if imghdr.what(fn):
print("We got ratelimited, sleeping for 5 minutes.")
sleep(60 * 5)
else:
return True
sleep(2 ** (i + 1))
return False
def __repr_args__(self) -> "ReprArgs":
return self.__dict__.items() # type: ignore
|
2e0byo/pygallica-autobib
|
gallica_autobib/parsers.py
|
"""Parsers for input data in various formats."""
from typing import List, TextIO, Tuple, Union
import bibtexparser
import rispy
from bibtexparser.bparser import BibTexParser
from bibtexparser.customization import convert_to_unicode
from roman import fromRoman, toRoman
from .models import Article, Book, Collection, RecordTypes
from .util import deprettify
class ParsingError(Exception):
pass
def parse_bibtex(bibtex: Union[str, TextIO]) -> Tuple[List[RecordTypes], List[str]]:
parser = BibTexParser()
parser.customization = convert_to_unicode
try:
if isinstance(bibtex, str):
db = bibtexparser.loads(bibtex, parser=parser)
rawlines = bibtex.split("\n")
else:
db = bibtexparser.load(bibtex, parser=parser)
bibtex.seek(0) # type: ignore
rawlines = (x.strip("\n") for x in bibtex.readlines()) # type: ignore
except Exception:
raise ParsingError("Unable to parse")
parsed = []
for record in db.entries:
pages = record["pages"]
if not isinstance(pages, list):
roman = "i" in pages.lower()
lower = "i" in pages
try:
pages = pages.replace("--", "-")
start, end = pages.split("-")
startno = fromRoman(start.upper()) if roman else int(start)
endno = fromRoman(end.upper()) if roman else int(end)
if not roman and endno < startno:
endno = int(f"{start[0]}{end}")
record["pages"] = list(range(startno, endno + 1))
if roman:
record["pages"] = [
toRoman(x).lower() if lower else toRoman(x)
for x in record["pages"]
]
except ValueError:
record["pages"] = [record["pages"]]
record["year"] = deprettify(record["year"])
type_ = record["ENTRYTYPE"]
mapping = {"article": Article, "book": Book, "collection": Collection}
if type_ in mapping:
parsed.append(mapping[type_].parse_obj(record))
else:
raise ParsingError("Unsupported type")
raw = []
entry: List[str] = []
for line in rawlines:
if line.strip().startswith("@"):
if entry:
raw.append("\n".join(line for line in entry if line.strip()))
entry = [line]
elif line.strip():
entry.append(line)
raw.append("\n".join(line for line in entry if line.strip()))
return parsed, raw # type: ignore
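# Worked examples of the page-range expansion above (the abbreviated forms mirror
# util.pretty_page_range's output, cf. tests/test_util.py):
#
#     "135--158" -> [135, 136, ..., 158]
#     "450--5"   -> [450, 451, 452, 453, 454, 455]
#     "i--iii"   -> ["i", "ii", "iii"]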
def parse_ris(ris: Union[str, TextIO]) -> Tuple[List[RecordTypes], List[str]]:
try:
if isinstance(ris, str):
db = rispy.loads(ris)
rawlines = ris.split("\n")
else:
db = rispy.load(ris)
ris.seek(0) # type: ignore
rawlines = (x.strip("\n") for x in ris.readlines()) # type: ignore
except Exception:
raise ParsingError("Unable to parse")
parsed = []
for record in db:
record["pages"] = list(
range(int(record["start_page"]), int(record["end_page"]) + 1)
)
record["author"] = "and".join(record["authors"])
try:
record["journaltitle"] = record["journal_name"]
except KeyError:
record["journaltitle"] = record["secondary_title"]
mapping = {"JOUR": Article, "BOOK": Book, "COLL": Collection}
type_ = record["type_of_reference"]
if type_ in mapping:
parsed.append(mapping[type_].parse_obj(record))
else:
raise ParsingError("Unsupported type")
raw = []
entry = []
for line in rawlines:
if not line.strip():
continue
if line.strip().startswith("ER"):
entry.append(line)
raw.append("\n".join(entry))
entry = []
else:
entry.append(line)
return parsed, raw # type: ignore
|
2e0byo/pygallica-autobib
|
tests/test_util.py
|
import pytest
from gallica_autobib import util
page_ranges = [
[[str(i) for i in range(1, 11)], "1--10"],
[[str(i) for i in range(450, 456)], "450--5"],
[[str(i) for i in range(450, 470)], "450--69"],
[["10", "11", "12", "13", "17", "18", "19"], "10--3, 17--9"],
[["i", "ii", "iii"], "i--iii"],
[["1", "2", "i", "ii", "iii"], "1--2, i--iii"],
]
@pytest.mark.parametrize("inp,oup", page_ranges)
def test_page_ranges(inp, oup):
assert util.pretty_page_range(inp) == oup
@pytest.mark.parametrize("inp,oup", page_ranges[:4])
def test_deprettify(inp, oup):
pretty = util.pretty_page_range(inp)
assert util.deprettify(pretty) == [int(i) for i in inp]
|
2e0byo/pygallica-autobib
|
gallica_autobib/cache.py
|
"""Handle our internal cache, which we use to avoid hammering Gallica's
servers, and to make our life easier when re-running."""
import sqlite3
from typing import TYPE_CHECKING, Any, Optional
import jsonpickle
from xdg import xdg_cache_home
if TYPE_CHECKING: # pragma: nocover
from .gallipy import Ark # pragma: nocover
class Cached:
"""Cached resource."""
cachedir = xdg_cache_home() / "gallica_autobib" # TODO what happens if not on unix?
CACHEFN = "cache.db"
def __init__(self, cachename: str) -> None:
"""A resource in the cache, stored in a separate table."""
self.tablename = cachename
if not self.cachedir.exists():
self.cachedir.mkdir(parents=True)
cache = self.cachedir / self.CACHEFN
self.con = sqlite3.connect(cache)
MAKE_TABLE = f'CREATE TABLE IF NOT EXISTS "{cachename}" (key TEXT PRIMARY KEY, value BLOB)'
self.con.execute(MAKE_TABLE)
self.con.commit()
def __del__(self) -> None:
self.con.close()
def __getitem__(self, key: str) -> Optional[Any]:
GET_ITEM = f'SELECT value FROM "{self.tablename}" WHERE key = (?)'
item = self.con.execute(GET_ITEM, (key,)).fetchone()
if item:
return jsonpickle.loads(item[0])
else:
return None
def __setitem__(self, key: str, val: Any) -> None:
SET = f'REPLACE INTO "{self.tablename}" (key, value) VALUES (?,?)'
self.con.execute(SET, (key, jsonpickle.dumps(val)))
self.con.commit()
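# A minimal usage sketch ("ark" matches a table name used elsewhere in this
# package; the stored value here is a made-up example). Values round-trip
# through jsonpickle, so most Python objects work:
#
#     cache = Cached("ark")
#     cache["some-key"] = {"ark": "ark:/12148/cb34406663m"}
#     cache["some-key"]  # -> {"ark": "ark:/12148/cb34406663m"}
#     cache["missing"]   # -> None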
|
2e0byo/pygallica-autobib
|
gallica_autobib/gallipy/ark.py
|
<gh_stars>0
"""
Gallipy - Python wrapper for the Gallica APIs
Copyright (C) 2019 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
https://github.com/GeoHistoricalData/gallipy
"""
import rfc3987
from lark import Lark, Transformer
from lark.exceptions import ParseError, UnexpectedCharacters
from .monadic import Either, Left
__all__ = ["Ark", "ArkParsingError"]
# ARKID structure : ark:/NAAN/Name[Qualifier]
_GRAMMAR = """
arkid: _SCHEME _COLON _SLASH naan _SLASH name [_SLASH qualifier]
naan: SAFESTRING
name: SAFESTRING
qualifier: SAFESTRING
_SLASH: "/"
_COLON: ":"
_SCHEME: "ark"i
SAFESTRING: /[^@:\\/#\\[\\]\\?]+/
"""
_ARKID_SCHEME = "ark"
class Ark:
"""Object representation of an Archival Resource Key.
An object representation of an Archival Resource Key, which can be
either a full ARK URL or an ARK ID, following the structure
[urlscheme://authority/]ark:/naan/name[/qualifier].
Args:
ark_parts (dict): A dictionary of ark parts.
The following keys are valid:
naan (str): Name Assigning Authority Number.
name (str): ARK ID name.
qualifier (:obj:`str`, optional): ARK qualifier.
scheme (:obj:`str`, optional): The scheme of the ARK.
Can be any http scheme, or 'ark'. Defaults to 'ark'.
authority (:obj:`str`, optional): The naming authority, e.g. gallica.bnf.fr.
            If authority is set then scheme must be an http scheme.
        ark_parts must contain at least the keys 'naan' and 'name'.
If key 'authority' is set, key 'scheme' must be different from 'ark'.
Attributes:
ark_parts (dict): A dictionary of ark parts. Must contain at least keys
'naan' and 'name'. If key 'authority' is set, key 'scheme' must be
different from 'ark'.
Raises:
ValueError: If parameters naan or name are undefined, or if scheme='ark'
and authority is set.
"""
def __init__(self, **ark_parts):
valid_keys = ["scheme", "authority", "naan", "name", "qualifier"]
parts = {key: ark_parts.get(key) for key in valid_keys}
# Scheme must be ark if authority is unset. 'ark' is also scheme's
# default value.
if not (parts["scheme"] and parts["authority"]):
parts["scheme"] = "ark"
# naan and name are required
if not parts["naan"]:
raise ValueError("Parameter naan is required.")
if not parts["name"]:
raise ValueError("Parameter name is required.")
# scheme cannot be 'ark' if authority is set
if parts["authority"] and parts["scheme"] == _ARKID_SCHEME:
msg = """
Cannot create an Ark object with parts {}
Scheme cannot be '{}' if authority is set.
""".format(
str(parts), _ARKID_SCHEME
)
raise ValueError(msg)
self._ark_parts = parts
def copy(self):
"""Copy constructor.
Returns:
Ark: A copy of self
"""
return Ark(**self.parts)
@property
def scheme(self):
"""The scheme of this Ark.
Returns:
str: The scheme of self.
"""
return self._ark_parts.get("scheme")
@property
def authority(self):
"""The authority of this Ark.
Returns:
str: The authority of self.
"""
return self._ark_parts.get("authority")
@property
def naan(self):
"""The naming authority assigning number of this Ark.
Returns:
str: The naming assigning number of self.
"""
return self._ark_parts.get("naan")
@property
def name(self):
"""The name of this Ark.
Returns:
str: The name of self.
"""
return self._ark_parts.get("name")
@property
def qualifier(self):
"""The qualifier of this Ark.
Returns:
str: The qualifier of self.
"""
return self._ark_parts.get("qualifier")
@property
def arkid(self):
"""The ARK ID of this Ark.
        If this Ark is a URL, extract the ARK ID nested inside as a new Ark
object. Otherwise, return self.
Returns:
Ark: If self is a full ARK URL, a new Ark object representing the
ARK ID nested in self. Otherwise, return self.
"""
if self.is_arkid():
return self
parts = self.parts
parts["scheme"] = "ark"
del parts["authority"]
return Ark(**parts)
@property
def root(self):
"""Get the root (i.e the ark id without qualifier) of this ARK.
Returns:
Ark: the root ark id of self.
"""
return Ark(naan=self.naan, name=self.name)
@property
def parts(self):
"""A copy of the parts composing this ARK."""
return self._ark_parts.copy()
def is_arkid(self):
"""The ARK ID of this Ark.
Returns:
bool: True if self is an ARK ID, False if self is a full ARK URL.
"""
return self.scheme == _ARKID_SCHEME
@staticmethod
def parse(ark_str):
"""Parse an ARK URL or an ARK ID string into an Ark oject
Args:
ark_str (str): The string to parse.
Returns:
Ark: The parsed ARK.
Raises:
ArkParsingError: If parsing fails.
"""
try:
parts = rfc3987.parse(ark_str, rule="URI") # Ensure ark is a URI
parser = Lark(_GRAMMAR, start="arkid")
# Extract an ARK ID from ark_str if ark_str is a full ARK URL.
if parts["scheme"] != _ARKID_SCHEME:
arkid_str = parts["path"].lstrip("/")
if not parts["authority"]: # NMA is required
msg = "Name Mapping Authority cannot be null."
raise ArkParsingError(msg, ark_str)
else:
arkid_str = ark_str
tree = parser.parse(arkid_str)
ark_parts = ArkIdTransformer().transform(tree)
ark_parts.update(parts)
ark = Ark(**ark_parts)
return Either.pure(ark)
except (TypeError, ValueError, ParseError, UnexpectedCharacters) as ex:
return Left(ArkParsingError(str(ex), ark_str))
def __str__(self):
"""Simple string representation of this Ark"""
pattern = "{scheme}://{authority}/" if not self.is_arkid() else ""
pattern += _ARKID_SCHEME + ":/{naan}/{name}"
pattern += "/{qualifier}" if self.qualifier else ""
return pattern.format(**self._ark_parts)
def __repr__(self):
"""Simple string representation of the parts composing this Ark"""
return str(self._ark_parts)
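# A minimal parsing sketch (Ark.parse returns an Either, so unwrap it before
# use; the ark id here is the journal ark from this package's test fixtures):
#
#     either = Ark.parse("ark:/12148/cb34406663m")
#     if not either.is_left:
#         ark = either.value
#         ark.naan, ark.name  # -> ("12148", "cb34406663m")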
class ArkParsingError(ValueError):
"""A simple parsing exceptions for Arks."""
def __init__(self, message, arkstr):
string = """
Parsing error, ARK '{}' is invalid. See details below.
{}
""".format(
arkstr, message
)
super().__init__(string)
class ArkIdTransformer(Transformer):
"""A tree transformer for Ark parsing."""
@staticmethod
def naan(item):
"""Get naan item from the naan TreeNode."""
return "naan", str(item[-1])
@staticmethod
def name(item):
"""Get name item from the naan TreeNode."""
return "name", str(item[-1])
@staticmethod
def qualifier(item):
"""Get qualifier item from the naan TreeNode."""
return "qualifier", str(item[-1])
@staticmethod
def arkid(items):
"""Get arkid item from the naan TreeNode."""
return dict(items)
|
2e0byo/pygallica-autobib
|
tests/test_cli.py
|
import pytest
from devtools import debug
from gallica_autobib.cli import app
from typer.testing import CliRunner
runner = CliRunner()
@pytest.mark.xfail(strict=False)
def test_app(fixed_tmp_path, file_regression):
result = runner.invoke(app, ["tests/test_cli/test.bib", str(fixed_tmp_path)])
debug(result.stdout)
try:
debug(result.stderr)
    except Exception:
pass
assert result.exit_code == 0
file_regression.check(result.stdout)
|
2e0byo/pygallica-autobib
|
gallica_autobib/process.py
|
"""Fns to process. These are wrapped in a class in pipeline, which is probably what you want."""
import logging
from collections import namedtuple
from io import BytesIO
from pathlib import Path
from typing import Tuple
import numpy as np
from PIL import Image, ImageChops, ImageOps
from PyPDF4 import PdfFileReader, PdfFileWriter
from PyPDF4.pdf import PageObject
logger = logging.getLogger(__name__)
class ExtractionError(Exception):
pass
def extract_image(page: PageObject) -> Tuple[Image.Image, str]:
"""
Extract image from pdf without resampling.
Modified from
https://github.com/mstamy2/PyPDF2/blob/master/Scripts/pdf-image-extractor.py
itself modified from
https://stackoverflow.com/questions/2693820/extract-images-from-pdf-without-resampling-in-python
"""
if "/XObject" in page["/Resources"]:
xObject = page["/Resources"]["/XObject"].getObject()
for obj in xObject:
if xObject[obj]["/Subtype"] == "/Image":
size = (xObject[obj]["/Width"], xObject[obj]["/Height"])
data = xObject[obj].getData()
if xObject[obj]["/ColorSpace"] == "/DeviceRGB":
mode = "RGB"
else:
mode = "P"
if "/Filter" in xObject[obj]:
filter_ = xObject[obj]["/Filter"]
if isinstance(filter_, list):
filter_ = filter_[0]
if filter_ == "/FlateDecode":
data = Image.frombytes(mode, size, data)
type_ = "png"
elif filter_ == "/DCTDecode":
type_ = "jpg"
elif filter_ == "/JPXDecode":
type_ = "jp2"
elif filter_ == "/CCITTFaxDecode":
type_ = "tiff"
else:
continue
else:
type_ = "png"
data = Image.frombytes(mode, size, data)
if isinstance(data, bytes):
data = Image.open(BytesIO(data))
assert data
assert type_
logger.debug(f"Extracted image of kind {type_}.")
return data, type_
else:
raise ExtractionError("No image found.")
def filter_point(point: int) -> int:
"""Filter a point.
    If point is below the threshold, divide it by the divisor. If above, multiply it by
multiplier. This is a crude but effective way of skewing an image to
black-and-white without actually thresholding it.
"""
if point < 160:
return round(point / 1.2)
else:
return round(point * 2)
_results = namedtuple("_results", ("lh_page", "crop", "bbox"))
def filter_algorithm_brute_force(img: Image.Image) -> Image.Image:
img = ImageOps.autocontrast(img)
img = ImageOps.posterize(img, 5)
img = ImageOps.grayscale(img).point(filter_point)
img = ImageOps.autocontrast(img)
return img
def deanomalise(data: list) -> int:
mean = np.mean(data)
std = np.std(data)
if not std:
return data[0]
data = [x for x in data if abs(x - mean) < 1.5 * std]
return round(np.mean(data))
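# Worked example: deanomalise([10, 11, 12, 200]) drops 200 (it lies more than
# 1.5 standard deviations from the mean) and returns round(mean([10, 11, 12])) == 11.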
def detect_spine(img: Image.Image) -> _results:
logger.debug("Detecting spine")
threshold = 40
midpoint = round(img.height / 2)
lower = midpoint - 20
upper = midpoint + 20
first_lefts = []
first_rights = []
for height in (midpoint, lower, upper):
for i in range(img.width):
if img.getpixel((i, height)) < threshold:
first_lefts.append(i)
break
for i in range(img.width - 1, 0, -1):
if img.getpixel((i, height)) < threshold:
first_rights.append(img.width - i)
break
assert first_lefts
assert first_rights
first_left = deanomalise(first_lefts)
first_right = deanomalise(first_rights)
if first_left < first_right:
crop = first_left + 10
return _results(True, crop, (crop, 0, img.width, img.height))
else:
crop = first_right - 10
return _results(False, crop, (0, 0, img.width - crop, img.height))
def prepare_img(img: Image.Image, threshold: int = 60) -> Image.Image:
img = ImageOps.grayscale(img)
img = ImageOps.autocontrast(img)
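    # point() maps each pixel to 255 where p > threshold and to 0 otherwise:
    # "p > threshold and 255" short-circuits to False (i.e. 0) below threshold.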
return img.point(lambda p: p > threshold and 255)
def get_crop_bounds(img: Image.Image) -> Tuple:
"""Get crop bounds for text on page.
The algorithm:
1. grayscales and thresholds the image aggressively
    2. crops to slightly wider than the content
This is not very robust, but Gallica is quite standardised in its pdfs.
We don't bother with spine detection as it seems to work fine without it
using a very aggressive thresholding.
Args:
img: Image.Image: The image to process.
Returns:
A tuple of the rectangle to crop to.
"""
if img.mode != "1":
img = prepare_img(img)
# img.show()
# res = detect_spine(img)
# logger.debug(res.lh_page)
# crop out corner errors
x = 40
img = img.crop((x, x, img.width - x, img.height - x))
# crop to border
bg = Image.new(img.mode, img.size, 255)
diff = ImageChops.difference(img, bg)
left, upper, right, lower = diff.getbbox()
left += x - 10
upper += x - 10
right += x + 10
lower += x + 10
return (left, upper, right, lower)
def generate_filename(candidate: Path) -> Path:
"""Generate a filename which doesn't exist on the disk. This is not atomic."""
orig = candidate
i = 0
while candidate.exists():
stem = orig.stem
candidate = orig.with_stem(f"{stem}-{i}")
i += 1
return candidate
def process_pdf(
pdf: Path,
outf: Path = None,
preserve_text: bool = False,
equal_size: bool = False,
skip_existing: bool = False,
has_cover_page: bool = False,
) -> Path:
"""Process a pdf.
Note that currently preserve_text implies not editing the image, and
equal_size is unimplemented.
Args:
pdf: Path: The pdf file to crop.
outf: Path: The output path. Default is to calculate.
preserve_text: bool: Preserve OCRd text. (Default value = False)
equal_size: Make all pages equal sized. (Default value = False)
skip_existing: Whether to skip existing files. (Default value = False)
has_cover_page: bool: Whether we have a cover page to resize (Default value=False.)
Returns:
A Path() object pointing to the cropped pdf.
"""
if equal_size:
raise NotImplementedError("Equal sizes not yet implemented.")
if not outf:
if skip_existing:
outf = pdf.with_stem(f"processed-{pdf.stem}")
else:
outf = generate_filename(pdf.with_stem(f"processed-{pdf.stem}"))
if outf.exists():
logger.info("Skipping already processed file.")
return outf
reader = PdfFileReader(str(pdf))
pages = reader.pages
writer = PdfFileWriter()
if has_cover_page:
pages = pages[2:]
if preserve_text:
logger.debug("Preserving text.")
for page in pages:
img, _ = extract_image(page)
bbox = get_crop_bounds(img)
scale = page.mediaBox.getWidth() / img.width
page.cropBox.lowerLeft = (bbox[0] * scale, bbox[1] * scale)
page.cropBox.upperRight = (bbox[2] * scale, bbox[3] * scale)
writer.addPage(page)
with outf.open("wb") as f:
writer.write(f)
else:
logger.debug("Not preserving text.")
imgs = []
for i, page in enumerate(pages):
logger.debug(f"Processing page {i}")
img, _ = extract_image(page)
bbox = get_crop_bounds(img)
img = img.crop(bbox)
if img.mode != "1":
img = filter_algorithm_brute_force(img)
imgs.append(img)
imgs[0].save(
str(outf), "PDF", resolution=100.0, save_all=True, append_images=imgs[1:]
)
logger.info(f"Finished processing {str(outf)}")
return outf
|
2e0byo/pygallica-autobib
|
gallica_autobib/gallipy/resource.py
|
from xmltodict import parse as parsexmltodict
from . import helpers as h
from .ark import Ark
from .monadic import Future, Left
class Resource:
"""Class Resource is the entry point to the Document and IIIF APIs.
    A Resource is a representation of an archival document identified by
    an ARK (id or full url). A Resource object does not actually contain data.
    It provides a set of methods to retrieve data and metadata, and to query
    the corresponding document on gallica.bnf.fr, using the IIIF and Document
    APIs.
    Most methods have a signature similar to their corresponding documentation
    (api.bnf.fr/api-document-de-gallica, in French), but there are some particularities:
    1. 'pages' are here named 'views', as 'page 10' of a document refers to the
    10th view of the scanned document, not to its actual page 10.
    2. Resource corresponds to a Document in Gallica's terms.
    3. ARK qualifiers are **always ignored** by the methods. Arguments are
    here to parameterize any API call. See the API docs for more details.
    4. Synchronous methods (*_sync) return Either objects, whereas asynchronous ones
    return Future[Either].
Args:
ark (str or Ark): The ARK of this resource.
Attributes:
ark (Ark): The ark object of this resource.
Raises:
ValueError: If ark is neither a string or an Ark object, or if the
parsing failed.
"""
def __init__(self, ark):
if isinstance(ark, Ark):
self._ark = ark
elif isinstance(ark, str):
either = Ark.parse(ark)
if either.is_left:
raise either.value
self._ark = either.value
else:
raise ValueError("ark must be of type Ark or str.")
self.timeout = 30
@property
def ark(self):
return self._ark
@property
def arkid(self):
return self.ark.arkid
# ---
# ASYNCHRONOUS METHODS
# ---
def issues(self, year=""):
"""Fetches metadata about the issues of a periodical journal (Async version).
The Document API service Issues retrieves metadata about a periodical journal.
If a year is provided, issues_sync fetches metadata about all the issues of
this specific year.
Qualifiers are ignored.
Args:
year (:obj:int, optional): The year for which to retrieve the issues metadata.
Returns:
            Future: A Future object that will hold an Either object once it has resolved.
This Either will hold the fetched data (Right) or an Exception (Left).
For more details, see Resource.issues_sync.
"""
return Future.asyn(lambda: self.issues_sync(year))
def oairecord(self):
"""Retrieves the OAI record of a document (Async version).
The Document API service OAIRecord retrieves the OAI record of a document.
Qualifiers are ignored.
Returns:
            Future: A Future object that will hold an Either object once it has resolved.
This Either will hold the fetched data (Right) or an Exception (Left).
For more details, see Resource.oairecord_sync.
"""
return Future.asyn(self.oairecord_sync)
def pagination(self):
"""Fetches paging metadata of a resource (Async version).
The Document API service Pagination retrieves metadata about the paging
of a document.
Qualifiers are ignored.
Returns:
            Future: A Future object that will hold an Either object once it has resolved.
This Either will hold the fetched data (Right) or an Exception (Left).
For more details, see Resource.pagination_sync.
"""
return Future.asyn(self.pagination_sync)
def image_preview(self, resolution="thumbnail", view=1):
""" """
l = lambda: self.image_preview_sync(resolution, view)
return Future.asyn(l)
def fulltext_search(self, query="", view=1, results_per_set=10):
""" """
l = lambda: self.fulltext_search_sync(query, view, results_per_set)
return Future.asyn(l)
def toc(self):
""" """
return Future.asyn(self.toc_sync)
def content(self, startview=None, nviews=None, mode="pdf"):
""" """
l = lambda: self.content_sync(startview, nviews, mode)
return Future.asyn(l)
def ocr_data(self, view):
""" """
l = lambda: self.ocr_data_sync(view)
return Future.asyn(l)
def iiif_info(self, view=""):
""" """
        l = lambda: self.iiif_info_sync(view)
return Future.asyn(l)
def iiif_data(
self,
view="",
region=None,
size="full",
rotation=0,
quality="native",
imformat="png",
):
""" """
l = lambda: self.iiif_data_sync(
            view, region, size, rotation, quality, imformat
)
return Future.asyn(l)
# ---
# SYNCHRONOUS METHODS
# ---
def oairecord_sync(self):
"""Retrieves the OAI record of a document (Sync version).
Wraps Document API service 'OAIRecord'.
The Document API service OAIRecord retrieves the OAI record of a document.
Qualifiers are ignored.
Returns:
Either[Exception OrderedDict]: A Right object containing an OrderedDict
representation of the metadata in case of success.
Otherwise, a Left object containing an Exception.
"""
try:
url_parts = {"query": {"ark": self.ark.name}}
url = h.build_service_url(url_parts, service_name="OAIRecord")
return h.fetch_xml_html(url, timeout=self.timeout).map(parsexmltodict)
except Exception as ex:
return Left(ex)
def issues_sync(self, year=""):
"""Fetches metadata about the issues of a periodical journal (Sync version).
Wraps Document API service 'Issues'.
The Document API service Issues retrieves metadata about a periodical journal.
        If a year is provided, issues_sync fetches metadata about the issues of
        that specific year. Gallica assumes that the ark ends with the qualifier
        /date. Qualifiers are ignored.
Args:
year (:obj:int, optional): The year for which to retrieve
the issues metadata.
Returns:
Either[Exception OrderedDict]: If fetch is successful, a Right object
containing an OrderedDict representation of the metadata.
Otherwise, a Left object containing an Exception.
"""
try: # Try/catch because Ark(...) can throw an exception.
parts = self.ark.arkid.parts
parts["qualifier"] = "date" # Qualifier must be 'date'
url_parts = {"query": {"ark": Ark(**parts), "date": year}}
url = h.build_service_url(url_parts, service_name="Issues")
return h.fetch_xml_html(url, timeout=self.timeout).map(parsexmltodict)
except Exception as ex:
return Left(ex)
def pagination_sync(self):
"""Fetches paging metadata of a resource (Sync version).
Wraps Document API service 'Pagination'.
The Document API service Pagination retrieves metadata about the paging
of a document.
Qualifiers are ignored.
Returns:
Either[Exception OrderedDict]: If fetch is successful, a Right object
containing an OrderedDict representation of the metadata.
Otherwise, a Left object containing an Exception.
"""
url_parts = {"query": {"ark": self.ark.name}}
url = h.build_service_url(url_parts, service_name="Pagination")
return h.fetch_xml_html(url, timeout=self.timeout).map(parsexmltodict)
def image_preview_sync(self, resolution="thumbnail", view=1):
"""Retrieves the preview image of a view in a resource (Sync version).
Wraps Document API method 'Image précalculée'.
Retrieves the preview of a view in a resource.
Qualifiers are ignored.
Args:
resolution (:obj:str, optional): One of 'thumbnail', 'lowres', 'medres', 'highres'.
Defaults to 'thumbnail'.
            view (:obj:int, optional): The view to get the preview from. Defaults to 1.
Returns:
Either[Exception, Unicode]: If successful, a Right object
containing the data of the preview image in JPEG format.
Otherwise, a Left object containing an Exception.
"""
url = h.build_base_url(
{"path": "{}/f{}.{}".format(self.ark.root, view, resolution)}
)
return h.fetch(url, self.timeout)
def fulltext_search_sync(self, query, view=1, results_per_set=10):
"""Performs a full-text search in a plain-text Resource (sync version).
Wraps Document API service 'ContentSearch'.
        Performs a word or phrase search in a Resource with plain text available,
using the service SearchContent. Returns the set of results as an OrderedDict.
Phrases must be double quoted, e.g '"candidat à la présidence"'.
You can combine multiple queries with commas or spaces.
Note that the service SearchContent seems buggy: combining a phrase with
a single word will result in Gallica searching for each word in the phrase,
even if it is correctly double-quoted.
Qualifiers are ignored.
Args:
query (str): The words to search for in the text.
            view (:obj:int, optional): The view in which to search. If unset, query is
performed on all views.
results_per_set (:obj:int, optional): Organises the results in sets of size
results_per_set (max 10). Equivalent to startResult in the Document API.
Defaults to 10.
Returns:
Either[Exception, OrderedDict]: If successful, a Right object
containing the set of results as an OrderedDict.
Otherwise, a Left object containing an Exception.
"""
urlparts = {
"query": {
"ark": self.ark.name,
"query": query,
"startResult": results_per_set,
"page": view,
}
}
url = h.build_service_url(urlparts, service_name="ContentSearch")
return h.fetch_xml_html(url, timeout=self.timeout).map(parsexmltodict)
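    # Usage sketch (the query string is illustrative). Phrases must be
    # double-quoted inside the Python string:
    #
    #     res.fulltext_search_sync('"pour lire saint augustin"', view=135)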
def toc_sync(self):
"""Retrieves the table of content of a resource as a HTML document.
Wraps Document API service 'Toc'.
Qualifiers are ignored.
Returns:
Either: If successful, a Right object containing the HTML ToC.
Otherwise, a Left object containing an Exception.
"""
urlparts = {"query": {"ark": self.ark.name}}
url = h.build_service_url(urlparts, service_name="Toc")
return h.fetch_xml_html(url, "html.parser", self.timeout)
def content_sync(self, startview=1, nviews=None, mode="pdf", url_only=False):
"""Retrieves the content of a document.
Wraps Document API method 'Texte Brut' and 'PDF'.
self.qualifier is ignored by content_sync.
        If nviews is not defined, the whole document is downloaded using
metadata retrieved by pagination_sync.
Qualifiers are ignored.
Args:
startview (:obj:int, optional): The starting view to retrieve. Default: 1
nviews (:obj:int, optional): The number of view to retrieve.
            mode (:obj:str, optional): One of {'pdf', 'texteBrut'}. Default: 'pdf'
Returns:
            Either[Exception, Unicode]: If successful, a Right object containing
            the Unicode data of the content.
            Otherwise, a Left object containing an Exception.
"""
_nviews = 1
if not nviews:
either = self.pagination_sync()
if not either.is_left:
_nviews = int(
either.value.get("livre").get("structure").get("nbVueImages")
)
_nviews = _nviews - startview + 1
else:
_nviews = nviews
pattern = "{}/f{}n{}.{}"
arkstr = pattern.format(self.ark.root, startview, _nviews, mode)
urlparts = {"path": arkstr}
url = h.build_base_url(urlparts)
if url_only:
return url
else:
return (
h.fetch(url, self.timeout)
if mode == "pdf"
else h.fetch_xml_html(url, "html.parser", self.timeout)
)
def ocr_data_sync(self, view):
"""Retrieves the OCR data from a ocrized document.
The OCR data is retrienve in XML ALTO and transfomed into an OrderedDict.
Qualifiers are ignored.
Args:
            view (int): View number from which to retrieve the OCR data.
Returns:
Either[Exception OrderedDict]: an Either object containing the OCR data in XML ALTO.
Otherwise, a Left object containing an Exception.
"""
query = {"O": self.ark.name, "E": "ALTO", "Deb": view}
urlparts = {"path": "RequestDigitalElement", "query": query}
url = h.build_base_url(urlparts)
return h.fetch(url)
def iiif_info_sync(self, view=1):
"""Retrieve IIIF metadata of a resource.
Qualifiers are ignored.
"""
if view:
path = "{}/{}/f{}/{}".format("iiif", self.ark.root, view, "info.json")
else:
# No image param : user wants the whole document infos
path = "{}/{}/{}".format("iiif", self.ark.root, "manifest.json")
url = h.build_base_url({"path": path})
return h.fetch_json(url).map(dict)
def iiif_data_sync(
self,
view=1,
region=None,
size="full",
rotation=0,
quality="native",
imformat="png",
):
"""Retrieve image data from a resource using the IIIF API.
Qualifiers are ignored.
Args:
view (:obj:int, optional): View number to retrieve as an image.
region (:obj:tuple, optional): The rectangular region of the
                image to extract, as any 4-int iterable object:
                (lower left x, lower left y, width, height).
If no region is provided, iiif_info_sync will be called to determine
the size of the image. The entire image will be retrieved.
                If metadata retrieval fails, a window of size 1px will be extracted.
size (:obj:str, optional): The size of the image to retrieve. Defaults to 'full'.
rotation (:obj:int, optional): Rotate the image by an angle in degrees.
Values between [0,360]. Defaults to 0.
quality (:obj:str, optional): The quality of the retrieved image. Defaults
to 'native'.
imformat (:obj:str, optional): The returned data will be encoded for this format.
Possible values are 'png', 'tif', 'jpg' and 'gif'. Defaults to 'png'.
Returns:
Either[Exception Unicode]: an Either object holding the image data, or an Exception.
"""
# If no region is provided, get the image size using iiif_info_sync(view)
if not region:
info = self.iiif_info_sync(view)
width = 1 if info.is_left else info.value["width"]
height = 1 if info.is_left else info.value["height"]
region = (0, 0, width, height)
region_str = ",".join(map(str, region))
pattern = "iiif/{}/f{}/{}/{}/{}/{}.{}"
path = pattern.format(
self.ark.root, view, region_str, size, rotation, quality, imformat
)
urlparts = {"path": path}
url = h.build_base_url(urlparts)
return h.fetch(url, self.timeout)
|
3560frc/3560frc.github.io
|
app.py
|
from flask import render_template
from flask_frozen import Freezer
from flask import Flask
from flask import abort
import json
with open('pages.json', 'r') as file:
pages = json.load(file)
app = Flask(__name__)
app.config['FREEZE'] = True
freezer = Freezer(app)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def page(path):
page_html = None
for page in pages:
if pages[page]['path'] == path:
page_html = page + '.html'
if not page_html:
abort(404)
return render_template(page_html, pages=dict(pages))
@freezer.register_generator
def page():
for page in pages:
yield {'path': pages[page]['path']}
if __name__ == '__main__':
if app.config['FREEZE']:
freezer.freeze()
else:
app.run()
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/urls.py
|
<reponame>tsudd/student-arena-discord-bot
from django.urls import path, include
from .views import *
urlpatterns = [
path('firestore/sessions', sessions_api, name="sessions"),
path('firestore/topics', topics, name="topics"),
path('firestore/questions', questions, name="questions"),
path('firestore/players/<int:pk>', player_info, name="player-detail")
]
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/views.py
|
<filename>restful_backend/service/firestore/views.py
from .parsers import CSVTextParser
from .firebase import *
from django.shortcuts import render
from rest_framework import status, views
from rest_framework.decorators import api_view, parser_classes
from rest_framework.response import Response
import logging
from .config import *
# Create your views here.
@api_view(["GET", "POST"])
def sessions_api(request):
if request.method == 'POST':
data = request.data
try:
assert DEAD_AMOUNT in data and \
PLAYERS_AMOUNT in data and \
TOPIC_FIELD in data and \
DATETIME_FIELD in data and \
ROUNDS_AMOUNT in data and \
ROUNDS_ACCESSOR in data and \
PLAYERS_ACCESSOR in data
except AssertionError:
logging.error("Bad data in POST request.")
return Response(status=status.HTTP_400_BAD_REQUEST)
try:
session = add_session_and_return(request.data)
for p in data[PLAYERS_ACCESSOR]:
save_player_and_part(p, session)
add_rounds(data[ROUNDS_ACCESSOR], session)
except ValueError:
return Response(status=status.HTTP_410_GONE)
return Response(status=status.HTTP_200_OK)
elif request.method == "GET":
params = dict(request.query_params)
if ID_QUERY in params:
session = get_session(params[ID_QUERY][0])
if session is None:
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(session)
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(["GET"])
def topics(request):
if request.method == 'GET':
return Response(get_topics())
@api_view(["GET", "PUT"])
@parser_classes([CSVTextParser])
def questions(request):
if request.method == "GET":
params = dict(request.query_params)
amount = 10
if AMOUNT_QUERY in params:
amount = int(params[AMOUNT_QUERY][0])
if TOPIC_QUERY in params:
if params[TOPIC_QUERY][0] == "5":
return Response(get_mixed_questions(amount))
return Response(get_questions(params[TOPIC_QUERY][0], amount))
else:
return Response(status=status.HTTP_204_NO_CONTENT)
elif request.method == "PUT":
try:
res = put_questions(request.data)
return Response({
AMOUNT_QUERY: res
})
        except Exception:
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(["GET"])
def player_info(request, pk):
if request.method == "GET":
p = get_player(pk)
if p is None:
return Response(status=status.HTTP_204_NO_CONTENT)
params = dict(request.query_params)
amount = 10
if AMOUNT_QUERY in params:
amount = int(params[AMOUNT_QUERY][0])
sessions = get_player_sessions(pk, amount)
return Response({
PLAYER_ACCESSOR: p,
"sessions": sessions
})
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/config.py
|
# emoji for the bot
JOIN_EMOJI = '👍'
ONE_EMOJI = '1️⃣'
TWO_EMOJI = '2️⃣'
THREE_EMOJI = '3️⃣'
FOUR_EMOJI = '4️⃣'
FIVE_EMOJI = '5️⃣'
SIX_EMOJI = '6️⃣'
SEVEN_EMOJI = '7️⃣'
EIGHT_EMOJI = '8️⃣'
NINE_EMOJI = "9️⃣"
# bot settings and info
COMMAND_PREFIX = "!"
ADMIN_CHANNEL = 833201507888267265
BROADCAST_CHANNEL = "835537204188545073"
INFO_CHANNEL = 835908933725978645
CATEGORY_ID = 840303711544934407
BOT_ID = 833194405594529803
GUILD_TOKEN = '<PASSWORD>'
# config structure
COMMAND_PREFIX_ACCESSOR = "commandPrefix"
ADMIN_CHANNEL_ACCESSOR = "adminChannel"
BROADCAST_CHANNEL_ACCESSOR = "broadcastChannel"
INFO_CHANNEL_ACCESSOR = "infoChannel"
CHANNELS_CATEGORY_ACCESSOR = "channelsCategory"
SELF_BOT_OPTION = "selfBot"
BACKEND_BASE_URL_ACCESSOR = "backendBaseUrl"
JOIN_SECONDS_ACCESSOR = "waitingForJoin"
LOAD_QUESTIONS_ACCESSOR = "loadQuestions"
COMMANDS_ACCESSOR = "commands"
COMMAND_NAME_ACCESSOR = "commandName"
COMMAND_KEYWORD_ACCESSOR = "commandKeyWord"
COMMAND_ENABLE_ACCESSOR = "enabled"
COMMAND_CONTEXT_ACCESSOR = "passContext"
COMMAND_DESCRIPTION = "description"
COMMAND_HELP = "help"
# commands accessors
INFO_COMMAND = "info"
MAKEARENA_COMMAND = "makeArena"
CLEANALL_COMMAND = "cleanAll"
CLEANARENA_COMMAND = "cleanArena"
PONG_COMMAND = "pong"
GETPLAYERINFO_COMMAND = "getPlayerInfo"
LAUNCHEDARENAS_COMMAND = "ps"
GETSESSIONINFO_COMMAND = "sessionInfo"
ARGS_FLAGS = {
"-t": "ANSWER_TIME",
"-q": "QUESTIONS_AMOUNT",
}
TOPIC_ACCESSOR = "TOPICS"
ANSWER_TIME_ACCESSOR = "ANSWER_TIME"
QUESTION_AMOUNT_ACCESSOR = "QUESTIONS_AMOUNT"
SESSIONS_ACCESSOR = "sessions"
# question processing
VARIANTS = {
ONE_EMOJI: 1,
TWO_EMOJI: 2,
THREE_EMOJI: 3,
FOUR_EMOJI: 4
}
# info outputs
DIVADER = "#" * 50 + '\n'
PRINT_HL = "-" * 50 + '\n'
WINNERS_AMOUNT = 1
RULES_MESSAGE = f"Hello everyone! If you read this, it means you are on the battle-royale Arena. " \
f"Yoo will have %d round(s). " \
f"Your task is survive as many rounds as you can. Every round consist of one question " \
f"with 4 variants of answer. Use EMOJI to select your answer. You will have %d seconds on every " \
f"question to select the answer, so after this time your answers will be recorded and processed. Pay " \
f"attention, because if you are wrong, you will be kicked out of the game. Try to become the king of " \
f"Arena or answer all questions. Good luck!!!\n\nLets meet our warriors:\n"
ARENA_INVITE = "Arena in <#%d> will start in %d seconds. React below to join!"
WRONG_ARGUMENTS_START = "Unable to create new battle: wrong arguments"
NO_ARGUMENTS = "Couldn't execute this command: no arguments. Watch info."
TOPICS_SEQUENCE = " Insided topic: %s."
TOPICS_SELECTION_MESSAGE = "To start arena choose one or more topics below.\n" \
"%s."
BATTLE_ABORTED = "Battle in %s was aborted: holding for too long."
COMMAND_ERROR = "Couldn't exec the command: %s."
END_OF_ANSWERING = "Answers recorded!"
PLAYERS_KICKED = "<@%d> was kicked from %s."
BATTLE_STOPPED_AND_WHY = "Battle in %s was stop by %s."
CANT_GET_INFO = "Couldn't get info about %s."
ARENA_DELETED = "Battle in %s was stopped and deleted."
ROUND_RESULT_TOPIC = "Round result.\nStill in game %d:\n"
POINTS_NAME = "points"
BANNED_PLAYERS_INFO = "%d players was banned.\n"
ARENA_INFO_TOPIC = "Launched arenas list\n"
ARENA_INFO_STRING = "%d. %s %s. Players kicked - %d. Players alive - %d.\n"
ARENA_IN_PROGRESS_STRING = "in progress..🛐"
ARENA_ENDED_STRING = "ended☯️"
GAME_RESULT_TOPIC = DIVADER + "After %d rounds battle in %s ended.\n" + \
PRINT_HL + "Survivors and scores:\n"
KICKED_PLAYERS_MESSAGE = PRINT_HL + "Who didn't make it...\n"
GAME_TOPICS_INFO = "In this game you will meet %s topics. Forewarned is forearmed."
CLICK_TO_START_MESSAGE = f"Please, vote {JOIN_EMOJI} below to start."
ROUND_START_TOPIC = "Round %d.\nPlayers dead %d. Players alive %d.\n"
PLAYER_ANSWERED = "<@%d> answered!"
PLAYER_REMOVED = "<@%d> was removed!"
NO_ATTACHMENTS = "No attachments to the message. Attache file to send questions."
PUT_QUESTIONS_ERROR = "Couldn't load questions from %s. Make sure, that file format is csv and filled correctly."
LOADED_QUESTIONS = "Loaded %d questions to the database.\n"
SHOUTS = [
"Poor thing! ",
"Damn son... ",
"Mission failed. ",
"Bruh... ",
"Bakayaro... ",
"It was going so well... ",
"Didn't want to say it, but... "
]
# battle settings
BATTLE_HOLDING = 100
TOPIC_CHOOSING = 15
HOLDING_BETWEEN_MESSAGES = 3
ANSWER_TIME = 20
DEFAULT_QUESTIONS_AMOUNT = 10
SECONDS_TO_JOIN = 11
# strings templates
BATTLE_CHANNEL_TEMPLATE = "Arena #%d"
BATTLE_ROLE_TEMPLATE = "Arena %d warrior"
DATETIME_TEMPLATE = '%d.%m.%Y %H:%M:%S'
# information outputs
NO_INFO = "Nothing to output"
PLAYER_INFO = """----Info about <@%d>----
Arenas played - %d.
Average lifetime - %.2f percent.
Wins - %d.
"""
PLAYERS_SESSIONS_TITLE = "----Last %d arenas----\n"
SESSION_INFO_STRING = "%d. Arena from %s\nID - %s\nPlayers amount - %d\nRounds amount - %d\nTopic - %s\n"
SESSION_INFO_TITLE = "Information about %s session\nArena from %s\nPlayers amount - %d\nRounds amount - %d\nTopic - %s\n"
SESSION_ROUNDS_TITLE = f"----Session rounds----\n"
ROUND_INFO = "----Round #%d----\nQuestion - %s.\nPlayers answers:\n"
ANSWER_INFO = "%d. <@%d> answered %s, which is %s.\n"
RIGHT_ANSWER = "right✅"
WRONG_ANSWER = "wrong❌"
# other
CHANNEL_LINK_REGEX = r"<#([\d]+)>"
CONFIGURATIONS_PATH = "./bot/start_configs/"
CONFIG_FILENAME = "config.json"
STANDART_CONFIG_FILE_PATH = "./"
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/urls.py
|
<reponame>tsudd/student-arena-discord-bot
from django.urls import path, include
from .views import *
urlpatterns = [
path('api/topics', TopicsList.as_view(), name="topics"),
path('api/questions', QuestionsList.as_view(), name="questions"),
path('api/sessions', SessionsList.as_view(), name="sessions"),
path('api/players', PlayerDetail.as_view(), name="players"),
path('api/sessions/<int:pk>', SessionDetail.as_view(), name="session-detail")
]
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/models.py
|
<filename>restful_backend/service/firestore/models.py
from .config import *
import datetime
# Create your models here.
class Player(object):
def __init__(
self,
dis_id=228,
nick="NO NICK",
lifetime=1,
games_amount=0,
wins=0
) -> None:
super().__init__()
self.dis_id = dis_id
self.nick = nick
self.lifetime = lifetime
self.games_amount = games_amount
self.wins = wins
self.id = str(self.dis_id)
@staticmethod
def from_dict(data):
return Player(
dis_id=data[DISID_ACCESSOR],
nick=data[NAME_ACCESSOR],
lifetime=data[LIFETIME_ACCESSOR],
games_amount=data[GAMES_AMOUNT_ACCESSOR],
wins=data[WINS_AMOUNT_ACCESSOR]
)
def to_dict(self):
return {
DISID_ACCESSOR: self.dis_id,
NAME_ACCESSOR: self.nick,
LIFETIME_ACCESSOR: self.lifetime,
GAMES_AMOUNT_ACCESSOR: self.games_amount,
WINS_AMOUNT_ACCESSOR: self.wins,
ID_ACCESSOR: self.dis_id
}
def update_lifetime(self, score, rounds, alive):
total = self.games_amount + 1
cycle = score / rounds if not alive else 1
self.lifetime = (self.lifetime * self.games_amount + cycle) / total
self.games_amount = total
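# Worked example (illustrative numbers): a player with lifetime 1.0 over 2 games
# who dies after 3 right answers in a 10-round game contributes cycle = 3 / 10 = 0.3,
# so the running average becomes lifetime = (1.0 * 2 + 0.3) / 3 ≈ 0.77.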
def update_info(self, data: dict, rounds_amount):
if data[ALIVE_ACCESSOR]:
self.wins += 1
self.update_lifetime(data[TOTAL_RIGHTS_ACCESSOR],
rounds_amount, data[ALIVE_ACCESSOR])
def __str__(self):
return f"{self.nick} with {self.games_amount} games."
class QuestionTopic(object):
def __init__(self, name=DEFAULT_EMPTY_STRING, emoji="🖕🏻", tid="0") -> None:
super().__init__()
self.name = name
self.emoji = emoji
self.id = tid
@staticmethod
def from_dict(data):
return QuestionTopic(
name=data["name"],
emoji=data["emoji"],
tid=data[ID_ACCESSOR]
)
def to_dict(self):
return {
"name": self.name,
"emoji": self.emoji,
"id": self.id
}
def __str__(self):
return f"{self.name} topic."
class QuestionVariant(object):
def __init__(self, variant=DEFAULT_EMPTY_STRING, vid="0") -> None:
super().__init__()
self.variant = variant
self.id = vid
@staticmethod
def from_dict(data, many=False):
if many:
ans = []
for v in data:
ans.append(
QuestionVariant(
variant=v[QUESTION_VARIANT],
vid=v[ID_ACCESSOR]
)
)
return ans
return QuestionVariant(
variant=data[QUESTION_VARIANT],
vid=data[ID_ACCESSOR]
)
@staticmethod
def many_to_list(data):
ans = []
for v in data:
ans.append(
v.to_dict()
)
return ans
def to_dict(self):
return {
QUESTION_VARIANT: self.variant,
ID_ACCESSOR: self.id
}
def __str__(self):
return f"Question variant {self.variant}"
class Question(object):
def __init__(
self,
question_string=DEFAULT_EMPTY_STRING,
variants=None,
topic="0",
right_ind="1",
qid="0"
) -> None:
super().__init__()
self.question_string = question_string
self.variants = QuestionVariant.from_dict(variants, many=True)
self.topic = topic
self.right_ind = right_ind
self.id = qid
@staticmethod
def from_dict(data):
return Question(
question_string=data[QUESTION_STRING_FIELD],
variants=data[VARIANTS_ACCESSOR],
topic=data[TOPIC_QUERY],
right_ind=data[QUESTION_RIGHT_ANSWER],
qid=data[ID_ACCESSOR]
)
def to_dict(self):
return {
QUESTION_STRING_FIELD: self.question_string,
VARIANTS_ACCESSOR: QuestionVariant.many_to_list(self.variants),
TOPIC_QUERY: self.topic,
QUESTION_RIGHT_ANSWER: self.right_ind,
ID_ACCESSOR: self.id
}
def __str__(self):
return f"Question {self.question_string}."
class Round(object):
def __init__(
self,
session="0",
question="0",
variants=None,
right_ind="0",
rid="0"
) -> None:
super().__init__()
self.session = session
self.question = question
self.variants = variants
self.right_ind = right_ind
self.id = rid
@staticmethod
def from_dict(data):
return Round(
session=data[SESSION_ACCESSOR],
question=data[QUESTION_ID_ACCESSOR],
variants=data[VARIANTS_ACCESSOR],
right_ind=data[QUESTION_RIGHT_ANSWER],
rid=data[ID_ACCESSOR]
)
def to_dict(self):
return {
ID_ACCESSOR: self.id,
SESSION_ACCESSOR: self.session,
QUESTION_ID_ACCESSOR: self.question,
VARIANTS_ACCESSOR: self.variants,
QUESTION_RIGHT_ANSWER: self.right_ind,
}
def __str__(self):
return f"Round of session {self.session} with question {self.question}."
class Answer(object):
def __init__(
self,
session="0",
round_arg="0",
player="0",
answer="0",
right=False,
aid="0"
) -> None:
super().__init__()
self.session = session
self.round = round_arg
self.player = player
self.answer = answer
self.right = right
self.id = aid
@staticmethod
def from_dict(data):
return Answer(
session=data[SESSION_ACCESSOR],
round_arg=data[ROUND_ACCESSOR],
player=data[PLAYER_ACCESSOR],
answer=data[QUESTION_VARIANT],
right=data[ANSWER_STATUS_ACCESSOR],
aid=data[ID_ACCESSOR]
)
def to_dict(self):
return {
ID_ACCESSOR: self.id,
SESSION_ACCESSOR: self.session,
ROUND_ACCESSOR: self.round,
PLAYER_ACCESSOR: self.player,
QUESTION_VARIANT: self.answer,
ANSWER_STATUS_ACCESSOR: self.right
}
def __str__(self):
return f"{self.right} answer of {self.player}."
class Session(object):
def __init__(
self,
players_amount=0,
dead_amount=0,
rounds_amount=1,
date=datetime.datetime.now().isoformat(sep='T'),
topic="0",
sid="0"
) -> None:
super().__init__()
self.players_amount = players_amount
self.dead_amount = dead_amount
self.rounds_amount = rounds_amount
self.date = date
self.topic = topic
self.id = sid
@staticmethod
def from_dict(data):
return Session(
players_amount=data[PLAYERS_AMOUNT],
dead_amount=data[DEAD_AMOUNT],
rounds_amount=data[ROUNDS_AMOUNT],
date=data[DATETIME_FIELD],
topic=data[TOPIC_QUERY],
sid=data[ID_ACCESSOR]
)
def to_dict(self):
return {
PLAYERS_AMOUNT: self.players_amount,
DEAD_AMOUNT: self.dead_amount,
ROUNDS_AMOUNT: self.rounds_amount,
DATETIME_FIELD: self.date,
TOPIC_QUERY: self.topic
}
def __str__(self):
return f"Session of {self.date} with {self.topic}"
class Participation(object):
def __init__(self, session="0", player="0") -> None:
super().__init__()
self.session = session
self.player = player
@staticmethod
def from_dict(data):
return Participation(
session=data[SESSION_ACCESSOR],
player=data[PLAYER_ACCESSOR]
)
def to_dict(self):
return {
SESSION_ACCESSOR: self.session,
PLAYER_ACCESSOR: self.player
}
def __str__(self):
return f"Participation record of {self.player}."
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/parsers.py
|
from rest_framework.parsers import BaseParser
import io
import csv
import re
class CSVTextParser(BaseParser):
"""
Custom CSV text parser.
"""
media_type = 'text/csv'
def parse(self, stream, media_type, parser_context):
    # Honor the charset declared in the media type; fall back to UTF-8.
    # (The original matched the charset but never used it for decoding.)
    match = re.search(r"charset=([\w-]+)", media_type)
    charset = match.group(1) if match else "utf-8"
    csv_reader = csv.reader(io.StringIO(stream.read().decode(charset)))
    return [row for row in csv_reader]
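# Minimal usage sketch (illustrative, not part of the service): feed the parser
# a CSV payload the way DRF would hand it a request stream. The row layout
# mirrors what put_questions expects: question, 4 variants, right index, topic.
if __name__ == "__main__":
    demo = io.BytesIO(b"What is 2+2?,3,4,5,22,2,1\n")
    print(CSVTextParser().parse(demo, "text/csv; charset=utf-8", None))
    # -> [['What is 2+2?', '3', '4', '5', '22', '2', '1']]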
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/bot.py
|
<filename>bot/arenabot/bot/bot.py
import asyncio
from discord import colour
from discord.ext.commands.errors import DisabledCommand
from .entities.recorder_config import *
import logging
import random
import re
import time as tm
import discord
from discord.ext import commands
from .config import *
from .entities.quiz import Quiz
from .dataprovider.data_provider import DataProvider
from .dataprovider.back_config import *
class StudentArenaBot(commands.Bot):
def __init__(self, config: dict, intents=None):
commands.Bot.__init__(
self, command_prefix=config[COMMAND_PREFIX_ACCESSOR], self_bot=config[SELF_BOT_OPTION], intents=intents)
self.battles = {}
self.broadcast_channel = config[BROADCAST_CHANNEL_ACCESSOR]
self.admin_channel = config[ADMIN_CHANNEL_ACCESSOR]
self.info_channel = config[INFO_CHANNEL_ACCESSOR]
self.data_provider = DataProvider(config[BACKEND_BASE_URL_ACCESSOR])
self.messages = {}
self.answers = {}
self.setting = Settings(config)
logging.info(self.intents.members)
self.__link_commands(config[COMMANDS_ACCESSOR])
async def on_ready(self):
self.broadcast_channel = await self.fetch_channel(self.broadcast_channel)
self.admin_channel = await self.fetch_channel(self.admin_channel)
self.info_channel = await self.fetch_channel(self.info_channel)
logging.info(f"Bot creation - succeed. Logged as {self.user}")
def __link_commands(self, commands):
# self.add_command(Command(info, name="info", pass_context=True))
try:
@self.command(
name=commands[MAKEARENA_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[MAKEARENA_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[MAKEARENA_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[MAKEARENA_COMMAND][COMMAND_DESCRIPTION],
help=commands[MAKEARENA_COMMAND][COMMAND_HELP]
)
async def start_battle(ctx, *args):
if ctx.channel.id == ADMIN_CHANNEL:
try:
parsed_args = StudentArenaBot.parse_arguments([*args])
parsed_args[TOPIC_ACCESSOR] = await self.get_topic_id(ctx)
logging.info("got it")
except ValueError:
logging.info(WRONG_ARGUMENTS_START)
await self.send_admin(WRONG_ARGUMENTS_START)
return
except Exception as e:
logging.info(f"{e}. Ending start battle")
return
logging.info(
f"Starting battle with arguments {parsed_args}")
role = await self.create_arena_role(ctx.guild)
text_channel = await self.create_battle_channel(ctx.guild, role)
players = await self.get_players(text_channel, parsed_args[TOPIC_ACCESSOR])
if len(players) == 0:
# not enough players
s = f"Stopped battle in {text_channel.name}. No players."
logging.info(s)
await self.send_admin(s)
return
await self.give_role(players, role)
new_battle = Quiz(
text_channel.id,
ctx.me,
players,
self.data_provider.topics[parsed_args[TOPIC_ACCESSOR]],
self.data_provider.get_questions(
parsed_args[QUESTION_AMOUNT_ACCESSOR], parsed_args[TOPIC_ACCESSOR])
)
self.battles[new_battle.cid] = [
new_battle, text_channel, role]
try:
await self.launch_game(new_battle)
logging.info(
f"Arena in {text_channel.name} has ended.")
# record the finished session to the backend
if new_battle.state.game_ended:
logging.info(
f"Sending info about session in {text_channel.name} to the backend.")
self.data_provider.send_session_info(
new_battle.dump_game())
except Exception as e:
logging.info(
f"Game {new_battle.cid} stopped by exception {e}.")
if new_battle.cid in self.battles:
await self.admin_channel.send(BATTLE_STOPPED_AND_WHY % (text_channel.name, e))
self.battles[new_battle.cid][0].state.game_in_progress = False
except KeyError:
logging.info("Couldn't deploy arena creation comand.")
try:
@self.command(
name=commands[CLEANALL_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[CLEANALL_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[CLEANALL_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[CLEANALL_COMMAND][COMMAND_DESCRIPTION],
help=commands[CLEANALL_COMMAND][COMMAND_HELP]
)
async def clean_all(ctx):
if ctx.channel.id == ADMIN_CHANNEL:
logging.info("Cleaning all info.")
for attrs in self.battles.values():
await attrs[1].delete()
await attrs[2].delete()
logging.info(f"{attrs[1]} was deleted.")
self.battles.clear()
else:
await ctx.reply("Nice try you dummy.")
except KeyError:
logging.info("Couldn't deploy \"clean all\" command")
try:
@self.command(
name=commands[CLEANARENA_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[CLEANARENA_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[CLEANARENA_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[CLEANARENA_COMMAND][COMMAND_DESCRIPTION],
help=commands[CLEANARENA_COMMAND][COMMAND_HELP]
)
async def clean_arena(ctx, *args):
if ctx.channel.id == ADMIN_CHANNEL:
arguments = [*args]
logging.info(f"Got arguments to delete: {arguments}")
if len(arguments) > 0:
for a in arguments:
cid = int(re.match(CHANNEL_LINK_REGEX, a).group(1))
ch = await self.fetch_channel(cid)
logging.info(
f"Deleting channel {ch} with {cid} id.")
if ch is not None and ch.id in self.battles:
await self.info_channel.send(ARENA_DELETED % ch.name)
await self.clean_game(cid)
else:
await ctx.reply("Nice try you dummy.")
except KeyError:
logging.info("Couldn't deploy arena deletion comand.")
try:
@self.command(
name=commands[PONG_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[PONG_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[PONG_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[PONG_COMMAND][COMMAND_DESCRIPTION],
help=commands[PONG_COMMAND][COMMAND_HELP]
)
async def pong(ctx, *arg):
await ctx.channel.send(f"Pong {[*arg]}")
except KeyError:
logging.info("Couldn't deploy pong comand.")
try:
@self.command(
name=commands[GETPLAYERINFO_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[GETPLAYERINFO_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[GETPLAYERINFO_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[GETPLAYERINFO_COMMAND][COMMAND_DESCRIPTION],
help=commands[GETPLAYERINFO_COMMAND][COMMAND_HELP]
)
async def get_player_info(ctx):
ans = ""
for user in ctx.message.mentions:
logging.info(f"Getting info about {user.name}")
try:
data = self.data_provider.get_player_sessions(user.id)
ans += self.form_player_data(data)
except ValueError:
logging.error(
f"Couldn't get info about {user.name} from backed")
ans += CANT_GET_INFO % user.name
except Exception as e:
logging.error(e)
await ctx.reply(ans if len(ans) > 0 else NO_INFO)
except KeyError:
logging.info("Couldn't deploy comand for getting player info.")
try:
@self.command(
name=commands[LAUNCHEDARENAS_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[LAUNCHEDARENAS_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[LAUNCHEDARENAS_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[LAUNCHEDARENAS_COMMAND][COMMAND_DESCRIPTION],
help=commands[LAUNCHEDARENAS_COMMAND][COMMAND_HELP]
)
async def ps_battles(ctx):
ans = ARENA_INFO_TOPIC
num = 1
if len(self.battles) > 0:
for b in self.battles.values():
ans += ARENA_INFO_STRING % (
num,
b[1].name,
ARENA_IN_PROGRESS_STRING if b[0].state.game_in_progress else ARENA_ENDED_STRING,
b[0].state.dead_counter,
b[0].state.player_counter
)
num += 1
else:
ans += "None."
await ctx.reply(ans)
except KeyError:
logging.info("Couldn't deploy ps command")
try:
@self.command(
name=commands[GETSESSIONINFO_COMMAND][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[GETSESSIONINFO_COMMAND][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[GETSESSIONINFO_COMMAND][COMMAND_ENABLE_ACCESSOR],
description=commands[GETSESSIONINFO_COMMAND][COMMAND_DESCRIPTION],
help=commands[GETSESSIONINFO_COMMAND][COMMAND_HELP]
)
async def session_info(ctx, *args):
ans = ""
for i in args:
logging.info(f"Getting info about {i} session.")
try:
data = self.data_provider.get_session_info(i)
ans += self.form_session_data(data)
except ValueError:
logging.error(
f"Couldn't get info about {i} from backed")
# except Exception as e:
# logging.error(e)
if len(ans) == 0:
ans += CANT_GET_INFO % i
await ctx.reply(ans)
except KeyError:
logging.info("Couldn't deploy comand for getting session info")
try:
@self.command(
name=commands[LOAD_QUESTIONS_ACCESSOR][COMMAND_KEYWORD_ACCESSOR],
pass_context=commands[LOAD_QUESTIONS_ACCESSOR][COMMAND_CONTEXT_ACCESSOR],
enabled=commands[LOAD_QUESTIONS_ACCESSOR][COMMAND_ENABLE_ACCESSOR],
description=commands[LOAD_QUESTIONS_ACCESSOR][COMMAND_DESCRIPTION],
help=commands[LOAD_QUESTIONS_ACCESSOR][COMMAND_HELP]
)
async def upload_questions(ctx, *args):
if ctx.channel.id == ADMIN_CHANNEL:
ans = ""
for a in ctx.message.attachments:
try:
response = self.data_provider.put_questions(
await a.read(), a.content_type)
ans += LOADED_QUESTIONS % response[AMOUNT_QUERY]
except Exception:
ans += PUT_QUESTIONS_ERROR % a.filename
if len(ans) == 0:
ans = NO_ATTACHMENTS
await ctx.reply(ans)
except KeyError:
logging.info("Couldn't deploy comand for questions loading.")
def form_player_data(self, data: dict):
player = data[PLAYER_ACCESSOR]
ans = PLAYER_INFO % (
player[ID_ACCESSOR],
player[GAMES_AMOUNT_ACCESSOR],
round(player[LIFETIME_ACCESSOR], 4) * 100,
player[WINS_ACCESSOR]
)
if len(data[SESSIONS_ACCESSOR]) > 0:
ans += PLAYERS_SESSIONS_TITLE % len(data[SESSIONS_ACCESSOR])
num = 1
for s in data[SESSIONS_ACCESSOR]:
ans += SESSION_INFO_STRING % (
num,
s[DATETIME_ACCESSOR],
s[ID_ACCESSOR],
s[PLAYERS_AMOUNT],
s[ROUNDS_AMOUNT],
self.data_provider.get_topic_str(s[TOPIC_QUERY])
)
num += 1
ans += '-' * 50
return ans
def form_session_data(self, data: dict):
ans = SESSION_INFO_TITLE % (
data[ID_ACCESSOR],
data[DATETIME_ACCESSOR],
data[PLAYERS_AMOUNT],
data[ROUNDS_AMOUNT],
self.data_provider.get_topic_str(data[TOPIC_QUERY])
)
ans += SESSION_ROUNDS_TITLE
num = 1
for r in data[ROUNDS_ACCESSOR]:
ans += ROUND_INFO % (
num,
r[QUESTION_ID_ACCESSOR],
)
j = 1
for answer in r[ANSWERS_ACCESSOR]:
ans += ANSWER_INFO % (
j,
answer[PLAYER_ACCESSOR],
answer[ANSWER_ACCESSOR],
RIGHT_ANSWER if answer[ANSWER_STATUS_ACCESSOR] else WRONG_ANSWER
)
j += 1
num += 1
return ans
async def send_admin(self, message: str):
await self.admin_channel.send(message)
async def get_topic_id(self, ctx):
s = ""
logging.info("Getting topic for new game.")
for topic in self.data_provider.topics.values():
s += f"- {topic[EMOJI_ACCESSOR]} is {topic[NAME_ACCESSOR]}.\n"
message_string = TOPICS_SELECTION_MESSAGE % s
mes = await ctx.reply(message_string, mention_author=True)
logging.debug(f"Sent message to react {mes.id}")
self.messages[mes.id] = (False, None)
for emo in self.data_provider.topic_emojis.keys():
await mes.add_reaction(emo)
t0 = tm.time()
while not self.messages[mes.id][0]:
logging.info("Waiting for choosing the topic for new game.")
if tm.time() - t0 > TOPIC_CHOOSING:
raise ValueError
await asyncio.sleep(3)
ans = self.messages[mes.id][1]
del self.messages[mes.id]
logging.info(f"Got topic {self.data_provider.topics[ans]}")
return ans
@staticmethod
def parse_arguments(args):
parsed = {
ANSWER_TIME_ACCESSOR: ANSWER_TIME,
QUESTION_AMOUNT_ACCESSOR: DEFAULT_QUESTIONS_AMOUNT
}
for i in range(len(args)):
if args[i].startswith("-") and args[i] in ARGS_FLAGS:
parsed[ARGS_FLAGS[args[i]]] = int(args[i + 1])
logging.info(f"Parsed arguments {parsed}")
return parsed
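# Example (illustrative): parse_arguments(["-t", "30", "-q", "5"]) returns
# {"ANSWER_TIME": 30, "QUESTIONS_AMOUNT": 5}; flags missing from ARGS_FLAGS are ignored.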
async def get_players(self, channel, topic):
players = []
info = TOPICS_SEQUENCE % (
self.data_provider.topics[topic][NAME_ACCESSOR])
message = await self.broadcast_invite(channel, self.setting.join_seconds, info)
logging.info(f"Sent invite for {channel.name}.")
await message.add_reaction(JOIN_EMOJI)
await asyncio.sleep(self.setting.join_seconds)
message = await self.broadcast_channel.fetch_message(message.id)
if message.reactions:  # guard: the join reaction may have been removed
    join_react = message.reactions[0]
    async for user in join_react.users():
        if user.id == BOT_ID:
            continue
        players.append(user.id)
players = await self.get_members_with_id(players)
logging.info(f"List of players to join - {players}")
return players
async def get_members_with_id(self, ids: list):
members = []
# fetches members one by one; acceptable for small arenas
for i in ids:
# members.append(utils.get(self.get_all_members(), id=i))
members.append(await self.guilds[0].fetch_member(i))
return members
async def broadcast_invite(self, channel, time, salt=""):
return await self.broadcast_channel.send(ARENA_INVITE % (channel.id, time) + salt)
async def launch_game(self, quiz: Quiz):
channel = await self.fetch_channel(quiz.cid)
mes = await channel.send(quiz.get_start_quiz())
await mes.add_reaction(JOIN_EMOJI)
t0 = tm.time()
while True:
message = await channel.fetch_message(mes.id)
logging.info(message.reactions)
if message.reactions[0].count - 1 == quiz.state.player_counter:
break
if tm.time() - t0 > BATTLE_HOLDING:
await self.admin_channel.send(BATTLE_ABORTED % channel.name)
return
await asyncio.sleep(HOLDING_BETWEEN_MESSAGES)
while quiz.state.game_in_progress:
quiz.update_answer_statuses()
quiz.question_message = await channel.send(quiz.get_start_new_round())
await asyncio.sleep(HOLDING_BETWEEN_MESSAGES)
await self.get_answers(quiz, channel, quiz.question_message)
quiz.question_message = None
await channel.send(END_OF_ANSWERING)
wrong_players = quiz.check_answers_and_kill(
self.answers, quiz.state.last_question)
players_to_ban = await self.get_members_with_id(wrong_players)
logging.info(f"Players to kill {players_to_ban}")
await self.kick_players(players_to_ban, self.battles[quiz.cid][2], channel)
await channel.send(quiz.get_round_result())
quiz.record_round(self.answers)
quiz.is_game_end()
await asyncio.sleep(HOLDING_BETWEEN_MESSAGES)
result = quiz.get_game_result()
await self.info_channel.send(result)
await channel.send(result)
quiz.state.game_ended = True
async def get_answers(self, quiz, channel, mes):
self.answers = {}
for player in quiz.players.values():
if not player.alive:
continue
self.answers[player.uid] = -1
for var in VARIANTS.keys():
await mes.add_reaction(var)
await asyncio.sleep(quiz.answer_time)
logging.info(f"Got reactions from {channel.name} - {self.answers}")
async def kick_players(self, players, role, channel):
for p in players:
await channel.send(SHOUTS[random.randint(0, len(SHOUTS) - 1)] + PLAYER_REMOVED % p.id)
await p.remove_roles(role)
async def give_role(self, players, role):
for p in players:
await p.add_roles(role)
async def clean_game(self, cid):
logging.info(f"Cleaning battle {cid} in {self.battles}.")
attrs = self.battles[cid]
await attrs[1].delete()
await attrs[2].delete()
del self.battles[cid]
async def create_battle_channel(self, guild, role):
arenas = len(guild.categories[0].channels)
name = BATTLE_CHANNEL_TEMPLATE % (arenas + 1)
overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
guild.me: discord.PermissionOverwrite(read_messages=True),
role: discord.PermissionOverwrite(
read_messages=True, send_messages=False)
}
channel = await guild.create_text_channel(name, overwrites=overwrites, category=guild.categories[0])
logging.info(f"Created channel {name} with {channel.id}.")
return channel
async def create_arena_role(self, guild):
role_name = BATTLE_ROLE_TEMPLATE % (len(self.battles) + 1)
# add random color generation -- done
role = await guild.create_role(name=role_name, colour=self.get_random_color())
logging.info(f"{role_name} role was created. {role}")
return role
async def on_raw_reaction_add(self, payload):
if payload.user_id == self.user.id:
return
cid = payload.channel_id
if cid in self.battles and \
str(payload.emoji) in VARIANTS and \
self.battles[cid][0].state.game_in_progress and \
payload.user_id in self.battles[cid][0].players and \
self.battles[cid][0].question_message is not None and\
self.battles[cid][0].question_message.id == payload.message_id:
quiz = self.battles[cid][0]
player = quiz.players[payload.user_id]
member = await self.guilds[0].fetch_member(payload.user_id)
logging.info(f"{player.name} just reacted!")
if not player.answered:
player.answered = True
self.answers[player.uid] = VARIANTS[str(payload.emoji)]
logging.info(f"{member.name} is making decision!")
else:
await quiz.question_message.remove_reaction(payload.emoji, member)
logging.info(
f"{member.name} tried to select more than one answer. Abort.")
elif cid == self.admin_channel.id:
emo = str(payload.emoji)
if payload.message_id in self.messages and \
not self.messages[payload.message_id][0] and \
emo in self.data_provider.topic_emojis:
self.messages[payload.message_id] = True, self.data_provider.topic_emojis[emo]
logging.info(
f"Got {payload.emoji} reaction when selecting topic.")
async def on_command_error(self, context, exception):
if isinstance(exception, DisabledCommand):
logging.info(
f"Tried to exec disabled command but got error: {exception}.")
await context.reply(COMMAND_ERROR % exception)
return
return await super().on_command_error(context, exception)
def get_random_color(self):
return discord.Colour.from_rgb(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
# unfinished attempt to let players cancel an answer
# async def on_reaction_remove(self, reaction, user):
# cid = reaction.message.channel.id
# logging.info(f"{user.name} canceled reation.")
# if cid in self.battles and \
# self.battles[cid][0].state.game_in_progress and \
# user.id in self.battles[cid][0].players and \
# self.battles[cid][0].question_message is not None and \
# self.battles[cid][0].question_message.id == reaction.message.id:
# quiz = self.battles[cid][0]
# player = quiz.players[user.id]
# if player.answered:
# player.answered = False
# logging.info(f"{user.name} canceled his answer!")
class Settings(object):
def __init__(self, settings) -> None:
super().__init__()
self.join_seconds = settings[JOIN_SECONDS_ACCESSOR]
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/migrations/0004_auto_20210521_2335.py
|
# Generated by Django 3.0.8 on 2021-05-21 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('arenastatistics', '0003_auto_20210519_1935'),
]
operations = [
migrations.AlterField(
model_name='player',
name='dis_id',
field=models.BigIntegerField(default=228, verbose_name='User ID'),
),
]
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/migrations/0002_questiontopic_emoji.py
|
# Generated by Django 3.0.8 on 2021-05-19 07:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('arenastatistics', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='questiontopic',
name='emoji',
field=models.CharField(default='🖕🏻', max_length=10, verbose_name='Topic emoji'),
),
]
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/dataprovider/back_config.py
|
BACKEND_BASE_URL = "http://172.19.0.3:8000/api/"
BACKEND_CONNECTION_TIME = 15
SLEEPING_TIME_CONNECTION = 3
TOPICS_URL = "topics"
QUESTIONS_URL = "questions"
SESSIONS_URL = "sessions"
PLAYERS_URL = "players"
NAME_ACCESSOR = "name"
EMOJI_ACCESSOR = "emoji"
TOPIC_QUERY = "topic"
AMOUNT_QUERY = "amount"
ID_QUERY = "id"
# accessors
DISID_ACCESSOR = "dis_id"
NICK_ACCESSOR = "nick"
PLAYER_ACCESSOR = "player"
LIFETIME_ACCESSOR = "lifetime"
GAMES_AMOUNT_ACCESSOR = "games_amount"
WINS_ACCESSOR = "wins"
DATETIME_ACCESSOR = "date"
# messages
HTTP_ERROR_MESSAGE = "Couldn't get data from backend."
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/views.py
|
<gh_stars>0
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import render
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from .serializers import *
from .models import *
from .config import *
import random
import logging
# import firebase_admin
# from firebase_admin import firestore
# from firebase_admin import credentials
# # Create your views here.
# # Use a service account
# cred = credentials.Certificate(
# '/home/appuser/project/arenastatistics/farebase/sudent-arena-bot-firebase-adminsdk-ps8j6-7da2d15a35.json')
# default = firebase_admin.initialize_app(cred, name="app")
# db = firestore.client(default)
class TopicsList(generics.ListAPIView):
serializer_class = QuestionTopicSerializer
queryset = QuestionTopic.objects.all()
class QuestionsList(generics.ListAPIView):
serializer_class = QuestionSerializer
queryset = Question.objects.all()
def get(self, request, *args, **kwargs):
params = dict(request.query_params)
amount = 10
questions = []
if AMOUNT_QUERY in params:
amount = int(params[AMOUNT_QUERY][0])
if TOPIC_QUERY in params:
topic_id = int(params[TOPIC_QUERY][0])
all_questions = list(Question.objects.filter(topic__id=topic_id))
random.shuffle(all_questions)
if len(all_questions) >= amount:
questions = all_questions[:amount]
else:
questions = all_questions
logging.info(
f"Sending questions list by params: amount - {amount}, topic - {topic_id}")
return Response(
self.serializer_class(
questions, many=True, context=self.get_serializer_context()).data
)
else:
return super().get(request, *args, **kwargs)
class SessionsList(generics.ListCreateAPIView):
serializer_class = SessionSerializer
queryset = Session.objects.all()
def get_player_or_default(self, player: dict) -> Player:
player_obj = None
try:
player_obj = Player.objects.get(dis_id=player[ID_ACCESSOR])
except ObjectDoesNotExist:
player_obj = Player()
player_obj.dis_id = player[ID_ACCESSOR]
player_obj.nick = player[NAME_ACCESSOR]
return player_obj
def post(self, request, *args, **kwargs):
data = request.data
try:
assert DEAD_AMOUNT in data and \
PLAYERS_AMOUNT in data and \
TOPIC_FIELD in data and \
DATETIME_FIELD in data and \
ROUNDS_AMOUNT in data and \
ROUNDS_ACCESSOR in data and \
PLAYERS_ACCESSOR in data
except AssertionError:
logging.error("Bad data in POST request.")
return Response(status=status.HTTP_400_BAD_REQUEST)
try:
logging.info("Saving session.")
session = Session(
players_amount=data[PLAYERS_AMOUNT],
dead_amount=data[DEAD_AMOUNT],
rounds_amount=data[ROUNDS_AMOUNT],
date=data[DATETIME_FIELD],
topic=QuestionTopic.objects.get(pk=data[TOPIC_FIELD])
)
session.save()
players = {}
for p in data[PLAYERS_ACCESSOR]:
players[p[ID_ACCESSOR]] = self.get_player_or_default(p), p
logging.info("Saving players")
for p in players.values():
if p[1][ALIVE_ACCESSOR]:
p[0].wins += 1
p[0].update_lifetime(
p[1][TOTAL_RIGHTS_ACCESSOR],
session.rounds_amount,
p[1][ALIVE_ACCESSOR]
)
p[0].save()
logging.info("Saving rounds and answers")
for r in data[ROUNDS_ACCESSOR]:
roun = Round(
session=session,
question=Question.objects.get(pk=r[QUESTION_ID_ACCESSOR]),
first_variant=QuestionVariant.objects.get(
pk=r[QUESTION_ANSWERS_FIELDS[0]]),
second_variant=QuestionVariant.objects.get(
pk=r[QUESTION_ANSWERS_FIELDS[1]]),
three_variant=QuestionVariant.objects.get(
pk=r[QUESTION_ANSWERS_FIELDS[2]]),
four_variant=QuestionVariant.objects.get(
pk=r[QUESTION_ANSWERS_FIELDS[3]]),
right_ind=r[QUESTION_RIGHT_ANSWER]
)
roun.save()
for ans in r[ANSWERS_ACCESSOR]:
answer = Answer(
session=session,
round=roun,
player=players[ans[UID_ACCESSOR]][0],
answer=QuestionVariant.objects.get(
pk=ans[QUESTION_VARIANT]),
right=ans[ANSWER_STATUS_ACCESSOR]
)
answer.save()
logging.info("Saving participation")
for p in players.values():
part = Participation(session=session, player=p[0])
part.save()
return Response(status=status.HTTP_201_CREATED)
except Exception as e:
logging.info(f"Exception while saving data: {e}")
return Response(status=status.HTTP_400_BAD_REQUEST)
class PlayerDetail(generics.ListAPIView):
serializer_class = PlayerSerializer
queryset = Player.objects.all()
def get(self, request, *args, **kwargs):
params = dict(request.query_params)
if len(params) == 0:
return super().get(request, *args, **kwargs)
if ID_QUERY not in params:
    return Response(status=status.HTTP_400_BAD_REQUEST)
uid = int(params[ID_QUERY][0])
amount = 10
if AMOUNT_QUERY in params:
amount = int(params[AMOUNT_QUERY][0])
player = Player.objects.get(dis_id=uid)
parts = list(Participation.objects.filter(
player=player).order_by("session__date")[:amount].values("session_id"))
ids = [record["session_id"] for record in parts]
sessions = Session.objects.filter(pk__in=ids)
return Response({
"player": self.serializer_class(player, context=self.get_serializer_context()).data,
"sessions": SessionSerializer(sessions, many=True, context=self.get_serializer_context()).data
})
class SessionDetail(generics.RetrieveAPIView):
queryset = Session.objects.all()
serializer_class = SessionSerializer
def get(self, request, *args, **kwargs):
params = kwargs
try:
pk = params["pk"]
session = Session.objects.get(pk=pk)
round_objects = list(Round.objects.filter(session__id=pk))
rounds = []
for r in round_objects:
anses = AnswerSerializer(
list(Answer.objects.filter(round__id=r.id)), many=True).data
rr = RoundSerializer(r).data
rr.update({
"answers": anses
})
rounds.append(rr)
except ObjectDoesNotExist:
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(
{**self.serializer_class(
session, context=self.get_serializer_context()).data,
"rounds": rounds})
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/config.py
|
<gh_stars>0
# data fields
DEAD_AMOUNT = "dead_amount"
PLAYERS_AMOUNT = "players_amount"
DATETIME_FIELD = "date"
TOPIC_FIELD = "topic"
ROUNDS_AMOUNT = "rounds_amount"
ROUNDS_ACCESSOR = "rounds"
# player model
PLAYERS_ACCESSOR = "players"
UID_ACCESSOR = "uid"
ID_ACCESSOR = "id"
NAME_ACCESSOR = "nick"
ALIVE_ACCESSOR = "alive"
TOTAL_RIGHTS_ACCESSOR = "rights"
# question model
QUESTION_STRING_FIELD = 'question_string'
QUESTION_ANSWERS_FIELDS = ["varOne", "varTwo", "varThree", "varFour"]
QUESTION_RIGHT_ANSWER = "right_ind"
QUESTION_VARIANT = "variant"
QUESTION_ID_ACCESSOR = "question"
ANSWERS_ACCESSOR = "answers"
ANSWER_STATUS_ACCESSOR = "right"
# params
TOPIC_QUERY = "topic"
AMOUNT_QUERY = "amount"
ID_QUERY = "id"
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/config.py
|
<reponame>tsudd/student-arena-discord-bot<gh_stars>0
# data fields
DEAD_AMOUNT = "dead_amount"
PLAYERS_AMOUNT = "players_amount"
DATETIME_FIELD = "date"
TOPIC_FIELD = "topic"
ROUNDS_AMOUNT = "rounds_amount"
ROUNDS_ACCESSOR = "rounds"
DEFAULT_EMPTY_STRING = "NO DATA"
# player model
PLAYERS_ACCESSOR = "players"
UID_ACCESSOR = "uid"
ID_ACCESSOR = "id"
DISID_ACCESSOR = "dis_id"
NAME_ACCESSOR = "nick"
LIFETIME_ACCESSOR = "lifetime"
GAMES_AMOUNT_ACCESSOR = "games_amount"
WINS_AMOUNT_ACCESSOR = "wins"
ALIVE_ACCESSOR = "alive"
TOTAL_RIGHTS_ACCESSOR = "rights"
# question model
QUESTION_STRING_FIELD = 'question_string'
QUESTION_ANSWERS_FIELDS = ["varOne", "varTwo", "varThree", "varFour"]
QUESTION_RIGHT_ANSWER = "right_ind"
QUESTION_VARIANT = "variant"
QUESTION_ID_ACCESSOR = "question"
ANSWERS_ACCESSOR = "answers"
ANSWER_ACCESSOR = "answer"
ANSWER_STATUS_ACCESSOR = "right"
SESSION_ACCESSOR = "session"
ROUND_ACCESSOR = "round"
PLAYER_ACCESSOR = "player"
VARIANTS_ACCESSOR = "variants"
# firestore collections names
SESSIONS_COLLECTION = u"sessions"
QUESIONS_COLLECTION = u"questions"
TOPICS_COLLECTION = u"topics"
PARTICIPATIONS_COLLECTION = u"participations"
# params
TOPIC_QUERY = "topic"
AMOUNT_QUERY = "amount"
ID_QUERY = "id"
# templates
DATETIME_TEMPLATE = '%d.%m.%Y %H:%M'
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/entities/recorder_config.py
|
# fields accessors
ID_ACCESSOR = "id"
UID_ACCESSOR = "uid"
QUESTION_STRING_FIELD = 'question_string'
QUESTION_ANSWERS_FIELDS = ["varOne", "varTwo", "varThree", "varFour"]
QUESTION_RIGHT_ANSWER = "right_ind"
QUESTION_VARIANT = "variant"
QUESTION_VARIANTS = "variants"
# player model
PLAYERS_MODELS_ACCESSOR = "players"
NICK_ACCESSOR = "nick"
LIVECYCLE_ACCESSOR = "lifecycle"
PLAYED_GAMES_ACCESSOR = "played"
LAST_ARENA_ACCESSOR = "last-arena"
USUALLY_WRONG_ACCESSOR = "wrong"
WINS_ARENAS_ACCESSOR = "wins"
TOTAL_RIGHTS_ACCESSOR = "rights"
ALIVE_ACCESSOR = "alive"
# question model
QUESTION_ID_ACCESSOR = "question"
ANSWERS_ACCESSOR = "answers"
ANSWER_STATUS_ACCESSOR = "right"
QUESTION_STRING_ACCESSOR = "question_string"
ANSWER_ACCESSOR = "answer"
# dump fields
DEAD_AMOUNT = "dead_amount"
PLAYERS_AMOUNT = "players_amount"
DATETIME_FIELD = "date"
TOPIC_FIELD = "topic"
ROUNDS_AMOUNT = "rounds_amount"
ROUNDS_ACCESSOR = "rounds"
|
tsudd/student-arena-discord-bot
|
bot/arenabot/main.py
|
<filename>bot/arenabot/main.py
#!/home/tsudd/anaconda3/envs/battle-royale-discord-bot/bin/python3
import logging
import discord
from bot.info import TOKEN
from bot.config import CONFIGURATIONS_PATH, CONFIG_FILENAME, STANDART_CONFIG_FILE_PATH
from bot.bot import StudentArenaBot
import json
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
def get_config():
    try:
        with open(f"{CONFIGURATIONS_PATH}{CONFIG_FILENAME}", "r") as fs:
            c = json.load(fs)
        logging.info("Successfully read provided configuration for bot.")
    except FileNotFoundError:
        try:
            with open(f"{STANDART_CONFIG_FILE_PATH}{CONFIG_FILENAME}", "r") as fs:
                c = json.load(fs)
            logging.info("Successfully read default configuration for bot.")
        except FileNotFoundError:
            raise AssertionError("Couldn't find config file.")
    return c
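# A minimal config.json sketch inferred from the accessors used in bot.py;
# the concrete values are illustrative assumptions, not the shipped config:
# {
#   "commandPrefix": "!",
#   "selfBot": false,
#   "adminChannel": 833201507888267265,
#   "broadcastChannel": 835537204188545073,
#   "infoChannel": 835908933725978645,
#   "backendBaseUrl": "http://localhost:8000/api/",
#   "waitingForJoin": 11,
#   "botToken": "...",
#   "commands": {
#     "pong": {"commandKeyWord": "pong", "passContext": true, "enabled": true,
#              "description": "Ping the bot.", "help": "pong"}
#   }
# }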
def main():
intents = discord.Intents.default()
intents.members = True
intents.reactions = True
conf = get_config()
bot = StudentArenaBot(conf, intents=intents)
bot.run(conf["botToken"] if "botToken" in conf else TOKEN)
if __name__ == '__main__':
main()
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/dataprovider/data_provider.py
|
<reponame>tsudd/student-arena-discord-bot<filename>bot/arenabot/bot/dataprovider/data_provider.py
#!/usr/bin/env python3
import logging
import time
import requests
from requests.exceptions import HTTPError
from .back_config import *
class DataProvider(object):
"""
Class for providing data to the bot from backend. Uses requests to get and post data.
"""
def __init__(self, back_url) -> None:
logging.info("Creating data provider")
self.topic_emojis = {}
self.backend_base_url = back_url
t0 = time.time()
self.topics = None
while time.time() - t0 < BACKEND_CONNECTION_TIME:
try:
self.topics = self._get_topics()
break
except HTTPError:
logging.info(
"Couldn't establish connection with backend. Trying again")
time.sleep(SLEEPING_TIME_CONNECTION)
if not self.topics:
raise HTTPError(HTTP_ERROR_MESSAGE)
for t in self.topics.values():
self.topic_emojis[t[EMOJI_ACCESSOR]] = t['id']
def _get_topics(self):
r = DataProvider.make_get(self.backend_base_url + TOPICS_URL)
if r is None:
raise HTTPError(HTTP_ERROR_MESSAGE)
fetched_topics = r.json()
topics = {}
for t in fetched_topics:
    topics[t['id']] = t
return topics
def get_questions(self, amount, topic):
r = DataProvider.make_get(
self.backend_base_url + QUESTIONS_URL + f"?{TOPIC_QUERY}={topic}&{AMOUNT_QUERY}={amount}")
if r is None:
raise HTTPError(HTTP_ERROR_MESSAGE)
return r.json()
def put_questions(self, file, content_type):
headers = {
"Content-type": content_type
}
r = DataProvider.make_put(
self.backend_base_url + QUESTIONS_URL,
headers,
file
)
if r is None:
raise HTTPError()
return r.json()
def get_player_sessions(self, uid, amount=10):
r = DataProvider.make_get(
self.backend_base_url + PLAYERS_URL +
f"/{uid}" + f"?{AMOUNT_QUERY}={amount}"
)
if r is None:
raise HTTPError(HTTP_ERROR_MESSAGE)
if r.status_code == 204:
raise ValueError
return r.json()
def get_session_info(self, sid):
r = DataProvider.make_get(
self.backend_base_url + SESSIONS_URL + f"?{ID_QUERY}={sid}"
)
if r is None:
raise HTTPError(HTTP_ERROR_MESSAGE)
if r.status_code == 204:
raise ValueError
return r.json()
def send_session_info(self, data):
    r = DataProvider.make_post(
        self.backend_base_url + SESSIONS_URL, data=data)
    if r is None:
        logging.info("Failed to send session info to the backend.")
@staticmethod
def make_get(url, headers=None):
try:
response = requests.get(url, headers=headers)
response.raise_for_status()
except HTTPError as http_error:
logging.info(f"{http_error} in get request to {url}.")
except Exception as err:
logging.info(f"Error {err} occurred in request to {url}.")
else:
return response
@staticmethod
def make_post(url, headers=None, data=None):
try:
response = requests.post(url, headers=headers, json=data)
response.raise_for_status()
except HTTPError as http_error:
logging.info(f"{http_error} in post request to {url}.")
except Exception as err:
logging.info(f"Error {err} occurred in request to {url}.")
else:
return response
@staticmethod
def make_put(url, headers=None, data=None):
try:
response = requests.put(url, headers=headers, data=data)
response.raise_for_status()
except HTTPError as http_error:
logging.info(f"{http_error} in post request to {url}.")
except Exception as err:
logging.info(f"Error {err} occurred in request to {url}.")
else:
return response
def get_topic_str(self, tid):
return self.topics[tid][NAME_ACCESSOR] + self.topics[tid][EMOJI_ACCESSOR]
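# Illustrative usage sketch (assumes the backend is reachable at BACKEND_BASE_URL;
# the constructor raises HTTPError otherwise):
if __name__ == "__main__":
    provider = DataProvider(BACKEND_BASE_URL)
    print(provider.topics)               # {topic_id: topic dict, ...}
    print(provider.get_questions(5, 1))  # up to 5 questions for topic 1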
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/firebase.py
|
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from .config import *
from .models import *
import random
# Use the application default credentials
cred = credentials.ApplicationDefault()
firebase_admin.initialize_app(cred)
db = firestore.client()
def get_topic_ref(tid):
return db.collection(u"topics").document(str(tid))
def _get_player_or_default(data):
doc = db.collection(PLAYERS_ACCESSOR).document(
str(data[ID_ACCESSOR])).get()
if not doc.exists:
return Player(
dis_id=data[ID_ACCESSOR],
nick=data[NAME_ACCESSOR],
)
return Player.from_dict(doc.to_dict())
def save_player_and_part(data: dict, session: Session):
p = _get_player_or_default(data)
p.update_info(data, session.rounds_amount)
db.collection(PLAYERS_ACCESSOR).document(str(p.id)).set(p.to_dict())
db.collection(PARTICIPATIONS_COLLECTION).add(
{
SESSION_ACCESSOR: db.document(f"{SESSIONS_COLLECTION}/{session.id}"),
PLAYER_ACCESSOR: db.document(f"{PLAYERS_ACCESSOR}/{p.id}")
}
)
def add_session_and_return(data):
new_doc = db.collection(SESSIONS_COLLECTION).add({
PLAYERS_AMOUNT: data[PLAYERS_AMOUNT],
DEAD_AMOUNT: data[DEAD_AMOUNT],
ROUNDS_AMOUNT: data[ROUNDS_AMOUNT],
DATETIME_FIELD: data[DATETIME_FIELD],
TOPIC_QUERY: data[TOPIC_QUERY]
})
doc = new_doc[1].get()
if not doc.exists:
raise ValueError
serialized = doc.to_dict()
serialized.update({ID_ACCESSOR: doc.id})
return Session.from_dict(serialized)
def add_rounds(data: list, session: Session):
s_ref = db.collection(SESSIONS_COLLECTION).document(session.id)
num = 0
for r in data:
num += 1
q_ref = _get_question_ref(r[QUESTION_ID_ACCESSOR])
round_doc = s_ref.collection(ROUNDS_ACCESSOR).add(
{
QUESTION_ID_ACCESSOR: q_ref,
ID_ACCESSOR: num
}
)
_add_player_answers(
round_doc[1], r[ANSWERS_ACCESSOR])
def get_player(uid):
doc = _get_player_ref(uid).get()
if doc.exists:
return doc.to_dict()
def _get_question_ref(qid):
# str() keeps the document id type consistent even if qid arrives as an int
return db.collection(QUESIONS_COLLECTION).document(str(qid))
def _add_player_answers(r_ref, answers: list):
for a in answers:
r_ref.collection(ANSWERS_ACCESSOR).add(a)
def _get_player_ref(uid):
return db.collection(PLAYERS_ACCESSOR).document(str(uid))
def _get_topic_ref(tid):
return db.collection(TOPICS_COLLECTION).document(str(tid))
def get_topics():
return _get_dicts_by_refs(TOPICS_COLLECTION)
def _get_variants(doc):
ans = []
for d in doc.stream():
ans.append(d.to_dict())
ans[-1][ID_ACCESSOR] = d.id
return ans
def get_player_sessions(uid, amount=10):
parts = db.collection(PARTICIPATIONS_COLLECTION).where(
PLAYER_ACCESSOR, u"==", _get_player_ref(uid))
ans = []
c = 0
for d in parts.stream():
if c == amount:
break
ses = d.to_dict()[SESSION_ACCESSOR].get()
if not ses.exists:
    break
ser_session = ses.to_dict()
ans.append(ser_session)
ans[-1][ID_ACCESSOR] = ses.id
c += 1
return ans
def _get_dicts_by_refs(collection):
ans = []
docs = db.collection(collection).get()
for d in docs:
ans.append(d.to_dict())
ans[-1][ID_ACCESSOR] = d.id
return ans
def get_questions(topic, amount=10):
ans = []
docs = db.collection(QUESIONS_COLLECTION).where(
TOPIC_QUERY, "==", _get_topic_ref(topic)).stream()
refs = []
for d in docs:
refs.append(d)
random.shuffle(refs)
for d in refs[:amount]:
q_ser = d.to_dict()
q = {
ID_ACCESSOR: d.id,
QUESTION_STRING_FIELD: q_ser[QUESTION_STRING_FIELD],
QUESTION_RIGHT_ANSWER: int(q_ser[QUESTION_RIGHT_ANSWER]),
TOPIC_QUERY: q_ser[TOPIC_QUERY].id,
VARIANTS_ACCESSOR: _get_variants(db.collection(
f"{QUESIONS_COLLECTION}/{d.id}/{VARIANTS_ACCESSOR}"))
}
ans.append(q)
return ans
def get_session(sid):
s_ref = db.collection(SESSIONS_COLLECTION).document(sid)
s = s_ref.get()
if s.exists:
ans = s.to_dict()
ans[ID_ACCESSOR] = s.id
ans[ROUNDS_ACCESSOR] = _get_rounds(s_ref)
return ans
def _get_rounds(s_ref):
rounds = s_ref.collection(ROUNDS_ACCESSOR).stream()
ans = []
for r in rounds:
r_ser = r.to_dict()
variants = r_ser[QUESTION_ID_ACCESSOR].collection(
VARIANTS_ACCESSOR).get()
ans.append({
ANSWERS_ACCESSOR: _get_answers(s_ref.collection(ROUNDS_ACCESSOR).document(r.id), variants),
QUESTION_ID_ACCESSOR: r_ser[QUESTION_ID_ACCESSOR].get().to_dict()[
QUESTION_STRING_FIELD]
})
return ans
def _get_answers(r_ref, variants):
answers = r_ref.collection(ANSWERS_ACCESSOR).stream()
ans = []
for a in answers:
ser_a = a.to_dict()
ans.append({
PLAYER_ACCESSOR: ser_a[UID_ACCESSOR],
ANSWER_STATUS_ACCESSOR: ser_a[ANSWER_STATUS_ACCESSOR],
ANSWER_ACCESSOR: variants[int(ser_a[QUESTION_VARIANT]) -
1].get(QUESTION_VARIANT)
})
return ans
def put_questions(questions):
assert type(questions) == list
amount = 0
for q in questions:
new_doc = db.collection(QUESIONS_COLLECTION).add({
QUESTION_STRING_FIELD: q[0],
QUESTION_RIGHT_ANSWER: q[5],
TOPIC_QUERY: _get_topic_ref(q[6])
})
for i in range(1, 5):
new_doc[1].collection(VARIANTS_ACCESSOR).document(str(i)).set({
QUESTION_VARIANT: q[i]
})
amount += 1
return amount
def get_mixed_questions(amount=10):
topics = db.collection(TOPICS_COLLECTION).get()
ts = amount // (len(topics) - 1)
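# Per-topic quota (illustrative): with amount=10 and 6 topic documents, one of
# which is presumably the "mixed" pseudo-topic itself, ts = 10 // 5 = 2
# questions are drawn from each of the remaining topics.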
questions = {}
for t in topics:
questions[t.id] = db.collection(
QUESIONS_COLLECTION).where(TOPIC_QUERY, "==", _get_topic_ref(t.id)).get()
for qs in questions.values():
random.shuffle(qs)
query = []
for qs in questions.values():
am = ts
while am > 0:
if len(qs) == 0:
break
query.append(qs.pop())
am -= 1
ans = []
for d in query:
q_ser = d.to_dict()
q = {
ID_ACCESSOR: d.id,
QUESTION_STRING_FIELD: q_ser[QUESTION_STRING_FIELD],
QUESTION_RIGHT_ANSWER: int(q_ser[QUESTION_RIGHT_ANSWER]),
TOPIC_QUERY: q_ser[TOPIC_QUERY].id,
VARIANTS_ACCESSOR: _get_variants(db.collection(
f"{QUESIONS_COLLECTION}/{d.id}/{VARIANTS_ACCESSOR}"))
}
ans.append(q)
return ans
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/admin.py
|
<gh_stars>0
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Player)
admin.site.register(QuestionTopic)
admin.site.register(QuestionVariant)
admin.site.register(Question)
admin.site.register(Session)
admin.site.register(Round)
admin.site.register(Participation)
admin.site.register(Answer)
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/entities/quiz.py
|
from .recorder_config import *
from ..dataprovider.back_config import NAME_ACCESSOR
import logging
import random
import datetime
from .player import Player
from .question import Question
from ..config import *
class Quiz(object):
def __init__(self, cid, initiator, players: list, topic: dict, questions: list, time_to_ans=10):
self.players = {}
self.cid = cid
self.initiator = initiator
self.state = State()
self.topic = topic
self.answer_time = time_to_ans
self.rounds_amount = len(questions)
self.question_message = None
self.questions = []
for player in players:
self.players[player.id] = Player(
player.id, player.nick if player.nick else player.name)
for question in questions:
self.questions.append(Question(question))
self.question_stack = [*self.questions]
self.state.player_counter = len(self.players)
self.state.question_amount = len(self.questions)
logging.info(
f"Game in {self.cid} with {self.state.question_amount} questions was created.")
def check_answers_and_kill(self, player_answers: dict, question: Question):
kill_uid_list = []
for playerid, answer in player_answers.items():
if not question.check_answer(answer):
self.players[playerid].kill()
self.players[playerid].bad_question = self.state.last_question
logging.info(f"{self.players[playerid].name} was killed!")
self.state.dead_players.append(self.players[playerid])
kill_uid_list.append(playerid)
else:
logging.info(
f"{self.players[playerid].name} got points after write answer!")
self.players[playerid].add_points()
self.state.last_ban_amount = len(kill_uid_list)
self.state.dead_counter += self.state.last_ban_amount
self.state.player_counter -= self.state.last_ban_amount
self.state.question_answered += 1
return kill_uid_list
def get_question(self):
if len(self.question_stack) == 0:
return "No question."
q = self.question_stack.pop()
logging.info(f"New question was sent to players - {q.question_string}")
self.state.last_question = q
return q.get_question_message()
def get_round_result(self):
ans = ROUND_RESULT_TOPIC % self.state.player_counter
for player in self.players.values():
if not player.alive:
continue
ans += f" - {player.name} - {player.score} {POINTS_NAME}. (+{self.state.added_score})\n"
ans += BANNED_PLAYERS_INFO % self.state.last_ban_amount
logging.info(f"Round ended.\n{ans}")
return ans
def get_game_result(self, arena_num=0):
if self.state.game_in_progress:
return "Game still in progress."
ans = GAME_RESULT_TOPIC % (
self.state.question_answered, BATTLE_CHANNEL_TEMPLATE % (arena_num + 1))
for player in self.players.values():
if not player.alive:
continue
ans += f" - {player.name} - {player.score} {POINTS_NAME}.\n"
ans += KICKED_PLAYERS_MESSAGE
date = datetime.datetime.now().strftime("%d.%m.%Y")
for player in self.state.dead_players:
ans += f" F to {player.name}(?-{date}) - {player.score} {POINTS_NAME}.\n"
ans += DIVADER
logging.info(f"Round results.\n{ans}")
return ans
def get_start_quiz(self):
if not self.state.game_in_progress:
return f"This game is no longer active."
ans = RULES_MESSAGE % (self.rounds_amount, self.answer_time)
for player in self.players.values():
ans += f" - {player.name} - {player.score} {POINTS_NAME}.\n"
ans += GAME_TOPICS_INFO % self.topic[NAME_ACCESSOR]
ans += CLICK_TO_START_MESSAGE
logging.info(
f"The game in {self.cid} with {self.state.player_counter} player is about to start")
return ans
def get_start_new_round(self):
ans = ROUND_START_TOPIC % (
self.state.question_answered + 1, self.state.dead_counter, self.state.player_counter)
ans += self.get_question()
self.state.last_ban_amount = 0
logging.info(
f"Round {self.state.question_answered} in {self.cid} started.")
return ans
def is_game_end(self):
if self.state.player_counter <= 1:
self.state.game_in_progress = False
if self.state.question_answered == self.state.question_amount:
self.state.game_in_progress = False
def update_answer_statuses(self):
for player in self.players.values():
player.answered = False
def record_round(self, answers):
    # build a serializable record of the finished round
    roundd = {
        QUESTION_ID_ACCESSOR: self.state.last_question.id,
        ANSWERS_ACCESSOR: []
    }
for uid, ans in answers.items():
# map the chosen reaction index back to the variant's backend id
answer = {
UID_ACCESSOR: uid,
QUESTION_VARIANT: self.state.last_question.answers[ans - 1][2]
}
answer[ANSWER_STATUS_ACCESSOR] = self.state.last_question.answer == ans
roundd[ANSWERS_ACCESSOR].append(answer)
self.state.rounds.append(roundd)
def dump_game(self):
answer = {}
answer[PLAYERS_AMOUNT] = len(self.players)
answer[DEAD_AMOUNT] = self.state.dead_counter
answer[ROUNDS_AMOUNT] = self.state.question_answered
answer[TOPIC_FIELD] = self.topic[ID_ACCESSOR]
answer[DATETIME_FIELD] = datetime.datetime.now(
).strftime(DATETIME_TEMPLATE)
answer[ROUNDS_ACCESSOR] = self.state.rounds
answer[PLAYERS_MODELS_ACCESSOR] = [player.dump()
                                   for player in self.players.values()]
return answer
class State(object):
def __init__(self):
self.dead_counter = 0
self.player_counter = 0
self.question_amount = 0
self.question_answered = 0
self.game_in_progress = True
self.added_score = 1
self.game_ended = False
self.last_question = None
self.last_ban_amount = 0
self.dead_players = []
self.rounds = []
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/entities/question.py
|
<filename>bot/arenabot/bot/entities/question.py
import logging
import random
from .recorder_config import ID_ACCESSOR, QUESTION_RIGHT_ANSWER, QUESTION_STRING_FIELD, QUESTION_ANSWERS_FIELDS, QUESTION_VARIANT, QUESTION_VARIANTS
class Question(object):
def __init__(self, question_fields: dict):
self.question_string = question_fields[QUESTION_STRING_FIELD]
self.id = question_fields[ID_ACCESSOR]
self.answer = question_fields[QUESTION_RIGHT_ANSWER]
answers = []
for var in question_fields[QUESTION_VARIANTS]:
answers.append([var[QUESTION_VARIANT], False,
var[ID_ACCESSOR]])
logging.info(f"{answers}")
answers[question_fields[QUESTION_RIGHT_ANSWER] - 1][1] = True
random.shuffle(answers)
num = 1
s = ""
for answer in answers:
s += f"{num}. " + answer[0] + '\n'
if answer[1]:
self.answer = num
num += 1
self.answers_string = s
self.answers = answers
def get_question_message(self):
return f"{self.question_string}\n{self.answers_string}\n"
def check_answer(self, answer: int):
    return answer == self.answer
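# Illustrative sketch: the dict below mirrors the shape returned by the backend
# (see DataProvider.get_questions); the values are made-up sample data.
if __name__ == "__main__":
    sample = {
        "id": "42",
        "question_string": "2 + 2 = ?",
        "right_ind": 2,
        "variants": [
            {"variant": "3", "id": "1"},
            {"variant": "4", "id": "2"},
            {"variant": "5", "id": "3"},
            {"variant": "22", "id": "4"},
        ],
    }
    q = Question(sample)
    print(q.get_question_message())  # shuffled variants, numbered 1..4
    print(q.check_answer(q.answer))  # True: q.answer tracks the shuffled right index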
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/apps.py
|
from django.apps import AppConfig
class ArenastatisticsConfig(AppConfig):
name = 'arenastatistics'
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/migrations/0003_auto_20210519_1935.py
|
# Generated by Django 3.0.8 on 2021-05-19 16:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('arenastatistics', '0002_questiontopic_emoji'),
]
operations = [
migrations.RemoveField(
model_name='answer',
name='question',
),
migrations.AddField(
model_name='session',
name='dead_amount',
field=models.IntegerField(default=0, verbose_name='Dead amount'),
),
migrations.AlterField(
model_name='answer',
name='answer',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.QuestionVariant'),
),
]
|
tsudd/student-arena-discord-bot
|
restful_backend/service/firestore/apps.py
|
<gh_stars>0
from django.apps import AppConfig
class FirestoreConfig(AppConfig):
name = 'firestore'
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/serializers.py
|
from rest_framework import serializers
from .models import *
class PlayerSerializer(serializers.ModelSerializer):
class Meta:
model = Player
fields = '__all__'
class QuestionTopicSerializer(serializers.ModelSerializer):
class Meta:
model = QuestionTopic
fields = '__all__'
class QuestionVariantSerializer(serializers.ModelSerializer):
class Meta:
model = QuestionVariant
fields = '__all__'
class QuestionSerializer(serializers.ModelSerializer):
varOne = QuestionVariantSerializer(read_only=True)
varTwo = QuestionVariantSerializer(read_only=True)
varThree = QuestionVariantSerializer(read_only=True)
varFour = QuestionVariantSerializer(read_only=True)
class Meta:
model = Question
fields = [
'id',
'question_string',
'right_ind',
"varOne",
"varTwo",
"varThree",
"varFour",
"topic"
]
class SessionSerializer(serializers.ModelSerializer):
topic = QuestionTopicSerializer(read_only=True)
date = serializers.DateTimeField(format='%d.%m.%Y %H:%M')
class Meta:
model = Session
fields = [
"id",
"players_amount",
"rounds_amount",
"date",
"topic"
]
class RoundSerializer(serializers.ModelSerializer):
question = serializers.SlugRelatedField(
read_only=True,
slug_field="question_string"
)
class Meta:
model = Round
fields = [
"session",
"question"
]
# why?
class ParticipationSerializer(serializers.ModelSerializer):
session = SessionSerializer(read_only=True)
class Meta:
model = Participation
fields = [
"session"
]
class AnswerSerializer(serializers.ModelSerializer):
player = serializers.SlugRelatedField(
read_only=True,
slug_field="dis_id"
)
answer = serializers.SlugRelatedField(
read_only=True,
slug_field="variant"
)
class Meta:
model = Answer
fields = [
"player",
"right",
"answer",
]
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/models.py
|
from django.db import models
DEFAULT_EMPTY_STRING = "Empty"
# Create your models here.
class Player(models.Model):
dis_id = models.BigIntegerField("User ID", default=228)
nick = models.CharField("Nickname", max_length=90, default="NO NICK")
lifetime = models.FloatField("Average lifetime", default=1)
games_amount = models.IntegerField("Arenas played", default=0)
wins = models.IntegerField("Won in total", default=0)
def update_lifetime(self, score, rounds, alive):
total = self.games_amount + 1
cycle = score / rounds if not alive else 1
self.lifetime = (self.lifetime * self.games_amount + cycle) / total
self.games_amount = total
def __str__(self):
return f"{self.nick} with {self.games_amount} games."
class QuestionTopic(models.Model):
name = models.CharField("Topic name", max_length=30,
default=DEFAULT_EMPTY_STRING)
emoji = models.CharField("Topic emoji", max_length=10, default="🖕🏻")
def __str__(self):
return f"{self.name} topic."
class QuestionVariant(models.Model):
variant = models.TextField(
"Variant", max_length=300, default=DEFAULT_EMPTY_STRING)
def __str__(self):
return f"Question variant {self.variant}"
class Question(models.Model):
question_string = models.TextField(
"Question", max_length=300, default=DEFAULT_EMPTY_STRING)
varOne = models.ForeignKey(QuestionVariant, on_delete=models.CASCADE,
null=True, blank=True, related_name="first_question_variant")
varTwo = models.ForeignKey(QuestionVariant, on_delete=models.CASCADE,
null=True, blank=True, related_name="second_question_variant")
varThree = models.ForeignKey(QuestionVariant, on_delete=models.CASCADE,
null=True, blank=True, related_name="third_question_variant")
varFour = models.ForeignKey(QuestionVariant, on_delete=models.CASCADE,
null=True, blank=True, related_name="fourth_question_variant")
right_ind = models.IntegerField("Index of the right answer", default=1)
topic = models.ForeignKey(
QuestionTopic, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f"Question {self.question_string}."
class Session(models.Model):
players_amount = models.IntegerField("Players amount", default=0)
dead_amount = models.IntegerField("Dead amount", default=0)
rounds_amount = models.IntegerField("Rounds played", default=1)
date = models.DateTimeField("Arena ended", auto_now_add=True)
topic = models.ForeignKey(
QuestionTopic, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f"Session of {self.date} with {self.topic}"
class Round(models.Model):
session = models.ForeignKey(
Session, on_delete=models.CASCADE, null=True, blank=True)
question = models.ForeignKey(
Question, on_delete=models.CASCADE, null=True, blank=True)
first_variant = models.ForeignKey(
QuestionVariant, on_delete=models.CASCADE, null=True, blank=True, related_name="first_round_variant")
second_variant = models.ForeignKey(
QuestionVariant, on_delete=models.CASCADE, null=True, blank=True, related_name="second_round_variant")
three_variant = models.ForeignKey(
QuestionVariant, on_delete=models.CASCADE, null=True, blank=True, related_name="third_round_variant")
four_variant = models.ForeignKey(
QuestionVariant, on_delete=models.CASCADE, null=True, blank=True, related_name="fouth_round_variant")
right_ind = models.IntegerField("Index of the right answer", default=-1)
def __str__(self):
return f"Round of session {self.session} with question {self.question}."
class Participation(models.Model):
session = models.ForeignKey(
Session, on_delete=models.CASCADE, null=True, blank=True)
player = models.ForeignKey(
Player, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return f"Participation record of {self.player}."
class Answer(models.Model):
session = models.ForeignKey(
Session, on_delete=models.CASCADE, null=True, blank=True)
round = models.ForeignKey(
Round, on_delete=models.CASCADE, null=True, blank=True)
player = models.ForeignKey(
Player, on_delete=models.CASCADE, null=True, blank=True)
answer = models.ForeignKey(
QuestionVariant, on_delete=models.CASCADE, null=True, blank=True)
right = models.BooleanField("Answer is right", default=False)
def __str__(self):
return f"{self.right} answer of {self.player}."
|
tsudd/student-arena-discord-bot
|
restful_backend/service/arenastatistics/migrations/0001_initial.py
|
<reponame>tsudd/student-arena-discord-bot<gh_stars>0
# Generated by Django 3.0.8 on 2021-05-18 20:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dis_id', models.IntegerField(default=228, verbose_name='User ID')),
('nick', models.CharField(default='NO NICK', max_length=90, verbose_name='Nickname')),
('lifetime', models.FloatField(default=1, verbose_name='Average lifetime')),
('games_amount', models.IntegerField(default=0, verbose_name='Arenas played')),
('wins', models.IntegerField(default=0, verbose_name='Won in total')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_string', models.TextField(default='Empty', max_length=300, verbose_name='Question')),
('right_ind', models.IntegerField(default=1, verbose_name='Index of the right answer')),
],
),
migrations.CreateModel(
name='QuestionTopic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='Empty', max_length=30, verbose_name='Topic name')),
],
),
migrations.CreateModel(
name='QuestionVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variant', models.TextField(default='Empty', max_length=300, verbose_name='Variant')),
],
),
migrations.CreateModel(
name='Session',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('players_amount', models.IntegerField(default=0, verbose_name='Players amount')),
('rounds_amount', models.IntegerField(default=1, verbose_name='Rounds played')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Arena ended')),
('topic', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.QuestionTopic')),
],
),
migrations.CreateModel(
name='Round',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('right_ind', models.IntegerField(default=-1, verbose_name='Index of the right answer')),
('first_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='first_round_variant', to='arenastatistics.QuestionVariant')),
('four_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='fouth_round_variant', to='arenastatistics.QuestionVariant')),
('question', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Question')),
('second_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='second_round_variant', to='arenastatistics.QuestionVariant')),
('session', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Session')),
('three_variant', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='third_round_variant', to='arenastatistics.QuestionVariant')),
],
),
migrations.AddField(
model_name='question',
name='topic',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.QuestionTopic'),
),
migrations.AddField(
model_name='question',
name='varFour',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='fourth_question_variant', to='arenastatistics.QuestionVariant'),
),
migrations.AddField(
model_name='question',
name='varOne',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='first_question_variant', to='arenastatistics.QuestionVariant'),
),
migrations.AddField(
model_name='question',
name='varThree',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='third_question_variant', to='arenastatistics.QuestionVariant'),
),
migrations.AddField(
model_name='question',
name='varTwo',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='second_question_variant', to='arenastatistics.QuestionVariant'),
),
migrations.CreateModel(
name='Participation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('player', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Player')),
('session', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Session')),
],
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.IntegerField(default=-1, verbose_name='Selected answer')),
('right', models.BooleanField(default=False, verbose_name='Answer is right')),
('player', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Player')),
('question', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Question')),
('round', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Round')),
('session', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='arenastatistics.Session')),
],
),
]
|
tsudd/student-arena-discord-bot
|
bot/arenabot/bot/entities/player.py
|
<filename>bot/arenabot/bot/entities/player.py
from .recorder_config import ALIVE_ACCESSOR, ID_ACCESSOR, NICK_ACCESSOR, TOTAL_RIGHTS_ACCESSOR
import logging
class Player(object):
def __init__(self, uid, username):
self.score = 0
self.uid = uid
self.alive = True
self.name = username
self.answered = False
self.bad_question = None
def kill(self):
self.alive = False
def add_points(self, amount=1):
self.score += amount
def dump(self):
answer = {}
answer[ID_ACCESSOR] = self.uid
answer[NICK_ACCESSOR] = self.name
answer[ALIVE_ACCESSOR] = self.alive
answer[TOTAL_RIGHTS_ACCESSOR] = self.score
return answer
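A small lifecycle sketch for the entity above (assuming the recorder_config accessors are plain string keys):
p = Player(uid=42, username="alice")
p.add_points(2)      # two correct answers
p.kill()             # eliminated from the arena
record = p.dump()    # maps the ID/NICK/ALIVE/TOTAL_RIGHTS accessors to 42/"alice"/False/2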
|
salgozino/pyRofex_bot
|
challenge.py
|
# -*- coding: utf-8 -*-
import sys
import getopt
from market import Market, pyRofex
import time
try:
ticker = sys.argv[1]
login_data, args = getopt.getopt(sys.argv[2:], 'u:p:a:')
user = login_data[0][1]
password = login_data[1][1]
if len(login_data) > 2:
account = login_data[2][1]
else:
account = ""
except (IndexError, getopt.GetoptError) as e:
    # note: `except A or B` only catches A; a tuple is needed to catch both.
    print("""Error while reading the ticker and the login credentials.
          Remember that the script is invoked as:
              > challenge.py ticker -u USER -p PASSWORD -a ACCOUNT
          If no account is given, orders cannot be sent to the market""")
    print(e)
    sys.exit(2)
market = Market(user=user, password=password, account=account)
# %% Initialize market subscription
market.init_con()
tickers = ['DOOct20', 'DONov20']
market.subscription(tickers)
time.sleep(2)
# %% Send an order to the market
ticker = 'DOOct20'  # note: this overrides the ticker passed on the command line
try:
last_md = market.ticker_last_price(ticker)
last_bid = market.ticker_bid(ticker)
if last_bid:
market.send_order(ticker=ticker,
side=pyRofex.Side.BUY,
order_type=pyRofex.OrderType.LIMIT,
price=last_bid['price']-0.01,
size=1)
else:
market.send_order(ticker=ticker,
side=pyRofex.Side.BUY,
order_type=pyRofex.OrderType.LIMIT,
price=75.25,
size=1)
except Exception as e:
print("Error ejecutando la estrategia")
print(e)
market.close_con()
sys.exit(2)
# %% Loop to keep the bot alive until the user decides to stop it.
# Press Ctrl+C to exit.
try:
    while True:
        time.sleep(1)  # idle instead of busy-waiting at 100% CPU
except KeyboardInterrupt:
print("El usuario ha cerrado el programa")
market.close_con()
sys.exit(2)
except Exception as e:
print("Ups, ocurrio el siguiente error, me cerraré luego de esto")
print(e)
sys.exit(2)
|
salgozino/pyRofex_bot
|
market.py
|
# -*- coding: utf-8 -*-
import pyRofex
class Market():
def __init__(self, user, password, account=None, environment=pyRofex.Environment.REMARKET):
self.environment = environment
self.account = account
self.is_initialized = False
self.last_message = None # last message received
self.data = {} # dict with all the market data
self.open_orders = []
self.available_instruments = set()
self.orders = []
try:
pyRofex.initialize(user=user,
password=password,
account=account,
environment=environment)
self.is_initialized = True
self.available_instruments = self.get_instruments_list()
except Exception as e:
print("Error trying to initialize the connection")
print(e)
def market_data_handler(self, message):
self.data[message['instrumentId']['symbol']].append({'timestamp': message['timestamp'],
'marketData': message['marketData']})
self.last_message = message
    def order_report_handler(self, message):
        # `self` was missing, so the bound method raised a TypeError when called.
        print("Order Report Message Received: {0}".format(message))
def error_handler(self, message):
print("Error Message Received: {0}".format(message))
self.last_message = message
def exception_handler(self, e):
print("Exception Occurred: {0}".format(e.message))
self.last_message = e
def init_con(self):
print("Iniciando sessión en "+self.environment.name)
pyRofex.init_websocket_connection(market_data_handler=self.market_data_handler,
error_handler=self.error_handler,
exception_handler=self.exception_handler,
order_report_handler=self.order_report_handler)
if self.account:
print("Subscribing to the account messages")
pyRofex.order_report_subscription()
else:
print("Not logged in to the order report because there is no information about your account")
def subscription(self, tickers, entries=None):
if not self.is_initialized:
raise ConnectionError("Operación no válida. Primero debe conectarse al mercado")
if not entries:
entries = [pyRofex.MarketDataEntry.BIDS,
pyRofex.MarketDataEntry.OFFERS,
pyRofex.MarketDataEntry.LAST]
        # wrap a single ticker string in a list
if isinstance(tickers, str):
tickers = [tickers]
for ticker in tickers:
if ticker not in self.available_instruments:
print(f"el instrumento {ticker} no se puede operar, revise la definición (incluído mayusculas y minusculas)")
if ticker not in self.data.keys():
# add the ticker to the data dictionary
self.data[ticker] = []
pyRofex.market_data_subscription(tickers=tickers,
entries=entries)
def get_instruments_list(self):
if not self.is_initialized:
raise ConnectionError("Operación no válida. Primero debe inicializar al mercado")
response = pyRofex.get_all_instruments()
instruments = set()
if response['status'] != 'OK':
raise "Response error when trying to ask for the list of instruments"
else:
for ticker in response['instruments']:
instruments.add(ticker['instrumentId']['symbol'])
return instruments
    def ticker_last_price(self, ticker):
        print(f"Querying symbol: {ticker}")
        if ticker in self.data.keys():
            last_price = self.data[ticker][-1]['marketData']['LA']
            print(f'Last traded price for {ticker}: {last_price}')
        else:
            print('You are not subscribed to this ticker')
            last_price = None
        return last_price
    def ticker_bid(self, ticker):
        print("Querying BID")
        if ticker in self.data.keys():
            try:
                bid = self.data[ticker][-1]['marketData']['BI'][0]
                print(f'Highest BID for {ticker}: {bid}')
            except IndexError:
                bid = None
                print("There is no bid side")
        else:
            print('You are not subscribed to this ticker')
            bid = None
        return bid
def ticker_last_market_data(self, ticker):
if ticker in self.data.keys():
return self.data[ticker][-1]['marketData']
else:
            print('You are not subscribed to this ticker')
return None
def send_order(self, ticker, side, size, price=None, order_type=pyRofex.OrderType.LIMIT):
"""
Enviar una orden al mercado
Parameters
----------
ticker : str
Ticker del instrumento
side : pyRofex.Side
Lado (buy or sell)
price : float
Precio para emitir la orden, salvo que sea una orden market.
size : int
Tamaño de la orde .
order_type : pyRofex.OrderType, optional
Tipo de orden. The default is pyRofex.OrderType.LIMIT.
Returns
-------
None.
"""
print(f"Enviando una order al mercado para el instrumento {ticker}")
if order_type == pyRofex.OrderType.MARKET:
print(f"Side: {side.name}, Cant: {size}")
else:
if not price:
raise ValueError("The price must be an input if the order type is not Market")
price = round(price, 3) # hago un round por si viene con muchos decimales, pq eso da error sino
print(f"Side: {side.name}, Cant: {size}, Price: {price}")
order = pyRofex.send_order(ticker=ticker,
side=side,
price=price,
size=size,
order_type=order_type)
self.orders.append(order)
def close_con(self):
print("Cerrando sessión en "+self.environment.name)
pyRofex.close_websocket_connection()
self.is_initialized = False
|
ksweta/BeamIt-Server
|
beamit/controllers/signup.py
|
"""
Signup controller
"""
from flask import request
from json import dumps, loads
from requests import codes
from werkzeug.exceptions import Conflict
from beamit.app import db
from beamit.resources.signup import SignupRequest, SignupResponse
from beamit.model.user import User
def create_signup_routes(app):
@app.route("/api/signup", methods=["POST"])
def signup():
app.logger.info("content-type: {}, data: {}".format(
request.headers.get('content-type'),
request.get_data(),
))
signup_request_resource = SignupRequest.from_dict(loads(request.get_data()))
user = User.query.filter_by(email=signup_request_resource.email).first()
if user:
raise Conflict(
"User already exists for email ({})".format(signup_request_resource.email),
)
user = User(signup_request_resource.email, signup_request_resource.password)
try:
db.session.add(user)
db.session.commit()
signup_response = SignupResponse(user.id)
app.logger.info("signup request: {}, response: {}".format(
signup_request_resource,
signup_response,
))
return dumps(signup_response.to_dict()), codes.created
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
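A client-side sketch of this exchange, assuming a local development server on port 5000 (URL and credentials are placeholders):
import requests
from json import dumps

resp = requests.post(
    "http://localhost:5000/api/signup",
    data=dumps({"email": "alice@example.com", "password": "s3cret"}),
    headers={"content-type": "application/vnd.beamit.signup.request+json"},
)
print(resp.status_code)   # 201 on success, 409 if the email is already registered
print(resp.json())        # {"user_id": <new id>}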
|
ksweta/BeamIt-Server
|
beamit/resources/invite.py
|
from beamit.resources.base import Resource
class EmailInvite(Resource):
MEDIA_TYPE = 'application/vnd.beamit.invite.email+json'
def __init__(self, user_id, invitee_email):
self.user_id = user_id
self.invitee_email = invitee_email
def __repr__(self):
return "<Email user_id: {}, invitee_email: {}>".format(self.user_id, self.invitee_email)
def to_dict(self):
return dict(
user_id=self.user_id,
invitee_email=self.invitee_email,
)
@classmethod
def from_dict(cls, dct):
return cls(
user_id=dct.get("user_id"),
invitee_email=dct.get("invitee_email"),
)
|
ksweta/BeamIt-Server
|
beamit/model/contact.py
|
<gh_stars>0
from beamit.app import db
from beamit.resources.contact import Contact as ContactResource
class Contact(db.Model):
id = db.Column(db.Integer, primary_key=True)
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
email = db.Column(db.String(60), nullable=False)
name = db.Column(db.String(64))
phone = db.Column(db.String(15))
company = db.Column(db.String(50))
linkedin_url = db.Column(db.String(100))
photo = db.Column(db.LargeBinary)
def __init__(self, owner_id, email, name=None, phone=None, company=None, linkedin_url=None, photo=None): # noqa
self.owner_id = owner_id
self.email = email
self.name = name
self.phone = phone
self.company = company
self.linkedin_url = linkedin_url
self.photo = photo
def __repr__(self):
return "<Contact id: {}, owner_id: {}, email: {}, name: {}, phone: {}, "\
"company: {}, linkedin_url: {}, photo_present: {}>".format(
self.id,
self.owner_id,
self.email,
self.name,
self.phone,
self.company,
self.linkedin_url,
True if self.photo else False,
)
def to_contact_resource(self):
"""
This method converts `beamit.resources.contact.Contact` object to model object
"""
return ContactResource(
id=self.id,
owner_id=self.owner_id,
email=self.email,
name=self.name,
phone=self.phone,
company=self.company,
linkedin_url=self.linkedin_url,
)
@classmethod
def from_contact_resource(cls, resource):
return cls(
owner_id=resource.owner_id,
email=resource.email,
name=resource.name,
phone=resource.phone,
company=resource.company,
linkedin_url=resource.linkedin_url,
)
|
ksweta/BeamIt-Server
|
beamit/resources/base.py
|
from abc import ABCMeta, abstractmethod, abstractproperty
class Resource(object):
__metaclass__ = ABCMeta
# TODO:
# - automate to_dict() and from_dict() based on members
# - automate camelCase and snake_case conversions
# - automate type conversions
@classmethod
def media_type(cls):
return cls.MEDIA_TYPE
def _dict_no_none(self, **kwargs):
return {
k: v
            for k, v in kwargs.items()
if v is not None
}
@abstractmethod
def to_dict(self):
pass
@classmethod
def from_dict(cls, resource_dict):
raise NotImplementedError()
def __hash__(self):
return hash(frozenset(self.to_dict()))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.to_dict() == other.to_dict()
    def __ne__(self, other):
        # mirror __eq__ so comparison with non-Resource objects cannot raise
        return not self.__eq__(other)
class ResourceList(Resource):
@abstractproperty
def resources(self):
pass
def __len__(self):
return len(self.resources)
def __iter__(self):
return iter(self.resources)
def append(self, resource):
self.resources.append(resource)
class PaginatedResourceList(ResourceList):
def __init__(self, items=None, offset=None, limit=None, total_count=None):
self.items = items or []
self.offset = offset
self.limit = limit
self.total_count = total_count
@property
def resources(self):
return self.items
@classmethod
def items_name(cls):
raise NotImplementedError("`items_name` is not implemented")
@classmethod
def items_class(cls):
raise NotImplementedError("`items_class` is not implemented")
def to_dict(self):
return {
self.items_name(): [
entry.to_dict() for entry in self
],
"offset": self.offset,
"limit": self.limit,
"totalCount": self.total_count,
}
@classmethod
def from_dict(cls, dct):
return cls(
[cls.items_class().from_dict(model) for model in dct[cls.items_name()]],
offset=dct["offset"],
limit=dct["limit"],
total_count=dct["totalCount"],
)
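The two classmethod hooks are all a concrete paginated type has to supply; a sketch with a hypothetical Widget resource (names here are illustrative, not part of the codebase):
class Widget(Resource):
    MEDIA_TYPE = 'application/vnd.example.widget+json'

    def __init__(self, name):
        self.name = name

    def to_dict(self):
        return dict(name=self.name)

    @classmethod
    def from_dict(cls, dct):
        return cls(name=dct.get("name"))

class WidgetList(PaginatedResourceList):
    MEDIA_TYPE = 'application/vnd.example.widget.list+json'

    @classmethod
    def items_name(cls):
        return "widgets"

    @classmethod
    def items_class(cls):
        return Widget

page = WidgetList(items=[Widget("a"), Widget("b")], offset=0, limit=2, total_count=9)
assert WidgetList.from_dict(page.to_dict()).to_dict() == page.to_dict()   # round-trips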
|
ksweta/BeamIt-Server
|
beamit/controllers/signin.py
|
"""
Signin controller
"""
from flask import request
from json import dumps, loads
from requests import codes
from werkzeug.exceptions import NotFound, Unauthorized
from beamit.model.user import User
from beamit.resources.signin import SigninRequest, SigninResponse
def create_signin_routes(app):
@app.route('/api/signin', methods=["POST"])
def signin():
app.logger.info("content-type: {}, data: {}".format(
request.headers.get('content-type'),
request.get_data(),
))
signin_request_resource = SigninRequest.from_dict(loads(request.get_data()))
user = User.query.filter_by(email=signin_request_resource.email).first()
if not user:
raise NotFound(
"User not found for email ({})".format(signin_request_resource.email),
)
if not user.check_password(signin_request_resource.password):
raise Unauthorized(
"unauthorized user access for email({})".format(signin_request_resource.email),
)
signin_response = SigninResponse(user.id)
return dumps(signin_response.to_dict()), codes.ok
|
ksweta/BeamIt-Server
|
beamit/controllers/user.py
|
"""
User controller
"""
from flask import request
from json import dumps, loads
from requests import codes
from beamit.app import db
from beamit.model.user import User
from beamit.model.contact import Contact
from beamit.resources.user import User as UserResource
def create_user_routes(app):
@app.route("/api/user", methods=["POST"])
def create_user():
app.logger.info("content-type: {}, data: {}".format(
request.headers.get('content-type'),
request.get_data(),
))
user_resource = UserResource.from_dict(loads(request.get_data()))
user = User.from_user_resource(user_resource)
try:
db.session.add(user)
db.session.commit()
return dumps(user.to_user_resource().to_dict()), codes.created
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
@app.route("/api/user", methods=["PUT"])
def update_user():
app.logger.info("content-type: {}, data: {}".format(
request.headers.get('content-type'),
request.get_data(),
))
user_resource = UserResource.from_dict(loads(request.get_data()))
user = User.query.get_or_404(user_resource.id)
try:
user.email = user_resource.email
user.name = user_resource.name
user.phone = user_resource.phone
user.company = user_resource.company
user.linkedin_url = user_resource.linkedin_url
db.session.commit()
return dumps(user.to_user_resource().to_dict()), codes.ok
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
@app.route("/api/user/<int:user_id>", methods=["GET"])
def get_user(user_id):
app.logger.info("content-type: {}, user_id: {}".format(
request.headers.get('content-type'),
user_id,
))
user = User.query.get_or_404(user_id)
return dumps(user.to_user_resource().to_dict()), codes.ok
@app.route("/api/user/<int:user_id>", methods=["DELETE"])
def delete_user(user_id):
app.logger.info("content-type: {}, user_id: {}".format(
request.headers.get('content-type'),
user_id,
))
user = User.query.get_or_404(user_id)
contacts = Contact.query.filter_by(owner_id=user.id)
try:
for contact in contacts:
db.session.delete(contact)
db.session.delete(user)
db.session.commit()
return "", codes.no_content
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
|
ksweta/BeamIt-Server
|
beamit/resources/password.py
|
<gh_stars>0
from beamit.resources.base import Resource
class PasswordChangeRequest(Resource):
MEDIA_TYPE = 'application/vnd.beamit.password.change.request+json'
def __init__(self, email, password, new_password):
self.email = email
self.password = password
        self.new_password = new_password
def __repr__(self):
return "<PasswordChangeRequest email: {}, password: {}, new_password: {}>".format(
self.email,
self.password,
self.new_password,
)
def to_dict(self):
return dict(email=self.email, password=self.password, new_password=self.new_password)
@classmethod
def from_dict(cls, dct):
return cls(
email=dct.get("email"),
password=dct.get("password"),
new_password=dct.get("new_password"),
)
class PasswordChangeResponse(Resource):
MEDIA_TYPE = 'application/vnd.beamit.password.change.response+json'
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return "<PasswordChangeResponse user_id: {}>".format(self.user_id)
def to_dict(self):
return dict(user_id=self.user_id)
@classmethod
def from_dict(cls, dct):
return cls(user_id=dct.get("user_id"))
|
ksweta/BeamIt-Server
|
beamit/model/user.py
|
from werkzeug.security import (
generate_password_hash,
check_password_hash
)
from beamit.app import db
from beamit.resources.user import User as UserResource
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(60), unique=True, nullable=False)
password = db.Column(db.String(160), nullable=False)
name = db.Column(db.String(64))
phone = db.Column(db.String(15))
company = db.Column(db.String(50))
linkedin_url = db.Column(db.String(100))
photo = db.Column(db.LargeBinary)
contacts = db.relationship('Contact', backref="owner", lazy='dynamic')
def __init__(self, email, password, name=None, phone=None, company=None, linkedin_url=None, photo=None): # noqa
self.email = email
# Generate the one way hash password
self.set_password(password)
self.name = name
self.phone = phone
self.company = company
self.linkedin_url = linkedin_url
self.photo = photo
def __repr__(self):
return "<User id: {}, email: {}, password: {}, name: {}, phone: {}, company: {}, " \
"linkedin_url: {}, photo_present: {}>".format(
self.id,
self.email,
self.password,
self.name,
self.phone,
self.company,
self.linkedin_url,
True if self.photo else False,
)
def set_password(self, password):
"""
This method generates one way hash of the password
"""
self.password = generate_password_hash(password)
def check_password(self, password):
"""
This method generates one way hash of the given password and compares it with the stored
password
"""
return check_password_hash(self.password, password)
def to_user_resource(self):
return UserResource(
id=self.id,
email=self.email,
name=self.name,
phone=self.phone,
company=self.company,
linkedin_url=self.linkedin_url,
)
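The one-way hashing contract above can be exercised without a database; a quick sketch using werkzeug directly:
from werkzeug.security import generate_password_hash, check_password_hash

stored = generate_password_hash("s3cret")      # salted hash; the plaintext is never stored
print(check_password_hash(stored, "s3cret"))   # True
print(check_password_hash(stored, "wrong"))    # False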
|
ksweta/BeamIt-Server
|
beamit/controllers/invite.py
|
<reponame>ksweta/BeamIt-Server<filename>beamit/controllers/invite.py
"""
Invite controller
"""
from flask import request
from json import loads
from requests import codes
from sendgrid import Mail, SendGridClient
from werkzeug.exceptions import InternalServerError
from beamit.model.user import User
from beamit.resources.invite import EmailInvite
def create_invite_routes(app):
def send_email(user, invitee_email):
sgc = SendGridClient(app.config.get("SENDGRID_USERNAME"),
app.config.get("SENDGRID_PASSWORD"))
app_email = app.config.get('APP_EMAIL')
android_download_link = app.config.get('ANDROID_APP_DOWNLOAD_LINK')
email_subject = "Try Beamit Android Application"
email_content = "{}({}) has invited you to try BeamIt application. \
Please download it from here({}).".format(
user.name,
user.email,
android_download_link)
mail = Mail()
mail.add_to(invitee_email)
mail.set_from(app_email)
mail.set_subject(email_subject)
mail.set_text(email_content)
status, response = sgc.send(mail)
app.logger.info("sendgrid status code: {}, response: {}".format(status, response))
if status != codes.ok:
# Minimal error check
raise InternalServerError("Couldn't send email: {}".format(response))
return "", status
@app.route('/api/invite/email', methods=['POST'])
def email_invite():
app.logger.info("content-type: {}, data: {}".format(
request.headers.get('content-type'),
request.get_data(),
))
email_invite = EmailInvite.from_dict(loads(request.get_data()))
        # Make sure the user_id exists
user = User.query.get_or_404(email_invite.user_id)
return send_email(user, email_invite.invitee_email)
|
ksweta/BeamIt-Server
|
beamit/resources/contact.py
|
from beamit.resources.base import Resource, PaginatedResourceList
class Contact(Resource):
MEDIA_TYPE = 'application/vnd.beamit.contact+json'
def __init__(self, id, owner_id, email, name=None, phone=None, company=None, linkedin_url=None): # noqa
self.id = id
self.owner_id = owner_id
self.email = email
self.name = name
self.phone = phone
self.company = company
self.linkedin_url = linkedin_url
def __repr__(self):
return "<Contact id: {}, owner_id: {}, email: {}, name: {}, phone: {}, "\
"company: {}, linkedin_url: {}>".format(
self.id,
self.owner_id,
self.email,
self.name,
self.phone,
self.company,
self.linkedin_url,
)
def to_dict(self):
return dict(
id=self.id,
owner_id=self.owner_id,
email=self.email,
name=self.name,
phone=self.phone,
company=self.company,
linkedin_url=self.linkedin_url,
)
@classmethod
def from_dict(cls, dct):
return cls(
id=dct.get("id"),
owner_id=dct.get("owner_id"),
email=dct.get("email"),
name=dct.get("name"),
phone=dct.get("phone"),
company=dct.get("company"),
linkedin_url=dct.get("linkedin_url"),
)
class ContactList(PaginatedResourceList):
"""
    Contact list for an owner.
"""
MEDIA_TYPE = 'application/vnd.beamit.contact.list+json'
@classmethod
def items_name(cls):
return "contacts"
@classmethod
def items_class(cls):
return Contact
class ShareContact(Resource):
MEDIA_TYPE = 'application/vnd.beamit.contact.share+json'
def __init__(self, owner_id, subject_id):
"""
:param owner_id: who receives the contact information.
:param subject_id: Who shares the contact information.
"""
self.owner_id = owner_id
self.subject_id = subject_id
def __repr__(self):
return "<ShareContact owner_id: {}, subject_id: {}>".format(self.owner_id, self.subject_id)
def to_dict(self):
return dict(owner_id=self.owner_id, subject_id=self.subject_id)
@classmethod
def from_dict(cls, dct):
return cls(
owner_id=dct.get("owner_id"),
subject_id=dct.get("subject_id"),
)
|
ksweta/BeamIt-Server
|
beamit/controllers/password.py
|
"""
password controller
"""
from flask import request
from json import dumps, loads
from requests import codes
from werkzeug.exceptions import NotFound, Unauthorized
from beamit.app import db
from beamit.model.user import User
from beamit.resources.password import PasswordChangeRequest, PasswordChangeResponse
def create_password_routes(app):
@app.route("/api/password", methods=["POST"])
def change_password():
pswd_change_request_resource = PasswordChangeRequest.from_dict(loads(request.get_data()))
user = User.query.filter_by(email=pswd_change_request_resource.email).first()
if not user:
raise NotFound(
"User not found for email ({})".format(pswd_change_request_resource.email),
)
if not user.check_password(pswd_change_request_resource.password):
raise Unauthorized(
"unauthorized user access for email ({})".format(
pswd_change_request_resource.email,
)
)
try:
user.set_password(pswd_change_request_resource.new_password)
db.session.commit()
pswd_change = PasswordChangeResponse(user.id)
return dumps(pswd_change.to_dict()), codes.ok
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
|
ksweta/BeamIt-Server
|
beamit/controllers/contact.py
|
<reponame>ksweta/BeamIt-Server<gh_stars>0
"""
contact controller
"""
from flask import request
from json import dumps, loads
from requests import codes
from werkzeug.exceptions import BadRequest
from beamit.app import db
from beamit.model.contact import Contact
from beamit.model.user import User
from beamit.resources.contact import Contact as ContactResource, ContactList, ShareContact
def parse_int_arg(name):
value = request.args.get(name)
if value is None:
return None
try:
return int(value)
except ValueError:
raise BadRequest("{} is not a valid integer".format(name))
def create_contact_routes(app):
@app.route("/api/contact", methods=["POST"])
def contact():
app.logger.info("content-type: {}, data: {}".format(
request.headers.get('content-type'),
request.get_data(),
))
contact_resource = ContactResource.from_dict(loads(request.get_data()))
# Make sure user exists
User.query.get_or_404(contact_resource.owner_id)
contact_model = Contact.from_contact_resource(contact_resource)
try:
db.session.add(contact_model)
db.session.commit()
contact_resource = contact_model.to_contact_resource()
return dumps(contact_resource.to_dict()), codes.created
except Exception as error:
app.logger.exception(error)
db.session.rollback()
raise
@app.route("/api/contact/<int:contact_id>", methods=["GET"])
def get_contact(contact_id):
app.logger.info("content-type: {}, contact_id: {}".format(
request.headers.get('content-type'),
contact_id,
))
contact = Contact.query.get_or_404(contact_id)
return dumps(contact.to_contact_resource().to_dict()), codes.ok
@app.route("/api/contact/<int:contact_id>", methods=["DELETE"])
def delete_contact(contact_id):
app.logger.info("content-type: {}, contact_id: {}".format(
request.headers.get('content-type'),
contact_id,
))
contact = Contact.query.get_or_404(contact_id)
db.session.delete(contact)
db.session.commit()
return "", codes.no_content
@app.route("/api/contactlist/user/<int:owner_id>", methods=["GET"])
def get_contacts_for_user(owner_id):
record_offset = parse_int_arg("offset") or 0
# if limit is None that means return all contacts.
record_limit = parse_int_arg("limit")
app.logger.info("owner_id (user_id): {}, offset: {}, limit: {}",
owner_id,
record_offset,
record_limit)
# Make sure owner exists
User.query.get_or_404(owner_id)
if record_limit:
contacts = Contact.query.filter_by(owner_id=owner_id).limit(record_limit).offset(record_offset).all() # noqa
else:
contacts = Contact.query.filter_by(owner_id=owner_id).offset(record_offset).all()
        contact_list = ContactList(items=[contact.to_contact_resource() for contact in contacts],
                                   offset=record_offset,
                                   limit=record_limit,
                                   # totalCount should reflect all of the owner's contacts, not just this page
                                   total_count=Contact.query.filter_by(owner_id=owner_id).count())
return dumps(contact_list.to_dict()), codes.ok
@app.route("/api/contact/share", methods=["POST"])
def share_contact():
app.logger.info("data: {}".format(
request.get_data(),
))
share = ShareContact.from_dict(loads(request.get_data()))
owner_user = User.query.get_or_404(share.owner_id)
subject_user_model = User.query.get_or_404(share.subject_id)
contact_model = Contact(
owner_id=owner_user.id,
email=subject_user_model.email,
name=subject_user_model.name,
phone=subject_user_model.phone,
company=subject_user_model.company,
linkedin_url=subject_user_model.linkedin_url,
photo=subject_user_model.photo,
)
try:
db.session.add(contact_model)
db.session.commit()
contact_resource = contact_model.to_contact_resource()
return dumps(contact_resource.to_dict()), codes.created
except Exception as error:
app.logger.exception(error)
db.session.rollback()
raise
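A client-side sketch of the paginated list endpoint above, assuming a local server and an existing user id 1 (both placeholders):
import requests

resp = requests.get(
    "http://localhost:5000/api/contactlist/user/1",
    params={"offset": 2, "limit": 2},   # second page of two contacts
)
page = resp.json()
print(page["offset"], page["limit"], page["totalCount"])
for contact in page["contacts"]:
    print(contact["email"])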
|
ksweta/BeamIt-Server
|
beamit/resources/user.py
|
<gh_stars>0
from beamit.resources.base import Resource
class User(Resource):
MEDIA_TYPE = 'application/vnd.beamit.user+json'
def __init__(self, id, email, name=None, phone=None, company=None, linkedin_url=None):
self.id = id
self.email = email
self.name = name
self.phone = phone
self.company = company
self.linkedin_url = linkedin_url
def __repr__(self):
return "<User id: {}, email: {}, name: {}, phone: {}, company: {}, " \
"linkedin_url: {}>".format(
self.id,
self.email,
self.name,
self.phone,
self.company,
self.linkedin_url,
)
def to_dict(self):
return dict(
id=self.id,
email=self.email,
name=self.name,
phone=self.phone,
company=self.company,
linkedin_url=self.linkedin_url,
)
@classmethod
def from_dict(cls, dct):
return cls(
id=dct.get("id"),
email=dct.get("email"),
name=dct.get("name"),
phone=dct.get("phone"),
company=dct.get("company"),
linkedin_url=dct.get("linkedin_url"),
)
|
ksweta/BeamIt-Server
|
beamit/resources/signup.py
|
from beamit.resources.base import Resource
class SignupRequest(Resource):
MEDIA_TYPE = 'application/vnd.beamit.signup.request+json'
def __init__(self, email, password):
self.email = email
self.password = password
def __repr__(self):
return "<SignupRequest email: {}, password: {}>".format(
self.email,
self.password,
)
def to_dict(self):
return dict(email=self.email, password=self.password)
@classmethod
def from_dict(cls, dct):
return cls(
email=dct.get("email"),
password=dct.get("password"),
)
class SignupResponse(Resource):
MEDIA_TYPE = 'application/vnd.beamit.signup.response+json'
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return "<SignupResponse user_id: {}>".format(self.user_id)
def to_dict(self):
return dict(user_id=self.user_id)
@classmethod
def from_dict(cls, dct):
return cls(user_id=dct.get("user_id"))
|
ksweta/BeamIt-Server
|
beamit/controllers/photo.py
|
from flask import request, Response, send_file
from requests import codes
from beamit.app import db
from beamit.model.user import User
from beamit.model.contact import Contact
def create_photo_routes(app):
@app.route('/api/photo')
def get_photo():
"""
        This is just a test API endpoint. It will be removed in the future.
"""
try:
app.logger.info("Get logger")
result = send_file('./resources/static/sfsu.jpg', mimetype='image/jpg')
return result, codes.ok
except Exception as error:
app.logger.exception(error)
raise error
@app.route('/api/photo/user/<int:user_id>', methods=["GET"])
def get_user_photo(user_id):
app.logger.info("get_user_photo=> user_id: {}".format(user_id))
user = User.query.get_or_404(user_id)
return Response(user.photo, mimetype='image/png')
@app.route('/api/photo/user/<int:user_id>', methods=["POST"])
def post_user_photo(user_id):
app.logger.info("post_user_photo=> user_id:{}, photo: {}".format(
user_id,
request.files['photo'].filename,
))
user = User.query.get_or_404(user_id)
try:
user.photo = request.files['photo'].read()
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
db.session.commit()
return "", codes.no_content
@app.route('/api/photo/contact/<int:contact_id>', methods=["GET"])
def get_contact_photo(contact_id):
app.logger.info("content-type: {}, contact_id: {}".format(
request.headers.get('content-type'),
contact_id,
))
app.logger.info("get_contact_photo=> contact_id: {}".format(contact_id))
contact = Contact.query.get_or_404(contact_id)
return Response(contact.photo, mimetype='image/png')
@app.route('/api/photo/contact/<int:contact_id>', methods=["POST"])
def post_contact_photo(contact_id):
app.logger.info("post_contact_photo=> contact_id: {}, photo: {}".format(
contact_id,
request.files['photo'].filename
))
contact = Contact.query.get_or_404(contact_id)
try:
contact.photo = request.files['photo'].read()
except Exception as e:
app.logger.exception(e)
db.session.rollback()
raise
db.session.commit()
return "", codes.ok
|
ksweta/BeamIt-Server
|
beamit/app.py
|
from os import environ
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__.split('.')[0])
app.debug = True
# App email. This address is used as the sender for outgoing email.
app.config['APP_EMAIL'] = '<EMAIL>'
app.config['ANDROID_APP_DOWNLOAD_LINK'] = 'https://blooming-cliffs-9672.herokuapp.com/android/download'
# Database configuration. Select the URI based on the environment
if environ.get('DATABASE_URL'):
app.config['SQLALCHEMY_DATABASE_URI'] = environ.get('DATABASE_URL')
app.config['SENDGRID_USERNAME'] = environ.get('SENDGRID_USERNAME')
app.config['SENDGRID_PASSWORD'] = environ.get('SENDGRID_PASSWORD')
else:
from beamit.config import (
SENDGRID_PASSWORD,
SENDGRID_USERNAME,
SQLALCHEMY_DATABASE_URI,
SQLALCHEMY_ECHO,
)
app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI
app.config['SQLALCHEMY_ECHO'] = SQLALCHEMY_ECHO
app.config['SENDGRID_USERNAME'] = SENDGRID_USERNAME
app.config['SENDGRID_PASSWORD'] = SENDGRID_PASSWORD
# Set database
db = SQLAlchemy(app)
from beamit.controllers.contact import create_contact_routes # noqa
from beamit.controllers.invite import create_invite_routes # noqa
from beamit.controllers.landing_page import create_landing_page_routes # noqa
from beamit.controllers.signup import create_signup_routes # noqa
from beamit.controllers.signin import create_signin_routes # noqa
from beamit.controllers.password import create_password_routes # noqa
from beamit.controllers.photo import create_photo_routes # noqa
from beamit.controllers.user import create_user_routes # noqa
# These imports need to be here, otherwise db.create_all() will not work
from beamit.model.user import User # noqa
from beamit.model.contact import Contact # noqa
# Setup the routes
create_contact_routes(app)
create_invite_routes(app)
create_landing_page_routes(app)
create_signup_routes(app)
create_signin_routes(app)
create_password_routes(app)
create_photo_routes(app)
create_user_routes(app)
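A minimal local bootstrap, assuming the beamit package is importable and its config module provides the database settings (the entry-point file name is hypothetical):
# run.py -- hypothetical development entry point
from beamit.app import app, db

if __name__ == "__main__":
    with app.app_context():
        db.create_all()   # models are imported in beamit.app, so all tables are known
    app.run(host="0.0.0.0", port=5000)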
|