content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import ndef
import pytest
import _test_record_base
def pytest_generate_tests(metafunc):
    """Pytest hook: delegate test parametrization to the shared record test base."""
    _test_record_base.generate_tests(metafunc)
class TestTextRecord(_test_record_base._TestRecordBase):
    """Data-driven tests for ndef.TextRecord; the tables below are consumed
    by _TestRecordBase via pytest_generate_tests."""
    # Class under test and the attribute names the base class compares.
    RECORD = ndef.text.TextRecord
    ATTRIB = "text, language, encoding"
    # (constructor args, expected (text, language, encoding)) pairs.
    test_init_args_data = [
        ((), ('', 'en', 'UTF-8')),
        ((None, None, None), ('', 'en', 'UTF-8')),
        (('Hello',), ('Hello', 'en', 'UTF-8')),
        (('Hello', 'en',), ('Hello', 'en', 'UTF-8')),
        (('Hello', 'en', 'UTF-8'), ('Hello', 'en', 'UTF-8')),
        (('Hello', 'de', 'UTF-16'), ('Hello', 'de', 'UTF-16')),
        (('Hello', 63*'a'), ('Hello', 63*'a', 'UTF-8')),  # 63 chars is the max language length
        ((u'Hallo', u'de',), ('Hallo', 'de', 'UTF-8')),
        ((b'Hallo', b'de',), ('Hallo', 'de', 'UTF-8')),  # bytes input is accepted and decoded
    ]
    # (constructor args, expected keyword-argument repr) pairs.
    test_init_kwargs_data = [
        (('T', 'de', 'UTF-16'), "text='T', language='de', encoding='UTF-16'"),
    ]
    # (constructor args, expected error-message suffix) pairs.
    test_init_fail_data = [
        ((1,), ".text accepts str or bytes, but not int"),
        (('', ''), ".language must be 1..63 characters, got 0"),
        (('', 64*'a'), ".language must be 1..63 characters, got 64"),
        (('', 'a', 'X'), ".encoding may be 'UTF-8' or 'UTF-16', but not 'X'"),
        (('', 0,), ".language accepts str or bytes, but not int"),
        (('', 'a', 0), ".encoding may be 'UTF-8' or 'UTF-16', but not '0'"),
    ]
    # (hex payload, expected (text, language, encoding)) pairs.
    test_decode_valid_data = [
        ('02656e', ("", "en", "UTF-8")),
        ('026465', ("", "de", "UTF-8")),
        ('02656e48656c6c6f', ("Hello", "en", "UTF-8")),
        ('82656efffe480065006c006c006f00', ("Hello", "en", "UTF-16")),
        ('02656cce94', (u"\u0394", "el", "UTF-8")),
        ('82656cfffe9403', (u"\u0394", "el", "UTF-16")),
    ]
    # (hex payload, expected decode-error-message substring) pairs.
    test_decode_error_data = [
        ("82656e54", "can't be decoded as UTF-16"),
        ("02656efffe5400", "can't be decoded as UTF-8"),
        ("00", "language code length can not be zero"),
        ("01", "language code length exceeds payload"),
    ]
    # TextRecord has no relaxed-decode or encode-error cases.
    test_decode_relax = None
    test_encode_error = None
    # (constructor args, expected repr-argument string) pairs.
    test_format_args_data = [
        ((), "'', 'en', 'UTF-8'"),
        (('a',), "'a', 'en', 'UTF-8'"),
        (('a', 'de'), "'a', 'de', 'UTF-8'"),
    ]
    # (constructor args, expected str() output) pairs.
    test_format_str_data = [
        ((),
         "NDEF Text Record ID '' Text '' Language 'en' Encoding 'UTF-8'"),
        (('T'),
         "NDEF Text Record ID '' Text 'T' Language 'en' Encoding 'UTF-8'"),
        (('T', 'de'),
         "NDEF Text Record ID '' Text 'T' Language 'de' Encoding 'UTF-8'"),
    ]
# (hex-encoded NDEF message, expected decoded record list) pairs used by the
# round-trip message tests below.
text_messages = [
    ('D101075402656e54455854',
     [ndef.TextRecord('TEXT', 'en', 'UTF-8')]),
    ('9101075402656e54585431 5101075402656e54585432',
     [ndef.TextRecord('TXT1', 'en', 'UTF-8'),
      ndef.TextRecord('TXT2', 'en', 'UTF-8')]),
]
@pytest.mark.parametrize("encoded, message", text_messages)
def test_message_decode(encoded, message):
    """Decoding the hex-encoded octets yields the expected record list."""
    octets = bytes(bytearray.fromhex(encoded))
    # Decode once; the print is shown by pytest only on failure.
    # (Previously the message was decoded twice, once for the print and
    # once for the assert.)
    records = list(ndef.message_decoder(octets))
    print(records)
    assert records == message
@pytest.mark.parametrize("encoded, message", text_messages)
def test_message_encode(encoded, message):
    """Encoding the record list reproduces the expected octets."""
    octets = bytes(bytearray.fromhex(encoded))
    # Encode once; the print is shown by pytest only on failure.
    # (Previously the message was encoded twice.)
    encoded_octets = list(ndef.message_encoder(message))
    print(encoded_octets)
    assert b''.join(encoded_octets) == octets
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def check(S, a, b):
    """Return 0 if exactly one of `a`, `b` occurs in S, otherwise 1.

    Used to verify that a direction letter only appears together with its
    opposite (N with S, E with W).
    """
    has_a = a in S
    has_b = b in S
    # Exactly one present (XOR) means the pair cannot cancel out.
    if has_a != has_b:
        return 0
    return 1
def main():
    """Read a direction string and print 'Yes' when every direction letter
    present has its opposite present as well, otherwise 'No'."""
    S = input()
    ok = all(check(S, a, b) == 1 for a, b in (('N', 'S'), ('E', 'W')))
    print('Yes' if ok else 'No')
if __name__ == '__main__':
    # Removed a stray text-extraction artifact ('|') that trailed this call.
    main()
from policy.common import Policy, Perms
import os
class Printers (Policy):
    """Policy handler that collects printer definitions from a GPO
    preferences tree and exposes them as template data."""

    def __init__(self):
        # Sets the base Policy's private attributes through their
        # name-mangled forms, bypassing Policy's own initializer.
        # NOTE(review): fragile — confirm Policy has no required setup of
        # its own; a Policy.__init__ accepting these values would be safer.
        self._Policy__name = "Printers"
        self._Policy__script_name = "printers.sh"
        self._Policy__template = "printers.bash.j2"
        self._Policy__perms = Perms.ROOT

    def process(self, scope, path):
        """Scan `path` for printer preference entries.

        Only PortPrinter entries are parsed into (name, addr) dicts;
        SharedPrinter and LocalPrinter subtrees are detected but currently
        only logged, not processed.
        Returns a dict with a single 'printers' key.
        """
        print("{name} processing {path} ({scope})".format(name=self.name,path=path,scope=scope))
        shared_path = "{}/SharedPrinter".format(path)
        port_path = "{}/PortPrinter".format(path)
        local_path = "{}/LocalPrinter".format(path)
        printers = []
        if os.path.exists(shared_path):
            print("SharedPrinters")
        if os.path.exists(port_path):
            print("PortPrinters")
            # Each subdirectory of PortPrinter describes one printer.
            for p in os.listdir(port_path):
                with open("{}/{}/ipAddress".format(port_path,p)) as f:
                    # NOTE(review): f.read() may include a trailing newline — confirm
                    addr = f.read()
                with open("{}/{}/localName".format(port_path,p)) as f:
                    name = f.read()
                prns = {'name': name, 'addr': addr}
                printers.append(prns)
        if os.path.exists(local_path):
            print("LocalPrinters")
        return ({'printers': printers})

    @property
    def data_roots(self):
        # Returns the module-level mapping defined below this class.
        return data_roots
# Maps GPO preference subtrees to the Policy class that handles them.
data_roots = {
    "Preferences/Printers": Printers
}
|
import os
import torchvision.models as models
import gemicai as gem
import unittest
# Shared fixtures for all tests in this module.
data_path = os.path.join("..", "examples", "gemset", "CT")
# A single serialized gemset file (used as a "not a classifier" input).
data_set = os.path.join(data_path, "000001.gemset")
# Scratch directory created and removed by the save/load tests.
test_classifier_dir = os.path.join("test_directory")
# NOTE(review): downloads pretrained weights on first run — network required.
model = models.resnet18(pretrained=True)
train_dataset = gem.DicomoDataset.get_dicomo_dataset(data_path, labels=['BodyPartExamined'])
eval_dataset = gem.DicomoDataset.get_dicomo_dataset(data_path, labels=['BodyPartExamined'])
class TestClassifier(unittest.TestCase):
    """Tests for gem.Classifier: construction, training, evaluation,
    saving and loading.

    Fixes over the previous revision:
    - ``test_from_file_wrong_file_format`` was defined twice, so the first
      definition (wrong path type) never ran; the two were renamed to
      ``test_from_file_wrong_path_type`` and
      ``test_from_file_unpicklable_file``.
    - bare ``None`` expression statements were replaced with ``pass``.
    - ``assertEqual(x, True)`` was replaced with ``assertTrue(x)``.
    - the repeated classifier construction was factored into a helper.
    """

    def _new_classifier(self, **kwargs):
        # Every test builds the same classifier over the shared model and
        # dataset classes; extra constructor arguments are forwarded.
        return gem.Classifier(model, train_dataset.classes('BodyPartExamined'), **kwargs)

    def test_init_correct_usage(self):
        classifier = self._new_classifier()
        self.assertIsInstance(classifier, gem.Classifier)
        self.assertIsInstance(classifier.module, gem.nn.Module)
        self.assertIsInstance(classifier.classes, list)
        self.assertIsInstance(classifier.layer_config, gem.functr.GEMICAIABCFunctor)
        self.assertIsInstance(classifier.loss_function, gem.nn.Module)
        self.assertIsInstance(classifier.optimizer, gem.torch.optim.Optimizer)

    def test__init__wrong_layer_config_type(self):
        with self.assertRaises(TypeError):
            self._new_classifier(layer_config=[])

    def test__init__custom_layer_config_type(self):
        classifier = self._new_classifier(layer_config=gem.functr.DefaultLastLayerConfig())
        self.assertIsInstance(classifier.layer_config, gem.functr.DefaultLastLayerConfig)

    def test__init__wrong_loss_function_type(self):
        with self.assertRaises(TypeError):
            self._new_classifier(loss_function=[])

    def test__init__custom_loss_function_type(self):
        classifier = self._new_classifier(loss_function=gem.nn.MultiLabelMarginLoss())
        self.assertIsInstance(classifier.loss_function, gem.nn.MultiLabelMarginLoss)

    def test__init__wrong_optimizer_type(self):
        with self.assertRaises(TypeError):
            self._new_classifier(optimizer=[])

    def test__init__custom_optimizer_type(self):
        classifier = self._new_classifier(
            optimizer=gem.torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9))
        self.assertIsInstance(classifier.optimizer, gem.torch.optim.SGD)

    def test__init__wrong_enable_cuda_type(self):
        # RuntimeError means no CUDA device is available; skip silently.
        try:
            with self.assertRaises(TypeError):
                self._new_classifier(enable_cuda=list())
        except RuntimeError:
            pass

    def test__init__correct_device_type(self):
        try:
            classifier = self._new_classifier(enable_cuda=True)
            self.assertEqual(str(classifier.device), "cuda")
        except RuntimeError:
            pass

    def test__init__wrong_cuda_device_type(self):
        try:
            with self.assertRaises(TypeError):
                self._new_classifier(enable_cuda=True, cuda_device="6")
        except RuntimeError:
            pass

    def test__init__cuda_correct_initialization(self):
        try:
            classifier = self._new_classifier(enable_cuda=True, cuda_device=0)
            self.assertEqual(str(classifier.device), "cuda:0")
        except RuntimeError:
            pass

    def test_train_correct_usage(self):
        classifier = self._new_classifier()
        classifier.train(train_dataset, epochs=1, pin_memory=True, test_dataset=eval_dataset)

    def test_train_wrong_output_policy_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, pin_memory=True, output_policy=[])

    def test_train_wrong_dataset_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train([], epochs=1)

    def test_train_wrong_test_dataset_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, test_dataset=[])

    def test_train_wrong_batch_size_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, batch_size="z", epochs=1, pin_memory=True)

    def test_train_wrong_epochs_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs="z", pin_memory=True)

    def test_train_negative_epochs(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=-1, pin_memory=True)

    def test_train_wrong_num_workers_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, pin_memory=True, num_workers="z")

    def test_train_negative_num_workers(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, pin_memory=True, num_workers=-1)

    def test_train_pin_memory_wrong_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, pin_memory=None)

    def test_train_wrong_verbosity_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, verbosity="z")

    def test_train_negative_verbosity_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.train(train_dataset, epochs=1, verbosity=-1)

    def test_evaluate_correct_usage(self):
        classifier = self._new_classifier()
        classifier.evaluate(eval_dataset)

    def test_evaluate_wrong_output_policy_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(train_dataset, output_policy=[])

    def test_evaluate_wrong_dataset_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate([])

    def test_evaluate_wrong_batch_size_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(eval_dataset, batch_size="z")

    def test_evaluate_wrong_num_workers_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(eval_dataset, num_workers="z")

    def test_evaluate_def_negative_num_workers(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(eval_dataset, num_workers=-10)

    def test_evaluate_pin_memory_wrong_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(eval_dataset, pin_memory="z")

    def test_evaluate_wrong_verbosity_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(eval_dataset, verbosity="z")

    def test_evaluate_negative_verbosity_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.evaluate(eval_dataset, verbosity=-1)

    def test_save_correct_path(self):
        classifier = self._new_classifier()
        os.mkdir(test_classifier_dir)
        classifier_path = os.path.join(test_classifier_dir, "1.gemclas")
        try:
            classifier.save(classifier_path)
            self.assertTrue(os.path.isfile(classifier_path))
        finally:
            # Best-effort cleanup of the scratch directory.
            try:
                os.remove(classifier_path)
            except FileNotFoundError:
                pass
            os.rmdir(test_classifier_dir)

    def test_save_invalid_path(self):
        classifier = self._new_classifier()
        # The scratch directory does not exist here, so saving must fail.
        classifier_path = os.path.join(test_classifier_dir, "1.gemclas")
        with self.assertRaises(FileNotFoundError):
            classifier.save(classifier_path)

    def test_save_wrong_path_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.save(1)

    def test_set_trainable_layers_layers_wrong_type(self):
        classifier = self._new_classifier()
        with self.assertRaises(TypeError):
            classifier.set_trainable_layers({})

    def test_set_trainable_layers_empty_layers(self):
        classifier = self._new_classifier()

        def check_mode(classifier, layer_mode):
            classifier.set_trainable_layers([("fc", layer_mode)])
            for name, param in classifier.module.named_parameters():
                name = '.'.join(name.split('.')[:-1])
                if name == "fc":
                    self.assertEqual(param.requires_grad, layer_mode)
            # An empty layer list must leave the previous modes untouched.
            classifier.set_trainable_layers([])
            for name, param in classifier.module.named_parameters():
                name = '.'.join(name.split('.')[:-1])
                if name == "fc":
                    self.assertEqual(param.requires_grad, layer_mode)

        check_mode(classifier, True)
        check_mode(classifier, False)

    def test_set_trainable_layers_correct_usage(self):
        classifier = self._new_classifier()

        def check_mode(classifier, layer_mode):
            classifier.set_trainable_layers([("fc", layer_mode)])
            for name, param in classifier.module.named_parameters():
                name = '.'.join(name.split('.')[:-1])
                if name == "fc":
                    self.assertEqual(param.requires_grad, layer_mode)

        check_mode(classifier, True)
        check_mode(classifier, False)

    def test_from_file_invalid_path(self):
        classifier_path = os.path.join(test_classifier_dir, "1.gemclas")
        with self.assertRaises(FileNotFoundError):
            gem.Classifier.from_file(classifier_path)

    def test_from_file_wrong_path_type(self):
        # Renamed: previously shadowed by a duplicate method name below.
        with self.assertRaises(TypeError):
            gem.Classifier.from_file(1)

    def test_from_file_unpicklable_file(self):
        # Renamed from a duplicate ``test_from_file_wrong_file_format``.
        with self.assertRaises(gem.pickle.UnpicklingError):
            gem.Classifier.from_file(data_set)

    def test_from_file_pickled_file_but_wrong_data_inside(self):
        os.mkdir(test_classifier_dir)
        test_file_path = os.path.join(test_classifier_dir, "1.gemclas")
        try:
            # A valid pickle that is not a Classifier must be rejected.
            with open(test_file_path, 'wb') as output:
                gem.io.pickle.dump(list(), output)
            self.assertTrue(os.path.isfile(test_file_path))
            with self.assertRaises(TypeError):
                gem.Classifier.from_file(test_file_path)
        finally:
            try:
                os.remove(test_file_path)
            except FileNotFoundError:
                pass
            os.rmdir(test_classifier_dir)

    def test_from_file_correct_usage(self):
        os.mkdir(test_classifier_dir)
        test_file_path = os.path.join(test_classifier_dir, "1.gemclas")
        try:
            classifier = self._new_classifier()
            classifier.save(test_file_path)
            loaded = gem.Classifier.from_file(test_file_path)
            self.assertIsInstance(loaded, gem.Classifier)
        finally:
            try:
                os.remove(test_file_path)
            except FileNotFoundError:
                pass
            os.rmdir(test_classifier_dir)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# RPG SIMPLES
# BIBLIOTECAS
import os # Sistema operacional
import sys # Sistema-interpretador
from random import random # Gerador de números aleatórios [0,1)
from time import sleep # Aguardar
# CLASSES
class Jogador():
    """Base player class.

    Attributes: health (vida), mana and attack (ataque), plus class-level
    balance constants shared by every character class.

    Methods:
    - atacar(): integer physical damage for the turn.
    - magia(): integer magic damage for the turn (spends mana).
    - descanso(): restores a fraction of health and mana.
    - status(): human-readable stats line.
    """

    # Base character attributes — tweak these to rebalance the game.
    ATRIBUTOS = {
        "Vida" : 500,
        "Mana" : 200,
        "Ataque" : 100
    }
    # Multipliers applied to the base attributes according to each
    # character class's speciality.
    VANTAGENS = {
        "Fraqueza" : 0.8,
        "Normal" : 1.0,
        "Força" : 1.2
    }
    # Minimum and maximum damage fractions, respectively.
    DANO_AMPLITUDE = (0.5, 1.5)
    # Mana cost of casting a spell.
    MAGIA_CUSTO = 50
    # Fraction of health and mana recovered after a battle.
    RECUPERAÇÃO = 0.1

    def __init__(self):
        """Initialize current and maximum stats from the base attributes."""
        self.max_vida = self.ATRIBUTOS["Vida"]
        self.vida = self.max_vida
        self.max_mana = self.ATRIBUTOS["Mana"]
        self.mana = self.max_mana
        self.ataque = self.ATRIBUTOS["Ataque"]

    def _rolar_dano(self, base):
        """Random damage roll: a factor within DANO_AMPLITUDE applied to `base`."""
        minimo, maximo = self.DANO_AMPLITUDE
        fator = minimo + (maximo - minimo) * random()
        return round(fator * base)

    def atacar(self):
        """Physical damage the character inflicts this turn."""
        return self._rolar_dano(self.ataque)

    def magia(self):
        """Magic damage the character inflicts this turn; deducts MAGIA_CUSTO mana."""
        self.mana -= self.MAGIA_CUSTO
        return self._rolar_dano(self.max_mana)

    def descanso(self):
        """Recover a fraction of health and mana, capped at the maxima."""
        self.vida = min(self.vida + round(self.max_vida * self.RECUPERAÇÃO), self.max_vida)
        self.mana = min(self.mana + round(self.max_mana * self.RECUPERAÇÃO), self.max_mana)

    def status(self):
        """Return a string with the character's statistics."""
        return f"Vida: {self.vida}/{self.max_vida} | Mana: {self.mana}/{self.max_mana} | Ataque: {self.ataque}"
class Guerreiro(Jogador):
    """Warrior: strong and tough, with many health points.

    - Health: +++
    - Mana: +
    - Attack: ++
    """

    def __init__(self):
        """Apply the warrior multipliers on top of the base attributes."""
        super().__init__()
        mult = self.VANTAGENS
        self.max_vida = round(self.max_vida * mult["Força"])
        self.vida = self.max_vida
        self.max_mana = round(self.max_mana * mult["Fraqueza"])
        self.mana = self.max_mana
        self.ataque = round(self.ataque * mult["Normal"])
class Ninja(Jogador):
    """Ninja: built for physical damage, with many attack points.

    - Health: +
    - Mana: ++
    - Attack: +++
    """

    def __init__(self):
        """Apply the ninja multipliers on top of the base attributes."""
        super().__init__()
        mult = self.VANTAGENS
        self.max_vida = round(self.max_vida * mult["Fraqueza"])
        self.vida = self.max_vida
        self.max_mana = round(self.max_mana * mult["Normal"])
        self.mana = self.max_mana
        self.ataque = round(self.ataque * mult["Força"])
class Mago(Jogador):
    """Mage: specialized in magic, with many mana points.

    - Health: ++
    - Mana: +++
    - Attack: +
    """

    def __init__(self):
        """Apply the mage multipliers on top of the base attributes."""
        super().__init__()
        mult = self.VANTAGENS
        self.max_vida = round(self.max_vida * mult["Normal"])
        self.vida = self.max_vida
        self.max_mana = round(self.max_mana * mult["Força"])
        self.mana = self.max_mana
        self.ataque = round(self.ataque * mult["Fraqueza"])
class Inimigo():
    """Basic enemy.

    Attributes: health and attack, derived from the player's base
    attributes scaled to 65% and randomized per instance.

    Methods:
    - atacar(): integer physical damage for the turn.
    - status(): human-readable stats line.
    """

    # Enemy base attributes: 65% of the player's. The previous version
    # instantiated Jogador just to read a class attribute; the class
    # attribute is now read directly, and the zip/map pair became a
    # dict comprehension. (Dead commented-out mana code was removed.)
    ATRIBUTOS = {chave: valor * 0.65 for chave, valor in Jogador.ATRIBUTOS.items()}
    # Minimum and maximum damage fractions, respectively.
    DANO_AMPLITUDE = (0.5, 1.5)

    def __init__(self):
        """Randomize this enemy's stats between 50% and 150% of its base."""
        self.max_vida = round(self.ATRIBUTOS["Vida"] * (0.5 + random()))
        self.vida = self.max_vida
        self.ataque = round(self.ATRIBUTOS["Ataque"] * (0.5 + random()))

    def atacar(self):
        """Physical damage the enemy inflicts this turn."""
        return round(((self.DANO_AMPLITUDE[1]-self.DANO_AMPLITUDE[0])*random()+self.DANO_AMPLITUDE[0])*self.ataque)

    def status(self):
        """Return a string with the enemy's statistics."""
        return f"Vida: {self.vida}/{self.max_vida} | Ataque: {self.ataque}"
# FUNÇÕES
def clear():
    """Clear the terminal (Windows uses `cls`, everything else `clear`)."""
    comando = 'cls' if os.name == 'nt' else 'clear'
    os.system(comando)
# MAIN
# Runs only when this program is executed directly, not when imported.
if __name__ == '__main__':
    # Available character classes.
    CLASSES = {
        "Guerreiro" : Guerreiro(),
        "Ninja" : Ninja(),
        "Mago" : Mago()
    }
    clear()  # Clear the terminal
    print("Classes disponíveis:")
    # Show the available classes
    for i in CLASSES:
        print(f"- {i}")
    # Class selection loop
    while True:
        # Normalize the input string
        escolha = input("\nEscolha a sua classe:").capitalize().replace(" ","")
        try:
            player = CLASSES[escolha]
            break
        except KeyError:
            # Narrowed from a bare `except`: only a bad class name can fail here.
            print("\nEscolha inválida!")
    # Player score
    score = 0
    while True:
        clear()  # Clear the terminal
        print("Um novo inimigo aparece!\n")
        inimigo = Inimigo()  # Spawn a new enemy
        while True:
            # Show both combatants' stats
            print(f"INIMIGO: {inimigo.status()}")
            print(f"JOGADOR: {player.status()}")
            # Action menu
            print("\nATACAR | MAGIA | SAIR")
            while True:
                # Read the player's action
                evento = input("\nO que fazer? ").lower().replace(" ","")
                # ATTACK
                if evento == "atacar":
                    dano = player.atacar()  # Roll the damage
                    print(f"\nVocê ataca o inimigo e inflige {dano} de dano.")
                    inimigo.vida -= dano  # Apply the damage
                    break
                # MAGIC
                elif evento == "magia":
                    # Check there is enough mana first
                    if player.mana >= player.MAGIA_CUSTO:
                        dano = player.magia()  # Roll the damage
                        print(f"\nVocê usa uma magia no inimigo e inflige {dano} de dano.")
                        inimigo.vida -= dano  # Apply the damage
                        break
                    else:
                        print("Mana insuficiente!")
                # QUIT
                elif evento == "sair":
                    print(f"\nFim de jogo!\nPontuação: {score}")
                    sys.exit()  # Leave the interpreter
                else:
                    print("\nComando inválido!")
            # Enemy still alive: it strikes back
            if inimigo.vida > 0:
                sleep(1)
                dano = inimigo.atacar()  # Roll the damage
                print(f"O inimigo te ataca e inflige {dano} de dano.\n")
                sleep(1)
                player.vida -= dano  # Apply the damage
            # Enemy defeated
            else:
                score += 1  # Increase the score
                print("\nVocê aniquilou o inimigo!")
                sleep(1)
                player.descanso()  # Restore the player a little
                print("\nVocê consegue descansar um pouco.")
                sleep(2)
                break
        # Player out of health: game over.
        if player.vida <= 0:
            print(f"\nFim de jogo!\nPontuação: {score}")
            # BUG FIX: was `sys.quit()`, which does not exist and raised
            # AttributeError exactly when the player died.
            sys.exit()
|
import pytest
from types import SimpleNamespace
from _pytest.main import Session
from click.testing import CliRunner
from commands.login import cli as login
from commands.status import cli as status
from commands.config import cli as config
from tests.helpers.test_utils import mock_login_get_urls
import sys
# Make the `neoload` package directory importable.
# NOTE(review): this append runs after the `commands.*` imports above —
# confirm those resolve without it (e.g. via pytest's rootdir/conftest
# path handling); otherwise this line should precede them.
sys.path.append('neoload')
# Defaults used when no --token/--url option is given on the command line.
__default_random_token = '12345678912345678901ae6d8af6abcdefabcdefabcdef'
__default_api_url = 'https://neoload-web-api.neotys.perfreleng.org/'
def pytest_addoption(parser):
    """Pytest hook: register the custom command-line options for this suite."""
    parser.addoption('--token', action='store', default=__default_random_token)
    parser.addoption('--url', action='store', default=__default_api_url)
    parser.addoption('--workspace', action='store', default=None)
    # Flag: when set, tests hit the live API instead of mocks.
    parser.addoption('--makelivecalls', action='store_true')
def pytest_configure(config):
    """Deselect live-call tests unless --makelivecalls was given.

    NOTE(review): this overwrites any -m expression the user supplied on
    the command line — confirm that is acceptable.
    """
    if not config.option.makelivecalls:
        setattr(config.option, 'markexpr', 'not makelivecalls')
def pytest_sessionstart(session: Session):
    """
    Called after the Session object has been created
    and before performing collection and entering the run test loop.
    The test suite needs to start already logged in, because during the class initialization
    of the commands, the function base_endpoint_with_workspace() throws an exception if not logged in.
    """
    # Disable name resolution so status output is deterministic.
    CliRunner().invoke(config, ["set", "status.resolvenames=False"])
    # NOTE(review): logs in with a placeholder token against a bad URL —
    # presumably just to seed stored settings; confirm.
    CliRunner().invoke(login, ["xxxxx", '--url', "bad_url"])
@pytest.fixture
def neoload_login(request, monkeypatch):
    """Ensure the CLI is logged in with the credentials from the command line.

    Reads --token/--url/--workspace/--makelivecalls and performs a (mocked)
    login only when the current `status` output does not already match them.
    """
    token = request.config.getoption('--token')
    api_url = request.config.getoption('--url')
    workspace = request.config.getoption('--workspace')
    makelivecalls = request.config.getoption('--makelivecalls')
    if makelivecalls:
        # Live runs resolve names and pin the docker zone.
        CliRunner().invoke(config, ["set", "status.resolvenames=True"])
        CliRunner().invoke(config, ["set", "docker.zone=xWbV4"])
    runner = CliRunner()
    result_status = runner.invoke(status)
    # do login if not already logged-in with the right credentials
    # (the status output masks the token except its last 3 characters).
    if "aren't logged in" in result_status.output \
            or "No settings is stored" in result_status.output \
            or api_url not in result_status.output \
            or '*' * (len(token) - 3) + token[-3:] not in result_status.output:
        mock_login_get_urls(monkeypatch, 'SaaS')
        cli_options = [token, '--url', api_url]
        if workspace and len(workspace) > 0:
            cli_options.extend(['--workspace', workspace])
        runner.invoke(login, cli_options)
    else:
        print('\n@Before : Already logged on %s' % api_url)
@pytest.fixture
def valid_data():
    """Known-good identifiers present on the target NeoLoad Web backend."""
    ids = {
        'test_settings_id': '2e4fb86c-ac70-459d-a452-8fa2e9101d16',
        'test_result_id': '184e0b68-eb4e-4368-9f6e-a56fd9c177cf',
        'test_result_id_to_delete': '07040512-23ca-4d9c-bdb7-a64450ea5949',
        'default_workspace_id': '5e3acde2e860a132744ca916',
    }
    return SimpleNamespace(**ids)
@pytest.fixture
def invalid_data():
    """An identifier in valid uuid format that exists nowhere on the backend."""
    data = SimpleNamespace()
    data.uuid = '75b63bc2-1234-1234-abcd-f712a69db723'
    return data
@pytest.fixture
def monkeypatch(request, monkeypatch):
    """Override the builtin monkeypatch fixture: return None (mocks disabled)
    when the user supplied a real --token.

    BUG FIX: the comparison used `is` (identity), which only worked by
    accident because argparse hands back the very same default object;
    string equality is the correct check.
    """
    token = request.config.getoption('--token')
    # Disable mocks when a specific token is provided
    return monkeypatch if token == __default_random_token else None
|
import logging, threading, time
from acq4.util import Qt
import falconoptics
from ..FilterWheel.filterwheel import FilterWheel, FilterWheelFuture, FilterWheelDevGui
class FalconTurret(FilterWheel):
    """FilterWheel device wrapping a Falcon Optics filter turret."""

    def __init__(self, dm, config, name):
        self.dev = falconoptics.Falcon(config_file=None, update_nonvolatile=True)
        # Silence the falconoptics logger.
        logger = logging.getLogger('falconoptics')
        logger.setLevel(logging.CRITICAL)
        # can't go to initial until home completes.
        self._initialSlot = config.pop('initialSlot')
        FilterWheel.__init__(self, dm, config, name)
        # BUG FIX: _initialFuture was only assigned when homing was needed,
        # but _setInitialPos always read it — AttributeError whenever the
        # turret was already homed. It is now always defined.
        self._initialFuture = None if self.dev.is_homed else self.home()
        if self._initialSlot is not None:
            initThread = threading.Thread(target=self._setInitialPos)
            initThread.start()

    def _setInitialPos(self):
        # Wait on the initial home move (if one was started), then switch
        # to the configured initial slot.
        if self._initialFuture is not None:
            while not self._initialFuture.isDone():
                time.sleep(0.1)
        self.setPosition(self._initialSlot)

    def getPositionCount(self):
        """Number of slide positions on the turret."""
        return self.dev._total_slides

    def _getPosition(self):
        # Normalize the hardware slide index into [0, total_slides).
        return int(self.dev.current_slide) % self.dev._total_slides

    def _setPosition(self, pos):
        # 'home' is a special target handled by the hardware homing routine.
        if pos == 'home':
            self.dev.home(block=False)
        else:
            self.dev.move_to_slide(pos, block=False)
        return FalconTurretFuture(self, pos)

    def home(self):
        """Search for home position on turret; used to recalibrate wheel location.
        """
        return self.setPosition('home')

    def _stop(self):
        self.dev.emergency_stop()

    def isMoving(self):
        return self.dev.is_moving

    def deviceInterface(self, win):
        return FalconDevGui(self)

    def quit(self):
        self.stop()
class FalconTurretFuture(FilterWheelFuture):
    """Move future for FalconTurret; treats 'home' as a special target."""

    def _atTarget(self):
        # Non-home moves use the base slot-position check; a home move is
        # complete once the hardware reports it is homed.
        if self.position != 'home':
            return FilterWheelFuture._atTarget(self)
        return self.dev.dev.is_homed
class FalconDevGui(FilterWheelDevGui):
    """Device GUI for the Falcon turret: adds a home button and manual
    left/right jog buttons below the standard filter-wheel controls."""

    def __init__(self, dev):
        FilterWheelDevGui.__init__(self, dev)
        # Container holding the extra buttons, appended below the base GUI.
        self.btnWidget = Qt.QWidget()
        self.layout.addWidget(self.btnWidget, self.layout.rowCount(), 0)
        self.btnLayout = Qt.QGridLayout()
        self.btnWidget.setLayout(self.btnLayout)
        self.btnLayout.setContentsMargins(0, 0, 0, 0)
        # Recalibrate by searching for the home position.
        self.homeBtn = Qt.QPushButton("Find Home")
        self.homeBtn.clicked.connect(self.dev.home)
        self.btnLayout.addWidget(self.homeBtn, 0, 0, 1, 2)
        # Jog buttons run the motor while pressed and stop on release.
        self.leftBtn = Qt.QPushButton("<<<")
        self.leftBtn.pressed.connect(self.moveLeft)
        self.leftBtn.released.connect(self.stop)
        self.btnLayout.addWidget(self.leftBtn, 1, 0)
        self.rightBtn = Qt.QPushButton(">>>")
        self.rightBtn.pressed.connect(self.moveRight)
        self.rightBtn.released.connect(self.stop)
        self.btnLayout.addWidget(self.rightBtn, 1, 1)

    # Manual turret rotation is hacky but only meant for diagnosing
    # filter position issues; normal operation should not need this.
    def moveLeft(self):
        # Spin the motor backwards at the homing speed.
        self.dev.dev._motor_on()
        self.dev.dev._target_velocity = -self.dev.dev._home_speed

    def moveRight(self):
        # Spin the motor forwards at the homing speed.
        self.dev.dev._motor_on()
        self.dev.dev._target_velocity = self.dev.dev._home_speed

    def stop(self):
        # Halt and de-energize the motor.
        self.dev.dev._target_velocity = 0
        self.dev.dev._motor_off()
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
class Config(TeaModel):
    """
    Model for initing client
    """
    def __init__(self, access_key_id=None, access_key_secret=None, security_token=None, protocol=None,
                 read_timeout=None, connect_timeout=None, http_proxy=None, https_proxy=None, endpoint=None, no_proxy=None,
                 max_idle_conns=None, user_agent=None, socks_5proxy=None, socks_5net_work=None):
        # accesskey id
        self.access_key_id = access_key_id
        # accesskey secret
        self.access_key_secret = access_key_secret
        # security token
        self.security_token = security_token
        # http protocol
        self.protocol = protocol
        # read timeout
        self.read_timeout = read_timeout
        # connect timeout
        self.connect_timeout = connect_timeout
        # http proxy
        self.http_proxy = http_proxy
        # https proxy
        self.https_proxy = https_proxy
        # endpoint
        self.endpoint = endpoint
        # proxy white list
        self.no_proxy = no_proxy
        # max idle conns
        self.max_idle_conns = max_idle_conns
        # user agent
        self.user_agent = user_agent
        # socks5 proxy
        self.socks_5proxy = socks_5proxy
        # socks5 network
        self.socks_5net_work = socks_5net_work

    def validate(self):
        pass

    def to_map(self):
        """Serialize this config to a plain dict with camelCase keys."""
        result = {}
        result['accessKeyId'] = self.access_key_id
        result['accessKeySecret'] = self.access_key_secret
        result['securityToken'] = self.security_token
        result['protocol'] = self.protocol
        result['readTimeout'] = self.read_timeout
        result['connectTimeout'] = self.connect_timeout
        result['httpProxy'] = self.http_proxy
        result['httpsProxy'] = self.https_proxy
        result['endpoint'] = self.endpoint
        result['noProxy'] = self.no_proxy
        result['maxIdleConns'] = self.max_idle_conns
        result['userAgent'] = self.user_agent
        result['socks5Proxy'] = self.socks_5proxy
        result['socks5NetWork'] = self.socks_5net_work
        return result

    def from_map(self, map=None):
        """Populate fields from a plain dict; returns self.

        BUG FIX (best practice): the default was a mutable ``{}``; the
        parameter keeps its original name for backward compatibility.
        """
        if map is None:
            map = {}
        self.access_key_id = map.get('accessKeyId')
        self.access_key_secret = map.get('accessKeySecret')
        self.security_token = map.get('securityToken')
        self.protocol = map.get('protocol')
        self.read_timeout = map.get('readTimeout')
        self.connect_timeout = map.get('connectTimeout')
        self.http_proxy = map.get('httpProxy')
        self.https_proxy = map.get('httpsProxy')
        self.endpoint = map.get('endpoint')
        self.no_proxy = map.get('noProxy')
        self.max_idle_conns = map.get('maxIdleConns')
        self.user_agent = map.get('userAgent')
        self.socks_5proxy = map.get('socks5Proxy')
        self.socks_5net_work = map.get('socks5NetWork')
        return self
class Dog(TeaModel):
    """A dog record with validation on adoption date format and name length."""

    def __init__(self, adopted_date=None, age=None, is_adopted=None, marks=None, name=None):
        self.adopted_date = adopted_date
        self.age = age
        self.is_adopted = is_adopted
        self.marks = marks
        self.name = name

    def validate(self):
        """Validate field formats via the TeaModel helpers (raise on failure)."""
        if self.adopted_date:
            self.validate_pattern(self.adopted_date, 'adopted_date', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}[Z]')
        if self.name:
            self.validate_max_length(self.name, 'name', 32)

    def to_map(self):
        """Serialize to a plain dict; `marks` is copied (None stays None)."""
        result = {}
        result['adopted_date'] = self.adopted_date
        result['age'] = self.age
        result['is_adopted'] = self.is_adopted
        result['marks'] = list(self.marks) if self.marks is not None else None
        result['name'] = self.name
        return result

    def from_map(self, map=None):
        """Populate fields from a plain dict; returns self.

        BUG FIX (best practice): the default was a mutable ``{}``.
        """
        if map is None:
            map = {}
        self.adopted_date = map.get('adopted_date')
        self.age = map.get('age')
        self.is_adopted = map.get('is_adopted')
        marks = map.get('marks')
        self.marks = list(marks) if marks is not None else None
        self.name = map.get('name')
        return self
class DogHome(TeaModel):
    """Data model for a dog home: a leader dog plus member dogs."""
    def __init__(self, dog_members=None, leader=None):
        self.dog_members = dog_members  # list of Dog, or None
        self.leader = leader  # Dog, or None

    def validate(self):
        """Recursively validate nested Dog models."""
        if self.dog_members:
            for member in self.dog_members:
                if member:
                    member.validate()
        if self.leader:
            self.leader.validate()

    def to_map(self):
        """Serialize to a plain dict, mapping nested models recursively."""
        result = {}
        if self.dog_members is not None:
            result['dog_members'] = [m.to_map() if m else None for m in self.dog_members]
        else:
            result['dog_members'] = None
        result['leader'] = self.leader.to_map() if self.leader is not None else None
        return result

    def from_map(self, map=None):
        """Populate fields from a dict, rebuilding nested Dog models; returns self."""
        if map is None:  # FIX: avoid a shared mutable default argument
            map = {}
        members = map.get('dog_members')
        if members is not None:
            self.dog_members = [Dog().from_map(m) for m in members]
        else:
            self.dog_members = None
        if map.get('leader') is not None:
            self.leader = Dog().from_map(map['leader'])
        else:
            self.leader = None
        return self
class GetDemoDogAgeRequest(TeaModel):
    """Request model for the get-demo-dog-age API."""
    def __init__(self, auth_token=None, product_instance_id=None, region_name=None, id=None):
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        self.region_name = region_name
        self.id = id

    def validate(self):
        """No field constraints for this model."""
        pass

    def to_map(self):
        """Serialize to a plain dict."""
        return {
            'auth_token': self.auth_token,
            'product_instance_id': self.product_instance_id,
            'region_name': self.region_name,
            'id': self.id,
        }

    def from_map(self, map=None):
        """Populate fields from a dict; returns self."""
        if map is None:  # FIX: avoid a shared mutable default argument
            map = {}
        self.auth_token = map.get('auth_token')
        self.product_instance_id = map.get('product_instance_id')
        self.region_name = map.get('region_name')
        self.id = map.get('id')
        return self
class GetDemoDogAgeResponse(TeaModel):
    """Response model for the get-demo-dog-age API."""
    def __init__(self, req_msg_id=None, result_code=None, result_msg=None, age=None):
        self.req_msg_id = req_msg_id
        self.result_code = result_code
        self.result_msg = result_msg
        self.age = age

    def validate(self):
        """No field constraints for this model."""
        pass

    def to_map(self):
        """Serialize to a plain dict."""
        return {
            'req_msg_id': self.req_msg_id,
            'result_code': self.result_code,
            'result_msg': self.result_msg,
            'age': self.age,
        }

    def from_map(self, map=None):
        """Populate fields from a dict; returns self."""
        if map is None:  # FIX: avoid a shared mutable default argument
            map = {}
        self.req_msg_id = map.get('req_msg_id')
        self.result_code = map.get('result_code')
        self.result_msg = map.get('result_msg')
        self.age = map.get('age')
        return self
|
import json
import logging
class simpleGraph():
    """Weighted graph loaded from a JSON adjacency file.

    The file maps vertex id (string) -> {neighbor id (string): weight}.
    Keys and weights are coerced to int on load.
    """
    def __initializeGraph(self, graphData: dict):
        """Build the adjacency dict: vertex -> [(neighbor, weight), ...]."""
        self.__graph = {}
        for key, value in graphData.items():
            self.__graph[int(key)] = [(int(n), int(w)) for n, w in value.items()]

    def __readInputFile(self, filename):
        """Load the JSON adjacency file and initialize the graph from it."""
        with open(filename, 'r') as jsonFile:
            data = json.load(jsonFile)
        self.log.debug("JSON data: {}".format(data))
        self.__initializeGraph(data)

    def __init__(self, filename, loggingLevel="WARNING"):
        self.log = logging.getLogger("simpleGraph-logger")
        self.log.setLevel(level=loggingLevel)
        self.__readInputFile(filename)
        self.__numVertices = len(self.__graph.keys())
        self.log.info("Read in graph")

    def getNumVertices(self):
        """Number of vertices in the graph."""
        return self.__numVertices

    def getVertexIDs(self):
        """List of all vertex ids."""
        return list(self.__graph.keys())

    def getUnweightedNeighbors(self, rank):
        """Neighbor ids of *rank*; [] for an unknown vertex."""
        try:
            return [n for n, _ in self.__graph[rank]]
        except KeyError:  # FIX: was a bare except hiding real errors
            return []

    def getWeightedNeighbors(self, rank):
        """(neighbor, weight) pairs of *rank*; [] for an unknown vertex."""
        try:
            return self.__graph[rank]
        except KeyError:  # FIX: was a bare except hiding real errors
            return []
#!/usr/bin/env python3
# -*- coding: iso-8859-15 -*-
from sqlalchemy import *
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
# Placeholder engine; settings_table is expected to supply a real SQLAlchemy
# engine when present.
engine = ""
try:
    from settings_table import *
except ImportError:
    pass  # best-effort: fall back to the placeholder engine above
Base = declarative_base()
########################################################################
class flaredb(Base):
    """ORM model for the ``ships`` table (ship name, damage and stat columns)."""
    __tablename__ = "ships"
    class_id = Column(Integer, primary_key=True)  # surrogate primary key
    # NOTE(review): the plain `str` annotation on a Column attribute is unusual
    # for classic declarative models — confirm it is intentional.
    ship_name: str = Column(String)
    dmg_type = Column(String)
    dmg = Column(String)
    aura = Column(String)
    zen = Column(String)
    #----------------------------------------------------------------------
    def __init__(self, ship_name, dmg_type, dmg, aura, zen):
        """Store the ship's name, damage type/value and aura/zen stats."""
        self.ship_name = ship_name
        self.dmg_type = dmg_type
        self.dmg = dmg
        self.aura = aura
        self.zen = zen
# create tables
# NOTE(review): `engine` is the empty-string placeholder unless settings_table
# replaced it — this call fails in that case; confirm intent.
Base.metadata.create_all(engine)
|
"""
gpcr_ecl_saltbridge.py
Identify GPCR structures with saltbridges between extracellular loops 2 and 3.
Handles all functions.
"""
# python standard library
from collections import OrderedDict
import json
import logging
import pathlib
import pickle
from urllib.error import HTTPError
from urllib.request import urlopen, urlretrieve
import warnings
# external libraries
from appdirs import user_cache_dir
from Bio import pairwise2
from tqdm import tqdm
DATA = pathlib.Path(user_cache_dir()) / 'gpcr_ecl_saltbridge'
def distance(x, y):
    """Euclidean distance between two points in three-dimensional space."""
    squared_deltas = [(a - b) ** 2 for a, b in zip(x, y)]
    return sum(squared_deltas) ** 0.5
def get_gpcr_structures():
    """Retrieve the list of available GPCR structures from GPCRDB.

    Returns the decoded JSON payload (a list of structure dicts).
    """
    url = 'https://gpcrdb.org/services/structure/'
    # FIX: close the HTTP response deterministically instead of leaking it.
    with urlopen(url) as response:
        return json.loads(response.read().decode('utf-8'))
def get_gpcr_sequence_dict(protein_name):
    """Retrieve a dictionary with the residue id as key and a list of residue
    name and generic number as value, queried from GPCRDB for *protein_name*.
    """
    url = 'https://gpcrdb.org/services/residues/{}'.format(protein_name)
    # FIX: close the HTTP response deterministically instead of leaking it.
    with urlopen(url) as response:
        protein = json.loads(response.read().decode('utf-8'))
    sequence_dict = OrderedDict()
    for residue in protein:
        sequence_dict[residue['sequence_number']] = [residue['amino_acid'], residue['display_generic_number']]
    return sequence_dict
def generate_pdb_code_dict(directory=DATA):
    """Build or update the cached dict: pdb code -> [gene name, preferred chain].

    The dict is pickled under *directory* and refreshed from GPCRDB with any
    structures not yet cached.
    """
    directory.mkdir(parents=True, exist_ok=True)
    pdb_code_dict_path = directory / 'pdb_code_dict.pkl'
    if pdb_code_dict_path.is_file():
        # FIX: close the pickle file handle (was pickle.load(open(...))).
        with open(pdb_code_dict_path, 'rb') as pickle_file:
            pdb_code_dict = pickle.load(pickle_file)
    else:
        pdb_code_dict = {}
    structures = get_gpcr_structures()
    for structure in structures:
        if structure['pdb_code'] not in pdb_code_dict.keys():
            # Keep only the first preferred chain if several are listed.
            pdb_code_dict[structure['pdb_code']] = [structure['protein'], structure['preferred_chain'].split(',')[0]]
    with open(pdb_code_dict_path, 'wb') as pickle_file:
        pickle.dump(pdb_code_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
    return pdb_code_dict
def generate_sequence_dict(directory=DATA):
    """Build or update the cached dict: gene name -> per-residue sequence dict.

    Each value maps residue id -> [amino acid, GPCRDB generic number]. The
    dict is pickled under *directory* and refreshed for proteins not cached.
    """
    directory.mkdir(parents=True, exist_ok=True)
    sequence_dict_path = directory / 'sequence_dict.pkl'
    if sequence_dict_path.is_file():
        # FIX: close the pickle file handle (was pickle.load(open(...))).
        with open(sequence_dict_path, 'rb') as pickle_file:
            sequence_dict = pickle.load(pickle_file)
    else:
        sequence_dict = {}
    structures = get_gpcr_structures()
    for structure in structures:
        protein_name = structure['protein']
        if protein_name not in sequence_dict.keys():
            sequence_dict[protein_name] = get_gpcr_sequence_dict(protein_name)
    with open(sequence_dict_path, 'wb') as pickle_file:
        pickle.dump(sequence_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
    return sequence_dict
def download_pdb_files(pdb_codes, directory=DATA):
    """Download pdb files from the PDB specified in the provided pdb code list
    and save the files in the provided directory (mmCIF fallback on 404).
    """
    directory.mkdir(parents=True, exist_ok=True)
    for pdb_code in pdb_codes:
        try:
            # BUG FIX: files were previously written under the global DATA
            # directory, silently ignoring the *directory* parameter.
            file_path = directory / f'{pdb_code}.pdb'
            if not file_path.is_file():
                urlretrieve(f'https://files.rcsb.org/download/{pdb_code}.pdb', file_path)
                logging.debug(f"Downloaded pdb file for {pdb_code} ...")
        except HTTPError:
            # Large structures are only served in mmCIF format.
            file_path = directory / f'{pdb_code}.cif'
            if not file_path.is_file():
                urlretrieve(f'https://files.rcsb.org/download/{pdb_code}.cif', file_path)
                logging.debug(f"Downloaded mmcif file for {pdb_code} ...")
    return
def update_data(directory=DATA):
    """Refresh all cached GPCRDB dictionaries and PDB files.

    Returns (pdb_code_dict, sequences_dict).
    """
    codes = generate_pdb_code_dict(directory=directory)
    sequences = generate_sequence_dict(directory=directory)
    download_pdb_files(codes.keys(), directory=directory)
    return codes, sequences
def read_pdb_structure(pdb_code, directory=DATA):
    """Parse a cached structure file for *pdb_code*, preferring .pdb over .cif."""
    from Bio import BiopythonWarning
    from Bio.PDB import PDBParser, MMCIFParser
    pdb_path = directory / f'{pdb_code}.pdb'
    if pdb_path.is_file():
        parser, file_path = PDBParser(), pdb_path
    else:
        parser, file_path = MMCIFParser(), directory / f'{pdb_code}.cif'
    # Biopython is noisy about imperfect PDB files; silence its warnings.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', BiopythonWarning)
        return parser.get_structure(pdb_code, file_path)
def generate_pdb_sequence_dict(pdb_code, preferred_chain, directory=DATA):
    """Map residue ids of the preferred chain to one-letter codes.

    Hetero atoms (non-blank hetfield) are skipped.
    """
    from Bio.SeqUtils import seq1
    structure = read_pdb_structure(pdb_code, directory=directory)
    chain_residues = structure[0][preferred_chain].get_residues()
    return OrderedDict(
        (res.get_id()[1], seq1(res.get_resname()))
        for res in chain_residues
        if res.get_id()[0] == ' '  # blank hetfield == standard residue
    )
def assign_generic_numbers_to_pdb(sequence_dict, pdb_sequence_dict):
    """Align the full GPCRDB sequence to the PDB-observed sequence and map PDB
    residue ids to GPCRDB generic numbers.

    sequence_dict: residue id -> [amino acid, generic number] (full sequence).
    pdb_sequence_dict: PDB residue id -> one-letter code (observed residues).
    Returns: PDB residue id -> generic number (None where GPCRDB has none).
    """
    sequence = ''.join([sequence_dict[residue][0] for residue in sequence_dict.keys()])
    pdb_sequence = ''.join([pdb_sequence_dict[residue] for residue in pdb_sequence_dict.keys()])
    # Global alignment; gap open -10, gap extend 0 (best alignment taken).
    alignment = pairwise2.align.globalxs(sequence, pdb_sequence, -10, 0)[0]
    pdb_generic_numbers_dict = {}
    pdb_sequence_ids = list(pdb_sequence_dict.keys())
    counter = 1       # 1-based position in the full GPCRDB sequence
    pdb_counter = 0   # 0-based position in the observed PDB residues
    for residue, pdb_residue in zip(alignment[0], alignment[1]):
        if residue != '-' and pdb_residue != '-':
            # Both sequences present at this column: record the mapping.
            pdb_generic_numbers_dict[pdb_sequence_ids[pdb_counter]] = sequence_dict[counter][1]
            counter += 1
            pdb_counter += 1
        else:
            # Gap on one side: advance only the side that has a residue.
            if residue != '-':
                counter += 1
            if pdb_residue != '-':
                pdb_counter += 1
    return pdb_generic_numbers_dict
def get_salt_bridges(directory=DATA):
    """ This function analyzes pdb files to contain salt bridges between ECL2 and ECL3 and returns the pdb codes. """
    # Side-chain atoms carrying negative (ni) and positive (pi) charge.
    ni_residues = {'ASP': ['OD1', 'OD2'], 'GLU': ['OE1', 'OE2']}
    pi_residues = {'ARG': ['NE', 'NH1', 'NH2'], 'HIS': ['ND1', 'NE2'], 'LYS': ['NZ']}
    pdb_code_dict, sequences_dict = update_data(directory=directory)
    salt_bridge_dict = {}  # protein name -> [pdb codes with an ECL2/ECL3 bridge]
    for pdb_code in tqdm(pdb_code_dict.keys()):
        distances = []
        # Charged-atom coordinates per loop: ECL2 (between TM4 and TM5) and
        # ECL3 (between TM6 and TM7).
        ecl2_ni, ecl2_pi, ecl3_ni, ecl3_pi = [], [], [], []
        protein_name, preferred_chain = pdb_code_dict[pdb_code]
        try:
            pdb_sequence_dict = generate_pdb_sequence_dict(pdb_code, preferred_chain)
        except KeyError:
            chain_dict = {'6ORV': 'AP'}  # erroneous chain identifiers in GPCRDB
            if pdb_code in chain_dict.keys():
                preferred_chain = chain_dict[pdb_code]
                pdb_sequence_dict = generate_pdb_sequence_dict(pdb_code, preferred_chain)
            else:
                print(f'Error for {pdb_code} ...')
                continue
        pdb_generic_numbers_dict = assign_generic_numbers_to_pdb(sequences_dict[protein_name], pdb_sequence_dict)
        structure = read_pdb_structure(pdb_code, directory=directory)
        # Flags marking which transmembrane helices have been reached so far.
        h4, h5, h6, h7 = False, False, False, False
        h7_counter = 0  # backbone N atoms seen since entering TM7
        for residue in structure[0][preferred_chain].get_residues():
            resid = residue.get_id()[1]
            if resid in pdb_generic_numbers_dict.keys():
                generic_number = pdb_generic_numbers_dict[resid]
                if generic_number is not None:
                    # First digit of the generic number is the helix index.
                    if generic_number.split('.')[0] == '4':
                        h4 = True
                    elif generic_number.split('.')[0] == '5':
                        h5 = True
                    elif generic_number.split('.')[0] == '6':
                        h6 = True
                    elif generic_number.split('.')[0] == '7':
                        h7 = True
                else:
                    generic_number = 'x.x'  # loop residue without a generic number
            residue_name = residue.get_resname()
            for atom in residue.get_atoms():
                atom_name = atom.get_name()
                position = [x for x in atom.get_vector()]
                if h4 and not h5:
                    # Past TM4 but before TM5, i.e. ECL2: collect charged atoms.
                    if generic_number.split('.')[0] != '4':
                        if residue_name in ni_residues.keys():
                            if atom_name in ni_residues[residue_name]:
                                ecl2_ni.append(position)
                        if residue_name in pi_residues.keys():
                            if atom_name in pi_residues[residue_name]:
                                ecl2_pi.append(position)
                if h6 and not h7:
                    # Past TM6 but before TM7, i.e. ECL3.
                    if generic_number.split('.')[0] != '6':
                        if residue_name in ni_residues.keys():
                            if atom_name in ni_residues[residue_name]:
                                ecl3_ni.append(position)
                        if residue_name in pi_residues.keys():
                            if atom_name in pi_residues[residue_name]:
                                ecl3_pi.append(position)
                if h7:
                    # Also count the first few TM7 residues (up to 4 backbone N
                    # atoms) toward ECL3 — presumably to catch partners at the
                    # loop/helix boundary; confirm the intended window.
                    if h7_counter <= 3:
                        if atom_name == 'N':
                            h7_counter += 1
                        if residue_name in ni_residues.keys():
                            if atom_name in ni_residues[residue_name]:
                                ecl3_ni.append(position)
                        if residue_name in pi_residues.keys():
                            if atom_name in pi_residues[residue_name]:
                                ecl3_pi.append(position)
        # All cross-loop anion/cation atom-pair distances.
        for ni in ecl2_ni:
            for pi in ecl3_pi:
                distances.append(distance(ni, pi))
        for ni in ecl3_ni:
            for pi in ecl2_pi:
                distances.append(distance(ni, pi))
        if len(distances) > 0:
            if min(distances) < 5:  # salt-bridge distance cutoff (Angstrom)
                if protein_name in salt_bridge_dict.keys():
                    salt_bridge_dict[protein_name].append(pdb_code)
                else:
                    salt_bridge_dict[protein_name] = [pdb_code]
    return salt_bridge_dict
|
from .stage01_ale_analysis_io import stage01_ale_analysis_io
class stage01_ale_analysis_execute(stage01_ale_analysis_io):
    """Execution stage for ALE analysis; all behavior comes from the io base class."""
from itertools import permutations
from pathlib import Path
from number_class import NumberLeaf, NumberTree
from ycecream import y as ic
def part1():
    """Sum every snailfish number in input order and print the final magnitude."""
    lines = Path("input.txt").read_text().splitlines()
    numbers = (NumberTree.from_string(line) for line in lines)
    total = next(numbers)
    for number in numbers:
        total += number
    print("Part 1", total.magnitude)
def part2():
    """Print the largest magnitude from adding any two distinct numbers (order matters)."""
    lines = Path("input.txt").read_text().splitlines()
    numbers = [NumberTree.from_string(line) for line in lines]
    best = max((a + b).magnitude for a, b in permutations(numbers, 2))
    print("Part 2", best)
if __name__ == "__main__":
    # Run both puzzle parts when executed as a script.
    part1()
    part2()
|
"""
Main file to run the directory app.
"""
from django.apps import AppConfig
class DirectoryConfig(AppConfig):
    """
    Django application configuration for the ``directory`` app.
    """
    name = 'directory'  # dotted path Django uses to locate this app
|
from django.shortcuts import render
from .models import UserProfile, Job, Skill, Degree, Project
# Create your views here.
def home(request):
    """Render the base template with all profiles, jobs, skills, degrees and projects."""
    context = {
        'profiles': UserProfile.objects.all(),
        'jobs': Job.objects.all(),
        'skills': Skill.objects.all(),
        'degrees': Degree.objects.all(),
        'projects': Project.objects.all(),
    }
    return render(request, 'base.html', context)
from typing import Any, Callable, Iterable, Tuple, TypeVar
_T = TypeVar('_T')
_U = TypeVar('_U')
def _is_iterable(x: Any) -> bool:
    """Return True if *x* exposes ``__iter__``.

    NOTE(review): objects iterable only via ``__getitem__`` are not detected;
    confirm that is acceptable for callers.
    """
    return hasattr(x, '__iter__')
def identity(x: _T) -> _T:
    """The identity function.

    >>> identity(5)
    5
    """
    return x
def compose(f: Callable[[_T], _U], g: Callable[..., _T]) -> Callable[..., _U]:
    """
    Create the composition of `f` and `g`, where the output of `g` is passed to `f`.
    >>> def f(x):
    ...     return 2 * x
    >>> def g(x, y):
    ...     return x - y
    >>> compose(f, g)(1, 2)
    -2
    """
    def composed(*args: Any) -> _U:
        return f(g(*args))
    return composed
def tuple_params(f: Callable[..., _T]) -> Callable[[Tuple[Any, ...]], _T]:
    """Wrap *f* so it takes a single tuple holding its positional parameters."""
    def unpacked(args: Tuple[Any, ...]) -> _T:
        return f(*args)
    return unpacked
def _map_with_args(f: Callable[..., _U], args: Iterable[_T]) -> Iterable[Tuple[_T, _U]]:
    """Lazily pair each argument with its image under *f*: (arg, f(arg))."""
    return ((arg, f(arg)) for arg in args)
def argmin(f: Callable[[_T], Any], args: Iterable[_T], *, key: Callable[..., Any]=identity) -> _T:
    """
    The element in `args` that produces the smallest output of `f`.
    If two values of `f` are minimal, returns the first set of arguments in `args`
    that produces the minimal value of `f`.
    >>> argmin(identity, [0, 1, 5, 3])
    0
    >>> argmin(tuple_params(lambda x, y: x + y), [(0, 1), (1, 5), (3, 2)])
    (0, 1)
    """
    best_arg, _ = min(_map_with_args(f, args), key=lambda pair: key(pair[1]))
    return best_arg
def argmax(f: Callable[[_T], Any], args: Iterable[_T], *, key: Callable[..., Any]=identity) -> _T:
    """
    The element in `args` that produces the largest output of `f`.
    If two values of `f` are maximal, returns the first set of arguments in `args`
    that produces the maximal value of `f`.
    >>> argmax(identity, [0, 1, 5, 3])
    5
    >>> argmax(tuple_params(lambda x, y: x + y), [(0, 1), (1, 5), (3, 2)])
    (1, 5)
    """
    best_arg, _ = max(_map_with_args(f, args), key=lambda pair: key(pair[1]))
    return best_arg
|
import re
from events import events
from settings import DEFAULT_RESPONSE
class EventHandler(object):
    """Dispatch incoming messages to the first matching registered callback."""
    def __init__(self):
        # Compile every registered pattern once at startup.
        self.events = [(re.compile(pattern), callback) for pattern, callback in events]

    def route(self, update):
        """Return the first truthy callback answer, or DEFAULT_RESPONSE if none."""
        text = update.message.text
        for pattern, callback in self.events:
            match = pattern.match(text)
            if not match:
                continue
            answer = callback(update, match)
            if answer:
                return answer
        return DEFAULT_RESPONSE
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Import Modules
import re
import json
import requests as r
from .iotools import get_ghcnd_stations
from geopy.distance import great_circle
# -
def find_station(*args):
    """Find stations by certain Search Criteria
    1 Arg: Search by Name
    3 Arg: Search by Lat Lon Distance limit

    Results are printed to stdout; always returns None.
    """
    stns=0
    if len(args) ==1:
        # --- Search by (case-insensitive) station-name substring ---
        station_name=args[0]
        print("LOOKUP BY STATION NAME: ",station_name)
        station_name=station_name.upper()
        ghcnd_stations=get_ghcnd_stations()
        # Column 5 of the inventory array holds the station name.
        stns=[x for x in ghcnd_stations[:,5] if re.search(station_name,x)]
        print("GHCND ID LAT LON ELEV ST STATION NAME")
        print("###############################################################")
        for station_counter in range(len(stns)):
            ghcnd_meta = ghcnd_stations[ghcnd_stations[:,5]== stns[station_counter]]
            print(ghcnd_meta[0][0],ghcnd_meta[0][1],ghcnd_meta[0][2],ghcnd_meta[0][3],ghcnd_meta[0][4],ghcnd_meta[0][5])
    elif len(args)==3:
        # --- Search by proximity: latitude, longitude, distance limit (km) ---
        station_lat=args[0]
        station_lon=args[1]
        distance_limit=args[2]
        print("LOOKUP BY STATION LAT: ",station_lat," LON: ",station_lon, " DIST LIMIT (km): ",distance_limit)
        target_latlon = (float(station_lat), float(station_lon))
        ghcnd_stations=get_ghcnd_stations()
        print("GHCND ID LAT LON ELEV ST STATION NAME DIST")
        print("###########################################################################")
        for ghcnd_counter in range(ghcnd_stations[:,0].size):
            candidate_latlon=(ghcnd_stations[ghcnd_counter][1], ghcnd_stations[ghcnd_counter][2])
            # Great-circle distance from the target point, in kilometers.
            dist=great_circle(target_latlon, candidate_latlon).kilometers
            if dist <= distance_limit:
                print(ghcnd_stations[ghcnd_counter][0],
                      ghcnd_stations[ghcnd_counter][1],
                      ghcnd_stations[ghcnd_counter][2],
                      ghcnd_stations[ghcnd_counter][3],
                      ghcnd_stations[ghcnd_counter][4],
                      ghcnd_stations[ghcnd_counter][5],
                      "{:.2f}".format(dist),
                      )
    else:
        print("USAGE\n NAME or\n LAT LON DIST")
        return None
    return None
# MODULE: get_metadata
# Get Metadata From Station
# 2 sources
# - ghcnd-stations.txt
# - Historical Observing Metadata Repository
# (HOMR)
# ################################################
def get_metadata(station_id):
    """Print metadata for *station_id* from the GHCND inventory and NOAA HOMR.

    Best-effort: any HOMR field that cannot be retrieved is reported as "N/A".
    Output goes to stdout; always returns None.
    """
    # --- Metadata from the station inventory file (ghcnd-stations.txt) ----
    ghcnd_stations = get_ghcnd_stations()
    ghcnd_meta = ghcnd_stations[ghcnd_stations[:, 0] == station_id]
    ghcnd_lat = float(ghcnd_meta[0][1])
    ghcnd_lon = float(ghcnd_meta[0][2])
    ghcnd_alt = float(ghcnd_meta[0][3])
    # Normalize the name: trim, collapse blank runs, use '_' as the separator.
    ghcnd_name = re.sub(' +', ' ', ghcnd_meta[0][5].strip()).replace(" ", "_")
    # --- Metadata from HOMR ----------------------------------------------
    homr_link = 'http://www.ncdc.noaa.gov/homr/services/station/search?qid=GHCND:' + station_id
    ghcnd_state = "N/A"
    ghcnd_climdiv = "N/A"
    ghcnd_county = "N/A"
    ghcnd_nwswfo = "N/A"
    ghcnd_coopid = "N/A"
    ghcnd_wbanid = "N/A"
    homr_json = None
    try:
        homr_json = json.loads(r.get(homr_link).text)
    except Exception:  # FIX: was a bare except; keep best-effort semantics
        pass  # offline or malformed response: every HOMR field stays "N/A"

    def lookup(*keys):
        """Best-effort nested HOMR lookup; returns json.dumps(value) or "N/A"."""
        try:
            value = homr_json['stationCollection']['stations'][0]
            for key in keys:
                value = value[key]
            return json.dumps(value)
        except Exception:
            return "N/A"

    ghcnd_state = lookup('location', 'nwsInfo', 'climateDivisions', 0, 'stateProvince')
    ghcnd_climdiv = lookup('location', 'nwsInfo', 'climateDivisions', 0, 'climateDivision')
    ghcnd_county = lookup('location', 'geoInfo', 'counties', 0, 'county').replace(" ", "_")
    ghcnd_nwswfo = lookup('location', 'nwsInfo', 'nwsWfos', 0, 'nwsWfo')
    # COOP / WBAN ids live in the identifiers list. FIX: match each entry's
    # idType directly instead of the previous flag scan, which depended on
    # "idType" preceding "id" in the JSON key order.
    try:
        for ident in homr_json['stationCollection']['stations'][0]['identifiers']:
            if ident.get('idType') == 'COOP':
                ghcnd_coopid = ident.get('id', ghcnd_coopid)
            elif ident.get('idType') == 'WBAN':
                ghcnd_wbanid = ident.get('id', ghcnd_wbanid)
    except Exception:
        pass  # no identifiers available: ids stay "N/A"
    # --- Write everything out --------------------------------------------
    print(station_id)
    print(" Station Name: ", ghcnd_name)
    print(" Station Lat: ", ghcnd_lat)
    print(" Station Lon: ", ghcnd_lon)
    print(" Station Elev: ", ghcnd_alt)
    print(" Station State: ", ghcnd_state.strip('""'))
    print(" Station Climate Division: ", ghcnd_climdiv.strip('""'))
    print(" Station County: ", ghcnd_county.strip('""'))
    print(" Station NWS Office: ", ghcnd_nwswfo.strip('""'))
    print(" Station COOP ID: ", ghcnd_coopid.strip('""'))
    print(" Station WBAN ID: ", ghcnd_wbanid.strip('""'))
    return None
|
import GPUtil
import os
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from firebase_admin import storage
import threading
import random
darknetPath = "/homes/ma438/scratch/darknet/"
rootDataPath = "/homes/ma438/scratch/darknet/labs/"
outputPath = "/homes/ma438/scratch/darknet/labsOut/"
outputImgPath = "/homes/ma438/scratch/darknet/labsImg/"
auth = "alphacam-94bcc-firebase-adminsdk-4i1bs-0ac44401da.json"
class FB:
    """Wrapper around the alphacam Firebase app (realtime DB + storage bucket)."""
    def __init__(self):
        cred = credentials.Certificate(auth)
        self.default_app = firebase_admin.initialize_app(cred, {
            'databaseURL': 'https://alphacam-94bcc.firebaseio.com',
            'storageBucket': "alphacam-94bcc.appspot.com"
        })
        self.root = db.reference()
        self.labs = db.reference("labs")
        self.storage = storage.bucket(app=self.default_app)

    def getAllLabImage(self):
        """Wipe the local data dir and download every lab image referenced in the DB."""
        labs_dict = self.labs.get()  # renamed from `dict`: stop shadowing the builtin
        os.system("rm -rf %s" % rootDataPath)
        os.system("mkdir %s" % rootDataPath)
        for k in labs_dict.keys():
            img = labs_dict[k]['img']
            if labs_dict[k]['isLab'] == True:
                os.system("wget -P %s %s" % (rootDataPath, img))

    def analyseResult(self):
        """Write detected people counts back to the DB; skip labs whose run failed (-1)."""
        labs_dict = self.labs.get()
        for k in labs_dict.keys():
            cnt = peopleCount(labs_dict[k])
            if cnt != -1:
                labs_dict[k]['people'] = cnt  # FIX: reuse cnt; log was re-read before
        # update
        self.labs.update(labs_dict)

    def analyisSingle(self, lab):
        # NOTE(review): computes the image basename and discards it — looks
        # unfinished; method name is also misspelled but kept for API compat.
        lab = os.path.basename(os.path.normpath(lab['img']))
def bestGPU():
    """Pick an available GPU id list; fall back to a random device if none is free."""
    try:
        return GPUtil.getFirstAvailable()
    except RuntimeError:
        # No GPU currently available: gamble on one of the 8 devices.
        return [random.randint(0, 7)]
def predict(gpu, lab, fb):
    """Run darknet detection for one lab image on the given GPU and upload the result."""
    darknetCmd = "darknet -i %d detect cfg/yolov3.cfg yolov3.weights %s -out %s > %s" % (
        gpu,
        rootDataPath + lab,
        outputImgPath + lab,
        outputPath + lab + ".log")
    print(darknetCmd)
    os.system(darknetCmd)
    # Check if error
    try:
        f = open(outputPath + lab + ".log")
        try:
            log_text = f.read()
        finally:
            f.close()  # FIX: the file handle was previously leaked
    except IOError:
        return
    # FIX: substring test on the contents; `"CUDA Error" in f` compared whole
    # lines of the file object and could never match.
    if "CUDA Error" in log_text:
        print("Failed return")
        return
    blob = fb.storage.blob(lab)
    blob.upload_from_filename(outputImgPath + lab + ".png")
    fb.analyseResult()
def clean():
    """Recreate both output directories from scratch inside the darknet dir."""
    os.chdir(darknetPath)
    for path in (outputPath, outputImgPath):
        os.system("rm -rf %s" % path)
        os.system("mkdir %s" % path)
def predictAll(fb):
    """Run predict() for every downloaded lab image, one thread per image."""
    workers = []
    for lab in os.listdir(rootDataPath):
        worker = threading.Thread(target=predict, args=(bestGPU()[0], lab, fb,))
        worker.start()
        workers.append(worker)
    # Block until every detection run has finished.
    for worker in workers:
        worker.join()
def peopleCount(lab):
    """Count 'person' detections in a lab's darknet log; -1 signals a CUDA failure."""
    log_name = os.path.basename(os.path.normpath(lab['img']))
    # FIX: use a context manager; the file handle was previously never closed.
    with open(outputPath + log_name + ".log", 'r') as log_file:
        contents = log_file.read()
    if "CUDA Error" in contents:
        return -1
    return contents.count("person")
|
from logicqubit.logic import *
from logicqubit.gates import *
from logicqubit.synthesis import *
# Pauli-decompose a 4x4 Hermitian matrix and print its coefficients.
A = Matrix([[15, 9, 5, -3],[9, 15, 3, -5],[5, 3, 15, -9],[-3, -5, -9, 15]])*(1/4)
dep = PauliDecomposition(A)
print(dep.get_a())
# Two-qubit computational basis states built from single-qubit kets.
ket0 = Hilbert.ket(0)
ket1 = Hilbert.ket(1)
ket00 = Hilbert.kronProduct([ket0, ket0])
ket01 = Hilbert.kronProduct([ket0, ket1])
ket10 = Hilbert.kronProduct([ket1, ket0])
ket11 = Hilbert.kronProduct([ket1, ket1])
# Expectation value <11|A|11>.
value = ket11.adjoint() * A * ket11
print(value.get()[0])
import logging
import simplejson as json
from restclients.models.bridge import BridgeCustomField
from restclients.bridge import get_resource
logger = logging.getLogger(__name__)
URL_PREFIX = "/api/author/custom_fields"
def get_custom_fields():
    """
    Return a list of BridgeCustomField objects
    """
    return _process_json_resp_data(get_resource(URL_PREFIX))
def _process_json_resp_data(resp):
    """Parse a custom-fields JSON response body into BridgeCustomField objects."""
    resp_data = json.loads(resp)
    return [
        BridgeCustomField(field_id=value["id"], name=value["name"])
        for value in resp_data.get("custom_fields", [])
        if "id" in value and "name" in value
    ]
def get_regid_field_id():
    """Return the field_id of the REGID custom field, or None if absent."""
    for custom_field in get_custom_fields():
        if custom_field.is_regid():
            return custom_field.field_id
    return None
def new_regid_custom_field(uwregid):
    """
    Return a BridgeCustomField object for REGID
    to be used in a POST, PATCH request
    """
    regid_field_id = get_regid_field_id()
    return BridgeCustomField(field_id=regid_field_id,
                             name=BridgeCustomField.REGID_NAME,
                             value=uwregid)
|
import cv2
def readMap(mapFileName):
    """Read a comma-separated integer grid file into a list of int rows."""
    # FIX: use a context manager so the file handle is always closed.
    with open(mapFileName, "r") as mapFile:
        lines = mapFile.readlines()
    grid = []  # renamed from `map` to stop shadowing the builtin
    for line in lines:
        grid.append([int(cell) for cell in line.split(',')])
    return grid
def writeMap(imageFileName, mapFileName):
    """Threshold a grayscale image at 127 and write it as a comma-separated 0/1 grid.

    Also displays the source image until a key is pressed.
    """
    img = cv2.imread(imageFileName, 0)
    height, width = img.shape
    rows = []
    for i in range(height):
        rows.append(','.join('1' if img[i][j] > 127 else '0' for j in range(width)))
    # FIX: open the output only after a successful read, inside a context
    # manager so the handle is closed even on error.
    with open(mapFileName, "w") as mapFile:
        mapFile.write('\n'.join(rows))
    print("Map saved to file: ", mapFileName)
    cv2.imshow(imageFileName, img)
    cv2.waitKey(0)
if __name__ == "__main__":
    # Convert map.png to the comma-separated grid file map.txt.
    writeMap("map.png", "map.txt")
|
# Generated by Django 2.1.5 on 2020-10-17 21:32
from django.db import migrations, models
import django.db.models.deletion
import hostel.service.variables
class Migration(migrations.Migration):
    # Initial schema migration for this app: creates the Device model backed
    # by the pre-existing `devices` table.
    initial = True
    dependencies = [
        ('common', '0002_auto_20201017_2132'),
    ]
    operations = [
        migrations.CreateModel(
            name='Device',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('netname', models.CharField(max_length=255, unique=True, validators=[hostel.service.variables.validate_netname])),
                ('start_unit', models.IntegerField(null=True)),
                ('rack_placement', models.CharField(choices=[('front', 'С передней стороны стойки'), ('back', 'С задней стороны стойки')], default='front', max_length=20)),
                ('whole_rack_depth', models.BooleanField(default=True)),
                ('type', models.CharField(choices=[('router', 'Маршрутизатор'), ('switch', 'Коммутатор'), ('server', 'Сервер'), ('transponder', 'Транспондер'), ('kvm', 'Виртуалка'), ('ups', 'Бесперебойник'), ('nas', 'NAS'), ('pdu', 'Управляемая розетка'), ('t8', 'T8'), ('mux', 'Мультиплексор'), ('phone', 'Телефон'), ('other', 'Непонятное')], max_length=20, null=True)),
                ('status', models.CharField(blank=True, choices=[('+', '+'), ('-', '—')], default='+', max_length=20, null=True)),
                ('comment', models.CharField(blank=True, max_length=2048, null=True)),
                ('community', models.CharField(blank=True, max_length=40, null=True)),
                ('version', models.CharField(blank=True, max_length=512, null=True)),
                ('is_managed', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('datacenter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='device', to='common.Datacenter')),
            ],
            options={
                'db_table': 'devices',  # maps onto the existing table
                'managed': True,
            },
        ),
    ]
|
from cms.extensions.toolbar import ExtensionToolbar
from cms.utils import get_language_list
from django.utils.encoding import force_text
from django.utils.translation import get_language_info
class TitleExtensionToolbar(ExtensionToolbar):
    """Toolbar that adds one modal edit item per page language for a title extension.

    Subclasses set `model` (the extension model) and `insert_after` (the name
    of the existing menu item to insert behind).
    """
    model = None
    insert_after = None

    def get_item_position(self, menu):
        """Return the menu position right after the `insert_after` item, or None."""
        position = None
        for items in menu._memo.values():
            for item in items:
                # Match both the plain item name and its "Name..." modal variant.
                if force_text(getattr(item, 'name', None)) in (
                        force_text(self.insert_after),
                        '{0}...'.format(self.insert_after)
                ):
                    position = menu._item_position(item) + 1
                    break
        return position

    def populate(self):
        """Add the extension's edit entries to the current page toolbar menu."""
        current_page_menu = self._setup_extension_toolbar()
        if not current_page_menu or not self.page:
            return
        languages = get_language_list(self.current_site.pk)
        is_single_lang = len(languages) < 2
        position = self.get_item_position(current_page_menu)
        urls = self.get_title_extension_admin()
        page = self._get_page()
        titleset = page.title_set.filter(language__in=languages)
        # django-cms renamed `edit_mode` to `edit_mode_active`; support both.
        if hasattr(self.toolbar, 'edit_mode_active'):
            not_edit_mode = not self.toolbar.edit_mode_active
        else:
            not_edit_mode = not self.toolbar.edit_mode
        # Single language: add items directly to the page menu; otherwise
        # group them inside a per-model submenu.
        extended_menu = current_page_menu if is_single_lang else (
            current_page_menu.get_or_create_menu(
                key='{0}_menu'.format(self.model._meta.db_table),
                verbose_name=self.model._meta.verbose_name,
                position=position, disabled=not_edit_mode))
        nodes = [(title_extension, url, title) for (
            (title_extension, url), title) in zip(urls, titleset)]
        for title_extension, url, title in nodes:
            item_position = position if is_single_lang else None
            language_str = get_language_info(title.language)['name_translated']
            # Prefix the item with the language name when several languages exist.
            name = '{0}{1}'.format(
                '' if is_single_lang else (language_str + ' '),
                self.model._meta.verbose_name)
            extended_menu.add_modal_item(
                name, url=url, disabled=not_edit_mode, position=item_position)
|
import random
import typing as t
import unittest
from bamboo import (
ASGIApp,
ASGIHTTPEndpoint,
WSGIApp,
WSGIEndpoint,
WSGIServerForm,
WSGITestExecutor,
)
from bamboo.api import JsonApiData
from bamboo.request import http
from bamboo.sticky.http import data_format
from bamboo.util.string import rand_string
from ... import get_log_name
from ...asgi_util import ASGIServerForm, ASGITestExecutor
app_asgi = ASGIApp()
app_wsgi = WSGIApp()
PATH_ASGI_SERVER_LOG = get_log_name(__file__, "asgi")
PATH_WSGI_SERVER_LOG = get_log_name(__file__, "wsgi")
class UserJsonApi(JsonApiData):
    """JSON API schema for a single user record."""
    user_id: str
    name: str
    email: str
    age: int
class TestJsonApi(JsonApiData):
    """JSON API schema for a list of users plus its length."""
    users: t.List[UserJsonApi]
    total: int
def make_user_api_data() -> t.Dict[str, t.Any]:
    """Generate one random user payload matching the UserJsonApi schema."""
    return {
        "user_id": rand_string(10),
        "name": rand_string(10),
        "email": rand_string(20),
        "age": random.randint(0, 100),
    }
def make_test_api_data() -> t.Dict[str, t.Any]:
    """Generate a random payload matching the TestJsonApi schema."""
    count = random.randint(0, 100)
    return {
        "users": [make_user_api_data() for _ in range(count)],
        "total": count,
    }
@app_asgi.route()
class TestASGIHTTPEndpoint(ASGIHTTPEndpoint):
    """ASGI endpoint exercising @data_format on GET/POST/DELETE."""
    @data_format(input=None, output=TestJsonApi)
    async def do_GET(self) -> None:
        # No request body expected; respond with a random TestJsonApi payload.
        assert await self.body == b""
        self.send_json(make_test_api_data())

    @data_format(input=TestJsonApi, output=None)
    async def do_POST(self, rec_body: TestJsonApi) -> None:
        # Body is validated against TestJsonApi before this method runs.
        self.send_only_status()

    @data_format(input=JsonApiData, output=None, is_validate=False)
    async def do_DELETE(self) -> None:
        # Validation disabled: any JSON body is accepted.
        self.send_only_status()
@app_wsgi.route()
class TestWSGIEndpoint(WSGIEndpoint):
    """WSGI endpoint mirroring TestASGIHTTPEndpoint's @data_format handlers."""
    @data_format(input=None, output=TestJsonApi)
    def do_GET(self) -> None:
        # No request body expected; respond with a random TestJsonApi payload.
        assert self.body == b""
        self.send_json(make_test_api_data())

    @data_format(input=TestJsonApi, output=None)
    def do_POST(self, rec_body: TestJsonApi) -> None:
        # Body is validated against TestJsonApi before this method runs.
        self.send_only_status()

    @data_format(input=JsonApiData, output=None, is_validate=False)
    def do_DELETE(self) -> None:
        # Validation disabled: any JSON body is accepted.
        self.send_only_status()
class TestStickyDataFormat(unittest.TestCase):
    """Integration tests: run both apps on local ports and hit them over HTTP."""

    @classmethod
    def setUpClass(cls) -> None:
        # Launch one ASGI and one WSGI server for the whole test class.
        form_asgi = ASGIServerForm("", 8000, app_asgi, PATH_ASGI_SERVER_LOG)
        form_wsgi = WSGIServerForm("", 8001, app_wsgi, PATH_WSGI_SERVER_LOG)
        cls.executor_asgi = ASGITestExecutor(form_asgi).start_serve()
        cls.executor_wsgi = WSGITestExecutor(form_wsgi).start_serve()
        cls.uri_asgi = "http://localhost:8000"
        cls.uri_wsgi = "http://localhost:8001"

    @classmethod
    def tearDownClass(cls) -> None:
        # Shut both servers down after all tests in the class.
        cls.executor_asgi.close()
        cls.executor_wsgi.close()

    def test_asgi_output(self):
        # GET: response body must deserialize into the declared output schema.
        with http.get(self.uri_asgi) as res:
            self.assertTrue(res.ok)
            api_data = res.attach(TestJsonApi)
            self.assertIsInstance(api_data, TestJsonApi)

    def test_asgi_input_validation(self):
        # POST: schema-valid body must be accepted.
        data = make_test_api_data()
        with http.post(self.uri_asgi, json=data) as res:
            self.assertTrue(res.ok)

    def test_asgi_input_no_validation(self):
        # DELETE: endpoint skips validation, so any JSON body is accepted.
        data = make_test_api_data()
        with http.delete(self.uri_asgi, json=data) as res:
            self.assertTrue(res.ok)

    def test_wsgi_output(self):
        # Same as test_asgi_output, against the WSGI server.
        with http.get(self.uri_wsgi) as res:
            self.assertTrue(res.ok)
            api_data = res.attach(TestJsonApi)
            self.assertIsInstance(api_data, TestJsonApi)

    def test_wsgi_input_validation(self):
        data = make_test_api_data()
        with http.post(self.uri_wsgi, json=data) as res:
            self.assertTrue(res.ok)

    def test_wsgi_input_no_validation(self):
        data = make_test_api_data()
        with http.delete(self.uri_wsgi, json=data) as res:
            self.assertTrue(res.ok)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
'''
Created on Aug 27, 2015
@author: ash
'''
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import shapefile
# urlShpFile = "/home/ash/Data/tl_2014_39049_roads/tl_2014_39049_roads.shp"
urlShpFile = "/home/ash/Data/tl_2013_06_prisecroads/tl_2013_06_prisecroads.shp"
def synGraph():
    """Build a random 50-node graph, draw it, then draw a BFS tree rooted at node 10.

    Fix: ``xrange`` is Python-2-only (NameError on Python 3); use ``range``.
    Stray ``pass`` statements removed.
    """
    G = nx.Graph()
    N = 50
    G.add_nodes_from(range(N))
    # Add N edges; node `count` gets one random neighbor.  A self-loop draw
    # (a == b) is simply retried until a distinct target comes up.
    count = 0
    while count < N:
        a = count
        b = np.random.randint(N)
        if a != b:
            G.add_edge(a, b)
            count += 1
    plt.figure(1)
    nx.draw_networkx(G)
    # plt.show()
    dfsT = nx.bfs_tree(G, source=10)
    plt.figure(2)
    nx.draw_networkx(dfsT)
    plt.show()
def findTree(roadGraph, node, shpLayout):
    """Draw the DFS tree of *roadGraph* rooted at *node* using the shapefile layout.

    Fix: removed the unused local (``tnodes``) and dead commented-out code.
    """
    T = nx.dfs_tree(roadGraph, node)
    plt.figure(1, figsize=(12, 12))
    nx.draw_networkx(T, pos=shpLayout, node_size=1, node_shape='d', alpha=0.25,
                     node_color='r', edge_width=1, edge_color='b',
                     with_labels=False)
    plt.show()
def syn2graph():
    """Load a road shapefile, relabel its nodes to integers, draw the graph,
    and explore DFS trees rooted at every node with more than 3 neighbors.

    Fixes: ``xrange`` and the ``print`` statement are Python-2-only; node and
    neighbor accessors are wrapped in ``list()`` because networkx >= 2 returns
    views/iterators that cannot be indexed or ``len()``-ed directly.
    """
    roadGraphd = nx.read_shp(urlShpFile)
    roadGraph = roadGraphd.to_undirected()
    nodeList = list(roadGraph.nodes(data=True))
    _nodeList = list(roadGraph.nodes(data=False))
    nNode = len(nodeList)
    # Node identifiers in a read_shp graph are coordinate tuples; keep them
    # as drawing positions and map each to a small integer id.
    _pos = [nodeList[i][0] for i in range(nNode)]
    nodedict = dict(zip(_nodeList, range(nNode)))
    G = nx.Graph()
    G.add_nodes_from(range(nNode))
    for _edge in nx.to_edgelist(roadGraph):
        G.add_edge(nodedict[_edge[0]], nodedict[_edge[1]])
    shpLayout = dict(zip(G, _pos))
    plt.figure(1)
    nx.draw_networkx(G, pos=shpLayout, node_size=1, edge_width=1,
                     with_labels=False)
    plt.show()
    for node in list(G.nodes(data=False)):
        _n = list(G.neighbors(node))
        print(len(_n), node)
        if len(_n) > 3:
            findTree(G, node, shpLayout)
def trypyshp():
    """Open the Franklin County roads shapefile with pyshp (smoke test only)."""
    shpf = shapefile.Reader('/home/ash/Data/tl_2014_39049_roads/tl_2014_39049_roads')
# Entry point: build and explore the road graph.  The other experiments
# (synGraph, trypyshp) are kept around but disabled.
if __name__ == '__main__':
    # synGraph()
    syn2graph()
    # trypyshp()
    pass
from django.apps import AppConfig
class TranspilerConfig(AppConfig):
    """Django application configuration for the `transpiler` app."""
    # default_auto_field = 'django.db.models.BigAutoField'
    name = 'transpiler'
|
from bitstring import Bits, BitString, BitArray, ConstBitStream
import base64
class Addr:
    """A Game Boy ROM address as a (bank, in-bank offset) pair.

    Banks >= 1 are the switchable banks mapped at 0x4000-0x7FFF, so their
    in-bank offset already includes the 0x4000 base (see convert_to_addr).

    Fixes: convert_to_addr was annotated ``-> None`` but returns an Addr;
    ``__add__``/``__sub__`` raised UnboundLocalError for unsupported operand
    types (now return NotImplemented); defining ``__eq__`` without
    ``__hash__`` made instances unhashable (``__hash__`` added, consistent
    with ``__eq__``).
    """

    def __init__(self, bank, addr) -> None:
        self.bank = bank
        self.addr = addr

    def absolute_pos(self) -> int:
        """Return the flat ROM-file offset of this address.

        NOTE(review): for bank 0 this yields ``addr - BANK_SIZE``, which looks
        wrong for true bank-0 addresses; no caller in this file takes
        absolute_pos() of a bank-0 Addr — confirm before relying on it.
        """
        return ((self.bank - 1) * BANK_SIZE) + self.addr

    @classmethod
    def convert_to_addr(cls, long_addr) -> "Addr":
        """Build an Addr from a flat ROM-file offset (inverse of absolute_pos)."""
        bank = int(long_addr / BANK_SIZE)
        # Banked addresses carry the 0x4000 memory-map base in their offset.
        addr = (long_addr % BANK_SIZE) + (BANK_SIZE if bank > 0 else 0)
        return cls(bank, addr)

    def __str__(self) -> str:
        return f"{self.bank:#04X}:{self.addr:04X}"

    def __add__(self, other):
        if isinstance(other, int):
            diff = other
        elif isinstance(other, Addr):
            # Distance between the two addresses (always non-negative).
            diff = abs(self.absolute_pos() - other.absolute_pos())
        else:
            return NotImplemented
        return self.convert_to_addr(self.absolute_pos() + diff)

    def __sub__(self, other):
        if isinstance(other, int):
            diff = other
        elif isinstance(other, Addr):
            # NOTE(review): abs() makes Addr - Addr symmetric; preserved as-is.
            diff = abs(self.absolute_pos() - other.absolute_pos())
        else:
            return NotImplemented
        return self.convert_to_addr(self.absolute_pos() - diff)

    def __eq__(self, other) -> bool:
        return self.absolute_pos() == other.absolute_pos()

    def __hash__(self) -> int:
        # Keep hash consistent with __eq__ (equal addresses hash equal).
        return hash(self.absolute_pos())

    def __gt__(self, other) -> bool:
        return self.absolute_pos() > other.absolute_pos()

    def __lt__(self, other) -> bool:
        return self.absolute_pos() < other.absolute_pos()

    def __ge__(self, other) -> bool:
        return self.absolute_pos() >= other.absolute_pos()

    def __le__(self, other) -> bool:
        return self.absolute_pos() <= other.absolute_pos()

    def __ne__(self, other) -> bool:
        return self.absolute_pos() != other.absolute_pos()
class GBDataPacket:
    """A run of fixed-width unsigned values read from the ROM, tagged with its
    starting address.  ``packet_size`` is the width of each value in bits."""

    def __init__(self, addr, packet_size, data) -> None:
        self.addr = addr
        self.packet_size = packet_size
        self.data = data

    @classmethod
    def get_static_data(cls, addr, packet_size, length):
        """Read *length* packets of *packet_size* bits starting at *addr*."""
        ROM.bytepos = addr.absolute_pos()
        values = ROM.readlist([f'uint:{packet_size}'] * length)
        return cls(addr, packet_size, values)

    @classmethod
    def get_var_data(cls, addr, packet_size, target, bytealigned=True):
        """Read packets from *addr* up to (and including) the *target* pattern."""
        ROM.bytepos = addr.absolute_pos()
        raw = ROM.readto(target, bytealigned)
        values = raw.readlist([f'uint:{packet_size}'] * int(raw.len / packet_size))
        return cls(addr, packet_size, values)

    def collapse(self, rev=False) -> int:
        """Fold the packet list into a single integer, optionally reversing
        the packet order first (e.g. for little-endian pointers)."""
        source = reversed(self.data) if rev else self.data
        result = 0
        for value in source:
            result = (result << self.packet_size) + value
        return result

    def __str__(self) -> str:
        body = " ".join(f"{value:02x}" for value in self.data)
        return f"{self.addr} {body}"

    def raw_dump(self) -> str:
        """Hex-dump the packet, 16 values per row, with a summary header."""
        n = len(self.data)
        out = f"Start:{self.addr} Finish:{self.addr+n} Length:{n} 2BPP:{n/16:0.0f} 1BPP:{n/8:0.0f}\n"
        rows = []
        for i in range(n // 16):
            hexes = ' '.join(f"{value:02X}" for value in self.data[16*i:(16*i)+16])
            rows.append(f"{(i*16):#07X} {hexes}")
        out += '\n'.join(rows)
        if n % 16 != 0:
            tail = ' '.join(f"{value:02X}" for value in self.data[len(rows)*16:])
            out += f"\n{len(rows)*16:#07X} {tail}"
        return out

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index) -> int:
        return self.data[index]
class Sprite:
    """A decoded Game Boy sprite: a grid of 2-bit pixels plus its ROM location.

    ``width`` and ``height`` appear to be in 8x8 tiles (parse_pkmn_sprite
    reads them from two 4-bit header fields — TODO confirm); ``data`` is a
    list of pixel rows, each pixel a 0..3 palette index.
    """
    def __init__(self,addr,width,height,data) -> None:
        self.addr = addr
        self.width = width
        self.height = height
        self.data = data
    def __str__(self):
        return f"[Loc: {self.addr} => Width: {self.width}, Height: {self.height}]"
    def to_json(self) -> dict:
        """Serialize to a JSON-friendly dict with base64-packed pixel data."""
        return {'width': self.width, 'height': self.height, 'data': self.to_base64()}
    @classmethod
    def __expandRLEPacket(cls, bit_length, value) -> BitString:
        # An RLE pair (length field, value) expands to bit_length+value+1
        # zero pixel-pairs, i.e. twice that many zero bits.
        return BitString((bit_length+value+1)*2)
    @classmethod
    def __findRLEBoundry(cls, sprite_data) -> Bits:
        # The RLE length field is a run of 1-bits terminated by a single 0.
        length_found = sprite_data.readto('0b0')
        return length_found
    @classmethod
    def __mode1(cls,bit_planes,width) -> list:
        # Decompression mode 1: delta-decode both planes independently.
        bit_planes[1] = cls.__deltaDecode(bit_planes[1],width)
        bit_planes[0] = cls.__deltaDecode(bit_planes[0],width)
        return bit_planes
    @classmethod
    def __mode2(cls,bit_planes,width) -> list:
        # Mode 2: delta-decode plane 1, then XOR plane 0 against it.
        bit_planes[1] = cls.__deltaDecode(bit_planes[1],width)
        bit_planes[0] = bit_planes[0] ^ bit_planes[1]
        return bit_planes
    @classmethod
    def __mode3(cls,bit_planes,width) -> list:
        # Mode 3: delta-decode both planes, then XOR plane 0 against plane 1.
        bit_planes[1] = cls.__deltaDecode(bit_planes[1],width)
        bit_planes[0] = cls.__deltaDecode(bit_planes[0],width)
        bit_planes[0] = bit_planes[0] ^ bit_planes[1]
        return bit_planes
    @classmethod
    def ___translate(cls, arr,row_num,coloumn_num):
        # NOTE(review): apparently unused; the index expression
        # `(row*col)+row+i` looks suspect (ignores the column stride) —
        # confirm before reusing this helper.
        matrix = [[0 for x in range(coloumn_num)] for y in range(row_num)]
        for row in range(row_num):
            for col in range(int(coloumn_num/8)):
                for i in range(8):
                    matrix[row][col+i]=arr[(row*col)+row+i]
        return matrix
    @classmethod
    def __fillMatrix(cls, arr,row_num, coloumn_num) -> BitArray:
        #Array math is hard touch numbers at own risk
        # Rearranges a column-ordered bit plane into row order, two bits per
        # pixel; returns one BitArray with the rows concatenated.
        matrix = [[0 for x in range(coloumn_num*4)] for y in range(row_num*8)]
        for row in range(row_num*8):
            for col in range(coloumn_num*4):
                matrix[row][col]=(''.join(arr[((col*row_num*16)+(row*2)):((col*row_num*16)+(row*2))+2].bin))
            matrix[row] = ''.join(matrix[row])
        output = BitArray()
        for out_row in matrix:
            output.append('0b'+out_row)
        return output
    @classmethod
    def __bufferToList(cls, arr, row_num, coloumn_num) -> list:
        #1 byte per row per tile
        #1 byte per coloumn per tile
        # Splits a flat bit plane into row_num*8 rows of coloumn_num*8 ints.
        bufList = [0] * row_num*BYTE
        column_bits = coloumn_num*BYTE
        for row in range(row_num*BYTE):
            bufList[row]=list(map(int,(','.join(arr[(row*column_bits):((row*column_bits)+column_bits)].bin).split(','))))
        return bufList
    @classmethod
    def __combineBuffers(cls,bit_planes,high_bit_plane) -> list:
        # Merge the two 1-bit planes into one 2-bit-per-pixel matrix, with
        # high_bit_plane selecting which plane supplies the high bit.
        result = [[(bit_planes[high_bit_plane][i][j]<<1) + bit_planes[high_bit_plane^1][i][j] for j in range(len(bit_planes[high_bit_plane][0]))] for i in range(len(bit_planes[1]))]
        return result
    @classmethod
    def __fillTileMatrix(cls, arr, sprite_height_tiles, sprite_width_tiles) -> list:
        # Reorders tile-sequential bits into screen-row order: each output row
        # stitches together the matching row of every tile in a tile-row.
        tile_side_px = 8
        tile_size = tile_side_px*tile_side_px
        out = []
        for tile_row in range (sprite_height_tiles):
            for row in range(tile_side_px):
                temp = []
                for col in range (sprite_width_tiles):
                    temp.extend(arr[((tile_row*tile_size*sprite_width_tiles)+(col*tile_size)+(row*tile_side_px)):((tile_row*tile_size*sprite_width_tiles)+(col*tile_size)+(row*tile_side_px))+tile_side_px])
                out.append(temp)
        return out
    def print_pixels(self):
        """Print the pixel matrix as comma-separated palette indices."""
        for row in self.data:
            print(','.join(map(str,row)))
    def __to_bignum(self) -> int:
        # Pack the whole pixel matrix into one big integer, 2 bits per pixel,
        # first pixel in the most significant position.
        output = 0
        for row in self.data:
            for col in row:
                output = output << 2
                output += col
        return output
    def to_base64(self) -> str:
        """Return the pixel data packed 2bpp and base64-encoded."""
        num = self.__to_bignum()
        num_bytes = num.to_bytes((int(self.height*self.width*TWO_BPP_TILE_SIZE)),'big')
        return base64.b64encode(num_bytes).decode()
    @classmethod
    def __deltaDecode(cls, arr, width) -> BitArray:
        # Delta/XOR decoding: each output bit toggles the running bit when the
        # input bit is 1; the running bit resets at the start of every row.
        output = BitArray()
        currentBit = 0
        for index, bit in enumerate(arr):
            if index % (width*8) == 0:
                currentBit = 0
            if bit:
                currentBit = (currentBit ^ 1)
            output.append('0b%s' % currentBit)
        return output
    @classmethod
    def __parseData(cls, packet_type, width, height, bit_plane):
        """Decompress one bit plane from ROM (RLE packets of zero pairs
        alternating with literal 2-bit data packets) until it is full."""
        while bit_plane.len < (width*height*ONE_BPP_TILE_SIZE*BYTE):
            if packet_type == 0:
                # RLE packet: a length field then a value of the same width.
                length = cls.__findRLEBoundry(ROM)
                value = ROM.read((f"uint:{length.len}"))
                zero_bits = cls.__expandRLEPacket(length.uint,value)
                bit_plane.append(zero_bits)
                packet_type = 1
            else:
                # Data packet: literal pixel pairs; '00' switches back to RLE.
                data_packet = ROM.read('bin:2')
                if data_packet != '00':
                    bit_plane.append('0b'+data_packet)
                else:
                    packet_type = 0
    @classmethod
    def parse_pkmn_sprite(cls, addr) -> "Sprite":
        """Decode the RLE/delta-compressed Pokémon sprite stored at *addr*."""
        ROM.bytepos = addr.absolute_pos()
        # 4-bit width and height (in tiles), then the plane-order bit and the
        # first packet-type bit.
        width = ROM.read('uint:4')
        height = ROM.read('uint:4')
        high_bit_plane = ROM.read('uint:1')
        packet_type = ROM.read('uint:1')
        bit_planes = [BitArray(), BitArray()]
        cls.__parseData(packet_type,width,height,bit_planes[1])
        # Zip mode is 1 bit when 0, otherwise a 2-bit code.
        zip_mode = -1
        if ROM.peek('uint:1') == 0:
            zip_mode = ROM.read('uint:1')
        else:
            zip_mode = ROM.read('uint:2')
        packet_type = ROM.read('uint:1')
        cls.__parseData(packet_type,width,height,bit_planes[0])
        bit_planes[0] = cls.__fillMatrix(bit_planes[0],width,height)
        bit_planes[1] = cls.__fillMatrix(bit_planes[1],width,height)
        if zip_mode == 0:
            bit_planes = cls.__mode1(bit_planes,width)
        elif zip_mode == 2:
            bit_planes = cls.__mode2(bit_planes,width)
        else:
            bit_planes = cls.__mode3(bit_planes,width)
        bit_planes[0] = cls.__bufferToList(bit_planes[0],width,height)
        bit_planes[1] = cls.__bufferToList(bit_planes[1],width,height)
        sprite_data = cls.__combineBuffers(bit_planes,high_bit_plane)
        return cls(addr,width,height,sprite_data)
    @classmethod
    def decode1BPP(cls,addr,width,height):
        """Decode an uncompressed 1bpp image: each byte feeds both planes, so
        every pixel comes out as 0 or 3 (black/white)."""
        ROM.bytepos = addr.absolute_pos()
        bit_planes = [BitArray(), BitArray()]
        for i in range(width*height*BYTE):
            # peek then read: the same byte lands in both planes.
            bit_planes[0].append(ROM.peek('bits:8'))
            bit_planes[1].append(ROM.read('bits:8'))
        for i in range(2):
            bit_planes[i] = cls.__fillTileMatrix(bit_planes[i],height,width)
        sprite_data = cls.__combineBuffers(bit_planes,1)
        return cls(addr,width,height,sprite_data)
    @classmethod
    def decode2BPP(cls,addr,width,height):
        """Decode an uncompressed 2bpp image (interleaved plane bytes)."""
        ROM.bytepos = addr.absolute_pos()
        bit_planes = [BitArray(), BitArray()]
        for i in range(width*height*BYTE*2):
            bit_planes[0].append(ROM.read('bits:8'))
            bit_planes[1].append(ROM.read('bits:8'))
        for i in range(2):
            bit_planes[i] = cls.__fillTileMatrix(bit_planes[i],height,width)
        sprite_data = cls.__combineBuffers(bit_planes,1)
        return cls(addr,width,height,sprite_data)
    @classmethod
    def decode_base64_sprite(cls, base64_sprite,width,height):
        """Rebuild a Sprite from the base64 2bpp payload made by to_base64."""
        decoded_sprite_bytes = base64.b64decode(base64_sprite)
        print(base64_sprite)
        sprite_array = []
        for data in decoded_sprite_bytes:
            # Unpack 4 pixels per byte, most significant pair first.
            for i in range(3,-1,-1):
                sprite_array.append((data >> (i*2)) & 0b11)
        sprite = []
        for i in range(0,int(len(sprite_array)),width*8):
            sprite.append(sprite_array[i:i+(width*8)])
        return cls(Addr(0,0),width,height,sprite)
class GBText:
    """Decode a GBDataPacket of Gen-1 text bytes into a Python string using
    the game's character map.  Unknown codes decode to None."""
    # Byte value that terminates in-game strings (rendered here as '@').
    STRING_END = 0x50
    # Gen-1 character map.  NOTE(review): 0x75 and 0x7F each appear twice
    # below with the same glyph, so the duplicates are harmless (last wins).
    ALPHABET = {
        0x00: "", #charmap "<NULL>"
        0x49: "^", #charmap "<PAGE>"
        #charmap "<PKMN>", # "<PK><MN>"
        #charmap "<_CONT>", # implements "<CONT>"
        #charmap "<SCROLL>", $4c
        0x4E: "<", #Next
        0x4F: " ",
        0x57: "#",
        0x50: "@", #charmap "@" string terminator
        0x51: "*",
        0x52: "A1",
        0x53: "A2",
        0x54: "POKé", #This is fine to leave multichar as it was only short hand for all four characters anyway
        0x55: "+",
        0x58: "$",
        0x5F: "}", #charmap "<DEXEND>"
        0x75: "…",
        0x7F: " ",
        0x80: "A",
        0x81: "B",
        0x82: "C",
        0x83: "D",
        0x84: "E",
        0x85: "F",
        0x86: "G",
        0x87: "H",
        0x88: "I",
        0x89: "J",
        0x8A: "K",
        0x8B: "L",
        0x8C: "M",
        0x8D: "N",
        0x8E: "O",
        0x8F: "P",
        0x90: "Q",
        0x91: "R",
        0x92: "S",
        0x93: "T",
        0x94: "U",
        0x95: "V",
        0x96: "W",
        0x97: "X",
        0x98: "Y",
        0x99: "Z",
        0x9A: "(",
        0x9B: ")",
        0x9C: ":",
        0x9D: ";",
        0x9E: "[",
        0x9F: "]",
        0xA0: "a",
        0xA1: "b",
        0xA2: "c",
        0xA3: "d",
        0xA4: "e",
        0xA5: "f",
        0xA6: "g",
        0xA7: "h",
        0xA8: "i",
        0xA9: "j",
        0xAA: "k",
        0xAB: "l",
        0xAC: "m",
        0xAD: "n",
        0xAE: "o",
        0xAF: "p",
        0xB0: "q",
        0xB1: "r",
        0xB2: "s",
        0xB3: "t",
        0xB4: "u",
        0xB5: "v",
        0xB6: "w",
        0xB7: "x",
        0xB8: "y",
        0xB9: "z",
        0xBA: "é",
        0xBB: u"\u1E0B", #ḋ to represent 'd as one letter
        0xBC: u"\u013A", #ĺ to represent 'l as one letter
        0xBD: u"\u1E61", #ṡ to represent 's as one letter
        0xBE: u"\u1E6B", #ṫ to represent 't as one letter
        0xBF: u"\u1E7F", #ṿ to represent 'v as one letter
        0xE0: "'",
        0xE1: u"\u1D18", #ᴘ to represent PK as one letter
        0xE2: u"\u1D0D", #ᴍ to represent MN as one letter
        0xE3: "-",
        0xE4: u"\u1E59", #ṙ to represent 'r as one letter
        0xE5: u"\u1E41", #ṁ to represent 'm as one letter
        0xE6: "?",
        0xE7: "!",
        0xE8: ".",
        0xEC: "=",
        0xED: ">",
        0xEE: "_",
        0xEF: "♂",
        0x60: "<BOLD_A>", # unused
        0x61: "<BOLD_B>", # unused
        0x62: "<BOLD_C>", # unused
        0x63: "<BOLD_D>", # unused
        0x64: "<BOLD_E>", # unused
        0x65: "<BOLD_F>", # unused
        0x66: "<BOLD_G>", # unused
        0x67: "<BOLD_H>", # unused
        0x68: "<BOLD_I>", # unused
        0x69: "<BOLD_V>",
        0x6A: "<BOLD_S>",
        0x6B: "<BOLD_L>", # unused
        0x6C: "<BOLD_M>", # unused
        0x6D: "<COLON>", # colon with tinier dots than ":"
        0x6E: "ぃ", # hiragana small i, unused
        0x6F: "ぅ", # hiragana small u, unused
        0x70: "‘", # opening single quote
        0x71: "’", # closing single quote
        0x72: "“", # opening quote
        0x73: "”", # closing quote
        0x74: "·", # middle dot, unused
        0x75: "…", # ellipsis
        0x76: "ぁ", # hiragana small a, unused
        0x77: "ぇ", # hiragana small e, unused
        0x78: "ぉ", # hiragana small o, unused
        0x79: "┌",
        0x7A: "─",
        0x7B: "┐",
        0x7C: "│",
        0x7D: "└",
        0x7E: "┘",
        0x7F: " ",
        0xF0: "¥",
        0xF1: "×",
        0xF2: "<DOT>",
        0xF3: "/",
        0xF4: ",",
        0xF5: "♀",
        0xF6: "0",
        0xF7: "1",
        0xF8: "2",
        0xF9: "3",
        0xFA: "4",
        0xFB: "5",
        0xFC: "6",
        0xFD: "7",
        0xFE: "8",
        0xFF: "9"
    }
    def decodeText(self) -> list:
        """Map each byte of the packet to its glyph; unmapped bytes -> None."""
        return list(map(self.ALPHABET.get, self.packet.data))
    def __init__(self,packet) -> None:
        # packet: a GBDataPacket of 8-bit character codes.
        self.packet = packet
        self.text = self.decodeText()
    def __str__(self):
        # NOTE(review): join fails if any byte was unmapped (None in text).
        return "".join(self.text).strip('@')
    def __len__(self):
        return len(self.packet)
#Constants that have hard pointers in Red/Blue
# The whole ROM, opened once as an immutable bit stream shared by all readers.
ROM = ConstBitStream(filename='pokered.gbc')
BANK_SIZE = 0x4000  # 16 KiB per switchable ROM bank
TWO_BPP_TILE_SIZE = 16  # bytes per 8x8 tile at 2 bits per pixel
ONE_BPP_TILE_SIZE = 8   # bytes per 8x8 tile at 1 bit per pixel
BYTE = 8
BIT = 1
NYBBLE = 4
TWO_BPP = 2
ONE_BPP = 1
POKEMON_NAME_LENGTH = 10
END_FILE = Addr.convert_to_addr(ROM.len/8)
# Bank:offset locations of the data tables used below.
POKEDEX_ORDER_POINTER = Addr(0x10,0x5024)
POKEDEX_ENTRY_POINTER = Addr(0x10,0x447e)
POKEMON_DATA_POINTER = Addr(0X0E,0x43DE)
POKEMON_NAME_POINTER = Addr(0x07,0x421e)
MOVE_NAME_POINTER = Addr(0x2C,0x4000)
MOVES_DATA_POINTER = Addr(0x0E,0x4000)
TM_HM_LIST_POINTER = Addr(0x04,0x7773)
FONT_START_POINTER = Addr(0x04,0x5a80)
EVO_TABLE_POINTER = Addr(0x0E,0x705C)
datamap = {'Index to Pokedex': [],
           'Pokedex Entry Loc': [],
           'EVO Table': []
           }
# 190 entries (380 bytes of 2-byte little-endian pointers) — presumably the
# internal index space of Red/Blue; TODO confirm the count.
for i in range(0,380,2):
    datamap["Pokedex Entry Loc"].append(GBDataPacket.get_static_data(POKEDEX_ENTRY_POINTER+i,BYTE,2).collapse(rev=True))
    datamap["Index to Pokedex"].append(GBDataPacket.get_static_data(POKEDEX_ORDER_POINTER+int(i/2),BYTE,1).collapse())
    datamap['EVO Table'].append(GBDataPacket.get_static_data(EVO_TABLE_POINTER+i,BYTE,2).collapse(rev=True))
import multiprocessing
from myFunc import myFunc
if __name__ == '__main__':
    # Fix: the original joined each process immediately after starting it,
    # which serializes the six workers.  Start them all first, then join.
    processes = [multiprocessing.Process(target=myFunc, args=(i,))
                 for i in range(6)]
    for process in processes:
        process.start()
    for process in processes:
        process.join()
|
import scrapy
from scrapy.spiders import CrawlSpider
from scrapy_app.spider_common import common_parser
class CrawlItem(scrapy.Item):
    """One scraped anchor: its text (name) and href target (link)."""
    name = scrapy.Field()
    link = scrapy.Field()
# default spider for retrieve href in the given URL
class CrawlerxSpider(CrawlSpider):
name = 'crawlerx'
def __init__(self, *args, **kwargs):
self.url = kwargs.get('url')
self.domain = kwargs.get('domain')
self.start_urls = [self.url]
self.allowed_domains = [self.domain]
self.settings = kwargs.get('settings')
super(CrawlerxSpider, self).__init__(*args, **kwargs)
def parse(self, response):
parsed_item = common_parser(self.settings)
crawled_data = []
for sel in response.xpath('//a'):
item = CrawlItem()
item['name'] = sel.xpath('text()').extract()
item['link'] = sel.xpath('@href').extract()
crawled_data.append(item)
parsed_item['data'] = crawled_data
yield parsed_item
|
from conans import ConanFile, CMake, tools
from conans.util import files
import yaml
class Bcm2837Conan(ConanFile):
    """Conan recipe for the Hiventive BCM2837 SoC model."""
    name = "bcm2837"
    # Fix: yaml.load() without an explicit Loader is deprecated and can
    # construct arbitrary Python objects; settings.yml is plain data, so
    # safe_load is the correct call.
    version = str(yaml.safe_load(tools.load("settings.yml"))['project']['version'])
    license = "MIT"
    url = "git@git.hiventive.com:socs/broadcom/bcm2837.git"
    description = "BCM2837"
    settings = "cppstd", "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [
        True, False], "fPIE": [True, False]}
    default_options = "shared=False", "fPIC=False", "fPIE=False"
    generators = "cmake"
    exports = "settings.yml"
    exports_sources = "src/*", "CMakeLists.txt"
    requires = "gtest/1.8.0@hiventive/stable", \
        "communication/0.1.1@hiventive/testing", \
        "pl011/0.1.1@hiventive/testing", \
        "bcm2836-control/0.1.0@hiventive/testing", \
        "bcm2835-armctrl-ic/0.2.0@hiventive/testing", \
        "memory/0.1.0@hiventive/testing", \
        "bcm2835-gpio/0.2.0@hiventive/testing", \
        "button/0.1.0@hiventive/testing", \
        "led/0.1.0@hiventive/testing", \
        "uart-backend/0.1.0@hiventive/testing", \
        "button-backend/0.1.0@hiventive/testing", \
        "led-backend/0.1.0@hiventive/testing", \
        "qmg2sc/0.6.0@hiventive/testing"

    def configure(self):
        """Force the AArch64 target on the qmg2sc dependency."""
        self.options["qmg2sc"].target_aarch64 = True

    def _configure_cmake(self):
        """Create the CMake helper; enable PIC on non-Windows when requested."""
        cmake = CMake(self)
        if self.settings.os != "Windows":
            cmake.definitions["CMAKE_POSITION_INDEPENDENT_CODE"] = self.options.fPIC or self.options.fPIE
        return cmake

    def build(self):
        """Configure and build with CMake."""
        cmake = self._configure_cmake()
        cmake.configure()
        cmake.build()

    def package(self):
        """Install CMake artifacts and copy public headers into the package."""
        cmake = self._configure_cmake()
        cmake.install()
        self.copy("*.h", dst="include", src="src")
        self.copy("*.hpp", dst="include", src="src")

    def package_info(self):
        """Expose the built library to consumers."""
        self.cpp_info.libs = ["bcm2837"]
|
#!/usr/bin/env python3
"""
SCRIPT: plot_semivariogram.py
Script for plotting empirical and fitted semivariograms based on data from
procOBA_NWP or procOBA_Sat, plus fit_semivariogram.py. For interactive
use.
REQUIREMENTS:
* Python 3
* Matplotlib
* Numpy
REVISION HISTORY:
20 Nov 2020: Eric Kemp. Initial specification.
"""
# Standard library
import configparser
import os
import sys
# Other libraries
import matplotlib.pyplot as plt
import numpy as np
import semivar
#------------------------------------------------------------------------------
def usage():
    """Print usage statement to standard out."""
    script_name = sys.argv[0]
    print(f"Usage: {script_name} CONFIGFILE PARAMFILE")
    print("   CONFIG is config file for this script")
    print("   PARAMFILE contains best-fit parameters from fit_semivariogram.py")
#------------------------------------------------------------------------------
def read_param_file(paramfile):
    """Read fitted semivariogram parameters for plotting.

    Each line of *paramfile* is "KEY: VALUE".  Returns the tuple
    (sigma2_gage, sigma2_back, L_back); any key missing from the file is
    returned as None.

    Fix: the file handle was opened and never closed (resource leak);
    it is now managed with a ``with`` block.
    """
    sigma2_gage = None
    sigma2_back = None
    L_back = None
    with open(paramfile, "r") as infile:
        for line in infile:
            key, value = line.split(":")
            value = float(value)
            if key == "SIGMA2_obs":
                sigma2_gage = value
            elif key == "SIGMA2_back":
                sigma2_back = value
            elif key == "L_back":
                L_back = value
    return sigma2_gage, sigma2_back, L_back
#------------------------------------------------------------------------------
# Check command line
if len(sys.argv) != 3:
    print("[ERR] Bad command line arguments!")
    usage()
    sys.exit(1)
# Read config file
cfgfile = sys.argv[1]
if not os.path.exists(cfgfile):
    print("[ERR] Config file %s does not exist!" %(cfgfile))
    sys.exit(1)
config = configparser.ConfigParser()
config.read(cfgfile)
vario_filename, max_distance = semivar.read_input_section_cfg(config)
function_type = semivar.read_fit_section_cfg(config)
title, xlabel, ylabel, oblabel, bglabel = \
    semivar.read_plot_section_cfg(config)
# Get the param file
paramfile = sys.argv[2]
if not os.path.exists(paramfile):
    print("[ERR] Paramfile %s does not exist!" %(paramfile))
    sys.exit(1)
# Read the datafile (empirical semivariogram samples)
distvector, variovector, samplesize = \
    semivar.readdata(vario_filename, max_distance)
# Read the paramfile (best-fit parameters from fit_semivariogram.py)
sigma2_gage, sigma2_back, L_back = read_param_file(paramfile)
popt = [sigma2_gage, sigma2_back, L_back]
# Plot the semivariogram.  A zero-distance point with a NaN variogram value
# is prepended so the fitted curve is drawn from the origin while the data
# series shows no marker there.
distvector_tmp = np.array([0])
distvector = np.concatenate((distvector_tmp, distvector))
variovector_tmp = np.array([np.nan])
variovector = np.concatenate((variovector_tmp, variovector))
fit_func = semivar.fit_func_dict[function_type]
plt.plot(distvector, variovector, "b+",
         distvector, fit_func(distvector, *popt), "r")
# Annotate
fulltitle = "%s\n"%(title)
fulltitle += "Based on %s comparisons of innovations\n" %(samplesize)
plt.title(fulltitle)
plt.xlabel(r"%s" %(xlabel))
plt.ylabel(r"%s"%(ylabel))
plt.legend(["Data", "%s Best Fit" %(function_type)],
           loc='lower right')
# Show the fitted parameter values on the figure.
params = r"$\sigma_{%s}^2 = %f, \sigma_{%s}^2=%f, L_{%s} = %f$" \
    %(oblabel, sigma2_gage, bglabel, sigma2_back, bglabel, L_back)
plt.figtext(0.2, 0.9, params)
plt.grid(True)
plt.show()
|
import io
import os
import os.path as osp
import shutil
import warnings
import copy
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from torch.nn.modules.utils import _pair
from ...utils import get_random_string, get_shm_dir, get_thread_id
from ..builder import PIPELINES
from .loading import RawFrameDecode, SampleFrames
from .augmentations import Normalize
from collections.abc import Sequence
@PIPELINES.register_module()
class RawFrameDecode_WithDiff(RawFrameDecode):
    """RawFrameDecode variant that additionally loads the frames listed in
    ``results['frame_inds_diff']`` into ``results['imgs_diff']`` (RGB only),
    for pipelines that compute temporal differences downstream."""

    def __init__(self, io_backend='disk', decoding_backend='cv2', **kwargs):
        super().__init__(io_backend, decoding_backend, **kwargs)

    def __call__(self, results):
        """Perform the ``RawFrameDecode`` to pick frames given indices.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        mmcv.use_backend(self.decoding_backend)
        directory = results['frame_dir']
        filename_tmpl = results['filename_tmpl']
        modality = results['modality']
        # Lazily create the file client on first use.
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        imgs = list()
        if results['frame_inds'].ndim != 1:
            results['frame_inds'] = np.squeeze(results['frame_inds'])
        offset = results.get('offset', 0)
        for frame_idx in results['frame_inds']:
            frame_idx += offset
            if modality == 'RGB':
                filepath = osp.join(directory, filename_tmpl.format(frame_idx))
                img_bytes = self.file_client.get(filepath)
                # Get frame with channel order RGB directly.
                cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
                imgs.append(cur_frame)
            elif modality == 'Flow':
                x_filepath = osp.join(directory,
                                      filename_tmpl.format('x', frame_idx))
                y_filepath = osp.join(directory,
                                      filename_tmpl.format('y', frame_idx))
                x_img_bytes = self.file_client.get(x_filepath)
                x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
                y_img_bytes = self.file_client.get(y_filepath)
                y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
                imgs.extend([x_frame, y_frame])
            else:
                raise NotImplementedError
        # custom imgs_diff: decode the extra index list as raw RGB frames;
        # the actual differencing is left to a later transform.
        imgs_diff = list()
        # last_frame = None
        for frame_idx in results['frame_inds_diff']:
            frame_idx += offset
            filepath = osp.join(directory, filename_tmpl.format(frame_idx))
            img_bytes = self.file_client.get(filepath)
            # Get frame with channel order RGB directly.
            cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
            imgs_diff.append(cur_frame)
            # if last_frame is not None:
            #     imgs_diff.append(cur_frame - last_frame)
            # last_frame = cur_frame
        results['imgs_diff'] = imgs_diff
        results['imgs'] = imgs
        results['original_shape'] = imgs[0].shape[:2]
        results['img_shape'] = imgs[0].shape[:2]
        # we resize the gt_bboxes and proposals to their real scale
        if 'gt_bboxes' in results:
            h, w = results['img_shape']
            scale_factor = np.array([w, h, w, h])
            gt_bboxes = results['gt_bboxes']
            gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
            results['gt_bboxes'] = gt_bboxes
            if 'proposals' in results and results['proposals'] is not None:
                proposals = results['proposals']
                proposals = (proposals * scale_factor).astype(np.float32)
                results['proposals'] = proposals
        return results
@PIPELINES.register_module()
class SampleFrames_WithDiff(SampleFrames):
    """SampleFrames variant that also emits ``frame_inds_diff``: clip_len + 1
    indices spread evenly over the sampled span, so consecutive pairs can
    form clip_len temporal differences.

    Fixes: ``raise NotImplemented`` raised a TypeError (NotImplemented is not
    an exception) — replaced with NotImplementedError; ``np.int`` was removed
    in NumPy 1.24 — replaced with the equivalent builtin ``int``.
    """

    def __init__(self,
                 clip_len,
                 interval_diff=None,
                 frame_interval=1,
                 num_clips=1,
                 temporal_jitter=False,
                 twice_sample=False,
                 out_of_bound_opt='loop',
                 test_mode=False,
                 start_index=None,
                 ):
        super().__init__(clip_len, frame_interval, num_clips, temporal_jitter, twice_sample,
                         out_of_bound_opt, test_mode, start_index)
        # Custom spacing for the diff indices; only the default (None,
        # i.e. uniform spacing) is implemented so far.
        self.interval_diff = interval_diff

    def __call__(self, results):
        """Perform the SampleFrames loading.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        total_frames = results['total_frames']
        clip_offsets = self._sample_clips(total_frames)
        frame_inds = clip_offsets[:, None] + np.arange(
            self.clip_len)[None, :] * self.frame_interval
        frame_inds = np.concatenate(frame_inds)
        if self.temporal_jitter:
            perframe_offsets = np.random.randint(
                self.frame_interval, size=len(frame_inds))
            frame_inds += perframe_offsets
        frame_inds = frame_inds.reshape((-1, self.clip_len))
        if self.out_of_bound_opt == 'loop':
            frame_inds = np.mod(frame_inds, total_frames)
        elif self.out_of_bound_opt == 'repeat_last':
            safe_inds = frame_inds < total_frames
            unsafe_inds = 1 - safe_inds
            last_ind = np.max(safe_inds * frame_inds, axis=1)
            new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
            frame_inds = new_inds
        else:
            raise ValueError('Illegal out_of_bound option.')
        start_index = results['start_index']
        frame_inds = np.concatenate(frame_inds) + start_index
        if self.interval_diff is None:
            # clip_len + 1 evenly spaced indices over the sampled range.
            frame_inds_diff = np.around(np.linspace(frame_inds[0], frame_inds[-1], self.clip_len + 1))
            results['frame_inds_diff'] = frame_inds_diff.astype(int)
        else:
            raise NotImplementedError('custom interval_diff is not supported')
        results['frame_inds'] = frame_inds.astype(int)
        results['clip_len'] = self.clip_len
        results['frame_interval'] = self.frame_interval
        results['num_clips'] = self.num_clips
        return results
@PIPELINES.register_module()
class SampleFrames_Custom(SampleFrames):
    """SampleFrames variant with an optional total-frame offset and an
    alternative 'uniform' clip-offset sampling strategy for contrastive
    training.

    Fix: ``np.int`` was removed in NumPy 1.24 — replaced with the
    equivalent builtin ``int`` (same resulting dtype).
    """

    def __init__(self,
                 clip_len,
                 frame_interval=1,
                 num_clips=1,
                 total_frames_offset=0,
                 temporal_jitter=False,
                 twice_sample=False,
                 out_of_bound_opt='loop',
                 sampling_contrastive=None,
                 test_mode=False,
                 start_index=None,
                 ):
        super().__init__(clip_len, frame_interval, num_clips, temporal_jitter, twice_sample,
                         out_of_bound_opt, test_mode, start_index)
        # Added (or subtracted) from results['total_frames'] before sampling.
        self.total_frames_offset = total_frames_offset
        # None: standard per-clip intervals; 'uniform': each clip offset is
        # drawn over the whole usable range.
        self.sampling_contrastive = sampling_contrastive

    def _get_train_clips(self, num_frames):
        """Get clip offsets in train mode.

        It will calculate the average interval for selected frames,
        and randomly shift them within offsets between [0, avg_interval].
        If the total number of frames is smaller than clips num or origin
        frames length, it will return all zero indices.

        Args:
            num_frames (int): Total number of frame in the video.
        Returns:
            np.ndarray: Sampled frame indices in train mode.
        """
        ori_clip_len = self.clip_len * self.frame_interval
        if self.sampling_contrastive is None:
            avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
        elif self.sampling_contrastive == 'uniform':
            avg_interval = (num_frames - ori_clip_len + 1)
        else:
            raise NotImplementedError
        if avg_interval > 0:
            base_offsets = np.arange(self.num_clips) * avg_interval
            clip_offsets = base_offsets + np.random.randint(
                avg_interval, size=self.num_clips)
        elif num_frames > max(self.num_clips, ori_clip_len):
            clip_offsets = np.sort(
                np.random.randint(
                    num_frames - ori_clip_len + 1, size=self.num_clips))
        elif avg_interval == 0:
            ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
            clip_offsets = np.around(np.arange(self.num_clips) * ratio)
        else:
            clip_offsets = np.zeros((self.num_clips,), dtype=int)
        return clip_offsets

    def __call__(self, results):
        """Perform the SampleFrames loading.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        total_frames = results['total_frames'] + self.total_frames_offset
        clip_offsets = self._sample_clips(total_frames)
        frame_inds = clip_offsets[:, None] + np.arange(
            self.clip_len)[None, :] * self.frame_interval
        frame_inds = np.concatenate(frame_inds)
        if self.temporal_jitter:
            perframe_offsets = np.random.randint(
                self.frame_interval, size=len(frame_inds))
            frame_inds += perframe_offsets
        frame_inds = frame_inds.reshape((-1, self.clip_len))
        if self.out_of_bound_opt == 'loop':
            frame_inds = np.mod(frame_inds, total_frames)
        elif self.out_of_bound_opt == 'repeat_last':
            safe_inds = frame_inds < total_frames
            unsafe_inds = 1 - safe_inds
            last_ind = np.max(safe_inds * frame_inds, axis=1)
            new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
            frame_inds = new_inds
        else:
            raise ValueError('Illegal out_of_bound option.')
        start_index = results['start_index']
        frame_inds = np.concatenate(frame_inds) + start_index
        results['frame_inds'] = frame_inds.astype(int)
        results['clip_len'] = self.clip_len
        results['frame_interval'] = self.frame_interval
        results['num_clips'] = self.num_clips
        return results
@PIPELINES.register_module()
class RawFrameDecode_Custom(RawFrameDecode):
    """``RawFrameDecode`` variant that can additionally load extra
    modalities (temporal gradients, pre-rendered flow) and remap result
    keys after decoding.

    Args:
        io_backend (str): IO backend where the frames are stored.
        decoding_backend (str): Backend used by mmcv for image decoding.
        extra_modalities (list | None): Extra modalities to load; supported
            values used below are 'tempgrad' and 'flow_xym'. Default: None
            (treated as an empty list).
        override_modality (str | None): If set, decode this modality
            instead of ``results['modality']``.
        results_mapping (dict | None): ``src -> dst`` result keys copied
            after decoding (``results[dst] = results[src]``). Default: None
            (treated as an empty dict).
        tempgrad_clip_len (int | None): Number of leading frame indices to
            load for the 'tempgrad' modality; None loads all of them.
    """

    def __init__(self, io_backend='disk', decoding_backend='cv2',
                 extra_modalities=None, override_modality=None,
                 results_mapping=None, tempgrad_clip_len=None, **kwargs):
        super().__init__(io_backend, decoding_backend, **kwargs)
        self.override_modality = override_modality
        # BUG FIX: the defaults used to be `[]` and `dict()` — mutable
        # default arguments are shared across every instantiation; use
        # None sentinels instead (backward-compatible for all callers).
        self.extra_modalities = (
            [] if extra_modalities is None else extra_modalities)
        self.results_mapping = (
            {} if results_mapping is None else results_mapping)
        self.tempgrad_clip_len = tempgrad_clip_len

    def __call__(self, results):
        """Perform the ``RawFrameDecode`` to pick frames given indices.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        mmcv.use_backend(self.decoding_backend)
        directory = results['frame_dir']
        filename_tmpl = results['filename_tmpl']
        modality = results['modality']
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        imgs = list()
        if results['frame_inds'].ndim != 1:
            results['frame_inds'] = np.squeeze(results['frame_inds'])
        offset = results.get('offset', 0)
        if self.override_modality is not None:
            modality = self.override_modality
        for frame_idx in results['frame_inds']:
            frame_idx += offset
            if modality == 'RGB':
                filepath = osp.join(directory, filename_tmpl.format(frame_idx))
                img_bytes = self.file_client.get(filepath)
                # Get frame with channel order RGB directly.
                cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
                imgs.append(cur_frame)
            elif modality == 'Flow':
                x_filepath = osp.join(directory,
                                      filename_tmpl.format('x', frame_idx))
                y_filepath = osp.join(directory,
                                      filename_tmpl.format('y', frame_idx))
                x_img_bytes = self.file_client.get(x_filepath)
                x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
                y_img_bytes = self.file_client.get(y_filepath)
                y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
                imgs.extend([x_frame, y_frame])
            else:
                # NOTE(review): unknown modalities silently stop decoding
                # instead of raising — confirm this is intentional.
                break
        results['imgs'] = imgs
        if 'tempgrad' in self.extra_modalities:
            # Temporal-gradient frames live next to the RGB frames with an
            # 'img' -> 'tempgrad' filename substitution.
            imgs_diff = list()
            filename_tmpl_diff = filename_tmpl.replace('img', 'tempgrad')
            if self.tempgrad_clip_len is not None:
                tempgrad_clip_len = self.tempgrad_clip_len
            else:
                tempgrad_clip_len = len(results['frame_inds'])
            for frame_idx in results['frame_inds'][:tempgrad_clip_len]:
                frame_idx += offset
                filepath = osp.join(directory, filename_tmpl_diff.format(frame_idx))
                img_bytes = self.file_client.get(filepath)
                # Get frame with channel order RGB directly.
                cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
                imgs_diff.append(cur_frame)
            results['imgs_diff'] = imgs_diff
        if 'flow_xym' in self.extra_modalities:
            # Pre-rendered flow frames ('img' -> 'flow_xym' substitution);
            # always loaded for every sampled frame index.
            flow_xym = list()
            filename_tmpl_xym = filename_tmpl.replace('img', 'flow_xym')
            for frame_idx in results['frame_inds']:
                frame_idx += offset
                filepath = osp.join(directory, filename_tmpl_xym.format(frame_idx))
                img_bytes = self.file_client.get(filepath)
                cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
                flow_xym.append(cur_frame)
            results['imgs_flow_xym'] = flow_xym
        if len(self.results_mapping) > 0:
            for src, dst in self.results_mapping.items():
                results[dst] = results[src]
        results['original_shape'] = results['imgs'][0].shape[:2]
        results['img_shape'] = results['imgs'][0].shape[:2]
        # we resize the gt_bboxes and proposals to their real scale
        if 'gt_bboxes' in results:
            h, w = results['img_shape']
            scale_factor = np.array([w, h, w, h])
            gt_bboxes = results['gt_bboxes']
            gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
            results['gt_bboxes'] = gt_bboxes
            # Kept nested under 'gt_bboxes' so scale_factor is always bound.
            if 'proposals' in results and results['proposals'] is not None:
                proposals = results['proposals']
                proposals = (proposals * scale_factor).astype(np.float32)
                results['proposals'] = proposals
        return results
# @PIPELINES.register_module()
# class Results_Mapping:
# """Fuse lazy operations.
#
# Fusion order:
# crop -> resize -> flip
#
# Required keys are "imgs", "img_shape" and "lazy", added or modified keys
# are "imgs", "lazy".
# Required keys in "lazy" are "crop_bbox", "interpolation", "flip_direction".
# """
# def __init__(self, mapping_dict=0):
# self.tg_random_shift = tg_random_shift
#
# def __call__(self, results):
#
# imgs_diff = results['imgs_diff']
#
# results['imgs_diff'] = imgs_diff
# del results[]
#
# return results
@PIPELINES.register_module()
class Fuse_WithDiff:
    """Fuse lazy operations for both ``imgs`` and ``imgs_diff``.

    Fusion order:
        crop -> resize -> flip

    Required keys are "imgs", "imgs_diff", "img_shape" and "lazy"; modified
    keys are "imgs" and "imgs_diff" ("lazy" is removed).
    Required keys in "lazy" are "crop_bbox", "interpolation",
    "flip_direction".

    Args:
        tg_random_shift (int): Std-dev (in pixels) of a Gaussian jitter
            applied to the crop box of ``imgs_diff`` only. 0 disables it.
    """

    def __init__(self, tg_random_shift=0):
        self.tg_random_shift = tg_random_shift

    def __call__(self, results):
        if 'lazy' not in results:
            raise ValueError('No lazy operation detected')
        lazyop = results['lazy']
        imgs = results['imgs']
        imgs_diff = results['imgs_diff']
        # crop
        left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
        imgs = [img[top:bottom, left:right] for img in imgs]
        # Keep the unjittered crop box for the debug dump below (plain
        # assignment suffices — these are immutable scalars, deepcopy is
        # unnecessary).
        left_copy, right_copy = left, right
        top_copy, bottom_copy = top, bottom
        shift_pixels = None
        if self.tg_random_shift > 0:
            # Per-edge Gaussian jitter, clipped to +/- 2 sigma, applied to
            # the diff-stream crop box only.
            sigma = self.tg_random_shift
            shift_pixels = np.random.normal(0, sigma, size=4).round().astype(int)
            np.clip(shift_pixels, -2 * sigma, 2 * sigma, out=shift_pixels)
            center_h = (top + bottom) / 2
            center_w = (left + right) / 2
            h = bottom - top
            w = right - left
            # Keep at least a 4-pixel extent around the centre on each axis.
            left = (center_w - max(0.5 * w + shift_pixels[0], 2)).round().astype(int)
            right = (center_w + max(0.5 * w + shift_pixels[1], 2)).round().astype(int)
            top = (center_h - max(0.5 * h + shift_pixels[2], 2)).round().astype(int)
            bottom = (center_h + max(0.5 * h + shift_pixels[3], 2)).round().astype(int)
            left = max(0, left)
            top = max(0, top)
        imgs_diff = [img[top:bottom, left:right] for img in imgs_diff]
        # Debug dump for degenerate (zero-area) crops.
        if 0 in imgs_diff[0].shape:
            print(imgs_diff[0].shape)
            print(left, right, top, bottom)
            print(left_copy, right_copy, top_copy, bottom_copy)
            # BUG FIX: shift_pixels used to be printed unconditionally and
            # raised NameError whenever tg_random_shift == 0.
            if shift_pixels is not None:
                print(shift_pixels)
        # resize
        img_h, img_w = results['img_shape']
        if lazyop['interpolation'] is None:
            interpolation = 'bilinear'
        else:
            interpolation = lazyop['interpolation']
        imgs = [
            mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
            for img in imgs
        ]
        imgs_diff = [
            mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
            for img in imgs_diff
        ]
        # flip (in place)
        if lazyop['flip']:
            for img in imgs:
                mmcv.imflip_(img, lazyop['flip_direction'])
            for img in imgs_diff:
                mmcv.imflip_(img, lazyop['flip_direction'])
        results['imgs'] = imgs
        results['imgs_diff'] = imgs_diff
        del results['lazy']
        return results
@PIPELINES.register_module()
class Fuse_OnlyDiff:
    """Fuse lazy operations for the ``imgs_diff`` stream only.

    Fusion order:
        crop -> resize -> flip

    Required keys are "imgs_diff", "img_shape" and "lazy"; modifies
    "imgs_diff" and removes "lazy".
    Required keys in "lazy" are "crop_bbox", "interpolation",
    "flip_direction".
    """

    def __call__(self, results):
        if 'lazy' not in results:
            raise ValueError('No lazy operation detected')
        lazy_op = results['lazy']
        diff_frames = results['imgs_diff']

        # Apply the pending crop.
        left, top, right, bottom = lazy_op['crop_bbox'].round().astype(int)
        diff_frames = [frame[top:bottom, left:right] for frame in diff_frames]

        # Apply the pending resize.
        target_h, target_w = results['img_shape']
        interpolation = ('bilinear' if lazy_op['interpolation'] is None
                         else lazy_op['interpolation'])
        diff_frames = [
            mmcv.imresize(frame, (target_w, target_h),
                          interpolation=interpolation)
            for frame in diff_frames
        ]

        # Apply the pending flip in place.
        if lazy_op['flip']:
            for frame in diff_frames:
                mmcv.imflip_(frame, lazy_op['flip_direction'])

        results['imgs_diff'] = diff_frames
        del results['lazy']
        return results
@PIPELINES.register_module()
class Trans_to_RGB:
    """Convert ``imgs_diff`` frames to a pseudo-RGB difference encoding.

    Adjacent frame differences are shifted into the [0, 255] range via
    ``(diff + 255) / 2``. Only the 'Diff' modality is implemented.

    Args:
        modality (str): Modality to transform. Only 'Diff' is supported;
            anything else raises NotImplementedError.
    """

    def __init__(self, modality='Diff'):
        self.modality = modality

    def __call__(self, results):
        imgs_diff = np.array(results['imgs_diff'])
        if self.modality == 'Diff':
            # NOTE(review): the in-place float ops below assume the frames
            # are already floating point; integer frames would wrap on the
            # subtraction and fail on `+= 255.0` — confirm upstream dtype.
            imgs_diff = imgs_diff[1:] - imgs_diff[:-1]
            imgs_diff += 255.0
            imgs_diff /= 2.0
        else:
            # BUG FIX: was `raise NotImplemented`, which raises a TypeError
            # in Python 3 (NotImplemented is a value, not an exception);
            # the correct exception class is NotImplementedError.
            raise NotImplementedError
        results['imgs_diff'] = imgs_diff
        return results
@PIPELINES.register_module()
class Normalize_Diff(Normalize):
    """Normalize ``imgs_diff`` frames with the given mean and std values.

    Required keys are "imgs_diff" and "img_shape"; modified keys are
    "imgs_diff" and "img_norm_cfg".

    Args:
        mean (Sequence[float]): Mean values of different channels.
        std (Sequence[float]): Std values of different channels.
        to_bgr (bool): Whether to convert channels from RGB to BGR.
            Default: False.
        adjust_magnitude (bool): Whether to adjust the flow magnitude on
            'scale_factor' (inherited behaviour). Default: False.
        raw_to_diff (bool): Difference the frames *after* normalizing.
            Default: True.
        redist_to_rgb (bool): Difference the frames and re-map them to
            [0, 255] *before* normalizing. Default: False.
    """

    def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False,
                 raw_to_diff=True, redist_to_rgb=False):
        super().__init__(mean, std, to_bgr, adjust_magnitude)
        self.raw_to_diff = raw_to_diff
        self.redist_to_rgb = redist_to_rgb

    def __call__(self, results):
        # Stack the frames into one float32 array.
        frames = np.array(results['imgs_diff'], dtype=np.float32)

        if self.redist_to_rgb:
            # Difference first, then shift into the [0, 255] range.
            frames = frames[1:] - frames[:-1]
            frames += 255.0
            frames /= 2.0

        # mmcv normalizes each frame in place.
        for frame in frames:
            mmcv.imnormalize_(frame, self.mean, self.std, self.to_bgr)

        if self.raw_to_diff:
            # Normalize first, then difference (multi-view paper order).
            frames = frames[1:] - frames[:-1]

        results['imgs_diff'] = frames
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_bgr=self.to_bgr)
        return results
@PIPELINES.register_module()
class FormatShape_Diff:
    """Format the final ``imgs_diff`` shape to the given input format.

    Required keys are "imgs_diff", "num_clips" and "clip_len"; the
    modified key is "imgs_diff".

    Args:
        input_format (str): Target layout; one of 'NCTHW', 'NCHW',
            'NCHW_Flow', 'NPTCHW' (only 'NCTHW' triggers a reshape here).
        num_clips (int | None): Overrides ``results['num_clips']``.
        clip_len (int | None): Overrides ``results['clip_len']``.
        collapse (bool): Assert a single clip (detector-style input).
            Default: False.
    """

    def __init__(self, input_format, num_clips=None, clip_len=None,
                 collapse=False):
        self.input_format = input_format
        self.collapse = collapse
        if input_format not in ('NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW'):
            raise ValueError(
                f'The input format {input_format} is invalid.')
        self.num_clips = num_clips
        self.clip_len = clip_len

    def __call__(self, results):
        """Performs the FormatShape formatting.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        diff = results['imgs_diff']
        if not isinstance(diff, np.ndarray):
            diff = np.array(diff)
            results['imgs_diff'] = diff
        # Incoming layout: M x H x W x C with M = N_crops * N_clips * L.
        if self.collapse:
            assert results['num_clips'] == 1
        if self.input_format == 'NCTHW':
            num_clips = (results['num_clips']
                         if self.num_clips is None else self.num_clips)
            clip_len = (results['clip_len']
                        if self.clip_len is None else self.clip_len)
            # -> N_crops x N_clips x L x H x W x C
            diff = diff.reshape((-1, num_clips, clip_len) + diff.shape[1:])
            # -> N_crops x N_clips x C x L x H x W
            diff = np.transpose(diff, (0, 1, 5, 2, 3, 4))
            # -> (N_crops * N_clips) x C x L x H x W
            diff = diff.reshape((-1,) + diff.shape[2:])
        results['imgs_diff'] = diff
        return results

    def __repr__(self):
        return f"{self.__class__.__name__}(input_format='{self.input_format}')"
@PIPELINES.register_module()
class Reset_img_shape:
    """Restore ``results['img_shape']`` from ``results['original_shape']``.

    Useful when a later pipeline step needs the pre-transform shape.
    """

    def __init__(self):
        # No configuration required.
        pass

    def __call__(self, results):
        """Copy 'original_shape' back over 'img_shape' and return results."""
        results['img_shape'] = results['original_shape']
        return results
@PIPELINES.register_module()
class Normalize_Imgs2Diff(Normalize):
    """Normalize ``imgs``, optionally converting them to frame diffs first.

    Required keys are "imgs", "img_shape", "num_clips" and "clip_len";
    modified keys are "imgs" and "img_norm_cfg".

    Args:
        mean (Sequence[float]): Mean values of different channels.
        std (Sequence[float]): Std values of different channels.
        to_bgr (bool): Whether to convert channels from RGB to BGR.
            Default: False.
        adjust_magnitude (bool): Whether to adjust the flow magnitude on
            'scale_factor' (inherited behaviour). Default: False.
        redist_to_rgb (bool): Convert frames to [0, 255]-ranged adjacent
            differences before normalizing. Default: True.
    """

    def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False,
                 redist_to_rgb=True):
        super().__init__(mean, std, to_bgr, adjust_magnitude)
        self.redist_to_rgb = redist_to_rgb

    def __call__(self, results):
        total = len(results['imgs'])
        # Stack the frames into one float32 array.
        frames = np.array(results['imgs'], dtype=np.float32)

        if self.redist_to_rgb:
            num_clips = results['num_clips']  # read for key validation; unused below
            clip_len = results['clip_len']
            if total == clip_len:
                # Single clip: adjacent differences shifted into [0, 255].
                frames = frames[1:] - frames[:-1]
                frames += 255.0
                frames /= 2.0
            else:
                # Multiple clips: difference across all frames, then drop
                # the cross-clip boundary diffs (the last one of each clip).
                assert total > clip_len and total % clip_len == 0
                frames = frames[1:] - frames[:-1]
                keep = np.delete(np.arange(total),
                                 np.arange(clip_len - 1, total, clip_len))
                frames = frames[keep]
                frames += 255.0
                frames /= 2.0

        # Diff first, then normalize (multi-view paper order); mmcv
        # normalizes each frame in place.
        for frame in frames:
            mmcv.imnormalize_(frame, self.mean, self.std, self.to_bgr)

        results['imgs'] = frames
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_bgr=self.to_bgr)
        return results
@PIPELINES.register_module()
class FormatShape_Imgs2Diff:
    """Format diffed ``imgs`` to the given input format.

    Required keys are "imgs", "num_clips" and "clip_len"; the modified key
    is "imgs". The effective clip length is ``clip_len - 1`` — this assumes
    an upstream step already converted the frames to adjacent differences
    (confirm against the pipeline config).

    Args:
        input_format (str): Target layout; one of 'NCTHW', 'NCHW',
            'NCHW_Flow', 'NPTCHW' (only 'NCTHW' triggers a reshape here).
        collapse (bool): Assert a single clip (detector-style input).
            Default: False.
    """

    def __init__(self, input_format, collapse=False):
        self.input_format = input_format
        self.collapse = collapse
        if input_format not in ('NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW'):
            raise ValueError(
                f'The input format {input_format} is invalid.')

    def __call__(self, results):
        """Performs the FormatShape formatting.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        frames = results['imgs']
        if not isinstance(frames, np.ndarray):
            frames = np.array(frames)
            results['imgs'] = frames
        # Incoming layout: M x H x W x C with M = N_crops * N_clips * L.
        if self.collapse:
            assert results['num_clips'] == 1
        if self.input_format == 'NCTHW':
            num_clips = results['num_clips']
            # One frame per clip was consumed by the differencing step.
            clip_len = results['clip_len'] - 1
            # -> N_crops x N_clips x L x H x W x C
            frames = frames.reshape((-1, num_clips, clip_len) + frames.shape[1:])
            # -> N_crops x N_clips x C x L x H x W
            frames = np.transpose(frames, (0, 1, 5, 2, 3, 4))
            # -> (N_crops * N_clips) x C x L x H x W
            frames = frames.reshape((-1,) + frames.shape[2:])
        results['imgs'] = frames
        return results

    def __repr__(self):
        return f"{self.__class__.__name__}(input_format='{self.input_format}')"
@PIPELINES.register_module()
class DecordDecode_Custom:
    """Using decord to decode the video.

    Decord: https://github.com/dmlc/decord

    Required keys are "video_reader", "filename" and "frame_inds",
    added or modified keys are "imgs" and "original_shape".

    Args:
        mode (str): Decoding mode. Options are 'accurate' and 'efficient'.
            If set to 'accurate', it will decode videos into accurate
            frames. If set to 'efficient', it will adopt fast seeking but
            only return key frames, which may be duplicated and
            inaccurate, and more suitable for large scene-based video
            datasets. Default: 'accurate'.
        extra_modalities (list | None): Extra modalities to derive while
            decoding; only 'tempgrad' is handled and requires
            ``mode='accurate'``. Default: None (treated as an empty list).
    """

    def __init__(self, mode='accurate', extra_modalities=None):
        self.mode = mode
        assert mode in ['accurate', 'efficient']
        # BUG FIX: the default used to be `[]` — a mutable default argument
        # shared across instantiations; use a None sentinel instead
        # (backward-compatible for all callers).
        extra_modalities = [] if extra_modalities is None else extra_modalities
        if extra_modalities != []:
            assert mode == 'accurate'
        self.extra_modalities = extra_modalities

    def __call__(self, results):
        """Perform the Decord decoding.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        container = results['video_reader']
        if results['frame_inds'].ndim != 1:
            results['frame_inds'] = np.squeeze(results['frame_inds'])
        frame_inds = results['frame_inds']
        if self.mode == 'accurate':
            imgs = container.get_batch(frame_inds).asnumpy()
            imgs = list(imgs)
            if 'tempgrad' in self.extra_modalities:
                # Temporal gradient: (frame - next_frame + 255) / 2,
                # mapped back to uint8.
                # NOTE(review): frame_inds + 1 can exceed the last valid
                # frame index for the final sampled frame — confirm the
                # sampler leaves a spare frame at the end.
                frame_inds_next = frame_inds + 1
                imgs_next = list(container.get_batch(frame_inds_next).asnumpy())
                imgs_diff = [
                    ((frame.astype(np.float32) - frame_next.astype(np.float32)
                      + 255.0) / 2.0).astype(np.uint8)
                    for frame, frame_next in zip(imgs, imgs_next)
                ]
                results['imgs_diff'] = imgs_diff
        elif self.mode == 'efficient':
            # This mode is faster, however it always returns I-FRAME
            container.seek(0)
            imgs = list()
            for idx in frame_inds:
                container.seek(idx)
                frame = container.next()
                imgs.append(frame.asnumpy())
        # Drop the reader reference now that decoding is done.
        results['video_reader'] = None
        del container
        results['imgs'] = imgs
        results['original_shape'] = imgs[0].shape[:2]
        results['img_shape'] = imgs[0].shape[:2]
        return results

    def __repr__(self):
        repr_str = f'{self.__class__.__name__}(mode={self.mode})'
        return repr_str
######################################################################################
# python 3 script for killing user sessions of Ibcos gold classic
# author: Isaac Thompson 20/03/2021 - 26/05/2021, Lloyd Ltd
######################################################################################
# NOTE(review): telnetlib is deprecated since Python 3.11 and removed in
# 3.13 — this script requires Python <= 3.12 (or a telnetlib backport).
import telnetlib
import time
import sys
import math as m
from getpass import getpass

# getting arguments from command line
args = sys.argv

# setting defaults
thresh = False           # kill sessions idle longer than time_thresh minutes
auto = False             # re-run forever, sleeping `interval` minutes between passes
exclude = False          # skip sessions matching exclude_str
max_sessions = 120       # session cap used by the dynamic threshold
dynamic_thresh = False   # derive time_thresh from current load
host = 'localhost'

# reading arguments into variables
# NOTE(review): flags are matched by substring and their value is taken
# after a one-character separator (e.g. "-th=30", "-host=1.2.3.4:23") —
# the exact expected separator is not validated here.
for arg in args:
    if '-host' in arg:
        host = arg[6:]
    if '-auto' in arg:
        auto = True
        interval = int(arg[6:])
    if '-th' in arg:
        thresh = True
        time_thresh = int(arg[4:])
    if '-x' in arg:
        exclude = True
        exclude_str = arg[3:]
    if '-max' in arg:
        max_sessions = int(arg[5:])
        dynamic_thresh = True

# getting login details
username = input("Username: ")
password = getpass()

loop = True
while loop:
    # connection
    HOST = host
    PORT = 23
    # getting port if port is specified in IP
    if ':' in HOST:
        PORT = int(HOST.split(':')[1])
        HOST = HOST.split(':')[0]
    USER = username
    PASS = password

    # attempting to connect to server (retry until it answers)
    connected = False
    while not connected:
        try:
            tn = telnetlib.Telnet(HOST, PORT)
            print("connected")
            connected = True
        except OSError:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the script unkillable
            # while the host was down. Connection failures raise OSError.
            print('unable to connect to host')
            print('retrying...')
            time.sleep(2)

    # login
    try:
        tn.read_until(b'login: ')
        tn.write((USER + '\n').encode('ascii'))
        print("written login")
        tn.read_until(b'assword: ')
        tn.write((PASS + '\n').encode('ascii'))
        print("written password")
        # setting terminal type
        tn.read_until(b'erminal type? ')
        tn.write(b'vt100\n')
        print("terminal type written")
    except Exception:
        print('Unable to execute login sequence')
        print('Retrying...')
        tn.read_all()
        tn.close()
        time.sleep(2)
        # BUG FIX: was the bare expression `next`, which is a no-op — the
        # script fell through and kept using the broken connection.
        # `continue` restarts the outer loop and actually retries.
        continue

    # setting dynamic threshold if max sessions specified
    if dynamic_thresh:
        tn.read_until(b'# ')
        tn.write(b"show | grep 'Users = ' | cat\n")
        out = tn.read_until(b':~').decode()
        users = int(out.split('\r\n')[1].split('Users = ')[1].split()[0])
        # Exponential ramp: threshold eases from 60 down to 40 minutes as
        # the user count approaches ~96% of max_sessions.
        factor = (m.exp(max(0.0, (users - 0.8*0.96*max_sessions) / (0.2*0.96*max_sessions))) - 1) / (m.e - 1)
        time_thresh = 60 - int(factor*20.0)
        thresh = True
        print(str(users) + ' of ' + str(max_sessions) + ', setting threshold: ' + str(time_thresh) + ' mins')

    # w command (lists logged-in sessions and their idle times)
    try:
        tn.read_until(b'# ')
        tn.write(b'w\n')
        print("w command gone through")
        time.sleep(1)
    except Exception:
        print("w command failed")
        print('Retrying...')
        tn.read_all()
        tn.close()
        time.sleep(2)
        # BUG FIX: was the no-op `next`; see login handler above.
        continue

    # reading sessions into string
    out = tn.read_until(b':~').decode()
    sessions = out.split('\r\n')[3:-1]
    sessions = list(map(lambda x: x.split(), sessions))

    # initialising hitlist of sessions that will be killed
    hitlist = []
    # list of days of week - if a session has any of these in its show line
    # then it will be killed as it means it has been logged in for more than
    # a day, and might be a ghost session
    week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    # finding sessions to kill
    for session in sessions:
        if ('m' in session[3]) and (session[7] == 'dbr'):
            # idle time expressed in minutes ("Nm") and running dbr
            hitlist.append(session[1])
        elif any(day in session[2] for day in week) and (session[1] != 'console') and (session[0] != 'root'):
            # logged in since a previous day (and not console/root)
            hitlist.append(session[1])
        elif thresh:
            if ('s' not in session[3]) and (session[7] == 'dbr'):
                minute = int(session[3].split(':')[0])
                if minute >= time_thresh:
                    hitlist.append(session[1])
    print('hit list compiled')

    # getting list of pids to exclude (e.g tills, specific people, DPs)
    # if wanting to avoid killing them
    if exclude:
        ex_pids = []
        tn.read_until(b'# ')
        show_command = 'show | grep -E -i "' + exclude_str + '"\n'
        tn.write(show_command.encode('ascii'))
        out = tn.read_until(b':~').decode()
        sessions = out.split('\r\n')[1:-1]
        for s in sessions:
            pid = s.split()[-1]
            ex_pids.append(pid)

    # getting pids and killing
    if len(hitlist) > 0:
        for hit in hitlist:
            get_pid_command = 'ps -t ' + hit + '\n'
            tn.read_until(b'# ')
            tn.write(get_pid_command.encode('ascii'))
            out = tn.read_until(b':~').decode()
            try:
                pid = out.split('\n')[2].split()[0]
                if exclude:
                    if pid in ex_pids:
                        continue
                tn.read_until(b'# ')
                kill_command = 'kill -9 ' + pid + '\n'
                tn.write(kill_command.encode('ascii'))
                print(hit + ' killed')
            except Exception:
                # Session may have vanished between `w` and `ps`.
                print("Unable to kill " + hit)
    else:
        print('No eligible sessions')

    # terminating connection
    tn.read_until(b'# ')
    tn.write(b'exit\n')
    tn.close()
    print('Disconnected\n')

    if auto:
        print('sleeping\n')
        time.sleep(interval*60)
    else:
        loop = False
from .entrepreneur_profile_cleanup import (
clean_entrepreneur_profile_twitter_handles
)
from .expert_profile_cleanup import (
clean_expert_profile_twitter_handles
)
from .organization_cleanup import clean_organization_twitter_handles
|
import setuptools

# Long description is the README rendered on PyPI.
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

# Single-source the version from the VERSION file.
# BUG FIX: strip the trailing newline — a raw read() would embed "\n" in
# the version string and produce malformed package metadata.
with open("VERSION", "r", encoding="utf-8") as version_f:
    version = version_f.read().strip()

setuptools.setup(
    name="FastAPIwee",
    version=version,
    author="German Gensetskyi",
    author_email="Ignis2497@gmail.com",
    description="FastAPIwee - FastAPI + PeeWee = <3",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Ignisor/FastAPIwee",
    project_urls={
        'Documentation': 'https://fastapiwee.qqmber.wtf',
    },
    packages=setuptools.find_packages(exclude=('tests', )),
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    install_requires=[
        'fastapi==0.66.0',
        'pydantic==1.8.2',
        'peewee==3.14.4',
    ],
)
|
from unittest import TestCase
import simplejson as json
from mock import patch, PropertyMock, Mock
from pydclib.pydclib import bin2hstr
from dc.core import config
from dc.core.Indexer import Indexer
from dc.core.State import State
from dc.core.StateContainer import StateContainer
from dc.core.misc import logger
from dc.core.OptimizedAddressState import OptimizedAddressState
from dc.core.TransactionInfo import TransactionInfo
from dc.core.txs.Transaction import Transaction
from dc.core.txs.TransferTransaction import TransferTransaction
from tests.core.txs.testdata import test_json_Simple, test_signature_Simple
from tests.misc.helper import get_alice_xmss, get_bob_xmss, get_slave_xmss, replacement_getTime, set_dc_dir
# Set up the default logger before any test code runs.
logger.initialize_default()
@patch('dc.core.txs.Transaction.logger')
class TestSimpleTransaction(TestCase):
    def __init__(self, *args, **kwargs):
        """Build per-instance fixtures: a fresh State and XMSS wallets.

        NOTE(review): heavy setup in TestCase.__init__ runs once per test
        method instance; setUpClass would be cheaper — confirm intent.
        """
        super(TestSimpleTransaction, self).__init__(*args, **kwargs)
        # Use an isolated data directory so State() touches no real data.
        with set_dc_dir('no_data'):
            self.state = State()
        self.alice = get_alice_xmss()
        self.bob = get_bob_xmss()
        self.slave = get_slave_xmss()
        # Fixture transactions are signed starting from OTS index 10.
        self.alice.set_ots_index(10)
        self.maxDiff = None
def setUp(self):
self.tx = TransferTransaction.create(
addrs_to=[self.bob.address],
amounts=[100],
message_data=None,
fee=1,
xmss_pk=self.alice.pk
)
self.tx._data.nonce = 1
def test_create(self, m_logger):
# Alice sending coins to Bob
tx = TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[100],
message_data=None,
fee=1,
xmss_pk=self.alice.pk)
self.assertTrue(tx)
def test_create_negative_amount(self, m_logger):
with self.assertRaises(ValueError):
TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[-100],
message_data=None,
fee=1,
xmss_pk=self.alice.pk)
def test_create_negative_fee(self, m_logger):
with self.assertRaises(ValueError):
TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[-100],
message_data=None,
fee=-1,
xmss_pk=self.alice.pk)
def test_to_json(self, m_logger):
tx = TransferTransaction.create(addrs_to=[self.bob.address],
amounts=[100],
message_data=None,
fee=1,
xmss_pk=self.alice.pk)
txjson = tx.to_json()
self.assertEqual(json.loads(test_json_Simple), json.loads(txjson))
    def test_from_json(self, m_logger):
        """Deserializing the canonical JSON vector restores every field."""
        tx = Transaction.from_json(test_json_Simple)
        tx.sign(self.alice)
        self.assertIsInstance(tx, TransferTransaction)
        # Test that common Transaction components were copied over.
        self.assertEqual(0, tx.nonce)
        self.assertEqual('010300a1da274e68c88b0ccf448e0b1916fa789b01eb2ed4e9ad565ce264c9390782a9c61ac02f',
                         bin2hstr(tx.addr_from))
        self.assertEqual('01030038ea6375069f8272cc1a6601b3c76c21519455603d370036b97c779ada356'
                         '5854e3983bd564298c49ae2e7fa6e28d4b954d8cd59398f1225b08d6144854aee0e',
                         bin2hstr(tx.PK))
        self.assertEqual('554f546305d4aed6ec71c759942b721b904ab9d65eeac3c954c08c652181c4e8', bin2hstr(tx.txhash))
        # OTS index 10 comes from the fixture wallet set up in __init__.
        self.assertEqual(10, tx.ots_key)
        self.assertEqual(test_signature_Simple, bin2hstr(tx.signature))
        # Test that specific content was copied over.
        self.assertEqual('0103001d65d7e59aed5efbeae64246e0f3184d7c42411421eb385ba30f2c1c005a85ebc4419cfd',
                         bin2hstr(tx.addrs_to[0]))
        self.assertEqual(100, tx.total_amount)
        self.assertEqual(1, tx.fee)
def test_validate_tx(self, m_logger):
# If we change amount, fee, addr_from, addr_to, (maybe include xmss stuff) txhash should change.
# Here we use the tx already defined in setUp() for convenience.
# We must sign the tx before validation will work.
self.tx.sign(self.alice)
# We have not touched the tx: validation should pass.
self.assertTrue(self.tx.validate_or_raise())
def test_validate_tx2(self, m_logger):
tx = TransferTransaction.create(
addrs_to=[self.bob.address],
amounts=[100],
message_data=None,
fee=1,
xmss_pk=self.alice.pk
)
tx.sign(self.alice)
self.assertTrue(tx.validate_or_raise())
tx._data.transaction_hash = b'abc'
# Should fail, as we have modified with invalid transaction_hash
with self.assertRaises(ValueError):
tx.validate_or_raise()
    def test_validate_tx3(self, m_logger):
        """A structurally invalid signature must fail validation even when
        the transaction hash is regenerated to match the mutated data."""
        tx = TransferTransaction.create(
            addrs_to=[self.bob.address],
            amounts=[100],
            message_data=None,
            fee=1,
            xmss_pk=self.alice.pk
        )
        tx.sign(self.alice)
        # Inflate the signature to an invalid length.
        # NOTE(review): 4183 and the 104-byte tail appear chosen to exceed
        # the expected XMSS signature size — confirm against the spec.
        tx._data.signature = tx.signature * 4183 + tx.signature[0:104]
        # Regenerate the hash so only the signature check can fail.
        tx._data.transaction_hash = tx.generate_txhash()
        with self.assertRaises(ValueError):
            tx.validate_or_raise()
@patch('dc.core.txs.Transaction.config')
def test_validate_tx_invalid(self, m_config, m_logger):
# Test all the things that could make a TransferTransaction invalid
self.tx.sign(self.alice)
# Validation in creation, Protobuf, type conversion etc. gets in our way all the time!
# So to get dirty data to the validate() function, we need PropertyMocks
with patch('dc.core.txs.TransferTransaction.TransferTransaction.amounts',
new_callable=PropertyMock) as m_amounts:
# TX amount of 0 shouldn't be allowed.
m_amounts.return_value = [0]
with self.assertRaises(ValueError):
self.tx.validate_or_raise()
with patch('dc.core.txs.TransferTransaction.TransferTransaction.fee', new_callable=PropertyMock) as m_fee:
m_fee.return_value = -1
with self.assertRaises(ValueError):
self.tx.validate_or_raise()
with patch('dc.core.txs.TransferTransaction.TransferTransaction.addrs_to',
new_callable=PropertyMock) as m_addrs_to:
with patch('dc.core.txs.TransferTransaction.TransferTransaction.amounts',
new_callable=PropertyMock) as m_amounts:
# Validation could fail because len(m_addrs_to) != len(m_amounts),
# or if len(m_addrs_to) > transaction_multi_output_limit.
# This second patch level is to make sure the only the latter case happens.
m_amounts = [100, 100, 100, 100]
m_config.dev.transaction_multi_output_limit = 3
m_addrs_to.return_value = [2, 2, 2, 2]
with self.assertRaises(ValueError):
self.tx.validate_or_raise()
with patch('dc.core.txs.TransferTransaction.TransferTransaction.addrs_to',
new_callable=PropertyMock) as m_addrs_to:
# len(addrs_to) must equal len(amounts)
m_addrs_to.return_value = [2, 2]
with self.assertRaises(ValueError):
self.tx.validate_or_raise()
with patch('dc.core.txs.TransferTransaction.TransferTransaction.addr_from',
new_callable=PropertyMock) as m_addr_from:
m_addr_from.return_value = b'If this isnt invalid Ill eat my shoe'
with self.assertRaises(ValueError):
self.tx.validate_or_raise()
with patch('dc.core.txs.TransferTransaction.TransferTransaction.addrs_to',
new_callable=PropertyMock) as m_addrs_to:
with patch('dc.core.txs.TransferTransaction.TransferTransaction.amounts',
new_callable=PropertyMock) as m_amounts:
m_amounts.return_value = [100, 100]
m_addrs_to.return_value = [self.bob.address, b'If this isnt invalid Ill eat my shoe']
with self.assertRaises(ValueError):
self.tx.validate_or_raise()
    def test_validate_extended(self, m_logger):
        """
        validate_extended() handles these parts of the validation:
        1. Master/slave
        2. balance, amount + fee from AddressState
        3. OTS key reuse from AddressState
        :return:
        """
        alice_address_state = OptimizedAddressState.get_default(self.alice.address)
        addresses_state = {
            self.alice.address: alice_address_state
        }
        # Enough balance to cover amount (100) + fee (1).
        alice_address_state.pbdata.balance = 200
        # Master/slave validation is out of scope here; stub it to pass.
        self.tx.validate_slave = Mock(autospec=Transaction.validate_slave, return_value=True)
        self.tx.sign(self.alice)
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        # Happy path: sufficient balance, unused OTS key.
        result = self.tx.validate_all(state_container)
        self.assertTrue(result)
        # Suppose there was ots key reuse. The function should then return false.
        state_container.paginated_bitfield.set_ots_key(addresses_state, self.tx.addr_from, self.tx.ots_key)
        result = self.tx.validate_all(state_container)
        self.assertFalse(result)
        # Suppose the address doesn't have enough coins.
        alice_address_state.pbdata.balance = 99
        state_container.paginated_bitfield.set_ots_key(addresses_state, self.tx.addr_from, self.tx.ots_key)
        result = self.tx.validate_all(state_container)
        self.assertFalse(result)
def test_validate_transaction_pool(self, m_logger):
"""
Two TransferTransactions. Although they're the same, they are signed with different OTS indexes.
Therefore they should not conflict when they are both in the TransactionPool.
:return:
"""
tx = self.tx
tx2 = TransferTransaction.create(
addrs_to=[self.bob.address],
amounts=[100],
message_data=None,
fee=1,
xmss_pk=self.alice.pk
)
tx.sign(self.alice)
tx2.sign(self.alice)
tx_info = Mock(autospec=TransactionInfo, transaction=tx)
tx2_info = Mock(autospec=TransactionInfo, transaction=tx2)
transaction_pool = [(replacement_getTime(), tx_info), (replacement_getTime(), tx2_info)]
result = tx.validate_transaction_pool(transaction_pool)
self.assertTrue(result)
def test_validate_transaction_pool_reusing_ots_index(self, m_logger):
"""
Two different TransferTransactions. They are signed with the same OTS indexe, from the same public key.
Therefore they should conflict.
:return:
"""
tx = self.tx
tx2 = TransferTransaction.create(
addrs_to=[self.bob.address],
amounts=[100],
message_data=None,
fee=5,
xmss_pk=self.alice.pk
)
# alice_clone's OTS index is still at 10, while self.alice will be at 11 after signing.
alice_clone = get_alice_xmss()
alice_clone.set_ots_index(10)
tx.sign(self.alice)
tx2.sign(alice_clone)
tx_info = Mock(autospec=TransactionInfo, transaction=tx)
tx2_info = Mock(autospec=TransactionInfo, transaction=tx2)
transaction_pool = [(replacement_getTime(), tx_info), (replacement_getTime(), tx2_info)]
result = tx.validate_transaction_pool(transaction_pool)
self.assertFalse(result)
def test_validate_transaction_pool_different_pk_same_ots_index(self, m_logger):
    """
    Two TransferTransactions. They are signed with the same OTS indexes, but from different public keys.
    Therefore they should NOT conflict.
    :return:
    """
    tx = self.tx
    tx2 = TransferTransaction.create(
        addrs_to=[self.bob.address],
        amounts=[100],
        message_data=None,
        fee=1,
        xmss_pk=self.bob.pk
    )
    tx.sign(self.alice)
    tx2.sign(self.bob)
    tx_info = Mock(autospec=TransactionInfo, transaction=tx)
    tx2_info = Mock(autospec=TransactionInfo, transaction=tx2)
    transaction_pool = [(replacement_getTime(), tx_info), (replacement_getTime(), tx2_info)]
    # OTS reuse is only a conflict per public key; different signers may share an index.
    result = tx.validate_transaction_pool(transaction_pool)
    self.assertTrue(result)
def test_apply_transfer_txn(self, m_logger):
    """
    apply_state_changes() is the part that actually updates everybody's balances.
    Then it forwards the addresses_state to _apply_state_changes_for_PK(), which updates everybody's addresses's
    nonce, OTS key index, and associated TX hashes
    If there is no AddressState for a particular Address, nothing is done.
    """
    self.tx.sign(self.alice)
    # sign() advanced the tree's OTS index, so the index actually consumed is one less.
    ots_key = self.alice.ots_index - 1
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.bob.address: OptimizedAddressState.get_default(self.bob.address),
        self.slave.address: OptimizedAddressState.get_default(self.slave.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 200
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    # The OTS bit must be clear before apply() and set afterwards.
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    self.tx.apply(self.state, state_container)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    # Now Alice should have 99 coins left (200 - 100 - 1) and Bob should have 100 coins.
    self.assertEqual(99, addresses_state[self.alice.address].balance)
    self.assertEqual(100, addresses_state[self.bob.address].balance)
def test_apply_transfer_txn_tx_sends_to_self(self, m_logger):
    """
    If you send coins to yourself, you should only lose the fee for the Transaction.
    """
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.bob.address: OptimizedAddressState.get_default(self.bob.address),
        self.slave.address: OptimizedAddressState.get_default(self.slave.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 200
    tx = TransferTransaction.create(
        addrs_to=[self.alice.address],
        amounts=[100],
        message_data=None,
        fee=1,
        xmss_pk=self.alice.pk
    )
    tx.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    tx.apply(self.state, state_container)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    # Only the 1-coin fee is lost: 200 - 1 = 199.
    self.assertEqual(199, addresses_state[self.alice.address].balance)
    # The tx hash must still be recorded against Alice's address.
    storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
    self.assertIn(tx.txhash, state_container.paginated_tx_hash.key_value[storage_key])
def test_apply_transfer_txn_multi_send(self, m_logger):
    """
    Test that apply_state_changes() also works with multiple recipients.
    """
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.bob.address: OptimizedAddressState.get_default(self.bob.address),
        self.slave.address: OptimizedAddressState.get_default(self.slave.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 200
    tx_multisend = TransferTransaction.create(
        addrs_to=[self.bob.address, self.slave.address],
        amounts=[20, 20],
        message_data=None,
        fee=1,
        xmss_pk=self.alice.pk
    )
    tx_multisend.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    tx_multisend.apply(self.state, state_container)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    # Alice: 200 - 20 - 20 - 1 (fee) = 159; each recipient gained 20.
    self.assertEqual(159, addresses_state[self.alice.address].balance)
    self.assertEqual(20, addresses_state[self.bob.address].balance)
    self.assertEqual(20, addresses_state[self.slave.address].balance)
def test_apply_state_changes_for_PK(self, m_logger):
    """
    This updates the node's AddressState database with which OTS index a particular address should be on, and what
    tx hashes is this address associated with.
    Curiously enough, if the TX was signed by a master XMSS tree, it doesn't add this tx's txhash to the list of
    txs that address is associated with.
    :return:
    """
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address)
    }
    self.tx.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    # Only the PK-side bookkeeping (OTS bitfield, nonce) is exercised here; no balances move.
    self.tx._apply_state_changes_for_PK(state_container)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    self.assertEqual(1, addresses_state[self.alice.address].nonce)
def test_apply_state_changes_for_PK_master_slave_XMSS(self, m_logger):
    """
    If the TX was signed by a slave XMSS, the slave XMSS's AddressState should be updated (not the master's).
    :return:
    """
    tx = TransferTransaction.create(
        addrs_to=[self.bob.address],
        amounts=[100],
        message_data=None,
        fee=1,
        xmss_pk=self.slave.pk,
        master_addr=self.alice.address
    )
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.slave.address: OptimizedAddressState.get_default(self.slave.address)
    }
    tx.sign(self.slave)
    # The slave signed, so it is the slave's OTS index that was consumed.
    ots_key = self.slave.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.slave.address, ots_key))
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, self.alice.ots_index))
    tx._apply_state_changes_for_PK(state_container)
    # Only the slave's OTS bit and nonce change; the master (Alice) stays untouched.
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.slave.address, ots_key))
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, self.alice.ots_index))
    self.assertEqual(1, addresses_state[self.slave.address].nonce)
    self.assertEqual(0, addresses_state[self.alice.address].nonce)
def test_revert_transfer_txn(self, m_logger):
    """
    Alice has sent 100 coins to Bob, using 1 as Transaction fee. Now we need to undo this.
    """
    self.tx.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.bob.address: OptimizedAddressState.get_default(self.bob.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 200
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    # Pre-mark the OTS key as used so revert() has something to clear.
    state_container.paginated_bitfield.set_ots_key(addresses_state,
                                                  self.alice.address,
                                                  ots_key)
    state_container.paginated_bitfield.put_addresses_bitfield(None)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                       ots_key))
    self.tx.apply(self.state, state_container)
    self.tx.revert(self.state, state_container)
    # Everything must be back to the pre-apply state: OTS bit, nonce, balances, tx hash lists.
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address,
                                                                                        ots_key))
    self.assertEqual(0, addresses_state[self.alice.address].nonce)
    self.assertEqual(200, addresses_state[self.alice.address].balance)
    self.assertEqual(0, addresses_state[self.bob.address].balance)
    storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
    storage_key = state_container.paginated_tx_hash.generate_key(self.bob.address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
def test_revert_transfer_txn_multi_send(self, m_logger):
    """
    Alice has sent 20 coins to Bob and Slave each, using 1 as Transaction fee. Now we need to undo this.
    """
    tx_multisend = TransferTransaction.create(
        addrs_to=[self.bob.address, self.slave.address],
        amounts=[20, 20],
        message_data=None,
        fee=1,
        xmss_pk=self.alice.pk
    )
    tx_multisend.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.bob.address: OptimizedAddressState.get_default(self.bob.address),
        self.slave.address: OptimizedAddressState.get_default(self.slave.address)
    }
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    # Pre-mark the OTS key as used so revert() has something to clear.
    state_container.paginated_bitfield.set_ots_key(addresses_state, self.alice.address, ots_key)
    state_container.paginated_bitfield.put_addresses_bitfield(None)
    addresses_state[self.alice.address].pbdata.balance = 200
    addresses_state[self.bob.address].pbdata.balance = 0
    addresses_state[self.slave.address].pbdata.balance = 0
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    tx_multisend.apply(self.state, state_container)
    tx_multisend.revert(self.state, state_container)
    # All balances and tx hash lists must be back to their pre-apply values.
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    self.assertEqual(200, addresses_state[self.alice.address].balance)
    self.assertEqual(0, addresses_state[self.bob.address].balance)
    self.assertEqual(0, addresses_state[self.slave.address].balance)
    storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
    storage_key = state_container.paginated_tx_hash.generate_key(self.bob.address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
    storage_key = state_container.paginated_tx_hash.generate_key(self.slave.address, 1)
    self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])
def test_revert_transfer_txn_tx_sends_to_self(self, m_logger):
    """
    Alice sent coins to herself, but she still lost the Transaction fee. Undo this.
    """
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.bob.address: OptimizedAddressState.get_default(self.bob.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 200
    tx = TransferTransaction.create(
        addrs_to=[self.alice.address],
        amounts=[100],
        message_data=None,
        fee=1,
        xmss_pk=self.alice.pk
    )
    tx.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    # Pre-mark the OTS key as used so revert() has something to clear.
    state_container.paginated_bitfield.set_ots_key(addresses_state, self.alice.address, ots_key)
    state_container.paginated_bitfield.put_addresses_bitfield(None)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    tx.apply(self.state, state_container)
    tx.revert(self.state, state_container)
    # The fee deduction was undone: Alice is back to 200.
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    self.assertEqual(200, addresses_state[self.alice.address].balance)
    self.assertEqual(0, addresses_state[self.bob.address].balance)
def test_revert_state_changes_for_PK(self, m_logger):
    """
    This is just an undo function.
    :return:
    """
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 101
    addresses_state[self.alice.address].pbdata.nonce = 1
    tx = TransferTransaction.create(
        addrs_to=[self.bob.address],
        amounts=[100],
        message_data=None,
        fee=1,
        xmss_pk=self.alice.pk
    )
    tx.sign(self.alice)
    # sign() advanced the OTS index; the consumed index is one less.
    ots_key = self.alice.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    # Pre-mark the OTS key as used so the revert has something to clear.
    state_container.paginated_bitfield.set_ots_key(addresses_state, self.alice.address, ots_key)
    state_container.paginated_bitfield.put_addresses_bitfield(None)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
    # Apply then revert the PK-side bookkeeping; the OTS bit must end up clear.
    tx._apply_state_changes_for_PK(state_container)
    tx._revert_state_changes_for_PK(state_container)
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, ots_key))
def test_revert_state_changes_for_PK_master_slave_XMSS(self, m_logger):
    """
    Reverting a slave-signed transaction must clear only the slave's OTS bit;
    the master's (Alice's) bitfield must never be touched.
    """
    addresses_state = {
        self.alice.address: OptimizedAddressState.get_default(self.alice.address),
        self.slave.address: OptimizedAddressState.get_default(self.slave.address)
    }
    addresses_state[self.alice.address].pbdata.balance = 101
    addresses_state[self.slave.address].pbdata.nonce = 1
    tx = TransferTransaction.create(
        addrs_to=[self.bob.address],
        amounts=[100],
        message_data=None,
        fee=1,
        xmss_pk=self.slave.pk,
        master_addr=self.alice.address
    )
    tx.sign(self.slave)
    # The slave signed, so it is the slave's OTS index that was consumed.
    ots_key = self.slave.ots_index - 1
    state_container = StateContainer(addresses_state=addresses_state,
                                     tokens=Indexer(b'token', None),
                                     slaves=Indexer(b'slave', None),
                                     lattice_pk=Indexer(b'lattice_pk', None),
                                     multi_sig_spend_txs=dict(),
                                     votes_stats=dict(),
                                     block_number=1,
                                     total_coin_supply=100,
                                     current_dev_config=config.dev,
                                     write_access=True,
                                     my_db=self.state._db,
                                     batch=None)
    # Pre-mark the slave's OTS key as used so the revert has something to clear.
    state_container.paginated_bitfield.set_ots_key(addresses_state, self.slave.address, ots_key)
    state_container.paginated_bitfield.put_addresses_bitfield(None)
    self.assertTrue(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.slave.address, ots_key))
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, self.alice.ots_index))
    tx._apply_state_changes_for_PK(state_container)
    tx._revert_state_changes_for_PK(state_container)
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.slave.address, ots_key))
    self.assertFalse(state_container.paginated_bitfield.load_bitfield_and_ots_key_reuse(self.alice.address, self.alice.ots_index))
def test_affected_address(self, m_logger):
    """set_affected_address() collects every address a transaction touches."""
    # The default transaction involves exactly two addresses (sender + one recipient).
    touched = set()
    self.tx.set_affected_address(touched)
    self.assertEqual(2, len(touched))

    # A two-recipient transfer involves three addresses in total.
    touched = set()
    tx = TransferTransaction.create(
        addrs_to=[self.bob.address, self.slave.address],
        amounts=[100, 100],
        message_data=None,
        fee=1,
        xmss_pk=self.alice.pk
    )
    tx.set_affected_address(touched)
    self.assertEqual(3, len(touched))
|
import os

# conan releases up to this (dot-stripped) version ship broken OS X profiles.
LAST_VERSION_WITH_ERROR = 174


def parse_conan_version(output):
    """Collapse the digits of `conan --version` output into one int.

    E.g. "Conan version 1.7.4" -> 174, preserving the original comparison
    scheme (digits concatenated, dots dropped).
    """
    # Bug fix: the original did int(filter(str.isdigit, ...)), which raises
    # TypeError on Python 3 because filter() returns an iterator, not a str.
    return int("".join(filter(str.isdigit, output)))


def main():
    """Overwrite the broken conan OS X profiles when the installed version needs it."""
    conan_version = parse_conan_version(os.popen('conan --version').read())
    if LAST_VERSION_WITH_ERROR >= conan_version:
        os.system('cp -r conan_fixed_os_x/default /Users/$USER/.conan/profiles/')
        os.system('cp -r conan_fixed_os_x/settings.yml /Users/$USER/.conan/')


if __name__ == "__main__":
    main()
import random


def jogador_venceu(soma, escolha):
    """Return True when the player wins: 'P' (par) wins on an even sum,
    'I' (impar) wins on an odd sum."""
    if soma % 2 == 0:
        return escolha == 'P'
    return escolha == 'I'


def main():
    """Play "par ou impar" against the computer until the player loses.

    Fixes over the original script:
      * the player's pick is asked every round (it used to be asked only once
        because `pi` was never reset);
      * picking 'I' on an odd sum now correctly counts as a win (the original
        only ever let 'P' + even sum win);
      * empty input at the P/I prompt no longer crashes ([0] on '' raised).
    """
    cont = 1  # round counter, reported when the player finally loses
    print('Vamos jogar par ou impar?')
    while True:
        num = int(input('Digite um numero'))
        rand = random.randint(1, 5)
        pi = ''
        while pi not in ('P', 'I'):
            pi = str(input('Par ou Impar?[P/I]')).strip().upper()[:1]
        soma = num + rand
        if jogador_venceu(soma, pi):
            print(f'A soma dos numeros foi {soma}, portando voce Ganhou')
            cont += 1
        else:
            print(f'A soma dos numeros foi {soma}, portanto voce Perdeu')
            print(f'Voce perdeu na rodada {cont}')
            break


if __name__ == "__main__":
    main()
|
# coding: utf-8
"""
Account - 虚拟账户,用于仿真交易
仿真交易分为两种模式:
1)strict - 严格模式,只能在正常交易时间段进行虚拟交易,参照真实交易规则;
2)loose - 宽松模式,不进行任何限制,允许任意添加交易记录
====================================================================
"""
import os
from datetime import datetime
import json
from tma import ACCOUNT_PATH
from tma.collector import get_price
class Order(object):
    """Order object (placeholder - order handling is not implemented yet)."""
    pass
class Account:
    """Virtual account used for simulated (paper) trading.

    Two simulation modes:
      1) "strict" - virtual trades only during regular trading hours,
         following real trading rules;
      2) "loose"  - no restrictions, any trade record may be added.
    """

    def __init__(self, name, fund=-1, mode="strict"):
        self.name = name
        self.mode = mode
        self.fund = fund
        self.path = os.path.join(ACCOUNT_PATH, "account_%s.json" % self.name)
        self._read_info()

    def _read_info(self):
        """Load persisted account info from disk, or initialize a fresh record."""
        if os.path.exists(self.path):
            # Fix: close the file handle (the original leaked an open file).
            with open(self.path, 'r') as f:
                self.info = json.load(f)
        else:
            self.info = {
                "name": self.name,
                "path": self.path,
                "fund": self.fund,  # a fund of -1 means unlimited capital
                "trades": {},
                "create_date": datetime.now().date().__str__(),
            }

    def _save_info(self):
        """Persist account info to disk as JSON."""
        # Fix: close the file handle (the original leaked an open file).
        with open(self.path, 'w') as f:
            json.dump(self.info, f, indent=2)

    def buy(self, code, amount, price=None):
        """Record a BUY trade for `code`; fetch the live price when none is given."""
        if not price:
            price = get_price(code)
        record = {
            "code": code,
            "amount": amount,
            "price": price,
            "date": datetime.now().date().__str__(),
            "kind": "BUY"
        }
        if code not in self.info['trades']:
            self.info['trades'][code] = []
        self.info['trades'][code].append(record)
        self._save_info()

    def sell(self):
        # TODO: selling is not implemented yet.
        pass
|
import os
import types
import unittest
from LAMARCK_ML.data_util import TypeShape, IOLabel, DFloat, Shape, DimNames
from LAMARCK_ML.individuals import ClassifierIndividualOPACDG, NetworkIndividualInterface
from LAMARCK_ML.models.models import GenerationalModel
from LAMARCK_ML.reproduction import Mutation, Recombination, AncestryEntity
from LAMARCK_ML.utils.dataSaver.dbSqlite3 import DSSqlite3
@unittest.skipIf((os.environ.get('test_fast', False) in {'True', 'true', '1'}), 'time consuming')
class TestDBSqlite3(unittest.TestCase):
    """Round-trip tests for the DSSqlite3 data saver: individuals and
    mutation/recombination ancestry are written via the model hooks and
    read back from a temporary SQLite file."""

    class dummyModel(GenerationalModel):
        # Minimal generational model: one classifier individual (ci) plus two
        # more individuals (anc1, anc2) used to build ancestry records.
        def __init__(self, **kwargs):
            super(TestDBSqlite3.dummyModel, self).__init__(**kwargs)
            _data_nts = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 20)))
            _target_nts = TypeShape(DFloat, Shape((DimNames.BATCH, 1), (DimNames.UNITS, 10)))
            self.ci = ClassifierIndividualOPACDG(**{
                NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
                                                          IOLabel.TARGET: (_target_nts, 'Dataset')},
            })
            self.anc1 = ClassifierIndividualOPACDG(**{
                NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
                                                          IOLabel.TARGET: (_target_nts, 'Dataset')},
            })
            self.anc2 = ClassifierIndividualOPACDG(**{
                NetworkIndividualInterface.arg_DATA_NTS: {IOLabel.DATA: (_data_nts, 'Dataset'),
                                                          IOLabel.TARGET: (_target_nts, 'Dataset')},
            })
            self._GENERATION = [self.ci]
            self._GENERATION_IDX = 1

        def mut(self):
            # Record a mutation lineage: anc1 and anc2 each descend from ci.
            mut = Mutation()
            self._REPRODUCTION = [(mut, [AncestryEntity(mut.ID, self.anc1.id_name, [self.ci.id_name]),
                                         AncestryEntity(mut.ID, self.anc2.id_name, [self.ci.id_name])])]

        def rec(self):
            # Record a recombination lineage: ci descends from anc1 + anc2.
            rec = Recombination()
            self._REPRODUCTION = [(rec, [AncestryEntity(rec.ID, self.ci.id_name, [self.anc1.id_name, self.anc2.id_name])])]

    def test_db_generation(self):
        """An individual stored via the _end_evaluate hook can be read back
        as an equal but distinct object."""
        db_file = './test_db_gen.db3'
        ds = DSSqlite3(**{
            DSSqlite3.arg_FILE: db_file,
        })
        dummyM = TestDBSqlite3.dummyModel()
        # Wrap the model's _end_evaluate with the saver's persistence hook.
        setattr(dummyM, '_end_evaluate', types.MethodType(ds.end_evaluate(
            getattr(dummyM, '_end_evaluate')), dummyM))
        dummyM._end_evaluate()
        origin = dummyM.generation[0]
        ind = ds.get_individual_by_name(origin.id_name)
        self.assertEqual(origin, ind)
        # Must be a fresh deserialized object, not the same instance.
        self.assertIsNot(origin, ind)
        os.remove(db_file)

    def test_db_ancestry_mut(self):
        """Mutation ancestry written via _end_reproduce is retrievable per descendant."""
        db_file = './test_db_anc_mut.db3'
        ds = DSSqlite3(**{
            DSSqlite3.arg_FILE: db_file,
        })
        dummyM = TestDBSqlite3.dummyModel()
        # Wrap the model's _end_reproduce with the saver's persistence hook.
        setattr(dummyM, '_end_reproduce', types.MethodType(ds.end_reproduce(
            getattr(dummyM, '_end_reproduce')), dummyM))
        dummyM.mut()
        dummyM._end_reproduce()
        _, anc_ent = ds.get_ancestry_for_ind(dummyM.anc1.id_name)
        self.assertEqual(anc_ent.method, Mutation.ID)
        self.assertEqual(anc_ent.descendant, dummyM.anc1.id_name)
        self.assertListEqual(anc_ent.ancestors, [dummyM.ci.id_name])
        _, anc_ent = ds.get_ancestry_for_ind(dummyM.anc2.id_name)
        self.assertEqual(anc_ent.method, Mutation.ID)
        self.assertEqual(anc_ent.descendant, dummyM.anc2.id_name)
        self.assertListEqual(anc_ent.ancestors, [dummyM.ci.id_name])
        # ci has no recorded ancestors of its own.
        self.assertEqual(ds.get_ancestry_for_ind(dummyM.ci.id_name), (None, None))
        os.remove(db_file)

    def test_db_ancestry_rec(self):
        """Recombination ancestry written via _end_reproduce is retrievable per descendant."""
        db_file = './test_db_anc_rec.db3'
        ds = DSSqlite3(**{
            DSSqlite3.arg_FILE: db_file,
        })
        dummyM = TestDBSqlite3.dummyModel()
        # Wrap the model's _end_reproduce with the saver's persistence hook.
        setattr(dummyM, '_end_reproduce', types.MethodType(ds.end_reproduce(
            getattr(dummyM, '_end_reproduce')), dummyM))
        dummyM.rec()
        dummyM._end_reproduce()
        # The parents have no ancestry; only ci (the child) does.
        self.assertEqual(ds.get_ancestry_for_ind(dummyM.anc1.id_name), (None, None))
        self.assertEqual(ds.get_ancestry_for_ind(dummyM.anc2.id_name), (None, None))
        _, anc_ent = ds.get_ancestry_for_ind(dummyM.ci.id_name)
        self.assertIsNotNone(anc_ent)
        self.assertEqual(anc_ent.method, Recombination.ID)
        self.assertEqual(anc_ent.descendant, dummyM.ci.id_name)
        self.assertListEqual(anc_ent.ancestors, [dummyM.anc1.id_name, dummyM.anc2.id_name])
        os.remove(db_file)
|
class Command(object):
    """Base class for interactive CLI commands.

    Subclasses override run() (and optionally valid_in_context() and
    complete()) to implement behaviour; the base class is a no-op command.
    """

    # Commands are available in every context unless a subclass says otherwise.
    is_global = True

    def __init__(self, name):
        self.name = name
        self.cli = None
        self.matches = []  # candidate strings for readline-style completion

    def valid_in_context(self, context):
        """Whether this command may be used in `context`; always True by default."""
        return True

    def complete(self, text, state):
        """Return the next possible completion for `text`.

        Called successively with state == 0, 1, 2, ... until it returns None;
        completions should begin with `text`. The base class offers none.
        """
        return None

    def run(self, *args):
        """Execute the command; the base implementation does nothing and reports False."""
        return False
|
#!/usr/bin/env python3
# [rights] Copyright 2020 brianddk at github https://github.com/brianddk
# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0
# [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py
# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj
# [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt
# [txid] 3a461e6de82cb2365e9105b127e7e2976da998aeaf7284333304bd3ff78de2b6
# [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/
# [req] python -m pip install electrum
# [note] with open(r"..\reddit\python\hodl.py", 'r') as s: exec(s.read())
from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction
from electrum.bitcoin import deserialize_privkey, opcodes, push_script
from electrum.crypto import hash_160, sha256, sha256d
from electrum.ecc import ECPrivkey
from electrum.segwit_addr import encode
from electrum import constants
# The basic bitcoinlib utility scripts
x = lambda h: bytes.fromhex(h)                   # hex string -> bytes
lx = lambda h: bytes.fromhex(h)[::-1]            # hex string -> little-endian bytes
b2x = lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','')  # bytes or int -> hex string
b2lx = lambda b: b[::-1].hex().replace('0x','')  # bytes -> little-endian hex string
# Very simple bitcoin script compiler
# NOTE(review): this shadows the builtin `compile` for the rest of the script.
compile = lambda s: "".join([
    opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in s])
bech32_encode = lambda w: encode(constants.net.SEGWIT_HRP, 0, x(w))
# Set testnet
constants.set_testnet()
# Basic constants to build the TXNIN
wif = 'cNyQjVGD6ojbLFu1UCapLCM836kCrgMiC4qpVTV9CUx8kVc5kVGQ'
txid = x('ef3fad03b7fd4fe42956e41fccb10ef1a95d98083d3b9246b6c17a88e51c8def')
vout = 1
sats = 10_000
sequence = 0 # in retrospect "-3" in two's complement may be better
address = 'tb1q5rn69avl3ganw3cmhz5ldcxpash2kusq7sncfl'
sats_less_fees = sats - 500
locktime = 1602572140
# Build the Transaction Input
_, privkey, compressed = deserialize_privkey(wif)
pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
# Little-endian hex of the locktime, as the CLTV script expects it on the stack.
# NOTE(review): hex() of an int can yield odd-length hex, which bytes.fromhex
# rejects; fine for this locktime (0x5f857e6c) but verify before reusing.
expiry = b2x(lx(b2x(locktime)))
witness_script = compile([
    expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG'])
script_hash = b2x(sha256(x(witness_script)))  # P2WSH program = sha256(witness script)
hodl_address = bech32_encode(script_hash)
prevout = TxOutpoint(txid=txid, out_idx=vout)
txin = PartialTxInput(prevout=prevout)
txin._trusted_value_sats = sats
txin.nsequence = sequence
txin.script_sig = x(compile([])) # empty script (important!)
txin.witness_script = x(witness_script)
# Build the Transaction Output
txout = PartialTxOutput.from_address_and_value(address, sats_less_fees)
# Build and sign the transaction
tx = PartialTransaction.from_io([txin], [txout], locktime=locktime)
tx.version = 1
txin_index = 0
sig = tx.sign_txin(txin_index, privkey)
# Prepend number of elements in script per the spec.
# Witness stack for the CLTV spend: <signature> <witness script>.
script = [sig, witness_script]
size = bytes([len(script)])
txin.witness = size + x(compile(script))
# Get the serialized txn and compute txid
txn = tx.serialize()
# Display results
print("PayTo:", hodl_address)
print("wif:", wif)
print("pubk:", pubkey)
print("txid:", tx.txid())
print("txn:", txn)
|
# Generated by Django 2.2.4 on 2019-08-04 21:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.4): adds verbose_name
    labels and blank/null tweaks to employer, jobtimeperiod and position
    fields. Do not hand-edit; create a new migration for further changes."""

    dependencies = [
        ('jobHistory', '0002_auto_20190106_0202'),
    ]

    operations = [
        migrations.AlterField(
            model_name='employer',
            name='city',
            field=models.CharField(blank=True, max_length=200, verbose_name='City'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Country'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='County or Parish'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Email'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='industry',
            field=models.CharField(blank=True, max_length=254, verbose_name='Industry'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='long_name',
            field=models.CharField(max_length=254, null=True, unique=True, verbose_name='Long Name'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='phone',
            field=models.CharField(blank=True, max_length=50, verbose_name='Phone'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='short_name',
            field=models.CharField(max_length=50, unique=True, verbose_name='Short Name'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='State or Province'),
        ),
        migrations.AlterField(
            model_name='employer',
            name='zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Zip Code or Postal Code'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='contributions_and_accomplishments',
            field=models.TextField(blank=True, verbose_name='Contributions and Accomplishments'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_day',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='End Day'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_month',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='End Month'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='end_year',
            field=models.PositiveIntegerField(null=True, verbose_name='End Year'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='ending_pay',
            field=models.CharField(max_length=50, verbose_name='Ending Pay'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='hours_per_week',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Hours per Week'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='is_current_position',
            field=models.BooleanField(default=True, verbose_name='Current Position?'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='position',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobHistory.Position', verbose_name='Position'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_day',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Start Day'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_month',
            field=models.PositiveSmallIntegerField(null=True, verbose_name='Start Month'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='start_year',
            field=models.PositiveIntegerField(verbose_name='Start Year'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='starting_pay',
            field=models.CharField(max_length=50, verbose_name='Starting Pay'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_city',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work City'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work Country'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work County or Parish'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='Work State or Province'),
        ),
        migrations.AlterField(
            model_name='jobtimeperiod',
            name='work_zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Work Zip Code or Postal Code'),
        ),
        migrations.AlterField(
            model_name='position',
            name='can_contact',
            field=models.BooleanField(verbose_name='Can Contact?'),
        ),
        migrations.AlterField(
            model_name='position',
            name='contributions_and_accomplishments',
            field=models.TextField(blank=True, verbose_name='Contributions and Accomplishments'),
        ),
        migrations.AlterField(
            model_name='position',
            name='employer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='jobHistory.Employer', verbose_name='Employer'),
        ),
        migrations.AlterField(
            model_name='position',
            name='responsibilities',
            field=models.TextField(blank=True, verbose_name='Responsibilities'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_city',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor City'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_country',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor Country'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_county_or_parish',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor County or Parish'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_email',
            field=models.EmailField(blank=True, max_length=254, verbose_name='Supervisor Email'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_given_name',
            field=models.CharField(max_length=200, verbose_name='Supervisor Given Name'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_middle_name',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor Middle Name'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_name_prefix',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Name Prefix'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_name_suffix',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Name Suffix'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_phone',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Phone'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_state_or_province',
            field=models.CharField(blank=True, max_length=200, verbose_name='Supervisor State or Province'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_surname',
            field=models.CharField(max_length=200, verbose_name='Supervisor Surname'),
        ),
        migrations.AlterField(
            model_name='position',
            name='supervisor_zip_or_postal_code',
            field=models.CharField(blank=True, max_length=50, verbose_name='Supervisor Zip Code or Postal Code'),
        ),
        migrations.AlterField(
            model_name='position',
            name='title',
            field=models.CharField(max_length=200, verbose_name='Title'),
        ),
    ]
|
import pymht.tracker as tomht
import pymht.utils.helpFunctions as hpf
import xml.etree.ElementTree as ET
from pymht.utils.xmlDefinitions import *
from pysimulator.scenarios.defaults import *
import os
import datetime
def runPreinitializedVariations(scenario, path, pdList, lambdaphiList, nList, nMonteCarlo,
                                baseSeed, **kwargs):
    """Run Monte Carlo simulations for every (P_d, lambda_phi, N) variation
    with pre-initialized targets, storing results as XML at *path*.

    Results are merged into an existing XML file when one parses cleanly;
    pass overwrite=True in kwargs to discard previous results.  The file is
    only rewritten (and the old file renamed aside) when something changed.
    """
    changes = False
    simList = scenario.getSimList()
    # Load previous results if the file exists and parses; otherwise start fresh.
    try:
        scenarioElement = ET.parse(path).getroot()
    except Exception:
        scenarioElement = None
    if kwargs.get('overwrite', False) or (scenarioElement is None):
        scenarioElement = ET.Element(scenarioTag, attrib={nameTag: scenario.name})
        simList.storeGroundTruth(scenarioElement, scenario)
        scenario.storeScenarioSettings(scenarioElement)
        changes = True
    # Find (or create) the <variations> element flagged as preinitialized.
    variationsElement = scenarioElement.find('.{0:}[@{1:}="True"]'.format(variationsTag, preinitializedTag))
    if variationsElement is None:
        changes = True
        variationsElement = ET.SubElement(
            scenarioElement, variationsTag, attrib={preinitializedTag: "True"})
    for P_d in pdList:
        for lambda_phi in lambdaphiList:
            for N in nList:
                variationDict = {pdTag: P_d,
                                 lambdaphiTag: lambda_phi,
                                 nTag: N,
                                 'localClutter': True,
                                 'lambda_local': scenario.lambda_local}
                # Reuse the stored <variation> element for this parameter
                # triple if one already exists.
                variationElement = variationsElement.find(
                    '.{0:}[@{1:}="{4:}"][@{2:}="{5:}"][@{3:}="{6:}"]'.format(
                        variationTag, nTag, pdTag, lambdaphiTag, N, P_d, lambda_phi))
                if variationElement is None:
                    changes = True
                    variationElement = ET.SubElement(
                        variationsElement, variationTag,
                        attrib={str(k): str(v) for k, v in variationDict.items()})
                print(scenario.name, variationElement.attrib, nMonteCarlo, True)
                # Run any missing Monte Carlo iterations for this variation.
                changes |= runMonteCarloSimulations(
                    variationElement, scenario, simList, nMonteCarlo, baseSeed,
                    variationDict, True, **kwargs)
    if changes:
        # Keep the previous result file around under a timestamped name.
        _renameOldFiles(path)
        hpf.writeElementToFile(path, scenarioElement)
def runInitializationVariations(scenario, path, pdList, lambdaphiList, M_N_list, nMonteCarlo,
                                baseSeed, **kwargs):
    """Run Monte Carlo simulations for every (P_d, lambda_phi, (M, N))
    track-initialization variation, storing results as XML at *path*.

    Results are merged into an existing XML file when one parses cleanly;
    pass overwrite=True in kwargs to discard previous results.  The file is
    only rewritten (and the old file renamed aside) when something changed.
    """
    # Bug fix: start with no pending changes.  This was initialized to True,
    # which forced a rewrite/rename of the result file on every call, unlike
    # the matching runPreinitializedVariations (the redundant
    # ``changes = True`` inside the overwrite branch shows the intent).
    changes = False
    simList = scenario.getSimList()
    # Load previous results if the file exists and parses; otherwise start fresh.
    try:
        scenarioElement = ET.parse(path).getroot()
    except Exception:
        scenarioElement = None
    if kwargs.get('overwrite', False) or (scenarioElement is None):
        changes = True
        scenarioElement = ET.Element(scenarioTag, attrib={nameTag: scenario.name})
        simList.storeGroundTruth(scenarioElement, scenario)
        scenario.storeScenarioSettings(scenarioElement)
    # Find (or create) the <variations> element for non-preinitialized runs.
    variationsElement = scenarioElement.find(
        '.{0:}[@{1:}="False"]'.format(variationsTag, preinitializedTag))
    if variationsElement is None:
        changes = True
        variationsElement = ET.SubElement(
            scenarioElement, variationsTag, attrib={preinitializedTag: "False"})
    for P_d in pdList:
        for lambda_phi in lambdaphiList:
            for (M, N) in M_N_list:
                variationDict = {pdTag: P_d,
                                 lambdaphiTag: lambda_phi,
                                 nTag: 6,
                                 mInitTag: M,
                                 nInitTag: N}
                # Reuse the stored <variation> element for this parameter
                # combination if one already exists.
                variationElement = variationsElement.find(
                    '.{0:}[@{1:}="{5:}"][@{2:}="{6:}"][@{3:}="{7:}"][@{4:}="{8:}"]'.format(
                        variationTag, mInitTag, nInitTag, pdTag, lambdaphiTag, M, N, P_d, lambda_phi))
                if variationElement is None:
                    changes = True
                    variationElement = ET.SubElement(
                        variationsElement, variationTag,
                        attrib={str(k): str(v) for k, v in variationDict.items()})
                print(scenario.name, variationElement.attrib, nMonteCarlo, False)
                # Run any missing Monte Carlo iterations for this variation.
                changes |= runMonteCarloSimulations(
                    variationElement, scenario, simList, nMonteCarlo, baseSeed,
                    variationDict, False, **kwargs)
    if changes:
        # Keep the previous result file around under a timestamped name.
        _renameOldFiles(path)
        hpf.writeElementToFile(path, scenarioElement)
def _renameOldFiles(path):
if os.path.exists(path):
modTime = os.path.getmtime(path)
timeString = datetime.datetime.fromtimestamp(modTime).strftime("%d.%m.%Y %H.%M")
head, tail = os.path.split(path)
filename, extension = os.path.splitext(tail)
newPath = os.path.join(head, filename + "_" + timeString + extension)
os.rename(path, newPath)
def runMonteCarloSimulations(variationElement, scenario, simList, nSim, baseSeed,
                             variationDict, preInitialized, **kwargs):
    """Run up to *nSim* Monte Carlo iterations for one parameter variation.

    Iterations already stored under *variationElement* are skipped, so the
    function is resumable.  Returns True when the XML tree changed (a new
    run was added or tracker settings were stored for the first time).
    """
    changes = False
    P_d = variationDict[pdTag]
    lambda_phi = variationDict[lambdaphiTag]
    N = variationDict[nTag]
    # Positional tracker arguments; indices 2 and 3 are lambda_phi and
    # lambda_nu respectively (see storeTrackerData).
    trackerArgs = (scenario.model,
                   scenario.radarPeriod,
                   lambda_phi,
                   scenario.lambda_nu)
    trackerKwargs = {'maxSpeedMS': maxSpeedMS,
                     'M_required': variationDict.get(mInitTag, M_required),
                     'N_checks': variationDict.get(nInitTag, N_checks),
                     'position': scenario.p0,
                     'radarRange': scenario.radarRange,
                     'eta2': eta2,
                     'N': N,
                     'P_d': P_d,
                     'localClutter': variationDict.get('localClutter', False),
                     'lambda_local': variationDict.get('lambda_local', 2),
                     'dynamicWindow': False}
    # Store the tracker configuration once per variation.
    trackersettingsElement = variationElement.find(trackerSettingsTag)
    if trackersettingsElement is None:
        changes = True
        storeTrackerData(variationElement, trackerArgs, trackerKwargs)
    if kwargs.get('printLog', True):
        print("Running scenario iteration", end="", flush=True)
    for i in range(nSim):
        # Skip iterations that already have a stored <run> element.
        runElement = variationElement.find(
            '{0:}[@{1:}="{2:}"]'.format(runTag, iterationTag, i + 1))
        if runElement is not None:
            continue
        if kwargs.get('printLog', True):
            print('.', end="", flush=True)
        # Unique but reproducible seed per iteration.
        seed = baseSeed + i
        # NOTE(review): localClutter is forced to False here even when
        # variationDict requests local clutter -- confirm this is intended.
        scanList, aisList = scenario.getSimulatedScenario(seed, simList, lambda_phi, P_d,
                                                          localClutter=False)
        changes = True
        try:
            runSimulation(variationElement, simList, scanList, aisList, trackerArgs,
                          trackerKwargs, preInitialized, seed=seed, **kwargs)
        except Exception as e:
            # Log the failing configuration and continue with the next seed.
            # NOTE(review): traceback is imported but never used -- presumably
            # the stack trace was meant to be printed here as well.
            import traceback
            print("Scenario:", scenario.name)
            print("preInitialized", preInitialized)
            print("variationDict", variationDict)
            print("Iteration", i)
            print("Seed", seed)
            print(e)
    if kwargs.get('printLog', True):
        print()
    return changes
def runSimulation(variationElement, simList, scanList, aisList, trackerArgs,
                  trackerKwargs, preInitialized, **kwargs):
    """Run a single tracker pass over *scanList* and store the result under
    *variationElement*.

    Entries in kwargs override trackerKwargs for tracker construction and
    are also forwarded to the tracker's result storage.
    """
    options = dict(trackerKwargs)
    options.update(kwargs)
    tracker = tomht.Tracker(*trackerArgs, **options)
    if preInitialized:
        # Seed the tracker with ground-truth targets and skip the first scan.
        tracker.preInitialize(simList)
        remainingScans = scanList[1:]
    else:
        remainingScans = scanList
    for measurementList in remainingScans:
        aisPredictions = aisList.getMeasurements(measurementList.time)
        tracker.addMeasurementList(measurementList, aisPredictions,
                                   pruneSimilar=not preInitialized)
    tracker._storeRun(variationElement, preInitialized, **kwargs)
def storeTrackerData(variationElement, trackerArgs, trackerKwargs):
    """Record the tracker configuration as a child element of *variationElement*.

    Each keyword setting becomes a sub-element; the positional clutter and
    birth intensities (trackerArgs[2] and trackerArgs[3]) are stored under
    the names "lambda_phi" and "lambda_nu".
    """
    settings = ET.SubElement(variationElement, trackerSettingsTag)
    for key, value in trackerKwargs.items():
        child = ET.SubElement(settings, str(key))
        child.text = str(value)
    for name, value in (("lambda_phi", trackerArgs[2]),
                        ("lambda_nu", trackerArgs[3])):
        ET.SubElement(settings, name).text = str(value)
|
"""Bounding volume implementation
Based on code from:
http://www.markmorley.com/opengl/frustumculling.html
Notes regarding general implementation:
BoundingVolume objects are generally created by Grouping
and/or Shape nodes (or rather, the geometry nodes of Shape
nodes). Grouping nodes are able to create union
BoundingVolume objects from their children's bounding
volumes.
The RenderPass object (for visiting rendering passes)
defines a children method which will use the bounding
volumes to filter out those children which are not visible.
The first attempt to do that filtering will recursively
generate and cache the bounding volumes.
Setting your contextDefinition's debugBBox flag to True
will cause rendering of the bounding boxes when using
the Flat renderer.
"""
from OpenGLContext.arrays import *
from OpenGL.GL import *
from OpenGL.GL.ARB.occlusion_query import *
from OpenGL.GL.HP.occlusion_test import *
from OpenGL.GLUT import glutSolidCube
from vrml.vrml97 import nodetypes
from vrml import node, field, protofunctions, cache
from OpenGLContext import frustum, utilities, doinchildmatrix
from OpenGL.extensions import alternate
import exceptions
import logging
log = logging.getLogger( __name__ )
# Prefer the core occlusion-query entry points, falling back to the ARB
# variants when the core versions are unavailable.
glBeginQuery = alternate( glBeginQuery, glBeginQueryARB )
glDeleteQueries = alternate( glDeleteQueries, glDeleteQueriesARB )
glEndQuery = alternate( glEndQuery, glEndQueryARB )
glGenQueries = alternate( glGenQueries, glGenQueriesARB )
glGetQueryObjectiv = alternate( glGetQueryObjectiv, glGetQueryObjectivARB )
# Bug fix: the unsigned variant previously passed glGetQueryObjectiv as its
# first alternative (copy-paste error), silently using the signed entry point.
glGetQueryObjectuiv = alternate( glGetQueryObjectuiv, glGetQueryObjectuivARB )
try:
    # Optional C extension providing accelerated frustum culling.
    from vrml.arrays import frustcullaccel
except ImportError:
    frustcullaccel = None
# Raised by UnboundedVolume.getPoints(); parents computing unions catch it.
# (Subclasses the Python 2 'exceptions' module's ValueError.)
class UnboundedObject( exceptions.ValueError ):
    """Error raised when an object does not support bounding volumes"""
class BoundingVolume( node.Node ):
    """Base class for all bounding volumes

    BoundingVolume is both a base class and a functional
    bounding volume which is never considered visible.
    Geometry which wishes to never be visible can return
    a BoundingVolume as their boundingVolume.
    """
    def visible (self, frustum, matrix=None, occlusion=0, mode=None):
        """Test whether volume is within given frustum

        The base class always reports invisible (returns 0).
        """
        return 0
    def getPoints( self, ):
        """Get the points which comprise the volume

        The base class has no extent, so there are no points.
        """
        return ()
class UnboundedVolume( BoundingVolume ):
    """A bounding volume which is always visible

    Opposite of a BoundingVolume, geometry can return
    an UnboundedVolume if they always wish to be visible.
    """
    def visible( self, frustum, matrix=None, occlusion=0, mode=None ):
        """Test whether volume is within given frustum

        We don't actually do anything here, just return true
        """
        return 1
    def getPoints( self, ):
        """Signal to parents that we require unbounded operation"""
        # An unbounded volume has no finite extrema, so a union containing
        # one cannot be computed; parents catch this to remain unbounded.
        raise UnboundedObject( """Attempt to get union of an unbounded volume""" )
class BoundingBox( BoundingVolume ):
    """Generic representation of a bounding box

    A bounding box is a bounding volume which is implemented
    as a set of points which can be tested against a frustum.
    Although at the moment we don't use the distinction between
    BoundingBox and AABoundingBox, the BoundingBox may
    eventually be used to provide specialized support for
    bitmap Text nodes (which should only need four points
    to determine their visibility, rather than eight).
    """
    # Homogeneous (x, y, z, w) corner points; empty until calculated.
    points = field.newField( 'points', 'MFVec4f', 0, [])
    if frustcullaccel:
        # We have the C extension module, use it
        def visible( self, frust, matrix=None, occlusion=0, mode=None ):
            """Determine whether this bounding-box is visible in frustum

            frustum -- Frustum object holding the clipping planes
                for the view
            matrix -- a matrix which transforms the local
                coordinates to the (world-space) coordinate
                system in which the frustum is defined.

            This version of the method uses the frustcullaccel
            C extension module to do the actual culling once
            the volume's points are multiplied by the matrix.
            """
            if matrix is None:
                matrix = frustum.viewingMatrix( )
            points = self.getPoints()
            # Transform local-space corners into the frustum's coordinate system.
            points = dot( points, matrix )
            culled, planeIndex = frustcullaccel.planeCull( frust.planes, points )
            return not culled
    else:
        def visible( self, frust, matrix=None, occlusion=0, mode=None ):
            """Determine whether this bounding-box is visible in frustum

            frustum -- Frustum object holding the clipping planes
                for the view
            matrix -- a matrix which transforms the local
                coordinates to the (world-space) coordinate
                system in which the frustum is defined.

            This version of the method uses a pure-python loop
            to do the actual culling once the points are
            multiplied by the matrix. (i.e. it does not use the
            frustcullaccel C extension module)
            """
            if matrix is None:
                matrix = frustum.viewingMatrix( )
            points = self.getPoints()
            points = dot( points, matrix )
            # Force w = 1 after the transform so the plane dot products below
            # include the plane's distance term exactly once.
            points[:,-1] = 1.0
            if frust:
                for plane in frust.planes:
                    foundInFront = 0
                    for point in points:
                        # Signed distance of this corner from the plane.
                        distance = sum(plane*point)
                        if distance >= 0:
                            # point is in front of plane, so:
                            # this plane can't eliminate the object
                            foundInFront = 1
                            break
                    if not foundInFront:
                        #planePoint, planeNormal = utilities.plane2PointNormal(plane)
                        # got all the way through, this plane eliminated us!
                        return 0
            else:
                log.warn(
                    """BoundingBox visible called with Null frustum""",
                )
            return 1
    def getPoints(self):
        """Return set of points to test against the frustum"""
        return self.points
    @staticmethod
    def union( boxes, matrix = None ):
        """Create BoundingBox union for the given bounding boxes

        This uses the getPoints method of the given
        bounding boxes to retrieve the extrema points
        which must be present in the union. It then
        calculates an Axis-Aligned bounding box shape
        taking into account the matrix given.

        It would seem somewhat more efficient here to
        use the points array directly, but that would
        have the effect of geometrically increasing the
        number of checks for each succeeding parent,
        while creating the axis-aligned bounding box
        trades more work here to keep the constant-time
        operation for the "visible" check.

        Group nodes pass a None as the matrix, while
        Transform nodes should pass their individual
        transformation matrix (not the cumulative matrix).

        boxes -- list of AABoundingBox instances
        matrix -- if specified, the matrix to be applied
            to the box coordinates before calculating the
            resulting axis-aligned bounding box.
        """
        points = []
        for box in boxes:
            if box:
                # NOTE: the local name shadows the builtin ``set``.
                set = box.getPoints()
                if len(set) > 0:
                    points.append( set )
        if not points:
            # No child contributed any extent: empty, never-visible volume.
            return BoundingVolume()
        points = tuple(points)
        points = concatenate(points)
        if matrix is not None:
            points = dot( points, matrix )
        return AABoundingBox.fromPoints( points )
    def debugRender( self ):
        """Render this bounding box for debugging mode

        XXX Should really use points for rendering GL_POINTS
        geometry for the base class when it gets used.
        """
class AABoundingBox( BoundingBox ):
    """Representation of an axis-aligned bounding box

    The axis-aligned bounding box defines the entire
    bounding box with two pieces of data, a center position
    and a size vector. Other than this, it is just a
    point-based bounding box implementation.
    """
    # Box midpoint in local coordinates.
    center = field.newField( 'center', 'SFVec3f', 0, (0,0,0))
    # Full edge lengths along each axis.
    size = field.newField( 'size','SFVec3f',0,(0,0,0))
    # Cached GL occlusion-query id (0 until allocated).
    query = field.newField( 'query', 'SFInt32',0,0)
    def visible( self, frust, matrix=None, occlusion=0, mode=None ):
        """Allow for occlusion-checking as well as frustum culling"""
        result = super( AABoundingBox, self).visible( frust, matrix, occlusion, mode )
        # NOTE(review): the literal False disables the occlusion path
        # entirely, so occlusionVisible is currently dead code -- confirm
        # it works before re-enabling.
        if result and False and occlusion:
            return self.occlusionVisible( mode=mode )
        return result
    def getPoints(self):
        """Return set of points to test against the frustum

        If self.points field is not set, will calculate the
        points from the center and size fields.
        """
        if not len( self.points ):
            cx,cy,cz = self.center
            sx,sy,sz = self.size
            # Convert full sizes to half-extents around the center.
            sx /= 2.0
            sy /= 2.0
            sz /= 2.0
            # All eight corners as homogeneous (x, y, z, 1) points.
            self.points = array([
                (x,y,z,1)
                for x in (cx-sx,cx+sx)
                for y in (cy-sy,cy+sy)
                for z in (cz-sz,cz+sz)
            ],'f')
        return self.points
    def debugRender( self ):
        """Render this bounding box for debugging mode

        Draws the bounding box as a set of lines in the
        current OpenGL matrix (in OpenGLContext's visiting
        pattern, this is the matrix of the parent of the
        node which is determining whether to cull the node
        to which this bounding box is attached)
        """
        # This code is not OpenGL 3.1 compatible
        points = self.getPoints()
        glDisable(GL_LIGHTING)
        try:
            glColor3f( 1,0,0)
            # Two quad outlines: the x-min face and the x-max face
            # (corner ordering follows getPoints' comprehension order).
            for set in [
                [points[0],points[1],points[3],points[2]],
                [points[4],points[5],points[7],points[6]],
            ]:
                glBegin( GL_LINE_LOOP )
                try:
                    for point in set:
                        glVertex3dv( point[:3])
                finally:
                    glEnd()
            # Connect the two faces with four edges.
            glBegin( GL_LINES )
            try:
                for i in range(4):
                    glVertex3dv( points[i][:3])
                    glVertex3dv( points[i+4][:3])
            finally:
                glEnd()
        finally:
            glEnable( GL_LIGHTING)
    def occlusionVisible( self, mode=None ):
        """Render this bounding volume for an occlusion test

        Requires one of:
            OpenGL 2.x
            ARB_occlusion_query
            GL_HP_occlusion_test
        """
        # NOTE(review): the literal False forces the HP fallback branch
        # below even when query objects are available -- confirm intent.
        if (False and glGenQueries):
            query = self.query
            if not self.query:
                self.query = query = glGenQueries(1)
            glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
            try:
                glDepthMask(GL_FALSE);
                glBeginQuery(GL_SAMPLES_PASSED, query);
                doinchildmatrix.doInChildMatrix( self._occlusionRender )
            finally:
                glEndQuery(GL_SAMPLES_PASSED);
            # TODO: need to actually retrieve the value, be we want to
            # finish all child queries at this level before checking that
            # this particular query passed fragments or not...
        else:
            # This code is not OpenGL 3.1 compatible
            # NOTE(review): ``occlusion_test`` is not a bound name here (only
            # its *contents* were star-imported at module top), so this
            # branch likely raises NameError if ever reached -- verify.
            glDepthMask(GL_FALSE)
            try:
                try:
                    glDisable(GL_LIGHTING)
                    try:
                        glEnable(occlusion_test.GL_OCCLUSION_TEST_HP)
                        try:
                            doinchildmatrix.doInChildMatrix( self._occlusionRender )
                        finally:
                            glDisable(occlusion_test.GL_OCCLUSION_TEST_HP)
                    finally:
                        glEnable(GL_LIGHTING)
                finally:
                    glColorMask(GL_TRUE,GL_TRUE,GL_TRUE,GL_TRUE)
            finally:
                glDepthMask(GL_TRUE)
            result = glGetBooleanv(occlusion_test.GL_OCCLUSION_TEST_RESULT_HP)
            return result
    def occlusionRender( self ):
        """Render this box to screen"""
        doinchildmatrix.doInChildMatrix( self._occlusionRender )
    def _occlusionRender(self):
        """Do the low-level rendering of the occlusion volume"""
        # Draw a unit cube scaled/translated to cover this box.
        glTranslate( *self.center )
        glScale( *self.size )
        glutSolidCube( 1.0 )
    @classmethod
    def fromPoints( cls, points ):
        """Calculate from an array of points

        points -- (n, >=3) array; only the first three columns are used.
        """
        # Per-axis extrema of the point cloud.
        xes,yes,zes = points[:,0],points[:,1],points[:,2]
        maxX,maxY,maxZ = xes[argmax(xes)],yes[argmax(yes)],zes[argmax(zes)]
        minX,minY,minZ = xes[argmin(xes)],yes[argmin(yes)],zes[argmin(zes)]
        size = maxX-minX, maxY-minY, maxZ-minZ
        # Center is the midpoint of the extrema along each axis.
        return cls(
            center = (
                (size[0])/2+minX,
                (size[1])/2+minY,
                (size[2])/2+minZ,
            ),
            size = size,
        )
def volumeFromCoordinate( node ):
    """Calculate a bounding volume for a coordinate node

    This should work for all of:
        IndexedFaceSet
        IndexedLineSet
        PointSet
        IndexedPolygons

    This just takes advantage of the common features of the
    coordinate-based geometry types, ignoring the fact
    that individual pieces of geometry may not actually use
    all of the points within a volume.

    Note:
        this method is cache aware, it will return a cached
        bounding box if possible, or calculate and cache the
        bounding box before returning it.

    XXX There is a pathological case which may be encountered
        due to an optimization seen in certain VRML97
        generators. Namely, these generators will create
        an entire file with a single coordinate node with each
        individual piece of geometry indexing into that
        universal coordinate node. This would, in many designs
        provide an optimization because the coordinate node
        would never be swapped out, allowing the geometry to
        remain within the GL as the current vertex/color
        matrices. In this case, OpenGLContext will have no
        bounding volume optimization at all, as it will take
        rebounding volume of each piece of geometry to be the
        bounding volume of the entire world.
    """
    # Fast path: reuse a still-valid cached volume.
    current = getCachedVolume( node )
    if current:
        return current
    # No node, or no points: an empty (never-visible) volume.
    if (not node) or (not len(node.point)):
        volume = BoundingVolume()
    else:
        volume = AABoundingBox.fromPoints( node.point )
    if node:
        # note that even if not node.point, we
        # are dependent on that NULL node.point value
        dependencies = [ (node,'point') ]
    else:
        dependencies = []
    return cacheVolume( node, volume, dependencies )
### Abstraction/indirection for caching bounding volumes
## Because the code to cache bounding volumes is generally
## identical, we provide this set of two utility methods
## to retrieve and cache the volumes with proper dependency
## setup.
def cacheVolume( node, volume, nodeFieldPairs=()):
    """Cache bounding volume for the given node

    node -- the node associated with the volume
    volume -- the BoundingVolume object to be cached
    nodeFieldPairs -- set of (node,fieldName) tuples giving
        the dependencies for the volume. Should normally
        include the node itself. If fieldName is None
        a dependency is created on the node itself.
    """
    holder = cache.CACHE.holder( node, key="boundingVolume", data=volume )
    for depNode, fieldName in nodeFieldPairs:
        # Skip empty/null dependency nodes entirely.
        if not depNode:
            continue
        if fieldName is None:
            holder.depend( depNode, None )
        else:
            holder.depend( depNode, protofunctions.getField( depNode, fieldName ) )
    return volume
def getCachedVolume( node ):
    """Get currently-cached bounding volume for the node or None"""
    # None on a cache miss; cache.CACHE manages invalidation via the
    # dependencies registered by cacheVolume.
    return cache.CACHE.getData(node, key="boundingVolume")
|
import argparse
import sys
from webStegFS import console
# Fallback HTTP/HTTPS proxy mapping (requests-style) used when the user
# does not supply a proxy of their own.
default_proxies = {'https': 'https://165.139.149.169:3128',
                   'http': 'http://165.139.149.169:3128'}
def proxy_test(proxyL):
    """Verify Internet connectivity, then that the proxy mapping works.

    proxyL -- requests-style proxy dict ({'http': ..., 'https': ...}).
    Returns proxyL when the proxy responds, None on any failure (a message
    is printed in each failure case).
    """
    import requests
    # First check we can reach the Internet at all (no proxy).
    try:
        r = requests.get('https://google.com', timeout=5)
    except Exception:
        r = None
    # Bug fix: the status was compared with "is 200" (object identity on an
    # int); use an explicit equality check instead of assert/is.
    if r is None or r.status_code != 200:
        print("Cannot connect to Internet or Google is down!")
        return None
    # now test proxy functionality
    try:
        # Add something in here later to actually test proxy with given file
        # store. Use google for now.
        r = requests.get('http://www.sendspace.com', proxies=proxyL, timeout=5)
        if r.status_code != 200:
            raise ValueError(r.status_code)
    except Exception:
        print("Given (or default) proxy is down, or took too long to respond")
        return None
    print("Proxy is operational")
    return proxyL
def proxy_parser(proxyString=None):
    """Parse an "ip:port" string into a requests-style proxy dict.

    With no argument, tests and returns the module default proxies.
    Returns None (after printing a message) for an invalid address or port.
    """
    if proxyString is None:
        return proxy_test(default_proxies)
    import ipaddress
    try:
        proxy = proxyString.split(':')[0]
        port = proxyString.split(':')[1]
        print(ipaddress.ip_address(proxy))
        # Bug fix: the original used "int(port) > 0 & int(port) < 65536";
        # "&" binds tighter than the comparisons, so the upper bound was
        # never actually checked.  Use a chained comparison instead.
        if not 0 < int(port) < 65536:
            raise ValueError(port)
    # Also catch IndexError so a string without ":port" is rejected
    # gracefully instead of crashing.
    except (ValueError, IndexError):
        print("Invalid IP address for proxy. Enter the proxy again.")
        return None
    # Bug fix: the scheme separator "://" was missing, producing URLs like
    # "https1.2.3.4:3128" that requests cannot use.
    proxDict = {'https': 'https://' + proxy + ':' + port,
                'http': 'http://' + proxy + ':' + port}
    return proxDict  # proxy_test(proxDict)
def main():
    """Parse command-line arguments and launch the WebStegFS console.

    Builds the argument parser, resolves proxy and encryption settings,
    then either enters the interactive command loop (-c) or mounts the
    FUSE module at the requested mount point.
    """
    parser = argparse.ArgumentParser(description="Calls the main function of" +
                                     " WebStegFS")
    parser.add_argument('url', type=str, default='', nargs='?',
                        help='Specify the url to load a filesystem from')
    parser.add_argument('-c', dest='cmdloop', default=False,
                        action='store_true', help='Use the command loop to' +
                        ' access the filesystem')
    parser.add_argument('-d', dest='debug', default=False, action='store_true',
                        help='Enable debug print statements. For dev use')
    parser.add_argument('-w', dest='website', type=str, default='sendspace',
                        help='Use alternate online file stores from command' +
                        ' line')
    parser.add_argument('-p', dest="proxy", type=str, default='noproxy',
                        nargs='?', help='Use a specific proxy to access the' +
                        ' web file store. There is a default if none is ' +
                        'provided. Format is simply an IP address with port' +
                        ' at the end (e.x. 1.2.3.4:8080)')
    parser.add_argument('-e', dest='encryption', type=str, default='noencrypt',
                        nargs='?', help='Use a specific encryption.')
    parser.add_argument('-m', dest="mountpoint", type=str,
                        default='covertMount', help='Specify a foldername' +
                        ' to mount the FUSE module at')
    parser.add_argument('-s', dest='steganography', default='LSBsteg',
                        nargs='?', help='Use an alternate steganography' +
                        ' class for encoding in images')
    args = parser.parse_args()
    run = True
    # Resolve the proxy; an invalid user-supplied proxy aborts the run.
    if args.proxy == 'noproxy':
        proxy = None
    else:
        proxy = proxy_parser(args.proxy)
        if proxy is None:
            run = False
    # Resolve encryption; "-e" given with no value selects 'xor'.
    if args.encryption == 'noencrypt':
        encrypt = None
    else:
        if args.encryption is None:
            args.encryption = 'xor'  # xor is the default encryption class.
            # In the absence of an argument for -e, xor is used.
        encrypt = args.encryption
    if run:
        cons = console.Console(args.website, args.steganography,
                               encrypt, args.mountpoint, args.url,
                               proxy, args.cmdloop, args.debug)
        # Load an existing filesystem first if a URL was provided.
        if args.url:
            cons.loadfs()
        if args.cmdloop:
            cons.cmdloop()
        else:
            cons.do_mount(None)
if __name__ == '__main__':
    # Exit cleanly on Ctrl-C instead of dumping a traceback.
    try:
        main()
    except (KeyboardInterrupt):
        print("Goodbye!")
        sys.exit(0)
|
# Copyright 2020 The fingym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://becominghuman.ai/genetic-algorithm-for-reinforcement-learning-a38a5612c4dc
from collections import deque
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import model_from_json, clone_model
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Dense, Embedding, Reshape
from tensorflow.keras.optimizers import Adam
from fingym import fingym
import argparse
import numpy as np
import copy
import os
class EvoAgent():
    """Feed-forward policy agent for the evolutionary trading strategy.

    Buffers the last `time_frame` observations and maps them to one of
    seven discrete trading actions (hold / buy / sell with share-count
    multipliers 0.33, 0.5 and 0.66).
    """

    def __init__(self, env_state_dim, time_frame):
        self.state_size = env_state_dim
        self.time_frame = time_frame
        # when this is full, start making predictions
        self.state_fifo = deque(maxlen=self.time_frame)
        # 0 - do nothing
        # 1 - buy w/ multiplier .33
        # 2 - buy w/ multiplier .5
        # 3 - buy w/ multiplier .66
        # 4 - sell w/ multiplier .33
        # 5 - sell w/ multiplier .5
        # 6 - sell w/ multiplier .66
        self.action_size = 7
        self.max_shares_to_trade_at_once = 100
        self.model = self._build_compile_model()

    def deep_copy(self):
        """Return a new agent with an identical network (weights copied)."""
        new_agent = EvoAgent(self.state_size, self.time_frame)
        new_agent.model.set_weights(self.model.get_weights())
        return new_agent

    def act(self, state, model=None):
        """Choose an environment action ([op, shares]) for *state*.

        model -- network to query; defaults to this agent's own model.
            (Bug fix: callers such as run_agent invoke act(state) without a
            model, which previously raised TypeError.)
        """
        self.state_fifo.append(state)
        # do nothing for the first time frames until we can start the prediction
        if len(self.state_fifo) < self.time_frame:
            return np.zeros(2)
        if model is None:
            model = self.model
        state = np.array(list(self.state_fifo))
        state = np.reshape(state, (self.state_size * self.time_frame, 1))
        output_probabilities = model.predict_on_batch(state.T)[0]
        output_probabilities = np.array(output_probabilities)
        output_probabilities /= output_probabilities.sum()
        try:
            action = np.random.choice(range(self.action_size), 1, p=output_probabilities).item()
        except Exception:
            # Degenerate probabilities (e.g. NaN after normalisation).
            print('output probabilities: ', output_probabilities)
            # Bug fix: the original assigned np.zeros(2) as the *action
            # index* and fed the array through _nn_action_to_env_action,
            # which would raise on the ambiguous array comparison.  Return
            # the no-op environment action directly instead.
            return np.zeros(2)
        return self._nn_action_to_env_action(action)

    def save_model_weights(self, filepath):
        """Persist the network weights to *filepath*."""
        self.model.save_weights(filepath)

    def _build_compile_model(self):
        """Build the dense softmax policy network (input = stacked frames)."""
        model = Sequential()
        input_size = self.state_size * self.time_frame
        model.add(Dense(400, input_shape=(input_size,), activation='relu', kernel_initializer='he_uniform', use_bias=True, bias_initializer=keras.initializers.Constant(0.1)))
        model.add(Dense(300, activation='relu', kernel_initializer='he_uniform', use_bias=True, bias_initializer=keras.initializers.Constant(0.1)))
        model.add(Dense(self.action_size, activation='softmax', use_bias=True, bias_initializer=keras.initializers.Constant(0.1)))
        # we won't really use loss or optimizer for evolutionary agents
        model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.01))
        return model

    def _nn_action_to_env_action(self, nn_action):
        """Map a discrete network action (0..6) to an [op, shares] pair.

        op: 0 = hold, 1 = buy, 2 = sell; unknown actions map to hold.
        """
        table = {
            0: [0, 0],
            1: [1, 0.33 * self.max_shares_to_trade_at_once],
            2: [1, 0.5 * self.max_shares_to_trade_at_once],
            3: [1, 0.66 * self.max_shares_to_trade_at_once],
            4: [2, 0.33 * self.max_shares_to_trade_at_once],
            5: [2, 0.5 * self.max_shares_to_trade_at_once],
            6: [2, 0.66 * self.max_shares_to_trade_at_once],
        }
        return table.get(nn_action, [0, 0])
def create_random_agents(num_agents, state_size, time_frame):
    """Return *num_agents* independently initialized EvoAgent instances."""
    return [EvoAgent(state_size, time_frame) for _ in range(num_agents)]
def run_agent(env, agent):
    """Run *agent* through one full episode of *env*.

    Returns the final portfolio value (info['cur_val']) after the episode.
    """
    state = env.reset()
    # Removed time element from state
    state = np.delete(state, 2)
    done = False
    while not done:
        # Bug fix: act() requires a model argument; the original called
        # agent.act(state), which raises TypeError against EvoAgent.act.
        action = agent.act(state, agent.model)
        next_state, reward, done, info = env.step(action)
        state = next_state
        # Strip the time element from freshly observed states as well.
        if len(state) > agent.state_size:
            state = np.delete(state, 2)
    return info['cur_val']
def return_average_score(env, agent, runs):
    """Average episode score of *agent* over *runs* episodes.

    Prints the running cumulative score after each episode.
    """
    print('***** agent score *****')
    total = 0
    for _ in range(runs):
        total += run_agent(env, agent)
        print('score: ', total)
    return total / runs
def run_agents_n_times(env, agents, runs):
    """Return each agent's score averaged over *runs* episodes."""
    return [return_average_score(env, agent, runs) for agent in agents]
def uniform_crossover(parentA, parentB):
    """Uniform crossover of two agents' network weights.

    Each individual weight/bias entry of the child is taken from parentB
    with probability ~0.5, otherwise kept from parentA.  Returns a new
    child agent; neither parent is modified.
    """
    print('crossover')
    # Start from a copy of parentA, then randomly overwrite entries.
    child_agent = parentA.deep_copy()
    parentB_weights = parentB.model.get_weights()
    weights = child_agent.model.get_weights()
    for idx, weight in enumerate(weights):
        if len(weight.shape) == 2:
            # Dense kernel matrix: per-element coin flip.
            for i0 in range(weight.shape[0]):
                for i1 in range(weight.shape[1]):
                    if np.random.uniform() > 0.5:
                        weight[i0, i1] = parentB_weights[idx][i0, i1]
        if len(weight.shape) == 1:
            # Bias vector: per-element coin flip.
            for i0 in range(weight.shape[0]):
                if np.random.uniform() > 0.5:
                    weight[i0] = parentB_weights[idx][i0]
    child_agent.model.set_weights(weights)
    return child_agent
def mutate(agent):
    """Return a mutated copy of *agent*.

    Adds independent Gaussian noise (scaled by mutation_power) to every
    weight and bias entry; the original agent is not modified.
    """
    print('mutate')
    child_agent = agent.deep_copy()
    # Noise scale for each weight perturbation.
    mutation_power = 0.02
    weights = child_agent.model.get_weights()
    for weight in weights:
        #print('weight len: ', len(weight.shape))
        if len(weight.shape) == 2:
            # Dense kernel matrix.
            for i0 in range(weight.shape[0]):
                for i1 in range(weight.shape[1]):
                    weight[i0, i1] += mutation_power * np.random.randn()
        if len(weight.shape) == 1:
            # Bias vector.
            for i0 in range(weight.shape[0]):
                weight[i0] += mutation_power * np.random.randn()
    child_agent.model.set_weights(weights)
    #print('parent_weights: ', agent.model.get_weights())
    #print('child_weights: ', child_agent.model.get_weights())
    return child_agent
def add_elite(env, agents, sorted_parent_indexes, elite_index = None, only_consider_top_n=10):
    """Select the elite agent for the next generation.

    Re-scores the top candidates (plus the previous elite, if any) over
    three runs each, saves the winner's weights to evo_weights.h5 next to
    this script, and returns a deep copy of the winning agent.
    """
    candidate_elite_index = sorted_parent_indexes[:only_consider_top_n]
    if(elite_index is not None):
        # Give the previous elite a chance to defend its spot.
        candidate_elite_index = np.append(candidate_elite_index, [elite_index])
    top_score = None
    top_elite_index = None
    for i in candidate_elite_index:
        score = return_average_score(env, agents[i], runs=3)
        print("Score for elite i ", i, " is ", score)
        if(top_score is None):
            top_score = score
            top_elite_index = i
        elif(score > top_score):
            top_score = score
            top_elite_index = i
    print("Elite selected with index ", top_elite_index, " and score", top_score)
    # Persist the best weights so later runs can resume from them.
    dirname = os.path.dirname(__file__)
    agents[top_elite_index].save_model_weights(os.path.join(dirname, 'evo_weights.h5'))
    child_agent = agents[top_elite_index].deep_copy()
    return child_agent
def return_children(env, agents, sorted_parent_indexes, elite_index):
    """Breed the next generation of agents.

    Produces len(agents) - 1 mutated crossover children from randomly
    chosen top parents, plus one elite appended last.  Returns
    (children_agents, index_of_elite_in_children).
    """
    children_agents = []
    for i in range(len(agents) - 1):
        # Pick two parents uniformly from the top set (may be identical).
        parentA = sorted_parent_indexes[np.random.randint(len(sorted_parent_indexes))]
        parentB = sorted_parent_indexes[np.random.randint(len(sorted_parent_indexes))]
        children_agents.append(mutate(uniform_crossover(agents[parentA], agents[parentB])))
    # now add one elite
    elite_child = add_elite(env, agents, sorted_parent_indexes, elite_index)
    children_agents.append(elite_child)
    elite_index = len(children_agents) - 1
    return children_agents, elite_index
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('env_id', nargs='?', default='SPY-Daily-v0', help='Select the environment to run')
    args = parser.parse_args()
    env = fingym.make(args.env_id)

    # removing time element from state_dim
    state_size = env.state_dim - 1
    print('state_size: ', state_size)

    # Observations per prediction window and population size per generation.
    time_frame = 30
    num_agents = 400
    agents = create_random_agents(num_agents, state_size, time_frame)

    # first agent gets saved weights (resume from a previous run, if any).
    # Bug fix: removed a duplicated os.path.join call whose result was
    # discarded before the identical assignment below.
    dirname = os.path.dirname(__file__)
    weights_file = os.path.join(dirname, 'evo_weights.h5')
    if os.path.exists(weights_file):
        print('loading existing weights')
        agents[0].model.load_weights(weights_file)

    # how many top agents to consider as parents
    top_limit = 20
    # run evolution until x generations
    generations = 1000
    elite_index = None
    for generation in range(generations):
        rewards = run_agents_n_times(env, agents, 3)  # average of x times
        # sort by rewards (descending) and keep the best top_limit parents
        sorted_parent_indexes = np.argsort(rewards)[::-1][:top_limit]
        top_rewards = []
        for best_parent in sorted_parent_indexes:
            top_rewards.append(rewards[best_parent])
        print("Generation ", generation, " | Mean rewards: ", np.mean(rewards), " | Mean of top 5: ", np.mean(top_rewards[:5]))
        print("Top ", top_limit, " scores", sorted_parent_indexes)
        print("Rewards for top: ", top_rewards)
        # Breed the next generation (mutated crossovers plus one elite).
        children_agents, elite_index = return_children(env, agents, sorted_parent_indexes, elite_index)
        agents = children_agents
# Read the puzzle input; each line holds signal patterns, a "|" separator,
# then output values (see the parsing below).
with open('day-08/input.txt', 'r') as file:
    signals = file.readlines()
def find(patterns):
    """Deduce which scrambled segment pattern represents each digit 0-9.

    Digits 1, 7, 4 and 8 have unique segment counts; the remaining digits
    are pinned down by subset relations against those known patterns.
    Returns the ten patterns ordered by the digit they represent.
    """
    def pick(length, test=lambda p: True):
        # First pattern of the given length satisfying the extra predicate.
        return next(p for p in patterns if len(p) == length and test(p))

    one = pick(2)
    seven = pick(3)
    four = pick(4)
    eight = pick(7)
    nine = pick(6, lambda p: all(seg in p for seg in four))
    zero = pick(6, lambda p: p != nine and all(seg in p for seg in one))
    six = pick(6, lambda p: p != zero and p != nine)
    three = pick(5, lambda p: all(seg in p for seg in one))
    five = pick(5, lambda p: p != three and all(seg in nine for seg in p))
    two = pick(5, lambda p: p != five and p != three)
    return [zero, one, two, three, four, five, six, seven, eight, nine]
# answer1: count of outputs that are 1, 4, 7 or 8 (unique segment counts).
# answer2: sum of the decoded four-digit output values.
answer1, answer2 = 0, 0
# Canonicalize a pattern by sorting its segment letters.
sort = lambda x: "".join(sorted(x))
for signal in signals:
    # Fields 0-9 are the ten patterns; field 10 is the '|' separator; 11+ are outputs.
    patterns = signal.rstrip('\n').split(' ')[:10]
    output = signal.rstrip('\n').split(' ')[11:]
    patterns = list(map(sort, patterns))
    output = list(map(sort, output))
    patterns = find(patterns)
    answer1 += sum(1 for val in output if val in [patterns[i] for i in [1,4,7,8]])
    # patterns.index(o) maps a canonical pattern back to its digit.
    answer2 += int("".join([str(patterns.index(o)) for o in output]))
print(answer1, answer2)
|
import glob
import os
import shutil
# Retag freshly built wheels from 'linux' to 'manylinux1' so PyPI accepts them.
for whl_path in glob.glob(os.path.join(os.getcwd(), 'dist', '*.whl')):
    whl_name = os.path.basename(whl_path)
    # Wheel filename convention: dist-version-python_tag-abi_tag-platform_tag.whl
    # NOTE(review): a wheel carrying a build tag has six '-'-separated fields and
    # would make this unpacking raise ValueError — confirm none are produced.
    dist, version, python_tag, abi_tag, platform_tag = whl_name.split('-')
    if 'manylinux' in platform_tag:
        continue  # already retagged
    # platform_tag still contains the '.whl' suffix; the replace leaves it intact.
    platform_tag = platform_tag.replace('linux', 'manylinux1')
    new_whl_name = '-'.join([dist, version, python_tag, abi_tag, platform_tag])
    new_whl_path = os.path.join(os.path.dirname(whl_path), new_whl_name)
    shutil.move(whl_path, new_whl_path)
|
from scripts import Warnings, Log, Constants
from scripts.backend.database import Database
"""
All database functions return a boolean value as for whether the operation or check was successful or not
"""
def get_user_name_list():
    """Return the names of all users stored in the Users table."""
    Log.debug("Retrieving all user names.")
    Database.cursor.execute("SELECT Name FROM Users")
    rows = Database.cursor.fetchall()
    # Each fetched row is a 1-tuple; unwrap into a flat list of names.
    result = [row[0] for row in rows]
    Log.trace("Retrieved: " + str(result))
    return result
def get_all_account():
    """Return every user row from the Users table."""
    Log.debug("Retrieving all user accounts.")
    Database.cursor.execute("SELECT * FROM Users")
    rows = Database.cursor.fetchall()
    Log.trace("Retrieved: " + str(rows))
    return rows
def get_user_id(user_name: str, password: str) -> int:
    """Return the ID of the user matching the credentials, or -1 if none.

    :param user_name: account name
    :param password: account password
    :return: the user's ID, or -1 when no matching user exists
    """
    Log.debug("Getting the ID of a user named '" + user_name + "'.")
    # Parameterized query: never interpolate user input into SQL (injection risk).
    # The file already uses '?' placeholders elsewhere (see add_user).
    Database.cursor.execute(
        "SELECT ID FROM Users WHERE Name=? and Password=?", (user_name, password))
    fetched_data = Database.cursor.fetchall()
    if len(fetched_data) <= 0:
        return -1
    # Obtain the user ID
    id = int(fetched_data[0][0])
    # NOTE(review): this logs the plaintext password — consider redacting.
    Log.info(
        "Retrieved the ID '" + str(id) + "' for the user named '" + user_name + "' with password '" + password + "'")
    return id
def get_user_name(user_id: int) -> str:
    """Return the name of the user with the given ID, or -1 if not found.

    :param user_id: ID of the user to look up
    :return: the user's name; -1 when no user with that ID exists
    """
    Log.debug("Getting the name of a user with id '" + str(user_id) + "'.")
    # Parameterized query guards against SQL injection via user_id.
    Database.cursor.execute("SELECT Name FROM Users WHERE ID=?", (user_id,))
    fetched_data = Database.cursor.fetchall()
    if len(fetched_data) <= 0:
        # NOTE(review): -1 contradicts the declared str return type; kept for
        # backward compatibility with existing callers.
        return -1
    # Obtain the user name
    name = fetched_data[0][0]
    Log.info("Retrieved the name '" + name + "' for the user with id '" + str(user_id) + "'")
    return name
def exists_user_by_id(user_id: int):
    """Return True when exactly one user with the given ID exists.

    :param user_id: ID to check
    :return: True if found (or, defensively, if duplicated), False otherwise
    """
    Log.info("Checking if a user exists with the ID '" + str(user_id) + "'.")
    # Parameterized query guards against SQL injection via user_id.
    Database.cursor.execute("SELECT ID FROM Users WHERE ID=?", (user_id,))
    # Checks the number of users found with the given user ID
    result = Database.cursor.fetchall()
    num_users = len(result)
    Log.debug("Found " + str(num_users) + " users with the ID '" + str(user_id) + "'.")
    if num_users == 1:
        Log.info("Found the user with the ID '" + str(user_id) + "'.")
        return True
    elif num_users == 0:
        Log.info("Did not find the user with the ID '" + str(user_id) + "'.")
        return False
    else:
        # Duplicate IDs indicate a corrupted table; flag loudly but still report True.
        Log.warning("Found multiple occurrences of the user with the ID '" + str(user_id) + "'.")
        Warnings.not_to_reach()
        return True
def exists_user_by_name(user_name: str):
    """Return True when exactly one user with the given name exists.

    :param user_name: name to check
    :return: True if found (or, defensively, if duplicated), False otherwise
    """
    Log.info("Checking if a user exists with the name '" + user_name + "'.")
    # Parameterized query: never interpolate user input into SQL (injection risk).
    Database.cursor.execute("SELECT * FROM Users WHERE Name=?", (user_name,))
    # Checks the number of users found with the given user name
    num_users = len(Database.cursor.fetchall())
    Log.debug("Found " + str(num_users) + " users with the name '" + user_name + "'.")
    if num_users == 1:
        Log.info("Found the user with the name '" + user_name + "'.")
        return True
    elif num_users == 0:
        Log.info("Did not find the user with the name '" + user_name + "'.")
        return False
    else:
        # Duplicate names indicate a corrupted table; flag loudly but still report True.
        Log.warning("Found multiple occurrences of the user with the name '" + user_name + "'.")
        Warnings.not_to_reach()
        return True
def check_user(user_name, password):
    """Return True when exactly one user matches the given credentials.

    :param user_name: account name (must already exist)
    :param password: account password
    :return: True on a unique credential match, False when none matches
    """
    # NOTE(review): this logs the plaintext password — consider redacting.
    Log.info("Checking if the user exists with the name '" + user_name + "' and password '" + password + "'.")
    # NOTE(review): assert is stripped under 'python -O'; an explicit check
    # would be more robust, but is kept to preserve current behavior.
    assert exists_user_by_name(user_name=user_name) is True
    # Parameterized query: never interpolate credentials into SQL (injection risk).
    Database.cursor.execute(
        "SELECT ID FROM Users WHERE Name=? and Password=?", (user_name, password))
    # Checks the number of users found with the given username/password
    num_users = len(Database.cursor.fetchall())
    Log.debug("Found " + str(num_users) + " users matching the credentials.")
    if num_users == 1:
        Log.info("A user matching the credentials has been found.")
        return True
    elif num_users == 0:
        Log.info("A user matching the credentials has not been found.")
        return False
    else:
        Log.warning("Multiple users matching the credentials have been found.")
        Warnings.not_to_reach(popup=False)
        return True
def add_user(user_name, password, permission=Constants.PERMISSION_LEVELS.get(Constants.PERMISSION_PUBLIC)):
    """
    Creates and adds a new user to the database.
    :param user_name: name
    :param password: password
    :param permission: 0=basic user, 1=admin user
    :return: whether a user has been successfully committed to the database or not
    """
    Log.info("Adding a new user to the database with the credentials: Name='" + user_name +
             "', Password='" + password + "', Permission='" + str(permission) + "'")
    assert permission in Constants.PERMISSION_LEVELS.values()
    assert exists_user_by_name(user_name) is False
    # Creates a user (NULL lets the database auto-assign the ID column)
    Database.cursor.execute("INSERT INTO Users VALUES (NULL, ?,?,?)", (user_name, password, permission))
    Database.connection.commit()
    Log.debug("The insertion of the new user entry has been committed to the database.")
    # Re-query to confirm the row actually landed.
    created = exists_user_by_name(user_name) is True
    if created:
        Log.info("The user has been successfully created.")
    else:
        Log.info("The user has not been successfully created.")
    return created
def delete_user(user_id):
    """
    Deletes the user. Soft deletes all owned objects
    :param user_id: user with user_id to delete
    :return: True once the deletion has been committed
    """
    Log.debug("Deleting the user with id '" + str(user_id) + "' from the database.")
    assert exists_user_by_id(user_id=user_id) is True
    # Deletes the user (parameterized to avoid SQL injection)
    Database.cursor.execute("DELETE FROM Users WHERE ID=?", (user_id,))
    # Soft deletes all objects with owner id (replace with Constants.DATABASE_DELETED_ID)
    Database.cursor.execute("UPDATE Datasets SET ID_Owner=? WHERE ID_Owner=?",
                            (Constants.DATABASE_DELETED_ID, user_id))
    Database.cursor.execute("UPDATE Models SET ID_Owner=? WHERE ID_Owner=?",
                            (Constants.DATABASE_DELETED_ID, user_id))
    # Saves the changes
    Database.connection.commit()
    Log.info("Deleted the user with id '" + str(user_id) + "'.")
    return True
|
""" Interface to the SUNDIALS libraries """
import numpy as np
from .cvode import *
from .cvode_ls import *
from .nvector_serial import *
from .sundials_linearsolver import *
from .sundials_matrix import *
from .sundials_nvector import *
from .sundials_types import *
from .sundials_version import *
from .sunlinsol_dense import *
from .sunmatrix_dense import *
def _assert_version():
    """Assert that the loaded SUNDIALS shared library is major version 5."""
    major = c_int()
    minor = c_int()
    patch = c_int()
    # Renamed from 'len' — it shadowed the builtin of the same name.
    label_length = 8
    label = create_string_buffer(label_length)
    status = SUNDIALSGetVersionNumber(byref(major), byref(minor), byref(patch), label, label_length)
    assert status == 0
    assert major.value == 5
_assert_version()
class CVodeSolver(object):
    """ Interface to the CVode solver """

    def __init__(self,
                 nx, nz, get_x, set_x, get_dx, get_z, set_time,
                 startTime,
                 maxStep=float('inf'),
                 relativeTolerance=1e-5,
                 maxNumSteps=500):
        """
        Parameters:
            nx                 number of continuous states
            nz                 number of event indicators
            get_x              callback function to get the continuous states
            set_x              callback function to set the continuous states
            get_dx             callback function to get the derivatives
            get_z              callback function to get the event indicators
            set_time           callback function to set the time
            startTime          start time for the integration
            maxStep            maximum absolute value of step size allowed
            relativeTolerance  relative tolerance
            maxNumSteps        maximum number of internal steps to be taken by the solver in its attempt to reach tout
        """
        self.get_x = get_x
        self.set_x = set_x
        self.get_dx = get_dx
        self.get_z = get_z
        self.set_time = set_time
        # Last error reported via ehfun(): (code, module, function, message) or None.
        self.error_info = None
        # A model without continuous states is treated as purely discrete.
        self.discrete = nx == 0
        if self.discrete:
            # insert a dummy state
            self.nx = 1
        else:
            self.nx = nx
        self.nz = nz
        # Serial N_Vectors for the state and the per-state absolute tolerances.
        self.x = N_VNew_Serial(self.nx)
        self.abstol = N_VNew_Serial(self.nx)
        # Raw data pointers into the vectors above.
        self.px = NV_DATA_S(self.x)
        self.pabstol = NV_DATA_S(self.abstol)
        # initialize
        if self.discrete:
            x = np.ctypeslib.as_array(self.px, (self.nx,))
            x[:] = 1.0
        else:
            self.get_x(self.px, self.nx)
        abstol = np.ctypeslib.as_array(self.pabstol, (self.nx,))
        abstol[:] = relativeTolerance
        self.cvode_mem = CVodeCreate(CV_BDF)
        # add function pointers as members to save them from GC
        self.f_ = CVRhsFn(self.f)
        self.g_ = CVRootFn(self.g)
        self.ehfun_ = CVErrHandlerFn(self.ehfun)
        assert CVodeInit(self.cvode_mem, self.f_, startTime, self.x) == CV_SUCCESS
        assert CVodeSVtolerances(self.cvode_mem, relativeTolerance, self.abstol) == CV_SUCCESS
        assert CVodeRootInit(self.cvode_mem, self.nz, self.g_) == CV_SUCCESS
        # Dense matrix + dense linear solver for the Newton iterations.
        self.A = SUNDenseMatrix(self.nx, self.nx)
        self.LS = SUNLinSol_Dense(self.x, self.A)
        assert CVodeSetLinearSolver(self.cvode_mem, self.LS, self.A) == CV_SUCCESS
        assert CVodeSetMaxStep(self.cvode_mem, maxStep) == CV_SUCCESS
        assert CVodeSetMaxNumSteps(self.cvode_mem, maxNumSteps) == CV_SUCCESS
        assert CVodeSetNoInactiveRootWarn(self.cvode_mem) == CV_SUCCESS
        assert CVodeSetErrHandlerFn(self.cvode_mem, self.ehfun_, None) == CV_SUCCESS

    def ehfun(self, error_code, module, function, msg, user_data):
        """ Error handler function """
        # Remember the details so step() can raise a meaningful RuntimeError later.
        self.error_info = (error_code, module.decode("utf-8"), function.decode("utf-8"), msg.decode("utf-8"))

    def f(self, t, y, ydot, user_data):
        """ Right-hand-side function """
        self.set_time(t)
        if self.discrete:
            # The dummy state has a zero derivative.
            dx = np.ctypeslib.as_array(NV_DATA_S(ydot), (self.nx,))
            dx[:] = 0.0
        else:
            self.set_x(NV_DATA_S(y), self.nx)
            self.get_dx(NV_DATA_S(ydot), self.nx)
        return 0

    def g(self, t, y, gout, user_data):
        """ Root function """
        self.set_time(t)
        if not self.discrete:
            self.set_x(NV_DATA_S(y), self.nx)
        self.get_z(gout, self.nz)
        return 0

    def step(self, t, tNext):
        """Advance the integration towards tNext.

        Returns a tuple (event_occurred, roots_found, time_reached); raises
        RuntimeError with the recorded error info on a negative CVode flag.
        """
        if not self.discrete:
            # get the states
            self.get_x(self.px, self.nx)
        tret = realtype(0.0)
        # perform one step
        flag = CVode(self.cvode_mem, tNext, self.x, byref(tret), CV_NORMAL)
        if not self.discrete:
            # set the states
            self.set_x(self.px, self.nx)
        roots_found = np.zeros(shape=(self.nz,), dtype=np.int32)
        if flag == CV_ROOT_RETURN:
            p_roots_found = np.ctypeslib.as_ctypes(roots_found)
            assert CVodeGetRootInfo(self.cvode_mem, p_roots_found) == CV_SUCCESS
        elif flag < 0:
            raise RuntimeError("CVode error (code %s) in module %s, function %s: %s" % self.error_info)
        return flag > 0, roots_found, tret.value

    def reset(self, time):
        """Re-initialize the solver at the given time (e.g. after an event)."""
        if not self.discrete:
            self.get_x(self.px, self.nx)
        # reset the solver
        flag = CVodeReInit(self.cvode_mem, time, self.x)

    def __del__(self):
        # clean up
        # NOTE(review): the linear solver (LS) and matrix (A) are not freed
        # here — confirm whether SUNLinSolFree/SUNMatDestroy are needed.
        CVodeFree(byref(c_void_p(self.cvode_mem)))
|
#!/usr/bin/python
# Filename: venter_gff_snp_to_gff.py
"""
usage: %prog venter_gff ...
"""
# Output standardized GFF record for each SNP in file(s)
# ---
# This code is part of the Trait-o-matic project and is governed by its license.
import fileinput, os, sys
def main():
    """Convert Venter-style SNP GFF records to standardized GFF lines (Python 2)."""
    # return if we don't have the correct arguments
    if len(sys.argv) < 2:
        raise SystemExit(__doc__.replace("%prog", sys.argv[0]))
    for line in fileinput.input():
        l = line.strip().split('\t')
        if len(l) < 9:
            # NOTE(review): 'break' aborts all processing at the first short
            # line; 'continue' would merely skip it — confirm which is intended.
            break
        # filter on feature type and method
        if not (l[2].endswith("_SNP") and l[8].startswith("Method1")):
            continue
        out_line = "chr" + l[0]
        out_line += "\tCV\tSNP\t"
        # +1: shift the start coordinate (presumably 0-based) to 1-based GFF — TODO confirm
        out_line += str(int(l[3]) + 1) + "\t" + l[4]
        out_line += "\t.\t+\t.\t"
        # append attributes to output
        attributes = l[7].split(';')
        alleles = attributes[0].split('/')
        ref = alleles[0]
        if l[2].startswith("heterozygous"):
            # Heterozygous: report both alleles; otherwise only the variant one.
            out_line += "alleles " + "/".join(alleles)
        else:
            out_line += "alleles " + alleles[1]
        out_line += ";ref_allele " + ref
        out_line += ";RMR " + attributes[1][-1]
        out_line += ";TR " + attributes[2][-1]
        out_line += ";method " + l[8]
        out_line += ";ID " + l[1]
        print out_line
if __name__ == "__main__":
main() |
"""Contains Sensor class.
Sensor manages connection with board.
"""
import analogio
import board
class Sensor(analogio.AnalogIn):
    """Analog input pin with a convenience voltage read-out."""

    @property
    def voltage(self) -> float:
        """Return approx voltage, scaling the 16-bit reading against 3.3 V."""
        fraction = self.value / 65535
        return fraction * 3.3
|
from setuptools import setup, find_packages

# Automatically discover the package modules to ship.
PACKAGES = find_packages()

# Reuse the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(name='pyns',
      version='0.4.10',
      description='Neuroscout API wrapper',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url='http://github.com/neuroscout/pyns',
      author='Alejandro de la Vega',
      author_email='aleph4@gmail.com',
      install_requires=['requests>=2.21', 'pyjwt~=1.7.1', 'tqdm>=4.30.0'],
      license='MIT',
      packages=PACKAGES,
      classifiers=[
          "Programming Language :: Python :: 3",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
      ],
      zip_safe=False)
|
# pylint: disable=redefined-outer-name,protected-access
# pylint: disable=missing-function-docstring,missing-module-docstring,missing-class-docstring
from awesome_panel_extensions.frameworks.fast import FastTextInput
from tests.frameworks.fast.fast_test_app import create_fast_test_app
def test_constructor():
    """A freshly constructed FastTextInput exposes the documented defaults."""
    # When
    textinput = FastTextInput(name="TextInput")
    # Then
    assert textinput.name == "TextInput"
    assert textinput.value == ""
    assert textinput.appearance == "outline"
    assert textinput.autofocus is False
    assert textinput.placeholder == ""
    assert textinput.type_of_text == "text"
    assert textinput.max_length == 100
    assert textinput.min_length == 0
    assert textinput.pattern is None
    # assert textinput.size is None
    assert textinput.spellcheck is False
    assert textinput.required is False
    assert textinput.disabled is False
    assert textinput.readonly is False
# When served through Bokeh (`panel serve`), build a manual test app exposing
# the component's parameters for interactive verification.
if __name__.startswith("bokeh"):
    textinput = FastTextInput(name="Be Fast!")
    app = create_fast_test_app(
        component=textinput,
        parameters=[
            "name",
            "value",
            "disabled",
            "placeholder",
            "appearance",
            "autofocus",
            "type_of_text",
            # Some attributes do not work. See https://github.com/microsoft/fast/issues/3852
            "max_length",
            "min_length",
            # "pattern",
            # "size",
            # "spellcheck",
            # "required",
            "readonly",
        ],
    )
    app.servable()
|
from .base import *
from .signals import *
#TODO: test storage of facet labels |
from django.contrib import admin
from {{ app_name }}.models import *
# Register your models here
# For more information on this file, see
# https://docs.djangoproject.com/en/{{ docs_version }}/intro/tutorial02/
class ChoiceInline(admin.StackedInline):
    """Edit Choice objects inline on the Poll admin page."""
    model = Choice
    extra = 3  # number of blank choice forms shown by default
class PollAdmin(admin.ModelAdmin):
    """Admin configuration for Poll with inline Choice editing."""
    inlines = [ChoiceInline]
admin.site.register(Poll, PollAdmin) |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from types import NoneType
from posts.models import Post
def home(request):
    """Render the index page: newest visible post, remaining posts (paginated
    five per page), and the most-viewed post.

    Falls back to page 1 on a bad/missing page number, to the last page when
    the requested page is out of range, and renders with everything None when
    the queries themselves fail (e.g. no visible posts exist yet).
    """
    try:
        post_new = Post.objects.all().filter(visible=True).latest('created')
        posts = Post.objects.all().filter(visible=True).exclude(id__in=[post_new.id])
        popular = Post.objects.all().filter(visible=True).order_by('-views')
        popular = popular[0]
        posts = Paginator(posts, 5)
        page = request.GET.get('page')
        # Paginator.page raises PageNotAnInteger for None or non-numeric input.
        posts = posts.page(page)
    except PageNotAnInteger:
        # BUG FIX: the clause previously also listed types.NoneType, which is
        # not an exception class — matching against it raises TypeError. A
        # missing 'page' parameter already surfaces as PageNotAnInteger.
        # If page is not an integer, deliver first page.
        posts = posts.page(1)
        page = 1
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        posts = posts.page(posts.num_pages)
    except Exception:
        # Most likely there are no visible posts at all (latest()/indexing failed).
        post_new = None
        posts = None
        popular = None
    return render(request, 'index.html', { 'posts': posts, 'post_new': post_new, 'popular': popular })
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
from setuptools import setup, Extension
import versioneer
from Cython.Build import cythonize
import numpy
# Long description for PyPI: the README followed by the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

# Runtime dependencies.
requirements = [
    'Cython',
    'numpy',
    'scipy',
    'matplotlib',
    'ez_setup',
    'fipy',
    'ciabatta',
    'spatious',
    'fealty',
    'agaro',
    'metropack',
]

# Test dependencies (currently identical to the runtime requirements).
test_requirements = [
    'Cython',
    'numpy',
    'scipy',
    'matplotlib',
    'ez_setup',
    'fipy',
    'ciabatta',
    'spatious',
    'fealty',
    'agaro',
    'metropack',
]

# Cython extension for the numerics hot path.
extensions = cythonize([
    Extension("ahoy.numerics", ["ahoy/numerics.pyx"],
              include_dirs=[numpy.get_include()]),
])

# Plotting utilities exposed as command-line entry points.
console_scripts = [
    'aplot_vis = ahoy.utils.scripts:plot_vis',
    'aplot_linear_density = ahoy.utils.scripts:plot_linear_density',
    'aplot_t_uds_scalar = ahoy.utils.scripts:plot_t_uds_scalar',
    'aplot_t_uds_vector = ahoy.utils.scripts:plot_t_uds_vector',
    'aplot_t_uds_abs_x = ahoy.utils.scripts:plot_t_uds_abs_x',
    'aplot_t_Ds_scalar = ahoy.utils.scripts:plot_t_Ds_scalar',
    'aplot_t_Ds_vector = ahoy.utils.scripts:plot_t_Ds_vector',
    'aplot_t_rs_scalar = ahoy.utils.scripts:plot_t_rs_scalar',
    'aplot_t_rs_vector = ahoy.utils.scripts:plot_t_rs_vector',
    'aplot_t_rs_abs_x = ahoy.utils.scripts:plot_t_rs_abs_x',
    'aplot_t_u_nets_scalar = ahoy.utils.scripts:plot_t_u_nets_scalar',
    'aplot_t_u_nets_vector = ahoy.utils.scripts:plot_t_u_nets_vector',
    'aplot_chi_uds_x = ahoy.utils.scripts:plot_chi_uds_x',
    'aplot_pf_Ds_scalar = ahoy.utils.scripts:plot_pf_Ds_scalar',
    'aplot_pf_uds_x = ahoy.utils.scripts:plot_pf_uds_x',
    'aplot_Dr_0_Ds_scalar = ahoy.utils.scripts:plot_Dr_0_Ds_scalar',
    'aplot_p_0_Ds_scalar = ahoy.utils.scripts:plot_p_0_Ds_scalar',
]

setup(
    name='ahoy',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Agent-based simulations of active particles",
    long_description=readme + '\n\n' + history,
    author="Elliot Marsden",
    author_email='elliot.marsden@gmail.com',
    url='https://github.com/eddiejessup/ahoy',
    packages=setuptools.find_packages(exclude=['docs', 'tests']),
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='ahoy',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    ext_modules=extensions,
    entry_points={
        'console_scripts': console_scripts,
    }
)
|
"""Match products to customers to maximize suitability.
https://www.codeeval.com/open_challenges/48/
"""
from itertools import permutations, product, izip
from fractions import gcd
from fileinput import input
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
VOWELS = 'aeiouy' # yes, 'y' is a vowel. see defn.
CONSONANTS = set(ALPHABET).difference(VOWELS)
class memoize(object):
    """Cache a callable's results keyed by its positional arguments.

    Arguments must be hashable; keyword arguments are not supported.
    """

    def __init__(self, f):
        self.f = f
        self.cache = {}

    def __call__(self, *args):
        try:
            return self.cache[args]
        except KeyError:
            value = self.f(*args)
            self.cache[args] = value
            return value
@memoize
def suit(customer, product):
    """how suited is this customer product pair?

    Counts vowels in the customer name when the product name has an even
    number of letters, consonants when odd; the score is boosted by 1.5x
    when the two name lengths share a common factor greater than 1.

    >>> suit('Ira', 'iPod')
    2
    >>> suit('Ira', 'iPhone')
    3.0
    >>> suit('Ira', 'Apple')
    1
    """
    num_p = len(product)
    num_c = len(customer)
    # Odd-length product names score on consonants, even-length on vowels.
    refer = CONSONANTS if num_p % 2 else VOWELS
    output = sum(1 for c in customer.lower() if c in refer)
    if gcd(num_c, num_p) > 1:
        output *= 1.5
    return output
#
if __name__ == '__main__':
    import doctest
    doctest.testmod()
    # Each input line: "customer1,customer2,...;product1,product2,..."
    for line in input():
        line = line.strip()
        if not line:
            continue
        customers, products = line.split(';')
        customers = customers.split(',')
        products = products.split(',')
        # what is the suitability for a given p?
        keyfn = lambda e: sum(suit(c, p) for c, p in izip(*e))
        # Brute force: try every pairing of customer and product orderings
        # and keep the most suitable one (factorial cost in list lengths).
        p = max(product(permutations(customers), permutations(products)), key=keyfn)
        print keyfn(p)
|
from __future__ import absolute_import
from .ppo import PPO
from .a2c import A2C
from .acktr import ACKTR
|
import colorama
from colorama import Fore
colorama.init(autoreset=True)
def read_int(txt):
    """Prompt repeatedly until the user types a valid integer, then return it."""
    while True:
        try:
            read = int(input(txt))
        except (ValueError, TypeError, KeyboardInterrupt):
            # NOTE(review): catching KeyboardInterrupt here prevents Ctrl-C
            # from ever leaving this prompt — confirm that is intended.
            print(Fore.RED + '\nPOR FAVOR,UTILIZE APENAS NÚMEROS!')
            continue
        else:
            return read
def virgula(msg):
    """Read a decimal number, accepting a comma as the decimal separator.

    :param msg: prompt shown to the user
    :return: the parsed value as a float
    :raises ValueError: when the input is not a valid number (callers catch this)
    """
    # The former 'while not válido' loop could only ever run once, since its
    # body returned unconditionally — a straight read-and-convert is equivalent.
    entrada = input(msg).strip().replace(',', '.')
    return float(entrada)
def prova(msg):
    """Keep asking for an exam grade until a valid number in [0, 6] is given."""
    while True:
        try:
            nota = virgula(msg)
            # Re-prompt while the grade is outside the allowed range.
            while nota > 6 or nota < 0:
                print(Fore.RED + '\nATENÇÃO! O VALOR MÁXIMO DA PROVA É DE 6 PONTOS')
                nota = virgula(msg)
            return nota
        except ValueError:
            print(Fore.RED + '\nPOR FAVOR,UTILIZE APENAS NOTAS VÁLIDAS!')
def teste(msg):
    """Keep asking for a quiz grade until a valid number in [0, 1] is given."""
    while True:
        try:
            nota = virgula(msg)
            # Re-prompt while the grade is outside the allowed range.
            while nota > 1 or nota < 0:
                print(Fore.RED + '\nATENÇÃO! O VALOR MÁXIMO DO TESTE É DE 1 PONTO ')
                nota = virgula(msg)
            return nota
        except ValueError:
            print(Fore.RED + '\nPOR FAVOR,UTILIZE APENAS NOTAS VÁLIDAS!')
def atividade(msg):
    """Keep asking for an activity grade until a valid number in [0, 1] is given."""
    while True:
        try:
            nota = virgula(msg)
            # Re-prompt while the grade is outside the allowed range.
            while nota > 1 or nota < 0:
                print(Fore.RED + '\nATENÇÃO! O VALOR MÁXIMO DA ATIVIDADE 1 É DE 1 PONTO ')
                nota = virgula(msg)
            return nota
        except ValueError:
            print(Fore.RED + '\nPOR FAVOR,UTILIZE APENAS NOTAS VÁLIDAS!')
def main():
    """Show the main menu and return the option number chosen by the user."""
    print(Fore.MAGENTA + "\n-> O QUE DESEJA FAZER AGORA?\n[ 1 ] - FECHAR MAIS MÉDIAS.\n[ 2 ] - DAR UMA OLHADA NAS MÉDIAS JA FEITAS.\n[ 3 ] - APAGAR TODOS OS DADOS DO ARQUIVO.\n[ 4 ] - SAIR. ")
    option = read_int('-> Sua Opção:')
    return option
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an endpoint for getting details about a sheriffed bug."""
import json
import re
from dashboard import oauth2_decorator
from dashboard.common import request_handler
from dashboard.models import try_job
from dashboard.services import issue_tracker_service
BUGDROID = 'bugdroid1@chromium.org'
REVIEW_RE = r'(Review-Url|Reviewed-on): (https?:\/\/[\/\.\w\d]+)'
class BugDetailsHandler(request_handler.RequestHandler):
    """Gets details about a sheriffed bug."""

    def post(self):
        """POST is the same as GET for this endpoint."""
        self.get()

    @oauth2_decorator.DECORATOR.oauth_required
    def get(self):
        """Response handler to get details about a specific bug.

        Request parameters:
            bug_id: Bug ID number, as a string
        """
        # BUG FIX: the 0 was previously passed as int()'s *base* argument
        # (int(self.request.get('bug_id'), 0)), so a missing bug_id raised an
        # uncaught ValueError instead of producing the intended default of 0.
        try:
            bug_id = int(self.request.get('bug_id', 0))
        except ValueError:
            # Non-numeric input gets the same clean error as a missing id.
            bug_id = 0
        if bug_id <= 0:
            self.ReportError('Invalid or no bug id specified.')
            return
        http = oauth2_decorator.DECORATOR.http()
        self.response.out.write(json.dumps(GetBugDetails(bug_id, http)))
def GetBugDetails(bug_id, http):
    """Assemble bug details, linked review URLs, and bisect jobs for a bug."""
    bug_details = _GetDetailsFromMonorail(bug_id, http)
    # BUG FIX: on a monorail failure _GetDetailsFromMonorail returns an
    # {'error': ...} dict with no 'comments' key, which previously raised
    # KeyError below; surface the error dict to the caller instead.
    if 'error' in bug_details:
        return bug_details
    bug_details['review_urls'] = _GetLinkedRevisions(
        bug_details['comments'])
    bug_details['bisects'] = _GetBisectsForBug(bug_id)
    return bug_details
def _GetDetailsFromMonorail(bug_id, http):
    """Fetch a bug's fields and comments from the monorail issue tracker."""
    issue_tracker = issue_tracker_service.IssueTrackerService(http)
    issue = issue_tracker.GetIssue(bug_id)
    if not issue:
        return {'error': 'Failed to get bug details from monorail API'}
    issue['comments'] = issue_tracker.GetIssueComments(bug_id)
    # The owner field is optional; unwrap its name when present.
    owner_info = issue.get('owner')
    owner = owner_info.get('name') if owner_info else None
    return {
        'comments': issue['comments'],
        'owner': owner,
        'published': issue['published'],
        'state': issue['state'],
        'status': issue['status'],
        'summary': issue['summary'],
    }
def _GetLinkedRevisions(comments):
    """Parses the comments for commits linked by bugdroid."""
    review_urls = []
    for comment in comments:
        # Only bugdroid's comments carry the commit-link annotations.
        if comment['author'] != BUGDROID:
            continue
        match = re.search(REVIEW_RE, comment['content'])
        if match:
            review_urls.append(match.group(2))
    return review_urls
def _GetBisectsForBug(bug_id):
    """Summarize all bisect try jobs filed against the given bug."""
    bisects = try_job.TryJob.query(try_job.TryJob.bug_id == bug_id).fetch()
    return [{
        'status': b.status,
        'bot': b.bot,
        'buildbucket_link': '/buildbucket_job_status/%s' % b.buildbucket_job_id,
        # results_data may be None while the job has not produced results.
        'metric': (b.results_data or {}).get('metric'),
    } for b in bisects]
|
from typing import Optional, List, Tuple, Dict
import numpy as np
from banditpylib.data_pb2 import Feedback, Actions, Context
from .lilucb_heur_collaborative_utils import assign_arms, \
get_num_pulls_per_round, CentralizedLilUCBHeuristic
from .utils import MABCollaborativeFixedTimeBAIAgent, \
MABCollaborativeFixedTimeBAIMaster, MABCollaborativeFixedTimeBAILearner
class LilUCBHeuristicAgent(MABCollaborativeFixedTimeBAIAgent):
  """Agent of collaborative learning

  :param int arm_num: number of arms of the bandit
  :param int rounds: number of total rounds allowed
  :param int horizon: total number of pulls allowed
  :param Optional[str] name: alias name
  """
  # Stages within the agent
  UNASSIGNED = "unassigned"
  CENTRALIZED_LEARNING = "centralized_learning"
  LEARNING = "learning"
  COMMUNICATION = "communication"
  TERMINATION = "termination"

  def __init__(self,
               arm_num: int,
               rounds: int,
               horizon: int,
               name: Optional[str] = None):
    super().__init__(name)
    self.__arm_num = arm_num
    self.__rounds = rounds
    self.__horizon = horizon
    self.reset()

  def _name(self) -> str:
    return "lilucb_heuristic_collaborative_agent"

  def reset(self):
    # Start over: no arms assigned yet, round counter back to zero.
    self.__round_index = 0
    self.__stage = self.UNASSIGNED

  def set_input_arms(self, arms: List[int]):
    """Receive this round's arm assignment from the master."""
    if self.__stage != self.UNASSIGNED:
      raise Exception("The agent is expected in stage unassigned. Got %s." %
                      self.__stage)
    if self.__round_index == 0:
      # More than one assigned arm in round 0 means this agent runs the
      # centralized algorithm first; the pull budget per round differs.
      if len(arms) > 1:
        self.__use_centralized_algo = True
        self.__num_pulls_per_round = get_num_pulls_per_round(
            rounds=self.__rounds,
            horizon=self.__horizon,
            use_centralized_learning=True)
      else:
        self.__use_centralized_algo = False
        self.__num_pulls_per_round = get_num_pulls_per_round(
            rounds=self.__rounds,
            horizon=self.__horizon,
            use_centralized_learning=False)
    # A negative first element is a sentinel from the master: only one active
    # arm remains (carried in arms[1]).
    if arms[0] < 0:
      # Terminate since there is only one active arm
      self.__best_arm = arms[1]
      self.__stage = self.TERMINATION
      return
    self.__assigned_arms = arms
    # Maintain empirical information of assigned arms: arm_id -> (mean, pulls)
    self.__assigned_arm_info: Dict[int, Tuple[float, int]] = {}
    for arm_id in arms:
      self.__assigned_arm_info[arm_id] = (0.0, 0)
    if self.__round_index == (self.__rounds - 1):
      # Last round
      self.__best_arm = arms[0]
      self.__stage = self.TERMINATION
    else:
      if self.__round_index == 0 and self.__use_centralized_algo:
        # Confidence of 0.99 suggested in the paper
        self.__central_algo = CentralizedLilUCBHeuristic(
            self.__arm_num, 0.99, np.array(arms))
        self.__central_algo.reset()
        self.__stage = self.CENTRALIZED_LEARNING
      else:
        if len(self.__assigned_arms) > 1:
          raise Exception("Got more than 1 arm in stage learning.")
        self.__arm_to_broadcast = arms[0]
        self.__stage = self.LEARNING

  def actions(self, context: Context = None) -> Actions:
    """Return the pulls to execute (or WAIT/STOP) for the current stage."""
    if self.__stage == self.UNASSIGNED:
      raise Exception("%s: I can\'t act in stage unassigned." % self.name)
    if self.__stage == self.CENTRALIZED_LEARNING:
      if self.__round_index > 0:
        raise Exception("Expected centralized learning in round 0. Got %d." %
                        self.__round_index)
      if self.__central_algo.get_total_pulls() >= self.__num_pulls_per_round[0]:
        # Early stop the centralized algorithm when it uses more than horizon
        # / 2 pulls.
        self.__stage = self.LEARNING
        self.__arm_to_broadcast = np.random.choice(self.__assigned_arms)
        self.__round_index += 1
        return self.actions()
      if len(self.__assigned_arms) == 1:
        # Nothing to explore centrally with a single arm.
        self.__stage = self.LEARNING
        self.__arm_to_broadcast = self.__assigned_arms[0]
        self.__round_index += 1
        return self.actions()
      central_algo_actions = self.__central_algo.actions()
      if not central_algo_actions.arm_pulls:
        # Centralized algorithm terminates before using up horizon / 2 pulls
        self.__stage = self.LEARNING
        self.__arm_to_broadcast = self.__central_algo.best_arm
        self.__round_index += 1
        return self.actions()
      return central_algo_actions
    elif self.__stage == self.LEARNING:
      # Pull the single broadcast arm for this round's whole budget.
      actions = Actions()
      arm_pull = actions.arm_pulls.add()
      arm_pull.arm.id = self.__arm_to_broadcast
      arm_pull.times = self.__num_pulls_per_round[self.__round_index]
      return actions
    elif self.__stage == self.COMMUNICATION:
      actions = Actions()
      actions.state = Actions.WAIT
      return actions
    else:
      # self.__stage == self.TERMINATION
      actions = Actions()
      actions.state = Actions.STOP
      return actions

  def update(self, feedback: Feedback):
    """Fold the observed rewards into the per-arm (mean, count) statistics."""
    if self.__stage not in [self.CENTRALIZED_LEARNING, self.LEARNING]:
      raise Exception("%s: I can\'t do update in stage not learning." %
                      self.name)
    for arm_feedback in feedback.arm_feedbacks:
      # Merge new rewards into the running (mean, count) pair.
      old_arm_info = self.__assigned_arm_info[arm_feedback.arm.id]
      new_arm_info = (
          (old_arm_info[0] * old_arm_info[1] + sum(arm_feedback.rewards)) /
          (old_arm_info[1] + len(arm_feedback.rewards)),
          old_arm_info[1] + len(arm_feedback.rewards))
      self.__assigned_arm_info[arm_feedback.arm.id] = new_arm_info
    if self.__stage == self.CENTRALIZED_LEARNING:
      self.__central_algo.update(feedback)
    else:
      # self.__stage == self.LEARNING
      self.__stage = self.COMMUNICATION

  @property
  def best_arm(self) -> int:
    # Only meaningful once the agent has terminated.
    if self.__stage != self.TERMINATION:
      raise Exception('%s: I don\'t have an answer yet.' % self.name)
    return self.__best_arm

  def broadcast(self) -> Dict[int, Tuple[float, int]]:
    """Report the broadcast arm's empirical (mean, pulls) to the master."""
    if self.__stage != self.COMMUNICATION:
      raise Exception('%s: I can\'t broadcast in stage %s.' %
                      (self.name, self.__stage))
    # Complete the current round
    self.__round_index += 1
    self.__stage = self.UNASSIGNED
    message: Dict[int, Tuple[float, int]] = {}
    message[self.__arm_to_broadcast] = self.__assigned_arm_info[
        self.__arm_to_broadcast]
    return message
class LilUCBHeuristicMaster(MABCollaborativeFixedTimeBAIMaster):
  """Master of collaborative learning

  :param int arm_num: number of arms of the bandit
  :param int rounds: number of total rounds allowed
  :param int horizon: maximum number of pulls the agent can make
    (over all rounds combined)
  :param int num_agents: number of agents
  :param Optional[str] name: alias name
  """

  def __init__(self,
               arm_num: int,
               rounds: int,
               horizon: int,
               num_agents: int,
               name: Optional[str] = None):
    super().__init__(name)
    self.__arm_num = arm_num
    # Communication rounds: the final round does not communicate.
    self.__comm_rounds = rounds - 1
    self.__T = horizon
    self.__num_agents = num_agents

  def _name(self) -> str:
    return "lilucb_heuristic_collaborative_master"

  def reset(self):
    # Initially every arm is still a candidate.
    self.__active_arms = list(range(self.__arm_num))

  def initial_arm_assignment(self) -> Dict[int, List[int]]:
    """Distribute all arms across all agents for round 0."""
    return assign_arms(self.__active_arms, list(range(self.__num_agents)))

  def elimination(
      self, messages: Dict[int, Dict[int,
                                     Tuple[float,
                                           int]]]) -> Dict[int, List[int]]:
    """Aggregate agent reports, drop clearly-suboptimal arms, reassign the rest."""
    # Aggregate per-arm (empirical mean, pull count) pairs across agents.
    aggregate_messages: Dict[int, Tuple[float, int]] = {}
    for agent_id in messages.keys():
      message_from_agent = messages[agent_id]
      for arm_id in message_from_agent:
        if arm_id not in aggregate_messages:
          aggregate_messages[arm_id] = (0.0, 0)
        arm_info = message_from_agent[arm_id]
        new_pulls = aggregate_messages[arm_id][1] + arm_info[1]
        # Pull-count weighted average of the empirical means.
        new_em_mean_reward = (aggregate_messages[arm_id][0] * \
            aggregate_messages[arm_id][1] + arm_info[0] * arm_info[1]) \
            / new_pulls
        aggregate_messages[arm_id] = (new_em_mean_reward, new_pulls)
    accumulated_arm_ids = np.array(list(aggregate_messages.keys()))
    accumulated_em_mean_rewards = np.array(
        list(map(lambda x: aggregate_messages[x][0],
                 aggregate_messages.keys())))
    # Elimination: keep arms within twice the confidence radius of the leader.
    confidence_radius = np.sqrt(
        self.__comm_rounds *
        np.log(200 * self.__num_agents * self.__comm_rounds) /
        (self.__T * max(1, self.__num_agents / len(self.__active_arms))))
    highest_em_reward = np.max(accumulated_em_mean_rewards)
    self.__active_arms = list(
        accumulated_arm_ids[accumulated_em_mean_rewards >= (
            highest_em_reward - 2 * confidence_radius)])
    return assign_arms(self.__active_arms, list(messages.keys()))
class LilUCBHeuristicCollaborative(MABCollaborativeFixedTimeBAILearner):
  """Collaborative learner using lilucb heuristic as centralized policy

  :param int num_agents: number of agents
  :param int arm_num: number of arms of the bandit
  :param int rounds: number of total rounds allowed
  :param int horizon: maximum number of pulls the agent can make
    (over all rounds combined)
  :param Optional[str] name: alias name

  :raises ValueError: if ``arm_num < 2``, ``rounds < 3`` or
    ``horizon < rounds``
  """
  def __init__(self,
               num_agents: int,
               arm_num: int,
               rounds: int,
               horizon: int,
               name: Optional[str] = None):
    if arm_num <= 1:
      raise ValueError('Number of arms is expected at least 2. Got %d.' %
                       arm_num)
    # rounds == 2 is rejected by the guard below, so the smallest accepted
    # value is 3; the message previously claimed "at least 2".
    if rounds <= 2:
      raise ValueError('Number of rounds is expected at least 3. Got %d.' %
                       rounds)
    # horizon == rounds - 1 is rejected, so the smallest accepted value is
    # rounds; the message previously claimed "at least rounds minus one".
    if horizon <= rounds - 1:
      raise ValueError(
          'Horizon is expected at least total rounds. Got %d.' %
          horizon)
    super().__init__(agent=LilUCBHeuristicAgent(arm_num=arm_num,
                                                rounds=rounds,
                                                horizon=horizon),
                     master=LilUCBHeuristicMaster(arm_num=arm_num,
                                                  rounds=rounds,
                                                  horizon=horizon,
                                                  num_agents=num_agents),
                     num_agents=num_agents,
                     name=name)

  def _name(self) -> str:
    """Return the default name of this learner."""
    return 'lilucb_heuristic_collaborative'
|
#!/usr/bin/env python3
from setuptools import setup, find_packages

import papas

# Load long description from files
with open('README.rst', 'r') as readme, open('CHANGELOG.rst', 'r') as history:
    long_description = '\n' + readme.read() + '\n\n' + history.read()

# A list of strings specifying what other distributions need to be installed
# when this package is installed.
# NOTE: 'configparser>=3.5' was previously listed twice; duplicate removed.
install_requirements = [
    'PyYAML>=3.12',
    'configparser>=3.5',
    'mpi4py>=3.0',  # requires a MPI library (e.g., OpenMPI)
    'networkx>=1.11',
    'Jinja2>=2.9',
    'graphviz>=0.8.3'
]

# A list of strings specifying what other distributions need to be present
# in order for this setup script to run.
setup_requirements = [
    'setuptools>=38.5',
    'pip>=9.0',
    'wheel>=0.30'
]

# A list of strings specifying what other distributions need to be present
# for this package tests to run.
with open('tests_requirements.txt', 'r') as tests_req:
    tests_requirements = [l.strip() for l in tests_req.readlines()]

# A dictionary mapping of names of "extra" features to lists of strings
# describing those features' requirements. These requirements will not be
# automatically installed unless another package depends on them.
extras_requirements = {
    'lint': ['flake8>=3.5'],
    'reST': ['Sphinx>=1.6']
}

# For PyPI, the 'download_url' is a link to a hosted repository.
# Github hosting creates tarballs for download at
# https://github.com/{username}/{package}/archive/{tag}.tar.gz.
# To create a git tag
# git tag {papas.__version__} -m 'Adds a tag so that we can put package on PyPI'
# git push --tags origin master
setup(
    name=papas.__title__,
    version=papas.__version__,
    description=papas.__description__,
    long_description=long_description,
    keywords=papas.__keywords__,
    url=papas.__url__,
    download_url='{}/archive/{}.tar.gz'.format(papas.__url__, papas.__version__),
    author=papas.__author__,
    author_email=papas.__author_email__,
    license=papas.__license__,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Science/Research',
        'Operating System :: POSIX',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Utilities',
        'Topic :: Software Development :: Libraries'
    ],
    platforms=['Linux'],
    zip_safe=False,
    python_requires='>=3.2',
    include_package_data=True,
    packages=find_packages(exclude=['tests']),
    package_data={},
    install_requires=install_requirements,
    setup_requires=setup_requirements,
    extras_require=extras_requirements,
    tests_require=tests_requirements,
    test_suite='tests'
)
|
#!/usr/bin/env python
'''
Code for isotope diffusion.
'''
import numpy as np
# from solver import solver
from solver import transient_solve_TR
# from Gasses import gasses
# from Sites import sites
from reader import read_input, read_init
import json
import scipy.interpolate as interpolate
from constants import *
import os
import sys
class isotopeDiffusion:
    '''
    Isotope diffusion class.

    Holds the surface isotope forcing (del_s / iso_sig2_s) and the depth
    profiles (del_z / iso_sig2_z) for one isotopic species and evolves the
    profile by vapor-phase diffusion (Johnsen-style) plus downward advection.
    '''

    def __init__(self, spin, config, isotope, stp, z, modeltime=None):
        '''
        Initialize Isotope diffusion class.

        :param spin: True for a spin-up run (constant surface forcing),
            False for a main run (profiles restored from the spin file)
        :param config: model configuration dictionary (the .json config)
        :param isotope: species identifier ('d18O', 'dD', 'NoDiffusion', ...)
        :param stp: number of model time steps
        :param z: depth grid (used only for its shape here)
        :param modeltime: model time vector; required when spin is False to
            interpolate the input series onto the model time steps
        '''
        self.c = config
        self.isotope = isotope

        try:
            # Input file name is derived from the generic iso input name by
            # inserting the isotope tag before the extension.
            fn = os.path.splitext(self.c['InputFileNameIso'])
            isofile = fn[0] + '_{}'.format(self.isotope) + fn[1]
            print(isofile)
            if isotope == 'NoDiffusion':
                # 'NoDiffusion' runs still need a forcing series; reuse dD.
                isofile = fn[0] + '_dD' + fn[1]
            input_iso, input_year_iso = read_input(os.path.join(self.c['InputFileFolder'], isofile))
            if spin:
                # Spin-up: constant surface value from the input series.
                if self.c['spinup_climate_type'] == 'initial':
                    del_s0 = input_iso[0]
                elif self.c['spinup_climate_type'] == 'mean':
                    del_s0 = np.mean(input_iso)
                self.del_s = del_s0 * np.ones(stp)
                self.del_z = del_s0 * np.ones_like(z)
                self.iso_sig2_s = 0 * np.zeros(stp)
                self.iso_sig2_z = 0 * np.ones_like(z)
            elif not spin:
                # Main run: restore profiles from the spin-up results and
                # interpolate the surface series onto the model time steps.
                del_z_init = read_init(self.c['resultsFolder'], self.c['spinFileName'], 'IsoSpin_{}'.format(self.isotope))
                iso_sig2_init = read_init(self.c['resultsFolder'], self.c['spinFileName'], 'iso_sig2_{}'.format(self.isotope))
                self.del_z = del_z_init[1:]
                self.iso_sig2_z = iso_sig2_init[1:]
                Isf = interpolate.interp1d(input_year_iso, input_iso, self.c['int_type'], fill_value='extrapolate')  # interpolation function
                self.del_s = Isf(modeltime)  # isotopes interpolated to modeltime
                self.iso_sig2_s = np.zeros(stp)

        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception. The fallback behavior
        # (synthetic forcing) is unchanged.
        except Exception:
            print('No external file for surface isotope values found ({}), but you specified in the config file that isotope diffusion is on. The model will generate its own synthetic isotope data for you.'.format(self.isotope))
            print('Double check that file name is correct. New module will add d18O or dD to filename for input.')
            if spin:
                del_s0 = -50.0
                print('Currently this is -50 per mil, regardless of isotope you choose.')
                self.del_s = del_s0 * np.ones(stp)
                self.del_z = del_s0 * np.ones_like(z)
                self.iso_sig2_s = 0 * np.zeros(stp)
                self.iso_sig2_z = 0 * np.ones_like(z)
            elif not spin:
                del_z_init = read_init(self.c['resultsFolder'], self.c['spinFileName'], 'IsoSpin_{}'.format(self.isotope))
                iso_sig2_init = read_init(self.c['resultsFolder'], self.c['spinFileName'], 'iso_sig2_{}'.format(self.isotope))
                self.del_z = del_z_init[1:]
                self.iso_sig2_z = iso_sig2_init[1:]
                # Synthetic forcing: AR(1) ("red") noise around -50 per mil.
                ar1 = 0.9  # red noise memory coefficient
                std_rednoise = 2  # red noise standard deviation
                self.del_s = std_rednoise * np.random.randn(stp)  # white noise
                for x in range(1, stp):
                    self.del_s[x] = self.del_s[x - 1] * ar1 + np.random.randn()  # create red noise from white
                self.del_s = self.del_s - 50
                self.iso_sig2_s = np.zeros(stp)

        if 'site_pressure' not in self.c:
            print('site_pressure is not in .json; defaulting to 1013.25')
            self.c['site_pressure'] = 1013.25

    def isoDiff(self, IsoParams, iii):
        '''
        Isotope diffusion function

        Advances the isotope depth profile one time step: solves the
        transient diffusion equation with a species-dependent diffusivity,
        updates the diffusion-length variance, then advects the profile
        downward by one node if there is accumulation.

        :param IsoParams: dict of fields copied onto self (z, dz, rho, Tz,
            dt, drho_dt, bdot, ...)
        :param iii: current time-step index (selects surface forcing values)
        :returns: tuple (del_z, iso_sig2_z), the updated profiles
        '''
        # Copy the per-step parameters onto the instance.
        for k, v in list(IsoParams.items()):
            setattr(self, k, v)

        nz_P = len(self.z)  # number of nodes in z
        nz_fv = nz_P - 2  # number of finite volumes in z
        nt = 1  # number of time steps

        # Volume-edge grid: midpoints between nodes, padded with endpoints.
        z_edges_vec1 = self.z[0:-1] + np.diff(self.z) / 2
        z_edges_vec = np.concatenate(([self.z[0]], z_edges_vec1, [self.z[-1]]))
        z_P_vec = self.z

        ### Node positions
        phi_s = self.del_z[0]  # isotope value at surface
        phi_0 = self.del_z  # initial isotope profile

        ### Define diffusivity for each isotopic species
        ### Establish values needed for diffusivity calculation
        m = 0.018  # kg/mol; molar mass of water
        pz = 3.454e12 * np.exp(-6133 / self.Tz)  # Pa; saturation vapor pressure over ice
        alpha_18_z = 0.9722 * np.exp(11.839 / self.Tz)  # fractionation factor for 18_O
        alpha_D_z = 0.9098 * np.exp(16288 / (self.Tz**2))  # fractionation factor for D
        Po = 1.0  # reference pressure in atm
        P = self.c['site_pressure'] / 1013.25  # site pressure in atm

        ### Set diffusivity in air (units of m^2/s)
        Da = 2.11e-5 * (self.Tz / 273.15)**1.94 * (Po / P)

        ### Calculate tortuosity (Johnsen 2000); zero above the close-off
        ### density RHO_I / sqrt(b).
        invtau = np.zeros(int(len(self.dz)))
        b = 1.3  # Tortuosity parameter (Johnsen 2000)
        invtau[self.rho < RHO_I / np.sqrt(b)] = 1.0 - (b * (self.rho[self.rho < RHO_I / np.sqrt(b)] / RHO_I)**2)
        invtau[self.rho >= RHO_I / np.sqrt(b)] = 0.0

        ### Set diffusivity for each isotope
        c_vol = np.ones_like(self.rho)  # filler to satisfy the solver interface
        if ((self.isotope == '18') or (self.isotope == 'd18O')):
            Da_18 = Da / 1.0285  # account for fractionation factor for 18_O, fixed Johnsen typo
            D = m * pz * invtau * Da_18 * (1 / self.rho - 1 / RHO_I) / (R * self.Tz * alpha_18_z)
            D = D + 1.5e-15  # small floor; presumably guards against non-positive D -- TODO confirm
            self.del_z = transient_solve_TR(z_edges_vec, z_P_vec, nt, self.dt, D, phi_0, nz_P, nz_fv, phi_s, self.rho, c_vol)

        elif ((self.isotope == 'D') or (self.isotope == 'dD')):
            Da_D = Da / 1.0251  # account for fractionation factor for D, fixed Johnsen typo
            D = m * pz * invtau * Da_D * (1 / self.rho - 1 / RHO_I) / (R * self.Tz * alpha_D_z)
            D[D <= 0.0] = 1.0e-20
            self.del_z = transient_solve_TR(z_edges_vec, z_P_vec, nt, self.dt, D, phi_0, nz_P, nz_fv, phi_s, self.rho, c_vol)

        elif ((self.isotope == 'NoDiffusion') or (self.isotope == 'ND')):
            D = np.zeros_like(self.z)

        # Update diffusion-length variance (densification shrinks it,
        # diffusion grows it).
        dsig2_dt = 2 * (-1 * self.drho_dt / self.rho) * self.iso_sig2_z + 2 * D
        self.iso_sig2_z = self.iso_sig2_z + dsig2_dt * self.dt

        # Advect profile down by one node when there is accumulation.
        if self.bdot > 0.0:
            self.del_z = np.concatenate(([self.del_s[iii]], self.del_z[:-1]))
            self.iso_sig2_z = np.concatenate(([self.iso_sig2_s[iii]], self.iso_sig2_z[:-1]))
        else:
            pass

        return self.del_z, self.iso_sig2_z
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 12:05:07 2020

@author: henry

Schema definition for the GPO warehouse SQLite database.
"""
import sqlalchemy as db

engine = db.create_engine('sqlite:///../GPOWarehouse.sqlite')
metadata = db.MetaData()
connection = engine.connect()

# Run statistics (one row per harvesting run).
# NOTE: this was `db.table(...)` (lowercase TableClause), which is not
# registered with `metadata`, so create_all() would never create it.
# Changed to `db.Table` to match every other table in this module.
run_stats = db.Table('run_stats', metadata,
                     db.Column('run_id', db.Integer, primary_key=True),
                     db.Column('from_date', db.DateTime(), nullable=False),
                     db.Column('to_date', db.DateTime(), nullable=False),
                     db.Column('runtime', db.BigInteger, nullable=False),
                     )

# Overall Collection Stats
collections = db.Table('collections', metadata,
                       db.Column('collection_id', db.Integer, primary_key=True),
                       db.Column('run_id', db.Integer,
                                 db.ForeignKey("run_stats.run_id"),
                                 nullable=False),
                       db.Column('collection_code', db.String(100), nullable=False),
                       db.Column('collection_name', db.String(100), nullable=False),
                       db.Column('package_count', db.BigInteger, nullable=False),
                       db.Column('granule_count', db.BigInteger, nullable=True)
                       )

# Package overview
package = db.Table('package', metadata,
                   db.Column('package_id', db.Integer, primary_key=True),
                   db.Column('run_id', db.Integer,
                             db.ForeignKey("run_stats.run_id"),
                             nullable=False),
                   db.Column('collection_id', db.Integer,
                             db.ForeignKey('collections.collection_id'),
                             nullable=False),
                   db.Column('package_type', db.String(10), nullable=False),
                   db.Column('package_name', db.String(100), nullable=False),
                   db.Column('package_link', db.String(100), nullable=False),
                   db.Column('doc_class', db.String(20), nullable=True),
                   db.Column('title', db.String(510), nullable=False),
                   db.Column('congress', db.Integer, nullable=True)
                   )

# Package details
package_detail = db.Table('package_detail', metadata,
                          db.Column('package_detail_id', db.Integer,
                                    primary_key=True),
                          db.Column('run_id', db.Integer,
                                    db.ForeignKey("run_stats.run_id"),
                                    nullable=False),
                          db.Column('package_id', db.Integer,
                                    db.ForeignKey("package.package_id"),
                                    nullable=False),
                          db.Column('category', db.String(50), nullable=True),
                          # Was a typeless Column (NullType), which cannot be
                          # rendered by create_all(). Assumed DateTime like
                          # from_date/to_date above -- TODO confirm.
                          db.Column('date_issued', db.DateTime(), nullable=True),
                          db.Column('branch', db.String(50), nullable=True),
                          db.Column('pages', db.Integer, nullable=True),
                          db.Column('government_author_one', db.String(50), nullable=True),
                          db.Column('government_author_two', db.String(50), nullable=True),
                          db.Column('bill_type', db.String(10), nullable=True),
                          db.Column('congress', db.Integer, nullable=True),
                          db.Column('origin_chamber', db.String(10), nullable=True),
                          db.Column('current_chamber', db.String(10), nullable=True),
                          db.Column('session', db.Integer, nullable=True),
                          db.Column('bill_number', db.Integer, nullable=True),
                          db.Column('bill_version', db.String(5), nullable=True),
                          db.Column('is_appropriation', db.String(5), nullable=True),
                          db.Column('is_private', db.String(5), nullable=True),
                          db.Column('publisher', db.String(50), nullable=True),
                          )

# Committees (not yet modeled)
# Members
members = db.Table('members', metadata,
                   db.Column('member_id', db.Integer, nullable=False,
                             primary_key=True),
                   db.Column('run_id', db.Integer,
                             db.ForeignKey("run_stats.run_id"),
                             nullable=False),
                   db.Column('package_detail_id', db.Integer,
                             db.ForeignKey('package_detail.package_detail_id'),
                             nullable=False),
                   db.Column('package_id', db.Integer,
                             db.ForeignKey("package.package_id"),
                             nullable=False),
                   db.Column('bio_guide_id', db.String(10), nullable=False),
                   db.Column('gpoId', db.Integer, nullable=False),
                   db.Column('chamber', db.String(1), nullable=False),
                   db.Column('party', db.String(1), nullable=False),
                   db.Column('role', db.String(10), nullable=False),
                   db.Column('state', db.String(2), nullable=False),
                   db.Column('congress', db.Integer, nullable=False),
                   db.Column('authority_id', db.Integer, nullable=False)
                   )

metadata.create_all(engine)
# Copyright (c) 2017 Trail of Bits, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binaryninja as binja
from binaryninja.enums import (
LowLevelILOperation, MediumLevelILOperation, RegisterValueType
)
import logging
import util
from debug import *
log = logging.getLogger(util.LOGNAME)
class JMPTable(object):
  """Container for one resolved jump table.

  Keeps the relative base/offset pair as given, plus the absolute base
  address and the jump targets, all truncated to the binary's pointer width.
  """
  def __init__(self, bv, rel_base, targets, rel_off=0):
    self.rel_off = rel_off
    self.rel_base = rel_base

    # Truncate everything to the platform pointer width.
    pointer_mask = 2 ** (bv.address_size * 8) - 1
    self.base_addr = (rel_base + rel_off) & pointer_mask
    self.targets = [target & pointer_mask for target in targets]
def search_mlil_displ(il, ptr=False, _neg=False):
  """ Searches for a MLIL_CONST[_PTR] as a child of an ADD or SUB

  Args:
    il (binja.LowLevelILInstruction): Instruction to parse
    ptr (bool): Searches for CONST_PTR instead of CONST if True
    _neg (bool): Used internally to negate the final output if needed

  Returns:
    int: located value, or None if no matching constant was found
  """
  # The il may be inside a MLIL_LOAD
  if il.operation == MediumLevelILOperation.MLIL_LOAD:
    return search_mlil_displ(il.src, ptr, _neg)

  # Continue left/right for ADD/SUB only
  if il.operation in [MediumLevelILOperation.MLIL_ADD,
                      MediumLevelILOperation.MLIL_SUB]:
    # NOTE(review): for a SUB both children receive _neg=True, so a
    # constant found on the *left* of a SUB is negated too -- presumably
    # acceptable for the displacement shapes seen in practice; confirm.
    # NOTE(review): `or` also skips a legitimately-zero constant from the
    # left subtree and falls through to the right one.
    _neg = (il.operation == MediumLevelILOperation.MLIL_SUB)
    return (search_mlil_displ(il.left, ptr, _neg) or
            search_mlil_displ(il.right, ptr, _neg))

  # Terminate when we find a constant
  const_type = MediumLevelILOperation.MLIL_CONST_PTR if ptr else MediumLevelILOperation.MLIL_CONST
  if il.operation == const_type:
    return il.constant * (-1 if _neg else 1)

  # Any other operation: no constant here (implicitly returns None).
  # DEBUG('Reached end of expr: {}'.format(il))
def get_jmptable(bv, il):
  """ Gathers jump table information (if any) being referenced at the given il

  Args:
    bv (binja.BinaryView)
    il (binja.LowLevelILInstruction)

  Returns:
    JMPTable: Jump table info if found, None otherwise
  """
  # Rule out other instructions
  op = il.operation
  if op not in [LowLevelILOperation.LLIL_JUMP_TO, LowLevelILOperation.LLIL_JUMP]:
    return None

  # Ignore any jmps that have an immediate address
  if il.dest.operation in [LowLevelILOperation.LLIL_CONST,
                           LowLevelILOperation.LLIL_CONST_PTR]:
    return None

  # Ignore any jmps that have an immediate dereference (i.e. thunks)
  if il.dest.operation == LowLevelILOperation.LLIL_LOAD and \
     il.dest.src.operation in [LowLevelILOperation.LLIL_CONST,
                               LowLevelILOperation.LLIL_CONST_PTR]:
    return None

  func = il.function.source_function
  il_func = func.low_level_il

  # Gather all targets of the jump in case binja didn't lift this to LLIL_JUMP_TO
  successors = []
  tgt_table = func.get_low_level_il_at(il.address).dest.possible_values
  if tgt_table.type == RegisterValueType.LookupTableValue:
    successors.extend(tgt_table.mapping.values())

  # Should be able to find table info now
  tbl = None

  # Jumping to a register
  if il.dest.operation == LowLevelILOperation.LLIL_REG:
    # This is likely a relative offset table
    # Go up to MLIL and walk back a few instructions to find the values we need
    mlil_func = func.medium_level_il

    # (Roughly) find the MLIL instruction at this jump
    inst_idx = func.get_low_level_il_at(il.address).instr_index
    mlil_idx = il_func.get_medium_level_il_instruction_index(inst_idx)

    # Find a MLIL_LOAD with the address/offset we need
    # NOTE(review): walks back to instruction 0 and takes the *first*
    # matching SET_VAR-of-LOAD that parses; assumes the nearest such load
    # belongs to this jump -- heuristic, not guaranteed.
    while mlil_idx > 0:
      mlil = mlil_func[mlil_idx]
      if mlil.operation == MediumLevelILOperation.MLIL_SET_VAR and \
         mlil.src.operation == MediumLevelILOperation.MLIL_LOAD:
        # Possible jump table info here, try parsing it
        base = search_mlil_displ(mlil.src, ptr=True)    # CONST_PTR: table base
        offset = search_mlil_displ(mlil.src)            # CONST: relative offset

        # If it worked return the table info
        if None not in [base, offset]:
          tbl = JMPTable(bv, base, successors, offset)
          break

      # Keep walking back
      mlil_idx -= 1

  # Full jump expression
  else:
    # Parse out the base address
    base = util.search_displ_base(il.dest)
    if base is not None:
      tbl = JMPTable(bv, base, successors)

  if tbl is not None:
    DEBUG("Found jump table at {:x} with offset {:x}".format(tbl.base_addr, tbl.rel_off))
  return tbl
|
import base64
import keys
def generate_password(formatted_time):
    """Build the Lipa na M-Pesa online password for an STK push request.

    The API requires base64(BusinessShortCode + Passkey + Timestamp).

    :param formatted_time: timestamp string, format YYYYMMDDHHMMSS
    :return: base64-encoded password as a str
    """
    # NOTE: commented-out debug prints that embedded a real-looking encoded
    # credential were removed; never log this value.
    data_to_encode = keys.business_shortCode + \
        keys.lipa_na_mpesa_passkey + formatted_time
    # b64encode works on bytes; decode back to str for the JSON payload.
    return base64.b64encode(data_to_encode.encode()).decode("utf-8")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import Node, Rel, Path, Relationship
def raises(error_, func_, *args, **kwargs):
    """Return True if calling func_(*args, **kwargs) raises error_.

    Any other exception type propagates to the caller.
    """
    try:
        func_(*args, **kwargs)
    except error_:
        return True
    return False
def test_minimal_node_hydrate():
    """A payload with only a 'self' URI hydrates to a bound Node."""
    payload = {"self": "http://localhost:7474/db/data/node/0"}
    node = Node.hydrate(payload)
    assert isinstance(node, Node)
    assert node.bound
    assert node.resource.uri == payload["self"]
def test_node_hydrate_with_properties():
    """A payload with a 'data' dict yields a Node carrying those properties."""
    payload = {
        "self": "http://localhost:7474/db/data/node/0",
        "data": {"name": "Alice", "age": 33},
    }
    node = Node.hydrate(payload)
    assert isinstance(node, Node)
    assert node.properties == payload["data"]
    assert node.bound
    assert node.resource.uri == payload["self"]
def test_full_node_hydrate():
    """A complete REST service payload hydrates to a bound Node."""
    payload = {
        "extensions": {},
        "paged_traverse": "http://localhost:7474/db/data/node/0/paged/traverse/{returnType}{?pageSize,leaseTime}",
        "labels": "http://localhost:7474/db/data/node/0/labels",
        "outgoing_relationships": "http://localhost:7474/db/data/node/0/relationships/out",
        "traverse": "http://localhost:7474/db/data/node/0/traverse/{returnType}",
        "all_typed_relationships": "http://localhost:7474/db/data/node/0/relationships/all/{-list|&|types}",
        "property": "http://localhost:7474/db/data/node/0/properties/{key}",
        "all_relationships": "http://localhost:7474/db/data/node/0/relationships/all",
        "self": "http://localhost:7474/db/data/node/0",
        "outgoing_typed_relationships": "http://localhost:7474/db/data/node/0/relationships/out/{-list|&|types}",
        "properties": "http://localhost:7474/db/data/node/0/properties",
        "incoming_relationships": "http://localhost:7474/db/data/node/0/relationships/in",
        "incoming_typed_relationships": "http://localhost:7474/db/data/node/0/relationships/in/{-list|&|types}",
        "create_relationship": "http://localhost:7474/db/data/node/0/relationships",
        "data": {"name": "Alice", "age": 33},
    }
    node = Node.hydrate(payload)
    assert isinstance(node, Node)
    assert node.properties == payload["data"]
    assert node.bound
    assert node.resource.uri == payload["self"]
def test_minimal_rel_hydrate():
    """A payload with only a 'self' URI hydrates to a bound Rel."""
    payload = {"self": "http://localhost:7474/db/data/relationship/11"}
    rel = Rel.hydrate(payload)
    assert isinstance(rel, Rel)
    assert rel.bound
    assert rel.resource.uri == payload["self"]
def test_rel_hydrate_with_type():
    """A payload carrying a 'type' yields a Rel with that type."""
    payload = {
        "self": "http://localhost:7474/db/data/relationship/11",
        "type": "KNOWS",
    }
    rel = Rel.hydrate(payload)
    assert isinstance(rel, Rel)
    assert rel.type == payload["type"]
    assert rel.bound
    assert rel.resource.uri == payload["self"]
def test_rel_hydrate_with_properties():
    """A payload carrying 'data' yields a Rel with those properties."""
    payload = {
        "self": "http://localhost:7474/db/data/relationship/11",
        "data": {"since": 1999},
    }
    rel = Rel.hydrate(payload)
    assert isinstance(rel, Rel)
    assert rel.properties == payload["data"]
    assert rel.bound
    assert rel.resource.uri == payload["self"]
def test_rel_hydrate_with_type_and_properties():
    """A payload carrying both 'type' and 'data' populates both on the Rel."""
    payload = {
        "self": "http://localhost:7474/db/data/relationship/11",
        "type": "KNOWS",
        "data": {"since": 1999},
    }
    rel = Rel.hydrate(payload)
    assert isinstance(rel, Rel)
    assert rel.type == payload["type"]
    assert rel.properties == payload["data"]
    assert rel.bound
    assert rel.resource.uri == payload["self"]
def test_full_rel_hydrate():
    """A complete REST service payload hydrates to a bound, typed Rel."""
    payload = {
        "extensions": {},
        "start": "http://localhost:7474/db/data/node/23",
        "property": "http://localhost:7474/db/data/relationship/11/properties/{key}",
        "self": "http://localhost:7474/db/data/relationship/11",
        "properties": "http://localhost:7474/db/data/relationship/11/properties",
        "type": "KNOWS",
        "end": "http://localhost:7474/db/data/node/22",
        "data": {"since": 1999},
    }
    rel = Rel.hydrate(payload)
    assert isinstance(rel, Rel)
    assert rel.type == payload["type"]
    assert rel.properties == payload["data"]
    assert rel.bound
    assert rel.resource.uri == payload["self"]
def test_full_relationship_hydrate():
    """The same complete payload hydrates to a bound, typed Relationship."""
    payload = {
        "extensions": {},
        "start": "http://localhost:7474/db/data/node/23",
        "property": "http://localhost:7474/db/data/relationship/11/properties/{key}",
        "self": "http://localhost:7474/db/data/relationship/11",
        "properties": "http://localhost:7474/db/data/relationship/11/properties",
        "type": "KNOWS",
        "end": "http://localhost:7474/db/data/node/22",
        "data": {"since": 1999},
    }
    relationship = Relationship.hydrate(payload)
    assert isinstance(relationship, Relationship)
    assert relationship.type == payload["type"]
    assert relationship.properties == payload["data"]
    assert relationship.bound
    assert relationship.resource.uri == payload["self"]
# TODO: test hydration with supplied inst |
#!/usr/bin/env python
# coding=utf-8
"""
This script will aid in generating a test coverage report for PFASST++ including its examples.
A standard CPython 3.3 compatible Python interpreter with standard library support is required.
No additional modules.
Run it with argument `-h` for usage instructions.
.. moduleauthor:: Torbjörn Klatt <t.klatt@fz-juelich.de>
"""
from sys import version_info

# require at least Python 3.3 (because subprocess.DEVNULL).
# Tuple comparison replaces the old `major >= 3 and minor >= 3` check,
# which wrongly rejected any future major version (e.g. 4.0, or 3.10 was
# fine but 4.1 was not).
assert version_info >= (3, 3)
import argparse
import os
import os.path
import shutil
import subprocess as sp
import re
import logging
from logging.config import dictConfig
# Logging: single console handler, root at INFO, existing loggers preserved.
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'style': '{',
            'format': '[{levelname!s:<8s}] {message!s}'
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'default'
        }
    },
    'root': {
        'handlers': ['console'],
        'level': 'INFO'
    }
}
dictConfig(LOGGING_CONFIG)
class Options(object):
    """Mutable bag of run-wide settings filled in by setup_and_init_options().

    Attributes were previously *class-level* mutable defaults (lists shared
    by every instance); they are now set per-instance in __init__ to avoid
    shared-state bugs.
    """
    def __init__(self):
        self.coverage_dir = ""       # output directory for the HTML report
        self.build_dir = ""          # CMake build directory with profiling
        self.base_dir = ""           # project root
        self.with_examples = True    # also run/trace example tests
        self.tests = []              # general test descriptors
        self.example_tests = []      # example test descriptors
        self.tracefiles = []         # generated lcov tracefile paths
        self.final_tracefile = ""    # aggregated tracefile path

options = Options()
options.base_dir = ""
def is_lcov_available():
    """Return True iff both ``lcov`` and ``genhtml`` can be executed.

    Logs a critical message for the first missing tool and stops checking.
    """
    for tool in ('lcov', 'genhtml'):
        try:
            sp.check_call('%s --version' % tool, shell=True,
                          stdout=sp.DEVNULL, stderr=sp.DEVNULL)
        except sp.CalledProcessError:
            logging.critical("%s command not available. It is required." % tool)
            return False
    return True
def get_project_root():
    """Locate the project root (directory containing include/ and examples/).

    Walks upward from the current directory via os.chdir("..") and stores
    the result in options.base_dir. Raises RuntimeError when the filesystem
    root is reached without a match (previously this recursed forever,
    since chdir("..") at the root is a no-op).
    """
    logging.info("Determine project root directory")
    curr_dir = os.path.abspath(os.path.curdir)
    logging.debug("Trying current path: %s" % curr_dir)
    if os.access(curr_dir + "/include", os.R_OK) and os.access(curr_dir + "/examples", os.R_OK):
        logging.debug("Project root is: %s" % curr_dir)
        options.base_dir = curr_dir
    else:
        if os.path.dirname(curr_dir) == curr_dir:
            # Reached the filesystem root without finding the project.
            raise RuntimeError("Project root directory could not be determined.")
        logging.warning("Probably called from within the tools dir. "
                        "This should work but is not recommended. "
                        "Trying parent directory as project root.")
        os.chdir("..")
        get_project_root()
def setup_and_init_options():
    """Parse CLI arguments, validate the build dir and prepare the output dir.

    Side effects: fills in options.build_dir / options.coverage_dir /
    options.with_examples, may raise for an unusable build directory, and
    (re)creates the output directory. Calls exit(0) for MPI builds.
    """
    help_string = "Note:\n" \
                  "This only works for builds made with GCC and the following CMake variables:\n" \
                  " -Dpfasst_WITH_GCC_PROF=ON -Dpfasst_BUILD_TESTS=ON"

    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=help_string)
    parser.add_argument('-d', '--build-dir', required=True,
                        help="name of build directory containing a debug build with GCC and enabled profiling")
    parser.add_argument('-o', '--output', default='coverage',
                        help="output directory for generated coverage report")
    parser.add_argument('--no-examples', default=False, action='store_true',
                        help="whether to not run and include tests from the examples")
    parser.add_argument('--debug', default=False, action='store_true',
                        help="enables more verbose debugging output")
    _args = parser.parse_args()

    if _args.debug:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.debug("Debug mode enabled.")

    get_project_root()

    # W_OK (not just existence) because test runs write .gcda files there.
    if not os.access(_args.build_dir, os.W_OK):
        logging.critical("Given build path could not be found: %s" % _args.build_dir)
        raise ValueError("Given build path could not be found: %s" % _args.build_dir)
    options.build_dir = os.path.abspath(_args.build_dir)

    # Inspect the CMake cache to ensure profiling is on and MPI is off.
    if os.access(options.build_dir + "/CMakeCache.txt", os.W_OK):
        with_gcc_prof = False
        with_mpi = False
        with open(options.build_dir + "/CMakeCache.txt", 'r') as cache:
            for line in cache:
                if "pfasst_WITH_GCC_PROF:BOOL=ON" in line:
                    with_gcc_prof = True
                if "pfasst_WITH_MPI:BOOL=ON" in line:
                    with_mpi = True
        if not with_gcc_prof:
            raise RuntimeError("PFASST++ must be built with 'pfasst_WITH_GCC_PROF=ON'")
        if with_mpi:
            # MPI builds are not supported; exit cleanly rather than fail.
            logging.warning("Coverage analysis only functional for non-MPI builds")
            exit(0)

    # Create a fresh output directory (wipes a pre-existing one).
    if not os.access(_args.output, os.W_OK):
        logging.info("Output directory not found. Creating: %s" % _args.output)
        os.mkdir(_args.output)
    else:
        logging.warning("Clearing out output directory: %s" % _args.output)
        shutil.rmtree(_args.output)
        os.mkdir(_args.output)
    options.coverage_dir = os.path.abspath(_args.output)

    options.with_examples = not _args.no_examples
    if not options.with_examples:
        logging.debug("Not running and tracing tests from examples.")
def get_test_directories():
    """Scan the build tree for CMake test target directories.

    Fills options.tests and options.example_tests with descriptors of the
    form {'path': ..., 'name': ..., 'is_example': ...}.
    """
    logging.info("Looking for tests ...")
    name_pattern = re.compile(r'^.*/(?P<test_name>test_[a-zA-Z\-_]+)\.dir$')
    example_pattern = re.compile(r'^.*/tests/examples/.*$')

    for root, dirs, files in os.walk(options.build_dir + '/tests'):
        name_match = name_pattern.search(root)
        is_example = example_pattern.search(root) is not None
        if name_match is not None:
            entry = {'path': root,
                     'name': name_match.groupdict()['test_name'],
                     'is_example': is_example}
            if is_example:
                options.example_tests.append(entry)
            else:
                options.tests.append(entry)

    logging.info("%d tests found" % (len(options.tests) + len(options.example_tests)))
    logging.info("  %d general tests" % len(options.tests))
    if options.with_examples:
        logging.info("  %d tests for examples" % len(options.example_tests))
def run_test(path, name, is_example):
    """Run one ctest target and capture its lcov tracing data.

    :param path: CMake target directory of the test (holds the .gcda files)
    :param name: ctest target name to run
    :param is_example: if True, example sources are additionally extracted
        into a second tracefile

    Side effects: appends tracefile paths to options.tracefiles, writes a
    per-test log in the coverage dir, and chdirs around (ends in
    options.base_dir).
    """
    logging.info("- %s" % name)
    logging.debug("Found in %s" % path)
    output_file = open('%s/%s.log' % (options.coverage_dir, name), mode='a')
    logging.debug("Output log: %s" % output_file.name)

    # Reset any counters left over from previous runs in the target dir.
    os.chdir(os.path.abspath(path))
    logging.debug("Deleting old tracing data ...")
    print('### deleting old tracing data ...', file=output_file, flush=True)
    sp.check_call('lcov --zerocounters --directory .', shell=True, stdout=output_file, stderr=output_file)
    print('### done.', file=output_file, flush=True)

    # ctest must run from the build dir to find the target.
    os.chdir(options.build_dir)
    logging.debug("Running test ...")
    print('### running test ...', file=output_file, flush=True)
    sp.check_call('ctest -R %s' % name, shell=True, stdout=output_file, stderr=output_file)
    print('### done.', file=output_file, flush=True)

    os.chdir(os.path.abspath(path))
    logging.debug("Capturing all tracing data ...")
    print('### capturing all tracing data ...', file=output_file, flush=True)
    sp.check_call('lcov --capture --directory . --output-file "%s.info.complete"' % name,
                  shell=True, stdout=output_file, stderr=output_file)
    print('### done.', file=output_file, flush=True)

    # Strip the bundled third-party logging header from the capture.
    logging.debug("Removing unnecessary data ...")
    print('### removing unnecessary data ...', file=output_file, flush=True)
    try:
        sp.check_call('lcov --remove "%s.info.complete" "%s/include/pfasst/easylogging++.h" --output-file %s.info.prelim'
                      % (name, options.base_dir, name),
                      shell=True, stdout=output_file, stderr=output_file)
    except sp.CalledProcessError as e:
        # Best effort: a failed filter step should not abort the whole run.
        logging.warning(e)
    print('### done.', file=output_file, flush=True)

    # Keep only coverage of the project's own headers (and, for examples,
    # the example sources) in the final tracefiles.
    logging.debug("Extracting interesting tracing data ...")
    print('### extracting interesting tracing data ...', file=output_file, flush=True)
    try:
        sp.check_call('lcov --extract "%s.info.prelim" "*%s/include/**/*" --output-file %s.info'
                      % (name, options.base_dir, name),
                      shell=True, stdout=output_file, stderr=output_file)
        options.tracefiles.append("%s/%s.info" % (os.path.abspath(path), name))
    except sp.CalledProcessError as e:
        logging.warning(e)
    if is_example:
        logging.debug("This test belongs to an example, thus also covering examples code")
        try:
            sp.check_call('lcov --extract "%s.info.prelim" "*%s/examples/**/*" --output-file %s.info.example'
                          % (name, options.base_dir, name),
                          shell=True, stdout=output_file, stderr=output_file)
            options.tracefiles.append("%s/%s.info.example" % (os.path.abspath(path), name))
        except sp.CalledProcessError as e:
            logging.warning(e)
    print('### done.', file=output_file, flush=True)

    os.chdir(options.base_dir)
    output_file.close()
def run_tests():
    """Execute every discovered test: general ones first, then examples."""
    logging.info("Running general tests ...")
    for entry in options.tests:
        run_test(**entry)

    if options.with_examples:
        logging.info("Running tests for examples ...")
        for entry in options.example_tests:
            run_test(**entry)
def aggregate_tracefiles():
    """Merge all collected lcov tracefiles into options.final_tracefile."""
    logging.info("Aggregating %d tracefiles ..." % len(options.tracefiles))
    log = open('%s/aggegrating.log' % (options.coverage_dir,), mode='a')
    logging.debug("Output log: %s" % log.name)
    options.final_tracefile = "%s/all_tests.info" % options.coverage_dir
    for tracefile in options.tracefiles:
        logging.debug("- %s" % (tracefile))
        print("### adding tracefile: %s" % (tracefile,), file=log, flush=True)
        # If the aggregate already exists, merge into it; otherwise seed it
        # from this first tracefile alone.
        if os.access(options.final_tracefile, os.W_OK):
            command = ('lcov --add-tracefile "%s" --add-tracefile "%s" --output-file "%s"'
                       % (options.final_tracefile, tracefile, options.final_tracefile))
        else:
            command = ('lcov --add-tracefile "%s" --output-file "%s"'
                       % (tracefile, options.final_tracefile))
        sp.check_call(command, shell=True, stdout=log, stderr=log)
        print("### done.", file=log, flush=True)
    log.close()
def generate_html():
    """Render the aggregated tracefile into an HTML coverage report via genhtml."""
    logging.info("Generating HTML report ...")
    log = open('%s/generate_html.log' % (options.coverage_dir,), mode='a')
    command = ('genhtml --output-directory %s --demangle-cpp --num-spaces 2 --sort '
               '--title "PFASST++ Test Coverage" --prefix "%s" --function-coverage --legend "%s"'
               % (options.coverage_dir, options.base_dir, options.final_tracefile))
    sp.check_call(command, shell=True, stdout=log, stderr=log)
    log.close()
    logging.info("Coverage report can be found in: file://%s/index.html" % options.coverage_dir)
if __name__ == "__main__":
    # Abort early when the required lcov tooling is unavailable.
    if not is_lcov_available():
        raise RuntimeError("Required commands could not be found.")
    # Pipeline: configure, discover test dirs, run, merge traces, render report.
    for step in (setup_and_init_options, get_test_directories, run_tests,
                 aggregate_tracefiles, generate_html):
        step()
|
"""
Mask initial bases from alignment FASTA
"""
import argparse
from random import shuffle
from collections import defaultdict
import Bio
import numpy as np
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.Seq import Seq
from Bio import AlignIO, SeqIO
from scipy import sparse
def compactify_sequences(sparse_matrix, sequence_names):
    """Group sequence names that share an identical set of (position, value) SNPs."""
    groups = defaultdict(list)
    for name, snp_row in zip(sequence_names, sparse_matrix):
        nonzero = snp_row.nonzero()
        values = np.array(snp_row[nonzero])
        # Sequences with no SNPs all land under the empty-tuple key.
        key = tuple(zip(nonzero[1], values[0])) if len(nonzero[1]) else tuple()
        groups[key].append(name)
    return groups
# Initial capacity of the row/col/val buffers used to build the sparse SNP
# matrix; the buffers grow by this amount whenever they fill up.
INITIALISATION_LENGTH = 1000000
def sequence_to_int_array(s, fill_value=110):
    """Encode a sequence as lowercase int8 byte codes; non-ACGT bytes become fill_value."""
    codes = np.frombuffer(str(s).lower().encode('utf-8'), dtype=np.int8).copy()
    # 97/99/103/116 are the byte codes of 'a', 'c', 'g', 't'.
    is_base = (codes == 97) | (codes == 99) | (codes == 103) | (codes == 116)
    codes[~is_base] = fill_value
    return codes
# Function adapted from https://github.com/gtonkinhill/pairsnp-python
def calculate_snp_matrix(fastafile, consensus=None, zipped=False, fill_value=110):
    """Parse an alignment FASTA into a sparse matrix of differences from a consensus.

    Adapted from https://github.com/gtonkinhill/pairsnp-python.

    :param fastafile: path to the (optionally gzipped) alignment FASTA
    :param consensus: int8 array to compare against; defaults to the first sequence
    :param zipped: if True, open the file with gzip
    :param fill_value: byte code marking masked/non-ACGT positions (110 == ord('n'))
    :returns: dict with 'snps' (sparse CSC, one row per sequence, values are the
        differing byte codes), 'consensus', 'names', and 'filled_positions'
        (per-sequence arrays of masked positions)
    :raises ValueError: on an empty file or sequences of unequal length
    """
    # BUGFIX: gzip was referenced but never imported anywhere in this file, so
    # zipped=True previously raised NameError. Imported locally since it is
    # only needed for zipped input.
    import gzip

    row = np.empty(INITIALISATION_LENGTH)
    col = np.empty(INITIALISATION_LENGTH, dtype=np.int64)
    val = np.empty(INITIALISATION_LENGTH, dtype=np.int8)

    r = 0
    n_snps = 0
    nseqs = 0
    seq_names = []
    filled_positions = []
    current_length = INITIALISATION_LENGTH

    if zipped:
        fh = gzip.open(fastafile, 'rt')
    else:
        fh = open(fastafile, 'rt')
    with fh as fasta:
        for h, s in SimpleFastaParser(fasta):
            if consensus is None:
                align_length = len(s)
                # Take consensus as first sequence
                consensus = sequence_to_int_array(s, fill_value=fill_value)
            else:
                align_length = len(consensus)

            nseqs += 1
            seq_names.append(h)

            if len(s) != align_length:
                raise ValueError('Fasta file appears to have sequences of different lengths!')

            s = sequence_to_int_array(s, fill_value=fill_value)
            # A SNP is a position that differs from consensus and is not masked.
            snps = (consensus != s) & (s != fill_value)
            right = n_snps + np.sum(snps)
            filled_positions.append(np.where(s == fill_value)[0])

            # Grow the COO buffers before they run out of headroom.
            if right >= (current_length / 2):
                current_length = current_length + INITIALISATION_LENGTH
                row.resize(current_length)
                col.resize(current_length)
                val.resize(current_length)

            row[n_snps:right] = r
            col[n_snps:right] = np.flatnonzero(snps)
            val[n_snps:right] = s[snps]

            r += 1
            n_snps = right
    # (The redundant fh.close() after the 'with' block was removed; the context
    # manager already closes the handle.)

    if nseqs == 0:
        raise ValueError('No sequences found!')

    row = row[0:right]
    col = col[0:right]
    val = val[0:right]

    sparse_snps = sparse.csc_matrix((val, (row, col)), shape=(nseqs, align_length))

    return {'snps': sparse_snps, 'consensus': consensus, 'names': seq_names, 'filled_positions': filled_positions}
# Function adapted from https://github.com/gtonkinhill/pairsnp-python
def calculate_distance_matrix(sparse_matrix_A, sparse_matrix_B, consensus):
    """Pairwise distances between the rows of two sparse SNP matrices.

    Adapted from https://github.com/gtonkinhill/pairsnp-python. Both inputs
    hold byte codes (a/c/g/t plus 110 for masked) at positions differing from
    a shared consensus; `consensus` itself is not used here. Returns a dense
    (rows_A x rows_B) distance matrix.
    """
    rows_a = sparse_matrix_A.shape[0]
    rows_b = sparse_matrix_B.shape[0]
    b_t = sparse_matrix_B.transpose()

    # Positions where both sequences record the same base substitution.
    matches = (1 * (sparse_matrix_A == 97)) * (b_t == 97)
    for code in (99, 103, 116):
        matches = matches + (1 * (sparse_matrix_A == code) * (b_t == code))
    matches = matches.todense()

    # Positions masked (110) in both sequences.
    both_masked = (1 * (sparse_matrix_A == 110) * ((sparse_matrix_B == 110).transpose())).todense()
    matches = matches + both_masked

    # Per-pair total of recorded differences (sum of each side's counts).
    pair_totals = np.zeros((rows_a, rows_b))
    pair_totals[:] = (1 * (sparse_matrix_A > 0)).sum(1)
    pair_totals += (1 * (sparse_matrix_B > 0)).sum(1).transpose()

    # Positions where both sequences differ from consensus (any value).
    overlap = (1 * (sparse_matrix_A > 0)) * (b_t > 0)

    # Masked-position counts summed pairwise, minus doubly-masked overlaps.
    masked_totals = np.zeros((rows_a, rows_b))
    masked_totals[:] = (1 * (sparse_matrix_A == 110)).sum(1)
    masked_totals += (1 * (sparse_matrix_B == 110)).sum(1).transpose()
    masked_mismatch = masked_totals - 2 * both_masked

    return pair_totals - overlap.todense() - matches - masked_mismatch
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="generate priorities files based on genetic proximity to focal sample",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--alignment", type=str, required=True, help="FASTA file of alignment")
    parser.add_argument("--reference", type = str, required=True, help="reference sequence")
    parser.add_argument("--metadata", type = str, required=True, help="metadata")
    # BUGFIX: help text previously read "focal smaple of sequences".
    parser.add_argument("--focal-alignment", type = str, required=True, help="focal sample of sequences")
    parser.add_argument("--output", type=str, required=True, help="FASTA file of output alignment")
    args = parser.parse_args()

    # load entire alignment and the alignment of focal sequences (upper case -- probably not necessary)
    ref = sequence_to_int_array(SeqIO.read(args.reference, 'genbank').seq)
    context_seqs_dict = calculate_snp_matrix(args.alignment, consensus=ref)
    focal_seqs_dict = calculate_snp_matrix(args.focal_alignment, consensus = ref)
    alignment_length = len(ref)
    print("Done reading the alignments.")

    # calculate number of masked sites in either set
    mask_count_focal = np.array([len(x) for x in focal_seqs_dict['filled_positions']])
    mask_count_context = {s: len(x) for s,x in zip(context_seqs_dict['names'], context_seqs_dict['filled_positions'])}

    # for each context sequence, calculate minimal distance to focal set, weigh with number of N/- to pick best sequence
    d = np.array(calculate_distance_matrix(context_seqs_dict['snps'], focal_seqs_dict['snps'], consensus = context_seqs_dict['consensus']))
    closest_match = np.argmin(d+mask_count_focal/alignment_length, axis=1)
    print("Done finding closest matches.")

    # record, per context sequence, its distance to and the name of its
    # closest focal sequence
    minimal_distance_to_focal_set = {}
    for context_index, focal_index in enumerate(closest_match):
        minimal_distance_to_focal_set[context_seqs_dict['names'][context_index]] = (d[context_index, focal_index], focal_seqs_dict["names"][focal_index])

    # for each focal sequence with close matches (using the index), we list all close contexts
    close_matches = defaultdict(list)
    for seq in minimal_distance_to_focal_set:
        close_matches[minimal_distance_to_focal_set[seq][1]].append(seq)

    # shuffle before sorting so distance ties are broken randomly instead of
    # by input order
    for f in close_matches:
        shuffle(close_matches[f])
        close_matches[f].sort(key=lambda x: minimal_distance_to_focal_set[x][0] + mask_count_context[x]/alignment_length)

    # export priorities
    with open(args.output, 'w') as fh:
        for i, seqid in enumerate(context_seqs_dict['names']):
            # use distance as negative priority
            # penalize masked (N or -) sites
            # penalize if many sequences are close to the same focal one by using the index of the shuffled list of neighbours
            # currently each position in this list reduces priority by 0.1, i.e. 10 other sequences == one mutation
            position = close_matches[minimal_distance_to_focal_set[seqid][1]].index(seqid)
            priority = -minimal_distance_to_focal_set[seqid][0] - 0.1*position
            fh.write(f"{seqid}\t{priority:1.2f}\n")
|
from dataclasses import dataclass
from typing import List
from spotdl.utils.spotify import SpotifyClient
from spotdl.types.song import Song
class PlaylistError(Exception):
    """
    Base class for all exceptions related to playlists.

    Raised, for example, when a playlist URL cannot be resolved via the
    Spotify API (see Playlist.from_url / Playlist.get_urls).
    """
@dataclass(frozen=True)
class Playlist:
    """Immutable description of a Spotify playlist and its resolved tracks."""

    name: str
    url: str
    tracks: List[Song]
    description: str
    author_url: str
    author_name: str

    @classmethod
    def from_url(cls, url: str) -> "Playlist":
        """
        Load playlist info and tracks from a Spotify playlist URL.
        """
        client = SpotifyClient()
        metadata = client.playlist(url)
        if metadata is None:
            raise PlaylistError("Invalid playlist URL.")

        # Resolve each playable track URL into a Song; tracks without an id
        # (country-restricted/local tracks) are filtered out by get_urls.
        songs = [Song.from_url(track_url) for track_url in cls.get_urls(url)]

        return cls(
            name=metadata["name"],
            url=url,
            tracks=songs,
            description=metadata["description"],
            author_url=metadata["external_urls"]["spotify"],
            author_name=metadata["owner"]["display_name"],
        )

    @property
    def length(self) -> int:
        """
        Get Playlist length (number of tracks).
        """
        return len(self.tracks)

    @staticmethod
    def get_urls(url: str) -> List[str]:
        """
        Get the Spotify URLs of all playable tracks in a playlist.
        """
        client = SpotifyClient()
        first_page = client.playlist_items(url)
        if first_page is None:
            raise PlaylistError(f"Wrong playlist id: {url}")

        # Follow pagination until exhausted (or a page fails to load).
        page = first_page
        items = list(page["items"])
        while page["next"]:
            page = client.next(page)
            if page is None:
                break
            items.extend(page["items"])

        return [
            entry["track"]["external_urls"]["spotify"]
            for entry in items
            if entry and entry.get("track", {}).get("id")
        ]
|
#!/usr/bin/env python3
"""
Optimization transforms are special modules that take gradients as inputs
and output model updates.
Transforms are usually parameterized, and those parameters can be learned by
gradient descent, allowing you to learn optimization functions from data.
"""
from .module_transform import ModuleTransform, ReshapedTransform
from .kronecker_transform import KroneckerTransform
from .transform_dictionary import TransformDictionary
from .metacurvature_transform import MetaCurvatureTransform
|
from datetime import datetime
from typing import Any, Optional
from phx_events.phx_messages import ChannelEvent, ChannelMessage, PHXEvent, PHXEventMessage, PHXMessage, Topic
def parse_event(event: ChannelEvent) -> ChannelEvent:
    """Promote a raw event value to a PHXEvent member when it matches one."""
    try:
        return PHXEvent(event)
    except ValueError:
        # Not a recognised Phoenix event; hand back the original unchanged.
        return event
def make_message(
    event: ChannelEvent,
    topic: Topic,
    ref: Optional[str] = None,
    payload: Optional[dict[str, Any]] = None,
) -> ChannelMessage:
    """Build the appropriate channel message class for `event` on `topic`."""
    if payload is None:
        payload = {}
    resolved_event = parse_event(event)
    # PHXEvent instances get the dedicated event-message type.
    message_cls = PHXEventMessage if isinstance(resolved_event, PHXEvent) else PHXMessage
    return message_cls(event=resolved_event, topic=topic, ref=ref, payload=payload)
def generate_reference(event: ChannelEvent) -> str:
    """Create a message reference from the current timestamp and the event."""
    timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
    return f'{timestamp}:{event}'
|
import copy
import numpy as np
from scipy.sparse import csgraph
import datetime
import os.path
import argparse
import yaml
from sklearn.decomposition import TruncatedSVD
from sklearn import cluster
from sklearn.decomposition import PCA
from conf import sim_files_folder, save_address
from util_functions import *
from Articles import ArticleManager
from Users.Users import UserManager
from Users.CoUsers import CoUserManager
from RewardManager import RewardManager
from DatasetRewardManager import DatasetRewardManager
from YahooRewardManager import YahooRewardManager
from DiffList.DiffManager import DiffManager
from conf import *
from LastFM_util_functions import *
from YahooExp_util_functions import *
from lib.LinUCB import LinUCBAlgorithm, Uniform_LinUCBAlgorithm, Hybrid_LinUCBAlgorithm
from lib.hLinUCB import HLinUCBAlgorithm
from lib.factorUCB import FactorUCBAlgorithm
from lib.CoLin import CoLinUCBAlgorithm
from lib.GOBLin import GOBLinAlgorithm
from lib.CLUB import *
from lib.PTS import PTSAlgorithm
from lib.UCBPMF import UCBPMFAlgorithm
from lib.FairUCB import FairUCBAlgorithm
from lib.ThompsonSampling import ThompsonSamplingAlgorithm
from lib.LinPHE import LinPHEAlgorithm
from lib.NeuralUCB import NeuralUCBAlgorithm
from lib.NeuralPHE import NeuralPHEAlgorithm
def pca_articles(articles, order):
    """PCA-transform article feature vectors in place, optionally reordering dims.

    order: "random" shuffles dimensions, "ascend" reverses them, "origin"
    keeps the raw features; anything else keeps PCA's default (descending
    variance) order.
    """
    features = [article.featureVector for article in articles]
    pca = PCA()
    transformed = pca.fit_transform(features)
    if order == "random":
        np.random.shuffle(transformed.T)
    elif order == "ascend":
        transformed = np.fliplr(transformed)
    elif order == "origin":
        transformed = features
    for idx in range(len(articles)):
        articles[idx].featureVector = transformed[idx]
def generate_algorithms(alg_dict, W, system_params):
    """Instantiate each configured algorithm plus a DiffManager tracking them.

    Factories are looked up in module globals: create<Name>Dict builds the
    settings (falling back to createBaseAlgDict) and <Name>Algorithm builds
    the instance (unknown names raise NotImplementedError).
    """
    general_cfg = alg_dict["general"] if "general" in alg_dict and alg_dict["general"] else {}
    algorithms = {}
    diffLists = DiffManager()
    for name in alg_dict["specific"]:
        print("")
        print(str(name))
        specific_cfg = alg_dict["specific"][name] if alg_dict["specific"][name] else {}
        try:
            settings = globals()["create" + name + "Dict"](specific_cfg, general_cfg, W, system_params)
        except KeyError:
            settings = createBaseAlgDict(specific_cfg, general_cfg, W, system_params)
        try:
            algorithms[name] = globals()[name + "Algorithm"](settings)
        except KeyError:
            raise NotImplementedError(name + " not currently implemented")
        diffLists.add_algorithm(name, algorithms[name].getEstimateSettings())
    return algorithms, diffLists
def addDatasetParams(rewardManagerDict):
    """Fill dataset-specific paths and sizes into the reward-manager config.

    Reads the module-level `gen` config; supports LastFM, Delicious and Yahoo
    (Yahoo has no relation/feature-vector files).
    """
    dataset = gen["dataset"]
    rewardManagerDict["dataset"] = dataset
    if dataset == "LastFM":
        rewardManagerDict["relationFileName"] = LastFM_relationFileName
        rewardManagerDict["address"] = LastFM_address
        rewardManagerDict["save_address"] = LastFM_save_address
        rewardManagerDict["FeatureVectorsFileName"] = LastFM_FeatureVectorsFileName
        rewardManagerDict["itemNum"] = 19000
    elif dataset == "Delicious":
        rewardManagerDict["relationFileName"] = Delicious_relationFileName
        rewardManagerDict["address"] = Delicious_address
        rewardManagerDict["save_address"] = Delicious_save_address
        rewardManagerDict["FeatureVectorsFileName"] = Delicious_FeatureVectorsFileName
        rewardManagerDict["itemNum"] = 190000
    elif dataset == "Yahoo":
        rewardManagerDict["address"] = Yahoo_address
        rewardManagerDict["save_address"] = Yahoo_save_address
        rewardManagerDict["itemNum"] = 200000
def createW(gen):
    """Build the user adjacency matrix W (and GW for GOBLin) via clustering.

    If `gen` provides a pre-computed cluster label file, labels are read from
    it; otherwise users are clustered from the relation file. Reads/writes the
    module-level `rewardManagerDict` and `args`. Returns (W, GW, nClusters).
    """
    OriginaluserNum = 2100
    nClusters = 100
    Gepsilon = 0.3
    # NOTE(review): the clusterfile branch still references args.diagnol and
    # args.showheatmap, which the argument parser below does not define --
    # this path will fail until those options are added.
    if "clusterfile" in gen:
        label = read_cluster_label(gen["clusterfile"])
        rewardManagerDict["label"] = label
        userNum = nClusters = int(args.clusterfile.name.split(".")[-1])  # Get cluster number.
        W = initializeW_label(
            nClusters,
            rewardManagerDict["relationFileName"],
            label,
            args.diagnol,
            args.showheatmap,
        )  # Generate user relation matrix
        # BUGFIX: `relationFileName` was referenced as a bare, undefined name
        # here; use the value stored in rewardManagerDict as the call above does.
        GW = initializeGW_label(Gepsilon, nClusters, rewardManagerDict["relationFileName"], label, args.diagnol)
    else:
        normalizedNewW, newW, label = initializeW_clustering(
            OriginaluserNum, rewardManagerDict["relationFileName"], nClusters
        )
        rewardManagerDict["label"] = label
        GW = initializeGW_clustering(Gepsilon, rewardManagerDict["relationFileName"], newW)
        W = normalizedNewW
    return W, GW, nClusters
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"--alg",
dest="alg",
help="Select a specific algorithm, could be CoLin, hLinUCB, factorUCB, LinUCB, etc.",
)
parser.add_argument("--contextdim", type=int, help="Set dimension of context features.")
parser.add_argument("--hiddendim", type=int, help="Set dimension of hidden features.")
parser.add_argument("--config", dest="config", help="yaml config file")
parser.add_argument(
"--dataset",
required=False,
choices=["LastFM", "Delicious"],
help="Select dataset to run. No Selection resuts in simulated rewards",
)
parser.add_argument(
"--clusterfile",
dest="clusterfile",
help="input an clustering label file",
metavar="FILE",
type=lambda x: is_valid_file(parser, x),
)
args = parser.parse_args()
cfg = {}
with open(args.config, "r") as ymlfile:
cfg = yaml.load(ymlfile)
gen = cfg["general"] if "general" in cfg else {}
simulating = True if not ("dataset" in gen) or gen["dataset"] == "None" else False
user = cfg["user"] if "user" in cfg else {}
article = cfg["article"] if "article" in cfg else {}
reco = cfg["reward"] if "reward" in cfg else {}
rewardManagerDict = {}
if args.contextdim:
context_dimension = args.contextdim
else:
context_dimension = gen["context_dimension"] if "context_dimension" in gen else 20
rewardManagerDict["context_dimension"] = context_dimension
if args.hiddendim:
latent_dimension = args.hiddendim
else:
latent_dimension = gen["hidden_dimension"] if "hidden_dimension" in gen else 0
rewardManagerDict["latent_dimension"] = latent_dimension
rewardManagerDict["training_iterations"] = gen["training_iterations"] if "training_iterations" in gen else 0
rewardManagerDict["testing_iterations"] = gen["testing_iterations"] if "testing_iterations" in gen else 100
rewardManagerDict["plot"] = gen["plot"] if "plot" in gen else True
rewardManagerDict["NoiseScale"] = 0.01
rewardManagerDict["epsilon"] = 0 # initialize W
ArticleGroups = article["groups"] if "groups" in article else 5
if "dataset" in gen:
if gen["dataset"] == "LastFM":
print("LastFM")
n_users = 2100
n_articles = 19000
elif gen["dataset"] == "Delicious":
print("Delicious")
n_users = 2100
n_articles = 190000
UserGroups = user["groups"] if "groups" in user else 5
rewardManagerDict["poolArticleSize"] = gen["pool_article_size"] if "pool_article_size" in gen else 10
rewardManagerDict["batchSize"] = gen["batch_size"] if "batch_size" in gen else 1
# Matrix parameters
# matrixNoise = 0.01
# rewardManagerDict["matrixNoise"] = lambda: np.random.normal(scale=matrixNoise)
# rewardManagerDict["sparseLevel"] = n_users
# if smaller or equal to 0 or larger or equal to usernum, matrix is fully connected
# Parameters for GOBLin
rewardManagerDict["Gepsilon"] = 1
use_coUsers = ("collaborative" in user) and user["collaborative"]
reward_type = reco["type"] if "type" in reco else "linear"
UM = UserManager(context_dimension + latent_dimension, user, argv={"l2_limit": 1})
UM.CoTheta()
# rewardManagerDict["W"] = UM.getW()
# rewardManagerDict["users"] = UM.getUsers()
rewardManagerDict["k"] = reco["k"] if "k" in reco else 1
addDatasetParams(rewardManagerDict)
W, GW, nClusters = createW(gen)
experiment = DatasetRewardManager(arg_dict=rewardManagerDict)
system_params = {
"context_dim": context_dimension,
"latent_dim": latent_dimension,
"n_users": n_users,
"n_clusters": nClusters,
"n_articles": n_articles,
}
algorithms, diffLists = generate_algorithms(cfg["alg"], W, system_params)
experiment.runAlgorithms(algorithms, diffLists)
|
from datetime import datetime
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.utils.translation import ugettext as _
class RegisterForm(UserCreationForm):
    """Sign-up form for the custom user model, adding an optional birth date."""

    # Year choices span the 120 years up to "now".
    # NOTE(review): datetime.now() is evaluated once at import time, so the
    # range only refreshes on process restart.
    birth_date = forms.DateField(
        required=False,
        widget=forms.SelectDateWidget(
            years=range(datetime.now().year, datetime.now().year - 120, -1)
        ),
    )

    class Meta:
        model = get_user_model()
        fields = (
            "phone_number",
            "email",
            "first_name",
            "last_name",
            "birth_date",
            "password1",
            "password2",
        )
class LoginForm(AuthenticationForm):
    """Login form accepting email or phone number, with a remember-me flag."""

    username = forms.CharField(label=_("Email / Phone Number"))
    remember_me = forms.BooleanField(required=False)

    error_messages = {
        "invalid_login": _(
            "Please enter a correct email / phone number and password. "
            "Note that both fields may be case-sensitive."
        ),
        # BUGFIX: the message previously read "Sorry, your has been locked..."
        # (missing the word "account").
        "inactive": _("Sorry, your account has been locked by an administrator."),
    }
class EditForm(forms.ModelForm):
    """Form for editing an existing user's profile fields."""

    # Optional birth date; year choices span the 120 years up to "now"
    # (evaluated once at import time).
    birth_date = forms.DateField(
        required=False,
        widget=forms.SelectDateWidget(
            years=range(datetime.now().year, datetime.now().year - 120, -1)
        ),
    )

    class Meta:
        model = get_user_model()
        fields = (
            "profile_pic",
            "phone_number",
            "first_name",
            "last_name",
            "birth_date",
        )
|
import pytest
from pyregions.database.ponydatabase import sql_entities, region_database as rdb
from pyregions import standard_definition as sd
from pony.orm import db_session
import datetime
from loguru import logger
import importlib
@pytest.fixture
def empty_database(tmp_path) -> rdb.RegionDatabase:
    """Provide a fresh, table-initialized RegionDatabase with no rows."""
    # Reload the entity definitions so each test gets unbound Pony entities.
    rdb.sql_entities = importlib.reload(sql_entities)
    filename = tmp_path / "empty_database.sqlite"
    # BUGFIX: the f-string had no placeholder; log the actual filename.
    logger.debug(f"Creating empty database as {filename}")
    region_db = rdb.RegionDatabase(filename)
    region_db.database.drop_all_tables(with_all_data = True)
    region_db.database.create_tables()
    return region_db
@pytest.fixture
def region_database(tmp_path) -> rdb.RegionDatabase:
    """Provide a RegionDatabase pre-populated with two regions, a report and one series."""
    # Reload the entity definitions so each test gets unbound Pony entities.
    rdb.sql_entities = importlib.reload(sql_entities)
    filename = tmp_path / "region_database.sqlite"
    # BUGFIX: the f-string had no placeholder; log the actual filename.
    logger.debug(f"Creating region database as {filename}")
    empty_database = rdb.RegionDatabase(filename)
    empty_database.database.drop_all_tables(with_all_data = True)
    empty_database.database.create_tables()
    with db_session:
        region1 = empty_database.Region(
            code = 'TEST1',
            name = 'testregion1',
            type = 'testregion'
        )
        empty_database.Region(
            code = 'TEST2',
            name = 'testregion2',
            type = 'testregion'
        )
        report = empty_database.Report(
            name = 'testReport',
            agency = 'testAgency',
            url = 'http://www.somewebsite.com',
            date = datetime.datetime.now()
        )
        empty_database.add_scales()
        scale = empty_database.get_scale('kilo')
        tag1 = empty_database.Tag(value = 'tag1')
        tag2 = empty_database.Tag(value = 'tag2')
        # Constructing the entity registers it with the session; the binding
        # itself was unused and has been dropped.
        empty_database.Series(
            code = 'S1',
            name = 'testseries1',
            description = 'testdescription',
            notes = '',
            region = region1,
            report = report,
            scale = scale,
            units = 'person',
            years = [2012, 2013, 2014, 2015],
            values = [1, 12, 123, 1234],
            tags = [tag1, tag2]
        )
    return empty_database
def test_database_can_be_created():
    # Constructing against an in-memory SQLite URI must not raise.
    rdb.RegionDatabase(':memory:')
def test_regions_can_be_added_to_the_database(empty_database):
    """import_regions should persist every given StandardRegion."""
    new_regions = [
        sd.StandardRegion('TE1', 'testregion1', 'testregion'),
        sd.StandardRegion('TE2', 'testregion2', 'testregion'),
        sd.StandardRegion('TE3', 'testregion3', 'testregion')
    ]
    # Precondition: none of the regions exist yet.
    with db_session:
        assert empty_database.Region.get(code = 'TE1') is None
        assert empty_database.Region.get(code = 'TE2') is None
        assert empty_database.Region.get(name = 'testregion3') is None
    empty_database.import_regions(new_regions)
    # Postcondition: all regions are retrievable by code or name.
    with db_session:
        assert empty_database.Region.get(code = 'TE1') is not None
        assert empty_database.Region.get(code = 'TE1').name == 'testregion1'
        assert empty_database.Region.get(code = 'TE2') is not None
        assert empty_database.Region.get(name = 'testregion3') is not None
def test_regions_can_be_added_to_the_database_if_some_already_exist(empty_database):
    """import_regions should not overwrite a region that already exists."""
    # Seed one conflicting region before the import.
    with db_session:
        empty_database.Region(code = 'TE2', name = 'testregion2a', type = 'testregion')
    new_regions = [
        sd.StandardRegion('TE1', 'testregion1', 'testregion'),
        sd.StandardRegion('TE2', 'testregion2b', 'testregion'),
        sd.StandardRegion('TE3', 'testregion3', 'testregion')
    ]
    with db_session:
        assert empty_database.Region.get(code = 'TE1') is None
        assert empty_database.Region.get(code = 'TE2').name == 'testregion2a'
        assert empty_database.Region.get(name = 'testregion3') is None
    empty_database.import_regions(new_regions)
    with db_session:
        assert empty_database.Region.get(code = 'TE1') is not None
        assert empty_database.Region.get(code = 'TE1').name == 'testregion1'
        # The pre-existing region keeps its original name.
        assert empty_database.Region.get(code = 'TE2').name == 'testregion2a'
        assert empty_database.Region.get(name = 'testregion3') is not None
def test_add_scales_to_database(empty_database):
    """add_scales should make the standard scales (e.g. 'kilo') retrievable."""
    with db_session:
        empty_database.add_scales()
        kilo_scale = empty_database.Scale.get(code = 'kilo')
        assert kilo_scale.code == 'kilo'
def test_import_some_data_into_database(empty_database):
    """A full Region/Report/Scale/Tag/Series graph can be created in one session."""
    with db_session:
        test_region = empty_database.Region(
            code = 'TEST1',
            name = 'testregion1',
            type = 'testregion'
        )
        test_report = empty_database.Report(
            name = 'testReport',
            agency = 'testAgency',
            url = 'http://www.somewebsite.com',
            date = datetime.datetime.now()
        )
        kilo_scale = empty_database.Scale(
            code = 'kilo',
            multiplier = 1000
        )
        first_tag = empty_database.Tag(value = 'tag1')
        second_tag = empty_database.Tag(value = 'tag2')
        # Creating the Series must succeed with all relations wired up.
        empty_database.Series(
            code = 'S1',
            name = 'testseries1',
            description = 'testdescription',
            notes = '',
            region = test_region,
            report = test_report,
            scale = kilo_scale,
            units = 'person',
            years = [2012,2013,2014,2015],
            values = [1,12,123,1234],
            tags = [first_tag, second_tag]
        )
def test_get_region_by_code(region_database):
    """get_region should look a region up by its code."""
    with db_session:
        fetched = region_database.get_region('TEST2')
        assert fetched.name == 'testregion2'
def test_get_report_by_name(region_database):
    """get_report should look a report up by its name."""
    with db_session:
        fetched = region_database.get_report('testReport')
        assert fetched.name == 'testReport'
def test_get_series_by_code(region_database):
    """get_series should resolve a series from region code, report name and series code."""
    with db_session:
        fetched = region_database.get_series('TEST1', 'testReport', 'S1')
        assert fetched.name == 'testseries1'
def test_get_scale_by_code(region_database):
    """get_scale should look a scale up by its code."""
    with db_session:
        milli_scale = region_database.get_scale('milli')
        assert milli_scale.code == 'milli'
def test_add_scales(empty_database):
    """add_scales should populate the standard scales into an empty database."""
    with db_session:
        # Not present before the call ...
        assert empty_database.Scale.get(code = 'mega') is None
        empty_database.add_scales()
        # ... present afterwards.
        mega_scale = empty_database.Scale.get(code = 'mega')
        assert mega_scale is not None
        assert mega_scale.code == 'mega'
        assert empty_database.Scale.get(code = 'unit') is not None
def test_add_namespace_iso_to_database(empty_database):
    """add_namespace_iso should load the ISO region codes (e.g. USA, SUN, BRA)."""
    with db_session:
        empty_database.add_namespace_iso()
        for iso_code in ('USA', 'SUN', 'BRA'):
            assert empty_database.Region.get(code = iso_code) is not None
def test_add_usps_codes_to_database(empty_database):
    """add_namespace_usps should load USPS codes under the USA- prefix."""
    with db_session:
        # Make sure the database is empty
        assert empty_database.Region.get(code = 'USA-NY') is None
        empty_database.add_namespace_usps()
        for usps_code in ('USA-NY', 'USA-PR'):
            assert empty_database.Region.get(code = usps_code) is not None
#!/usr/bin/env python
from telnetlib import Telnet
import io
import datetime
import re
import xml.etree.cElementTree as ET
def test2():
    """Fetch state vectors for two bodies (ids 10 and 301 -- presumably Sun and
    Moon) from JPL Horizons via its interactive telnet interface.

    Requests geocentric (@399) ecliptic vectors from "now" until three months
    ahead at 1 hour steps, then writes the parsed results to orbitData.xml.
    """
    root = ET.Element("root")
    orbitRoot = ET.Element("root")
    bodyList = [b'10', b'301']
    now = datetime.datetime.now()
    # BUGFIX: the stop time used to be built with now.month+3 without rolling
    # over the year, which produced invalid months (13-15) for queries started
    # in October-December. Compute the rollover explicitly.
    # NOTE(review): a start day of 29-31 can still yield an invalid stop date
    # when the target month is shorter -- confirm with real runs.
    stop_month_index = now.month + 2  # zero-based month arithmetic
    stop_year = now.year + stop_month_index // 12
    stop_month = stop_month_index % 12 + 1
    print("Accessing JPL Horizons via Telnet")
    tn = Telnet('ssd.jpl.nasa.gov', 6775)
    tn.read_until(b"Horizons> ")
    for i in range(len(bodyList)):
        tn.write(bodyList[i] + b"\n")
        # Body 10 uses a slightly different "Revised" header label.
        if(bodyList[i] == b'10'):
            tn.read_until(b'Revised : ')
        else:
            tn.read_until(b'Revised: ')
        planetInfo = tn.read_until(b'**')
        tn.read_until(b'<cr>: ')
        planetName = XMLPlanetData(planetInfo.decode(), root)
        tn.write(b"E\n")
        tn.read_until(b'[o,e,v,?] : ')
        tn.write(b"v\n")  # select vector output
        if i == 0:
            # First body: set the observing center to @399.
            tn.read_until(b'[ <id>,coord,geo ] : ')
            tn.write(b"@399\n")
        else:
            # Subsequent bodies: accept the previously chosen center.
            tn.read_until(b'[ cr=(y), n, ? ] : ')
            tn.write(b"y\n")
        tn.read_until(b'[eclip, frame, body ] : ')
        tn.write(b"eclip\n")
        # Start time, stop time (three months later), 1 hour step.
        tn.read_until(b'] : ')
        tn.write('{}-{}-{} {}:00\n'.format(now.year, now.month, now.day, now.hour).encode())
        tn.read_until(b'] : ')
        tn.write('{}-{}-{} {}:00\n'.format(stop_year, stop_month, now.day, now.hour).encode())
        tn.read_until(b'? ] : ')
        tn.write(b"1h\n")
        tn.read_until(b'?] : ')
        tn.write(b"y\n")
        # The data table lies between the $$SOE and $$EOE markers.
        tn.read_until(b'$$SOE')
        output = tn.read_until(b'$$EOE')
        print("Got return value from JPL Horizons for value " + planetName)
        buffer = io.StringIO(output.decode())
        out2 = ""
        for line in buffer:
            if line != '$$EOE':
                out2 += line
        buffer.close()
        writeDataToXML(out2, planetName, orbitRoot)
        tn.read_until(b'[R]edisplay, ? : ')
        tn.write(b"N\n")
        tn.read_until(b"Horizons> ")
    tn.write(b"exit\n")
    tn.close()
    tree2 = ET.ElementTree(orbitRoot)
    tree2.write("orbitData.xml")
    print("End")
def writeDataToXML(data, planetName, root):
    """Parse Horizons VECTORS table lines into <dataPoint> XML under a planet node.

    Each timestamp line starts a new dataPoint element; the following 'X' and
    'V' lines contribute X/Y/Z and VX/VY/VZ children holding the numeric text
    (non-numeric characters are stripped from each token).
    """
    planet = ET.SubElement(root, 'planet')
    planet.set("name", planetName)
    buffer = io.StringIO(data)
    for line in buffer:
        if len(line.split()) == 0 or line[1] == "L":
            # Blank or label line: nothing to record.
            pass
        elif line[1] == 'X':
            # Position line, e.g. " X = ... Y = ... Z = ..."
            counter = 0
            for value in line.split():
                temp = re.sub(r"[^0-9.E\-]", "", value)
                # BUGFIX: removed a leftover debug print(temp*30) that echoed
                # every token 30 times to stdout.
                if temp != "":
                    if counter == 0:
                        ET.SubElement(thisTimeStep, 'X').text = temp
                        counter += 1
                    elif counter == 1:
                        ET.SubElement(thisTimeStep, 'Y').text = temp
                        counter += 1
                    elif counter == 2:
                        ET.SubElement(thisTimeStep, 'Z').text = temp
                        counter += 1
        elif line[1] == 'V':
            # Velocity line, e.g. " VX= ... VY= ... VZ= ..."
            counter = 0
            for value in line.split():
                temp = re.sub(r"[^0-9.E\-]", "", value)
                if temp != "":
                    if counter == 0:
                        ET.SubElement(thisTimeStep, 'VX').text = temp
                        counter += 1
                    elif counter == 1:
                        ET.SubElement(thisTimeStep, 'VY').text = temp
                        counter += 1
                    elif counter == 2:
                        ET.SubElement(thisTimeStep, 'VZ').text = temp
                        counter += 1
        else:
            # Timestamp line: start a new dataPoint (tokens 3 and 4 are the
            # calendar date and clock time).
            thisTimeStep = ET.SubElement(planet, 'dataPoint')
            thisTimeStep.set('timeStamp', line.split()[3] + " " + line.split()[4])
def XMLPlanetData(data, root):
    """Record an (empty) planet node under root and return a display name.

    The name is the 4th whitespace token of the Horizons header text; a few
    known ids/prefixes are mapped to friendlier names.
    """
    ET.SubElement(root, 'planet')
    raw_name = data.split()[3]
    display_names = {
        "134340": "Pluto",
        "International": "ISS",
        "Tesla": "Starman",
        "New": "New Horizons",
    }
    return display_names.get(raw_name, raw_name)
# Script entry point: contacts Horizons and regenerates orbitData.xml.
# NOTE(review): runs at import time; consider an `if __name__ == "__main__":` guard.
test2()
|
from model.key import *
from util.logger import get_logger
from util.keys import replace_nonprintable
class KeyExtractor:
    """Accumulates a map of ledger keys to their read/write change history.

    State lives at class level: `keys` grows across successive calls to
    process_transactions.
    """
    keys = {}
    logger = get_logger(__name__)

    @staticmethod
    def process_transactions(transactions):
        """Fold the rw-sets of every transaction into the shared key map."""
        for transaction in transactions:
            changes = KeyExtractor._get_rw_keys_for_transaction(transaction)
            KeyExtractor._merge_results(changes)
        return KeyExtractor.keys

    @staticmethod
    def _merge_results(rw_sets):
        """Append each KeyChange to its key's history, creating keys on demand."""
        for key_change in rw_sets:
            key_name = key_change.rwset["key"]
            if key_name not in KeyExtractor.keys:
                KeyExtractor.keys[key_name] = Key(key_name)
                KeyExtractor.logger.debug(
                    f"New key: {replace_nonprintable(key_name)}, namespace: {key_change.namespace}")
            KeyExtractor.keys[key_name].history.append(key_change)

    @staticmethod
    def _get_rw_keys_for_transaction(tx):
        """Convert a transaction's read / range-query / write sets into KeyChanges."""
        read_sets, rq_sets, write_sets = tx.get_rw_sets()
        changes = []
        for entry in read_sets:
            changes.append(KeyChange(tx, *entry, READ))
        for entry in rq_sets:
            changes.append(KeyChange(tx, *entry, RANGE_QUERY))
        for entry in write_sets:
            changes.append(KeyChange(tx, *entry, WRITE))
        return changes

    @staticmethod
    def key_reference_exists(wk: str, rk: str):
        '''
        Does wk contain uuid of rk?
        '''
        write_components = set(Key(wk).components[1:])
        read_components = set(Key(rk).components[1:])
        return write_components.intersection(read_components) != set()
|
from uwimg import *
# 1. Getting and setting pixels
im = load_image("data/dog.jpg")
# Zero out channel 0 for every pixel (removes red, per the output name).
for row in range(im.h):
    for col in range(im.w):
        set_pixel(im, col, row, 0, 0)
save_image(im, "dog_no_red")

# 3. Grayscale image
im = load_image("data/colorbar.png")
graybar = rgb_to_grayscale(im)
save_image(graybar, "graybar")

# 4. Shift Image
# Add 0.4 to all three channels; values may exceed the valid range ("overflow").
im = load_image("data/dog.jpg")
shift_image(im, 0, .4)
shift_image(im, 1, .4)
shift_image(im, 2, .4)
save_image(im, "overflow")

# 5. Clamp Image
# Clamp the shifted image back into the valid range.
clamp_image(im)
save_image(im, "doglight_fixed")

# 6-7. Colorspace and saturation
# Convert to HSV, boost channel 1 (saturation) by 0.2, convert back.
im = load_image("data/dog.jpg")
rgb_to_hsv(im)
shift_image(im, 1, .2)
clamp_image(im)
hsv_to_rgb(im)
save_image(im, "dog_saturated")

# 8. A small amount of extra credit
# Same as above but scaling (rather than shifting) saturation by 2x.
im = load_image("data/dog.jpg")
rgb_to_hsv(im)
scale_image(im, 1, 2)
clamp_image(im)
hsv_to_rgb(im)
save_image(im, "dog_scale_saturated")
|
#!/bin/python3
import sys
# Hackerrank Python3 environment does not provide math.gcd
# as of the time of writing. We define it ourselves.
def gcd(n, m):
    """Greatest common divisor of two non-negative ints (Euclidean algorithm)."""
    while m:
        n, m = m, n % m
    return n
def lcm(x, y):
    """Least common multiple, via the lcm(x, y) * gcd(x, y) == x * y identity."""
    product = x * y
    return product // gcd(x, y)
def between(s1, s2):
    """Integers that are multiples of every element of s1 and divide every element of s2."""
    import functools
    common_divisor = functools.reduce(gcd, s2)
    common_multiple = functools.reduce(lcm, s1)
    return tuple(
        candidate
        for candidate in range(common_multiple, common_divisor + 1)
        if common_divisor % candidate == 0 and candidate % common_multiple == 0
    )
# Read the set sizes (only consumed, not otherwise used) and the two sets.
n, m = input().strip().split(' ')
n, m = [int(n),int(m)]
a = [int(a_temp) for a_temp in input().strip().split(' ')]
b = [int(b_temp) for b_temp in input().strip().split(' ')]
# Count and print how many integers lie "between" the two sets.
btw = between(a, b)
print(len(btw))
|
def fib():
    """Yield Fibonacci numbers starting 1, 2, 3, 5, 8, ... endlessly."""
    prev, curr = 1, 1
    while True:
        yield curr
        prev, curr = curr, prev + curr
def pares(seq):
    """Yield only the even values from seq."""
    yield from (value for value in seq if value % 2 == 0)
def menores_4M(seq):
    """Yield values from seq, stopping at the first one greater than 4,000,000."""
    for value in seq:
        if value <= 4000000:
            yield value
        else:
            return
# Project Euler #2: sum of the even Fibonacci terms not exceeding four million.
print (sum(pares(menores_4M(fib()))))
|
import argparse
import asyncio
import atexit
import contextlib
import os
import socket
import ssl
import subprocess
import sys
import tempfile
import time
from collections import namedtuple
from urllib.parse import urlencode, urlunparse
import pytest
import aioredis
import aioredis.sentinel
# (host, port) pair for a TCP endpoint.
TCPAddress = namedtuple("TCPAddress", "host port")
# Describes one spawned redis-server process.
RedisServer = namedtuple("RedisServer", "name tcp_address unixsocket version password")
# Describes one spawned redis-sentinel process and the masters it monitors.
SentinelServer = namedtuple(
    "SentinelServer", "name tcp_address unixsocket version masters"
)
# Public fixtures
@pytest.fixture(scope="session")
def event_loop():
    """Create a fresh session-scoped event loop and close it on teardown."""
    loop = asyncio.get_event_loop_policy().new_event_loop()
    try:
        yield loop
    finally:
        # Close even when teardown is interrupted, so loop resources are released.
        loop.close()
def _unused_tcp_port():
    """Find an unused localhost TCP port from 1024-65535 and return it."""
    sock = socket.socket()
    try:
        # Binding to port 0 makes the OS pick a free ephemeral port for us.
        sock.bind(("127.0.0.1", 0))
        return sock.getsockname()[1]
    finally:
        sock.close()
@pytest.fixture(scope="session")
def tcp_port_factory():
    """A factory function, producing different unused TCP ports."""
    handed_out = set()

    def factory():
        """Return an unused port never produced by this factory before."""
        while True:
            port = _unused_tcp_port()
            if port not in handed_out:
                handed_out.add(port)
                return port

    return factory
@pytest.fixture
def create_connection(_closable):
    """Factory fixture: open an aioredis connection registered for auto-close."""
    async def factory(*args, **kw):
        connection = await aioredis.create_connection(*args, **kw)
        _closable(connection)
        return connection
    return factory
@pytest.fixture(
    params=[aioredis.create_redis, aioredis.create_redis_pool], ids=["single", "pool"]
)
def create_redis(_closable, request):
    """Factory fixture: build a Redis client (single-connection or pool variant)."""
    make_client = request.param

    async def factory(*args, **kw):
        client = await make_client(*args, **kw)
        _closable(client)
        return client

    return factory
@pytest.fixture
def create_pool(_closable):
    """Factory fixture: create an aioredis pool registered for auto-close."""
    async def factory(*args, **kw):
        pool_obj = await aioredis.create_pool(*args, **kw)
        _closable(pool_obj)
        return pool_obj
    return factory
@pytest.fixture
def create_sentinel(_closable):
    """Factory fixture: build a RedisSentinel client registered for auto-close."""
    async def factory(*args, **kw):
        # make it fail fast on slow CIs (if timeout argument is omitted)
        kw.setdefault("timeout", 0.001)
        sentinel_client = await aioredis.sentinel.create_sentinel(*args, **kw)
        _closable(sentinel_client)
        return sentinel_client
    return factory
@pytest.fixture
async def pool(create_pool, server):
    """Return a RedisPool connected to the session-scoped server fixture."""
    return await create_pool(server.tcp_address)
@pytest.fixture
async def redis(create_redis, server):
    """Return a Redis client for the session server; flushes all data first
    so state never leaks between tests."""
    redis = await create_redis(server.tcp_address)
    await redis.flushall()
    yield redis
@pytest.fixture
async def redis_sentinel(create_sentinel, sentinel):
    """Return a Redis Sentinel client; sanity-checks that the sentinel answers PING."""
    redis_sentinel = await create_sentinel([sentinel.tcp_address], timeout=2)
    assert await redis_sentinel.ping() == b"PONG"
    return redis_sentinel
@pytest.fixture
def _closable(event_loop):
    """Yield a register(conn) callback; every registered connection is closed
    (and its closure awaited) when the test finishes."""
    conns = []
    async def close():
        waiters = []
        # Drain the list so each registered connection is closed exactly once.
        while conns:
            conn = conns.pop(0)
            conn.close()
            waiters.append(conn.wait_closed())
        if waiters:
            await asyncio.gather(*waiters)
    try:
        yield conns.append
    finally:
        event_loop.run_until_complete(close())
@pytest.fixture(scope="session")
def server(start_server):
    """Session-scoped redis-server instance aliased "A"."""
    return start_server("A")
@pytest.fixture(scope="session")
def serverB(start_server):
    """Session-scoped second redis-server instance ("B") for multi-server tests."""
    return start_server("B")
@pytest.fixture(scope="session")
def sentinel(start_sentinel, request, start_server):
    """Starts redis-sentinel instance with one master -- masterA."""
    # Adding main+replica for normal (no failover) tests:
    main_no_fail = start_server("main-no-fail")
    start_server("replica-no-fail", slaveof=main_no_fail)
    # Adding master+slave for failover test;
    mainA = start_server("mainA")
    start_server("replicaA", slaveof=mainA)
    # The sentinel monitors both masters; replicas are discovered via the masters.
    return start_sentinel("main", mainA, main_no_fail)
@pytest.fixture(params=["path", "query"])
def server_tcp_url(server, request):
    """Factory building redis:// URLs for the session server.

    Parametrized twice so both URL forms are exercised: credentials/db encoded
    in the URL itself ("path") or left as query parameters ("query").
    """
    def make(**kwargs):
        netloc = "{0.host}:{0.port}".format(server.tcp_address)
        path = ""
        if request.param == "path":
            # Move password/db out of kwargs and into the URL proper.
            if "password" in kwargs:
                netloc = ":{0}@{1.host}:{1.port}".format(
                    kwargs.pop("password"), server.tcp_address
                )
            if "db" in kwargs:
                path = "/{}".format(kwargs.pop("db"))
        # Whatever remains in kwargs becomes the query string.
        query = urlencode(kwargs)
        return urlunparse(("redis", netloc, path, "", query, ""))
    return make
@pytest.fixture
def server_unix_url(server):
    """Factory building unix:// URLs for the session server's unix socket."""
    def make(**kwargs):
        return urlunparse(("unix", "", server.unixsocket, "", urlencode(kwargs), ""))
    return make
# Internal stuff #
# Taken from python3.9
class BooleanOptionalAction(argparse.Action):
    """argparse action providing paired --flag / --no-flag boolean options
    (backport of argparse.BooleanOptionalAction from Python 3.9)."""

    def __init__(
        self,
        option_strings,
        dest,
        default=None,
        type=None,
        choices=None,
        required=False,
        help=None,
        metavar=None,
    ):
        expanded = []
        for opt in option_strings:
            expanded.append(opt)
            # Every long option gets an auto-generated negated twin.
            if opt.startswith("--"):
                expanded.append("--no-" + opt[2:])
        if help is not None and default is not None:
            help += f" (default: {default})"
        super().__init__(
            option_strings=expanded,
            dest=dest,
            nargs=0,
            default=default,
            type=type,
            choices=choices,
            required=required,
            help=help,
            metavar=metavar,
        )

    def __call__(self, parser, namespace, values, option_string=None):
        # The negated spelling stores False, the plain spelling stores True.
        if option_string in self.option_strings:
            setattr(namespace, self.dest, not option_string.startswith("--no-"))

    def format_usage(self):
        return " | ".join(self.option_strings)
def pytest_addoption(parser):
    """Register command-line options: redis binaries, SSL test files, uvloop toggle."""
    parser.addoption(
        "--redis-server",
        default=[],
        action="append",
        help="Path to redis-server executable," " defaults to `%(default)s`",
    )
    parser.addoption(
        "--ssl-cafile",
        default="tests/ssl/cafile.crt",
        help="Path to testing SSL CA file",
    )
    parser.addoption(
        "--ssl-dhparam",
        default="tests/ssl/dhparam.pem",
        help="Path to testing SSL DH params file",
    )
    parser.addoption(
        "--ssl-cert", default="tests/ssl/cert.pem", help="Path to testing SSL CERT file"
    )
    parser.addoption(
        "--uvloop", action=BooleanOptionalAction, help="Run tests with uvloop"
    )
def _read_server_version(redis_bin):
    """Run `<redis_bin> --version` and return the version as a tuple of ints.

    Raises RuntimeError when the first output line has no `v=X.Y.Z` token.
    """
    with subprocess.Popen([redis_bin, "--version"], stdout=subprocess.PIPE) as proc:
        first_line = proc.stdout.readline().decode("utf-8")
    for token in first_line.split():
        if token.startswith("v="):
            return tuple(map(int, token[2:].split(".")))
    raise RuntimeError(f"No version info can be found in {first_line}")
@contextlib.contextmanager
def config_writer(path):
    """Context manager yielding a write(*parts) function that appends one
    space-joined line per call to the file at `path`."""
    with open(path, "wt") as config_file:
        def write(*parts):
            print(*parts, file=config_file)
        yield write
# Filled in by pytest_configure(): paths of the redis-server binaries under test.
REDIS_SERVERS = []
# Maps binary path -> version tuple, e.g. "/usr/bin/redis-server" -> (6, 0, 9).
VERSIONS = {}
def format_version(srv):
    """Readable pytest id for a server binary, e.g. 'redis_v6.0.9'."""
    dotted = ".".join(str(component) for component in VERSIONS[srv])
    return "redis_v{}".format(dotted)
@pytest.fixture(scope="session")
def start_server(_proc, request, tcp_port_factory, server_bin):
    """Starts Redis server instance.
    Caches instances by name.
    ``name`` param -- instance alias
    ``config_lines`` -- optional list of config directives to put in config
    (if no config_lines passed -- no config will be generated,
    for backward compatibility).
    """
    version = _read_server_version(server_bin)
    verbose = request.config.getoption("-v") > 3
    servers = {}
    def timeout(t):
        # Generator that keeps yielding until `t` seconds elapse, then aborts startup.
        end = time.time() + t
        while time.time() <= end:
            yield True
        raise RuntimeError("Redis startup timeout expired")
    def maker(name, config_lines=None, *, slaveof=None, password=None):
        print("Start REDIS", name)
        assert slaveof is None or isinstance(slaveof, RedisServer), slaveof
        # Same alias -> same cached server instance for the whole session.
        if name in servers:
            return servers[name]
        port = tcp_port_factory()
        tcp_address = TCPAddress("localhost", port)
        # Unix sockets are not available on Windows.
        if sys.platform == "win32":
            unixsocket = None
        else:
            unixsocket = f"/tmp/aioredis.{port}.sock"
        dumpfile = f"dump-{port}.rdb"
        data_dir = tempfile.gettempdir()
        dumpfile_path = os.path.join(data_dir, dumpfile)
        stdout_file = os.path.join(data_dir, f"aioredis.{port}.stdout")
        tmp_files = [dumpfile_path, stdout_file]
        if config_lines:
            # Drive the server from a generated config file.
            config = os.path.join(data_dir, f"aioredis.{port}.conf")
            with config_writer(config) as write:
                write("daemonize no")
                write('save ""')
                write("dir ", data_dir)
                write("dbfilename", dumpfile)
                write("port", port)
                if unixsocket:
                    write("unixsocket", unixsocket)
                    tmp_files.append(unixsocket)
                if password:
                    write(f'requirepass "{password}"')
                write("# extra config")
                for line in config_lines:
                    write(line)
                if slaveof is not None:
                    write(
                        "slaveof {0.tcp_address.host} {0.tcp_address.port}".format(
                            slaveof
                        )
                    )
                    if password:
                        write(f'masterauth "{password}"')
            args = [config]
            tmp_files.append(config)
        else:
            # No config file requested: pass everything on the command line.
            args = [
                "--daemonize",
                "no",
                "--save",
                '""',
                "--dir",
                data_dir,
                "--dbfilename",
                dumpfile,
                "--port",
                str(port),
            ]
            if unixsocket:
                args += [
                    "--unixsocket",
                    unixsocket,
                ]
            if password:
                args += [f'--requirepass "{password}"']
            if slaveof is not None:
                args += [
                    "--slaveof",
                    str(slaveof.tcp_address.host),
                    str(slaveof.tcp_address.port),
                ]
                if password:
                    args += [f'--masterauth "{password}"']
        f = open(stdout_file, "w")
        atexit.register(f.close)
        proc = _proc(
            server_bin,
            *args,
            stdout=f,
            stderr=subprocess.STDOUT,
            _clear_tmp_files=tmp_files,
        )
        # Tail the server log until it reports readiness (or the timeout fires).
        with open(stdout_file) as f:
            for _ in timeout(10):
                assert proc.poll() is None, ("Process terminated", proc.returncode)
                log = f.readline()
                if log and verbose:
                    print(name, ":", log, end="")
                if "The server is now ready to accept connections " in log:
                    break
            if slaveof is not None:
                # For replicas, additionally wait for the initial sync to finish.
                for _ in timeout(10):
                    log = f.readline()
                    if log and verbose:
                        print(name, ":", log, end="")
                    if "sync: Finished with success" in log:
                        break
        info = RedisServer(name, tcp_address, unixsocket, version, password)
        servers.setdefault(name, info)
        print("Ready REDIS", name)
        return info
    return maker
@pytest.fixture(scope="session")
def start_sentinel(_proc, request, tcp_port_factory, server_bin):
    """Starts Redis Sentinel instances.

    Returns maker(name, *masters, ...), which spawns (and caches per
    (name, masters) key) a sentinel monitoring the given master servers.
    """
    version = _read_server_version(server_bin)
    verbose = request.config.getoption("-v") > 3
    sentinels = {}
    def timeout(t):
        # Generator that keeps yielding until `t` seconds elapse, then aborts startup.
        end = time.time() + t
        while time.time() <= end:
            yield True
        raise RuntimeError("Redis startup timeout expired")
    def maker(
        name,
        *masters,
        quorum=1,
        noslaves=False,
        down_after_milliseconds=3000,
        failover_timeout=1000,
    ):
        # Cache key: one sentinel per (alias, masters) combination.
        key = (name,) + masters
        if key in sentinels:
            return sentinels[key]
        port = tcp_port_factory()
        tcp_address = TCPAddress("localhost", port)
        data_dir = tempfile.gettempdir()
        config = os.path.join(data_dir, f"aioredis-sentinel.{port}.conf")
        stdout_file = os.path.join(data_dir, f"aioredis-sentinel.{port}.stdout")
        tmp_files = [config, stdout_file]
        if sys.platform == "win32":
            unixsocket = None
        else:
            unixsocket = os.path.join(data_dir, f"aioredis-sentinel.{port}.sock")
            tmp_files.append(unixsocket)
        with config_writer(config) as write:
            write("daemonize no")
            write('save ""')
            write("port", port)
            if unixsocket:
                write("unixsocket", unixsocket)
            write("loglevel debug")
            for master in masters:
                write(
                    "sentinel monitor",
                    master.name,
                    "127.0.0.1",
                    master.tcp_address.port,
                    quorum,
                )
                write(
                    "sentinel down-after-milliseconds",
                    master.name,
                    down_after_milliseconds,
                )
                write("sentinel failover-timeout", master.name, failover_timeout)
                write("sentinel auth-pass", master.name, master.password)
        f = open(stdout_file, "w")
        atexit.register(f.close)
        proc = _proc(
            server_bin,
            config,
            "--sentinel",
            stdout=f,
            stderr=subprocess.STDOUT,
            _clear_tmp_files=tmp_files,
        )
        # XXX: wait sentinel see all masters and slaves;
        all_masters = {m.name for m in masters}
        if noslaves:
            # BUG FIX: this was `{}` (an empty dict); dict has no .discard(), so a
            # stray "+slave" log line would have raised AttributeError. Use a set.
            all_slaves = set()
        else:
            all_slaves = {m.name for m in masters}
        with open(stdout_file) as f:
            for _ in timeout(30):
                assert proc.poll() is None, ("Process terminated", proc.returncode)
                log = f.readline()
                if log and verbose:
                    print(name, ":", log, end="")
                for m in masters:
                    if f"# +monitor master {m.name}" in log:
                        all_masters.discard(m.name)
                    if "* +slave slave" in log and f"@ {m.name}" in log:
                        all_slaves.discard(m.name)
                if not all_masters and not all_slaves:
                    break
            else:
                raise RuntimeError("Could not start Sentinel")
        masters = {m.name: m for m in masters}
        info = SentinelServer(name, tcp_address, unixsocket, version, masters)
        sentinels.setdefault(key, info)
        return info
    return maker
@pytest.fixture(scope="session")
def ssl_proxy(_proc, request, tcp_port_factory):
    """Yield sockat(unsecure_port) -> (secure_port, ssl_ctx): spawns a socat
    TLS-terminating proxy in front of a plain TCP port, cached per backend port."""
    by_port = {}
    cafile = os.path.abspath(request.config.getoption("--ssl-cafile"))
    certfile = os.path.abspath(request.config.getoption("--ssl-cert"))
    dhfile = os.path.abspath(request.config.getoption("--ssl-dhparam"))
    assert os.path.exists(
        cafile
    ), "Missing SSL CA file, run `make certificate` to generate new one"
    assert os.path.exists(
        certfile
    ), "Missing SSL CERT file, run `make certificate` to generate new one"
    assert os.path.exists(
        dhfile
    ), "Missing SSL DH params, run `make certificate` to generate new one"
    # Client-side context; verification is disabled for the test certificate.
    ssl_ctx = ssl.create_default_context(cafile=cafile)
    ssl_ctx.check_hostname = False
    ssl_ctx.verify_mode = ssl.CERT_NONE
    ssl_ctx.load_dh_params(dhfile)
    def sockat(unsecure_port):
        if unsecure_port in by_port:
            return by_port[unsecure_port]
        secure_port = tcp_port_factory()
        _proc(
            "/usr/bin/socat",
            "openssl-listen:{port},"
            "dhparam={param},"
            "cert={cert},verify=0,fork".format(
                port=secure_port, param=dhfile, cert=certfile
            ),
            f"tcp-connect:localhost:{unsecure_port}",
        )
        time.sleep(1)  # XXX: crude wait for socat to start listening
        by_port[unsecure_port] = secure_port, ssl_ctx
        return secure_port, ssl_ctx
    return sockat
@pytest.fixture(scope="session")
def _proc():
    """Yield a process launcher; terminates all children and removes their
    temp files on session teardown.

    Uses plain ``pytest.fixture``: the ``pytest.yield_fixture`` alias has been
    deprecated since pytest 3.0 and removed in modern pytest; behavior is identical.
    """
    processes = []
    tmp_files = set()
    def run(*commandline, _clear_tmp_files=(), **kwargs):
        # Spawn a child process and remember it (plus any files to delete later).
        proc = subprocess.Popen(commandline, **kwargs)
        processes.append(proc)
        tmp_files.update(_clear_tmp_files)
        return proc
    try:
        yield run
    finally:
        while processes:
            proc = processes.pop(0)
            proc.terminate()
            proc.wait()
        for path in tmp_files:
            try:
                os.remove(path)
            except OSError:
                pass
def pytest_collection_modifyitems(session, config, items):
    """Deselect tests whose `redis_version` marker exceeds the detected server
    version, and gate ssl_proxy tests on the presence of socat."""
    skip_by_version = []
    for item in items[:]:
        marker = item.get_closest_marker("redis_version")
        if marker is not None:
            try:
                version = VERSIONS[item.callspec.getparam("server_bin")]
            except (KeyError, ValueError, AttributeError):
                # TODO: throw noisy warning
                continue
            if version < marker.kwargs["version"]:
                skip_by_version.append(item)
                item.add_marker(pytest.mark.skip(reason=marker.kwargs["reason"]))
        if "ssl_proxy" in item.fixturenames:
            item.add_marker(
                pytest.mark.skipif(
                    "not os.path.exists('/usr/bin/socat')",
                    reason="socat package required (apt-get install socat)",
                )
            )
    # Remove version-skipped items entirely -- unless that would empty the run.
    if len(items) != len(skip_by_version):
        for i in skip_by_version:
            items.remove(i)
def pytest_configure(config):
    """Locate redis-server binaries, record their versions, register the
    session-parametrized ``server_bin`` fixture, and optionally enable uvloop."""
    bins = config.getoption("--redis-server")[:]
    cmd = "which redis-server"
    # NOTE(review): shutil.which("redis-server") would avoid the shell call and
    # work cross-platform -- confirm before changing.
    if not bins:
        with os.popen(cmd) as pipe:
            path = pipe.read().rstrip()
        assert path, (
            "There is no redis-server on your computer." " Please install it first"
        )
        REDIS_SERVERS[:] = [path]
    else:
        REDIS_SERVERS[:] = bins
    VERSIONS.update({srv: _read_server_version(srv) for srv in REDIS_SERVERS})
    assert VERSIONS, ("Expected to detect redis versions", REDIS_SERVERS)
    class DynamicFixturePlugin:
        @pytest.fixture(scope="session", params=REDIS_SERVERS, ids=format_version)
        def server_bin(self, request):
            """Common for start_server and start_sentinel
            server bin path parameter.
            """
            return request.param
    config.pluginmanager.register(DynamicFixturePlugin(), "server-bin-fixture")
    if config.getoption("--uvloop"):
        try:
            import uvloop
        except ImportError:
            raise RuntimeError("Can not import uvloop, make sure it is installed")
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
|
# 高雄市公有路外停車場一覽表
import sys
import urllib.request
import json
import csv
def fetch(query_limit):
    """Fetch Kaohsiung public parking-lot records from the open-data API.

    query_limit > 0 caps the number of records returned; any other value
    fetches the API default. Returns the decoded JSON payload.
    """
    url = 'http://data.kcg.gov.tw/api/action/datastore_search?resource_id=cd53c749-abeb-47db-b58c-5954d6c337a1'
    if query_limit > 0:
        url = url+'&limit={}'.format(query_limit)
    req = urllib.request.urlopen(url)
    # Removed: an unused charset lookup and commented-out decode call --
    # json.loads accepts the raw UTF-8 bytes directly.
    response_data = req.read()
    json_data = json.loads(response_data)
    return json_data
if __name__ == '__main__':
    # Ask for a record limit (0 = all, -1 = quit).
    qlimit = int(input('設定查詢筆數 (0.all | -1.quit): '))
    if qlimit == -1:
        sys.exit()
    json_data = fetch(qlimit)
    # Dump the raw API response to parking.json for inspection.
    print(json.dumps(json_data, sort_keys=True, indent=4), file=open('parking.json', 'wt'))
    print('停車場欄位:')
    for col in json_data['result']['fields']:
        print(col)
    print('\n'+'停車場資料:')
    for row in json_data['result']['records']:
        print(row)
    # encoding='utf8' (records contain non-ASCII text)
    # newline='' (without it, csv writes an extra blank line after each row)
    parking_data = json_data['result']['records']
    with open('parking.csv', 'w', newline='', encoding='utf8') as parking_csv:
        # Write a header row from the first record's keys, then one row per record.
        csvwriter = csv.writer(parking_csv)
        count = 0
        for row in parking_data:
            if count == 0:
                header = row.keys()
                csvwriter.writerow(header)
            count += 1
            csvwriter.writerow(row.values())
    print('\n'+'資料筆數:', count)
|
import os
import sys
import argparse
import json
from fractions import Fraction
from typing import List, Tuple, Dict, Set
from random import sample, choice, randint
from soadata import DataSystem, DataSystemConfig, ServiceCost
# Refuse to run on interpreters older than the minimum supported version.
if not (sys.version_info.major == 3 and sys.version_info.minor >= 5):
    print("This script requires Python 3.5 or higher!")
    print("You are using Python {}.{}.".format(sys.version_info.major, sys.version_info.minor))
    sys.exit(1)
# Command line: -c/--configfile points at the experiment's JSON config file.
parser = argparse.ArgumentParser(description = 'Generates a simulation for service oriented architecture')
parser.add_argument("-c", "--configfile", help="the json configuration file", required = True)
args = parser.parse_args()
# Upper bounds expressed as exponents of 2.
MAX_ITEMS_MAGNITUDE = 48 #2^48
MAX_PROCESSING_MAGNITUDE_MICRO_SEC = 27 # 2^27
class ScriptConfig:
    # Thin wrapper exposing the parsed CLI options.
    def __init__(self):
        # Path to the JSON configuration file (from -c/--configfile).
        self.config_file = args.configfile
scriptconfig = ScriptConfig()
def load_config():
    """Load and return the JSON configuration selected on the command line."""
    with open(scriptconfig.config_file, 'r') as jsonfile:
        return json.load(jsonfile)
wholeconfig = load_config()
# The "experiment" section drives how many data systems are simulated.
dataconfig = DataSystemConfig.from_obj(wholeconfig["experiment"])
print(dataconfig)
serviceCost = ServiceCost.from_obj(wholeconfig["calculator"]["cost"])
# Build, prepare and summarise each simulated data system.
for _ in range(dataconfig.datasystem_count):
    dataSystem = DataSystem(dataconfig, service_cost = serviceCost)
    dataSystem.prepare()
    dataSystem.get_usage_overview().summarise()
|
# Generated by Django 3.1.12 on 2021-07-05 15:13
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter board.category/like/scrap many-to-many fields (adds null=True).

    NOTE(review): Django ignores null=True on ManyToManyField (warning
    fields.W340), so these AlterField operations are likely no-ops -- confirm.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('category', '0002_category_color'),
        ('board', '0005_auto_20210705_0029'),
    ]
    operations = [
        migrations.AlterField(
            model_name='board',
            name='category',
            field=models.ManyToManyField(null=True, related_name='board_category', to='category.Category'),
        ),
        migrations.AlterField(
            model_name='board',
            name='like',
            field=models.ManyToManyField(null=True, related_name='like', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='board',
            name='scrap',
            field=models.ManyToManyField(null=True, related_name='scrap', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# start of importing
import os
import logging
from emoji import emojize
from gettext import gettext as _
from gettext import translation
from telegram import InlineKeyboardButton, InlineKeyboardMarkup,\
KeyboardButton, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import (
Updater,
CommandHandler,
CallbackQueryHandler,
ConversationHandler,
MessageHandler,
Filters
)
# end of importing
# Module-level logging, configured once at import time.
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    level=logging.INFO
)
logger = logging.getLogger(__name__)
# ###################################################################
# 4 available states
# 1 - language - when it is started
# 2 - options of services
# 3 - contact
# 4 - final - if user enters contact not shares
LANGUAGE, OPTIONS, CONTACT, FINAL = range(4)
# In-memory per-user state: chosen language code and last inline keyboard.
language = {}
keyboard_option = {}

def update_language(user_id, option):
    """Remember the user's language if it is one of the supported codes."""
    if option in ['en', 'uz', 'ru']:
        language[str(user_id)] = option

def get_language(user_id):
    """Return the user's stored language code, or None if never set."""
    return language.get(str(user_id), None)

def update_keyboard(user_id, keyboard_):
    """Store the user's current inline keyboard layout."""
    keyboard_option[str(user_id)] = keyboard_

def get_keyboard(user_id):
    """Return the user's stored inline keyboard, or None if never set."""
    return keyboard_option.get(str(user_id), None)
# end of the helper functions with keyboards and language
# install language globally
def return_translation_function(user_id):
    """Return the gettext translation function for the user's stored language.

    NOTE(review): the stored language code is passed as the gettext *domain*
    argument while `languages` is a fixed list -- verify the locales/ directory
    layout actually matches this usage.
    """
    option = get_language(user_id)
    language_ = translation(option, localedir='locales',
                            languages=['ru', 'uz', 'en'])
    language_.install()
    _ = language_.gettext
    return _
# the function which is triggered when start command is given
def start_language(update, context):
    """Entry point for /start: ask the user to pick a language (-> LANGUAGE state)."""
    reply_keyboard = [['UZ', 'RU', 'EN']]
    # Prompt is trilingual because no language has been chosen yet.
    update.message.reply_text(
        "Iltimos, tilni tanlang\n"
        'Пожалуйста, выберите ваш язык\n'
        'Please, choose your language\n',
        reply_markup=ReplyKeyboardMarkup(
            reply_keyboard, resize_keyboard=True, one_time_keyboard=True)
    )
    return LANGUAGE
def get_list_of_services(user_id):
    """Return the localized list of offered services, in display order."""
    _ = return_translation_function(user_id)
    service_names = ('Logo Design', 'Web Development', 'Mobile Development',
                     'Telegram Bot', 'SEO Optimization', 'E-commerce')
    return [_(name) for name in service_names]
def confirm_language(update, context):
    """Store the chosen language, then show the services keyboard (-> OPTIONS)."""
    global language
    text = update.message.text.lower()
    user = update.message.from_user
    try:
        update_language(user.id, text)
    except Exception as e:
        # NOTE(review): update_language never raises, so this branch looks dead;
        # it also uses the module-level gettext `_`, not the user's translator.
        print('The language you chose is not available yet.')
        update.message.reply_text(
            _('Language you chose is not available yet.')
        )
        return LANGUAGE
    _ = return_translation_function(user_id=user.id)
    update.message.reply_text(
        _('Your language: {}').format(get_language(user.id).title())
    )
    # One inline button per service, plus a trailing "Done" button.
    keyboard = []
    services = get_list_of_services(user.id)
    for index, service in enumerate(services):
        keyboard.append([InlineKeyboardButton(
            service, callback_data=str(index))])
    keyboard.append([InlineKeyboardButton(_('{} Done').format(
        emojize(':ok:', use_aliases=True)), callback_data='done')])
    update_keyboard(user.id, keyboard)
    r_markup = InlineKeyboardMarkup(keyboard)
    update.message.reply_text(
        _('Please choose one of our services'),
        reply_markup=r_markup
    )
    return OPTIONS
# helper functions
def change_text(text):
    """Toggle the leading check-mark emoji on a keyboard button label."""
    checkbox = emojize(':white_check_mark:', use_aliases=True)
    if checkbox not in text:
        # Not selected yet: prefix the check mark.
        return f'{checkbox} {text}'
    # Already selected: drop the first token (the check mark).
    return " ".join(text.split()[1:])
def make_message_from_list(list_of_orders, user_id):
    """Build the localized order-summary message: a header plus one line per order."""
    _ = return_translation_function(user_id)
    header = _('Your orders so far')
    return "\n".join([header] + list(list_of_orders))
def string_from_array(keyboard):
    """Collect the labels of keyboard buttons marked with a check-mark emoji."""
    marker = emojize(':white_check_mark:', use_aliases=True)
    return [row[0].text for row in keyboard if marker in row[0].text]
# end of the helper functions
def options(update, context):
    """Handle service-selection callbacks; on "done", forward the order (-> CONTACT)."""
    query = update.callback_query
    user_id = query.message.chat.id
    bot = context.bot
    keyboard = get_keyboard(user_id)
    _ = return_translation_function(user_id)
    if query.data == 'done':
        array = string_from_array(keyboard)
        # Nothing selected yet: stay in this state silently.
        if len(array) == 0:
            return OPTIONS
        text = make_message_from_list(array, user_id)
        bot.edit_message_text(
            chat_id=user_id,
            message_id=query.message.message_id,
            text=text,
        )
        # Forward the order summary to the admin chat.
        bot.send_message(
            chat_id='694902869',
            text="Possible Client\n{} : @{}\nOrders: \n{}".format(
                query.message.chat.first_name,
                query.message.chat.username,
                "\n".join(text.split('\n')[1:])
            )
        )
        # Next state going
        contact_keyboard = [
            KeyboardButton(text=_("Send My Contact"), request_contact=True),
            KeyboardButton(text=_("I have other number")),
        ]
        bot.send_message(
            chat_id=query.message.chat.id,
            text=_("Would you mind sharing your contact?"),
            reply_markup=ReplyKeyboardMarkup(
                [contact_keyboard], one_time_keyboard=True, resize_keyboard=True)
        )
        return CONTACT
    else:
        # A service button: toggle its check mark and redraw the keyboard in place.
        text = change_text(keyboard[int(query.data)][0].text)
        keyboard[int(query.data)][0].text = text
        reply_markup = InlineKeyboardMarkup(keyboard)
        bot.edit_message_text(
            chat_id=user_id,
            message_id=query.message.message_id,
            text=_('Please choose one of our services'),
            reply_markup=reply_markup
        )
        return OPTIONS
def contact_send(update, context):
    """User shared their contact card: relay the phone number to the admin chat."""
    reply_markup = ReplyKeyboardRemove()
    user_id = str(update.message.from_user.id)
    _ = return_translation_function(user_id)
    contact = update.message
    context.bot.send_message(
        chat_id='694902869',
        text='FROM BOT\nPHONE NUMBER: +'+(contact.contact.phone_number) +
        '\n------------------------\n',
        reply_markup=reply_markup
    )
    update.message.reply_text(
        _("Thank you very much, we will contact you soon")
    )
    return ConversationHandler.END
def contact_get(update, context):
    """User typed text instead of sharing a contact: ask for a number (-> FINAL)."""
    user_id = update.message.from_user.id
    _ = return_translation_function(user_id)
    prompt_lines = [_("Please then send me your contact"),
                    _("Make sure it is in the form as follows"),
                    _("+998 93 578 97 68"),
                    _("Go ahead")]
    update.message.reply_text("".join(line + "\n" for line in prompt_lines))
    return FINAL
def contact_request(update, context):
    """A well-formed phone number was typed: relay it to the admin chat and end."""
    reply_markup = ReplyKeyboardRemove()
    user_id = str(update.message.from_user.id)
    _ = return_translation_function(user_id)
    contact = update.message.text
    context.bot.send_message(
        chat_id='694902869',
        text='FROM BOT\nPHONE NUMBER: ' +
        str(contact) + '\n------------------------\n',
        reply_markup=reply_markup
    )
    update.message.reply_text(
        _("Thank you very much, we will contact you soon")
    )
    return ConversationHandler.END
def contact_wrong(update, context):
    """Typed number did not match the expected pattern: show an example (-> FINAL)."""
    user_id = update.message.from_user.id
    _ = return_translation_function(user_id)
    message = _("Please make sure that your contact's pattern is as follows")
    example = _("+998 93 578 97 68")
    update.message.reply_text(f"{message}\n{example}\n")
    return FINAL
def error_handler(update, context):
    """Log any error raised while processing an update."""
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
    """Build the conversation handler and run the bot as a Heroku webhook."""
    # Read secrets/config from the environment, falling back to the previous
    # hard-coded values so existing deployments keep working.
    # NOTE(review): a bot token committed in source should be revoked/rotated.
    TOKEN = os.environ.get(
        'TOKEN', '856336707:AAFRfR3dP7XZLZL5MDuR2D2HitAvfUeYt94')
    NAME = "telegram-bot-test-first"
    # Previously int(os.environ.get('PORT')) crashed when PORT was unset.
    PORT = os.environ.get('PORT', '8443')
    updater = Updater(TOKEN, use_context=True)
    dp = updater.dispatcher
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start_language)],
        states={
            LANGUAGE: [
                MessageHandler(Filters.regex(
                    '^(RU|EN|UZ)$'), confirm_language),
                MessageHandler(Filters.text, start_language)
            ],
            OPTIONS: [
                CallbackQueryHandler(options, pattern=r'[0-6]|^done$')
            ],
            CONTACT: [
                MessageHandler(Filters.contact, contact_send),
                MessageHandler(Filters.text, contact_get)
            ],
            FINAL: [
                MessageHandler(Filters.regex(
                    '^(\+998[\s]?\d{2}[\s\.-]?\d{3}[\s\.-]?\d{2}[\s\.-]?\d{2})$'),
                    contact_request),
                MessageHandler(Filters.text, contact_wrong)
            ]
        },
        fallbacks=[CommandHandler('start', start_language)]
    )
    dp.add_handler(conv_handler)
    dp.add_error_handler(error_handler)
    updater.start_webhook(listen="0.0.0.0",
                          port=int(PORT),
                          url_path=TOKEN)
    updater.bot.setWebhook("https://{}.herokuapp.com/{}".format(NAME, TOKEN))
    updater.idle()

if __name__ == "__main__":
    main()
|
def GenerateConfig(context):
    """Deployment Manager template: resources for a cluster bootstrap node
    (static public IP, the bootstrap VM, and its instance group)."""
    resources = [{
        # Static external IP address the bootstrap node is reachable on.
        'name': context.properties['infra_id'] + '-bootstrap-public-ip',
        'type': 'compute.v1.address',
        'properties': {
            'region': context.properties['region']
        }
    }, {
        # The bootstrap VM: boots from the configured image and pulls its
        # full ignition config from the bootstrap_ign URL via user-data.
        'name': context.properties['infra_id'] + '-bootstrap',
        'type': 'compute.v1.instance',
        'properties': {
            'disks': [{
                'autoDelete': True,
                'boot': True,
                'initializeParams': {
                    'diskSizeGb': context.properties['root_volume_size'],
                    'sourceImage': context.properties['image']
                }
            }],
            'machineType': 'zones/' + context.properties['zone'] + '/machineTypes/' + context.properties['machine_type'],
            'metadata': {
                'items': [{
                    'key': 'user-data',
                    'value': '{"ignition":{"config":{"replace":{"source":"' + context.properties['bootstrap_ign'] + '","verification":{}}},"timeouts":{},"version":"2.1.0"},"networkd":{},"passwd":{},"storage":{},"systemd":{}}',
                }]
            },
            'networkInterfaces': [{
                'subnetwork': context.properties['control_subnet'],
                'accessConfigs': [{
                    # References the address resource declared above.
                    'natIP': '$(ref.' + context.properties['infra_id'] + '-bootstrap-public-ip.address)'
                }]
            }],
            'tags': {
                'items': [
                    context.properties['infra_id'] + '-master',
                    context.properties['infra_id'] + '-bootstrap'
                ]
            },
            'zone': context.properties['zone']
        }
    }, {
        # Instance group exposing the ignition (22623) and https (6443) named ports.
        'name': context.properties['infra_id'] + '-bootstrap-instance-group',
        'type': 'compute.v1.instanceGroup',
        'properties': {
            'namedPorts': [
                {
                    'name': 'ignition',
                    'port': 22623
                }, {
                    'name': 'https',
                    'port': 6443
                }
            ],
            'network': context.properties['cluster_network'],
            'zone': context.properties['zone']
        }
    }]
    return {'resources': resources}
|
from datetime import timedelta
import hikari
import lavasnek_rs
import tanjun
from avgamah.core.client import Client
from avgamah.utils.buttons import DELETE_ROW
from avgamah.utils.time import pretty_timedelta_shortened
from . import fetch_lavalink
# Component collecting the /nowplaying slash command.
now_playing_component = tanjun.Component()
@now_playing_component.with_slash_command
@tanjun.with_own_permission_check(
    hikari.Permissions.SEND_MESSAGES
    | hikari.Permissions.VIEW_CHANNEL
    | hikari.Permissions.EMBED_LINKS
    | hikari.Permissions.CONNECT
    | hikari.Permissions.SPEAK
)
@tanjun.as_slash_command("nowplaying", "See Currently Playing Song")
async def now_playing(ctx: tanjun.abc.Context) -> None:
    """Respond with an embed describing the track currently playing in this guild."""
    lavalink = fetch_lavalink(ctx)
    node = await lavalink.get_guild_node(ctx.guild_id)
    if not node or not node.now_playing:
        return await ctx.respond(
            "There's nothing playing at the moment!", component=DELETE_ROW
        )
    # Track length/position are in milliseconds; render as short timedeltas.
    total = pretty_timedelta_shortened(
        timedelta(seconds=node.now_playing.track.info.length / 1000)
    )
    now = pretty_timedelta_shortened(
        timedelta(seconds=node.now_playing.track.info.position / 1000)
    )
    embed = hikari.Embed(
        title="Now Playing",
        description=f"[{node.now_playing.track.info.title}]({node.now_playing.track.info.uri})",
        color=0x00FF00,
    )
    fields = [
        ("Requested by", f"<@{node.now_playing.requester}>", True),
        ("Author", node.now_playing.track.info.author, True),
        ("Duration", f"{now}/{total}", True),
    ]
    for name, value, inline in fields:
        embed.add_field(name=name, value=value, inline=inline)
    await ctx.respond(embed=embed, component=DELETE_ROW)
@tanjun.as_loader
def load_components(client: Client) -> None:
    """Register a copy of the now-playing component on the given client."""
    client.add_component(now_playing_component.copy())
|
import threading
import sys
import time
# Shared counters incremented by the sensor threads below.
a = 0
b = 0
def lidar():
    """Simulated lidar sensor: increment global counter `a` every 0.25 s, forever."""
    global a
    while True:
        a += 1
        time.sleep(0.25)
def ir():
    """Simulated IR sensor: increment global counter `b` every 0.5 s, forever."""
    global b
    while True:
        b += 1
        time.sleep(0.5)
# creating threads
sensor1 = threading.Thread(target=lidar)
sensor2 = threading.Thread(target=ir)
# starting threads
# Daemon threads are killed automatically when the main thread exits.
sensor1.daemon=True
sensor2.daemon=True
sensor1.start()
sensor2.start()
try:
    # NOTE(review): this prints once and sleeps 1 s; a `while True:` loop was
    # probably intended so Ctrl-C could interrupt a running display -- confirm.
    print(a, b)
    time.sleep(1)
except KeyboardInterrupt:
    time.sleep(2)
# wait until all threads finish
# NOTE(review): sys.exit() runs unconditionally, making the join() calls
# below unreachable dead code.
sys.exit()
sensor1.join()
sensor2.join()
|
# pylint: disable=missing-function-docstring
from typing import List
import torch
def sample_data(counts: List[int], dims: List[int]) -> List[torch.Tensor]:
    """One standard-normal (count, dim) data tensor per (count, dim) pair."""
    samples = []
    for count, dim in zip(counts, dims):
        samples.append(torch.randn(count, dim))
    return samples
def sample_means(counts: List[int], dims: List[int]) -> List[torch.Tensor]:
    """One standard-normal (count, dim) mean matrix per (count, dim) pair."""
    return [torch.randn(*shape) for shape in zip(counts, dims)]
def sample_spherical_covars(counts: List[int]) -> List[torch.Tensor]:
    """One uniform(0, 1) vector of `count` spherical covariances per entry."""
    covars = []
    for count in counts:
        covars.append(torch.rand(count))
    return covars
def sample_diag_covars(counts: List[int], dims: List[int]) -> List[torch.Tensor]:
    """Uniform (count, dim) diagonal covariances; size-1 axes are squeezed away."""
    covars = []
    for count, dim in zip(counts, dims):
        covars.append(torch.rand(count, dim).squeeze())
    return covars
def sample_full_covars(counts: List[int], dims: List[int]) -> List[torch.Tensor]:
    """Random PSD (count, dim, dim) full covariances built as A^T A, squeezed."""
    def one_batch(count, dim):
        # A tall factor guarantees A^T A is full rank with high probability.
        factor = torch.randn(count, dim * 10, dim)
        return factor.transpose(1, 2).bmm(factor).squeeze()
    return [one_batch(count, dim) for count, dim in zip(counts, dims)]
|
from bs4 import BeautifulSoup
# Parse a tiny document; the sibling immediately after <p> is the <div> element.
soup = BeautifulSoup('<html><p>line 1</p><div><a>line 2</a></div></html>', features="lxml")
# Prints the tag name of that next sibling ("div").
print(soup.find('p').nextSibling.name)
|
import math
import numpy as np
import torch
from torch import nn as nn
from torch import einsum
import torch.nn.functional as F
from config import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import SelectiveKernelConv, ConvBnAct, create_attn
from .registry import register_model
from .resnet import ResNet
from einops import rearrange
def _cfg(url='', **kwargs):
    """Default pretrained-model config dict; keyword args override the defaults."""
    cfg = {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
        'crop_pct': 0.875, 'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'conv1', 'classifier': 'fc',
    }
    cfg.update(kwargs)
    return cfg
# Registry of model configs; the plain 'resnet' entry has no pretrained URL.
default_cfgs = {
    'resnet': _cfg(
        url=''),
}
def exists(val):
    """Report whether *val* carries a value, i.e. is not None."""
    return val is not None
def default(val, d):
    """Return *val* when it is not None, otherwise the fallback *d*."""
    # inlines the trivial exists() helper: exists(val) == (val is not None)
    return d if val is None else val
def calc_rel_pos(n):
    """Build the relative-position index table for an n x n feature map.

    Returns a LongTensor of shape (n*n, n*n, 2) whose entry [a, b] is
    (row_b - row_a + n - 1, col_b - col_a + n - 1): the offset of cell b
    relative to cell a, shifted from [-(n-1), n-1] into [0, 2n-2] so it
    can index an embedding table directly.

    Uses plain torch ops (permute/reshape) instead of einops.rearrange,
    removing a third-party dependency for what is a simple reshape.
    """
    grid = torch.meshgrid(torch.arange(n), torch.arange(n))
    # (2, n, n) -> (n*n, 2), row-major: coords[i*n + j] == (i, j)
    coords = torch.stack(grid).permute(1, 2, 0).reshape(n * n, 2)
    # broadcasted pairwise difference: rel_pos[a, b] == coords[b] - coords[a]
    rel_pos = coords[None, :] - coords[:, None]
    return rel_pos + (n - 1)  # shift value range from [-n+1, n-1] to [0, 2n-2]
class LambdaLayer(nn.Module):
    """Lambda layer: an attention-like module combining a global content
    pathway (softmax-normalized keys contracted with values) and a local
    position pathway (a 3-D convolution of values with learned r x r
    relative embeddings), both applied to per-head queries.

    Args:
        dim:   input channel count; also the output channel count, since
               dim_v = dim // heads and the heads are merged back at the end.
        dim_k: key/query depth per head.
        r:     spatial extent of the local position kernel (odd).
        heads: number of query heads.
    """
    def __init__(
        self,
        dim,
        dim_k,
        r = 15,
        heads = 4):
        super().__init__()
        self.heads = heads
        self.dim = dim              # input/output channels
        self.dim_k = dim_k          # key/query depth per head
        self.dim_v = dim // heads   # value depth, so heads * dim_v == dim
        self.r = r                  # local receptive field of position lambdas
        # 1x1 convs project the input into queries/keys/values (BN on q and v)
        self.to_q = nn.Sequential(
            nn.Conv2d(dim, dim_k * heads, 1, bias = False),
            nn.BatchNorm2d(dim_k * heads)
        )
        self.to_k = nn.Conv2d(dim, dim_k, 1, bias = False)
        self.to_v = nn.Sequential(
            nn.Conv2d(dim, self.dim_v, 1, bias = False),
            nn.BatchNorm2d(self.dim_v)
        )
        # NOTE(review): applied to keys of shape (b, dim_k, w, h), dim=1
        # normalizes over the dim_k channel axis; the lambda-networks paper
        # normalizes keys over spatial positions -- confirm intended axis.
        self.softmax = nn.Softmax(dim=1)
        # learned relative-position embeddings, used as a conv3d kernel with
        # dim_k output channels and a (1, r, r) spatial extent
        self.embeddings = nn.Parameter(torch.randn(dim_k, 1, 1, self.r, self.r))
        self.padding = (self.r - 1) // 2  # 'same' spatial padding for odd r
    def compute_position_lambdas(self, embeddings, values):
        """Convolve values with the embedding kernel to produce local
        position lambdas of shape (b, dim_k, v, w*h)."""
        b, v, w, h = values.shape
        values = values.view(b, 1, v, w, h)  # singleton in-channel dim for conv3d
        # one r x r spatial filter per key-depth channel, shared across the
        # value-channel axis (kernel depth 1 with zero depth-padding)
        position_lambdas = F.conv3d(values, embeddings, padding=(0, self.padding, self.padding))
        position_lambdas = position_lambdas.view(b, self.dim_k, v, w*h)
        return position_lambdas
    def lambda_layer(self, queries, keys, embeddings, values):
        """Apply content and position lambdas to the queries; returns a
        tensor of shape (b, heads*dim_v, w, h)."""
        position_lambdas = self.compute_position_lambdas(embeddings, values)
        keys = self.softmax(keys)
        b, v, w, h = values.shape
        queries = queries.view(b, self.heads, self.dim_k, w*h)
        keys = keys.view(b, self.dim_k, w*h)
        values = values.view(b, self.dim_v, w*h)
        # global content lambda: contract keys and values over positions m
        content_lambda = einsum('bkm,bvm->bkv', keys, values)
        # apply the shared content lambda at every query position n
        content_output = einsum('bhkn,bkv->bhvn', queries, content_lambda)
        # apply the per-position lambdas
        position_output = einsum('bhkn,bkvn->bhvn', queries, position_lambdas)
        output = content_output + position_output
        output = output.reshape(b, -1, w, h)  # merge heads back into channels
        return output
    def forward(self, x):
        """x: (b, dim, w, h) -> (b, dim, w, h)."""
        q = self.to_q(x)
        k = self.to_k(x)
        v = self.to_v(x)
        output = self.lambda_layer(q, k, self.embeddings, v)
        return output
class Bottleneck(nn.Module):
    """ResNet bottleneck block whose 3x3 convolution is replaced by a
    LambdaLayer (the commented-out nn.Conv2d below is the original form).

    The interface mirrors timm's ResNet Bottleneck so instances can be
    used as the `block` argument of `ResNet`.
    """
    expansion = 4  # output channels = planes * expansion
    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,
                 reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
                 attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
        super(Bottleneck, self).__init__()
        # channel bookkeeping (timm conventions)
        width = int(math.floor(planes * (base_width / 64)) * cardinality)
        first_planes = width // reduce_first
        outplanes = planes * self.expansion
        first_dilation = first_dilation or dilation
        use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)
        self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)
        self.bn1 = norm_layer(first_planes)
        self.act1 = act_layer(inplace=True)
        # Spatial downsampling is done by average pooling, applied AFTER the
        # lambda layer in forward(), instead of a strided 3x3 conv.
        if stride > 1:
            self.conv2_down = nn.AvgPool2d(3, 2, padding=1)
        else:
            self.conv2_down = None
        # LambdaLayer stands in for the usual 3x3 conv.
        # NOTE(review): its input is conv1's output with first_planes
        # channels, but it is constructed with `width` channels; these
        # differ whenever reduce_first != 1 -- confirm reduce_first is
        # always 1 for this model.
        self.conv2 = LambdaLayer(
                width,
                dim_k=16,
                r = 15,
                heads = 4,
                )
        #self.conv2 = nn.Conv2d(
        #    first_planes, width, kernel_size=3, stride=1 if use_aa else stride,
        #    padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)
        self.bn2 = norm_layer(width)
        self.act2 = act_layer(inplace=True)
        self.aa = aa_layer(channels=width, stride=stride) if use_aa else None
        self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)
        self.bn3 = norm_layer(outplanes)
        self.se = create_attn(attn_layer, outplanes)  # optional attention module
        self.act3 = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path
    def zero_init_last_bn(self):
        # zero-init the last BN so the block starts as an identity mapping
        nn.init.zeros_(self.bn3.weight)
    def forward(self, x):
        residual = x
        # 1x1 reduce
        x = self.conv1(x)
        x = self.bn1(x)
        if self.drop_block is not None:
            x = self.drop_block(x)
        x = self.act1(x)
        # lambda layer in place of the 3x3 conv
        x = self.conv2(x)
        x = self.bn2(x)
        if self.drop_block is not None:
            x = self.drop_block(x)
        x = self.act2(x)
        if self.aa is not None:
            x = self.aa(x)
        # average-pool downsampling happens after the lambda layer
        if self.conv2_down is not None:
            x = self.conv2_down(x)
        # 1x1 expand
        x = self.conv3(x)
        x = self.bn3(x)
        if self.drop_block is not None:
            x = self.drop_block(x)
        if self.se is not None:
            x = self.se(x)
        if self.drop_path is not None:
            x = self.drop_path(x)
        if self.downsample is not None:
            residual = self.downsample(residual)
        x += residual
        x = self.act3(x)
        return x
def _create_lambdanet(variant, pretrained=False, **kwargs):
    """Instantiate a ResNet-based LambdaNet variant via timm's builder,
    using the registered default config for *variant*."""
    cfg = default_cfgs[variant]
    return build_model_with_cfg(
        ResNet, variant, default_cfg=cfg,
        pretrained=pretrained, **kwargs)
@register_model
def lambdanet50(pretrained=False, **kwargs):
    """LambdaNet-50: ResNet-50 stage layout (3-4-6-3) built from lambda
    Bottleneck blocks."""
    return _create_lambdanet(
        'resnet', pretrained,
        block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: jupyter_notebooks/Test_trainer.ipynb
from uti.interface import *
class Trainer():
    '''Build and train one fastai text classifier per dataset exposed by a
    preprocessed-data interface, saving each trained model under
    <interface.csv_path>/models.
    '''
    def __init__(self,data,pre_trained=True,model_dir=None):
        # data: interface object from uti.interface exposing csv_path,
        #   data_list and dataset_name -- TODO confirm exact contract
        # model_dir: accepted but currently unused
        self.interface = data
        self.pre_trained = pre_trained
        # make sure the models output directory exists next to the csv data
        if not os.path.exists(self.interface.csv_path/'models'):
            os.makedirs(self.interface.csv_path/'models')
        self.dest = self.interface.csv_path/'models'
        #hard code path, get url when host
        #fix
        #################
        self.url = '/home/jupyter/insight_project/Project-M/data/preprocessed/csv/models/general-clasifier-0.84'
        #################
    def _create_leaner(self,one_data):
        """Build an AWD-LSTM text classifier for one databunch, load the
        pretrained general-classifier weights from self.url, and freeze
        everything but the last layer group.

        (sic: 'leaner' for 'learner' -- renaming would break callers.)
        """
        learn = text_classifier_learner(one_data,AWD_LSTM,drop_mult=0.5,
                                   loss_func=FlattenedLoss(LabelSmoothingCrossEntropy),
                                   metrics=[accuracy], #FBeta(beta=1)
                                   model_dir = self.dest
                                   )
        if torch.cuda.is_available():
            # NOTE(review): to_fp16's return value is discarded; presumably
            # fastai mutates the learner in place -- confirm
            learn.to_fp16()
        learn.load(self.url)
        learn.freeze_to(-1)
        return learn
    def train_individual_clasifier(self):
        """Train one classifier per databunch in the interface and save each
        model as <dataset name>_<validation accuracy>."""
        num_models = len(self.interface.data_list)
        counter_to_print = 0
        print('Creating models... total job =',num_models)
        for data in self.interface.data_list:
            print(f'Creating model #{counter_to_print+1}...')
            learn = self._create_leaner(data)
            # one cycle at lr 1e-1 -- NOTE(review): unusually high for
            # fine-tuning; confirm intended
            learn.fit_one_cycle(1,1e-1,moms=(0.8,0.7))
            # first recorded metric (accuracy) of the last epoch
            current_valid_score = learn.recorder.metrics[0][0].item()
            model_path = learn.save(f'{self.interface.dataset_name[counter_to_print]}_{current_valid_score:.2f}',
                                    return_path=True, with_opt=False)
            print(f'Model saved at {model_path}')
            counter_to_print += 1
        # print(f'Saving model to {self.dest}')
        # print(f'model name: {self.interface.dataset_name[counter_to_print]}_{current_valid_score}')
|
from fractions import Fraction
from functools import reduce
try:
    # fractions.gcd was removed in Python 3.9; math.gcd is the replacement
    from math import gcd
except ImportError:
    from fractions import gcd
def product(fracs):
    """Multiply a sequence of fractions and return the reduced result.

    Args:
        fracs: non-empty iterable of fractions.Fraction values.

    Returns:
        (numerator, denominator) of the product in lowest terms.
    """
    # Fraction arithmetic keeps results normalized, so the previous
    # two-pass numerator/denominator reduce plus an explicit gcd step
    # collapses to a single product.
    total = reduce(lambda acc, frac: acc * frac, fracs)
    return total.numerator, total.denominator
if __name__ == '__main__':
    # First stdin line: number of fractions; each following line: "num den".
    fracs = []
    for _ in range(int(input())):
        fracs.append(Fraction(*map(int, input().split())))
    result = product(fracs)
    # Print "numerator denominator" of the reduced product.
    print(*result)
|
# ----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
from plugins.chapiv1rpc.chapi.Exceptions import *
from gcf.geni.util.urn_util import is_valid_urn
from tools.geni_constants import *
from gcf.sfa.trust.certificate import Certificate
from tools.chapi_log import *
import types
import uuid
import dateutil.parser
from dateutil.tz import tzutc
# Create a subset 'fields' dictionary with only fields in given list
def select_fields(field_definitions, field_list):
    """Return the subset of field_definitions whose keys appear in
    field_list."""
    return dict((key, field_definitions[key])
                for key in field_definitions
                if key in field_list)
# Base class for checking validity of call arguments
class ArgumentCheck:
    """Abstract validator interface: subclasses inspect an (options,
    arguments) pair and raise a CHAPIv1 error when it is invalid."""
    def validate(self, options, arguments):
        """Subclass responsibility; the base implementation always raises."""
        raise CHAPIv1NotImplementedError('Base Class: ArgumentCheck.validate')
# Argument check based on sets of mandatory and supplemental fields
# defined for a given object schema, indicating which are required
# or permitted fields for a given operation
class FieldsArgumentCheck(ArgumentCheck):
    """Schema-driven validator for CHAPI call options and arguments.

    Args:
        field_option: name of the options key containing field information
            to verify against the object schema ('match', 'fields', ...),
            or None when no such option is required.
        additional_required_options: extra option keys that must be present
            (or None).
        mandatory_fields: fields any CH/MA/SA must implement.
        supplemental_fields: fields this particular CH/MA/SA additionally
            implements.
        argument_types: dict of additional arguments (not in options) and
            their required types, or None.
        matchable: optional restricted collection of fields allowed in
            'match' options.  The shared default list is never mutated, so
            using it as a default argument is safe here.
    """
    def __init__(self, field_option, additional_required_options,
                 mandatory_fields, supplemental_fields,
                 argument_types=None, matchable=[]):
        self._field_option = field_option
        self._additional_required_options = additional_required_options
        self._mandatory_fields = mandatory_fields
        self._supplemental_fields = supplemental_fields
        self._argument_types = argument_types
        self._matchable_fields = matchable

    def validate(self, options, arguments):
        """Raise CHAPIv1ArgumentError unless all required options are
        present and all typed arguments are well-formed; normalizes
        DATETIME fields to UTC as a side effect."""
        if self._field_option and self._field_option not in options.keys():
            raise CHAPIv1ArgumentError("Missing Option: "
                                       + str(self._field_option))
        # Check to see that all the additional required options are present
        if self._additional_required_options:
            for required_option in self._additional_required_options:
                if required_option not in options:
                    raise CHAPIv1ArgumentError("Missing Option: "
                                               + required_option)
        # Check all the typed arguments
        self.validateArgumentFormats(arguments)
        # Normalize field inputs (e.g turn all dates into UTC)
        self.normalizeFields(options)

    # Make sure all object fields provided are recognized
    def validateFieldList(self, fields):
        for field in fields:
            if (field not in self._mandatory_fields
                    and field not in self._supplemental_fields):
                raise CHAPIv1ArgumentError("Unrecognized field : " + field)

    # Format for parsing/formatting datetime with timezone
    FORMAT_DATETIME_TZ = "%Y-%m-%dT%H:%M:%SZ"

    # Modify the 'fields' option to normalize inputs (e.g. turn all dates
    # into UTC TZ)
    def normalizeFields(self, options):
        if 'fields' not in options: return
        for field_name, field_value in options['fields'].items():
            field_type = self.fieldTypeForFieldName(field_name)
            if field_type == 'DATETIME':
                # Store all dates as 'naive UTC'
                # If any date doesn't have a TZ, assume it is UTC
                # If it does have a TZ, convert to UTC and strip TZ info
                # Then store converted value into the proper 'fields' slot
                try:
                    parsed_datetime = dateutil.parser.parse(field_value)
                    if parsed_datetime.tzinfo:
                        parsed_datetime = parsed_datetime.astimezone(tzutc())
                    utc_field_value = parsed_datetime.strftime(FieldsArgumentCheck.FORMAT_DATETIME_TZ)
                    options['fields'][field_name] = utc_field_value
                    chapi_debug('ArgCheck', 'DATETIME convert: %s %s' % (field_value, utc_field_value))
                except Exception:
                    pass  # Can't normalize, will fail when we try to check valid format

    # Take a list of {field : value} dictionaries
    # Make sure all field name/value pairs are recognized and of proper type
    def validateFieldValueDictionary(self, field_values):
        # .items() instead of the Python-2-only .iteritems(): identical
        # behavior, and this also runs under Python 3.
        for field, value in field_values.items():
            if self._matchable_fields and field not in self._matchable_fields:
                raise CHAPIv1ArgumentError("Unrecognized field : " + field)
            if not (self._matchable_fields or field in self._mandatory_fields
                    or field in self._supplemental_fields):
                raise CHAPIv1ArgumentError("Unrecognized field : " + field)
            self.validateFieldValueFormat(field, value)

    # Determine the type of a given field
    def fieldTypeForFieldName(self, field):
        if (field in self._mandatory_fields
                and 'TYPE' in self._mandatory_fields[field]):
            field_type = self._mandatory_fields[field]['TYPE']
        elif (field in self._supplemental_fields
                and 'TYPE' in self._supplemental_fields[field]):
            field_type = self._supplemental_fields[field]['TYPE']
        # NOTE(review): matchable is typically passed as a list of field
        # names, and indexing a list with a string would raise TypeError
        # here; presumably callers that need types pass a dict -- confirm.
        elif (field in self._matchable_fields
                and 'TYPE' in self._matchable_fields[field]):
            field_type = self._matchable_fields[field]['TYPE']
        else:
            raise CHAPIv1ArgumentError("No type defined for field: %s" % field)
        return field_type

    # Validate that a given field has proper format by looking up type
    def validateFieldValueFormat(self, field, value):
        field_type = self.fieldTypeForFieldName(field)
        self.validateTypedField(field, field_type, value)

    # Validate format arguments (not options)
    def validateArgumentFormats(self, arguments):
        if not arguments:
            # Nothing to check (None or empty); previously a None value
            # crashed below with AttributeError on .items().
            return
        if self._argument_types is None:  # was '== None'
            raise CHAPIv1ArgumentError("No argument types provided for arguments : %s" % arguments)
        for arg_name, arg_value in arguments.items():
            if arg_name not in self._argument_types:
                raise CHAPIv1ArgumentError("No argument type provided for argument %s" % arg_name)
            arg_type = self._argument_types[arg_name]
            self.validateTypedField(arg_name, arg_type, arg_value)

    # Validate that a given field value of given type has proper format
    def validateTypedField(self, field, field_type, value):
        properly_formed = True
        if field_type == "URN":
            # NOTE(review): basestring is Python-2-only; under Python 3 the
            # isinstance calls below would raise NameError at runtime --
            # confirm the targeted interpreter before changing.
            if isinstance(value, list):
                for v in value:
                    if isinstance(v, basestring): v = str(v)  # Support UNICODE
                    if not is_valid_urn(v):
                        properly_formed = False
                        break
            else:
                if isinstance(value, basestring): value = str(value)  # Support UNICODE
                properly_formed = is_valid_urn(value)
        elif field_type == "UID":
            try:
                if isinstance(value, list):
                    for v in value: uuid.UUID(v)
                else:
                    uuid.UUID(value)
            except Exception:
                properly_formed = False
        elif field_type == "UID_OR_NULL":
            # The UID branch raises on a bad UID, so the early return here
            # (which skips the properly_formed check below) is safe.
            return value is None or \
                self.validateTypedField(field, 'UID', value)
        elif field_type == "STRING":
            pass  # Always true
        elif field_type == "INTEGER" or field_type == "POSITIVE":
            try:
                v = int(value)
                if field_type == "POSITIVE":
                    properly_formed = (v > 0)
            except Exception:
                properly_formed = False
        elif field_type == "DATETIME":
            properly_formed = False
            if value:
                try:
                    dateutil.parser.parse(value)
                    properly_formed = True
                except Exception:  # was Python-2-only 'except Exception, e'
                    pass
        elif field_type == "DATETIME_OR_NULL":
            properly_formed = False
            if value:
                try:
                    dateutil.parser.parse(value)
                    properly_formed = True
                except Exception:
                    pass
            elif value is None:
                # If the value is None, that's fine
                properly_formed = True
        elif field_type == "EMAIL":
            properly_formed = value.find('@') >= 0 and value.find('.') >= 0
        elif field_type == "KEY":
            pass  # *** No standard format
        elif field_type == "BOOLEAN":
            properly_formed = (type(value) is bool or
                               value.lower() in ['t', 'f', 'true', 'false'])
        elif field_type == "CREDENTIALS":
            # NOTE(review): Credential is not imported in this module; the
            # resulting NameError is caught below and the value is reported
            # as ill-formed. Confirm the intended import.
            try:
                Credential(string=value)
            except Exception:
                properly_formed = False
        elif field_type == "CERTIFICATE":
            try:
                cert = Certificate()
                cert.load_from_string(value)
            except Exception:
                properly_formed = False
        elif field_type == "CONTEXT_TYPE":
            # Must be a number and one of the defined attributes
            try:
                index = int(value)
                properly_formed = index in attribute_type_names
            except Exception:
                properly_formed = False
        elif field_type == "ATTRIBUTE_SET":
            if type(value) != dict:
                # Bug fix: this was misspelled 'propertly_formed', so a
                # non-dict ATTRIBUTE_SET silently passed validation.
                properly_formed = False
            else:
                # Must be
                # {"PROJECT" : project_uid}, or {"SLICE" : slice_uid} or {"MEMBER" : member_uid}
                # or we tolerate any other tag/value
                for attr_key, attr_value in value.items():
                    if attr_key in ['PROJECT', 'SLICE', 'MEMBER']:
                        try:
                            uuid.UUID(attr_value)
                        except Exception:
                            properly_formed = False
        else:
            raise CHAPIv1ArgumentError("Unsupported field type : %s %s" % (field, field_type))
        if not properly_formed:
            raise CHAPIv1ArgumentError("Ill-formed argument of type %s field %s: %s" %
                                       (field_type, field, value))

    # Check that provided values are legitimate
    def checkAllowedFields(self, field_values, field_detail_key,
                           allowed_detail_values):
        for field in field_values:
            value = field_values[field]
            field_details = None
            if field in self._mandatory_fields:  # was has_key (Python-2-only)
                field_details = self._mandatory_fields[field]
            if not field_details and field in self._supplemental_fields:
                field_details = self._supplemental_fields[field]
            if not field_details:
                raise CHAPIv1ArgumentError("Unrecognized field : " + field)
            # There must be an details entry for this field in the specs
            if field_detail_key not in field_details.keys():
                raise CHAPIv1ArgumentError("Required field detail " +
                                           "key missing for %s: %s" %
                                           (field, field_detail_key))
            # The field detail must be one of the allowed values
            field_detail = field_details[field_detail_key]
            if field_detail not in allowed_detail_values:
                raise CHAPIv1ArgumentError("Detail Key not allowed: %s (field %s, value %s, field_detail_key %s)" %
                                           (str(field_detail), field, value, field_detail_key))

    # Check that all required fields are represented in field list
    def checkRequiredFields(self, field_values, field_specs,
                            field_detail_key,
                            required_detail_value):
        for field_name in field_specs.keys():
            all_field_detail = field_specs[field_name]
            if field_detail_key in all_field_detail:  # was has_key (Python-2-only)
                field_detail = all_field_detail[field_detail_key]
                if field_detail == required_detail_value:
                    # This is a required field. Is it present?
                    if field_name not in field_values.keys():
                        raise CHAPIv1ArgumentError("Required field not provided: " + field_name)
# Lookup - 'match' [{FIELD : VALUE], {FIELD : VALUE} ...]
#        - 'filter' [FIELD, FIELD, FIELD]
class LookupArgumentCheck(FieldsArgumentCheck):
    """Validator for lookup calls: a mandatory 'match' option of
    field/value pairs plus an optional 'filter' list of field names."""

    def __init__(self, mandatory_fields, supplemental_fields, matchable=[]):
        FieldsArgumentCheck.__init__(self, 'match', None, mandatory_fields,
                                     supplemental_fields, None, matchable)

    def validate(self, options, arguments):
        """Run the base checks, then vet 'match' pairs and 'filter' names."""
        FieldsArgumentCheck.validate(self, options, arguments)
        for option_key, check in (('match', self.validateFieldValueDictionary),
                                  ('filter', self.validateFieldList)):
            if option_key in options:
                check(options[option_key])
# Lookup - 'match' [{FIELD : VALUE], {FIELD : VALUE} ...]
#        - 'filter' [FIELD, FIELD, FIELD]
class LookupArgumentCheckMatchOptional(FieldsArgumentCheck):
    """Like LookupArgumentCheck, but the 'match' option is not required
    to be present."""

    def __init__(self, mandatory_fields, supplemental_fields, matchable=None):
        FieldsArgumentCheck.__init__(self, None, None, mandatory_fields,
                                     supplemental_fields, None, matchable)

    def validate(self, options, arguments):
        """Run the base checks, then vet any 'match'/'filter' options given."""
        FieldsArgumentCheck.validate(self, options, arguments)
        for option_key, check in (('match', self.validateFieldValueDictionary),
                                  ('filter', self.validateFieldList)):
            if option_key in options:
                check(options[option_key])
# Create - 'fields' [{FIELD : VALUE], {FIELD : VALUE} ...]
# Make sure that all other fields are {"Create" : "Allowed"}
# Make sure all required fields in the object spec are present
class CreateArgumentCheck(FieldsArgumentCheck):
    """Validator for create calls: every provided field must be creatable
    ('REQUIRED' or 'ALLOWED') and every 'REQUIRED' field must be given."""

    def __init__(self, mandatory_fields, supplemental_fields, argument_types=None):
        FieldsArgumentCheck.__init__(self, 'fields', None,
                                     mandatory_fields, supplemental_fields,
                                     argument_types)

    def validate(self, options, arguments):
        FieldsArgumentCheck.validate(self, options, arguments)
        if 'fields' not in options:
            return
        provided = options['fields']
        self.validateFieldList(provided)
        self.validateFieldValueDictionary(provided)
        self.checkAllowedFields(provided, 'CREATE', ['REQUIRED', 'ALLOWED'])
        self.checkRequiredFields(provided, self._mandatory_fields,
                                 'CREATE', 'REQUIRED')
        self.checkRequiredFields(provided, self._supplemental_fields,
                                 'CREATE', 'REQUIRED')
# Update - 'fields' [{FIELD : VALUE], {FIELD : VALUE} ...]
# For each field, check that there is an {'Update' : True} entry in
# object spec
class UpdateArgumentCheck(FieldsArgumentCheck):
    """Validator for update calls: every provided field must be flagged
    updatable ({'UPDATE': True}) in the object spec."""

    def __init__(self, mandatory_fields, supplemental_fields, argument_types=None):
        FieldsArgumentCheck.__init__(self, 'fields', None,
                                     mandatory_fields, supplemental_fields,
                                     argument_types)

    def validate(self, options, arguments):
        FieldsArgumentCheck.validate(self, options, arguments)
        if 'fields' not in options:
            return
        provided = options['fields']
        self.validateFieldList(provided)
        self.validateFieldValueDictionary(provided)
        self.checkAllowedFields(provided, 'UPDATE', [True])
# Validate only arguments, not option fields
class SimpleArgumentCheck(FieldsArgumentCheck):
    """Validator that type-checks only the (non-option) arguments."""

    def __init__(self, argument_types):
        FieldsArgumentCheck.__init__(self, None, None, {}, {}, argument_types)

    def validate(self, options, arguments):
        """Options are ignored entirely; only typed arguments are checked."""
        self.validateArgumentFormats(arguments)
class ValidURNCheck(ArgumentCheck):
    """Checks that the option named by urn_key holds a valid URN, or a
    list of valid URNs."""
    def __init__(self, urn_key) : self._urn_key = urn_key
    def validate(self, options, arguments):
        # Bug fix: was options.has_key(urn_key) -- the bare name 'urn_key'
        # is undefined here; the stored self._urn_key was intended.
        if self._urn_key not in options:
            raise CHAPIv1ArgumentError('Option key missing: ' + self._urn_key)
        urns = options[self._urn_key]
        # Bug fix: was urns.instanceof(types.StringType), which is not a
        # method on str; isinstance() was intended. A single URN string is
        # wrapped into a one-element list.
        if isinstance(urns, str): urns = [urns]
        for urn in urns:
            if not is_valid_urn(urn):
                raise CHAPIv1ArgumentError("Invalid URN: " + urn)
|
# https://www.acmicpc.net/problem/2110
# Parametric (binary) search for the largest minimum pairwise distance at
# which C routers can be placed on N house coordinates.
if __name__ == "__main__":
    # First line: N (houses) and C (routers); then one coordinate per line.
    N, C = map(int, input().split())
    nums = []
    for _ in range(N):
        nums.append(int(input()))
    nums.sort()  # the greedy placement below requires sorted coordinates
    # search space of candidate distances: 1 .. total spread
    minDist, maxDist = 1, nums[-1] - nums[0]
    bestDist = minDist
    while maxDist >= minDist:
        # always place the first router at the smallest coordinate
        cur, count = nums[0], 1
        mid = (maxDist + minDist) // 2
        # greedily count how many routers fit with pairwise distance >= mid
        for i in range(1, N):
            if cur + mid <= nums[i]:
                cur = nums[i]
                count += 1
        if count >= C:
            # mid is feasible: remember it and try a larger distance
            minDist = mid + 1
            bestDist = mid
        else:
            maxDist = mid - 1
    print(f"Result : {bestDist}")
|
# *-* coding: utf-8 *-*
from threading import Thread
from subprocess import Popen, PIPE
import numpy as np
from .logger import Logger
## A class for capturing raw frames from the virtual framebuffer
class FrameCapturer(Thread):
    """Captures raw BGR frames from an X11 virtual framebuffer by piping
    ffmpeg's rawvideo output, and pushes (frame_number, ndarray) pairs
    onto a queue until terminate() is called."""
    def __init__(self, raw_frame_queue, loglevel, display_num, width, height, depth, fps):
        super(FrameCapturer, self).__init__()
        self.raw_frame_queue = raw_frame_queue  # the queue for raw frames
        self.width, self.height = width, height  # the width and height of a raw frame
        self.frame_size = width * height * 3  # the data size of a raw frame (3 bytes/pixel)
        self.rec_fps = fps  # the frame rate applied for ffmpeg
        self.frame_num = 0  # the frame number
        self.active = True  # the flag for running this class
        self.prev_frame = ''.encode()  # the raw frame captured in the previous time
        self.pipe = self.init_recording(loglevel, display_num, depth)

    # start recording with ffmpeg
    def init_recording(self, loglevel, display_num, depth):
        """Spawn ffmpeg grabbing display :display_num and return the Popen
        handle whose stdout carries raw bgr<depth> frames."""
        record_cmd = [
            'ffmpeg', '-loglevel', loglevel, '-f', 'x11grab',
            '-vcodec', 'rawvideo', '-an', '-s', '%dx%d' % (self.width, self.height),
            '-i', ':%d+0,0' % display_num, '-r', str(self.rec_fps),
            '-f', 'image2pipe', '-vcodec', 'rawvideo', '-pix_fmt', 'bgr%d' % depth, '-'
        ]
        try:
            pipe = Popen(record_cmd, stdout=PIPE)
        except Exception:  # was a bare 'except:', which also swallowed SystemExit
            Logger.print_err('Could not start capturing frame')
            exit(1)
        return pipe

    # capture a raw frame from the virtual framebuffer
    def get_frame(self):
        """Block until a frame different from the previous one arrives;
        return it as a writable (height, width, 3) uint8 array."""
        while True:
            frame = self.pipe.stdout.read(self.frame_size)
            if frame != self.prev_frame:
                self.prev_frame = frame
                try:
                    # np.fromstring is deprecated (removed for binary input);
                    # frombuffer + copy keeps the old writable-array behavior.
                    raw_frame = np.frombuffer(frame, dtype=np.uint8).reshape(self.height, self.width, 3).copy()
                except Exception:  # e.g. short read at EOF makes reshape fail
                    Logger.print_err('Could not get new frame')
                    exit(1)
                return raw_frame

    # terminate capturing
    def terminate(self):
        self.active = False

    # repeat capturing raw frames
    def run(self):
        while self.active:
            raw_frame = self.get_frame()
            frame_num = self.frame_num
            self.frame_num += 1
            self.raw_frame_queue.put((frame_num, raw_frame))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.